[BREAKING CHANGE] deel-lip upgrade to Keras 3.0 #91

Open

wants to merge 44 commits into base: keras3

Changes from 1 commit

Commits (44)
1838266
fix(Keras3): layer.input.shape instead of layer.input_shape
cofri Aug 1, 2024
7bd62e3
fix(Keras 3): argument order changed in Layer.add_weight()
cofri Aug 1, 2024
c980436
fix (Keras 3): tf.Variable.read_value() was removed
cofri Aug 1, 2024
8cae0fc
fix (Keras 3): Reduction API removed, replaced with string
cofri Aug 1, 2024
72aca2f
fix(Keras 3): argument order changed in Loss.__init__()
cofri Aug 1, 2024
85bd915
fix(Keras 3): Input layer must have a shape as a tuple
cofri Aug 1, 2024
813b77d
fix(Keras 3): model.save() does not accept TF SavedModel format
cofri Aug 1, 2024
cef9657
fix(Keras 3): model.save(path) does not create path if not exist
cofri Aug 2, 2024
235a5ac
fix(Keras 3): Adam optimizer does not support `lr` argument anymore
cofri Aug 1, 2024
7faac08
fix(Keras 3): Conv2DTranspose has no arg `output_padding` anymore
cofri Aug 1, 2024
f166742
fix(Keras 3): argument order in Sequential
cofri Sep 9, 2024
ef426b8
feat(callbacks): upgrade to Keras 3
cofri Aug 6, 2024
51e8f28
feat(compute_layer_sv): upgrade to Keras 3
cofri Aug 6, 2024
21b02c6
feat(constraints): upgrade to Keras 3
cofri Aug 6, 2024
aecf96e
feat(initializers): upgrade to Keras 3
cofri Aug 6, 2024
730533d
feat(losses): upgrade to Keras 3
cofri Aug 6, 2024
a70137b
feat(metrics): upgrade to Keras 3
cofri Aug 6, 2024
7a9af98
feat(model): upgrade to Keras 3
cofri Aug 6, 2024
42bf583
feat(normalizers): upgrade to Keras 3
cofri Aug 6, 2024
f32b392
feat(regularizers): upgrade to Keras 3
cofri Aug 6, 2024
fbde10d
feat(utils): upgrade to Keras 3
cofri Aug 6, 2024
36bd039
feat(activations): upgrade to Keras 3
cofri Aug 6, 2024
168ed33
feat(unconstrained): upgrade to Keras 3
cofri Aug 6, 2024
7a461fe
feat(pooling): upgrade to Keras 3
cofri Aug 6, 2024
d105cfa
feat(dense): upgrade to Keras 3
cofri Aug 7, 2024
e48ec06
feat(convolutional): upgrade to Keras 3
cofri Aug 7, 2024
e504dd8
feat(init): upgrade to Keras 3
cofri Aug 7, 2024
2f7b8d6
feat(test_activations): upgrade to Keras 3
cofri Aug 7, 2024
8b3a403
feat(test_compute_layer_sv): upgrade to Keras 3
cofri Aug 8, 2024
c096ce1
feat(test_condense): upgrade to Keras 3
cofri Aug 8, 2024
0f5ce8d
feat(test_initializers): upgrade to Keras 3
cofri Aug 8, 2024
708e2b0
feat(test_losses): upgrade to Keras 3
cofri Aug 8, 2024
4c73c9d
feat(test_metrics): upgrade to Keras 3
cofri Aug 8, 2024
9e8bbb1
feat(test_models): upgrade to Keras 3
cofri Aug 8, 2024
c8d3d15
feat(test_normalizers): upgrade to Keras 3
cofri Aug 8, 2024
ef6e0c1
feat(test_regularizers): upgrade to Keras 3
cofri Aug 8, 2024
99b2f96
feat(test_unconstrained_layers): upgrade to Keras 3
cofri Aug 8, 2024
cb764ed
feat(test_layers): upgrade to Keras 3
cofri Aug 8, 2024
0ce0a71
feat(layers): save/load own variables in dense and conv
cofri Aug 8, 2024
f0cd52d
feat(notebooks): upgrade to Keras 3
cofri Sep 6, 2024
e697bbb
chore: enforce TF>=2.16 and Keras 3
cofri Aug 8, 2024
b462401
chore: bump to deel-lip version 2.0.0
cofri Aug 8, 2024
ce88cc1
chore: clean github actions to latest Python and TF versions
cofri Sep 6, 2024
ed92ee3
fix(callbacks): Keras SVD op is not as expected
cofri Sep 6, 2024
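
Taken together, these commits track the breaking API changes between Keras 2 and Keras 3. As a minimal sketch (not part of the PR; the toy model and file name are hypothetical), several of the changes named above look like this in Keras 3:

import keras
from keras import layers

# Input shape must be given as a tuple (commit 85bd915).
inputs = keras.Input(shape=(28, 28, 1))
outputs = layers.Dense(10)(layers.Flatten()(inputs))
model = keras.Model(inputs, outputs)

# The `lr` argument was removed from optimizers; use `learning_rate` (commit 235a5ac).
optimizer = keras.optimizers.Adam(learning_rate=1e-3)

# The Reduction API is gone; reduction is now a plain string (commit 8cae0fc).
loss = keras.losses.MeanSquaredError(reduction="sum_over_batch_size")
model.compile(optimizer=optimizer, loss=loss)

# `layer.input_shape` no longer exists; use `layer.input.shape` (commit 1838266).
print(model.layers[-1].input.shape)

# model.save() no longer accepts the TF SavedModel format;
# save to the native `.keras` format instead (commit 813b77d).
model.save("model.keras")

The rest of this page shows the diff of a single one of these commits, f32b392 (feat(regularizers): upgrade to Keras 3).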
feat(regularizers): upgrade to Keras 3
In Keras 3, `keras.ops.matmul()` only takes the two tensors to multiply; it no longer accepts arguments such as `transpose_a` and `transpose_b` (a before/after sketch follows the commit header below).
cofri committed Sep 9, 2024
commit f32b392ed4be7bd581418145d3222230b996e9a9
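
Concretely, a hedged before/after sketch of this change (the 3x5 kernel `w` is hypothetical):

import numpy as np
import keras.ops as K

# Hypothetical 3x5 kernel, for illustration only.
w = np.random.normal(size=(3, 5)).astype("float32")

# Keras 2 / TF:  wwt = tf.matmul(w, w, transpose_b=True)
# Keras 3: express the transpose explicitly with keras.ops.transpose.
wwt = K.matmul(w, K.transpose(w))  # shape (3, 3)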
45 changes: 24 additions & 21 deletions deel/lip/regularizers.py
@@ -31,9 +31,9 @@
 import warnings
 from abc import ABC, abstractmethod
 
-import tensorflow as tf
-from tensorflow.keras.regularizers import Regularizer
-from tensorflow.keras.utils import register_keras_serializable
+import keras
+import keras.ops as K
+from keras.saving import register_keras_serializable
 
 
 class Lorth(ABC):
@@ -133,14 +133,14 @@ def compute_lorth(self, w):
         """Compute regularization term based on Lorth.
 
         Args:
-            w (tf.Tensor): the convolutional kernel.
+            w (Tensor): the convolutional kernel.
 
         Returns:
-            tf.Tensor: value of the regularization term.
+            Tensor: value of the regularization term.
         """
         output = self._compute_conv_kk(w)
         target = self._compute_target(w, output.shape)
-        return tf.reduce_sum(tf.square(output - target)) - self.delta
+        return K.sum(K.square(output - target)) - self.delta
 
 
 class Lorth2D(Lorth):
@@ -161,41 +161,41 @@ def __init__(self, kernel_shape=None, stride=1, conv_transpose=False) -> None:
         super(Lorth2D, self).__init__(dim, kernel_shape, stride, conv_transpose)
 
     def _compute_conv_kk(self, w):
-        w_reshape = tf.transpose(w, perm=[3, 0, 1, 2])
-        w_padded = tf.pad(
+        w_reshape = K.transpose(w, axes=[3, 0, 1, 2])
+        w_padded = K.pad(
             w_reshape,
-            paddings=[
+            pad_width=[
                 [0, 0],
                 [self.padding, self.padding],
                 [self.padding, self.padding],
                 [0, 0],
             ],
         )
-        return tf.nn.conv2d(w_padded, w, self.stride, padding="VALID")
+        return K.conv(w_padded, w, self.stride, padding="valid")
 
     def _compute_target(self, w, convKxK_shape):
         C_out = w.shape[-1]
         outm3 = convKxK_shape[-3]
         outm2 = convKxK_shape[-2]
-        ct = tf.cast(tf.math.floor(outm2 / 2), dtype=tf.int32)
+        ct = K.cast(K.floor(outm2 / 2), dtype="int32")
 
-        target_zeros = tf.zeros((outm3 * outm2 - 1, C_out, C_out))
-        target = tf.concat(
+        target_zeros = K.zeros((outm3 * outm2 - 1, C_out, C_out))
+        target = K.concatenate(
             [
                 target_zeros[: ct * outm2 + ct],
-                tf.expand_dims(tf.eye(C_out), axis=0),
+                K.expand_dims(K.eye(C_out), axis=0),
                 target_zeros[ct * outm2 + ct :],
             ],
             axis=0,
         )
 
-        target = tf.reshape(target, (outm3, outm2, C_out, C_out))
-        target = tf.transpose(target, [2, 0, 1, 3])
+        target = K.reshape(target, (outm3, outm2, C_out, C_out))
+        target = K.transpose(target, axes=[2, 0, 1, 3])
         return target
 
 
 @register_keras_serializable("deel-lip", "LorthRegularizer")
-class LorthRegularizer(Regularizer):
+class LorthRegularizer(keras.Regularizer):
     def __init__(
         self,
         kernel_shape=None,
@@ -249,7 +249,7 @@ def get_config(self):
 
 
 @register_keras_serializable("deel-lip", "OrthDenseRegularizer")
-class OrthDenseRegularizer(Regularizer):
+class OrthDenseRegularizer(keras.Regularizer):
     def __init__(self, lambda_orth=1.0) -> None:
         """
         Regularize a Dense kernel to be orthogonal (all singular values are equal to 1)
@@ -264,9 +264,12 @@ def __init__(self, lambda_orth=1.0) -> None:
     def _dense_orth_dist(self, w):
         transp_b = w.shape[0] <= w.shape[1]
         # W.W^T if h<=w; W^T.W otherwise
-        wwt = tf.matmul(w, w, transpose_a=not transp_b, transpose_b=transp_b)
-        idx = tf.eye(wwt.shape[0])
-        return tf.reduce_sum(tf.square(wwt - idx))
+        if transp_b:
+            wwt = K.matmul(w, K.transpose(w))
+        else:
+            wwt = K.matmul(K.transpose(w), w)
+        idx = K.eye(wwt.shape[0])
+        return K.sum(K.square(wwt - idx))
 
     def __call__(self, x):
         return self.lambda_orth * self._dense_orth_dist(x)
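
For context, a minimal usage sketch (not part of this diff) attaching the updated OrthDenseRegularizer to a standard keras Dense layer; the layer width is arbitrary:

import keras
from deel.lip.regularizers import OrthDenseRegularizer

# Penalize deviation of the Dense kernel from orthogonality.
dense = keras.layers.Dense(
    64,
    kernel_regularizer=OrthDenseRegularizer(lambda_orth=1.0),
)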