I'm building a model with a custom loss layer. The model trains for one epoch and then fails with the following error, which I can't figure out how to solve:
AssertionError: Exception encountered when calling layer 'model_21' (type Functional).
Could not compute output KerasTensor(type_spec=TensorSpec(shape=(None, 3, 1), dtype=tf.float32, name=None), name='Placeholder_2:0', description="created by layer 'out_layer'")
Call arguments received by layer 'model_21' (type Functional):
• inputs=tf.Tensor(shape=(1, 72, 72, 28), dtype=float32)
• training=False
• mask=None
As far as I can tell, my training and validation data are in the right format (I've added a shape check after the fit call below).
This is my code:
import tensorflow as tf
from tensorflow.keras import layers, models
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
from tensorflow.keras import backend as K
# loss function for angle loss
def angle_loss(y_true, y_pred):
    y_true = tf.squeeze(y_true, axis=-1)
    y_pred = tf.squeeze(y_pred, axis=-1)
    y_true_normalized = tf.math.l2_normalize(y_true, axis=-1)
    y_pred_normalized = tf.math.l2_normalize(y_pred, axis=-1)
    cosine_similarity = tf.reduce_sum(tf.multiply(y_true_normalized, y_pred_normalized), axis=-1)
    angle_loss = 1 - cosine_similarity
    return angle_loss
# loss function for position loss
def position_loss(n_true, pos_true, y_pred):
    norm_n_true = tf.linalg.norm(n_true, axis=1)
    v = pos_true - y_pred
    abs_dot_v_n = K.abs(K.sum(tf.multiply(n_true, v), axis=1))
    distance = tf.divide(abs_dot_v_n, norm_n_true)
    distance_avg = K.mean(distance)
    return distance_avg
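# Sanity check (not part of the training script itself): I call both losses on
# dummy (batch, 3, 1) tensors, which is the shape the Reshape heads below produce.
# angle_loss should give a per-sample tensor of shape (batch,), position_loss a
# scalar mean distance.
_dummy_true = tf.random.normal((2, 3, 1))
_dummy_pred = tf.random.normal((2, 3, 1))
print(angle_loss(_dummy_true, _dummy_pred))
print(position_loss(_dummy_true, _dummy_true, _dummy_pred))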
class CustomLossLayer(layers.Layer):
    def __init__(self, normal_loss, position_loss, **kwargs):
        super(CustomLossLayer, self).__init__(**kwargs)
        self.normal_loss_weight = self.add_weight(name='normal_loss_weight', initializer='ones', trainable=True)
        self.position_loss_weight = self.add_weight(name='position_loss_weight', initializer='ones', trainable=True)
        self.normal_loss = normal_loss
        self.position_loss = position_loss

    def call(self, inputs):
        y_true, y_pred = inputs
        normal_loss = self.normal_loss(y_true[0], y_pred[0])
        position_loss = self.position_loss(y_true[0], y_true[1], y_pred[1])
        loss = self.normal_loss_weight * normal_loss + self.position_loss_weight * position_loss
        self.add_loss(loss, inputs=inputs)
        self.add_metric(normal_loss, name='angle_loss')
        self.add_metric(position_loss, name='position_loss')
        return y_pred
def build_model(input_shape):
    input_layer = layers.Input(shape=input_shape, name="input_1")
    true_labels = [
        tf.keras.Input(shape=(3, 1), name="true_labels_pos"),
        tf.keras.Input(shape=(3, 1), name="true_labels_normal"),
    ]
    conv1 = layers.Conv3D(32, kernel_size=(3, 3, 3), activation='relu', padding='same')(input_layer)
    maxpool1 = layers.MaxPooling3D(pool_size=(2, 2, 2))(conv1)
    conv2 = layers.Conv3D(64, kernel_size=(3, 3, 3), activation='relu', padding='same')(maxpool1)
    maxpool2 = layers.MaxPooling3D(pool_size=(2, 2, 2))(conv2)
    flatten = layers.Flatten()(maxpool2)
    dense = layers.Dense(128, activation='relu')(flatten)
    # Normal head
    normal_output = layers.Dense(3, name='normal_head')(dense)
    normal_output = layers.Reshape((3, 1), name='normal_output')(normal_output)
    # Position head
    position_output = layers.Dense(3, name='position_head')(dense)
    position_output = layers.Reshape((3, 1), name='position_output')(position_output)
    pred_labels = [normal_output, position_output]
    out = CustomLossLayer(normal_loss=angle_loss, position_loss=position_loss, name="out_layer")([true_labels, pred_labels])
    model = models.Model([input_layer, true_labels], out)
    # Compile the model
    model.compile(loss=None, optimizer='adam', weighted_metrics=[])
    return model
# Build and train the model
model = build_model((72, 72, 28, 1))
early_stopping = EarlyStopping(monitor='val_loss', patience=3, verbose=1, restore_best_weights=True)
model_checkpoint = ModelCheckpoint('best_model.h5', monitor='val_loss', save_best_only=True, verbose=1)
# Train the model
history = model.fit([train_scans, train_position, train_normal],
                    validation_data=([val_scans, val_position, val_normal]),
                    epochs=10, batch_size=1, callbacks=[early_stopping, model_checkpoint])
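For reference, this is the shape check I mentioned above. It just prints the shapes of the arrays passed to fit; I expect scans of shape (N, 72, 72, 28, 1) and position/normal labels of shape (N, 3, 1), matching the Input layers declared in build_model (I'm assuming here that the data are NumPy arrays):

# shape check: expected scans (N, 72, 72, 28, 1), position/normal (N, 3, 1)
for name, arr in [("train_scans", train_scans), ("train_position", train_position),
                  ("train_normal", train_normal), ("val_scans", val_scans),
                  ("val_position", val_position), ("val_normal", val_normal)]:
    print(name, arr.shape)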
How can I resolve this error?