I have trained my model with
import os
os.environ["SM_FRAMEWORK"] = "tf.keras"  # must be set before importing segmentation_models

import tensorflow as tf
from keras.models import Model
from keras.layers import Input, Conv2D, MaxPooling2D, UpSampling2D, Conv2DTranspose
from keras.layers import concatenate, BatchNormalization, Dropout, Lambda
from tensorflow.keras import backend as K
import segmentation_models as sm
def jaccard_coef(y_true, y_pred):
    y_true_flatten = K.flatten(y_true)
    y_pred_flatten = K.flatten(y_pred)
    intersection = K.sum(y_true_flatten * y_pred_flatten)
    final_coef_value = (intersection + 1.0) / (K.sum(y_true_flatten) + K.sum(y_pred_flatten) - intersection + 1.0)
    return final_coef_value
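As an aside, jaccard_coef is a smoothed Jaccard/IoU coefficient, and a quick sanity check on toy tensors behaves as expected (the tensors here are illustrative only, not from my data):

y_true = tf.constant([[0., 1., 1., 0.]])
y_pred = tf.constant([[1., 0., 0., 1.]])
print(jaccard_coef(y_true, y_true))  # perfect overlap: (2+1)/(2+2-2+1) = 1.0
print(jaccard_coef(y_true, y_pred))  # no overlap: (0+1)/(2+2-0+1) = 0.2, from the +1.0 smoothing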
def multi_unet_model(n_classes=7, image_height=512, image_width=512, image_channels=3):
    inputs = Input((image_height, image_width, image_channels))
    source_input = inputs

    # Contracting path
    c1 = Conv2D(16, (3,3), activation="relu", kernel_initializer="he_normal", padding="same")(source_input)
    c1 = Dropout(0.2)(c1)
    c1 = Conv2D(16, (3,3), activation="relu", kernel_initializer="he_normal", padding="same")(c1)
    p1 = MaxPooling2D((2,2))(c1)

    c2 = Conv2D(32, (3,3), activation="relu", kernel_initializer="he_normal", padding="same")(p1)
    c2 = Dropout(0.2)(c2)
    c2 = Conv2D(32, (3,3), activation="relu", kernel_initializer="he_normal", padding="same")(c2)
    p2 = MaxPooling2D((2,2))(c2)

    c3 = Conv2D(64, (3,3), activation="relu", kernel_initializer="he_normal", padding="same")(p2)
    c3 = Dropout(0.2)(c3)
    c3 = Conv2D(64, (3,3), activation="relu", kernel_initializer="he_normal", padding="same")(c3)
    p3 = MaxPooling2D((2,2))(c3)

    c4 = Conv2D(128, (3,3), activation="relu", kernel_initializer="he_normal", padding="same")(p3)
    c4 = Dropout(0.2)(c4)
    c4 = Conv2D(128, (3,3), activation="relu", kernel_initializer="he_normal", padding="same")(c4)
    p4 = MaxPooling2D((2,2))(c4)

    # Bottleneck
    c5 = Conv2D(256, (3,3), activation="relu", kernel_initializer="he_normal", padding="same")(p4)
    c5 = Dropout(0.2)(c5)
    c5 = Conv2D(256, (3,3), activation="relu", kernel_initializer="he_normal", padding="same")(c5)

    # Expansive path with skip connections
    u6 = Conv2DTranspose(128, (2,2), strides=(2,2), padding="same")(c5)
    u6 = concatenate([u6, c4], axis=3)
    c6 = Conv2D(128, (3,3), activation="relu", kernel_initializer="he_normal", padding="same")(u6)
    c6 = Dropout(0.2)(c6)
    c6 = Conv2D(128, (3,3), activation="relu", kernel_initializer="he_normal", padding="same")(c6)

    u7 = Conv2DTranspose(64, (2,2), strides=(2,2), padding="same")(c6)
    u7 = concatenate([u7, c3], axis=3)
    c7 = Conv2D(64, (3,3), activation="relu", kernel_initializer="he_normal", padding="same")(u7)
    c7 = Dropout(0.2)(c7)
    c7 = Conv2D(64, (3,3), activation="relu", kernel_initializer="he_normal", padding="same")(c7)

    u8 = Conv2DTranspose(32, (2,2), strides=(2,2), padding="same")(c7)
    u8 = concatenate([u8, c2], axis=3)
    c8 = Conv2D(32, (3,3), activation="relu", kernel_initializer="he_normal", padding="same")(u8)
    c8 = Dropout(0.2)(c8)
    c8 = Conv2D(32, (3,3), activation="relu", kernel_initializer="he_normal", padding="same")(c8)

    u9 = Conv2DTranspose(16, (2,2), strides=(2,2), padding="same")(c8)
    u9 = concatenate([u9, c1], axis=3)
    c9 = Conv2D(16, (3,3), activation="relu", kernel_initializer="he_normal", padding="same")(u9)
    c9 = Dropout(0.2)(c9)
    c9 = Conv2D(16, (3,3), activation="relu", kernel_initializer="he_normal", padding="same")(c9)

    # Per-pixel softmax over the n_classes channels
    outputs = Conv2D(n_classes, (1,1), activation="softmax")(c9)

    model = Model(inputs=[inputs], outputs=[outputs])
    return model
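As a quick shape check (an aside, not part of the training run), building the model with the default arguments confirms a per-pixel softmax over the 7 classes:

m = multi_unet_model()
print(m.output_shape)  # (None, 512, 512, 7)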
# Percentages of data for each class
class_percentages = {
    'urban_land': 7.8383,
    'agriculture_land': 5.6154,
    'rangeland': 9.6087,
    'forest_land': 1.2616,
    'water': 2.9373,
    'barren_land': 1.0828,
    'unknown': 0.00017716
}
# Calculate the inverse of percentages
inverse_percentages = {cls: 1 / pct for cls, pct in class_percentages.items()}
# Normalize weights
sum_inverse_percentages = sum(inverse_percentages.values())
weights = [w / sum_inverse_percentages for w in inverse_percentages.values()]
print("Class Weights:", weights)
# Loss function
dice_loss = sm.losses.DiceLoss(class_weights=weights)
focal_loss = sm.losses.CategoricalFocalLoss()
total_loss = dice_loss + (1 * focal_loss)
# Metrics
metrics = ["accuracy", jaccard_coef]
with strategy.scope():  # `strategy` is a tf.distribute.Strategy created earlier in the notebook
    # Build the model
    model = multi_unet_model()
    optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
    # Compile the model
    model.compile(optimizer=optimizer, loss=total_loss, metrics=metrics)
    # Print model summary
    model.summary()
train_size = int(len(dataset)*0.8)
train_dataset = dataset.take(train_size)
val_dataset = dataset.skip(train_size)
train_dataset = train_dataset.batch(batch_size=16)
train_dataset = strategy.experimental_distribute_dataset(train_dataset)
val_dataset = val_dataset.batch(batch_size=16)
val_dataset = strategy.experimental_distribute_dataset(val_dataset)
history = model.fit(train_dataset,
                    validation_data=val_dataset,
                    epochs=3)
then I saved the model with
with strategy.scope():
    model.save("new_3_epoch_model.h5")
and when I load the model with
load_model("/kaggle/working/data/imgs/new_3_epoch_model.h5", custom_objects={"dice_loss_plus_1focal_loss": total_loss, "jaccard_coef":jaccard_coef})
it shows this error:
TypeError Traceback (most recent call last)
Cell In[46], line 1
----> 1 load_model("/kaggle/working/data/imgs/new_3_epoch_model.h5", custom_objects={"dice_loss_plus_1focal_loss": total_loss, "jaccard_coef":jaccard_coef})
File /usr/local/lib/python3.10/site-packages/keras/src/saving/saving_api.py:183, in load_model(filepath, custom_objects, compile, safe_mode)
176 return saving_lib.load_model(
177 filepath,
178 custom_objects=custom_objects,
179 compile=compile,
180 safe_mode=safe_mode,
181 )
182 if str(filepath).endswith((".h5", ".hdf5")):
--> 183 return legacy_h5_format.load_model_from_hdf5(filepath)
184 elif str(filepath).endswith(".keras"):
185 raise ValueError(
186 f"File not found: filepath={filepath}. "
187 "Please ensure the file is an accessible `.keras` "
188 "zip file."
189 )
File /usr/local/lib/python3.10/site-packages/keras/src/legacy/saving/legacy_h5_format.py:155, in load_model_from_hdf5(filepath, custom_objects, compile)
151 training_config = json_utils.decode(training_config)
153 # Compile model.
154 model.compile(
--> 155 **saving_utils.compile_args_from_training_config(
156 training_config, custom_objects
157 )
158 )
159 saving_utils.try_build_compiled_arguments(model)
161 # Set optimizer weights.
File /usr/local/lib/python3.10/site-packages/keras/src/legacy/saving/saving_utils.py:145, in compile_args_from_training_config(training_config, custom_objects)
143 loss = _deserialize_nested_config(losses.deserialize, loss_config)
144 # Ensure backwards compatibility for losses in legacy H5 files
--> 145 loss = _resolve_compile_arguments_compat(loss, loss_config, losses)
147 # Recover metrics.
148 metrics = None
File /usr/local/lib/python3.10/site-packages/keras/src/legacy/saving/saving_utils.py:245, in _resolve_compile_arguments_compat(obj, obj_config, module)
237 """Resolves backwards compatiblity issues with training config arguments.
238
239 This helper function accepts built-in Keras modules such as optimizers,
(...)
242 this does nothing.
243 """
244 if isinstance(obj, str) and obj not in module.ALL_OBJECTS_DICT:
--> 245 obj = module.get(obj_config["config"]["name"])
246 return obj
TypeError: string indices must be integers