train and validation accuracy -- straight horizontal lines


After training the model below and plotting the train and validation accuracy, I get two straight horizontal lines (picture attached). These are the parameters:

    mid_units: 256.0
    activation: relu
    dropout: 0.34943936277356535
    optimizer: adam
    batch_size: 64.0

    for cls in os.listdir(path):
        for sound in tqdm(os.listdir(os.path.join(path, cls))):
            # Load each clip as 16 kHz mono float32
            wav = librosa.load(os.path.join(path, cls, sound), sr=16000)[0].astype(np.float32)
            tmp_samples.append(wav)
            tmp_labels.append(cls)

    # 40% train; the remaining 60% is split evenly into test and validation
    X_train, X_test, y_train, y_test = train_test_split(tmp_samples, tmp_labels, test_size=0.60, shuffle=True)
    X_test, X_valid, y_test, y_valid = train_test_split(X_test, y_test, test_size=0.50, shuffle=True)

    # Feature extraction: augmentation on the training set only
    for x, y in zip(X_train, y_train):
        extract_features_with_aug(x, y, model, samples, labels)

    for x, y in zip(X_test, y_test):
        extract_features(x, y, model, plain_samples, plain_labels)

    for x, y in zip(X_valid, y_valid):
        extract_features(x, y, model, valid_sample, valid_label)

    X_train = np.asarray(samples)
    y_train = np.asarray(labels)
    X_test = np.asarray(plain_samples)
    y_test = np.asarray(plain_labels)
    X_valid = np.asarray(valid_sample)
    y_valid = np.asarray(valid_label)

    X_train = shuffle(samples)
    y_train = shuffle(labels)
    X_test = shuffle(plain_samples)
    y_test = shuffle(plain_labels)
    X_valid = shuffle(valid_sample)
    y_valid = shuffle(valid_label)

    return X_train, y_train, X_test, y_test, X_valid, y_valid
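A likely problem in the snippet above: each list is passed to shuffle separately, so the samples and labels of each split end up in different random orders, and the arrays built by np.asarray just before are overwritten by the shuffled raw lists. A minimal sketch of keeping X and y paired, assuming shuffle is sklearn.utils.shuffle:

    from sklearn.utils import shuffle

    # Pass X and y together so both receive the same permutation and
    # row i of X still matches row i of y after shuffling.
    X_train, y_train = shuffle(X_train, y_train)
    X_test, y_test = shuffle(X_test, y_test)
    X_valid, y_valid = shuffle(X_valid, y_valid)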

Model:

    input = layers.Input(batch_shape=(None, 1024, 1), dtype=tf.float32, name='audio')

    drop = layers.Dropout(dropout_rate)(input)
    fl = layers.Flatten()(drop)
    l = layers.Dense(mid_units, activation=activation)(fl)
    ba = layers.BatchNormalization()(l)
    drop2 = layers.Dropout(dropout_rate)(ba)
    net = layers.Dense(5, activation=activation)(drop2)
    model = Model(inputs=input, outputs=net)
    model.summary()

    return model
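One thing that stands out: the output layer reuses the tuned activation (relu here) instead of softmax. With categorical_crossentropy, a relu output is not a probability distribution, and an all-zero output row makes the loss nan, which matches the training log below. A minimal change, assuming a 5-class softmax classifier is intended:

    # Classification head: softmax produces a valid probability
    # distribution for categorical_crossentropy.
    net = layers.Dense(5, activation='softmax')(drop2)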

def train_model(
        X_train, y_train, X_test, y_test, X_valid, y_valid,
        fname,  # Path where to save the model
        mid_units,
        activation,
        dropout,
        batch_size,
        optimizer
        ):

    # Generate the model
    general_model = create_model(mid_units, activation, dropout)

    general_model.compile(optimizer=optimizer, loss='categorical_crossentropy',
                          metrics=['accuracy'])

    # Create some callbacks
    callbacks = [tf.keras.callbacks.ModelCheckpoint(filepath=fname, monitor='val_loss', save_best_only=True),
                 tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.95, patience=5, verbose=1,
                                                      min_lr=0.000001)]

    history = general_model.fit(X_train, y_train, epochs=EPOCHS, validation_data=(X_valid, y_valid),
                                batch_size=batch_size, callbacks=callbacks, verbose=1)
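The snippets don't show how the class-name strings collected from the directory names are encoded, but categorical_crossentropy expects one-hot targets. A minimal sketch of that step, assuming sklearn's LabelBinarizer (hypothetical here, since the actual encoding code isn't shown):

    from sklearn.preprocessing import LabelBinarizer

    # Turn class-name strings into one-hot rows, one column per class.
    lb = LabelBinarizer()
    y_train = lb.fit_transform(y_train)   # fit on training labels only
    y_valid = lb.transform(y_valid)
    y_test = lb.transform(y_test)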

For the training history I get the same fixed values every epoch, with the loss stuck at nan:

    3027/3027 [==============================] - 29s 9ms/step - loss: nan - accuracy: 0.2150 - val_loss: nan - val_accuracy: 0.2266
    Epoch 97/100
    3027/3027 [==============================] - 31s 10ms/step - loss: nan - accuracy: 0.2150 - val_loss: nan - val_accuracy: 0.2266
    Epoch 98/100
    3027/3027 [==============================] - 41s 14ms/step - loss: nan - accuracy: 0.2150 - val_loss: nan - val_accuracy: 0.2266
    Epoch 99/100
    3027/3027 [==============================] - 32s 11ms/step - loss: nan - accuracy: 0.2150 - val_loss: nan - val_accuracy: 0.2266
    Epoch 100/100
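Since the loss is already nan, it is also worth ruling out bad inputs before blaming the model: the librosa loads or the feature extractor could be producing NaN/Inf values. A quick check, plus tf.keras.callbacks.TerminateOnNaN so training stops at the first nan batch instead of logging it for 100 epochs:

    import numpy as np

    # Any NaN/Inf in the features poisons the loss on the first batch.
    for name, arr in [('X_train', X_train), ('X_valid', X_valid)]:
        print(name, 'NaN:', np.isnan(arr).any(), 'Inf:', np.isinf(arr).any())

    # Abort training as soon as the loss becomes nan.
    callbacks.append(tf.keras.callbacks.TerminateOnNaN())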

[Plot: train and validation accuracy over epochs, both flat horizontal lines]
