Version compatibility issue of typing_extensions persists


I have trained an ML model, and the versions used while training it were:

tensorflow: 2.6.4
keras: 2.6.0

Now I am trying to load the model for integration with Flask, and I am getting an issue with the typing_extensions version:

{'error': "cannot import name 'Never' from 'typing_extensions' (D:\\pythonProject\\venv\\lib\\site-packages\\typing_extensions.py)"}
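To narrow it down, I ran a small check (just a quick diagnostic sketch, not part of the app) to see which copy of typing_extensions is actually being imported in the venv and whether it defines Never, which is what the import error complains about:

import typing_extensions

# which file is being imported, and does it define Never?
print("module file:", typing_extensions.__file__)
print("has Never:", hasattr(typing_extensions, "Never"))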

These are the versions of the libraries I have been using in PyCharm:

audioread                    3.0.1
blinker                      1.7.0
cachetools                   4.2.4
certifi                      2024.2.2
cffi                         1.16.0
charset-normalizer           3.3.2
clang                        5.0
click                        8.1.7
colorama                     0.4.6
decorator                    5.1.1
Flask                        3.0.2
flatbuffers                  1.12
gast                         0.4.0
google-auth                  1.35.0
google-auth-oauthlib         0.4.6
google-pasta                 0.2.0
grpcio                       1.62.1
h5py                         3.1.0
idna                         3.6
importlib_metadata           7.0.2
itsdangerous                 2.1.2
Jinja2                       3.1.3
joblib                       1.3.2
keras                        2.6.0
Keras-Preprocessing          1.1.2
lazy_loader                  0.3
libclang                     18.1.1
librosa                      0.10.1
llvmlite                     0.42.0
Markdown                     3.6
markdown-it-py               3.0.0
MarkupSafe                   2.1.5
mdurl                        0.1.2
ml-dtypes                    0.3.2
msgpack                      1.0.8
namex                        0.0.7
numba                        0.59.0
numpy                        1.22.0
oauthlib                     3.2.2
opt-einsum                   3.3.0
optree                       0.10.0
packaging                    24.0
pip                          24.0
platformdirs                 4.2.0
pooch                        1.8.1
protobuf                     3.20.0
pyasn1                       0.5.1
pyasn1-modules               0.3.0
pycparser                    2.21
Pygments                     2.17.2
requests                     2.31.0
requests-oauthlib            1.4.0
rich                         13.7.1
rsa                          4.9
scikit-learn                 1.4.1.post1
scipy                        1.7.3
setuptools                   68.2.0
six                          1.15.0
soundfile                    0.12.1
soxr                         0.3.7
tensorboard                  2.6.0
tensorboard-data-server      0.6.1
tensorboard-plugin-wit       1.8.1
tensorflow                   2.6.4
tensorflow-estimator         2.6.0
tensorflow-io-gcs-filesystem 0.31.0
termcolor                    1.1.0
threadpoolctl                3.3.0
typing-extensions            3.10.0.2
urllib3                      2.2.1
Werkzeug                     3.0.1
wheel                        0.41.2
wrapt                        1.12.1
zipp                         3.18.1

I have tried uninstalling and reinstalling typing_extensions, but I keep encountering the same issue.
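For reference, this is roughly what I ran in the project venv to reinstall it (commands from memory, so the exact invocation may have differed slightly):

pip uninstall typing-extensions
pip install typing-extensions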

The version of typing_extensions is:

typing-extensions            3.10.0.2

Below is the code I am running.

test_file.py


import requests

url = 'http://localhost:5000/predict'  # Change the URL if your Flask app is running on a different port
data = {
    'file_path': r'D:\pythonProject\audios\n.ogg'  # Replace with the actual path to your audio file
}

response = requests.post(url, json=data)
print(response.json())

This is the main.py file:

from flask import Flask, request, jsonify
from tensorflow.keras.models import model_from_json

import pickle
import librosa
import numpy as np
import os
import logging

# Set up logging
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)

# Suppress Tensorflow warnings
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

# Set Keras backend to TensorFlow
os.environ['KERAS_BACKEND'] = 'tensorflow'



import tensorflow as tf
print("version of tensorflow: " + tf.__version__)

import keras
print("version of keras: " + keras.__version__)


app = Flask(__name__)

# Load Keras model architecture and weights
with open('CNN_model.json', 'r') as json_file:
    loaded_model_json = json_file.read()
loaded_model = model_from_json(loaded_model_json)
loaded_model.load_weights("best_model1_weights.h5")
print("Loaded model from disk")

# Load scalers and encoders
with open('scaler2.pickle', 'rb') as f:
    scaler2 = pickle.load(f)

with open('encoder2.pickle', 'rb') as f:
    encoder2 = pickle.load(f)

# Define feature extraction functions
def zcr(data, frame_length, hop_length):
    zcr = librosa.feature.zero_crossing_rate(data, frame_length=frame_length, hop_length=hop_length)
    return np.squeeze(zcr)

def rmse(data, frame_length=2048, hop_length=512):
    # librosa >= 0.10 requires y to be passed as a keyword argument
    rmse = librosa.feature.rms(y=data, frame_length=frame_length, hop_length=hop_length)
    return np.squeeze(rmse)

def mfcc(data, sr, frame_length=2048, hop_length=512, flatten=True):
    # librosa >= 0.10 requires y to be passed as a keyword argument
    mfcc = librosa.feature.mfcc(y=data, sr=sr)
    return np.squeeze(mfcc.T) if not flatten else np.ravel(mfcc.T)

def extract_features(data, sr=22050, frame_length=2048, hop_length=512):
    result = np.array([])
    result = np.hstack((result,
                        zcr(data, frame_length, hop_length),
                        rmse(data, frame_length, hop_length),
                        mfcc(data, sr, frame_length, hop_length)
                       ))
    return result

def get_predict_feat(path):
    d, s_rate = librosa.load(path, duration=2.5, offset=0.6)
    res = extract_features(d)
    result = np.array(res)
    result = np.reshape(result, newshape=(1, 2376))
    i_result = scaler2.transform(result)
    final_result = np.expand_dims(i_result, axis=2)
    return final_result


# Define prediction route
@app.route('/predict', methods=['POST'])
def predict():
    try:
        # Get file path from request
        file_path = request.json['file_path']
        logger.info(f"Received request for prediction with file path: {file_path}")

        # Make prediction
        res = get_predict_feat(file_path)
        predictions = loaded_model.predict(res)
        emotion_labels = encoder2.categories_[0]
        results = {emotion: round(probability * 100, 2) for emotion, probability in zip(emotion_labels, predictions[0])}

        logger.info(f"Predictions: {results}")
        return jsonify(results)
    except Exception as e:
        logger.error(f"An error occurred during prediction: {e}")
        return jsonify({'error': str(e)}), 500

if __name__ == '__main__':
    app.run(debug=True)
