I trained an object detection model using Google's AutoML. I exported the model as a .tflite file and ran some predictions via Python, but I get much worse results and lower scores than the ones shown on Vertex for the same images. The model is supposed to find nuts that still have the shell intact. Here is the prediction shown on Vertex, its score, and then the one I get from the exported model via Python.

As you can see, in this case the exported model failed to predict the correct bounding box. In other cases the bounding box is correct but the score is much lower than the one shown on Vertex.
Here's the code I used:
import os
# must be set before TensorFlow is imported, otherwise it has no effect
os.environ["TF_ENABLE_ONEDNN_OPTS"] = "0"

import cv2
import numpy as np
import tensorflow as tf


def draw_rect(image, box):
    # box is [y_min, x_min, y_max, x_max] in normalized coordinates
    y_min = int(max(1, (box[0] * image.shape[0])))
    x_min = int(max(1, (box[1] * image.shape[1])))
    y_max = int(min(image.shape[0], (box[2] * image.shape[0])))
    x_max = int(min(image.shape[1], (box[3] * image.shape[1])))
    # draw a rectangle on the image
    cv2.rectangle(image, (x_min, y_min), (x_max, y_max), (255, 0, 0), 2)


# Load the TFLite model and allocate tensors.
interpreter = tf.lite.Interpreter(model_path="langhe_gusci_interi.tflite")

# Get input and output tensor details.
input_details = interpreter.get_input_details()
print(input_details)
output_details = interpreter.get_output_details()

interpreter.allocate_tensors()

dir = "shell"
for file in os.listdir(dir):
    print(file)
    img = cv2.imread(os.path.join(dir, file))
    new_img = cv2.resize(img, (512, 512))

    interpreter.set_tensor(input_details[0]['index'], [new_img])
    interpreter.invoke()

    rects = interpreter.get_tensor(output_details[0]['index'])
    scores = interpreter.get_tensor(output_details[2]['index'])

    # print the highest-scoring prediction
    print(rects[0][0])
    print(scores[0][0])

    draw_rect(new_img, rects[0][0])
    cv2.imshow("image", new_img)
    cv2.waitKey(0)
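
For reference, this is how I inspect what the exported model actually expects on input (shape, dtype, quantization), since I suspect my preprocessing (just a cv2.resize on the BGR image from cv2.imread) may not match what Vertex does internally. It only uses the standard tf.lite.Interpreter metadata dicts; the model file is the same one as above.

import tensorflow as tf

interpreter = tf.lite.Interpreter(model_path="langhe_gusci_interi.tflite")
interpreter.allocate_tensors()

inp = interpreter.get_input_details()[0]
print("name: ", inp["name"])
print("shape:", inp["shape"])          # e.g. [1, 512, 512, 3]
print("dtype:", inp["dtype"])          # uint8 vs float32 tells me whether normalization is expected
print("quant:", inp["quantization"])   # (scale, zero_point); non-zero means a quantized input

for out in interpreter.get_output_details():
    print(out["name"], out["shape"], out["dtype"])

This prints only metadata, so it doesn't change the predictions; I'm including it in case the expected dtype or quantization reveals why my results differ from Vertex.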

