My face recognition code is working correctly in the sense that it can recognize a face that already exists in the pickle file and label it correctly. My problem is that even when a face does not exist in the pickle file, it still "recognizes" it and assigns a random label from the database (pickle file). How can I get it to return "unknown" for faces that are not in the pickle file? I think the problem is here:
for i in range(detections.shape[2]):
    confidence = detections[0, 0, i, 2]
    if confidence > 0.95:
        box = detections[0, 0, i, 3:7] * np.array([frame.shape[1], frame.shape[0], frame.shape[1], frame.shape[0]])
        (x, y, x2, y2) = box.astype("int")
        face = frame[y:y2, x:x2, :]
        if face is not None and not face.size == 0:
            resized_img = cv2.resize(face, (50, 50)).flatten().reshape(1, -1)
        else:
            continue
        if len(self.faces_data) > 0:
            predicted_name = self.lda_classifier.predict(resized_img)
            name = str(predicted_name[0]) if predicted_name[0] in self.labels else "unknown"
        else:
            name = "No face data available"
Here is the entire code:
import os
import cv2
import numpy as np
import mysql.connector
import pickle
import time
from datetime import datetime
from win32com.client import Dispatch
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from kivy.uix.screenmanager import Screen
from kivy.uix.button import Button
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.gridlayout import GridLayout
class Recognize(Screen):
    def __init__(self, **kwargs):
        super(Recognize, self).__init__(**kwargs)
        self.video = cv2.VideoCapture(0)
        self.modelFile = "models/res10_300x300_ssd_iter_140000.caffemodel"
        self.configFile = "models/deploy.prototxt.txt"
        self.net = cv2.dnn.readNetFromCaffe(self.configFile, self.modelFile)
        self.faces_data = []
        self.labels = []
        self.load_faces_data()
        self.lda_classifier = LinearDiscriminantAnalysis()
        if len(self.faces_data) > 0:
            self.lda_classifier.fit(self.faces_data, self.labels)
        self.db = mysql.connector.connect(
            host='localhost',
            user='root',
            password='',
            database='face_data'
        )
        self.cursor = self.db.cursor()
        self.attendance_recorded = False
        self.layout = GridLayout(cols=1)
        self.start_button = Button(text="Start Recognition")
        self.start_button.bind(on_press=self.start_recognition)
        self.layout.add_widget(self.start_button)
        self.add_widget(self.layout)
        self.navigation_bar = BoxLayout(orientation='horizontal', size_hint=(1, None), height=50)
        self.navigation_bar.add_widget(Button(text="add", on_press=self.go_to_add_faces))
        self.navigation_bar.add_widget(Button(text="recognize", on_press=self.go_to_face_recognition))
        self.navigation_bar.add_widget(Button(text="report", on_press=self.go_to_report))
        self.layout.add_widget(self.navigation_bar)

    def go_to_add_faces(self, instance):
        self.manager.current = "add"

    def go_to_face_recognition(self, instance):
        self.manager.current = "recognize"

    def go_to_report(self, instance):
        self.manager.current = "report"
    def start_recognition(self, instance):
        while True:
            ret, frame = self.video.read()
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)), 1.0, (300, 300), (104, 117, 123))
            self.net.setInput(blob)
            detections = self.net.forward()
            face_count = 0
            for i in range(detections.shape[2]):
                confidence = detections[0, 0, i, 2]
                if confidence > 0.95:
                    box = detections[0, 0, i, 3:7] * np.array([frame.shape[1], frame.shape[0], frame.shape[1], frame.shape[0]])
                    (x, y, x2, y2) = box.astype("int")
                    face = frame[y:y2, x:x2, :]
                    if face is not None and not face.size == 0:
                        resized_img = cv2.resize(face, (50, 50)).flatten().reshape(1, -1)
                    else:
                        continue
                    if len(self.faces_data) > 0:
                        predicted_name = self.lda_classifier.predict(resized_img)
                        name = str(predicted_name[0]) if predicted_name[0] in self.labels else "unknown"
                    else:
                        name = "No face data available"
                    ts = time.time()
                    date = datetime.fromtimestamp(ts).date()
                    timestamp = datetime.fromtimestamp(ts).strftime("%H:%M")
                    cv2.putText(frame, name, (x, y - 15), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 255, 255), 1)
                    cv2.rectangle(frame, (x, y), (x2, y2), (50, 50, 255), 1)
                    face_count += 1
            if face_count == 0:
                cv2.putText(frame, "No face detected", (10, 30), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 2)
            elif face_count > 1:
                cv2.putText(frame, "Multiple faces detected", (10, 30), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 2)
            cv2.imshow("Frame", frame)
            k = cv2.waitKey(1)
            if k == ord("o") and face_count == 1 and not self.attendance_recorded:
                speak = Dispatch("SAPI.SpVoice")
                speak.Speak('thank you ' + name)
                time.sleep(1)
                self.cursor = self.db.cursor()
                self.cursor.execute("INSERT INTO attendance (user_name, date, time) VALUES (%s, %s, %s)",
                                    (name, date, timestamp))
                self.db.commit()
                self.attendance_recorded = True
            elif k != ord("o"):
                self.attendance_recorded = False
            if k == ord("q"):
                break
        self.db.close()
        self.video.release()
        cv2.destroyAllWindows()
    def load_faces_data(self):
        if os.path.exists('data/names_faces.pkl'):
            with open('data/names_faces.pkl', 'rb') as f:
                data = pickle.load(f)
            for name, faces in data.items():
                self.faces_data.extend(faces)
                self.labels.extend([name] * len(faces))
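For reference, the data/names_faces.pkl file that load_faces_data reads is a dict mapping each person's name to a list of flattened 50x50 face crops. A minimal sketch of that structure (the names and array contents here are just placeholders) looks like this:

import pickle
import numpy as np

# Placeholder structure only: each value is a list of flattened 50x50x3 face crops
data = {
    "alice": [np.zeros(7500, dtype=np.uint8) for _ in range(5)],
    "bob": [np.zeros(7500, dtype=np.uint8) for _ in range(5)],
}
with open("data/names_faces.pkl", "wb") as f:
    pickle.dump(data, f)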