# main.py
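"""Facial Expression Recognition GUI.

Shows a live webcam feed, detects faces with an OpenCV Haar cascade,
classifies each face into one of the seven FER2013 emotions with a
pre-trained ResNet-50 Keras model, and displays the class probabilities
on seven round progress bars (widget_0 .. widget_6).
"""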
import sys

import cv2
import numpy as np
import tensorflow as tf
import qdarkstyle
from PySide2 import QtCore
from PySide2.QtCore import QTimer
from PySide2.QtGui import QPixmap, QImage, QIcon
from PySide2.QtWidgets import QApplication, QWidget

from Interface import *
class MainForm(QWidget, Ui_Form):
    def __init__(self):
        super(MainForm, self).__init__()
        self.setupUi(self)
        self.setStyleSheet(qdarkstyle.load_stylesheet_pyside2())

        # Use the 'Pizza' style for all seven round progress bars.
        for i in range(7):
            getattr(self, f'widget_{i}').rpb_setBarStyle('Pizza')

        # Timer that drives the emotion predictions; started in
        # start_webcam() once the camera is running.
        self.timer = QtCore.QTimer(self)
        self.timer.timeout.connect(self.update_prediction)

        # Reset every progress bar to zero on start.
        QtCore.QTimer.singleShot(0, self.reset_all_progress)

        # The webcam is opened lazily when the "Run" button is pressed.
        self.video_capture = None

        # Separate timer that refreshes the video preview.
        self.webcam_timer = QTimer(self)
        self.webcam_timer.timeout.connect(self.update_video_stream)
        self.webcam_timer.start(60)

        self.runBtn.clicked.connect(self.start_webcam)

        self.model = tf.keras.models.load_model(
            'C:/Users/21379/Desktop/Master VMI 2024/Interaction Homme Machine/ProjetIhm/ResNet-50.h5')
        self.emotions = ['Angry', 'Disgusted', 'Fearful', 'Happy', 'Sad', 'Surprised', 'Neutral']

        self.cascPath = 'C:/Users/21379/Desktop/Master VMI 2024/Interaction Homme Machine/IHM Project/haarcascade_frontalface_default.xml'
        self.face_cascade = cv2.CascadeClassifier(self.cascPath)
        if self.face_cascade.empty():
            print("Error: could not load the face cascade classifier!")
        else:
            print("Face cascade classifier loaded successfully!")
    def start_webcam(self):
        if self.video_capture is None:
            # Open the default camera when the "Run" button is pressed.
            self.video_capture = cv2.VideoCapture(0)
            print("Webcam activated!")
            # Start the prediction/progress-bar timer once the webcam is live.
            self.timer.start(30)
    def reset_all_progress(self):
        for i in range(7):
            getattr(self, f'widget_{i}').rpb_setValue(0)
    def update_prediction(self):
        if self.video_capture is not None:
            ret, frame = self.video_capture.read()
            if not ret:
                print("Error reading frame")
                return
            gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            faces = self.face_cascade.detectMultiScale(
                gray_frame,
                scaleFactor=1.1,
                minNeighbors=4,
                minSize=(30, 30)
            )
            if len(faces) == 0:
                print('No face detected!')
                self.reset_all_progress()
            else:
                for (x, y, w, h) in faces:
                    roi_gray = gray_frame[y:y + h, x:x + w]
                    model_input = self.preprocess_input(roi_gray)
                    prediction = self.model.predict(model_input)
                    # Update each progress bar with its emotion probability.
                    # FER2013 has 7 emotion classes, matching self.emotions.
                    for i in range(7):
                        probability = prediction[0][i]
                        progress_value = int(probability * 100)  # scale to 0-100
                        widget = getattr(self, f'widget_{i}')
                        widget.rpb_setValue(progress_value)
                        # Highlight the bar in red above 50%, green otherwise.
                        if progress_value > 50:
                            widget.rpb_setLineColor((255, 0, 0))
                        else:
                            widget.rpb_setLineColor((0, 255, 0))
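    # The preprocessing below applies per-image standardization; it assumes
    # the ResNet-50 model was trained on 197x197 inputs normalized this way.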
    def preprocess_input(self, image):
        # The ResNet-50 model expects 197x197 three-channel input.
        img_width = 197
        img_height = 197
        image = cv2.resize(image, (img_width, img_height))
        # Replicate the grayscale ROI across the three channels.
        ret = np.empty((img_height, img_width, 3))
        ret[:, :, 0] = image
        ret[:, :, 1] = image
        ret[:, :, 2] = image
        x = np.expand_dims(ret, axis=0)  # add a batch dimension
        # Standardize to zero mean and unit variance.
        mean = np.mean(x)
        std = np.std(x)
        x -= mean
        x /= std
        return x
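    # Illustrative shape check (assumption: any grayscale uint8 ROI works):
    #
    #     roi = np.zeros((64, 64), dtype=np.uint8)
    #     batch = self.preprocess_input(roi)  # -> shape (1, 197, 197, 3)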
    def update_video_stream(self):
        if self.video_capture is not None:
            ret, frame = self.video_capture.read()
            if ret:
                # Convert the frame from BGR (OpenCV) to RGB (Qt).
                rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                # Wrap the frame in a QImage and display it on the label.
                height, width, channel = rgb_frame.shape
                bytes_per_line = channel * width
                q_image = QImage(rgb_frame.data, width, height, bytes_per_line,
                                 QImage.Format_RGB888)
                self.videoLabel.setPixmap(QPixmap.fromImage(q_image))
    def closeEvent(self, event):
        # Release the webcam when the application is closed.
        if self.video_capture is not None:
            self.video_capture.release()
        event.accept()
if __name__ == "__main__":
    app = QApplication(sys.argv)
    mainWindow = MainForm()
    mainWindow.setWindowTitle("Facial Expression Recognition")
    mainWindow.setWindowIcon(QIcon('icon.png'))
    mainWindow.show()
    sys.exit(app.exec_())