Emotion Detection
Emotions.py
import numpy as np
import argparse
import matplotlib.pyplot as plt
import cv2
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Flatten
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import os
import sys
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# command line argument
ap = argparse.ArgumentParser()
ap.add_argument("--mode", help="train/display",
default="display") mode = ap.parse_args().mode
# plots accuracy and loss curves
def plot_model_history(model_history):
"""
Plot Accuracy and Loss curves given the model_history
"""
fig, axs = plt.subplots(1,2,figsize=(15,5))
# summarize history for accuracy
axs[0].plot(range(1,len(model_history.history['accuracy'])+1),model_history.h
istory['accuracy'])
axs[0].plot(range(1,len(model_history.history['val_accuracy'])+1),model_histo
ry.history['val_accuracy'])
axs[0].set_title('Model Accuracy')
axs[0].set_ylabel('Accuracy')
axs[0].set_xlabel('Epoch')
axs[0].set_xticks(np.arange(1,len(model_history.history['accuracy'])+1),len(m
odel_history.history['accuracy'])/10)
axs[0].legend(['train', 'val'], loc='best')
# summarize history for loss
axs[1].plot(range(1,len(model_history.history['loss'])+1),model_history.histo
ry['loss'])
axs[1].plot(range(1,len(model_history.history['val_loss'])+1),model_history.h
istory['val_loss'])
axs[1].set_title('Model Loss')
axs[1].set_ylabel('Loss')
axs[1].set_xlabel('Epoch')
axs[1].set_xticks(np.arange(1,len(model_history.history['loss'])+1),len(model
_history.history['loss'])/10)
axs[1].legend(['train', 'val'], loc='best')
fig.savefig('plot.png')
plt.show()
# Define data generators and train only if in train mode
if mode == "train":
    train_dir = 'data/train'
    val_dir = 'data/test'
    if not os.path.exists(train_dir) or not os.path.exists(val_dir):
        print("Error: Training data directories not found.")
        print("Please download the FER-2013 dataset and place it in the data directory.")
        print("For display mode only, download the pre-trained model from:")
        print("https://drive.google.com/file/d/1FUn0XNOzf-nQV7QjbBPA6-8GLoHNNgv-/view?usp=sharing")
        sys.exit(1)
    num_train = 28709
    num_val = 7178
    batch_size = 64
    num_epoch = 50
    train_datagen = ImageDataGenerator(rescale=1./255)
    val_datagen = ImageDataGenerator(rescale=1./255)
    train_generator = train_datagen.flow_from_directory(
        train_dir,
        target_size=(48, 48),
        batch_size=batch_size,
        color_mode="grayscale",
        class_mode='categorical')
    validation_generator = val_datagen.flow_from_directory(
        val_dir,
        target_size=(48, 48),
        batch_size=batch_size,
        color_mode="grayscale",
        class_mode='categorical')
# Create the model
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=(48, 48, 1)))
model.add(Conv2D(64, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(7, activation='softmax'))
# If you want to train the same model or try other models, go for this
if mode == "train":
    model.compile(loss='categorical_crossentropy',
                  optimizer=Adam(learning_rate=0.0001, decay=1e-6),
                  metrics=['accuracy'])
    model_info = model.fit(
        train_generator,
        steps_per_epoch=num_train // batch_size,
        epochs=num_epoch,
        validation_data=validation_generator,
        validation_steps=num_val // batch_size)
    plot_model_history(model_info)
    model.save_weights('model.h5')
# emotions will be displayed on your face from the webcam feed
elif mode == "display":
    model_path = 'model.h5'
    cascade_path = 'haarcascade_frontalface_default.xml'
    if not os.path.exists(model_path):
        print("Error: Model file 'model.h5' not found!")
        print("Please download the pre-trained model from:")
        print("https://drive.google.com/file/d/1FUn0XNOzf-nQV7QjbBPA6-8GLoHNNgv-/view?usp=sharing")
        print("and place it in the src directory.")
        sys.exit(1)
    if not os.path.exists(cascade_path):
        print("Error: Face detection cascade file not found!")
        print("Please ensure 'haarcascade_frontalface_default.xml' is in the src directory.")
        sys.exit(1)
    model.load_weights(model_path)
    # prevents openCL usage and unnecessary logging messages
    cv2.ocl.setUseOpenCL(False)
    # dictionary which assigns each label an emotion (alphabetical order)
    emotion_dict = {0: "Angry", 1: "Disgusted", 2: "Fearful", 3: "Happy",
                    4: "Neutral", 5: "Sad", 6: "Surprised"}
    # start the webcam feed
    cap = cv2.VideoCapture(0)
    if not cap.isOpened():
        print("Error: Could not open webcam!")
        sys.exit(1)
    while True:
        # Find haar cascade to draw bounding box around face
        ret, frame = cap.read()
        if not ret:
            break
        facecasc = cv2.CascadeClassifier(cascade_path)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = facecasc.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=5)
        for (x, y, w, h) in faces:
            cv2.rectangle(frame, (x, y-50), (x+w, y+h+10), (255, 0, 0), 2)
            roi_gray = gray[y:y + h, x:x + w]
            cropped_img = np.expand_dims(np.expand_dims(cv2.resize(roi_gray, (48, 48)), -1), 0)
            prediction = model.predict(cropped_img)
            maxindex = int(np.argmax(prediction))
            cv2.putText(frame, emotion_dict[maxindex], (x+20, y-60),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)
        cv2.imshow('Video', cv2.resize(frame, (1600, 960), interpolation=cv2.INTER_CUBIC))
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()
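How the script is invoked (a minimal sketch, assuming it is saved as Emotions.py and run from the folder that contains model.h5, haarcascade_frontalface_default.xml, and the data/ directory, as the paths in the code above expect):

python Emotions.py --mode train      # trains the CNN on data/train, validates on data/test, saves model.h5 and plot.png
python Emotions.py --mode display    # loads model.h5 and labels faces in the webcam feed; press q to quit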
Dataset.py
import numpy as np
import pandas as pd
from PIL import Image
from tqdm import tqdm
import os
# convert string to integer
def atoi(s):
    n = 0
    for i in s:
        n = n*10 + ord(i) - ord("0")
    return n
# making folders
outer_names = ['test','train']
inner_names = ['angry', 'disgusted', 'fearful', 'happy', 'sad', 'surprised', 'neutral']
os.makedirs('data', exist_ok=True)
for outer_name in outer_names:
    os.makedirs(os.path.join('data', outer_name), exist_ok=True)
    for inner_name in inner_names:
        os.makedirs(os.path.join('data', outer_name, inner_name), exist_ok=True)
# to keep count of each category
angry = 0
disgusted = 0
fearful = 0
happy = 0
sad = 0
surprised = 0
neutral = 0
angry_test = 0
disgusted_test =0
fearful_test = 0
happy_test = 0
sad_test = 0
surprised_test =0
neutral_test = 0
df = pd.read_csv('./fer2013.csv')
mat = np.zeros((48,48),dtype=np.uint8)
print("Saving images...")
# read the csv file line by line
for i in tqdm(range(len(df))):
    txt = df['pixels'][i]
    words = txt.split()
    # the image size is 48x48
    for j in range(2304):
        xind = j // 48
        yind = j % 48
        mat[xind][yind] = atoi(words[j])
    img = Image.fromarray(mat)
    # save into the data/<split>/<emotion> folders created above;
    # the first 28709 rows of fer2013.csv form the training split
    # train
    if i < 28709:
        if df['emotion'][i] == 0:
            img.save('data/train/angry/im'+str(angry)+'.png')
            angry += 1
        elif df['emotion'][i] == 1:
            img.save('data/train/disgusted/im'+str(disgusted)+'.png')
            disgusted += 1
        elif df['emotion'][i] == 2:
            img.save('data/train/fearful/im'+str(fearful)+'.png')
            fearful += 1
        elif df['emotion'][i] == 3:
            img.save('data/train/happy/im'+str(happy)+'.png')
            happy += 1
        elif df['emotion'][i] == 4:
            img.save('data/train/sad/im'+str(sad)+'.png')
            sad += 1
        elif df['emotion'][i] == 5:
            img.save('data/train/surprised/im'+str(surprised)+'.png')
            surprised += 1
        elif df['emotion'][i] == 6:
            img.save('data/train/neutral/im'+str(neutral)+'.png')
            neutral += 1
    # test
    else:
        if df['emotion'][i] == 0:
            img.save('data/test/angry/im'+str(angry_test)+'.png')
            angry_test += 1
        elif df['emotion'][i] == 1:
            img.save('data/test/disgusted/im'+str(disgusted_test)+'.png')
            disgusted_test += 1
        elif df['emotion'][i] == 2:
            img.save('data/test/fearful/im'+str(fearful_test)+'.png')
            fearful_test += 1
        elif df['emotion'][i] == 3:
            img.save('data/test/happy/im'+str(happy_test)+'.png')
            happy_test += 1
        elif df['emotion'][i] == 4:
            img.save('data/test/sad/im'+str(sad_test)+'.png')
            sad_test += 1
        elif df['emotion'][i] == 5:
            img.save('data/test/surprised/im'+str(surprised_test)+'.png')
            surprised_test += 1
        elif df['emotion'][i] == 6:
            img.save('data/test/neutral/im'+str(neutral_test)+'.png')
            neutral_test += 1
print("Done!")
OUTPUT