Real-Time Video-Based Stress Detector

This section describes the video-based stress detector and walks through its use cases.

This real-time video-based stress detector is developed in Python 3.9 and can run in the background. The model predicts the user's stress level from their facial expressions and emotions.

Facial landmark detection combined with emotion recognition is used to predict whether a person is stressed or not.
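The script shown below depends on a handful of third-party Python packages. This list is inferred from the imports the script uses and is an assumption about the environment rather than an official requirements file:

  • opencv-python (imported as cv2)

  • dlib

  • imutils

  • numpy

  • scipy

  • tensorflow (for load_model and img_to_array)

  • matplotlib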

Use Cases:

  • Stressed User:

  • Not Stressed User:

Code ZIP:

# Dependencies (tensorflow.keras is assumed here; a standalone keras install also works)
from scipy.spatial import distance as dist
from imutils import face_utils
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.models import load_model
import numpy as np
import matplotlib.pyplot as plt
import imutils
import dlib
import cv2

def eye_brow_distance(leye, reye):
    """Return the Euclidean distance between the two inner eyebrow points
    and record it so the values can be normalized and plotted later."""
    global points
    distq = dist.euclidean(leye, reye)
    points.append(int(distq))
    return distq
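# Illustrative example (not part of the original script): with inner eyebrow
# points at (196, 140) and (219, 141), eye_brow_distance returns ~23.02 and
# appends 23 to points. As normalize_values below shows, distances near the
# minimum observed so far produce the highest stress score, so eyebrows drawn
# together (frowning) read as more stressed.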

# Uses the emotion classifier's class probabilities to label the face as stressed or not
def emotion_finder(faces, frame):
    global emotion_classifier
    EMOTIONS = ["angry", "disgust", "scared", "happy", "sad", "surprised", "neutral"]
    x, y, w, h = face_utils.rect_to_bb(faces)
    frame = frame[y:y+h, x:x+w]            # crop the detected face
    roi = cv2.resize(frame, (64, 64))      # mini_XCEPTION expects a 64x64 grayscale input
    roi = roi.astype("float") / 255.0
    roi = img_to_array(roi)
    roi = np.expand_dims(roi, axis=0)
    preds = emotion_classifier.predict(roi)[0]
    emotion_probability = np.max(preds)
    label = EMOTIONS[preds.argmax()]
    # Neutral and sad expressions are treated as signs of stress
    if label in ['neutral', 'sad']:
        label = 'stressed'
    else:
        label = 'not stressed'
    return label
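# Illustrative example (assumed probabilities, not from the original project):
# if preds came back as [0.05, 0.02, 0.08, 0.10, 0.15, 0.05, 0.55], the argmax
# is index 6 ("neutral"), so emotion_finder returns 'stressed'.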

# This function decides whether the detected stress level is high or low
def normalize_values(points, disp):
    # Scale the current eyebrow distance against the range observed so far
    if np.max(points) == np.min(points):
        normalized_value = 0.0    # only one reading yet; avoid division by zero
    else:
        normalized_value = abs(disp - np.min(points)) / abs(np.max(points) - np.min(points))
    # stress_value lies in (exp(-1), 1]: eyebrows drawn together give values near 1
    stress_value = np.exp(-normalized_value)
    print(stress_value)
    # The threshold of 40 matches the percentage shown on screen, so compare on the 0-1 scale
    if stress_value >= 0.40:
        return stress_value, "high stress"
    else:
        return stress_value, "low stress"
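# Worked example (assumed values): with points = [38, 40, 45, 42] and disp = 39,
# normalized = |39 - 38| / |45 - 38| ≈ 0.143 and stress_value = exp(-0.143) ≈ 0.867,
# shown on screen as "stress level:86" and labelled "high stress". If disp were 45
# (eyebrows at their widest observed separation), normalized = 1.0 and
# stress_value = exp(-1) ≈ 0.368, i.e. "low stress".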

# Video capture and model setup (forward slashes keep the paths portable)
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("mental_stress_detection/Stress-Detection-master/shape_predictor_68_face_landmarks.dat")
emotion_classifier = load_model("mental_stress_detection/Stress-Detection-master/_mini_XCEPTION.102-0.66.hdf5", compile=False)
cap = cv2.VideoCapture(0)    # default webcam
points = []                  # eyebrow distances collected across frames
while True:
    ret, frame = cap.read()
    if not ret:
        break
    frame = cv2.flip(frame, 1)                  # mirror the frame horizontally
    frame = imutils.resize(frame, width=500)    # imutils.resize keeps the aspect ratio
    
    
    # dlib's landmark names follow the subject's perspective, so the subject's
    # right eyebrow appears on the left of the image (hence the swapped names)
    (lBegin, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eyebrow"]
    (rBegin, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eyebrow"]

    # Convert the frame to grayscale for both the face detector and the emotion model
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    
    # Detect faces in the grayscale frame with dlib
    detections = detector(gray, 0)
    for detection in detections:
        emotion = emotion_finder(detection,gray)
        cv2.putText(frame, emotion, (10,10),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
        shape = predictor(gray, detection)    # predict the 68 facial landmarks
        shape = face_utils.shape_to_np(shape)
           
        leyebrow = shape[lBegin:lEnd]
        reyebrow = shape[rBegin:rEnd]
            
        reyebrowhull = cv2.convexHull(reyebrow)
        leyebrowhull = cv2.convexHull(leyebrow)
        
        # Draw the eyebrow contours in green
        cv2.drawContours(frame, [reyebrowhull], -1, (0, 255, 0), 1)
        cv2.drawContours(frame, [leyebrowhull], -1, (0, 255, 0), 1)

        # Distance between the inner ends of the two eyebrows drives the stress score
        distq = eye_brow_distance(leyebrow[-1], reyebrow[0])
        stress_value, stress_label = normalize_values(points, distq)
        cv2.putText(frame, "stress level:{}".format(str(int(stress_value*100))), (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 2)

    cv2.imshow("Frame", frame)
    # Exit when 'q' is pressed; waitKey also lets OpenCV refresh the window
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
# Plot the recorded eyebrow distances once the session ends
plt.plot(range(len(points)), points, 'ro')
plt.title("Stress Levels")
plt.show()
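The script above runs as an ordinary foreground window. As a minimal sketch of the "run in the background" idea mentioned at the top, the capture loop could be wrapped in a function and started on a daemon thread. This assumes a hypothetical run_detector(show_window=False) refactor of the loop, since OpenCV's imshow window generally needs to stay on the main thread:

import threading

def start_in_background():
    # Assumes the capture loop above has been wrapped in a hypothetical
    # run_detector(show_window=False) function that skips cv2.imshow,
    # because OpenCV GUI windows should stay on the main thread.
    worker = threading.Thread(target=run_detector, kwargs={"show_window": False}, daemon=True)
    worker.start()
    return worker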
