-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathapp.py
More file actions
115 lines (93 loc) · 4.13 KB
/
app.py
File metadata and controls
115 lines (93 loc) · 4.13 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
import cv2
import mediapipe as mp
import numpy as np
import streamlit as st
from datetime import datetime
# ===== STREAMLIT UI =====
# Page header; rendered once per Streamlit script run.
st.title("🧠 AI-Powered Posture & Focus Tracker")
st.write("Monitors your posture and alertness during work or study sessions in real-time.")
# ===== MEDIAPIPE INITIALIZATION =====
# Pose model for body landmarks (shoulders/hips) used in posture detection.
mp_pose = mp.solutions.pose
pose = mp_pose.Pose()  # default model complexity / confidence thresholds
mp_drawing = mp.solutions.drawing_utils  # draws the pose skeleton onto frames
# Face mesh for eye landmarks used in the EAR (drowsiness) computation.
mp_face_mesh = mp.solutions.face_mesh
face_mesh = mp_face_mesh.FaceMesh(max_num_faces=1)  # track a single user
# ===== STREAMLIT WEBCAM INPUT =====
run = st.checkbox('Start Webcam')  # toggles the capture loop below
FRAME_WINDOW = st.image([])  # placeholder updated with each processed frame
# ===== PARAMETERS =====
# Consecutive closed-eye frames before the user is flagged as sleeping.
CLOSED_FRAMES_THRESHOLD = 15 # ~0.5s at 30 FPS
eye_closed_counter = 0  # consecutive frames with EAR below threshold
drowsy_events = 0  # number of distinct drowsiness episodes this session
# Eye landmarks for EAR (left/right)
# Six contour points per eye in MediaPipe face-mesh index space:
# [outer corner, upper x2, inner corner, lower x2].
LEFT_EYE_IDX = [33, 160, 158, 133, 153, 144]
RIGHT_EYE_IDX = [362, 385, 387, 263, 373, 380]
# ===== FUNCTION: Eye Aspect Ratio =====
def eye_aspect_ratio(landmarks, eye_indices, frame_width, frame_height):
    """Compute the Eye Aspect Ratio (EAR) for one eye.

    EAR = (|p1-p5| + |p2-p4|) / (2 * |p0-p3|) over the six eye-contour
    points selected by ``eye_indices``. Small values (< ~0.2 in this app)
    indicate a closed eye.

    Args:
        landmarks: sequence of landmark objects exposing normalized
            ``.x`` / ``.y`` attributes in [0, 1] (MediaPipe face-mesh
            landmarks — TODO confirm for other providers).
        eye_indices: six landmark indices for this eye, ordered
            [outer corner, top1, top2, inner corner, bottom2, bottom1].
        frame_width: frame width in pixels, used to scale ``.x``.
        frame_height: frame height in pixels, used to scale ``.y``.

    Returns:
        float: the eye aspect ratio; ~0.0 when the horizontal eye span
        degenerates to zero (coincident corner points).
    """
    pts = [np.array((int(landmarks[i].x * frame_width),
                     int(landmarks[i].y * frame_height)))
           for i in eye_indices]
    vertical = np.linalg.norm(pts[1] - pts[5]) + np.linalg.norm(pts[2] - pts[4])
    horizontal = np.linalg.norm(pts[0] - pts[3])
    # Epsilon guards against division by zero when both eye corners map to
    # the same pixel; matches the 1e-6 epsilon used for the posture-angle
    # computation elsewhere in this file.
    return vertical / (2.0 * horizontal + 1e-6)
# ===== MAIN LOOP =====
# ===== MAIN LOOP =====
# Runs only while the "Start Webcam" checkbox is on. Captures frames,
# scores posture + alertness per frame, and reports a session summary
# once capture stops.
if run:
    cap = cv2.VideoCapture(0)
    focus_score = 0   # frames with good posture AND open eyes
    frame_count = 0   # total frames processed this session
    start_time = datetime.now()
    try:
        while True:
            ret, frame = cap.read()
            if not ret:
                st.warning("Failed to access webcam")
                break
            frame = cv2.flip(frame, 1)  # Mirror frame
            rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            results_pose = pose.process(rgb)
            results_face = face_mesh.process(rgb)
            frame_height, frame_width = frame.shape[:2]
            posture_status = "Unknown"

            # ===== POSTURE DETECTION =====
            if results_pose.pose_landmarks:
                mp_drawing.draw_landmarks(frame, results_pose.pose_landmarks, mp_pose.POSE_CONNECTIONS)
                landmarks = results_pose.pose_landmarks.landmark
                # Indices: 0 = nose (used as a neck proxy), 11 = left shoulder,
                # 23 = left hip — presumably the user faces the camera; verify.
                neck = np.array([landmarks[0].x, landmarks[0].y])
                shoulder = np.array([landmarks[11].x, landmarks[11].y])
                hip = np.array([landmarks[23].x, landmarks[23].y])
                v1 = shoulder - neck
                v2 = hip - shoulder
                # Angle between neck->shoulder and shoulder->hip vectors;
                # the 1e-6 epsilon avoids division by zero for degenerate poses.
                angle = np.degrees(np.arccos(np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2) + 1e-6)))
                posture_status = "Good Posture" if angle > 150 else "Slouching"
                color_posture = (0, 255, 0) if angle > 150 else (0, 0, 255)
                cv2.putText(frame, posture_status, (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, color_posture, 2)

            # ===== EYE DETECTION =====
            ear = 0
            if results_face.multi_face_landmarks:
                face_landmarks = results_face.multi_face_landmarks[0].landmark
                left_ear = eye_aspect_ratio(face_landmarks, LEFT_EYE_IDX, frame_width, frame_height)
                right_ear = eye_aspect_ratio(face_landmarks, RIGHT_EYE_IDX, frame_width, frame_height)
                ear = (left_ear + right_ear) / 2.0
                if ear < 0.2:
                    eye_closed_counter += 1
                    if eye_closed_counter >= CLOSED_FRAMES_THRESHOLD:
                        cv2.putText(frame, "sleeping", (20, 80), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
                        # BUG FIX: count one drowsy *event* per closed-eye
                        # episode. The original incremented on every frame past
                        # the threshold, so the count grew with episode length.
                        if eye_closed_counter == CLOSED_FRAMES_THRESHOLD:
                            drowsy_events += 1
                else:
                    eye_closed_counter = 0  # eyes open: reset the streak

            # ===== FOCUS SCORE =====
            # A frame counts as "focused" only with good posture and open eyes.
            if posture_status == "Good Posture" and ear >= 0.2:
                focus_score += 1
            frame_count += 1

            # ===== DISPLAY FRAME =====
            FRAME_WINDOW.image(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
    finally:
        # Always release the camera, even if the loop raises mid-frame.
        cap.release()

    # ===== END OF SESSION =====
    end_time = datetime.now()
    # BUG FIX: timedelta.seconds wraps at 24h (and drops days);
    # total_seconds() is the true elapsed time.
    session_duration = int((end_time - start_time).total_seconds())
    if frame_count > 0:
        focus_score_percentage = (focus_score / frame_count) * 100
        st.success(f"Focus Score: {focus_score_percentage:.2f}%")
        st.info(f"Drowsy Events: {drowsy_events}")
        st.info(f"Session Duration: {session_duration} seconds")
    else:
        st.info("No frames captured.")