refactor: visual emotion recognition agent now receives the image in three parts: width, height, and image bytes
ref: N25B-393
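For context, a sketch of what a matching publisher might send under this framing: width and height as little-endian integers, followed by the raw pixel bytes. This is an assumption based on the receiving code below, not taken from this commit; the socket name, address, and 4-byte integer width are placeholders.

import numpy as np
import zmq

# Hypothetical sender side: frame is an (height, width, 3) uint8 array.
frame = np.zeros((480, 640, 3), dtype=np.uint8)
height, width = frame.shape[:2]

video_out_socket = zmq.Context.instance().socket(zmq.PUB)  # hypothetical name
video_out_socket.bind("tcp://127.0.0.1:5555")               # placeholder address

# Three-part message: width, height, then the raw image bytes.
video_out_socket.send_multipart([
    width.to_bytes(4, 'little'),
    height.to_bytes(4, 'little'),
    frame.tobytes(),
])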
@@ -64,6 +64,8 @@ class VisualEmotionRecognitionAgent(BaseAgent):
 
         self.video_in_socket = azmq.Context.instance().socket(zmq.SUB)
 
+        self.video_in_socket.setsockopt(zmq.RCVHWM, 3)
+
         if self.socket_bind:
             self.video_in_socket.bind(self.socket_address)
         else:
@@ -71,7 +73,6 @@ class VisualEmotionRecognitionAgent(BaseAgent):
 
         self.video_in_socket.setsockopt_string(zmq.SUBSCRIBE, "")
         self.video_in_socket.setsockopt(zmq.RCVTIMEO, self.timeout_ms)
-        self.video_in_socket.setsockopt(zmq.CONFLATE, 1)
 
         self.add_behavior(self.emotion_update_loop())
 
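ZeroMQ's CONFLATE option does not support multi-part messages, which is presumably why it is dropped here; a small receive high-water mark caps how many frames can queue up instead. A minimal sketch of the resulting subscriber options, with placeholder address and timeout values:

import zmq
import zmq.asyncio as azmq

video_in_socket = azmq.Context.instance().socket(zmq.SUB)
video_in_socket.setsockopt(zmq.RCVHWM, 3)              # buffer at most a few frames
video_in_socket.connect("tcp://127.0.0.1:5555")         # placeholder address
video_in_socket.setsockopt_string(zmq.SUBSCRIBE, "")    # subscribe to all topics
video_in_socket.setsockopt(zmq.RCVTIMEO, 1000)          # placeholder timeout (ms)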
@@ -95,21 +96,18 @@ class VisualEmotionRecognitionAgent(BaseAgent):
 
         try:
             await self._paused.wait()
 
-            frame_bytes = await self.video_in_socket.recv()
+            width, height, image_bytes = await self.video_in_socket.recv_multipart()
 
+            width = int.from_bytes(width, 'little')
+            height = int.from_bytes(height, 'little')
 
             # Convert bytes to a numpy buffer
-            nparr = np.frombuffer(frame_bytes, np.uint8)
+            image_array = np.frombuffer(image_bytes, np.uint8)
 
-            # Decode image into the generic Numpy Array DeepFace expects
-            frame_image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
-
-            if frame_image is None:
-                # Could not decode image, skip this frame
-                self.logger.warning("Received invalid video frame, skipping.")
-                continue
+            frame = image_array.reshape((height, width, 3))
 
             # Get the dominant emotion from each face
-            current_emotions = self.emotion_recognizer.sorted_dominant_emotions(frame_image)
+            current_emotions = self.emotion_recognizer.sorted_dominant_emotions(frame)
             # Update emotion counts for each detected face
             for i, emotion in enumerate(current_emotions):
                 face_stats[i][emotion] += 1
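The reshape assumes the third message part is exactly width * height * 3 bytes of uncompressed 8-bit pixels (channel order, presumably BGR as in the previous cv2 decode path, is an assumption). A standalone round-trip sketch with hypothetical sizes:

import numpy as np

# Hypothetical three-part message, as the sender sketch above would produce.
width_bytes = (640).to_bytes(4, 'little')
height_bytes = (480).to_bytes(4, 'little')
image_bytes = bytes(640 * 480 * 3)

width = int.from_bytes(width_bytes, 'little')
height = int.from_bytes(height_bytes, 'little')

# Raw bytes back into the (height, width, 3) array the emotion recognizer expects.
frame = np.frombuffer(image_bytes, np.uint8).reshape((height, width, 3))
assert frame.shape == (480, 640, 3)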
@@ -133,6 +131,9 @@ class VisualEmotionRecognitionAgent(BaseAgent):
 
         except zmq.Again:
             self.logger.warning("No video frame received within timeout.")
 
+        except Exception as e:
+            self.logger.error(f"Error in emotion recognition loop: {e}")
+
 
     async def update_emotions(self, prev_emotions: set[str], emotions: set[str]):
         """