1 Commits

Author SHA1 Message Date
Storm
912af8d821 feat: implemented force speech functionality in RI and refactored actuation_receiver tests
Previously, the actuation_receiver tests started a receiver with a real ZMQ context, which led to flaky tests due to port congestion.

ref: N25B-386
2025-12-12 14:38:06 +01:00
40 changed files with 151 additions and 1590 deletions
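
A minimal sketch of the fixture pattern that avoids real ports in such tests: patch `zmq.Context` so every receiver gets a mock instead of binding a TCP socket. This mirrors the `mock_zmq_context` fixture that appears in this diff; the file name and exact wiring are assumptions.

```
# conftest.py (sketch): no real ZeroMQ sockets, so tests cannot collide on ports.
from mock import patch, MagicMock
import pytest


@pytest.fixture(autouse=True)
def mock_zmq_context():
    # Replace the ZeroMQ context with a MagicMock for every test.
    with patch("zmq.Context") as mock_context:
        mock_context.instance.return_value = MagicMock()
        yield mock_context
```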

View File

@@ -1,25 +0,0 @@
# Example .env file. To use, make a copy, call it ".env" (i.e. remove the ".example" suffix), then edit the values.
# To make a variable apply, uncomment it (remove the "#" in front of the line).
# First, some variables that are likely to be configured:
# The hostname or IP address of the Control Backend.
AGENT__CONTROL_BACKEND_HOST=localhost
# Variables that are unlikely to be configured, you can probably ignore these:
#AGENT__ACTUATION_RECEIVER_PORT=
#AGENT__MAIN_RECEIVER_PORT=
#AGENT__VIDEO_SENDER_PORT=
#AGENT__AUDIO_SENDER_PORT=
#VIDEO__CAMERA_INDEX=
#VIDEO__RESOLUTION=
#VIDEO__COLOR_SPACE=
#VIDEO__FPS=
#VIDEO__STREAM_NAME=
#VIDEO__IMAGE_BUFFER=
#AUDIO__SAMPLE_RATE=
#AUDIO__CHUNK_SIZE=
#AUDIO__CHANNELS=

View File

@@ -39,15 +39,10 @@ On Windows:
$env:PYTHONPATH="src"; python -m robot_interface.main
```
### Program Arguments
If you want to connect to the actual robot (or simulator), pass the `--qi-url` argument.
With both, if you want to connect to the actual robot (or simulator), pass the `--qi-url` argument.
There's also a `--microphone` argument for choosing which microphone to use. If not given, the program will try the default microphone. If you don't know the microphone's name, pass the argument with any value and the program will list the names of the available microphones.
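For example, a typical invocation combining both arguments might look like this (the robot address and microphone name are placeholders; `9559` is the usual NAOqi port):
```
PYTHONPATH=src python -m robot_interface.main --qi-url tcp://192.168.1.42:9559 --microphone "USB Microphone"
```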
### Environment Variables
You may use environment variables to change settings. Make a copy of the [`.env.example`](.env.example) file, name it `.env` and put it in the root directory. The file itself describes how to do the configuration.
## Testing

View File

@@ -6,4 +6,3 @@ pytest-cov<3.0.0
sphinx
sphinx_rtd_theme
pre-commit
python-dotenv

View File

@@ -1,6 +0,0 @@
# -*- coding: utf-8 -*-
"""
This program has been developed by students from the bachelor Computer Science at Utrecht
University within the Software Project course.
© Copyright Utrecht University (Department of Information and Computing Sciences)
"""

View File

@@ -1,6 +0,0 @@
# -*- coding: utf-8 -*-
"""
This program has been developed by students from the bachelor Computer Science at Utrecht
University within the Software Project course.
© Copyright Utrecht University (Department of Information and Computing Sciences)
"""

View File

@@ -1,108 +1,95 @@
# -*- coding: utf-8 -*-
"""
This program has been developed by students from the bachelor Computer Science at Utrecht
University within the Software Project course.
© Copyright Utrecht University (Department of Information and Computing Sciences)
"""
from __future__ import unicode_literals
from robot_interface.utils.get_config import get_config
class AgentSettings(object):
"""
Agent port configuration.
:ivar control_backend_host: Hostname of the control backend, defaults to "localhost".
:vartype control_backend_host: string
:ivar actuation_receiver_port: Port for receiving actuation commands, defaults to 5557.
:vartype actuation_receiver_port: int
:ivar main_receiver_port: Port for receiving main messages, defaults to 5555.
:ivar actuating_receiver_port: Port for receiving actuation commands.
:vartype actuating_receiver_port: int
:ivar main_receiver_port: Port for receiving main messages.
:vartype main_receiver_port: int
:ivar video_sender_port: Port used for sending video frames, defaults to 5556.
:ivar video_sender_port: Port used for sending video frames.
:vartype video_sender_port: int
:ivar audio_sender_port: Port used for sending audio data, defaults to 5558.
:ivar audio_sender_port: Port used for sending audio data.
:vartype audio_sender_port: int
"""
def __init__(
self,
control_backend_host=None,
actuation_receiver_port=None,
main_receiver_port=None,
video_sender_port=None,
audio_sender_port=None,
self,
actuating_receiver_port=5557,
main_receiver_port=5555,
video_sender_port=5556,
audio_sender_port=5558,
):
self.control_backend_host = get_config(control_backend_host, "AGENT__CONTROL_BACKEND_HOST", "localhost")
self.actuation_receiver_port = get_config(actuation_receiver_port, "AGENT__ACTUATION_RECEIVER_PORT", 5557, int)
self.main_receiver_port = get_config(main_receiver_port, "AGENT__MAIN_RECEIVER_PORT", 5555, int)
self.video_sender_port = get_config(video_sender_port, "AGENT__VIDEO_SENDER_PORT", 5556, int)
self.audio_sender_port = get_config(audio_sender_port, "AGENT__AUDIO_SENDER_PORT", 5558, int)
self.actuating_receiver_port = actuating_receiver_port
self.main_receiver_port = main_receiver_port
self.video_sender_port = video_sender_port
self.audio_sender_port = audio_sender_port
class VideoConfig(object):
"""
Video configuration constants.
:ivar camera_index: Index of the camera used, defaults to 0.
:ivar camera_index: Index of the camera used.
:vartype camera_index: int
:ivar resolution: Video resolution mode, defaults to 2.
:ivar resolution: Video resolution mode.
:vartype resolution: int
:ivar color_space: Color space identifier, defaults to 11.
:ivar color_space: Color space identifier.
:vartype color_space: int
:ivar fps: Frames per second of the video stream, defaults to 15.
:ivar fps: Frames per second of the video stream.
:vartype fps: int
:ivar stream_name: Name of the video stream, defaults to "Pepper Video".
:ivar stream_name: Name of the video stream.
:vartype stream_name: str
:ivar image_buffer: Internal buffer size for video frames, defaults to 6.
:ivar image_buffer: Internal buffer size for video frames.
:vartype image_buffer: int
"""
def __init__(
self,
camera_index=None,
resolution=None,
color_space=None,
fps=None,
stream_name=None,
image_buffer=None,
camera_index=0,
resolution=2,
color_space=11,
fps=15,
stream_name="Pepper Video",
image_buffer=6,
):
self.camera_index = get_config(camera_index, "VIDEO__CAMERA_INDEX", 0, int)
self.resolution = get_config(resolution, "VIDEO__RESOLUTION", 2, int)
self.color_space = get_config(color_space, "VIDEO__COLOR_SPACE", 11, int)
self.fps = get_config(fps, "VIDEO__FPS", 15, int)
self.stream_name = get_config(stream_name, "VIDEO__STREAM_NAME", "Pepper Video")
self.image_buffer = get_config(image_buffer, "VIDEO__IMAGE_BUFFER", 6, int)
self.camera_index = camera_index
self.resolution = resolution
self.color_space = color_space
self.fps = fps
self.stream_name = stream_name
self.image_buffer = image_buffer
class AudioConfig(object):
"""
Audio configuration constants.
:ivar sample_rate: Audio sampling rate in Hz, defaults to 16000.
:ivar sample_rate: Audio sampling rate in Hz.
:vartype sample_rate: int
:ivar chunk_size: Size of audio chunks to capture/process, defaults to 512.
:ivar chunk_size: Size of audio chunks to capture/process.
:vartype chunk_size: int
:ivar channels: Number of audio channels, defaults to 1.
:ivar channels: Number of audio channels.
:vartype channels: int
"""
def __init__(self, sample_rate=None, chunk_size=None, channels=None):
self.sample_rate = get_config(sample_rate, "AUDIO__SAMPLE_RATE", 16000, int)
self.chunk_size = get_config(chunk_size, "AUDIO__CHUNK_SIZE", 512, int)
self.channels = get_config(channels, "AUDIO__CHANNELS", 1, int)
def __init__(self, sample_rate=16000, chunk_size=512, channels=1):
self.sample_rate = sample_rate
self.chunk_size = chunk_size
self.channels = channels
class MainConfig(object):
"""
Main system configuration.
:ivar poll_timeout_ms: Timeout for polling events, in milliseconds, defaults to 100.
:ivar poll_timeout_ms: Timeout for polling events, in milliseconds.
:vartype poll_timeout_ms: int
:ivar max_handler_time_ms: Maximum allowed handler time, in milliseconds, defaults to 50.
:ivar max_handler_time_ms: Maximum allowed handler time, in milliseconds.
:vartype max_handler_time_ms: int
"""
def __init__(self, poll_timeout_ms=None, max_handler_time_ms=None):
self.poll_timeout_ms = get_config(poll_timeout_ms, "MAIN__POLL_TIMEOUT_MS", 100, int)
self.max_handler_time_ms = get_config(max_handler_time_ms, "MAIN__MAX_HANDLER_TIME_MS", 50, int)
def __init__(self, poll_timeout_ms=100, max_handler_time_ms=50):
self.poll_timeout_ms = poll_timeout_ms
self.max_handler_time_ms = max_handler_time_ms
class Settings(object):

View File

@@ -1,6 +0,0 @@
# -*- coding: utf-8 -*-
"""
This program has been developed by students from the bachelor Computer Science at Utrecht
University within the Software Project course.
© Copyright Utrecht University (Department of Information and Computing Sciences)
"""

View File

@@ -1,12 +1,6 @@
# -*- coding: utf-8 -*-
"""
This program has been developed by students from the bachelor Computer Science at Utrecht
University within the Software Project course.
© Copyright Utrecht University (Department of Information and Computing Sciences)
"""
from __future__ import unicode_literals # So that we can log texts with Unicode characters
import logging
import time
from threading import Thread
import Queue
@@ -14,8 +8,8 @@ import zmq
from robot_interface.endpoints.receiver_base import ReceiverBase
from robot_interface.state import state
from robot_interface.core.config import settings
from robot_interface.endpoints.gesture_settings import GestureTags
class ActuationReceiver(ReceiverBase):
@@ -30,16 +24,12 @@ class ActuationReceiver(ReceiverBase):
:ivar _tts_service: The text-to-speech service object from the Qi session.
:vartype _tts_service: qi.Session | None
:ivar _animation_service: The animation/gesture service object from the Qi session.
:vartype _animation_service: qi.Session | None
"""
def __init__(self, zmq_context, port=settings.agent_settings.actuation_receiver_port):
def __init__(self, zmq_context, port=settings.agent_settings.actuating_receiver_port):
super(ActuationReceiver, self).__init__("actuation")
self.create_socket(zmq_context, zmq.SUB, port)
self.socket.setsockopt_string(zmq.SUBSCRIBE, u"") # Causes block if given in options
self._tts_service = None
self._animation_service = None
self._message_queue = Queue.Queue()
self.message_thread = Thread(target=self._handle_messages)
self.message_thread.start()
@@ -69,7 +59,7 @@ class ActuationReceiver(ReceiverBase):
if not self._tts_service:
self._tts_service = state.qi_session.service("ALTextToSpeech")
if message.get("is_priority"):
if (message.get("is_priority")):
# Bypass queue and speak immediately
self.clear_queue()
self._message_queue.put(text)
@@ -81,7 +71,6 @@ class ActuationReceiver(ReceiverBase):
"""
Safely drains all pending messages from the queue.
"""
logging.info("Message queue size: {}".format(self._message_queue.qsize()))
try:
while True:
# Remove items one by one without waiting
@@ -90,53 +79,6 @@ class ActuationReceiver(ReceiverBase):
pass
logging.info("Message queue cleared.")
def _handle_gesture(self, message, is_single):
"""
Handle a gesture actuation request.
:param message: The gesture to do, must contain properties "endpoint" and "data".
:type message: dict
:param is_single: Whether it's a specific single gesture or a gesture tag.
:type is_single: bool
"""
gesture = message.get("data")
if not gesture:
logging.warn("Received gesture to do, but it lacks data.")
return
if not isinstance(gesture, (str, unicode)):
logging.warn("Received gesture to do but it is not a string.")
return
logging.debug("Received gesture to do: {}".format(gesture))
if is_single:
if gesture not in GestureTags.single_gestures:
logging.warn("Received single gesture to do, but it does not exist in settings")
return
else:
if gesture not in GestureTags.tags:
logging.warn("Received single tag to do, but it does not exist in settings")
return
if not state.qi_session: return
# If state has a qi_session, we know that we can import qi
import qi # Takes a while only the first time it's imported
if not self._animation_service:
self._animation_service = state.qi_session.service("ALAnimationPlayer")
# Play the gesture. Pepper comes with predefined animations like "Wave", "Greet", "Clap"
# You can also create custom animations using Choregraphe and upload them to the robot.
if is_single:
logging.debug("Playing single gesture: {}".format(gesture))
getattr(qi, "async")(self._animation_service.run, gesture)
else:
logging.debug("Playing tag gesture: {}".format(gesture))
getattr(qi, "async")(self._animation_service.runTag, gesture)
def handle_message(self, message):
"""
Handle an actuation/speech message with the receiver.
@@ -146,30 +88,18 @@ class ActuationReceiver(ReceiverBase):
"""
if message["endpoint"] == "actuate/speech":
self._handle_speech(message)
if message["endpoint"] == "actuate/gesture/tag":
self._handle_gesture(message, False)
if message["endpoint"] == "actuate/gesture/single":
self._handle_gesture(message, True)
def _handle_messages(self):
while not state.exit_event.is_set():
try:
text = self._message_queue.get(timeout=0.1)
if not state.is_speaking: print("Started speaking.")
state.is_speaking = True
self._tts_service.say(text)
except Queue.Empty:
if state.is_speaking: print("Finished speaking.")
state.is_speaking = False
except RuntimeError:
logging.error("Lost connection to Pepper. Please check if you're connected to the "
"local WiFi and restart this application.")
state.exit_event.set()
logging.warn("Lost connection to Pepper. Please check if you're connected to the local WiFi and restart this application.")
state.exit_event.set()
def endpoint_description(self):
"""
Extend the default endpoint description with gesture tags.
Returned during negotiate/ports so the CB knows available gestures.
"""
desc = super(ActuationReceiver, self).endpoint_description()
desc["gestures"] = GestureTags.tags
desc["single_gestures"] = GestureTags.single_gestures
return desc
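
For reference, the actuation messages this receiver handles are plain dicts; a sketch of the shapes implied by `handle_message`, `_handle_speech` and `_handle_gesture` above (the string values are illustrative, taken from the tests and gesture lists in this diff):

```
# Regular speech: queued and spoken in order.
speech_msg = {"endpoint": "actuate/speech", "data": "Hello there"}

# Force speech: "is_priority" makes the receiver clear the queue and speak immediately.
forced_speech_msg = {"endpoint": "actuate/speech", "data": "Emergency Notification", "is_priority": True}

# Gestures: either a tag from GestureTags.tags or a full animation path from GestureTags.single_gestures.
gesture_tag_msg = {"endpoint": "actuate/gesture/tag", "data": "hello"}
single_gesture_msg = {"endpoint": "actuate/gesture/single", "data": "animations/Stand/Gestures/Hey_1"}
```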

View File

@@ -1,10 +1,3 @@
# -*- coding: utf-8 -*-
"""
This program has been developed by students from the bachelor Computer Science at Utrecht
University within the Software Project course.
© Copyright Utrecht University (Department of Information and Computing Sciences)
"""
from __future__ import unicode_literals # So that `logging` can use Unicode characters in names
import threading
import logging
@@ -84,33 +77,21 @@ class AudioSender(SocketBase):
chunk = audio_settings.chunk_size # 320 at 16000 Hz is 20ms, 512 is required for Silero-VAD
# Docs say this only raises an error if neither `input` nor `output` is True
def open_stream():
return self.audio.open(
format=pyaudio.paFloat32,
channels=audio_settings.channels,
rate=audio_settings.sample_rate,
input=True,
input_device_index=self.microphone["index"],
frames_per_buffer=chunk,
)
stream = None
stream = self.audio.open(
format=pyaudio.paFloat32,
channels=audio_settings.channels,
rate=audio_settings.sample_rate,
input=True,
input_device_index=self.microphone["index"],
frames_per_buffer=chunk,
)
try:
# Test in case exit_event was set while waiting
if not state.exit_event.is_set():
stream = open_stream()
while not state.exit_event.is_set():
data = stream.read(chunk)
if (state.is_speaking): continue # Do not send audio while the robot is speaking
self.socket.send(data)
except IOError as e:
logger.error("Stopped listening: failed to get audio from microphone.", exc_info=e)
finally:
if stream:
try:
stream.stop_stream()
stream.close()
except IOError:
pass # Ignore errors on closing
stream.stop_stream()
stream.close()
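
The `chunk` comment above rests on a small calculation; a quick sketch with the `AudioConfig` defaults:

```
# Chunk duration in milliseconds: 1000 * chunk_size / sample_rate.
sample_rate = 16000  # Hz (AudioConfig default)
for chunk_size in (320, 512):
    print("%d samples at %d Hz = %.0f ms" % (chunk_size, sample_rate, 1000.0 * chunk_size / sample_rate))
# 320 samples -> 20 ms; 512 samples -> 32 ms (the chunk size Silero-VAD expects at 16 kHz).
```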

View File

@@ -1,419 +0,0 @@
# -*- coding: utf-8 -*-
"""
This program has been developed by students from the bachelor Computer Science at Utrecht
University within the Software Project course.
© Copyright Utrecht University (Department of Information and Computing Sciences)
"""
class GestureTags:
tags = ["above", "affirmative", "afford", "agitated", "all", "allright", "alright", "any",
"assuage", "assuage", "attemper", "back", "bashful", "beg", "beseech", "blank",
"body language", "bored", "bow", "but", "call", "calm", "choose", "choice", "cloud",
"cogitate", "cool", "crazy", "disappointed", "down", "earth", "empty", "embarrassed",
"enthusiastic", "entire", "estimate", "except", "exalted", "excited", "explain", "far",
"field", "floor", "forlorn", "friendly", "front", "frustrated", "gentle", "gift",
"give", "ground", "happy", "hello", "her", "here", "hey", "hi", "him", "hopeless",
"hysterical", "I", "implore", "indicate", "joyful", "me", "meditate", "modest",
"negative", "nervous", "no", "not know", "nothing", "offer", "ok", "once upon a time",
"oppose", "or", "pacify", "pick", "placate", "please", "present", "proffer", "quiet",
"reason", "refute", "reject", "rousing", "sad", "select", "shamefaced", "show",
"show sky", "sky", "soothe", "sun", "supplicate", "tablet", "tall", "them", "there",
"think", "timid", "top", "unless", "up", "upstairs", "void", "warm", "winner", "yeah",
"yes", "yoo-hoo", "you", "your", "zero", "zestful"]
single_gestures = [
"animations/Stand/BodyTalk/Listening/Listening_1",
"animations/Stand/BodyTalk/Listening/Listening_2",
"animations/Stand/BodyTalk/Listening/Listening_3",
"animations/Stand/BodyTalk/Listening/Listening_4",
"animations/Stand/BodyTalk/Listening/Listening_5",
"animations/Stand/BodyTalk/Listening/Listening_6",
"animations/Stand/BodyTalk/Listening/Listening_7",
"animations/Stand/BodyTalk/Speaking/BodyTalk_1",
"animations/Stand/BodyTalk/Speaking/BodyTalk_10",
"animations/Stand/BodyTalk/Speaking/BodyTalk_11",
"animations/Stand/BodyTalk/Speaking/BodyTalk_12",
"animations/Stand/BodyTalk/Speaking/BodyTalk_13",
"animations/Stand/BodyTalk/Speaking/BodyTalk_14",
"animations/Stand/BodyTalk/Speaking/BodyTalk_15",
"animations/Stand/BodyTalk/Speaking/BodyTalk_16",
"animations/Stand/BodyTalk/Speaking/BodyTalk_2",
"animations/Stand/BodyTalk/Speaking/BodyTalk_3",
"animations/Stand/BodyTalk/Speaking/BodyTalk_4",
"animations/Stand/BodyTalk/Speaking/BodyTalk_5",
"animations/Stand/BodyTalk/Speaking/BodyTalk_6",
"animations/Stand/BodyTalk/Speaking/BodyTalk_7",
"animations/Stand/BodyTalk/Speaking/BodyTalk_8",
"animations/Stand/BodyTalk/Speaking/BodyTalk_9",
"animations/Stand/BodyTalk/Thinking/Remember_1",
"animations/Stand/BodyTalk/Thinking/Remember_2",
"animations/Stand/BodyTalk/Thinking/Remember_3",
"animations/Stand/BodyTalk/Thinking/ThinkingLoop_1",
"animations/Stand/BodyTalk/Thinking/ThinkingLoop_2",
"animations/Stand/Emotions/Negative/Angry_1",
"animations/Stand/Emotions/Negative/Angry_2",
"animations/Stand/Emotions/Negative/Angry_3",
"animations/Stand/Emotions/Negative/Angry_4",
"animations/Stand/Emotions/Negative/Anxious_1",
"animations/Stand/Emotions/Negative/Bored_1",
"animations/Stand/Emotions/Negative/Bored_2",
"animations/Stand/Emotions/Negative/Disappointed_1",
"animations/Stand/Emotions/Negative/Exhausted_1",
"animations/Stand/Emotions/Negative/Exhausted_2",
"animations/Stand/Emotions/Negative/Fear_1",
"animations/Stand/Emotions/Negative/Fear_2",
"animations/Stand/Emotions/Negative/Fearful_1",
"animations/Stand/Emotions/Negative/Frustrated_1",
"animations/Stand/Emotions/Negative/Humiliated_1",
"animations/Stand/Emotions/Negative/Hurt_1",
"animations/Stand/Emotions/Negative/Hurt_2",
"animations/Stand/Emotions/Negative/Late_1",
"animations/Stand/Emotions/Negative/Sad_1",
"animations/Stand/Emotions/Negative/Sad_2",
"animations/Stand/Emotions/Negative/Shocked_1",
"animations/Stand/Emotions/Negative/Sorry_1",
"animations/Stand/Emotions/Negative/Surprise_1",
"animations/Stand/Emotions/Negative/Surprise_2",
"animations/Stand/Emotions/Negative/Surprise_3",
"animations/Stand/Emotions/Neutral/Alienated_1",
"animations/Stand/Emotions/Neutral/AskForAttention_1",
"animations/Stand/Emotions/Neutral/AskForAttention_2",
"animations/Stand/Emotions/Neutral/AskForAttention_3",
"animations/Stand/Emotions/Neutral/Cautious_1",
"animations/Stand/Emotions/Neutral/Confused_1",
"animations/Stand/Emotions/Neutral/Determined_1",
"animations/Stand/Emotions/Neutral/Embarrassed_1",
"animations/Stand/Emotions/Neutral/Hesitation_1",
"animations/Stand/Emotions/Neutral/Innocent_1",
"animations/Stand/Emotions/Neutral/Lonely_1",
"animations/Stand/Emotions/Neutral/Mischievous_1",
"animations/Stand/Emotions/Neutral/Puzzled_1",
"animations/Stand/Emotions/Neutral/Sneeze",
"animations/Stand/Emotions/Neutral/Stubborn_1",
"animations/Stand/Emotions/Neutral/Suspicious_1",
"animations/Stand/Emotions/Positive/Amused_1",
"animations/Stand/Emotions/Positive/Confident_1",
"animations/Stand/Emotions/Positive/Ecstatic_1",
"animations/Stand/Emotions/Positive/Enthusiastic_1",
"animations/Stand/Emotions/Positive/Excited_1",
"animations/Stand/Emotions/Positive/Excited_2",
"animations/Stand/Emotions/Positive/Excited_3",
"animations/Stand/Emotions/Positive/Happy_1",
"animations/Stand/Emotions/Positive/Happy_2",
"animations/Stand/Emotions/Positive/Happy_3",
"animations/Stand/Emotions/Positive/Happy_4",
"animations/Stand/Emotions/Positive/Hungry_1",
"animations/Stand/Emotions/Positive/Hysterical_1",
"animations/Stand/Emotions/Positive/Interested_1",
"animations/Stand/Emotions/Positive/Interested_2",
"animations/Stand/Emotions/Positive/Laugh_1",
"animations/Stand/Emotions/Positive/Laugh_2",
"animations/Stand/Emotions/Positive/Laugh_3",
"animations/Stand/Emotions/Positive/Mocker_1",
"animations/Stand/Emotions/Positive/Optimistic_1",
"animations/Stand/Emotions/Positive/Peaceful_1",
"animations/Stand/Emotions/Positive/Proud_1",
"animations/Stand/Emotions/Positive/Proud_2",
"animations/Stand/Emotions/Positive/Proud_3",
"animations/Stand/Emotions/Positive/Relieved_1",
"animations/Stand/Emotions/Positive/Shy_1",
"animations/Stand/Emotions/Positive/Shy_2",
"animations/Stand/Emotions/Positive/Sure_1",
"animations/Stand/Emotions/Positive/Winner_1",
"animations/Stand/Emotions/Positive/Winner_2",
"animations/Stand/Gestures/Angry_1",
"animations/Stand/Gestures/Angry_2",
"animations/Stand/Gestures/Angry_3",
"animations/Stand/Gestures/BowShort_1",
"animations/Stand/Gestures/BowShort_2",
"animations/Stand/Gestures/BowShort_3",
"animations/Stand/Gestures/But_1",
"animations/Stand/Gestures/CalmDown_1",
"animations/Stand/Gestures/CalmDown_2",
"animations/Stand/Gestures/CalmDown_3",
"animations/Stand/Gestures/CalmDown_4",
"animations/Stand/Gestures/CalmDown_5",
"animations/Stand/Gestures/CalmDown_6",
"animations/Stand/Gestures/Choice_1",
"animations/Stand/Gestures/ComeOn_1",
"animations/Stand/Gestures/Confused_1",
"animations/Stand/Gestures/Confused_2",
"animations/Stand/Gestures/CountFive_1",
"animations/Stand/Gestures/CountFour_1",
"animations/Stand/Gestures/CountMore_1",
"animations/Stand/Gestures/CountOne_1",
"animations/Stand/Gestures/CountThree_1",
"animations/Stand/Gestures/CountTwo_1",
"animations/Stand/Gestures/Desperate_1",
"animations/Stand/Gestures/Desperate_2",
"animations/Stand/Gestures/Desperate_3",
"animations/Stand/Gestures/Desperate_4",
"animations/Stand/Gestures/Desperate_5",
"animations/Stand/Gestures/DontUnderstand_1",
"animations/Stand/Gestures/Enthusiastic_3",
"animations/Stand/Gestures/Enthusiastic_4",
"animations/Stand/Gestures/Enthusiastic_5",
"animations/Stand/Gestures/Everything_1",
"animations/Stand/Gestures/Everything_2",
"animations/Stand/Gestures/Everything_3",
"animations/Stand/Gestures/Everything_4",
"animations/Stand/Gestures/Everything_6",
"animations/Stand/Gestures/Excited_1",
"animations/Stand/Gestures/Explain_1",
"animations/Stand/Gestures/Explain_10",
"animations/Stand/Gestures/Explain_11",
"animations/Stand/Gestures/Explain_2",
"animations/Stand/Gestures/Explain_3",
"animations/Stand/Gestures/Explain_4",
"animations/Stand/Gestures/Explain_5",
"animations/Stand/Gestures/Explain_6",
"animations/Stand/Gestures/Explain_7",
"animations/Stand/Gestures/Explain_8",
"animations/Stand/Gestures/Far_1",
"animations/Stand/Gestures/Far_2",
"animations/Stand/Gestures/Far_3",
"animations/Stand/Gestures/Follow_1",
"animations/Stand/Gestures/Give_1",
"animations/Stand/Gestures/Give_2",
"animations/Stand/Gestures/Give_3",
"animations/Stand/Gestures/Give_4",
"animations/Stand/Gestures/Give_5",
"animations/Stand/Gestures/Give_6",
"animations/Stand/Gestures/Great_1",
"animations/Stand/Gestures/HeSays_1",
"animations/Stand/Gestures/HeSays_2",
"animations/Stand/Gestures/HeSays_3",
"animations/Stand/Gestures/Hey_1",
"animations/Stand/Gestures/Hey_10",
"animations/Stand/Gestures/Hey_2",
"animations/Stand/Gestures/Hey_3",
"animations/Stand/Gestures/Hey_4",
"animations/Stand/Gestures/Hey_6",
"animations/Stand/Gestures/Hey_7",
"animations/Stand/Gestures/Hey_8",
"animations/Stand/Gestures/Hey_9",
"animations/Stand/Gestures/Hide_1",
"animations/Stand/Gestures/Hot_1",
"animations/Stand/Gestures/Hot_2",
"animations/Stand/Gestures/IDontKnow_1",
"animations/Stand/Gestures/IDontKnow_2",
"animations/Stand/Gestures/IDontKnow_3",
"animations/Stand/Gestures/IDontKnow_4",
"animations/Stand/Gestures/IDontKnow_5",
"animations/Stand/Gestures/IDontKnow_6",
"animations/Stand/Gestures/Joy_1",
"animations/Stand/Gestures/Kisses_1",
"animations/Stand/Gestures/Look_1",
"animations/Stand/Gestures/Look_2",
"animations/Stand/Gestures/Maybe_1",
"animations/Stand/Gestures/Me_1",
"animations/Stand/Gestures/Me_2",
"animations/Stand/Gestures/Me_4",
"animations/Stand/Gestures/Me_7",
"animations/Stand/Gestures/Me_8",
"animations/Stand/Gestures/Mime_1",
"animations/Stand/Gestures/Mime_2",
"animations/Stand/Gestures/Next_1",
"animations/Stand/Gestures/No_1",
"animations/Stand/Gestures/No_2",
"animations/Stand/Gestures/No_3",
"animations/Stand/Gestures/No_4",
"animations/Stand/Gestures/No_5",
"animations/Stand/Gestures/No_6",
"animations/Stand/Gestures/No_7",
"animations/Stand/Gestures/No_8",
"animations/Stand/Gestures/No_9",
"animations/Stand/Gestures/Nothing_1",
"animations/Stand/Gestures/Nothing_2",
"animations/Stand/Gestures/OnTheEvening_1",
"animations/Stand/Gestures/OnTheEvening_2",
"animations/Stand/Gestures/OnTheEvening_3",
"animations/Stand/Gestures/OnTheEvening_4",
"animations/Stand/Gestures/OnTheEvening_5",
"animations/Stand/Gestures/Please_1",
"animations/Stand/Gestures/Please_2",
"animations/Stand/Gestures/Please_3",
"animations/Stand/Gestures/Reject_1",
"animations/Stand/Gestures/Reject_2",
"animations/Stand/Gestures/Reject_3",
"animations/Stand/Gestures/Reject_4",
"animations/Stand/Gestures/Reject_5",
"animations/Stand/Gestures/Reject_6",
"animations/Stand/Gestures/Salute_1",
"animations/Stand/Gestures/Salute_2",
"animations/Stand/Gestures/Salute_3",
"animations/Stand/Gestures/ShowFloor_1",
"animations/Stand/Gestures/ShowFloor_2",
"animations/Stand/Gestures/ShowFloor_3",
"animations/Stand/Gestures/ShowFloor_4",
"animations/Stand/Gestures/ShowFloor_5",
"animations/Stand/Gestures/ShowSky_1",
"animations/Stand/Gestures/ShowSky_10",
"animations/Stand/Gestures/ShowSky_11",
"animations/Stand/Gestures/ShowSky_12",
"animations/Stand/Gestures/ShowSky_2",
"animations/Stand/Gestures/ShowSky_3",
"animations/Stand/Gestures/ShowSky_4",
"animations/Stand/Gestures/ShowSky_5",
"animations/Stand/Gestures/ShowSky_6",
"animations/Stand/Gestures/ShowSky_7",
"animations/Stand/Gestures/ShowSky_8",
"animations/Stand/Gestures/ShowSky_9",
"animations/Stand/Gestures/ShowTablet_1",
"animations/Stand/Gestures/ShowTablet_2",
"animations/Stand/Gestures/ShowTablet_3",
"animations/Stand/Gestures/Shy_1",
"animations/Stand/Gestures/Stretch_1",
"animations/Stand/Gestures/Stretch_2",
"animations/Stand/Gestures/Surprised_1",
"animations/Stand/Gestures/TakePlace_1",
"animations/Stand/Gestures/TakePlace_2",
"animations/Stand/Gestures/Take_1",
"animations/Stand/Gestures/Thinking_1",
"animations/Stand/Gestures/Thinking_2",
"animations/Stand/Gestures/Thinking_3",
"animations/Stand/Gestures/Thinking_4",
"animations/Stand/Gestures/Thinking_5",
"animations/Stand/Gestures/Thinking_6",
"animations/Stand/Gestures/Thinking_7",
"animations/Stand/Gestures/Thinking_8",
"animations/Stand/Gestures/This_1",
"animations/Stand/Gestures/This_10",
"animations/Stand/Gestures/This_11",
"animations/Stand/Gestures/This_12",
"animations/Stand/Gestures/This_13",
"animations/Stand/Gestures/This_14",
"animations/Stand/Gestures/This_15",
"animations/Stand/Gestures/This_2",
"animations/Stand/Gestures/This_3",
"animations/Stand/Gestures/This_4",
"animations/Stand/Gestures/This_5",
"animations/Stand/Gestures/This_6",
"animations/Stand/Gestures/This_7",
"animations/Stand/Gestures/This_8",
"animations/Stand/Gestures/This_9",
"animations/Stand/Gestures/WhatSThis_1",
"animations/Stand/Gestures/WhatSThis_10",
"animations/Stand/Gestures/WhatSThis_11",
"animations/Stand/Gestures/WhatSThis_12",
"animations/Stand/Gestures/WhatSThis_13",
"animations/Stand/Gestures/WhatSThis_14",
"animations/Stand/Gestures/WhatSThis_15",
"animations/Stand/Gestures/WhatSThis_16",
"animations/Stand/Gestures/WhatSThis_2",
"animations/Stand/Gestures/WhatSThis_3",
"animations/Stand/Gestures/WhatSThis_4",
"animations/Stand/Gestures/WhatSThis_5",
"animations/Stand/Gestures/WhatSThis_6",
"animations/Stand/Gestures/WhatSThis_7",
"animations/Stand/Gestures/WhatSThis_8",
"animations/Stand/Gestures/WhatSThis_9",
"animations/Stand/Gestures/Whisper_1",
"animations/Stand/Gestures/Wings_1",
"animations/Stand/Gestures/Wings_2",
"animations/Stand/Gestures/Wings_3",
"animations/Stand/Gestures/Wings_4",
"animations/Stand/Gestures/Wings_5",
"animations/Stand/Gestures/Yes_1",
"animations/Stand/Gestures/Yes_2",
"animations/Stand/Gestures/Yes_3",
"animations/Stand/Gestures/YouKnowWhat_1",
"animations/Stand/Gestures/YouKnowWhat_2",
"animations/Stand/Gestures/YouKnowWhat_3",
"animations/Stand/Gestures/YouKnowWhat_4",
"animations/Stand/Gestures/YouKnowWhat_5",
"animations/Stand/Gestures/YouKnowWhat_6",
"animations/Stand/Gestures/You_1",
"animations/Stand/Gestures/You_2",
"animations/Stand/Gestures/You_3",
"animations/Stand/Gestures/You_4",
"animations/Stand/Gestures/You_5",
"animations/Stand/Gestures/Yum_1",
"animations/Stand/Reactions/EthernetOff_1",
"animations/Stand/Reactions/EthernetOn_1",
"animations/Stand/Reactions/Heat_1",
"animations/Stand/Reactions/Heat_2",
"animations/Stand/Reactions/LightShine_1",
"animations/Stand/Reactions/LightShine_2",
"animations/Stand/Reactions/LightShine_3",
"animations/Stand/Reactions/LightShine_4",
"animations/Stand/Reactions/SeeColor_1",
"animations/Stand/Reactions/SeeColor_2",
"animations/Stand/Reactions/SeeColor_3",
"animations/Stand/Reactions/SeeSomething_1",
"animations/Stand/Reactions/SeeSomething_3",
"animations/Stand/Reactions/SeeSomething_4",
"animations/Stand/Reactions/SeeSomething_5",
"animations/Stand/Reactions/SeeSomething_6",
"animations/Stand/Reactions/SeeSomething_7",
"animations/Stand/Reactions/SeeSomething_8",
"animations/Stand/Reactions/ShakeBody_1",
"animations/Stand/Reactions/ShakeBody_2",
"animations/Stand/Reactions/ShakeBody_3",
"animations/Stand/Reactions/TouchHead_1",
"animations/Stand/Reactions/TouchHead_2",
"animations/Stand/Reactions/TouchHead_3",
"animations/Stand/Reactions/TouchHead_4",
"animations/Stand/Waiting/AirGuitar_1",
"animations/Stand/Waiting/BackRubs_1",
"animations/Stand/Waiting/Bandmaster_1",
"animations/Stand/Waiting/Binoculars_1",
"animations/Stand/Waiting/BreathLoop_1",
"animations/Stand/Waiting/BreathLoop_2",
"animations/Stand/Waiting/BreathLoop_3",
"animations/Stand/Waiting/CallSomeone_1",
"animations/Stand/Waiting/Drink_1",
"animations/Stand/Waiting/DriveCar_1",
"animations/Stand/Waiting/Fitness_1",
"animations/Stand/Waiting/Fitness_2",
"animations/Stand/Waiting/Fitness_3",
"animations/Stand/Waiting/FunnyDancer_1",
"animations/Stand/Waiting/HappyBirthday_1",
"animations/Stand/Waiting/Helicopter_1",
"animations/Stand/Waiting/HideEyes_1",
"animations/Stand/Waiting/HideHands_1",
"animations/Stand/Waiting/Innocent_1",
"animations/Stand/Waiting/Knight_1",
"animations/Stand/Waiting/KnockEye_1",
"animations/Stand/Waiting/KungFu_1",
"animations/Stand/Waiting/LookHand_1",
"animations/Stand/Waiting/LookHand_2",
"animations/Stand/Waiting/LoveYou_1",
"animations/Stand/Waiting/Monster_1",
"animations/Stand/Waiting/MysticalPower_1",
"animations/Stand/Waiting/PlayHands_1",
"animations/Stand/Waiting/PlayHands_2",
"animations/Stand/Waiting/PlayHands_3",
"animations/Stand/Waiting/Relaxation_1",
"animations/Stand/Waiting/Relaxation_2",
"animations/Stand/Waiting/Relaxation_3",
"animations/Stand/Waiting/Relaxation_4",
"animations/Stand/Waiting/Rest_1",
"animations/Stand/Waiting/Robot_1",
"animations/Stand/Waiting/ScratchBack_1",
"animations/Stand/Waiting/ScratchBottom_1",
"animations/Stand/Waiting/ScratchEye_1",
"animations/Stand/Waiting/ScratchHand_1",
"animations/Stand/Waiting/ScratchHead_1",
"animations/Stand/Waiting/ScratchLeg_1",
"animations/Stand/Waiting/ScratchTorso_1",
"animations/Stand/Waiting/ShowMuscles_1",
"animations/Stand/Waiting/ShowMuscles_2",
"animations/Stand/Waiting/ShowMuscles_3",
"animations/Stand/Waiting/ShowMuscles_4",
"animations/Stand/Waiting/ShowMuscles_5",
"animations/Stand/Waiting/ShowSky_1",
"animations/Stand/Waiting/ShowSky_2",
"animations/Stand/Waiting/SpaceShuttle_1",
"animations/Stand/Waiting/Stretch_1",
"animations/Stand/Waiting/Stretch_2",
"animations/Stand/Waiting/TakePicture_1",
"animations/Stand/Waiting/Taxi_1",
"animations/Stand/Waiting/Think_1",
"animations/Stand/Waiting/Think_2",
"animations/Stand/Waiting/Think_3",
"animations/Stand/Waiting/Think_4",
"animations/Stand/Waiting/Waddle_1",
"animations/Stand/Waiting/Waddle_2",
"animations/Stand/Waiting/WakeUp_1",
"animations/Stand/Waiting/Zombie_1"]

View File

@@ -1,10 +1,3 @@
# -*- coding: utf-8 -*-
"""
This program has been developed by students from the bachelor Computer Science at Utrecht
University within the Software Project course.
© Copyright Utrecht University (Department of Information and Computing Sciences)
"""
import zmq
from robot_interface.endpoints.receiver_base import ReceiverBase
@@ -12,7 +5,6 @@ from robot_interface.state import state
from robot_interface.core.config import settings
class MainReceiver(ReceiverBase):
"""
The main receiver endpoint, responsible for handling ping and negotiation requests.
@@ -20,12 +12,10 @@ class MainReceiver(ReceiverBase):
:param zmq_context: The ZeroMQ context to use.
:type zmq_context: zmq.Context
:param port: The port to use, defaults to value in `settings.agent_settings.main_receiver_port`.
:param port: The port to use.
:type port: int
"""
def __init__(self, zmq_context, port=None):
if port is None:
port = settings.agent_settings.main_receiver_port
def __init__(self, zmq_context, port=settings.agent_settings.main_receiver_port):
super(MainReceiver, self).__init__("main")
self.create_socket(zmq_context, zmq.REP, port, bind=False)
@@ -78,30 +68,6 @@ class MainReceiver(ReceiverBase):
return MainReceiver._handle_port_negotiation(message)
return {"endpoint": "negotiate/error", "data": "The requested endpoint is not implemented."}
@staticmethod
def _handle_pause(message):
"""
Handle a pause request. Pauses or resumes the video and audio streams.
:param message: The pause request message.
:type message: dict
:return: A response dictionary indicating success.
:rtype: dict[str, str]
"""
if message.get("data"):
if state.active_event.is_set():
state.active_event.clear()
return {"endpoint": "pause", "data": "Streams paused."}
else:
return {"endpoint": "pause", "data": "Streams are already paused."}
else:
if not state.active_event.is_set():
state.active_event.set()
return {"endpoint": "pause", "data": "Streams resumed."}
else:
return {"endpoint": "pause", "data": "Streams are already running."}
def handle_message(self, message):
"""
@@ -119,7 +85,5 @@ class MainReceiver(ReceiverBase):
return self._handle_ping(message)
elif message["endpoint"].startswith("negotiate"):
return self._handle_negotiation(message)
elif message["endpoint"] == "pause":
return self._handle_pause(message)
return {"endpoint": "error", "data": "The requested endpoint is not supported."}

View File

@@ -1,10 +1,3 @@
# -*- coding: utf-8 -*-
"""
This program has been developed by students from the bachelor Computer Science at Utrecht
University within the Software Project course.
© Copyright Utrecht University (Department of Information and Computing Sciences)
"""
from abc import ABCMeta, abstractmethod
from robot_interface.endpoints.socket_base import SocketBase

View File

@@ -1,16 +1,7 @@
# -*- coding: utf-8 -*-
"""
This program has been developed by students from the bachelor Computer Science at Utrecht
University within the Software Project course.
© Copyright Utrecht University (Department of Information and Computing Sciences)
"""
from abc import ABCMeta
import zmq
from robot_interface.core.config import settings
class SocketBase(object):
"""
@@ -68,7 +59,7 @@ class SocketBase(object):
if bind:
self.socket.bind("tcp://*:{}".format(port))
else:
self.socket.connect("tcp://{}:{}".format(settings.agent_settings.control_backend_host, port))
self.socket.connect("tcp://localhost:{}".format(port))
def close(self):
"""Close the ZeroMQ socket."""

View File

@@ -1,22 +1,11 @@
# -*- coding: utf-8 -*-
"""
This program has been developed by students from the bachelor Computer Science at Utrecht
University within the Software Project course.
© Copyright Utrecht University (Department of Information and Computing Sciences)
"""
import struct
import zmq
import threading
import logging
import numpy as np
import cv2
from robot_interface.endpoints.socket_base import SocketBase
from robot_interface.state import state
from robot_interface.core.config import settings
class VideoSender(SocketBase):
"""
Video sender endpoint, responsible for sending video frames.
@@ -29,7 +18,7 @@ class VideoSender(SocketBase):
"""
def __init__(self, zmq_context, port=settings.agent_settings.video_sender_port):
super(VideoSender, self).__init__("video")
self.create_socket(zmq_context, zmq.PUB, port, [(zmq.SNDHWM,3)])
self.create_socket(zmq_context, zmq.PUB, port, [(zmq.CONFLATE,1)])
def start_video_rcv(self):
"""
@@ -39,9 +28,6 @@ class VideoSender(SocketBase):
"""
if not state.qi_session:
logging.info("No Qi session available. Not starting video loop.")
logging.info("Starting test video stream from local webcam.")
thread = threading.Thread(target=self.test_video_stream)
thread.start()
return
video = state.qi_session.service("ALVideoDevice")
@@ -55,40 +41,9 @@ class VideoSender(SocketBase):
thread = threading.Thread(target=self.video_rcv_loop, args=(video, vid_stream_name))
thread.start()
def test_video_stream(self):
"""
Test function to send video from a local webcam instead of the robot.
"""
cap = cv2.VideoCapture(0)
if not cap.isOpened():
logging.error("Could not open webcam for video stream test.")
return
while not state.exit_event.is_set():
ret, frame = cap.read()
if not ret:
logging.warning("Failed to read frame from webcam.")
continue
if cv2.waitKey(1) & 0xFF == ord('q'):  # Press 'q' in the OpenCV window to stop the test stream
break
height, width, channels = frame.shape
pixel_data = frame.tobytes()
width_bytes = struct.pack('<I', width)
height_bytes = struct.pack('<I', height)
self.socket.send_multipart([width_bytes, height_bytes, pixel_data])
cap.release()
def video_rcv_loop(self, vid_service, vid_stream_name):
"""
The main loop of retrieving video images from the robot.
Sends the image data over the ZMQ socket in 3 parts: image width, image height and raw image bytes.
:param vid_service: The video service object that the active Qi session is connected to.
:type vid_service: Object (Qi service object)
@@ -96,23 +51,10 @@ class VideoSender(SocketBase):
:param vid_stream_name: The name of a camera subscription on the video service object vid_service
:type vid_stream_name: str
"""
try:
while not state.exit_event.is_set():
try:
img = vid_service.getImageRemote(vid_stream_name)
if img is not None:
image_bytes = img[6]
width = img[0]
height = img[1]
width_bytes = struct.pack('<I', width)
height_bytes = struct.pack('<I', height)
self.socket.send_multipart([width_bytes, height_bytes, image_bytes])
except:
logging.warn("Failed to retrieve video image from robot.")
except KeyboardInterrupt:
logging.info("Video receiving loop interrupted by user.")
finally:
vid_service.unsubscribe(vid_stream_name)
logging.info("Unsubscribed from video stream.")
while not state.exit_event.is_set():
try:
img = vid_service.getImageRemote(vid_stream_name)
#Possibly limit images sent if queuing issues arise
self.socket.send(img[settings.video_config.image_buffer])
except:
logging.warn("Failed to retrieve video image from robot.")

View File

@@ -1,10 +1,3 @@
# -*- coding: utf-8 -*-
"""
This program has been developed by students from the bachelor Computer Science at Utrecht
University within the Software Project course.
© Copyright Utrecht University (Department of Information and Computing Sciences)
"""
import logging
from robot_interface.endpoints.audio_sender import AudioSender
@@ -91,7 +84,6 @@ def main():
context = zmq.Context()
state.initialize()
state.active_event.set()
try:
main_loop(context)

View File

@@ -1,10 +1,3 @@
# -*- coding: utf-8 -*-
"""
This program has been developed by students from the bachelor Computer Science at Utrecht
University within the Software Project course.
© Copyright Utrecht University (Department of Information and Computing Sciences)
"""
import logging
import signal
import threading
@@ -56,8 +49,6 @@ class State(object):
signal.signal(signal.SIGINT, handle_exit)
signal.signal(signal.SIGTERM, handle_exit)
self.active_event = threading.Event()
self.qi_session = get_qi_session()
self.is_initialized = True

View File

@@ -1,6 +0,0 @@
# -*- coding: utf-8 -*-
"""
This program has been developed by students from the bachelor Computer Science at Utrecht
University within the Software Project course.
© Copyright Utrecht University (Department of Information and Computing Sciences)
"""

View File

@@ -1,39 +0,0 @@
# -*- coding: utf-8 -*-
"""
This program has been developed by students from the bachelor Computer Science at Utrecht
University within the Software Project course.
© Copyright Utrecht University (Department of Information and Computing Sciences)
"""
import os
from dotenv import load_dotenv
load_dotenv()
def get_config(value, env, default, cast=None):
"""
Small utility to get a configuration value. Returns `value` if it is not None; otherwise it reads the
environment variable `env` and casts it with `cast`. If the environment variable is not set, it returns `default`.
:param value: The value to check.
:type value: Any
:param env: The environment variable to check.
:type env: string
:param default: The default value to return if the environment variable is not set.
:type default: Any
:param cast: A function to use to cast the environment variable. Must support string input.
:type cast: Callable[[Any], Any], optional
:return: The value, the environment variable value, or the default.
:rtype: Any
"""
if value is not None:
return value
env = os.environ.get(env, default)
if cast is None:
return env
return cast(env)
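
A small usage sketch of the precedence this helper implements, assuming `get_config` is imported as above (explicit value first, then the environment variable cast with `cast`, then the default):

```
import os

# An explicit argument wins:
assert get_config(4242, "AGENT__MAIN_RECEIVER_PORT", 5555, int) == 4242

# Otherwise the environment variable is used (and cast):
os.environ["AGENT__MAIN_RECEIVER_PORT"] = "6000"
assert get_config(None, "AGENT__MAIN_RECEIVER_PORT", 5555, int) == 6000

# With neither set, the default is returned:
del os.environ["AGENT__MAIN_RECEIVER_PORT"]
assert get_config(None, "AGENT__MAIN_RECEIVER_PORT", 5555, int) == 5555
```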

View File

@@ -1,10 +1,3 @@
# -*- coding: utf-8 -*-
"""
This program has been developed by students from the bachelor Computer Science at Utrecht
University within the Software Project course.
© Copyright Utrecht University (Department of Information and Computing Sciences)
"""
from __future__ import unicode_literals # So that `print` can print Unicode characters in names
import logging
import sys

View File

@@ -1,10 +1,3 @@
# -*- coding: utf-8 -*-
"""
This program has been developed by students from the bachelor Computer Science at Utrecht
University within the Software Project course.
© Copyright Utrecht University (Department of Information and Computing Sciences)
"""
import logging
import sys

View File

@@ -1,10 +1,3 @@
# -*- coding: utf-8 -*-
"""
This program has been developed by students from the bachelor Computer Science at Utrecht
University within the Software Project course.
© Copyright Utrecht University (Department of Information and Computing Sciences)
"""
import time

View File

@@ -1,6 +0,0 @@
# -*- coding: utf-8 -*-
"""
This program has been developed by students from the bachelor Computer Science at Utrecht
University within the Software Project course.
© Copyright Utrecht University (Department of Information and Computing Sciences)
"""

View File

@@ -1,10 +1,3 @@
# -*- coding: utf-8 -*-
"""
This program has been developed by students from the bachelor Computer Science at Utrecht
University within the Software Project course.
© Copyright Utrecht University (Department of Information and Computing Sciences)
"""
from __future__ import unicode_literals # So that we can format strings with Unicode characters
import random
import sys

View File

@@ -1,17 +0,0 @@
# -*- coding: utf-8 -*-
"""
This program has been developed by students from the bachelor Computer Science at Utrecht
University within the Software Project course.
© Copyright Utrecht University (Department of Information and Computing Sciences)
"""
from mock import patch, MagicMock
import pytest
@pytest.fixture(autouse=True)
def mock_zmq_context():
with patch("zmq.Context") as mock:
mock.instance.return_value = MagicMock()
yield mock

View File

@@ -1,6 +0,0 @@
# -*- coding: utf-8 -*-
"""
This program has been developed by students from the bachelor Computer Science at Utrecht
University within the Software Project course.
© Copyright Utrecht University (Department of Information and Computing Sciences)
"""

View File

@@ -1,39 +0,0 @@
# -*- coding: utf-8 -*-
"""
This program has been developed by students from the bachelor Computer Science at Utrecht
University within the Software Project course.
© Copyright Utrecht University (Department of Information and Computing Sciences)
"""
import mock
from mock import patch
from robot_interface.core.config import Settings
from robot_interface.endpoints.main_receiver import MainReceiver
def test_environment_variables(monkeypatch):
"""
When environment variables are set, creating settings should use these.
"""
monkeypatch.setenv("AGENT__CONTROL_BACKEND_HOST", "some_value_that_should_be_different")
settings = Settings()
assert settings.agent_settings.control_backend_host == "some_value_that_should_be_different"
@patch("robot_interface.endpoints.main_receiver.settings")
@patch("robot_interface.endpoints.socket_base.settings")
def test_create_endpoint_custom_host(base_settings, main_settings):
"""
When a custom host is given in the settings, check that an endpoint's socket connects to it.
"""
fake_context = mock.Mock()
fake_socket = mock.Mock()
fake_context.socket.return_value = fake_socket
base_settings.agent_settings.control_backend_host = "not_localhost"
main_settings.agent_settings.main_receiver_port = 9999
_ = MainReceiver(fake_context)
fake_socket.connect.assert_called_once_with("tcp://not_localhost:9999")

View File

@@ -1,10 +1,3 @@
# -*- coding: utf-8 -*-
"""
This program has been developed by students from the bachelor Computer Science at Utrecht
University within the Software Project course.
© Copyright Utrecht University (Department of Information and Computing Sciences)
"""
import pyaudio
import pytest

View File

@@ -1,6 +0,0 @@
# -*- coding: utf-8 -*-
"""
This program has been developed by students from the bachelor Computer Science at Utrecht
University within the Software Project course.
© Copyright Utrecht University (Department of Information and Computing Sciences)
"""

View File

@@ -1,18 +1,10 @@
# -*- coding: utf-8 -*-
"""
This program has been developed by students from the bachelor Computer Science at Utrecht
University within the Software Project course.
© Copyright Utrecht University (Department of Information and Computing Sciences)
"""
import sys
import mock
import pytest
import zmq
import Queue
from robot_interface.endpoints.actuation_receiver import ActuationReceiver
from robot_interface.endpoints.gesture_settings import GestureTags
@pytest.fixture
@@ -26,7 +18,6 @@ def zmq_context():
context = zmq.Context()
yield context
def test_force_speech_clears_queue(mocker):
"""
Tests that a force speech message clears the existing queue
@@ -61,7 +52,6 @@ def test_force_speech_clears_queue(mocker):
queued_item = receiver._message_queue.get()
assert queued_item == "Emergency Notification"
def test_handle_unimplemented_endpoint(mocker):
"""
Tests handling of unknown endpoints.
@@ -77,7 +67,6 @@ def test_handle_unimplemented_endpoint(mocker):
"data": None,
})
def test_speech_message_no_data(mocker):
"""
Tests that if the message data is empty, the receiver returns immediately
@@ -339,155 +328,4 @@ def test_clear_queue(mocker):
receiver.clear_queue()
# Assert the queue is empty
assert receiver._message_queue.qsize() == 0
def test_gesture_no_data(zmq_context, mocker):
receiver = ActuationReceiver(zmq_context)
receiver._handle_gesture({"endpoint": "actuate/gesture/single", "data": ""}, True)
# Just ensuring no crash
def test_gesture_invalid_data(zmq_context, mocker):
receiver = ActuationReceiver(zmq_context)
receiver._handle_gesture({"endpoint": "actuate/gesture/single", "data": 123}, True)
# No crash expected
def test_gesture_single_not_found(zmq_context, mocker):
mock_tags = mocker.patch("robot_interface.endpoints.actuation_receiver.GestureTags")
mock_tags.single_gestures = ["wave", "bow"] # allowed single gestures
receiver = ActuationReceiver(zmq_context)
receiver._handle_gesture({"endpoint": "actuate/gesture/single", "data": "unknown_gesture"}, True)
# No crash expected
def test_gesture_tag_not_found(zmq_context, mocker):
mock_tags = mocker.patch("robot_interface.endpoints.actuation_receiver.GestureTags")
mock_tags.tags = ["happy", "sad"]
receiver = ActuationReceiver(zmq_context)
receiver._handle_gesture({"endpoint": "actuate/gesture/tag", "data": "not_a_tag"}, False)
# No crash expected
def test_gesture_no_qi_session(zmq_context, mocker):
mock_state = mocker.patch("robot_interface.endpoints.actuation_receiver.state")
mock_state.qi_session = None
mock_tags = mocker.patch("robot_interface.endpoints.actuation_receiver.GestureTags")
mock_tags.single_gestures = ["hello"]
receiver = ActuationReceiver(zmq_context)
receiver._handle_gesture({"endpoint": "actuate/gesture/single", "data": "hello"}, True)
# No crash, path returns early
def test_gesture_single_success(zmq_context, mocker):
mock_state = mocker.patch("robot_interface.endpoints.actuation_receiver.state")
mock_qi = mock.Mock()
sys.modules["qi"] = mock_qi
# Setup gesture settings
mock_tags = mocker.patch("robot_interface.endpoints.actuation_receiver.GestureTags")
mock_tags.single_gestures = ["wave"]
mock_animation_service = mock.Mock()
mock_state.qi_session = mock.Mock()
mock_state.qi_session.service.return_value = mock_animation_service
receiver = ActuationReceiver(zmq_context)
receiver._handle_gesture({"endpoint": "actuate/gesture/single", "data": "wave"}, True)
mock_state.qi_session.service.assert_called_once_with("ALAnimationPlayer")
getattr(mock_qi, "async").assert_called_once()
assert getattr(mock_qi, "async").call_args[0][0] == mock_animation_service.run
assert getattr(mock_qi, "async").call_args[0][1] == "wave"
def test_gesture_tag_success(zmq_context, mocker):
mock_state = mocker.patch("robot_interface.endpoints.actuation_receiver.state")
mock_qi = mock.Mock()
sys.modules["qi"] = mock_qi
mock_tags = mocker.patch("robot_interface.endpoints.actuation_receiver.GestureTags")
mock_tags.tags = ["greeting"]
mock_animation_service = mock.Mock()
mock_state.qi_session = mock.Mock()
mock_state.qi_session.service.return_value = mock_animation_service
receiver = ActuationReceiver(zmq_context)
receiver._handle_gesture({"endpoint": "actuate/gesture/tag", "data": "greeting"}, False)
mock_state.qi_session.service.assert_called_once_with("ALAnimationPlayer")
getattr(mock_qi, "async").assert_called_once()
assert getattr(mock_qi, "async").call_args[0][0] == mock_animation_service.runTag
assert getattr(mock_qi, "async").call_args[0][1] == "greeting"
def test_handle_message_all_routes(zmq_context, mocker):
"""
Ensures all handle_message endpoint branches route correctly.
"""
receiver = ActuationReceiver(zmq_context)
mock_speech = mocker.patch.object(receiver, "_handle_speech")
mock_gesture = mocker.patch.object(receiver, "_handle_gesture")
receiver.handle_message({"endpoint": "actuate/speech", "data": "hi"})
receiver.handle_message({"endpoint": "actuate/gesture/tag", "data": "greeting"})
receiver.handle_message({"endpoint": "actuate/gesture/single", "data": "wave"})
mock_speech.assert_called_once()
assert mock_gesture.call_count == 2
def test_endpoint_description(zmq_context, mocker):
mock_tags = mocker.patch("robot_interface.endpoints.actuation_receiver.GestureTags")
mock_tags.tags = ["happy"]
mock_tags.single_gestures = ["wave"]
receiver = ActuationReceiver(zmq_context)
desc = receiver.endpoint_description()
assert "gestures" in desc
assert desc["gestures"] == ["happy"]
assert "single_gestures" in desc
assert desc["single_gestures"] == ["wave"]
def test_gesture_single_real_gesturetags(zmq_context, mocker):
"""
Uses the real GestureTags (no mocking) to ensure the receiver
references GestureTags.single_gestures correctly.
"""
# Ensure qi session exists so we pass the early return
mock_state = mocker.patch("robot_interface.endpoints.actuation_receiver.state")
mock_state.qi_session = mock.Mock()
# Mock qi.async to avoid real async calls
mock_qi = mock.Mock()
sys.modules["qi"] = mock_qi
# Mock animation service
mock_animation_service = mock.Mock()
mock_state.qi_session.service.return_value = mock_animation_service
receiver = ActuationReceiver(zmq_context)
# Pick a real gesture from GestureTags.single_gestures
assert len(GestureTags.single_gestures) > 0, "GestureTags.single_gestures must not be empty"
gesture = GestureTags.single_gestures[0]
receiver._handle_gesture(
{"endpoint": "actuate/gesture/single", "data": gesture},
is_single=True,
)
mock_state.qi_session.service.assert_called_once_with("ALAnimationPlayer")
getattr(mock_qi, "async").assert_called_once()
assert getattr(mock_qi, "async").call_args[0][0] == mock_animation_service.run
assert getattr(mock_qi, "async").call_args[0][1] == gesture
assert receiver._message_queue.qsize() == 0

View File

@@ -1,10 +1,4 @@
# -*- coding: utf-8 -*-
"""
This program has been developed by students from the bachelor Computer Science at Utrecht
University within the Software Project course.
© Copyright Utrecht University (Department of Information and Computing Sciences)
"""
# coding=utf-8
import os
import mock
@@ -39,6 +33,7 @@ def test_no_microphone(zmq_context, mocker):
sender.start()
assert sender.thread is None
mock_info_logger.assert_called()
sender.wait_until_done() # Should return early because we didn't start a thread
@@ -78,12 +73,11 @@ def test_sending_audio(mocker):
mock_choose_mic.return_value = {"name": u"Some mic", "index": 0L}
mock_state = mocker.patch("robot_interface.endpoints.audio_sender.state")
mock_state.exit_event.is_set.side_effect = [False, False, True]
mock_state.exit_event.is_set.side_effect = [False, True]
mock_zmq_context = mock.Mock()
send_socket = mock.Mock()
mock_state.is_speaking = False
# If there's something wrong with the microphone, it will raise an IOError when `read`ing.
stream = mock.Mock()
stream.read = _fake_read
@@ -98,247 +92,6 @@ def test_sending_audio(mocker):
send_socket.assert_called()
# SENDING PAUSE RESUME?
def test_stream_initial_wait_exit(mocker):
mock_choose_mic = mocker.patch("robot_interface.endpoints.audio_sender.choose_mic")
mock_choose_mic.return_value = {"name": u"Some mic", "index": 0L}
mock_pyaudio_cls = mocker.patch("robot_interface.endpoints.audio_sender.pyaudio.PyAudio")
mock_pyaudio_instance = mock_pyaudio_cls.return_value
mock_state = mocker.patch("robot_interface.endpoints.audio_sender.state")
mock_state.exit_event.is_set.return_value = True
mock_state.active_event.is_set.return_value = False
mock_zmq_context = mock.Mock()
sender = AudioSender(mock_zmq_context)
sender._stream()
mock_pyaudio_instance.open.assert_not_called()
def test_stream_pause_and_resume(mocker):
mock_stream = mock.Mock()
mock_stream.read.return_value = b"data"
mock_pyaudio_cls = mocker.patch("robot_interface.endpoints.audio_sender.pyaudio.PyAudio")
mock_pyaudio_instance = mock_pyaudio_cls.return_value
mock_pyaudio_instance.open.return_value = mock_stream
mock_state = mocker.patch("robot_interface.endpoints.audio_sender.state")
mock_state.active_event.is_set.return_value = False
mock_state.exit_event.is_set.side_effect = [False, False, False, True]
mock_zmq_context = mock.Mock()
sender = AudioSender(mock_zmq_context)
sender.socket = mock.Mock()
sender._stream()
assert mock_pyaudio_instance.open.call_count == 2
assert mock_stream.close.call_count == 2
assert mock_stream.stop_stream.call_count == 2
assert mock_state.active_event.wait.called
def test_stream_exit_during_pause(mocker):
mock_stream = mock.Mock()
mock_pyaudio_cls = mocker.patch("robot_interface.endpoints.audio_sender.pyaudio.PyAudio")
mock_pyaudio_instance = mock_pyaudio_cls.return_value
mock_pyaudio_instance.open.return_value = mock_stream
mock_state = mocker.patch("robot_interface.endpoints.audio_sender.state")
mock_state.active_event.is_set.return_value = False
mock_state.exit_event.is_set.side_effect = [False, False, True]
mock_zmq_context = mock.Mock()
sender = AudioSender(mock_zmq_context)
sender._stream()
assert mock_pyaudio_instance.open.call_count == 1
assert mock_stream.close.call_count == 1
def test_stream_read_error_recovery(mocker):
stream_fail = mock.Mock()
stream_fail.read.side_effect = IOError("Overflow")
stream_ok = mock.Mock()
stream_ok.read.return_value = b"data"
mock_pyaudio_cls = mocker.patch("robot_interface.endpoints.audio_sender.pyaudio.PyAudio")
mock_pyaudio_instance = mock_pyaudio_cls.return_value
mock_pyaudio_instance.open.side_effect = [stream_fail, stream_ok]
mock_state = mocker.patch("robot_interface.endpoints.audio_sender.state")
mock_state.active_event.is_set.return_value = True
mock_state.exit_event.is_set.side_effect = [False, False, False, True]
mock_zmq_context = mock.Mock()
sender = AudioSender(mock_zmq_context)
sender.socket = mock.Mock()
sender._stream()
stream_fail.close.assert_called()
assert mock_pyaudio_instance.open.call_count == 2
sender.socket.send.assert_called_with(b"data")
def test_stream_fatal_error(mocker):
mock_pyaudio_cls = mocker.patch("robot_interface.endpoints.audio_sender.pyaudio.PyAudio")
mock_pyaudio_instance = mock_pyaudio_cls.return_value
mock_pyaudio_instance.open.side_effect = IOError("Fatal error")
mock_state = mocker.patch("robot_interface.endpoints.audio_sender.state")
mock_state.active_event.is_set.return_value = True
mock_state.exit_event.is_set.return_value = False
mock_zmq_context = mock.Mock()
sender = AudioSender(mock_zmq_context)
sender._stream()
def test_wait_until_done(mocker):
mock_zmq_context = mock.Mock()
sender = AudioSender(mock_zmq_context)
sender.wait_until_done()
mock_thread = mocker.Mock()
sender.thread = mock_thread
sender.wait_until_done()
mock_thread.join.assert_called_once()
assert sender.thread is None
def test_stream_pause_close_error(mocker):
"""
Tests that an IOError during stream closure (when pausing) is ignored,
covering the 'pass' statement in the pause logic.
"""
mock_stream = mock.Mock()
mock_stream.read.return_value = b"data"
# Raise IOError when stopping the stream during pause
mock_stream.stop_stream.side_effect = IOError("Failed to stop")
mock_pyaudio_cls = mocker.patch("robot_interface.endpoints.audio_sender.pyaudio.PyAudio")
mock_pyaudio_instance = mock_pyaudio_cls.return_value
mock_pyaudio_instance.open.return_value = mock_stream
mock_state = mocker.patch("robot_interface.endpoints.audio_sender.state")
# 1. First False triggers the pause block
# 2. Second True resumes the loop
mock_state.active_event.is_set.side_effect = [False, True]
mock_state.exit_event.is_set.side_effect = [False, False, False, True]
mock_zmq_context = mock.Mock()
sender = AudioSender(mock_zmq_context)
sender.socket = mock.Mock()
sender._stream()
# Verification: The error should be swallowed, and the stream should re-open
assert mock_stream.stop_stream.called
assert mock_pyaudio_instance.open.call_count == 2
def test_stream_finally_close_error(mocker):
"""
Tests that an IOError during stream closure in the finally block is ignored,
covering the 'pass' statement in the finally logic.
"""
mock_stream = mock.Mock()
mock_stream.read.return_value = b"data"
# Raise IOError when stopping the stream at exit
mock_stream.stop_stream.side_effect = IOError("Cleanup failed")
mock_pyaudio_cls = mocker.patch("robot_interface.endpoints.audio_sender.pyaudio.PyAudio")
mock_pyaudio_instance = mock_pyaudio_cls.return_value
mock_pyaudio_instance.open.return_value = mock_stream
mock_state = mocker.patch("robot_interface.endpoints.audio_sender.state")
mock_state.active_event.is_set.return_value = True
mock_state.exit_event.is_set.side_effect = [False, False, True]
mock_zmq_context = mock.Mock()
sender = AudioSender(mock_zmq_context)
sender.socket = mock.Mock()
# Run
sender._stream()
# Assert: Should finish without raising exception despite the IOError in finally
assert mock_stream.stop_stream.called
def test_stream_recovery_failure(mocker):
"""
Tests the case where recovering from a read error (re-opening stream) also fails.
This ensures the outer try-except catches exceptions from the inner except block.
"""
mock_stream_initial = mock.Mock()
# Trigger the read error logic
mock_stream_initial.read.side_effect = IOError("Read failed")
mock_pyaudio_cls = mocker.patch("robot_interface.endpoints.audio_sender.pyaudio.PyAudio")
mock_pyaudio_instance = mock_pyaudio_cls.return_value
# First open works, Second open (recovery) fails fatally
mock_pyaudio_instance.open.side_effect = [
mock_stream_initial,
IOError("Recovery failed")
]
mock_state = mocker.patch("robot_interface.endpoints.audio_sender.state")
mock_state.active_event.is_set.return_value = True
mock_state.exit_event.is_set.return_value = False
mock_logger = mocker.patch("robot_interface.endpoints.audio_sender.logger")
mock_zmq_context = mock.Mock()
sender = AudioSender(mock_zmq_context)
sender.socket = mock.Mock()
sender._stream()
# Assert we hit the outer error log
mock_logger.error.assert_called()
def test_no_sending_if_speaking(mocker):
"""
Tests the successful sending of audio data over a ZeroMQ socket.
"""
mock_choose_mic = mocker.patch("robot_interface.endpoints.audio_sender.choose_mic")
mock_choose_mic.return_value = {"name": u"Some mic", "index": 0L}
mock_state = mocker.patch("robot_interface.endpoints.audio_sender.state")
mock_state.exit_event.is_set.side_effect = [False, True]
mock_zmq_context = mock.Mock()
send_socket = mock.Mock()
mock_state.is_speaking = True
# If there's something wrong with the microphone, it will raise an IOError when `read`ing.
stream = mock.Mock()
stream.read = _fake_read
sender = AudioSender(mock_zmq_context)
sender.socket.send = send_socket
sender.audio.open = mock.Mock()
sender.audio.open.return_value = stream
sender.start()
sender.wait_until_done()
send_socket.assert_not_called()
def _fake_read_error(num_frames):
"""

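Taken together, the audio sender tests above describe a capture loop that pauses and resumes on `state.active_event`, recovers from transient read errors by re-opening the stream, skips sending while the robot is speaking, and swallows `IOError` during cleanup. The following is a minimal sketch of such a loop, not the project's actual implementation; the function name, signature, and PyAudio parameters are assumptions:

```python
# Hypothetical sketch of the behaviour the tests above exercise.
import pyaudio


def stream_microphone(audio, socket, state, logger, chunk_size=1024, rate=16000):
    stream = None
    try:
        while not state.exit_event.is_set():
            stream = audio.open(format=pyaudio.paInt16, channels=1, rate=rate,
                                input=True, frames_per_buffer=chunk_size)
            while not state.exit_event.is_set():
                if not state.active_event.is_set():
                    # Paused: release the device and wait to be resumed.
                    try:
                        stream.stop_stream()
                        stream.close()
                    except IOError:
                        pass  # a failing close must not kill the sender
                    state.active_event.wait(1)
                    break  # re-open the stream on resume
                try:
                    data = stream.read(chunk_size)
                except IOError:
                    # Overflow or device hiccup: re-open and keep capturing.
                    stream.close()
                    break
                if not state.is_speaking:
                    socket.send(data)
    except IOError as error:
        # Recovery itself failed: log and let the thread end.
        logger.error("Audio capture failed: %s", error)
    finally:
        if stream is not None:
            try:
                stream.stop_stream()
                stream.close()
            except IOError:
                pass
```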
View File

@@ -1,52 +0,0 @@
# -*- coding: utf-8 -*-
"""
This program has been developed by students from the bachelor Computer Science at Utrecht
University within the Software Project course.
© Copyright Utrecht University (Department of Information and Computing Sciences)
"""
from robot_interface.utils.get_config import get_config
def test_get_config_prefers_explicit_value(monkeypatch):
"""
When a direct value is provided it should be returned without reading the environment.
"""
monkeypatch.setenv("GET_CONFIG_TEST", "from-env")
result = get_config("explicit", "GET_CONFIG_TEST", "default")
assert result == "explicit"
def test_get_config_returns_env_value(monkeypatch):
"""
If value is None the environment variable should be used.
"""
monkeypatch.setenv("GET_CONFIG_TEST", "from-env")
result = get_config(None, "GET_CONFIG_TEST", "default")
assert result == "from-env"
def test_get_config_casts_env_value(monkeypatch):
"""
The env value should be cast when a cast function is provided.
"""
monkeypatch.setenv("GET_CONFIG_PORT", "1234")
result = get_config(None, "GET_CONFIG_PORT", 0, int)
assert result == 1234
def test_get_config_casts_default_when_env_missing(monkeypatch):
"""
When the env var is missing it should fall back to the default and still apply the cast.
"""
monkeypatch.delenv("GET_CONFIG_MISSING", raising=False)
result = get_config(None, "GET_CONFIG_MISSING", "42", int)
assert result == 42

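This test file is deleted along with the environment-based configuration, but its tests still document the removed helper's contract: an explicit value wins, then the environment variable, then the default, with the optional cast applied to whatever the environment/default path produced. A reconstruction of that contract (not the removed source itself):

```python
# Hypothetical reconstruction of the removed get_config helper, based only on
# the deleted tests above.
import os


def get_config(value, env_name, default, cast=None):
    if value is not None:
        return value
    result = os.environ.get(env_name, default)
    if cast is not None:
        result = cast(result)
    return result
```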
View File

@@ -1,10 +1,3 @@
# -*- coding: utf-8 -*-
"""
This program has been developed by students from the bachelor Computer Science at Utrecht
University within the Software Project course.
© Copyright Utrecht University (Department of Information and Computing Sciences)
"""
import pytest
import threading
import zmq

View File

@@ -1,10 +1,3 @@
# -*- coding: utf-8 -*-
"""
This program has been developed by students from the bachelor Computer Science at Utrecht
University within the Software Project course.
© Copyright Utrecht University (Department of Information and Computing Sciences)
"""
import mock
import pytest
import zmq

View File

@@ -1,10 +1,4 @@
# -*- coding: utf-8 -*-
"""
This program has been developed by students from the bachelor Computer Science at Utrecht
University within the Software Project course.
© Copyright Utrecht University (Department of Information and Computing Sciences)
"""
# coding=utf-8
import mock
import pytest

View File

@@ -1,10 +1,3 @@
# -*- coding: utf-8 -*-
"""
This program has been developed by students from the bachelor Computer Science at Utrecht
University within the Software Project course.
© Copyright Utrecht University (Department of Information and Computing Sciences)
"""
import sys
# Import module under test

View File

@@ -1,10 +1,3 @@
# -*- coding: utf-8 -*-
"""
This program has been developed by students from the bachelor Computer Science at Utrecht
University within the Software Project course.
© Copyright Utrecht University (Department of Information and Computing Sciences)
"""
import pytest
from robot_interface.endpoints.receiver_base import ReceiverBase

View File

@@ -1,10 +1,3 @@
# -*- coding: utf-8 -*-
"""
This program has been developed by students from the bachelor Computer Science at Utrecht
University within the Software Project course.
© Copyright Utrecht University (Department of Information and Computing Sciences)
"""
import mock
import zmq
from robot_interface.endpoints.socket_base import SocketBase

View File

@@ -1,10 +1,3 @@
# -*- coding: utf-8 -*-
"""
This program has been developed by students from the bachelor Computer Science at Utrecht
University within the Software Project course.
© Copyright Utrecht University (Department of Information and Computing Sciences)
"""
import threading
import signal
import pytest

View File

@@ -1,10 +1,3 @@
# -*- coding: utf-8 -*-
"""
This program has been developed by students from the bachelor Computer Science at Utrecht
University within the Software Project course.
© Copyright Utrecht University (Department of Information and Computing Sciences)
"""
import time
import mock

View File

@@ -1,171 +1,99 @@
# -*- coding: utf-8 -*-
"""
This program has been developed by students from the bachelor Computer Science at Utrecht
University within the Software Project course.
© Copyright Utrecht University (Department of Information and Computing Sciences)
"""
# coding=utf-8
import mock
import pytest
import zmq
from robot_interface.endpoints.video_sender import VideoSender
from robot_interface.state import state
from robot_interface.core.config import settings
@pytest.fixture
def zmq_context():
"""
Yields a real ZMQ context for socket creation.
"""
context = zmq.Context()
yield context
context.term()
"""Provide a ZMQ context."""
yield zmq.Context()
def test_init_defaults(zmq_context, mocker):
"""
Test initialization of the VideoSender.
"""
# We patch settings to ensure valid port access inside the class logic,
# although the default arg is evaluated at import time.
mocker.patch("robot_interface.endpoints.video_sender.settings")
mock_zmq = mock.Mock()
sender = VideoSender(mock_zmq)
# Verify socket type is PUB
assert sender.identifier == "video"
def test_start_no_qi_session(mocker):
"""
Test that the loop does not start if no Qi session is available.
"""
# Mock state to return None for qi_session
mock_state = mocker.patch("robot_interface.endpoints.video_sender.state")
mock_state.qi_session = None
mock_threading = mocker.patch("robot_interface.endpoints.video_sender.threading")
def _patch_basics(mocker):
"""Common patches: prevent real threads, port binds, and state errors."""
mocker.patch("robot_interface.endpoints.socket_base.zmq.Socket.bind")
mocker.patch("robot_interface.endpoints.video_sender.threading.Thread")
mocker.patch.object(state, "is_initialized", True)
mock_zmq = mock.Mock()
sender = VideoSender(mock_zmq)
def _patch_exit_event(mocker):
"""Make exit_event stop the loop after one iteration."""
fake_event = mock.Mock()
fake_event.is_set.side_effect = [False, True]
mocker.patch.object(state, "exit_event", fake_event)
def test_no_qi_session(zmq_context, mocker):
"""Video loop should not start without a qi_session."""
_patch_basics(mocker)
mocker.patch.object(state, "qi_session", None)
sender = VideoSender(zmq_context)
sender.start_video_rcv()
# Assertions
mock_threading.Thread.assert_not_called()
assert not hasattr(sender, "thread")
def test_video_streaming(zmq_context, mocker):
"""VideoSender should send retrieved image data."""
_patch_basics(mocker)
_patch_exit_event(mocker)
# Pepper's image buffer lives at index 6
mocker.patch.object(settings.video_config, "image_buffer", 6)
def test_start_success(mocker):
"""
Test successful startup of the video receiver thread.
"""
# Mock the Qi Session and Service
mock_state = mocker.patch("robot_interface.endpoints.video_sender.state")
mock_session = mock.Mock()
mock_state.qi_session = mock_session
mock_video_service = mock.Mock()
mock_session.service.return_value = mock_video_service
mock_video_service.subscribeCamera.return_value = "test_subscriber_id"
mock_video_service.getImageRemote.return_value = [None]*6 + ["fake_img"]
# Mock Settings
mock_settings = mocker.patch("robot_interface.endpoints.video_sender.settings")
mock_settings.video_config.camera_index = 0
mock_settings.video_config.resolution = 2
mock_settings.video_config.color_space = 11
mock_settings.video_config.fps = 30
mock_settings.video_config.stream_name = "test_stream"
fake_session = mock.Mock()
fake_session.service.return_value = mock_video_service
mocker.patch.object(state, "qi_session", fake_session)
mock_threading = mocker.patch("robot_interface.endpoints.video_sender.threading")
mocker.patch.object(
fake_session.service("ALVideoDevice"),
"subscribeCamera",
return_value="stream_name"
)
sender = VideoSender(zmq_context)
send_socket = mock.Mock()
sender.socket.send = send_socket
# Run
mock_zmq = mock.Mock()
sender = VideoSender(mock_zmq)
sender.start_video_rcv()
sender.video_rcv_loop(mock_video_service, "stream_name")
# Assertions
mock_session.service.assert_called_with("ALVideoDevice")
mock_video_service.subscribeCamera.assert_called_with("test_stream", 0, 2, 11, 30)
mock_threading.Thread.assert_called_once()
# Verify arguments passed to the thread target
call_args = mock_threading.Thread.call_args[1]
assert call_args["target"] == sender.video_rcv_loop
assert call_args["args"] == (mock_video_service, "test_subscriber_id")
# Ensure thread was started
mock_threading.Thread.return_value.start.assert_called_once()
send_socket.assert_called_with("fake_img")
def test_video_loop_happy_path(mocker):
"""
Test the main loop: Wait -> Get Image -> Send -> Repeat/Exit.
"""
# Mock settings for image buffer index
mock_settings = mocker.patch("robot_interface.endpoints.video_sender.settings")
mock_settings.video_config.image_buffer = 6
# Mock Video Service to return a fake image structure
# Standard NaoQi image is a list, binary data is usually at index 6
fake_image_data = b"binary_jpeg_data"
fake_image_list = [0] * 7
fake_image_list[6] = fake_image_data
mock_service = mock.Mock()
mock_service.getImageRemote.return_value = fake_image_list
def test_video_receive_error(zmq_context, mocker):
"""Errors retrieving images should not call send()."""
_patch_basics(mocker)
_patch_exit_event(mocker)
# Mock Events:
# exit_event: False (start), False (loop once), True (break)
mock_state = mocker.patch("robot_interface.endpoints.video_sender.state")
mock_state.exit_event.is_set.side_effect = [False, False, True]
mock_video_service = mock.Mock()
mock_video_service.getImageRemote.side_effect = Exception("boom")
# Run
mock_zmq = mock.Mock()
sender = VideoSender(mock_zmq)
sender.socket = mock.Mock() # Mock the socket to verify send
sender.video_rcv_loop(mock_service, "sub_id")
fake_session = mock.Mock()
fake_session.service.return_value = mock_video_service
mocker.patch.object(state, "qi_session", fake_session)
# Assertions
mock_state.active_event.wait.assert_called()
mock_service.getImageRemote.assert_called_with("sub_id")
sender.socket.send.assert_called_with(fake_image_data)
mocker.patch.object(
fake_session.service("ALVideoDevice"),
"subscribeCamera",
return_value="stream_name"
)
def test_video_loop_exit_during_wait(zmq_context, mocker):
"""
Test that the loop breaks immediately if exit_event is set while waiting.
"""
mock_service = mock.Mock()
mock_state = mocker.patch("robot_interface.endpoints.video_sender.state")
# 1. Loop check: False (enter loop)
# 2. Wait happens (mock returns instantly)
# 3. Post-wait check: True (break)
mock_state.exit_event.is_set.side_effect = [False, True]
sender = VideoSender(zmq_context)
send_socket = mock.Mock()
sender.socket.send = send_socket
mock_zmq = mock.Mock()
sender = VideoSender(mock_zmq)
sender.video_rcv_loop(mock_service, "sub_id")
sender.start_video_rcv()
sender.video_rcv_loop(mock_video_service, "stream_name")
# Assert we never tried to get an image
mock_service.getImageRemote.assert_not_called()
def test_video_loop_exception_handling(zmq_context, mocker):
"""
Test that exceptions during image retrieval are caught and logged,
and do not crash the thread.
"""
mock_settings = mocker.patch("robot_interface.endpoints.video_sender.settings")
mock_service = mock.Mock()
# First call raises Exception, Second call works (if we allowed it, but we exit)
mock_service.getImageRemote.side_effect = Exception("Camera disconnected")
mock_state = mocker.patch("robot_interface.endpoints.video_sender.state")
# Loop runs once then exits
mock_state.exit_event.is_set.side_effect = [False, False, True]
mock_zmq = mock.Mock()
sender = VideoSender(mock_zmq)
sender.socket = mock.Mock()
sender.video_rcv_loop(mock_service, "sub_id")
# Assertions
# Ensure loop didn't crash; it should have completed the iteration and checked exit_event
assert mock_state.exit_event.is_set.call_count >= 2
send_socket.assert_not_called()
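The rewritten video tests describe the frame loop end to end: subscribe to `ALVideoDevice`, wait on `state.active_event`, fetch a frame with `getImageRemote`, publish the raw buffer (index 6 of the NAOqi image container, per `settings.video_config.image_buffer`) on the PUB socket, and swallow per-frame errors so the thread survives camera hiccups. A minimal sketch under those assumptions; the function signature and timeout are not the project's:

```python
# Hypothetical sketch of the video loop the new tests above describe.
def video_rcv_loop(video_service, subscriber_id, socket, state, image_buffer_index=6):
    while not state.exit_event.is_set():
        # Block here while the sender is paused.
        state.active_event.wait(1)
        try:
            image = video_service.getImageRemote(subscriber_id)
            socket.send(image[image_buffer_index])
        except Exception:
            # A dropped frame or camera error must not kill the video thread.
            pass
```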