Add useful experiment logs #48
@@ -348,7 +348,7 @@ class BDICoreAgent(BaseAgent):
             yield

         @self.actions.add(".reply_with_goal", 3)
-        def _reply_with_goal(agent: "BDICoreAgent", term, intention):
+        def _reply_with_goal(agent, term, intention):
            """
            Let the LLM generate a response to a user's utterance with the current norms and a
            specific goal.
@@ -524,10 +524,6 @@ class BDICoreAgent(BaseAgent):

             yield

-        @self.actions.add(".notify_ui", 0)
-        def _notify_ui(agent, term, intention):
-            pass
-
     async def _send_to_llm(self, text: str, norms: str, goals: str):
         """
         Sends a text query to the LLM agent asynchronously.
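The two hunks above follow the python-agentspeak custom-action convention used throughout `BDICoreAgent`: an action is registered with `actions.add(name, arity)` and signals success by yielding. A minimal self-contained sketch of that convention (the standalone `Actions` wiring below is illustrative; in the repo the registration happens on `self.actions` inside the agent):

```python
# Sketch of a python-agentspeak custom action, mirroring the
# `.reply_with_goal` registration above (arity 3).
import agentspeak
import agentspeak.stdlib

actions = agentspeak.Actions(agentspeak.stdlib.actions)


@actions.add(".reply_with_goal", 3)
def _reply_with_goal(agent, term, intention):
    # A plan invocation .reply_with_goal(Text, Norms, Goal) arrives as term.args.
    text, norms, goal = (agentspeak.grounded(arg, intention.scope) for arg in term.args)
    print(f"would prompt the LLM with {text!r}, norms {norms!r}, goal {goal!r}")
    yield  # yielding marks the action as successful
```

Dropping the `"BDICoreAgent"` annotation on `agent` fits that convention, presumably because the interpreter passes its own runtime agent object rather than the `BDICoreAgent` wrapper.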
@@ -345,6 +345,9 @@ class TextBeliefExtractorAgent(BaseAgent):
         async with httpx.AsyncClient() as client:
             response = await client.post(
                 settings.llm_settings.local_llm_url,
+                headers={"Authorization": f"Bearer {settings.llm_settings.api_key}"}
+                if settings.llm_settings.api_key
+                else {},
                 json={
                     "model": settings.llm_settings.local_llm_model,
                     "messages": [{"role": "user", "content": prompt}],
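This hunk (and the matching one in `LLMAgent._query_llm` further down) only attaches an `Authorization` header when `api_key` is non-empty, so unauthenticated local servers keep working unchanged. The same pattern in isolation, as a runnable sketch with placeholder URL/key values:

```python
# Sketch: send a bearer token only when one is configured (url/api_key are
# placeholders; mirrors the conditional-headers expression in the diff).
import httpx


async def post_chat(url: str, api_key: str, prompt: str) -> dict:
    headers = {"Authorization": f"Bearer {api_key}"} if api_key else {}
    async with httpx.AsyncClient() as client:
        response = await client.post(
            url,
            headers=headers,  # httpx treats {} the same as sending no extra headers
            json={
                "model": "gpt-oss",
                "messages": [{"role": "user", "content": prompt}],
            },
        )
        response.raise_for_status()
        return response.json()

# run with: asyncio.run(post_chat("http://localhost:1234/v1/chat/completions", "", "Hi"))
```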
@@ -1,3 +1,4 @@
+import asyncio
 import json
 import logging
 import re
@@ -35,6 +36,10 @@ class LLMAgent(BaseAgent):
     def __init__(self, name: str):
         super().__init__(name)
         self.history = []
+        self._querying = False
+        self._interrupted = False
+        self._interrupted_message = ""
+        self._go_ahead = asyncio.Event()

     async def setup(self):
         self.logger.info("Setting up %s.", self.name)
@@ -53,13 +58,13 @@ class LLMAgent(BaseAgent):
                 case "prompt_message":
                     try:
                         prompt_message = LLMPromptMessage.model_validate_json(msg.body)
-                        await self._process_bdi_message(prompt_message)
+                        self.add_behavior(self._process_bdi_message(prompt_message))  # no block
                     except ValidationError:
                         self.logger.debug("Prompt message from BDI core is invalid.")
                 case "assistant_message":
-                    self.history.append({"role": "assistant", "content": msg.body})
+                    self._apply_conversation_message({"role": "assistant", "content": msg.body})
                 case "user_message":
-                    self.history.append({"role": "user", "content": msg.body})
+                    self._apply_conversation_message({"role": "user", "content": msg.body})
         elif msg.sender == settings.agent_settings.bdi_program_manager_name:
             if msg.body == "clear_history":
                 self.logger.debug("Clearing conversation history.")
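Two behavioral changes land in this hunk. First, `prompt_message` handling no longer awaits `_process_bdi_message` inline; `self.add_behavior(...)` (hence the `# no block` comment) presumably schedules it as a background task, leaving `handle_message` free to receive the interrupting prompt that the next hunk depends on. Second, history writes now go through `_apply_conversation_message`, which merges consecutive same-role messages instead of appending blindly. A small sketch of that merge semantics, with the method body inlined as a plain function:

```python
# Sketch of the merge semantics of _apply_conversation_message: consecutive
# entries with the same role are concatenated instead of appended.
history: list[dict[str, str]] = []


def apply(message: dict[str, str]) -> None:
    if history and message["role"] == history[-1]["role"]:
        history[-1]["content"] += " " + message["content"]
        return
    history.append(message)


apply({"role": "user", "content": "Hello"})
apply({"role": "user", "content": "are you there?"})  # merged into the previous turn
apply({"role": "assistant", "content": "Yes."})
assert history == [
    {"role": "user", "content": "Hello are you there?"},
    {"role": "assistant", "content": "Yes."},
]
```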
@@ -76,13 +81,46 @@ class LLMAgent(BaseAgent):

         :param message: The parsed prompt message containing text, norms, and goals.
         """
+        if self._querying:
+            self.logger.debug("Received another BDI prompt while processing previous message.")
+            self._interrupted = True  # interrupt the previous processing
+            await self._go_ahead.wait()  # wait until we get the go-ahead
+
+            message.text = f"{self._interrupted_message} {message.text}"
+
+        self._go_ahead.clear()
+        self._querying = True
         full_message = ""
         async for chunk in self._query_llm(message.text, message.norms, message.goals):
+            if self._interrupted:
+                self._interrupted_message = message.text
+                self.logger.debug("Interrupted processing of previous message.")
+                break
             await self._send_reply(chunk)
             full_message += chunk
-        self.logger.debug("Finished processing BDI message. Response sent in chunks to BDI core.")
+        else:
+            self._querying = False
+
+            self._apply_conversation_message(
+                {
+                    "role": "assistant",
+                    "content": full_message,
+                }
+            )
+            self.logger.debug(
+                "Finished processing BDI message. Response sent in chunks to BDI core."
+            )
             await self._send_full_reply(full_message)
+
+        self._go_ahead.set()
+        self._interrupted = False
+
+    def _apply_conversation_message(self, message: dict[str, str]):
+        if len(self.history) > 0 and message["role"] == self.history[-1]["role"]:
+            self.history[-1]["content"] += " " + message["content"]
+            return
+        self.history.append(message)
+
     async def _send_reply(self, msg: str):
         """
         Sends a response message (chunk) back to the BDI Core Agent.
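The control flow here leans on `async for ... else`: the `else` branch (mark idle, record history, send the full reply) runs only when the stream finishes without `break`, i.e. without an interrupt, while the `_go_ahead` event hands control back to the interrupted call so the new prompt can absorb the unanswered text. A self-contained sketch of the handshake, with a fake token stream standing in for `_query_llm` and prints standing in for replies:

```python
# Self-contained sketch of the interrupt handshake; flag and event names
# mirror the diff, everything else is simplified.
import asyncio


class Processor:
    def __init__(self):
        self._querying = False
        self._interrupted = False
        self._interrupted_message = ""
        self._go_ahead = asyncio.Event()

    async def _stream(self, text):
        for token in text.split():
            await asyncio.sleep(0.01)  # give an interrupting task a chance to run
            yield token

    async def process(self, text: str):
        if self._querying:
            self._interrupted = True          # signal the running call to stop
            await self._go_ahead.wait()       # wait until it releases control
            text = f"{self._interrupted_message} {text}"  # merge the unanswered prompt

        self._go_ahead.clear()
        self._querying = True
        async for token in self._stream(text):
            if self._interrupted:
                self._interrupted_message = text
                break
            print("chunk:", token)
        else:  # only reached when the loop was NOT broken by an interrupt
            self._querying = False
            print("full reply for:", text)

        self._go_ahead.set()
        self._interrupted = False


async def main():
    p = Processor()
    first = asyncio.create_task(p.process("tell me a long story"))
    await asyncio.sleep(0.02)
    await p.process("actually, stop")  # interrupts the first call and merges prompts
    await first


asyncio.run(main())
```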
@@ -166,13 +204,6 @@ class LLMAgent(BaseAgent):
                 full_message,
                 extra={"role": "assistant", "reference": message_id, "partial": False},
             )
-
-            self.history.append(
-                {
-                    "role": "assistant",
-                    "content": full_message,
-                }
-            )
         except httpx.HTTPError as err:
             self.logger.error("HTTP error.", exc_info=err)
             yield "LLM service unavailable."
@@ -192,6 +223,9 @@ class LLMAgent(BaseAgent):
             async with client.stream(
                 "POST",
                 settings.llm_settings.local_llm_url,
+                headers={"Authorization": f"Bearer {settings.llm_settings.api_key}"}
+                if settings.llm_settings.api_key
+                else {},
                 json={
                     "model": settings.llm_settings.local_llm_model,
                     "messages": messages,
@@ -145,4 +145,6 @@ class OpenAIWhisperSpeechRecognizer(SpeechRecognizer):

     def recognize_speech(self, audio: np.ndarray) -> str:
         self.load_model()
-        return whisper.transcribe(self.model, audio, **self._get_decode_options(audio))["text"]
+        return whisper.transcribe(self.model, audio, **self._get_decode_options(audio))[
+            "text"
+        ].strip()
@@ -9,7 +9,7 @@ from control_backend.agents.bdi.agentspeak_generator import AgentSpeakGenerator
 from control_backend.core.agent_system import InternalMessage
 from control_backend.core.config import settings
 from control_backend.schemas.belief_message import Belief, BeliefMessage
-from control_backend.schemas.program import ConditionalNorm, Program
+from control_backend.schemas.program import ConditionalNorm, Goal, Program
 from control_backend.schemas.ri_message import (
     GestureCommand,
     PauseCommand,
@@ -249,6 +249,16 @@ class UserInterruptAgent(BaseAgent):
         self._cond_norm_map = {}
         self._cond_norm_reverse_map = {}

+        def _register_goal(goal: Goal):
+            """Recursively register goals and their subgoals."""
+            slug = AgentSpeakGenerator.slugify(goal)
+            self._goal_map[str(goal.id)] = slug
+            self._goal_reverse_map[slug] = str(goal.id)
+
+            for step in goal.plan.steps:
+                if isinstance(step, Goal):
+                    _register_goal(step)
+
         for phase in program.phases:
             for trigger in phase.triggers:
                 slug = AgentSpeakGenerator.slugify(trigger)
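`_register_goal` recurses through `goal.plan.steps`, so arbitrarily nested subgoals land in `_goal_map`/`_goal_reverse_map`, not just each phase's top-level goals. A toy model of the traversal (the dataclasses are simplified stand-ins for the real `Goal`/`Plan` schemas, and the goal name stands in for `AgentSpeakGenerator.slugify`):

```python
# Toy model of the depth-first goal registration; Goal/Plan here are
# simplified stand-ins for control_backend.schemas.program.
import uuid
from dataclasses import dataclass, field


@dataclass
class Plan:
    steps: list = field(default_factory=list)  # Goals mixed with other step types


@dataclass
class Goal:
    name: str
    id: uuid.UUID = field(default_factory=uuid.uuid4)
    plan: Plan = field(default_factory=Plan)


goal_map: dict[str, str] = {}


def register_goal(goal: Goal) -> None:
    goal_map[str(goal.id)] = goal.name  # goal.name stands in for slugify(goal)
    for step in goal.plan.steps:
        if isinstance(step, Goal):  # non-goal steps are skipped
            register_goal(step)


grandchild = Goal("grandchild")
child = Goal("child", plan=Plan(steps=[grandchild]))
parent = Goal("parent", plan=Plan(steps=["say_hello", child]))
register_goal(parent)
assert set(goal_map.values()) == {"parent", "child", "grandchild"}
```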
@@ -256,8 +266,7 @@ class UserInterruptAgent(BaseAgent):
                 self._trigger_reverse_map[slug] = str(trigger.id)

             for goal in phase.goals:
-                self._goal_map[str(goal.id)] = AgentSpeakGenerator.slugify(goal)
-                self._goal_reverse_map[AgentSpeakGenerator.slugify(goal)] = str(goal.id)
+                _register_goal(goal)

         for goal, id in self._goal_reverse_map.items():
             self.logger.debug(f"Goal mapping: UI ID {goal} -> {id}")
@@ -123,6 +123,7 @@ class LLMSettings(BaseModel):

     local_llm_url: str = "http://localhost:1234/v1/chat/completions"
     local_llm_model: str = "gpt-oss"
+    api_key: str = ""
     chat_temperature: float = 1.0
     code_temperature: float = 0.3
     n_parallel: int = 4
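Defaulting `api_key` to the empty string keeps current deployments header-free; only a configured key flips the two HTTP clients above into bearer auth. A quick check of that interaction, assuming `LLMSettings` stays a plain pydantic model:

```python
# Sketch: how the empty-string default interacts with the conditional
# header expression used by the HTTP clients above.
from pydantic import BaseModel


class LLMSettings(BaseModel):
    local_llm_url: str = "http://localhost:1234/v1/chat/completions"
    local_llm_model: str = "gpt-oss"
    api_key: str = ""


def auth_headers(s: LLMSettings) -> dict:
    return {"Authorization": f"Bearer {s.api_key}"} if s.api_key else {}


assert auth_headers(LLMSettings()) == {}  # default: no Authorization header
assert auth_headers(LLMSettings(api_key="sk-local")) == {  # key set: bearer auth
    "Authorization": "Bearer sk-local"
}
```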
@@ -67,8 +67,52 @@ async def test_llm_processing_success(mock_httpx_client, mock_settings):
         thread="prompt_message",  # REQUIRED: thread must match handle_message logic
     )

+    agent._process_bdi_message = AsyncMock()
+
     await agent.handle_message(msg)

+    agent._process_bdi_message.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_process_bdi_message_success(mock_httpx_client, mock_settings):
+    # Setup the mock response for the stream
+    mock_response = MagicMock()
+    mock_response.raise_for_status = MagicMock()
+
+    # Simulate stream lines
+    lines = [
+        b'data: {"choices": [{"delta": {"content": "Hello"}}]}',
+        b'data: {"choices": [{"delta": {"content": " world"}}]}',
+        b'data: {"choices": [{"delta": {"content": "."}}]}',
+        b"data: [DONE]",
+    ]
+
+    async def aiter_lines_gen():
+        for line in lines:
+            yield line.decode()
+
+    mock_response.aiter_lines.side_effect = aiter_lines_gen
+
+    mock_stream_context = MagicMock()
+    mock_stream_context.__aenter__ = AsyncMock(return_value=mock_response)
+    mock_stream_context.__aexit__ = AsyncMock(return_value=None)
+
+    # Configure the client
+    mock_httpx_client.stream = MagicMock(return_value=mock_stream_context)
+
+    # Setup Agent
+    agent = LLMAgent("llm_agent")
+    agent.send = AsyncMock()  # Mock the send method to verify replies
+
+    mock_logger = MagicMock()
+    agent.logger = mock_logger
+
+    # Simulate receiving a message from BDI
+    prompt = LLMPromptMessage(text="Hi", norms=[], goals=[])
+
+    await agent._process_bdi_message(prompt)
+
     # Verification
     # "Hello world." constitutes one sentence/chunk based on punctuation split
     # The agent should call send once with the full sentence, PLUS once more for full reply
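The mocked lines use the OpenAI-compatible SSE framing (`data: <json>` per chunk, `data: [DONE]` as terminator), which is presumably what `_query_llm` parses before re-chunking on sentence punctuation. A minimal decoder for exactly those payloads, for reference; this mirrors the framing, not the agent's actual implementation:

```python
# Minimal decoder for the OpenAI-style SSE lines mocked above.
import json


def extract_deltas(lines):
    for line in lines:
        if not line.startswith("data: "):
            continue  # skip blank lines / keep-alives
        payload = line[len("data: "):]
        if payload == "[DONE]":
            break  # end-of-stream sentinel
        delta = json.loads(payload)["choices"][0]["delta"]
        if "content" in delta:
            yield delta["content"]


lines = [
    'data: {"choices": [{"delta": {"content": "Hello"}}]}',
    'data: {"choices": [{"delta": {"content": " world"}}]}',
    'data: {"choices": [{"delta": {"content": "."}}]}',
    "data: [DONE]",
]
assert "".join(extract_deltas(lines)) == "Hello world."
```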
@@ -85,28 +129,16 @@ async def test_llm_processing_errors(mock_httpx_client, mock_settings):
     agent = LLMAgent("llm_agent")
     agent.send = AsyncMock()
     prompt = LLMPromptMessage(text="Hi", norms=[], goals=[])
-    msg = InternalMessage(
-        to="llm",
-        sender=mock_settings.agent_settings.bdi_core_name,
-        body=prompt.model_dump_json(),
-        thread="prompt_message",
-    )

     # HTTP Error: stream method RAISES exception immediately
     mock_httpx_client.stream = MagicMock(side_effect=httpx.HTTPError("Fail"))

-    await agent.handle_message(msg)
+    await agent._process_bdi_message(prompt)

     # Check that error message was sent
     assert agent.send.called
     assert "LLM service unavailable." in agent.send.call_args_list[0][0][0].body

-    # General Exception
-    agent.send.reset_mock()
-    mock_httpx_client.stream = MagicMock(side_effect=Exception("Boom"))
-    await agent.handle_message(msg)
-    assert "Error processing the request." in agent.send.call_args_list[0][0][0].body
-

 @pytest.mark.asyncio
 async def test_llm_json_error(mock_httpx_client, mock_settings):
@@ -131,13 +163,7 @@ async def test_llm_json_error(mock_httpx_client, mock_settings):
     agent.logger = MagicMock()

     prompt = LLMPromptMessage(text="Hi", norms=[], goals=[])
-    msg = InternalMessage(
-        to="llm",
-        sender=mock_settings.agent_settings.bdi_core_name,
-        body=prompt.model_dump_json(),
-        thread="prompt_message",
-    )
-    await agent.handle_message(msg)
+    await agent._process_bdi_message(prompt)

     agent.logger.error.assert_called()  # Should log JSONDecodeError
@@ -535,3 +535,158 @@ async def test_send_experiment_control_unknown(agent):
     agent.send.assert_awaited()
     msg = agent.send.call_args[0][0]
     assert msg.thread == ""
+
+
+@pytest.mark.asyncio
+async def test_create_mapping_recursive_goals(agent):
+    """Verify that nested subgoals are correctly registered in the mapping."""
+    import uuid
+
+    # 1. Setup IDs
+    parent_goal_id = uuid.uuid4()
+    child_goal_id = uuid.uuid4()
+
+    # 2. Create the child goal
+    child_goal = Goal(
+        id=child_goal_id,
+        name="child_goal",
+        description="I am a subgoal",
+        plan=Plan(id=uuid.uuid4(), name="p_child", steps=[]),
+    )
+
+    # 3. Create the parent goal and put the child goal inside its plan steps
+    parent_goal = Goal(
+        id=parent_goal_id,
+        name="parent_goal",
+        description="I am a parent",
+        plan=Plan(id=uuid.uuid4(), name="p_parent", steps=[child_goal]),  # Nested here
+    )
+
+    # 4. Build the program
+    phase = Phase(
+        id=uuid.uuid4(),
+        name="phase1",
+        norms=[],
+        goals=[parent_goal],  # Only the parent is top-level
+        triggers=[],
+    )
+    prog = Program(phases=[phase])
+
+    # 5. Execute mapping
+    msg = InternalMessage(to="me", thread="new_program", body=prog.model_dump_json())
+    await agent.handle_message(msg)
+
+    # 6. Assertions
+    # Check parent
+    assert str(parent_goal_id) in agent._goal_map
+    assert agent._goal_map[str(parent_goal_id)] == "parent_goal"
+
+    # Check child (This confirms the recursion worked)
+    assert str(child_goal_id) in agent._goal_map
+    assert agent._goal_map[str(child_goal_id)] == "child_goal"
+    assert agent._goal_reverse_map["child_goal"] == str(child_goal_id)
+
+
+@pytest.mark.asyncio
+async def test_receive_loop_advanced_scenarios(agent):
+    """
+    Covers:
+    - JSONDecodeError (lines 86-88)
+    - Override: Trigger found (lines 108-109)
+    - Override: Norm found (lines 114-115)
+    - Override: Nothing found (line 134)
+    - Override Unachieve: Success & Fail (lines 136-145)
+    - Pause: Context true/false logs (lines 150-157)
+    - Next Phase (line 160)
+    """
+    # 1. Setup Data Maps
+    agent._trigger_map["101"] = "trigger_slug"
+    agent._cond_norm_map["202"] = "norm_slug"
+
+    # 2. Define Payloads
+    # A. Invalid JSON
+    bad_json = b"INVALID{JSON"
+
+    # B. Override -> Trigger
+    override_trigger = json.dumps({"type": "override", "context": "101"}).encode()
+
+    # C. Override -> Norm
+    override_norm = json.dumps({"type": "override", "context": "202"}).encode()
+
+    # D. Override -> Unknown
+    override_fail = json.dumps({"type": "override", "context": "999"}).encode()
+
+    # E. Unachieve -> Success
+    unachieve_success = json.dumps({"type": "override_unachieve", "context": "202"}).encode()
+
+    # F. Unachieve -> Fail
+    unachieve_fail = json.dumps({"type": "override_unachieve", "context": "999"}).encode()
+
+    # G. Pause (True)
+    pause_true = json.dumps({"type": "pause", "context": "true"}).encode()
+
+    # H. Pause (False/Resume)
+    pause_false = json.dumps({"type": "pause", "context": ""}).encode()
+
+    # I. Next Phase
+    next_phase = json.dumps({"type": "next_phase", "context": ""}).encode()
+
+    # 3. Setup Socket
+    agent.sub_socket.recv_multipart.side_effect = [
+        (b"topic", bad_json),
+        (b"topic", override_trigger),
+        (b"topic", override_norm),
+        (b"topic", override_fail),
+        (b"topic", unachieve_success),
+        (b"topic", unachieve_fail),
+        (b"topic", pause_true),
+        (b"topic", pause_false),
+        (b"topic", next_phase),
+        asyncio.CancelledError,  # End loop
+    ]
+
+    # Mock internal helpers to verify calls
+    agent._send_to_bdi = AsyncMock()
+    agent._send_to_bdi_belief = AsyncMock()
+    agent._send_pause_command = AsyncMock()
+    agent._send_experiment_control_to_bdi_core = AsyncMock()
+
+    # 4. Run Loop
+    try:
+        await agent._receive_button_event()
+    except asyncio.CancelledError:
+        pass
+
+    # 5. Assertions
+
+    # JSON Error
+    agent.logger.error.assert_called_with("Received invalid JSON payload on topic %s", b"topic")
+
+    # Override Trigger
+    agent._send_to_bdi.assert_awaited_with("force_trigger", "trigger_slug")
+
+    # Override Norm
+    # We expect _send_to_bdi_belief to be called for the norm
+    # Note: The loop calls _send_to_bdi_belief(asl_cond_norm, "cond_norm")
+    agent._send_to_bdi_belief.assert_any_call("norm_slug", "cond_norm")
+
+    # Override Fail (Warning log)
+    agent.logger.warning.assert_any_call("Could not determine which element to override.")
+
+    # Unachieve Success
+    # Loop calls _send_to_bdi_belief(asl_cond_norm, "cond_norm", True)
+    agent._send_to_bdi_belief.assert_any_call("norm_slug", "cond_norm", True)
+
+    # Unachieve Fail
+    agent.logger.warning.assert_any_call("Could not determine which conditional norm to unachieve.")
+
+    # Pause Logic
+    agent._send_pause_command.assert_any_call("true")
+    agent.logger.info.assert_any_call("Sent pause command.")
+
+    # Resume Logic
+    agent._send_pause_command.assert_any_call("")
+    agent.logger.info.assert_any_call("Sent resume command.")
+
+    # Next Phase
+    agent._send_experiment_control_to_bdi_core.assert_awaited_with("next_phase")
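For orientation, the scenario test above feeds `_receive_button_event` one `(topic, payload)` frame per event type and asserts on the side effects, which implies a JSON-decode-then-dispatch loop roughly shaped like the sketch below (handler calls are replaced by prints; everything except the quoted log strings is illustrative, not the agent's real code):

```python
# Stripped-down shape of the button-event dispatch exercised by the test above.
import json
import logging

logger = logging.getLogger("user_interrupt")


def dispatch(topic: bytes, payload: bytes, trigger_map: dict, norm_map: dict) -> None:
    try:
        event = json.loads(payload)
    except json.JSONDecodeError:
        logger.error("Received invalid JSON payload on topic %s", topic)
        return

    match event["type"]:
        case "override":
            ctx = event["context"]
            if ctx in trigger_map:
                print("force_trigger", trigger_map[ctx])
            elif ctx in norm_map:
                print("cond_norm", norm_map[ctx])
            else:
                logger.warning("Could not determine which element to override.")
        case "pause":
            print("pause" if event["context"] else "resume")
        case "next_phase":
            print("next_phase")


dispatch(b"topic", b'{"type": "override", "context": "101"}', {"101": "trigger_slug"}, {})
```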