test: fix tests
ref: N25B-452
This commit is contained in:
@@ -512,10 +512,6 @@ class BDICoreAgent(BaseAgent):
|
|||||||
|
|
||||||
yield
|
yield
|
||||||
|
|
||||||
@self.actions.add(".notify_ui", 0)
|
|
||||||
def _notify_ui(agent, term, intention):
|
|
||||||
pass
|
|
||||||
|
|
||||||
async def _send_to_llm(self, text: str, norms: str, goals: str):
|
async def _send_to_llm(self, text: str, norms: str, goals: str):
|
||||||
"""
|
"""
|
||||||
Sends a text query to the LLM agent asynchronously.
|
Sends a text query to the LLM agent asynchronously.
|
||||||
|
|||||||
@@ -59,9 +59,9 @@ class LLMAgent(BaseAgent):
|
|||||||
except ValidationError:
|
except ValidationError:
|
||||||
self.logger.debug("Prompt message from BDI core is invalid.")
|
self.logger.debug("Prompt message from BDI core is invalid.")
|
||||||
case "assistant_message":
|
case "assistant_message":
|
||||||
self.history.append({"role": "assistant", "content": msg.body})
|
self._apply_conversation_message({"role": "assistant", "content": msg.body})
|
||||||
case "user_message":
|
case "user_message":
|
||||||
self.history.append({"role": "user", "content": msg.body})
|
self._apply_conversation_message({"role": "user", "content": msg.body})
|
||||||
elif msg.sender == settings.agent_settings.bdi_program_manager_name:
|
elif msg.sender == settings.agent_settings.bdi_program_manager_name:
|
||||||
if msg.body == "clear_history":
|
if msg.body == "clear_history":
|
||||||
self.logger.debug("Clearing conversation history.")
|
self.logger.debug("Clearing conversation history.")
|
||||||
@@ -98,7 +98,7 @@ class LLMAgent(BaseAgent):
|
|||||||
else:
|
else:
|
||||||
self._querying = False
|
self._querying = False
|
||||||
|
|
||||||
self.history.append(
|
self._apply_conversation_message(
|
||||||
{
|
{
|
||||||
"role": "assistant",
|
"role": "assistant",
|
||||||
"content": full_message,
|
"content": full_message,
|
||||||
@@ -112,6 +112,12 @@ class LLMAgent(BaseAgent):
|
|||||||
self._go_ahead.set()
|
self._go_ahead.set()
|
||||||
self._interrupted = False
|
self._interrupted = False
|
||||||
|
|
||||||
|
def _apply_conversation_message(self, message: dict[str, str]):
|
||||||
|
if len(self.history) > 0 and message["role"] == self.history[-1]["role"]:
|
||||||
|
self.history[-1]["content"] += " " + message["content"]
|
||||||
|
return
|
||||||
|
self.history.append(message)
|
||||||
|
|
||||||
async def _send_reply(self, msg: str):
|
async def _send_reply(self, msg: str):
|
||||||
"""
|
"""
|
||||||
Sends a response message (chunk) back to the BDI Core Agent.
|
Sends a response message (chunk) back to the BDI Core Agent.
|
||||||
|
|||||||
@@ -61,8 +61,52 @@ async def test_llm_processing_success(mock_httpx_client, mock_settings):
|
|||||||
thread="prompt_message", # REQUIRED: thread must match handle_message logic
|
thread="prompt_message", # REQUIRED: thread must match handle_message logic
|
||||||
)
|
)
|
||||||
|
|
||||||
|
agent._process_bdi_message = AsyncMock()
|
||||||
|
|
||||||
await agent.handle_message(msg)
|
await agent.handle_message(msg)
|
||||||
|
|
||||||
|
agent._process_bdi_message.assert_called()
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.asyncio
|
||||||
|
async def test_process_bdi_message_success(mock_httpx_client, mock_settings):
|
||||||
|
# Setup the mock response for the stream
|
||||||
|
mock_response = MagicMock()
|
||||||
|
mock_response.raise_for_status = MagicMock()
|
||||||
|
|
||||||
|
# Simulate stream lines
|
||||||
|
lines = [
|
||||||
|
b'data: {"choices": [{"delta": {"content": "Hello"}}]}',
|
||||||
|
b'data: {"choices": [{"delta": {"content": " world"}}]}',
|
||||||
|
b'data: {"choices": [{"delta": {"content": "."}}]}',
|
||||||
|
b"data: [DONE]",
|
||||||
|
]
|
||||||
|
|
||||||
|
async def aiter_lines_gen():
|
||||||
|
for line in lines:
|
||||||
|
yield line.decode()
|
||||||
|
|
||||||
|
mock_response.aiter_lines.side_effect = aiter_lines_gen
|
||||||
|
|
||||||
|
mock_stream_context = MagicMock()
|
||||||
|
mock_stream_context.__aenter__ = AsyncMock(return_value=mock_response)
|
||||||
|
mock_stream_context.__aexit__ = AsyncMock(return_value=None)
|
||||||
|
|
||||||
|
# Configure the client
|
||||||
|
mock_httpx_client.stream = MagicMock(return_value=mock_stream_context)
|
||||||
|
|
||||||
|
# Setup Agent
|
||||||
|
agent = LLMAgent("llm_agent")
|
||||||
|
agent.send = AsyncMock() # Mock the send method to verify replies
|
||||||
|
|
||||||
|
mock_logger = MagicMock()
|
||||||
|
agent.logger = mock_logger
|
||||||
|
|
||||||
|
# Simulate receiving a message from BDI
|
||||||
|
prompt = LLMPromptMessage(text="Hi", norms=[], goals=[])
|
||||||
|
|
||||||
|
await agent._process_bdi_message(prompt)
|
||||||
|
|
||||||
# Verification
|
# Verification
|
||||||
# "Hello world." constitutes one sentence/chunk based on punctuation split
|
# "Hello world." constitutes one sentence/chunk based on punctuation split
|
||||||
# The agent should call send once with the full sentence, PLUS once more for full reply
|
# The agent should call send once with the full sentence, PLUS once more for full reply
|
||||||
@@ -79,28 +123,16 @@ async def test_llm_processing_errors(mock_httpx_client, mock_settings):
|
|||||||
agent = LLMAgent("llm_agent")
|
agent = LLMAgent("llm_agent")
|
||||||
agent.send = AsyncMock()
|
agent.send = AsyncMock()
|
||||||
prompt = LLMPromptMessage(text="Hi", norms=[], goals=[])
|
prompt = LLMPromptMessage(text="Hi", norms=[], goals=[])
|
||||||
msg = InternalMessage(
|
|
||||||
to="llm",
|
|
||||||
sender=mock_settings.agent_settings.bdi_core_name,
|
|
||||||
body=prompt.model_dump_json(),
|
|
||||||
thread="prompt_message",
|
|
||||||
)
|
|
||||||
|
|
||||||
# HTTP Error: stream method RAISES exception immediately
|
# HTTP Error: stream method RAISES exception immediately
|
||||||
mock_httpx_client.stream = MagicMock(side_effect=httpx.HTTPError("Fail"))
|
mock_httpx_client.stream = MagicMock(side_effect=httpx.HTTPError("Fail"))
|
||||||
|
|
||||||
await agent.handle_message(msg)
|
await agent._process_bdi_message(prompt)
|
||||||
|
|
||||||
# Check that error message was sent
|
# Check that error message was sent
|
||||||
assert agent.send.called
|
assert agent.send.called
|
||||||
assert "LLM service unavailable." in agent.send.call_args_list[0][0][0].body
|
assert "LLM service unavailable." in agent.send.call_args_list[0][0][0].body
|
||||||
|
|
||||||
# General Exception
|
|
||||||
agent.send.reset_mock()
|
|
||||||
mock_httpx_client.stream = MagicMock(side_effect=Exception("Boom"))
|
|
||||||
await agent.handle_message(msg)
|
|
||||||
assert "Error processing the request." in agent.send.call_args_list[0][0][0].body
|
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.asyncio
|
@pytest.mark.asyncio
|
||||||
async def test_llm_json_error(mock_httpx_client, mock_settings):
|
async def test_llm_json_error(mock_httpx_client, mock_settings):
|
||||||
@@ -125,13 +157,7 @@ async def test_llm_json_error(mock_httpx_client, mock_settings):
|
|||||||
agent.logger = MagicMock()
|
agent.logger = MagicMock()
|
||||||
|
|
||||||
prompt = LLMPromptMessage(text="Hi", norms=[], goals=[])
|
prompt = LLMPromptMessage(text="Hi", norms=[], goals=[])
|
||||||
msg = InternalMessage(
|
await agent._process_bdi_message(prompt)
|
||||||
to="llm",
|
|
||||||
sender=mock_settings.agent_settings.bdi_core_name,
|
|
||||||
body=prompt.model_dump_json(),
|
|
||||||
thread="prompt_message",
|
|
||||||
)
|
|
||||||
await agent.handle_message(msg)
|
|
||||||
|
|
||||||
agent.logger.error.assert_called() # Should log JSONDecodeError
|
agent.logger.error.assert_called() # Should log JSONDecodeError
|
||||||
|
|
||||||
|
|||||||
Reference in New Issue
Block a user