test: fix tests after changing schema and

ref: N25B-299
This commit is contained in:
Twirre Meulenbelt
2025-11-24 20:53:53 +01:00
parent 3f22b854a7
commit 54502e441c
6 changed files with 74 additions and 58 deletions

View File

@@ -14,16 +14,16 @@ class Goal(BaseModel):
achieved: bool achieved: bool
class KeywordTrigger(BaseModel): class TriggerKeyword(BaseModel):
id: str id: str
keyword: str keyword: str
class Trigger(BaseModel): class KeywordTrigger(BaseModel):
id: str id: str
label: str label: str
type: str type: str
keywords: list[KeywordTrigger] keywords: list[TriggerKeyword]
class Phase(BaseModel): class Phase(BaseModel):
@@ -31,7 +31,7 @@ class Phase(BaseModel):
label: str label: str
norms: list[Norm] norms: list[Norm]
goals: list[Goal] goals: list[Goal]
triggers: list[Trigger] triggers: list[KeywordTrigger]
class Program(BaseModel): class Program(BaseModel):

View File

@@ -7,7 +7,7 @@ import pytest
from control_backend.agents.bdi.bdi_core_agent.bdi_core_agent import BDICoreAgent from control_backend.agents.bdi.bdi_core_agent.bdi_core_agent import BDICoreAgent
from control_backend.core.agent_system import InternalMessage from control_backend.core.agent_system import InternalMessage
from control_backend.core.config import settings from control_backend.core.config import settings
from control_backend.schemas.belief_message import BeliefMessage from control_backend.schemas.belief_message import Belief, BeliefMessage
@pytest.fixture @pytest.fixture
@@ -45,7 +45,7 @@ async def test_setup_no_asl(mock_agentspeak_env, agent):
@pytest.mark.asyncio @pytest.mark.asyncio
async def test_handle_belief_collector_message(agent, mock_settings): async def test_handle_belief_collector_message(agent, mock_settings):
"""Test that incoming beliefs are added to the BDI agent""" """Test that incoming beliefs are added to the BDI agent"""
beliefs = {"user_said": ["Hello"]} beliefs = [Belief(name="user_said", arguments=["Hello"])]
msg = InternalMessage( msg = InternalMessage(
to="bdi_agent", to="bdi_agent",
sender=mock_settings.agent_settings.bdi_belief_collector_name, sender=mock_settings.agent_settings.bdi_belief_collector_name,
@@ -116,11 +116,11 @@ async def test_custom_actions(agent):
# Invoke action # Invoke action
mock_term = MagicMock() mock_term = MagicMock()
mock_term.args = ["Hello"] mock_term.args = ["Hello", "Norm", "Goal"]
mock_intention = MagicMock() mock_intention = MagicMock()
# Run generator # Run generator
gen = action_fn(agent, mock_term, mock_intention) gen = action_fn(agent, mock_term, mock_intention)
next(gen) # Execute next(gen) # Execute
agent._send_to_llm.assert_called_with("Hello") agent._send_to_llm.assert_called_with("Hello", "Norm", "Goal")

View File

@@ -8,6 +8,7 @@ from control_backend.agents.bdi import (
) )
from control_backend.core.agent_system import InternalMessage from control_backend.core.agent_system import InternalMessage
from control_backend.core.config import settings from control_backend.core.config import settings
from control_backend.schemas.belief_message import Belief
@pytest.fixture @pytest.fixture
@@ -57,10 +58,11 @@ async def test_handle_message_bad_json(agent, mocker):
async def test_handle_belief_text_sends_when_beliefs_exist(agent, mocker): async def test_handle_belief_text_sends_when_beliefs_exist(agent, mocker):
payload = {"type": "belief_extraction_text", "beliefs": {"user_said": ["hello"]}} payload = {"type": "belief_extraction_text", "beliefs": {"user_said": ["hello"]}}
spy = mocker.patch.object(agent, "_send_beliefs_to_bdi", new_callable=AsyncMock) spy = mocker.patch.object(agent, "_send_beliefs_to_bdi", new_callable=AsyncMock)
expected = [Belief(name="user_said", arguments=["hello"])]
await agent._handle_belief_text(payload, "origin") await agent._handle_belief_text(payload, "origin")
spy.assert_awaited_once_with(payload["beliefs"], origin="origin") spy.assert_awaited_once_with(expected, origin="origin")
@pytest.mark.asyncio @pytest.mark.asyncio
@@ -76,7 +78,7 @@ async def test_handle_belief_text_no_send_when_empty(agent, mocker):
@pytest.mark.asyncio @pytest.mark.asyncio
async def test_send_beliefs_to_bdi(agent): async def test_send_beliefs_to_bdi(agent):
agent.send = AsyncMock() agent.send = AsyncMock()
beliefs = {"user_said": ["hello", "world"]} beliefs = [Belief(name="user_said", arguments=["hello", "world"])]
await agent._send_beliefs_to_bdi(beliefs, origin="origin") await agent._send_beliefs_to_bdi(beliefs, origin="origin")
@@ -84,4 +86,4 @@ async def test_send_beliefs_to_bdi(agent):
sent: InternalMessage = agent.send.call_args.args[0] sent: InternalMessage = agent.send.call_args.args[0]
assert sent.to == settings.agent_settings.bdi_core_name assert sent.to == settings.agent_settings.bdi_core_name
assert sent.thread == "beliefs" assert sent.thread == "beliefs"
assert json.loads(sent.body)["beliefs"] == beliefs assert json.loads(sent.body)["beliefs"] == [belief.model_dump() for belief in beliefs]

View File

@@ -7,6 +7,7 @@ import pytest
from control_backend.agents.llm.llm_agent import LLMAgent, LLMInstructions from control_backend.agents.llm.llm_agent import LLMAgent, LLMInstructions
from control_backend.core.agent_system import InternalMessage from control_backend.core.agent_system import InternalMessage
from control_backend.schemas.llm_prompt_message import LLMPromptMessage
@pytest.fixture @pytest.fixture
@@ -49,8 +50,11 @@ async def test_llm_processing_success(mock_httpx_client, mock_settings):
agent.send = AsyncMock() # Mock the send method to verify replies agent.send = AsyncMock() # Mock the send method to verify replies
# Simulate receiving a message from BDI # Simulate receiving a message from BDI
prompt = LLMPromptMessage(text="Hi", norms=[], goals=[])
msg = InternalMessage( msg = InternalMessage(
to="llm_agent", sender=mock_settings.agent_settings.bdi_core_name, body="Hi" to="llm_agent",
sender=mock_settings.agent_settings.bdi_core_name,
body=prompt.model_dump_json(),
) )
await agent.handle_message(msg) await agent.handle_message(msg)
@@ -68,7 +72,12 @@ async def test_llm_processing_success(mock_httpx_client, mock_settings):
async def test_llm_processing_errors(mock_httpx_client, mock_settings): async def test_llm_processing_errors(mock_httpx_client, mock_settings):
agent = LLMAgent("llm_agent") agent = LLMAgent("llm_agent")
agent.send = AsyncMock() agent.send = AsyncMock()
msg = InternalMessage(to="llm", sender=mock_settings.agent_settings.bdi_core_name, body="Hi") prompt = LLMPromptMessage(text="Hi", norms=[], goals=[])
msg = InternalMessage(
to="llm",
sender=mock_settings.agent_settings.bdi_core_name,
body=prompt.model_dump_json(),
)
# HTTP Error # HTTP Error
mock_httpx_client.stream = MagicMock(side_effect=httpx.HTTPError("Fail")) mock_httpx_client.stream = MagicMock(side_effect=httpx.HTTPError("Fail"))
@@ -103,8 +112,11 @@ async def test_llm_json_error(mock_httpx_client, mock_settings):
agent.send = AsyncMock() agent.send = AsyncMock()
with patch.object(agent.logger, "error") as log: with patch.object(agent.logger, "error") as log:
prompt = LLMPromptMessage(text="Hi", norms=[], goals=[])
msg = InternalMessage( msg = InternalMessage(
to="llm", sender=mock_settings.agent_settings.bdi_core_name, body="Hi" to="llm",
sender=mock_settings.agent_settings.bdi_core_name,
body=prompt.model_dump_json(),
) )
await agent.handle_message(msg) await agent.handle_message(msg)
log.assert_called() # Should log JSONDecodeError log.assert_called() # Should log JSONDecodeError
@@ -112,10 +124,10 @@ async def test_llm_json_error(mock_httpx_client, mock_settings):
def test_llm_instructions(): def test_llm_instructions():
# Full custom # Full custom
instr = LLMInstructions(norms="N", goals="G") instr = LLMInstructions(norms=["N1", "N2"], goals=["G1", "G2"])
text = instr.build_developer_instruction() text = instr.build_developer_instruction()
assert "Norms to follow:\nN" in text assert "Norms to follow:\n- N1\n- N2" in text
assert "Goals to reach:\nG" in text assert "Goals to reach:\n- G1\n- G2" in text
# Defaults # Defaults
instr_def = LLMInstructions() instr_def = LLMInstructions()

View File

@@ -29,22 +29,22 @@ def make_valid_program_dict():
"phases": [ "phases": [
{ {
"id": "phase1", "id": "phase1",
"name": "basephase", "label": "basephase",
"nextPhaseId": "phase2", "norms": [{"id": "n1", "label": "norm", "norm": "be nice"}],
"phaseData": { "goals": [
"norms": [{"id": "n1", "name": "norm", "value": "be nice"}], {"id": "g1", "label": "goal", "description": "test goal", "achieved": False}
"goals": [ ],
{"id": "g1", "name": "goal", "description": "test goal", "achieved": False} "triggers": [
], {
"triggers": [ "id": "t1",
{ "label": "trigger",
"id": "t1", "type": "keywords",
"label": "trigger", "keywords": [
"type": "keyword", {"id": "kw1", "keyword": "keyword1"},
"value": ["stop", "exit"], {"id": "kw2", "keyword": "keyword2"},
} ],
], },
}, ],
} }
] ]
} }

View File

@@ -1,49 +1,52 @@
import pytest import pytest
from pydantic import ValidationError from pydantic import ValidationError
from control_backend.schemas.program import Goal, Norm, Phase, PhaseData, Program, Trigger from control_backend.schemas.program import (
Goal,
KeywordTrigger,
Norm,
Phase,
Program,
TriggerKeyword,
)
def base_norm() -> Norm: def base_norm() -> Norm:
return Norm( return Norm(
id="norm1", id="norm1",
name="testNorm", label="testNorm",
value="you should act nice", norm="testNormNorm",
) )
def base_goal() -> Goal: def base_goal() -> Goal:
return Goal( return Goal(
id="goal1", id="goal1",
name="testGoal", label="testGoal",
description="you should act nice", description="testGoalDescription",
achieved=False, achieved=False,
) )
def base_trigger() -> Trigger: def base_trigger() -> KeywordTrigger:
return Trigger( return KeywordTrigger(
id="trigger1", id="trigger1",
label="testTrigger", label="testTrigger",
type="keyword", type="keywords",
value=["Stop", "Exit"], keywords=[
) TriggerKeyword(id="keyword1", keyword="testKeyword1"),
TriggerKeyword(id="keyword2", keyword="testKeyword2"),
],
def base_phase_data() -> PhaseData:
return PhaseData(
norms=[base_norm()],
goals=[base_goal()],
triggers=[base_trigger()],
) )
def base_phase() -> Phase: def base_phase() -> Phase:
return Phase( return Phase(
id="phase1", id="phase1",
name="basephase", label="basephase",
nextPhaseId="phase2", norms=[base_norm()],
phaseData=base_phase_data(), goals=[base_goal()],
triggers=[base_trigger()],
) )
@@ -65,7 +68,7 @@ def test_valid_program():
program = base_program() program = base_program()
validated = Program.model_validate(program) validated = Program.model_validate(program)
assert isinstance(validated, Program) assert isinstance(validated, Program)
assert validated.phases[0].phaseData.norms[0].name == "testNorm" assert validated.phases[0].norms[0].norm == "testNormNorm"
def test_valid_deepprogram(): def test_valid_deepprogram():
@@ -73,10 +76,9 @@ def test_valid_deepprogram():
validated = Program.model_validate(program) validated = Program.model_validate(program)
# validate nested components directly # validate nested components directly
phase = validated.phases[0] phase = validated.phases[0]
assert isinstance(phase.phaseData, PhaseData) assert isinstance(phase.goals[0], Goal)
assert isinstance(phase.phaseData.goals[0], Goal) assert isinstance(phase.triggers[0], KeywordTrigger)
assert isinstance(phase.phaseData.triggers[0], Trigger) assert isinstance(phase.norms[0], Norm)
assert isinstance(phase.phaseData.norms[0], Norm)
def test_invalid_program(): def test_invalid_program():