diff --git a/src/control_backend/schemas/program.py b/src/control_backend/schemas/program.py index 5bd8ba8..db94347 100644 --- a/src/control_backend/schemas/program.py +++ b/src/control_backend/schemas/program.py @@ -14,16 +14,16 @@ class Goal(BaseModel): achieved: bool -class KeywordTrigger(BaseModel): +class TriggerKeyword(BaseModel): id: str keyword: str -class Trigger(BaseModel): +class KeywordTrigger(BaseModel): id: str label: str type: str - keywords: list[KeywordTrigger] + keywords: list[TriggerKeyword] class Phase(BaseModel): @@ -31,7 +31,7 @@ class Phase(BaseModel): label: str norms: list[Norm] goals: list[Goal] - triggers: list[Trigger] + triggers: list[KeywordTrigger] class Program(BaseModel): diff --git a/test/unit/agents/bdi/test_bdi_core_agent.py b/test/unit/agents/bdi/test_bdi_core_agent.py index 43ee033..5c73b76 100644 --- a/test/unit/agents/bdi/test_bdi_core_agent.py +++ b/test/unit/agents/bdi/test_bdi_core_agent.py @@ -7,7 +7,7 @@ import pytest from control_backend.agents.bdi.bdi_core_agent.bdi_core_agent import BDICoreAgent from control_backend.core.agent_system import InternalMessage from control_backend.core.config import settings -from control_backend.schemas.belief_message import BeliefMessage +from control_backend.schemas.belief_message import Belief, BeliefMessage @pytest.fixture @@ -45,7 +45,7 @@ async def test_setup_no_asl(mock_agentspeak_env, agent): @pytest.mark.asyncio async def test_handle_belief_collector_message(agent, mock_settings): """Test that incoming beliefs are added to the BDI agent""" - beliefs = {"user_said": ["Hello"]} + beliefs = [Belief(name="user_said", arguments=["Hello"])] msg = InternalMessage( to="bdi_agent", sender=mock_settings.agent_settings.bdi_belief_collector_name, @@ -116,11 +116,11 @@ async def test_custom_actions(agent): # Invoke action mock_term = MagicMock() - mock_term.args = ["Hello"] + mock_term.args = ["Hello", "Norm", "Goal"] mock_intention = MagicMock() # Run generator gen = action_fn(agent, 
mock_term, mock_intention) next(gen) # Execute - agent._send_to_llm.assert_called_with("Hello") + agent._send_to_llm.assert_called_with("Hello", "Norm", "Goal") diff --git a/test/unit/agents/bdi/test_belief_collector.py b/test/unit/agents/bdi/test_belief_collector.py index ca89a9d..df28ac4 100644 --- a/test/unit/agents/bdi/test_belief_collector.py +++ b/test/unit/agents/bdi/test_belief_collector.py @@ -8,6 +8,7 @@ from control_backend.agents.bdi import ( ) from control_backend.core.agent_system import InternalMessage from control_backend.core.config import settings +from control_backend.schemas.belief_message import Belief @pytest.fixture @@ -57,10 +58,11 @@ async def test_handle_message_bad_json(agent, mocker): async def test_handle_belief_text_sends_when_beliefs_exist(agent, mocker): payload = {"type": "belief_extraction_text", "beliefs": {"user_said": ["hello"]}} spy = mocker.patch.object(agent, "_send_beliefs_to_bdi", new_callable=AsyncMock) + expected = [Belief(name="user_said", arguments=["hello"])] await agent._handle_belief_text(payload, "origin") - spy.assert_awaited_once_with(payload["beliefs"], origin="origin") + spy.assert_awaited_once_with(expected, origin="origin") @pytest.mark.asyncio @@ -76,7 +78,7 @@ async def test_handle_belief_text_no_send_when_empty(agent, mocker): @pytest.mark.asyncio async def test_send_beliefs_to_bdi(agent): agent.send = AsyncMock() - beliefs = {"user_said": ["hello", "world"]} + beliefs = [Belief(name="user_said", arguments=["hello", "world"])] await agent._send_beliefs_to_bdi(beliefs, origin="origin") @@ -84,4 +86,4 @@ async def test_send_beliefs_to_bdi(agent): sent: InternalMessage = agent.send.call_args.args[0] assert sent.to == settings.agent_settings.bdi_core_name assert sent.thread == "beliefs" - assert json.loads(sent.body)["beliefs"] == beliefs + assert json.loads(sent.body)["beliefs"] == [belief.model_dump() for belief in beliefs] diff --git a/test/unit/agents/llm/test_llm_agent.py 
b/test/unit/agents/llm/test_llm_agent.py index 4a8b7df..2f1b72e 100644 --- a/test/unit/agents/llm/test_llm_agent.py +++ b/test/unit/agents/llm/test_llm_agent.py @@ -7,6 +7,7 @@ import pytest from control_backend.agents.llm.llm_agent import LLMAgent, LLMInstructions from control_backend.core.agent_system import InternalMessage +from control_backend.schemas.llm_prompt_message import LLMPromptMessage @pytest.fixture @@ -49,8 +50,11 @@ async def test_llm_processing_success(mock_httpx_client, mock_settings): agent.send = AsyncMock() # Mock the send method to verify replies # Simulate receiving a message from BDI + prompt = LLMPromptMessage(text="Hi", norms=[], goals=[]) msg = InternalMessage( - to="llm_agent", sender=mock_settings.agent_settings.bdi_core_name, body="Hi" + to="llm_agent", + sender=mock_settings.agent_settings.bdi_core_name, + body=prompt.model_dump_json(), ) await agent.handle_message(msg) @@ -68,7 +72,12 @@ async def test_llm_processing_success(mock_httpx_client, mock_settings): async def test_llm_processing_errors(mock_httpx_client, mock_settings): agent = LLMAgent("llm_agent") agent.send = AsyncMock() - msg = InternalMessage(to="llm", sender=mock_settings.agent_settings.bdi_core_name, body="Hi") + prompt = LLMPromptMessage(text="Hi", norms=[], goals=[]) + msg = InternalMessage( + to="llm", + sender=mock_settings.agent_settings.bdi_core_name, + body=prompt.model_dump_json(), + ) # HTTP Error mock_httpx_client.stream = MagicMock(side_effect=httpx.HTTPError("Fail")) @@ -103,8 +112,11 @@ async def test_llm_json_error(mock_httpx_client, mock_settings): agent.send = AsyncMock() with patch.object(agent.logger, "error") as log: + prompt = LLMPromptMessage(text="Hi", norms=[], goals=[]) msg = InternalMessage( - to="llm", sender=mock_settings.agent_settings.bdi_core_name, body="Hi" + to="llm", + sender=mock_settings.agent_settings.bdi_core_name, + body=prompt.model_dump_json(), ) await agent.handle_message(msg) log.assert_called() # Should log JSONDecodeError 
@@ -112,10 +124,10 @@ async def test_llm_json_error(mock_httpx_client, mock_settings): def test_llm_instructions(): # Full custom - instr = LLMInstructions(norms="N", goals="G") + instr = LLMInstructions(norms=["N1", "N2"], goals=["G1", "G2"]) text = instr.build_developer_instruction() - assert "Norms to follow:\nN" in text - assert "Goals to reach:\nG" in text + assert "Norms to follow:\n- N1\n- N2" in text + assert "Goals to reach:\n- G1\n- G2" in text # Defaults instr_def = LLMInstructions() diff --git a/test/unit/api/v1/endpoints/test_program_endpoint.py b/test/unit/api/v1/endpoints/test_program_endpoint.py index f6bb261..178159c 100644 --- a/test/unit/api/v1/endpoints/test_program_endpoint.py +++ b/test/unit/api/v1/endpoints/test_program_endpoint.py @@ -29,22 +29,22 @@ def make_valid_program_dict(): "phases": [ { "id": "phase1", - "name": "basephase", - "nextPhaseId": "phase2", - "phaseData": { - "norms": [{"id": "n1", "name": "norm", "value": "be nice"}], - "goals": [ - {"id": "g1", "name": "goal", "description": "test goal", "achieved": False} - ], - "triggers": [ - { - "id": "t1", - "label": "trigger", - "type": "keyword", - "value": ["stop", "exit"], - } - ], - }, + "label": "basephase", + "norms": [{"id": "n1", "label": "norm", "norm": "be nice"}], + "goals": [ + {"id": "g1", "label": "goal", "description": "test goal", "achieved": False} + ], + "triggers": [ + { + "id": "t1", + "label": "trigger", + "type": "keywords", + "keywords": [ + {"id": "kw1", "keyword": "keyword1"}, + {"id": "kw2", "keyword": "keyword2"}, + ], + }, + ], } ] } diff --git a/test/unit/schemas/test_ui_program_message.py b/test/unit/schemas/test_ui_program_message.py index 36352d6..7ed544e 100644 --- a/test/unit/schemas/test_ui_program_message.py +++ b/test/unit/schemas/test_ui_program_message.py @@ -1,49 +1,52 @@ import pytest from pydantic import ValidationError -from control_backend.schemas.program import Goal, Norm, Phase, PhaseData, Program, Trigger +from 
control_backend.schemas.program import (
+    Goal,
+    KeywordTrigger,
+    Norm,
+    Phase,
+    Program,
+    TriggerKeyword,
+)
 
 
 def base_norm() -> Norm:
     return Norm(
         id="norm1",
-        name="testNorm",
-        value="you should act nice",
+        label="testNorm",
+        norm="testNormNorm",
     )
 
 
 def base_goal() -> Goal:
     return Goal(
         id="goal1",
-        name="testGoal",
-        description="you should act nice",
+        label="testGoal",
+        description="testGoalDescription",
         achieved=False,
     )
 
 
-def base_trigger() -> Trigger:
-    return Trigger(
+def base_trigger() -> KeywordTrigger:
+    return KeywordTrigger(
         id="trigger1",
         label="testTrigger",
-        type="keyword",
-        value=["Stop", "Exit"],
-    )
-
-
-def base_phase_data() -> PhaseData:
-    return PhaseData(
-        norms=[base_norm()],
-        goals=[base_goal()],
-        triggers=[base_trigger()],
+        type="keywords",
+        keywords=[
+            TriggerKeyword(id="keyword1", keyword="testKeyword1"),
+            TriggerKeyword(id="keyword2", keyword="testKeyword2"),
+        ],
     )
 
 
 def base_phase() -> Phase:
     return Phase(
         id="phase1",
-        name="basephase",
-        nextPhaseId="phase2",
-        phaseData=base_phase_data(),
+        label="basephase",
+        norms=[base_norm()],
+        goals=[base_goal()],
+        triggers=[base_trigger()],
     )
 
 
@@ -65,7 +68,7 @@ def test_valid_program():
     program = base_program()
     validated = Program.model_validate(program)
     assert isinstance(validated, Program)
-    assert validated.phases[0].phaseData.norms[0].name == "testNorm"
+    assert validated.phases[0].norms[0].norm == "testNormNorm"
 
 
 def test_valid_deepprogram():
@@ -73,10 +76,9 @@
     validated = Program.model_validate(program)
     # validate nested components directly
     phase = validated.phases[0]
-    assert isinstance(phase.phaseData, PhaseData)
-    assert isinstance(phase.phaseData.goals[0], Goal)
-    assert isinstance(phase.phaseData.triggers[0], Trigger)
-    assert isinstance(phase.phaseData.norms[0], Norm)
+    assert isinstance(phase.goals[0], Goal)
+    assert isinstance(phase.triggers[0], KeywordTrigger)
+    assert isinstance(phase.norms[0], Norm)
 
 
 def test_invalid_program():