diff --git a/src/control_backend/agents/bdi/__init__.py b/src/control_backend/agents/bdi/__init__.py
index c8c8d47..8d45440 100644
--- a/src/control_backend/agents/bdi/__init__.py
+++ b/src/control_backend/agents/bdi/__init__.py
@@ -1,4 +1,5 @@
-from .bdi_core_agent.bdi_core_agent import BDICoreAgent as BDICoreAgent
+from control_backend.agents.bdi.bdi_core_agent import BDICoreAgent as BDICoreAgent
+
 from .belief_collector_agent import (
     BDIBeliefCollectorAgent as BDIBeliefCollectorAgent,
 )
diff --git a/src/control_backend/agents/bdi/bdi_core_agent/bdi_core_agent.py b/src/control_backend/agents/bdi/bdi_core_agent.py
similarity index 100%
rename from src/control_backend/agents/bdi/bdi_core_agent/bdi_core_agent.py
rename to src/control_backend/agents/bdi/bdi_core_agent.py
diff --git a/src/control_backend/agents/bdi/bdi_core_agent/rules.asl b/src/control_backend/agents/bdi/rules.asl
similarity index 100%
rename from src/control_backend/agents/bdi/bdi_core_agent/rules.asl
rename to src/control_backend/agents/bdi/rules.asl
diff --git a/src/control_backend/agents/llm/llm_agent.py b/src/control_backend/agents/llm/llm_agent.py
index 2488195..0263b30 100644
--- a/src/control_backend/agents/llm/llm_agent.py
+++ b/src/control_backend/agents/llm/llm_agent.py
@@ -165,7 +165,7 @@ class LLMAgent(BaseAgent):
         :yield: Raw text tokens (deltas) from the SSE stream.
         :raises httpx.HTTPError: If the API returns a non-200 status.
         """
-        async with httpx.AsyncClient(timeout=None) as client:
+        async with httpx.AsyncClient() as client:
             async with client.stream(
                 "POST",
                 settings.llm_settings.local_llm_url,
diff --git a/src/control_backend/core/config.py b/src/control_backend/core/config.py
index e0f0987..4a199ab 100644
--- a/src/control_backend/core/config.py
+++ b/src/control_backend/core/config.py
@@ -87,12 +87,10 @@ class LLMSettings(BaseModel):
 
     :ivar local_llm_url: URL for the local LLM API.
     :ivar local_llm_model: Name of the local LLM model to use.
-    :ivar request_timeout_s: Timeout in seconds for LLM requests.
     """
 
-    local_llm_url: str = "http://localhost:11434/v1/chat/completions"
+    local_llm_url: str = "http://localhost:1234/v1/chat/completions"
     local_llm_model: str = "gpt-oss"
-    request_timeout_s: int = 10
 
 
 class VADSettings(BaseModel):
diff --git a/src/control_backend/main.py b/src/control_backend/main.py
index 90d1d7d..6292de4 100644
--- a/src/control_backend/main.py
+++ b/src/control_backend/main.py
@@ -117,7 +117,7 @@ async def lifespan(app: FastAPI):
             BDICoreAgent,
             {
                 "name": settings.agent_settings.bdi_core_name,
-                "asl": "src/control_backend/agents/bdi/bdi_core_agent/rules.asl",
+                "asl": "src/control_backend/agents/bdi/rules.asl",
             },
         ),
         "BeliefCollectorAgent": (
diff --git a/test/unit/agents/bdi/test_bdi_core_agent.py b/test/unit/agents/bdi/test_bdi_core_agent.py
index 5c73b76..7d4cfab 100644
--- a/test/unit/agents/bdi/test_bdi_core_agent.py
+++ b/test/unit/agents/bdi/test_bdi_core_agent.py
@@ -4,7 +4,7 @@ from unittest.mock import AsyncMock, MagicMock, mock_open, patch
 import agentspeak
 import pytest
 
-from control_backend.agents.bdi.bdi_core_agent.bdi_core_agent import BDICoreAgent
+from control_backend.agents.bdi.bdi_core_agent import BDICoreAgent
 from control_backend.core.agent_system import InternalMessage
 from control_backend.core.config import settings
 from control_backend.schemas.belief_message import Belief, BeliefMessage