docs: add docs to CB
Pretty much every class and method should have documentation now. ref: N25B-295
@@ -16,9 +16,17 @@ from .llm_instructions import LLMInstructions
 
 class LLMAgent(BaseAgent):
     """
-    Agent responsible for processing user text input and querying a locally
-    hosted LLM for text generation. Receives messages from the BDI Core Agent
-    and responds with processed LLM output.
+    LLM Agent.
+
+    This agent is responsible for processing user text input and querying a locally
+    hosted LLM for text generation. It acts as the conversational brain of the system.
+
+    It receives :class:`~control_backend.schemas.llm_prompt_message.LLMPromptMessage`
+    payloads from the BDI Core Agent, constructs a conversation history, queries the
+    LLM via HTTP, and streams the response back to the BDI agent in natural chunks
+    (e.g., sentence by sentence).
+
+    :ivar history: A list of dictionaries representing the conversation history (Role/Content).
     """
 
     def __init__(self, name: str):
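The :ivar: above describes role/content dictionaries. A minimal sketch of what the history could look like, assuming the common chat-completions key names (an assumption, not taken from this repository):

    # Assumed role/content layout of the history attribute (illustrative only).
    history: list[dict[str, str]] = [
        {"role": "user", "content": "What's the weather like?"},
        {"role": "assistant", "content": "I don't have access to live weather data."},
    ]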
@@ -29,6 +37,14 @@ class LLMAgent(BaseAgent):
         self.logger.info("Setting up %s.", self.name)
 
     async def handle_message(self, msg: InternalMessage):
+        """
+        Handle incoming messages.
+
+        Expects messages from :attr:`settings.agent_settings.bdi_core_name` containing
+        an :class:`LLMPromptMessage` in the body.
+
+        :param msg: The received internal message.
+        """
         if msg.sender == settings.agent_settings.bdi_core_name:
             self.logger.debug("Processing message from BDI core.")
             try:
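A sketch of how the try-block (elided by this hunk) might parse the body into an LLMPromptMessage, assuming a Pydantic v2 model and a JSON body; both are assumptions, and the actual parsing is not shown here:

    # Hypothetical parsing step; a Pydantic LLMPromptMessage and a JSON
    # msg.body are assumptions, not confirmed by this diff.
    message = LLMPromptMessage.model_validate_json(msg.body)
    await self._process_bdi_message(message)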
@@ -40,6 +56,14 @@ class LLMAgent(BaseAgent):
             self.logger.debug("Message ignored (not from BDI core).")
 
     async def _process_bdi_message(self, message: LLMPromptMessage):
+        """
+        Orchestrate the LLM query and response streaming.
+
+        Iterates over the chunks yielded by :meth:`_query_llm` and forwards them
+        individually to the BDI agent via :meth:`_send_reply`.
+
+        :param message: The parsed prompt message containing text, norms, and goals.
+        """
         async for chunk in self._query_llm(message.text, message.norms, message.goals):
             await self._send_reply(chunk)
         self.logger.debug(
@@ -48,7 +72,9 @@ class LLMAgent(BaseAgent):
 
     async def _send_reply(self, msg: str):
         """
-        Sends a response message back to the BDI Core Agent.
+        Sends a response message (chunk) back to the BDI Core Agent.
+
+        :param msg: The text content of the chunk.
         """
         reply = InternalMessage(
             to=settings.agent_settings.bdi_core_name,
@@ -61,13 +87,18 @@ class LLMAgent(BaseAgent):
         self, prompt: str, norms: list[str], goals: list[str]
     ) -> AsyncGenerator[str]:
         """
-        Sends a chat completion request to the local LLM service and streams the response by
-        yielding fragments separated by punctuation like.
+        Send a chat completion request to the local LLM service and stream the response.
+
+        It constructs the full prompt using
+        :class:`~control_backend.agents.llm.llm_instructions.LLMInstructions`.
+        It streams the response from the LLM and buffers tokens until a natural break (punctuation)
+        is reached, then yields the chunk. This ensures that the robot speaks in complete phrases
+        rather than individual tokens.
 
         :param prompt: Input text prompt to pass to the LLM.
         :param norms: Norms the LLM should hold itself to.
         :param goals: Goals the LLM should achieve.
-        :yield: Fragments of the LLM-generated content.
+        :yield: Fragments of the LLM-generated content (e.g., sentences/phrases).
         """
         self.history.append(
             {
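The buffering behaviour described in this docstring can be sketched as follows; the helper name and break-character set are illustrative assumptions, not the actual implementation:

    # Buffer streamed tokens and yield whole phrases at punctuation breaks.
    BREAKS = {".", "!", "?", ";", ":"}

    async def chunk_by_punctuation(tokens):
        buffer = ""
        async for token in tokens:
            buffer += token
            if buffer and buffer[-1] in BREAKS:
                yield buffer  # a complete phrase, e.g. a full sentence
                buffer = ""
        if buffer:
            yield buffer  # flush any trailing text without punctuation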
@@ -85,7 +116,7 @@ class LLMAgent(BaseAgent):
             *self.history,
         ]
 
-        message_id = str(uuid.uuid4())
+        message_id = str(uuid.uuid4())  # noqa
 
         try:
             full_message = ""
@@ -127,7 +158,13 @@ class LLMAgent(BaseAgent):
             yield "Error processing the request."
 
     async def _stream_query_llm(self, messages) -> AsyncGenerator[str]:
-        """Raises httpx.HTTPError when the API gives an error."""
+        """
+        Perform the raw HTTP streaming request to the LLM API.
+
+        :param messages: The list of message dictionaries (role/content).
+        :yield: Raw text tokens (deltas) from the SSE stream.
+        :raises httpx.HTTPError: If the API returns a non-200 status.
+        """
         async with httpx.AsyncClient(timeout=None) as client:
             async with client.stream(
                 "POST",
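For context, a self-contained sketch of this kind of SSE streaming request with httpx; the endpoint URL, payload shape, and delta path assume an OpenAI-compatible API and are not taken from this repository:

    import json

    import httpx

    LLM_URL = "http://localhost:8000/v1/chat/completions"  # assumed endpoint

    async def stream_tokens(messages: list[dict[str, str]]):
        async with httpx.AsyncClient(timeout=None) as client:
            async with client.stream(
                "POST", LLM_URL, json={"messages": messages, "stream": True}
            ) as response:
                response.raise_for_status()  # raises httpx.HTTPError on non-200
                async for line in response.aiter_lines():
                    if not line.startswith("data: "):
                        continue  # skip keep-alives and non-data lines
                    payload = line.removeprefix("data: ")
                    if payload == "[DONE]":
                        break
                    delta = json.loads(payload)["choices"][0]["delta"]
                    if "content" in delta:
                        yield delta["content"]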