From bece44bf7ddd4471cdf7b9bd2c9d3cc4fa774ddf Mon Sep 17 00:00:00 2001
From: Storm
Date: Fri, 24 Oct 2025 17:25:25 +0200
Subject: [PATCH 1/5] feat: implement basic belief-from-text extractor

Communication with other agents has been tested with mock data, as the
other agents (transcriber and belief collector) are not yet implemented.

ref: N25B-208
---
 .../bdi/behaviours/text_belief_extractor.py   | 76 +++++++++++++++++++
 src/control_backend/agents/bdi/test_agent.py  | 26 +++++++
 .../agents/bdi/text_extractor.py              | 10 +++
 src/control_backend/main.py                   |  8 +-
 4 files changed, 119 insertions(+), 1 deletion(-)
 create mode 100644 src/control_backend/agents/bdi/behaviours/text_belief_extractor.py
 create mode 100644 src/control_backend/agents/bdi/test_agent.py
 create mode 100644 src/control_backend/agents/bdi/text_extractor.py

diff --git a/src/control_backend/agents/bdi/behaviours/text_belief_extractor.py b/src/control_backend/agents/bdi/behaviours/text_belief_extractor.py
new file mode 100644
index 0000000..c73a42e
--- /dev/null
+++ b/src/control_backend/agents/bdi/behaviours/text_belief_extractor.py
@@ -0,0 +1,76 @@
+import asyncio
+from spade.behaviour import CyclicBehaviour
+import logging
+from spade.message import Message
+import json
+from control_backend.core.config import settings
+
+
+class BeliefFromText(CyclicBehaviour):
+    logger = logging.getLogger("Belief From Text")
+
+    # TODO: LLM prompt still hardcoded
+    llm_instruction_prompt = """
+    You are an information extraction assistant for a BDI agent. Your task is to extract values from
+    a user's text to bind a list of ungrounded beliefs. Rules:
+    You will receive a JSON object with "beliefs" (a list of ungrounded AgentSpeak beliefs) and "text" (user's transcript).
+    Analyze the text to find values that semantically match the variables (X,Y,Z) in the beliefs.
+    A single piece of text might contain multiple instances that match a belief.
+    Respond ONLY with a single JSON object.
+    The JSON object's keys should be the belief functors (e.g., "weather").
+    The value for each key must be a list of lists.
+    Each inner list must contain the extracted arguments (as strings) for one instance of that belief.
+    CRITICAL: If no information in the text matches a belief, DO NOT include that key in your response.
+    """
+
+    # on_start agent receives message containing the beliefs to look out for and sets up the LLM with instruction prompt
+    #async def on_start(self):
+    #    msg = await self.receive(timeout=0.1)
+    #    self.beliefs = dict from message
+    #    send instruction prompt to LLM
+
+    beliefs: dict[str,list[str]]
+    beliefs = {
+        "mood": ["X"],
+        "car": ["Y"]
+    }
+
+    async def run(self):
+        msg = await self.receive(timeout=0.1)
+        if msg:
+            sender = msg.sender.node
+            match sender:
+                # TODO: Change to Transcriber agent name once implemented
+                case settings.agent_settings.test_agent_name:
+                    self.logger.info("Received text from transcriber.")
+                    await self._process_transcription(msg.body)
+                case _:
+                    self.logger.info("Received message from other agent.")
+                    pass
+        await asyncio.sleep(1)
+
+    async def _process_transcription(self,text: str):
+        text_prompt = f"Text: {text}"
+
+        beliefs_prompt = "These are the beliefs to be bound:\n"
+        for belief, values in self.beliefs.items():
+            beliefs_prompt += f"{belief}({', '.join(values)})\n"
+
+        prompt = text_prompt + beliefs_prompt
+        self.logger.info(prompt)
+        #prompt_msg = Message(to="LLMAgent@whatever")
+        #response = self.send(prompt_msg)
+
+        # Mock response; response is beliefs in JSON format, it parses to dict[str,list[list[str]]]
+        response = '{"mood": [["happy"]]}'
+        # Verify by trying to parse
+        try:
+            json.loads(response)
+            belief_message = Message(to=settings.agent_settings.bdi_core_agent_name + '@' + settings.agent_settings.host, body=response)
+            belief_message.thread = "beliefs"
+
+            await self.send(belief_message)
+            self.logger.info("Sent beliefs to BDI.")
+        except:
+            #loading failed so the response is in wrong format, throw warning (let LLM respond to ask again?)
+            self.logger.warning("Received LLM response in incorrect format.")
\ No newline at end of file
diff --git a/src/control_backend/agents/bdi/test_agent.py b/src/control_backend/agents/bdi/test_agent.py
new file mode 100644
index 0000000..eea2065
--- /dev/null
+++ b/src/control_backend/agents/bdi/test_agent.py
@@ -0,0 +1,26 @@
+import spade
+from spade.agent import Agent
+from spade.behaviour import OneShotBehaviour
+from spade.message import Message
+from spade.template import Template
+from control_backend.core.config import AgentSettings, settings
+
+class SenderAgent(Agent):
+    class InformBehav(OneShotBehaviour):
+        async def run(self):
+            msg = Message(to=settings.agent_settings.text_belief_extractor_agent_name + '@' + settings.agent_settings.host) # Instantiate the message
+            msg.body = "This is a test input to extract beliefs from.\n" # Set the message content
+
+            await self.send(msg)
+            print("Message sent!")
+
+            # set exit_code for the behaviour
+            self.exit_code = "Job Finished!"
+
+            # stop agent from behaviour
+            await self.agent.stop()
+
+    async def setup(self):
+        print("SenderAgent started")
+        self.b = self.InformBehav()
+        self.add_behaviour(self.b)
\ No newline at end of file
diff --git a/src/control_backend/agents/bdi/text_extractor.py b/src/control_backend/agents/bdi/text_extractor.py
new file mode 100644
index 0000000..2806a73
--- /dev/null
+++ b/src/control_backend/agents/bdi/text_extractor.py
@@ -0,0 +1,10 @@
+import spade
+from spade.agent import Agent
+import logging
+
+from control_backend.agents.bdi.behaviours.text_belief_extractor import BeliefFromText
+
+class TBeliefExtractor(Agent):
+    async def setup(self):
+        self.b = BeliefFromText()
+        self.add_behaviour(self.b)
\ No newline at end of file
diff --git a/src/control_backend/main.py b/src/control_backend/main.py
index 1f377c4..10b4081 100644
--- a/src/control_backend/main.py
+++ b/src/control_backend/main.py
@@ -13,6 +13,8 @@ import zmq
 
 # Internal imports
 from control_backend.agents.bdi.bdi_core import BDICoreAgent
+from control_backend.agents.bdi.text_extractor import TBeliefExtractor
+from control_backend.agents.bdi.test_agent import SenderAgent
 from control_backend.api.v1.router import api_router
 from control_backend.core.config import AgentSettings, settings
 from control_backend.core.zmq_context import context
@@ -32,8 +34,12 @@ async def lifespan(app: FastAPI):
     logger.info("Internal publishing socket bound to %s", internal_comm_socket)
 
     # Initiate agents
-    bdi_core = BDICoreAgent(settings.agent_settings.bdi_core_agent_name + '@' + settings.agent_settings.host, settings.agent_settings.bdi_core_agent_name, "src/control_backend/agents/bdi/rules.asl")
+    bdi_core = BDICoreAgent(settings.agent_settings.bdi_core_agent_name + '@' + settings.agent_settings.host, "pohpu7-huqsyH-qutduk", "src/control_backend/agents/bdi/rules.asl")
     await bdi_core.start()
+    text_belief_extractor = TBeliefExtractor(settings.agent_settings.text_belief_extractor_agent_name + '@' + settings.agent_settings.host, "pohpu7-huqsyH-qutduk")
+    await text_belief_extractor.start()
+    test_agent = SenderAgent(settings.agent_settings.test_agent_name + '@' + settings.agent_settings.host, "pohpu7-huqsyH-qutduk")
+    await test_agent.start()
 
     yield
-- 
2.49.1


From a43e5111dbf5a111a6adf639cdbb8b3e8f578267 Mon Sep 17 00:00:00 2001
From: Storm
Date: Tue, 28 Oct 2025 13:28:42 +0100
Subject: [PATCH 2/5] fix: quick first fix in preparation for merge

ref: N25B-208
---
 .../bdi/behaviours/text_belief_extractor.py   | 19 +++++++++++++++++--
 src/control_backend/agents/bdi/test_agent.py  |  3 +--
 src/control_backend/main.py                   |  6 +++---
 3 files changed, 21 insertions(+), 7 deletions(-)

diff --git a/src/control_backend/agents/bdi/behaviours/text_belief_extractor.py b/src/control_backend/agents/bdi/behaviours/text_belief_extractor.py
index c73a42e..89df3ca 100644
--- a/src/control_backend/agents/bdi/behaviours/text_belief_extractor.py
+++ b/src/control_backend/agents/bdi/behaviours/text_belief_extractor.py
@@ -43,7 +43,7 @@ class BeliefFromText(CyclicBehaviour):
                 # TODO: Change to Transcriber agent name once implemented
                 case settings.agent_settings.test_agent_name:
                     self.logger.info("Received text from transcriber.")
-                    await self._process_transcription(msg.body)
+                    await self._process_transcription_demo(msg.body)
                 case _:
                     self.logger.info("Received message from other agent.")
                     pass
         await asyncio.sleep(1)
@@ -73,4 +73,19 @@ class BeliefFromText(CyclicBehaviour):
             self.logger.info("Sent beliefs to BDI.")
         except:
             #loading failed so the response is in wrong format, throw warning (let LLM respond to ask again?)
-            self.logger.warning("Received LLM response in incorrect format.")
\ No newline at end of file
+            self.logger.warning("Received LLM response in incorrect format.")
+
+    async def _process_transcription_demo(self, txt: str):
+        """
+        Demo version to process the transcription input to beliefs. For the demo only the belief 'user_said' is relevant so
+        this function simply makes a dict with key: "user_said", value: txt and passes this to the Belief Collector agent.
+        """
+        belief = {"user_said": [[txt]]}
+        payload = json.dumps(belief)
+        # TODO: Change to belief collector
+        belief_msg = Message(to=settings.agent_settings.bdi_core_agent_name + '@' + settings.agent_settings.host, body=payload)
+        belief_msg.thread = "beliefs"
+
+        await self.send(belief_msg)
+        self.logger.info("Sent beliefs to Belief Collector.")
+    
\ No newline at end of file
diff --git a/src/control_backend/agents/bdi/test_agent.py b/src/control_backend/agents/bdi/test_agent.py
index eea2065..2fd7485 100644
--- a/src/control_backend/agents/bdi/test_agent.py
+++ b/src/control_backend/agents/bdi/test_agent.py
@@ -2,8 +2,7 @@ import spade
 from spade.agent import Agent
 from spade.behaviour import OneShotBehaviour
 from spade.message import Message
-from spade.template import Template
-from control_backend.core.config import AgentSettings, settings
+from control_backend.core.config import settings
 
 class SenderAgent(Agent):
     class InformBehav(OneShotBehaviour):
         async def run(self):
diff --git a/src/control_backend/main.py b/src/control_backend/main.py
index 10b4081..513f747 100644
--- a/src/control_backend/main.py
+++ b/src/control_backend/main.py
@@ -34,11 +34,11 @@ async def lifespan(app: FastAPI):
     logger.info("Internal publishing socket bound to %s", internal_comm_socket)
 
     # Initiate agents
-    bdi_core = BDICoreAgent(settings.agent_settings.bdi_core_agent_name + '@' + settings.agent_settings.host, "pohpu7-huqsyH-qutduk", "src/control_backend/agents/bdi/rules.asl")
+    bdi_core = BDICoreAgent(settings.agent_settings.bdi_core_agent_name + '@' + settings.agent_settings.host, "placeholder", "src/control_backend/agents/bdi/rules.asl")
     await bdi_core.start()
-    text_belief_extractor = TBeliefExtractor(settings.agent_settings.text_belief_extractor_agent_name + '@' + settings.agent_settings.host, "pohpu7-huqsyH-qutduk")
+    text_belief_extractor = TBeliefExtractor(settings.agent_settings.belief_collector_agent_name + '@' + settings.agent_settings.host, "placehodler")
     await text_belief_extractor.start()
-    test_agent = SenderAgent(settings.agent_settings.test_agent_name + '@' + settings.agent_settings.host, "pohpu7-huqsyH-qutduk")
+    test_agent = SenderAgent(settings.agent_settings.test_agent_name + '@' + settings.agent_settings.host, "placeholder")
     await test_agent.start()
 
     yield
-- 
2.49.1


From 041edd4c1efa89b901c6150812d1f74320bc5993 Mon Sep 17 00:00:00 2001
From: Storm
Date: Wed, 29 Oct 2025 14:53:14 +0100
Subject: [PATCH 3/5] feat: implement demo version for demo

ref: N25B-208
---
 .../bdi/behaviours/text_belief_extractor.py   |  2 +-
 src/control_backend/agents/bdi/test_agent.py  | 25 -------------------
 src/control_backend/main.py                   |  4 +--
 3 files changed, 2 insertions(+), 29 deletions(-)
 delete mode 100644 src/control_backend/agents/bdi/test_agent.py

diff --git a/src/control_backend/agents/bdi/behaviours/text_belief_extractor.py b/src/control_backend/agents/bdi/behaviours/text_belief_extractor.py
index 89df3ca..7d4a074 100644
--- a/src/control_backend/agents/bdi/behaviours/text_belief_extractor.py
+++ b/src/control_backend/agents/bdi/behaviours/text_belief_extractor.py
@@ -80,7 +80,7 @@ class BeliefFromText(CyclicBehaviour):
         Demo version to process the transcription input to beliefs. For the demo only the belief 'user_said' is relevant so
         this function simply makes a dict with key: "user_said", value: txt and passes this to the Belief Collector agent.
         """
-        belief = {"user_said": [[txt]]}
+        belief = {"user_said": [txt]}
         payload = json.dumps(belief)
         # TODO: Change to belief collector
         belief_msg = Message(to=settings.agent_settings.bdi_core_agent_name + '@' + settings.agent_settings.host, body=payload)
diff --git a/src/control_backend/agents/bdi/test_agent.py b/src/control_backend/agents/bdi/test_agent.py
deleted file mode 100644
index 2fd7485..0000000
--- a/src/control_backend/agents/bdi/test_agent.py
+++ /dev/null
@@ -1,25 +0,0 @@
-import spade
-from spade.agent import Agent
-from spade.behaviour import OneShotBehaviour
-from spade.message import Message
-from control_backend.core.config import settings
-
-class SenderAgent(Agent):
-    class InformBehav(OneShotBehaviour):
-        async def run(self):
-            msg = Message(to=settings.agent_settings.text_belief_extractor_agent_name + '@' + settings.agent_settings.host) # Instantiate the message
-            msg.body = "This is a test input to extract beliefs from.\n" # Set the message content
-
-            await self.send(msg)
-            print("Message sent!")
-
-            # set exit_code for the behaviour
-            self.exit_code = "Job Finished!"
-
-            # stop agent from behaviour
-            await self.agent.stop()
-
-    async def setup(self):
-        print("SenderAgent started")
-        self.b = self.InformBehav()
-        self.add_behaviour(self.b)
\ No newline at end of file
diff --git a/src/control_backend/main.py b/src/control_backend/main.py
index 513f747..4cff191 100644
--- a/src/control_backend/main.py
+++ b/src/control_backend/main.py
@@ -36,10 +36,8 @@ async def lifespan(app: FastAPI):
 
     # Initiate agents
     bdi_core = BDICoreAgent(settings.agent_settings.bdi_core_agent_name + '@' + settings.agent_settings.host, "placeholder", "src/control_backend/agents/bdi/rules.asl")
     await bdi_core.start()
-    text_belief_extractor = TBeliefExtractor(settings.agent_settings.belief_collector_agent_name + '@' + settings.agent_settings.host, "placehodler")
+    text_belief_extractor = TBeliefExtractor(settings.agent_settings.belief_collector_agent_name + '@' + settings.agent_settings.host, "placeholder")
     await text_belief_extractor.start()
-    test_agent = SenderAgent(settings.agent_settings.test_agent_name + '@' + settings.agent_settings.host, "placeholder")
-    await test_agent.start()
 
     yield
-- 
2.49.1


From 889ec1db5148a60f42fad6a78d870eff27c73221 Mon Sep 17 00:00:00 2001
From: Twirre Meulenbelt <43213592+TwirreM@users.noreply.github.com>
Date: Wed, 29 Oct 2025 15:28:15 +0100
Subject: [PATCH 4/5] chore: fix merge conflicts and small items

ref: N25B-208
---
 .../bdi/behaviours/text_belief_extractor.py   | 43 +++++++++++--------
 src/control_backend/agents/bdi/test_agent.py  |  3 +-
 .../agents/bdi/text_extractor.py              |  3 +-
 src/control_backend/core/config.py            |  5 ++-
 src/control_backend/main.py                   | 23 +++++++---
 5 files changed, 47 insertions(+), 30 deletions(-)

diff --git a/src/control_backend/agents/bdi/behaviours/text_belief_extractor.py b/src/control_backend/agents/bdi/behaviours/text_belief_extractor.py
index 89df3ca..aeba697 100644
--- a/src/control_backend/agents/bdi/behaviours/text_belief_extractor.py
+++ b/src/control_backend/agents/bdi/behaviours/text_belief_extractor.py
@@ -1,8 +1,10 @@
 import asyncio
-from spade.behaviour import CyclicBehaviour
-import logging
-from spade.message import Message
 import json
+import logging
+
+from spade.behaviour import CyclicBehaviour
+from spade.message import Message
+
 from control_backend.core.config import settings
 
 
@@ -11,8 +13,7 @@ class BeliefFromText(CyclicBehaviour):
 
     # TODO: LLM prompt still hardcoded
     llm_instruction_prompt = """
-    You are an information extraction assistant for a BDI agent. Your task is to extract values from
-    a user's text to bind a list of ungrounded beliefs. Rules:
+    You are an information extraction assistant for a BDI agent. Your task is to extract values from a user's text to bind a list of ungrounded beliefs. Rules:
     You will receive a JSON object with "beliefs" (a list of ungrounded AgentSpeak beliefs) and "text" (user's transcript).
     Analyze the text to find values that semantically match the variables (X,Y,Z) in the beliefs.
     A single piece of text might contain multiple instances that match a belief.
@@ -22,14 +23,14 @@ class BeliefFromText(CyclicBehaviour):
     Each inner list must contain the extracted arguments (as strings) for one instance of that belief.
     CRITICAL: If no information in the text matches a belief, DO NOT include that key in your response.
     """
-    
+
     # on_start agent receives message containing the beliefs to look out for and sets up the LLM with instruction prompt
     #async def on_start(self):
     #    msg = await self.receive(timeout=0.1)
     #    self.beliefs = dict from message
     #    send instruction prompt to LLM
 
-    beliefs: dict[str,list[str]]
+    beliefs: dict[str, list[str]]
     beliefs = {
         "mood": ["X"],
         "car": ["Y"]
@@ -48,14 +49,14 @@ class BeliefFromText(CyclicBehaviour):
                     self.logger.info("Received message from other agent.")
                     pass
         await asyncio.sleep(1)
-    
-    async def _process_transcription(self,text: str):
+
+    async def _process_transcription(self, text: str):
         text_prompt = f"Text: {text}"
-        
+
         beliefs_prompt = "These are the beliefs to be bound:\n"
         for belief, values in self.beliefs.items():
             beliefs_prompt += f"{belief}({', '.join(values)})\n"
-        
+
         prompt = text_prompt + beliefs_prompt
         self.logger.info(prompt)
         #prompt_msg = Message(to="LLMAgent@whatever")
         #response = self.send(prompt_msg)
@@ -66,26 +67,30 @@ class BeliefFromText(CyclicBehaviour):
         # Verify by trying to parse
         try:
             json.loads(response)
-            belief_message = Message(to=settings.agent_settings.bdi_core_agent_name + '@' + settings.agent_settings.host, body=response)
+            belief_message = Message(
+                to=settings.agent_settings.bdi_core_agent_name + '@' + settings.agent_settings.host,
+                body=response)
             belief_message.thread = "beliefs"
 
             await self.send(belief_message)
             self.logger.info("Sent beliefs to BDI.")
-        except:
-            #loading failed so the response is in wrong format, throw warning (let LLM respond to ask again?)
+        except json.JSONDecodeError:
+            # Parsing failed, so the response is in the wrong format, log warning
             self.logger.warning("Received LLM response in incorrect format.")
 
     async def _process_transcription_demo(self, txt: str):
         """
-        Demo version to process the transcription input to beliefs. For the demo only the belief 'user_said' is relevant so
-        this function simply makes a dict with key: "user_said", value: txt and passes this to the Belief Collector agent.
+        Demo version to process the transcription input to beliefs. For the demo only the belief
+        'user_said' is relevant, so this function simply makes a dict with key: "user_said",
+        value: txt and passes this to the Belief Collector agent.
""" belief = {"user_said": [[txt]]} payload = json.dumps(belief) # TODO: Change to belief collector - belief_msg = Message(to=settings.agent_settings.bdi_core_agent_name + '@' + settings.agent_settings.host, body=payload) + belief_msg = Message(to=settings.agent_settings.bdi_core_agent_name + + '@' + settings.agent_settings.host, + body=payload) belief_msg.thread = "beliefs" - + await self.send(belief_msg) self.logger.info("Sent beliefs to Belief Collector.") - \ No newline at end of file diff --git a/src/control_backend/agents/bdi/test_agent.py b/src/control_backend/agents/bdi/test_agent.py index 2fd7485..ee467bb 100644 --- a/src/control_backend/agents/bdi/test_agent.py +++ b/src/control_backend/agents/bdi/test_agent.py @@ -1,9 +1,10 @@ -import spade from spade.agent import Agent from spade.behaviour import OneShotBehaviour from spade.message import Message + from control_backend.core.config import settings + class SenderAgent(Agent): class InformBehav(OneShotBehaviour): async def run(self): diff --git a/src/control_backend/agents/bdi/text_extractor.py b/src/control_backend/agents/bdi/text_extractor.py index 2806a73..596a3fe 100644 --- a/src/control_backend/agents/bdi/text_extractor.py +++ b/src/control_backend/agents/bdi/text_extractor.py @@ -1,9 +1,8 @@ -import spade from spade.agent import Agent -import logging from control_backend.agents.bdi.behaviours.text_belief_extractor import BeliefFromText + class TBeliefExtractor(Agent): async def setup(self): self.b = BeliefFromText() diff --git a/src/control_backend/core/config.py b/src/control_backend/core/config.py index 5d539d0..7cfd993 100644 --- a/src/control_backend/core/config.py +++ b/src/control_backend/core/config.py @@ -10,6 +10,7 @@ class AgentSettings(BaseModel): host: str = "localhost" bdi_core_agent_name: str = "bdi_core" belief_collector_agent_name: str = "belief_collector" + text_belief_extractor_agent_name: str = "text_belief_extractor" vad_agent_name: str = "vad_agent" llm_agent_name: str = "llm_agent" test_agent_name: str = "test_agent" @@ -19,8 +20,8 @@ class AgentSettings(BaseModel): class LLMSettings(BaseModel): - local_llm_url: str = "http://145.107.82.68:1234/v1/chat/completions" - local_llm_model: str = "openai/gpt-oss-120b" + local_llm_url: str = "http://localhost:1234/v1/chat/completions" + local_llm_model: str = "openai/gpt-oss-20b" class Settings(BaseSettings): app_title: str = "PepperPlus" diff --git a/src/control_backend/main.py b/src/control_backend/main.py index 998067b..56938a9 100644 --- a/src/control_backend/main.py +++ b/src/control_backend/main.py @@ -44,18 +44,29 @@ async def lifespan(app: FastAPI): ) await ri_communication_agent.start() - llm_agent = LLMAgent(settings.agent_settings.llm_agent_name + '@' + settings.agent_settings.host, - settings.agent_settings.llm_agent_name) + llm_agent = LLMAgent( + settings.agent_settings.llm_agent_name + '@' + settings.agent_settings.host, + settings.agent_settings.llm_agent_name, + ) await llm_agent.start() - bdi_core = BDICoreAgent(settings.agent_settings.bdi_core_agent_name + '@' + settings.agent_settings.host, - settings.agent_settings.bdi_core_agent_name, "src/control_backend/agents/bdi/rules.asl") + bdi_core = BDICoreAgent( + settings.agent_settings.bdi_core_agent_name + '@' + settings.agent_settings.host, + settings.agent_settings.bdi_core_agent_name, + "src/control_backend/agents/bdi/rules.asl", + ) await bdi_core.start() - text_belief_extractor = TBeliefExtractor(settings.agent_settings.belief_collector_agent_name + '@' + settings.agent_settings.host, 
"placehodler") + text_belief_extractor = TBeliefExtractor( + settings.agent_settings.text_belief_extractor_agent_name + '@' + settings.agent_settings.host, + settings.agent_settings.text_belief_extractor_agent_name, + ) await text_belief_extractor.start() - test_agent = SenderAgent(settings.agent_settings.test_agent_name + '@' + settings.agent_settings.host, "placeholder") + test_agent = SenderAgent( + settings.agent_settings.test_agent_name + '@' + settings.agent_settings.host, + settings.agent_settings.test_agent_name + ) await test_agent.start() _temp_vad_agent = VADAgent("tcp://localhost:5558", False) -- 2.49.1 From 792d360fa4a8cb998aa3eb7da38063b100f5dfe1 Mon Sep 17 00:00:00 2001 From: Twirre Meulenbelt <43213592+TwirreM@users.noreply.github.com> Date: Wed, 29 Oct 2025 15:30:53 +0100 Subject: [PATCH 5/5] chore: remove test agent again ref: N25B-208 --- src/control_backend/main.py | 7 ------- 1 file changed, 7 deletions(-) diff --git a/src/control_backend/main.py b/src/control_backend/main.py index 56938a9..ccbeca8 100644 --- a/src/control_backend/main.py +++ b/src/control_backend/main.py @@ -14,7 +14,6 @@ from control_backend.agents.bdi.bdi_core import BDICoreAgent from control_backend.agents.vad_agent import VADAgent from control_backend.agents.llm.llm import LLMAgent from control_backend.agents.bdi.text_extractor import TBeliefExtractor -from control_backend.agents.bdi.test_agent import SenderAgent from control_backend.api.v1.router import api_router from control_backend.core.config import settings from control_backend.core.zmq_context import context @@ -63,12 +62,6 @@ async def lifespan(app: FastAPI): ) await text_belief_extractor.start() - test_agent = SenderAgent( - settings.agent_settings.test_agent_name + '@' + settings.agent_settings.host, - settings.agent_settings.test_agent_name - ) - await test_agent.start() - _temp_vad_agent = VADAgent("tcp://localhost:5558", False) await _temp_vad_agent.start() -- 2.49.1