Merge remote-tracking branch 'origin/feat/reset-experiment-and-phase' into feat/visual-emotion-recognition
@@ -120,7 +120,7 @@ class BaseAgent(ABC):
             task.cancel()
         self.logger.info(f"Agent {self.name} stopped")
 
-    async def send(self, message: InternalMessage):
+    async def send(self, message: InternalMessage, should_log: bool = True):
         """
         Send a message to another agent.
 
@@ -142,13 +142,17 @@ class BaseAgent(ABC):
         if target:
             await target.inbox.put(message)
-            self.logger.debug(f"Sent message {message.body} to {message.to} via regular inbox.")
+            if should_log:
+                self.logger.debug(
+                    f"Sent message {message.body} to {message.to} via regular inbox."
+                )
         else:
             # Apparently the target agent is in a different process; send via ZMQ
             topic = f"internal/{receiver}".encode()
             body = message.model_dump_json().encode()
             await self._internal_pub_socket.send_multipart([topic, body])
-            self.logger.debug(f"Sent message {message.body} to {message.to} via ZMQ.")
+            if should_log:
+                self.logger.debug(f"Sent message {message.body} to {message.to} via ZMQ.")
 
     async def _process_inbox(self):
         """
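
The new should_log flag exists so that chatty senders (for example, agents emitting one message per audio chunk) can suppress the per-message debug line. A minimal usage sketch from inside a BaseAgent subclass (illustrative, not part of this commit):

    # `message` is an InternalMessage built elsewhere.
    await self.send(message)                    # default: one debug log line per send
    await self.send(message, should_log=False)  # silent send for high-frequency traffic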
@@ -158,7 +162,6 @@ class BaseAgent(ABC):
         """
         while self._running:
             msg = await self.inbox.get()
-            self.logger.debug(f"Received message from {msg.sender}.")
             await self.handle_message(msg)
 
     async def _receive_internal_zmq_loop(self):
@@ -201,7 +204,16 @@ class BaseAgent(ABC):
 
         :param coro: The coroutine to execute as a task.
         """
-        task = asyncio.create_task(coro)
+
+        async def try_coro(coro_: Coroutine):
+            try:
+                await coro_
+            except asyncio.CancelledError:
+                self.logger.debug("A behavior was canceled successfully: %s", coro_)
+            except Exception:
+                self.logger.warning("An exception occurred in a behavior.", exc_info=True)
+
+        task = asyncio.create_task(try_coro(coro))
         self._tasks.add(task)
         task.add_done_callback(self._tasks.discard)
         return task
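
Routing every behavior through try_coro means a crashing behavior is logged with its traceback instead of surfacing later as an unretrieved task exception, and cancellation during shutdown is reported at debug level rather than raised. A self-contained sketch of the same pattern (the names here are illustrative, not from this repo):

    import asyncio
    import logging

    logging.basicConfig(level=logging.DEBUG)
    logger = logging.getLogger("agent")

    async def failing_behavior():
        raise RuntimeError("boom")

    async def wrapped(coro_):
        try:
            await coro_
        except asyncio.CancelledError:
            logger.debug("Behavior canceled: %s", coro_)
        except Exception:
            logger.warning("An exception occurred in a behavior.", exc_info=True)

    async def main():
        # Without the wrapper, this task's failure would show up as
        # "Task exception was never retrieved" when the task is garbage-collected.
        task = asyncio.create_task(wrapped(failing_behavior()))
        await task  # returns normally; the error was logged, not raised

    asyncio.run(main())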
@@ -74,10 +74,12 @@ class BehaviourSettings(BaseModel):
     :ivar vad_prob_threshold: Probability threshold for Voice Activity Detection.
     :ivar vad_initial_since_speech: Initial value for the 'since speech' counter in VAD.
     :ivar vad_non_speech_patience_chunks: Number of non-speech chunks to wait before speech is considered ended.
+    :ivar vad_begin_silence_chunks: The number of chunks of silence to prepend to speech chunks.
     :ivar transcription_max_concurrent_tasks: Maximum number of concurrent transcription tasks.
     :ivar transcription_words_per_minute: Estimated words per minute for transcription timing.
     :ivar transcription_words_per_token: Estimated words per token for transcription timing.
     :ivar transcription_token_buffer: Buffer for transcription tokens.
+    :ivar conversation_history_length_limit: The maximum number of messages to extract beliefs from.
     """
 
     # ATTENTION: When adding/removing settings, make sure to update the .env.example file
@@ -89,7 +91,8 @@ class BehaviourSettings(BaseModel):
     # VAD settings
     vad_prob_threshold: float = 0.5
     vad_initial_since_speech: int = 100
-    vad_non_speech_patience_chunks: int = 3
+    vad_non_speech_patience_chunks: int = 15
+    vad_begin_silence_chunks: int = 6
 
     # transcription behaviour
     transcription_max_concurrent_tasks: int = 3
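
Raising vad_non_speech_patience_chunks from 3 to 15 makes end-of-speech detection tolerate much longer pauses before an utterance is cut off. A rough sketch of how such a patience counter typically works (illustrative only; the actual VAD loop in this repo may differ):

    # One probability per audio chunk, e.g. from a VAD model.
    vad_prob_threshold = 0.5
    vad_non_speech_patience_chunks = 15

    def speech_ended(chunk_probs: list[float]) -> bool:
        """True once enough consecutive chunks fall below the speech threshold."""
        silent_run = 0
        for prob in chunk_probs:
            if prob >= vad_prob_threshold:
                silent_run = 0  # any speech chunk resets the patience counter
            else:
                silent_run += 1
                if silent_run >= vad_non_speech_patience_chunks:
                    return True  # pause long enough: the utterance is over
        return False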
@@ -97,6 +100,9 @@ class BehaviourSettings(BaseModel):
     transcription_words_per_token: float = 0.75  # (3 words = 4 tokens)
     transcription_token_buffer: int = 10
 
+    # Text belief extractor settings
+    conversation_history_length_limit: int = 10
+
 
 class LLMSettings(BaseModel):
     """
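
Per the ATTENTION comment above, the new fields imply matching entries in .env.example. A quick sanity check of the new defaults (the import path is hypothetical; adjust to wherever these models live):

    from settings import BehaviourSettings  # hypothetical import path

    settings = BehaviourSettings()
    assert settings.vad_non_speech_patience_chunks == 15
    assert settings.vad_begin_silence_chunks == 6
    assert settings.conversation_history_length_limit == 10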
@@ -104,12 +110,19 @@ class LLMSettings(BaseModel):
 
     :ivar local_llm_url: URL for the local LLM API.
     :ivar local_llm_model: Name of the local LLM model to use.
+    :ivar chat_temperature: The temperature to use while generating chat responses.
+    :ivar code_temperature: The temperature to use while generating code-like responses, such as
+        during belief inference.
+    :ivar n_parallel: The number of parallel calls allowed to be made to the LLM.
     """
 
     # ATTENTION: When adding/removing settings, make sure to update the .env.example file
 
     local_llm_url: str = "http://localhost:1234/v1/chat/completions"
     local_llm_model: str = "gpt-oss"
+    chat_temperature: float = 1.0
+    code_temperature: float = 0.3
+    n_parallel: int = 4
 
 
 class VADSettings(BaseModel):
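
The default URL points at an OpenAI-compatible chat-completions endpoint (LM Studio's default port), so the two temperatures are presumably passed per request: 1.0 for free-form chat, 0.3 to keep code-like outputs such as belief inference more deterministic. A hedged sketch of such a call (the helper below is illustrative, not this repo's actual client):

    import httpx

    LOCAL_LLM_URL = "http://localhost:1234/v1/chat/completions"
    LOCAL_LLM_MODEL = "gpt-oss"

    async def complete(prompt: str, temperature: float) -> str:
        """Call an OpenAI-compatible endpoint; the caller picks the temperature."""
        async with httpx.AsyncClient() as client:
            resp = await client.post(
                LOCAL_LLM_URL,
                json={
                    "model": LOCAL_LLM_MODEL,
                    "messages": [{"role": "user", "content": prompt}],
                    "temperature": temperature,  # 1.0 for chat, 0.3 for belief inference
                },
            )
            resp.raise_for_status()
            return resp.json()["choices"][0]["message"]["content"]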