build: integrate Docker functionality

Read environment variables throughout the codebase to support Docker
Compose integration.

ref: N25B-280
2025-11-14 13:59:13 +01:00
parent 1518b14867
commit 9c538d927f
4 changed files with 36 additions and 12 deletions
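
The variables introduced here (XMPP_HOST, LLM_URL, LLM_MODEL) are read with
os.environ.get() at import time, so Docker Compose only has to export them
before the Python process starts. A minimal sketch of how they are picked up,
assuming the settings module shown further down is importable as "settings"
and using placeholder service names:

# Sketch only: the service names "xmpp" and "llm" are placeholders, not
# part of this commit. The defaults are evaluated when the settings module
# is imported, so the variables must be set before the first import.
import os

os.environ["XMPP_HOST"] = "xmpp"               # e.g. a compose service name
os.environ["LLM_URL"] = "http://llm:1234/v1/"  # trailing slash matters, see below

from settings import AgentSettings, LLMSettings  # import path assumed

print(AgentSettings().host)         # -> "xmpp"
print(LLMSettings().local_llm_url)  # -> "http://llm:1234/v1/chat/completions"

Note that LLM_URL is concatenated with "chat/completions", so the value must
end in a slash.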

.dockerignore (new file, 14 additions)

@@ -0,0 +1,14 @@
+.git
+.venv
+__pycache__/
+*.pyc
+.dockerignore
+Dockerfile
+README.md
+.gitlab-ci.yml
+.gitignore
+.pre-commit-config.yaml
+.githooks/
+test/
+.pytest_cache/
+.ruff_cache/


@@ -21,10 +21,13 @@ class RICommunicationAgent(BaseAgent):
         password: str,
         port: int = 5222,
         verify_security: bool = False,
-        address="tcp://localhost:0000",
-        bind=False,
+        address=None,
+        bind=True,
     ):
         super().__init__(jid, password, port, verify_security)
+        if not address:
+            self.logger.critical("No address set for negotiations.")
+            raise Exception  # TODO: improve
         self._address = address
         self._bind = bind
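
The bare raise Exception above is flagged with a TODO; one possible follow-up
(a suggestion, not part of this commit) is a specific exception type so
callers can handle a missing address deliberately:

# Hypothetical resolution of the TODO above; not included in the commit.
if not address:
    self.logger.critical("No address set for negotiations.")
    raise ValueError("RICommunicationAgent requires an explicit ZMQ address")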
@@ -119,10 +122,7 @@ class RICommunicationAgent(BaseAgent):
         port = port_data["port"]
         bind = port_data["bind"]
-        if not bind:
-            addr = f"tcp://localhost:{port}"
-        else:
-            addr = f"tcp://*:{port}"
+        addr = f"tcp://{settings.zmq_settings.external_host}:{port}"
         match id:
             case "main":
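
Collapsing the localhost/wildcard branch works because the bind and connect
paths now share external_host. With the default "0.0.0.0", a binding socket
listens on every interface, which a containerized service needs so sibling
Compose services can reach it; the connecting side presumably overrides
external_host with a resolvable host name. A minimal pyzmq illustration
(socket type and port are arbitrary choices, not from the commit):

# Illustration only: PUB socket and port 5555 are arbitrary choices.
import zmq

ctx = zmq.Context.instance()
sock = ctx.socket(zmq.PUB)
sock.bind("tcp://0.0.0.0:5555")  # listen on all interfaces inside the container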


@@ -1,3 +1,5 @@
+import os
+
 from pydantic import BaseModel
 from pydantic_settings import BaseSettings, SettingsConfigDict
@@ -6,9 +8,11 @@ class ZMQSettings(BaseModel):
     internal_pub_address: str = "tcp://localhost:5560"
     internal_sub_address: str = "tcp://localhost:5561"
+    external_host: str = "0.0.0.0"


 class AgentSettings(BaseModel):
-    host: str = "localhost"
+    host: str = os.environ.get("XMPP_HOST", "localhost")
     bdi_core_agent_name: str = "bdi_core"
     belief_collector_agent_name: str = "belief_collector"
     text_belief_extractor_agent_name: str = "text_belief_extractor"
@@ -23,8 +27,8 @@ class AgentSettings(BaseModel):
 class LLMSettings(BaseModel):
-    local_llm_url: str = "http://localhost:1234/v1/chat/completions"
-    local_llm_model: str = "openai/gpt-oss-20b"
+    local_llm_url: str = os.environ.get("LLM_URL", "http://localhost:1234/v1/") + "chat/completions"
+    local_llm_model: str = os.environ.get("LLM_MODEL", "openai/gpt-oss-20b")


 class Settings(BaseSettings):
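
Reading os.environ at class-definition time freezes the values at import.
pydantic-settings can resolve the same variables itself at instantiation; a
sketch of that alternative (not what this commit does):

# Alternative sketch, not part of the commit: let pydantic-settings read
# LLM_URL / LLM_MODEL via an env prefix, resolved when instantiated.
from pydantic_settings import BaseSettings, SettingsConfigDict

class LLMSettings(BaseSettings):
    model_config = SettingsConfigDict(env_prefix="LLM_")

    url: str = "http://localhost:1234/v1/"  # overridden by env var LLM_URL
    model: str = "openai/gpt-oss-20b"       # overridden by env var LLM_MODEL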


@@ -1,5 +1,6 @@
 import contextlib
 import logging
+import os
 import threading

 import zmq
@@ -49,7 +50,9 @@ async def lifespan(app: FastAPI):
     # --- APPLICATION STARTUP ---
     setup_logging()
     logger.info("%s is starting up.", app.title)
-    logger.warning("testing extra", extra={"extra1": "one", "extra2": "two"})
+    logger.info(
+        "LLM_URL: %s, LLM_MODEL: %s", os.environ.get("LLM_URL"), os.environ.get("LLM_MODEL")
+    )

     # Initiate sockets
     proxy_thread = threading.Thread(target=setup_sockets)
@@ -72,7 +75,7 @@ async def lifespan(app: FastAPI):
"jid": f"{settings.agent_settings.ri_communication_agent_name}"
f"@{settings.agent_settings.host}",
"password": settings.agent_settings.ri_communication_agent_name,
"address": "tcp://*:5555",
"address": f"tcp://{settings.zmq_settings.external_host}:5555",
"bind": True,
},
),
@@ -114,7 +117,10 @@ async def lifespan(app: FastAPI):
         ),
         "VADAgent": (
             VADAgent,
-            {"audio_in_address": "tcp://localhost:5558", "audio_in_bind": False},
+            {
+                "audio_in_address": f"tcp://{settings.zmq_settings.external_host}:5558",
+                "audio_in_bind": True,
+            },
         ),
         "ProgramManager": (
             BDIProgramManager,