build: integrate Docker functionality
Add environment variables throughout the codebase to support Docker Compose integration. ref: N25B-280
@@ -1,3 +1,5 @@
+import os
+
 from pydantic import BaseModel
 from pydantic_settings import BaseSettings, SettingsConfigDict
 
@@ -6,9 +8,11 @@ class ZMQSettings(BaseModel):
     internal_pub_address: str = "tcp://localhost:5560"
     internal_sub_address: str = "tcp://localhost:5561"
+
+    external_host: str = "0.0.0.0"
 
 
 class AgentSettings(BaseModel):
-    host: str = "localhost"
+    host: str = os.environ.get("XMPP_HOST", "localhost")
     bdi_core_agent_name: str = "bdi_core"
     belief_collector_agent_name: str = "belief_collector"
     text_belief_extractor_agent_name: str = "text_belief_extractor"
@@ -23,8 +27,8 @@ class AgentSettings(BaseModel):
 
 
 class LLMSettings(BaseModel):
-    local_llm_url: str = "http://localhost:1234/v1/chat/completions"
-    local_llm_model: str = "openai/gpt-oss-20b"
+    local_llm_url: str = os.environ.get("LLM_URL", "http://localhost:1234/v1/") + "chat/completions"
+    local_llm_model: str = os.environ.get("LLM_MODEL", "openai/gpt-oss-20b")
 
 
 class Settings(BaseSettings):
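The overrides can be sanity-checked as below. This is a minimal sketch, not part of the commit: the module name `config` and the Compose service hostnames `xmpp` and `llm` are assumptions. Note that os.environ.get() in a class body runs once, at import time, so the variables must already be set when the module is imported (in Docker Compose, via each service's `environment:` section).

# Sketch: exercise the new environment-variable defaults.
# Assumed names: module "config" (the file changed in this diff), and
# service hostnames "xmpp" / "llm" from a hypothetical compose file, e.g.
#   environment:
#     - XMPP_HOST=xmpp
#     - LLM_URL=http://llm:1234/v1/
import os

os.environ["XMPP_HOST"] = "xmpp"               # assumed compose service name
os.environ["LLM_URL"] = "http://llm:1234/v1/"  # trailing slash matters: "chat/completions" is appended verbatim
os.environ["LLM_MODEL"] = "openai/gpt-oss-20b"

import config  # hypothetical module name for the settings file in this diff

print(config.AgentSettings().host)         # -> "xmpp"
print(config.LLMSettings().local_llm_url)  # -> "http://llm:1234/v1/chat/completions"

As a design note, pydantic-settings' BaseSettings (already imported for the top-level Settings class) can also read environment variables itself at instantiation time; the os.environ.get() defaults above instead fix the values once, when the module is imported.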