Main BDI brain structure implemented. Some TODOs remain and the implementation is still very basic: there is only one belief, user_said(Message), and every incoming message is sent straight to a function responsible for getting an LLM response. ref: N25B-197
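For illustration, a minimal sketch of how such a brain could be wired up with spade-bdi (one of the dependencies below) follows. This is not the actual pepperplus-cb code: the query_llm helper, the brain.asl plan, the JID/password, and all file names are assumptions; only the user_said(Message) belief and the "forward every message straight to the LLM-response function" flow come from the description above.

    # brain_sketch.py -- illustrative sketch only; names and structure are
    # assumptions, not the actual pepperplus-cb implementation.
    import asyncio

    import agentspeak
    import spade
    from spade_bdi.bdi import BDIAgent

    # A hypothetical brain.asl holding the single plan:
    #   +user_said(Message) <- .get_llm_response(Message).


    def query_llm(message: str) -> str:
        # Placeholder for the function responsible for getting an LLM response.
        return f"(LLM reply to: {message})"


    class BrainAgent(BDIAgent):
        def add_custom_actions(self, actions):
            # Register the custom internal action referenced by the ASL plan.
            @actions.add(".get_llm_response", 1)
            def _get_llm_response(agent, term, intention):
                message = agentspeak.grounded(term.args[0], intention.scope)
                print(query_llm(str(message)))
                yield


    async def main():
        brain = BrainAgent("brain@localhost", "password", "brain.asl")
        await brain.start()
        await asyncio.sleep(1)  # give the BDI behaviour time to come up
        # Incoming user text is asserted as the single belief, which triggers the plan:
        brain.bdi.set_belief("user_said", "hello robot")


    if __name__ == "__main__":
        spade.run(main())

The single +user_said(Message) plan is what routes every message straight to the LLM call; further beliefs and plans would slot into the ASL file as the remaining TODOs are filled in.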
pyproject.toml · 21 lines · 487 B · TOML
[project]
name = "pepperplus-cb"
version = "0.1.0"
description = "Add your description here"
readme = "README.md"
requires-python = ">=3.13"
dependencies = [
    "fastapi[all]>=0.115.6",
    "mlx-whisper>=0.4.3 ; sys_platform == 'darwin'",
    "openai-whisper>=20250625",
    "pyaudio>=0.2.14",
    "pydantic>=2.12.0",
    "pydantic-settings>=2.11.0",
    "pyzmq>=27.1.0",
    "silero-vad>=6.0.0",
    "spade>=4.1.0",
    "spade-bdi>=0.3.2",
    "torch>=2.8.0",
    "uvicorn>=0.37.0",
]