Compare commits
1 commit: feat/pause ... feat/resea (SHA1: dd2755b7f9)
.env.example (new file, 20 lines)
@@ -0,0 +1,20 @@
# Example .env file. To use, make a copy, call it ".env" (i.e. removing the ".example" suffix), then edit the values.

# The hostname of the Robot Interface. Change it if the Control Backend and Robot Interface are running on different computers.
RI_HOST="localhost"

# URL for the local LLM API. Must be an API that implements the OpenAI Chat Completions API, which most do.
LLM_SETTINGS__LOCAL_LLM_URL="http://localhost:1234/v1/chat/completions"

# Name of the local LLM model to use.
LLM_SETTINGS__LOCAL_LLM_MODEL="gpt-oss"

# Number of non-speech chunks to wait before speech is considered ended. A chunk is approximately 31 ms. Increasing this number allows longer pauses in speech, but also increases response time.
BEHAVIOUR_SETTINGS__VAD_NON_SPEECH_PATIENCE_CHUNKS=3

# Timeout in milliseconds for socket polling. Increase this number if network latency/jitter is high, which is often the case when using Wi-Fi; 500 ms is a reasonable starting point. A symptom of this issue is transcriptions getting cut off.
BEHAVIOUR_SETTINGS__SOCKET_POLLER_TIMEOUT_MS=100

# For an exhaustive list of options, see the control_backend.core.config module in the docs.
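The double-underscore names above follow the pydantic-settings nested-delimiter convention. As a minimal sketch only (the real models live in `control_backend.core.config`, so the class and field names below are assumptions), the mapping works roughly like this:

```python
from pydantic import BaseModel
from pydantic_settings import BaseSettings, SettingsConfigDict


class LLMSettings(BaseModel):
    # Illustrative fields mirroring the LLM_SETTINGS__* variables above.
    local_llm_url: str = "http://localhost:1234/v1/chat/completions"
    local_llm_model: str = "gpt-oss"


class Settings(BaseSettings):
    # "__" in an env var name descends into the nested model.
    model_config = SettingsConfigDict(env_file=".env", env_nested_delimiter="__")

    ri_host: str = "localhost"
    llm_settings: LLMSettings = LLMSettings()


settings = Settings()
# LLM_SETTINGS__LOCAL_LLM_MODEL=... in .env ends up here:
print(settings.llm_settings.local_llm_model)
```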
@@ -1,5 +1,7 @@
version: 1

+# Make new levels (observation, action)
+# between 20 and 30
custom_levels:
  OBSERVATION: 25
  ACTION: 26
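For context, the `custom_levels` block presumably gets registered with the standard `logging` module when the config is loaded. A minimal sketch of such a registration (not the project's actual loader):

```python
import logging

CUSTOM_LEVELS = {"OBSERVATION": 25, "ACTION": 26}

# Register the level names so formatters show OBSERVATION/ACTION instead of "Level 25".
for name, level in CUSTOM_LEVELS.items():
    logging.addLevelName(level, name)

logging.basicConfig(level=min(CUSTOM_LEVELS.values()))
logger = logging.getLogger("control_backend")
logger.log(CUSTOM_LEVELS["OBSERVATION"], "robot observed the user waving")
```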
@@ -19,6 +21,8 @@ formatters:
    format: "{name} {levelname} {levelno} {message} {created} {relativeCreated}"
    style: "{"

+# Create class = logging.FileHandler
+#
handlers:
  console:
    class: logging.StreamHandler
@@ -35,6 +39,8 @@ root:
  level: WARN
  handlers: [console]

+# Create research logger, lowest level (21)
+# Handler: UI Handler
loggers:
  control_backend:
    level: LLM
@@ -27,6 +27,7 @@ This part might differ based on what model you choose.
copy the name of the loaded model and use it to replace `local_llm_model` in the settings.


## Running
To run the project (development server), execute the following command (while inside the root repository):

@@ -34,6 +35,14 @@ To run the project (development server), execute the following command (while in
uv run fastapi dev src/control_backend/main.py
```

+### Environment Variables
+
+You can use environment variables to change settings. Make a copy of the [`.env.example`](.env.example) file, name it `.env` and put it in the root directory. The file itself describes how to do the configuration.
+
+For an exhaustive list of environment options, see the `control_backend.core.config` module in the docs.
+
## Testing
Testing happens automatically when opening a merge request to any branch. If you want to manually run the test suite, you can do so by running the following for unit tests:
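The values configured this way surface in code through the shared settings object used elsewhere in this diff. A hypothetical usage sketch (the attribute paths are taken from code in this commit; whether they exist in a given checkout depends on the actual config module):

```python
from control_backend.core.config import settings

print(settings.zmq_settings.ri_command_address)  # ZMQ address for robot commands
print(settings.agent_settings.llm_name)          # internal name of the LLM agent
```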
@@ -15,7 +15,6 @@ dependencies = [
    "pydantic>=2.12.0",
    "pydantic-settings>=2.11.0",
    "python-json-logger>=4.0.0",
-    "python-slugify>=8.0.4",
    "pyyaml>=6.0.3",
    "pyzmq>=27.1.0",
    "silero-vad>=6.0.0",
@@ -33,7 +33,7 @@ class RobotGestureAgent(BaseAgent):
    def __init__(
        self,
        name: str,
-        address=settings.zmq_settings.ri_command_address,
+        address: str,
        bind=False,
        gesture_data=None,
        single_gesture_data=None,
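A hypothetical call-site sketch for the new signature: with the default removed, the caller now supplies the address explicitly (the old default came from `settings.zmq_settings.ri_command_address`). The import path of the agent class is an assumption:

```python
from control_backend.core.config import settings
from control_backend.agents.robot_gesture import RobotGestureAgent  # assumed module path

gesture_agent = RobotGestureAgent(
    name="RobotGestureAgent",
    address=settings.zmq_settings.ri_command_address,
)
```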
@@ -1,203 +0,0 @@
import typing
from dataclasses import dataclass, field


# --- Types ---


@dataclass
class BeliefLiteral:
    """
    Represents a literal or atom.
    Example: phase(1), user_said("hello"), ~started
    """

    functor: str
    args: list[str] = field(default_factory=list)
    negated: bool = False

    def __str__(self):
        # In ASL, 'not' is usually for closed-world assumption (prolog style),
        # '~' is for explicit negation in beliefs.
        # For simplicity in behavior trees, we often use 'not' for conditions.
        prefix = "not " if self.negated else ""
        if not self.args:
            return f"{prefix}{self.functor}"

        # Clean args to ensure strings are quoted if they look like strings,
        # but usually the converter handles the quoting of string literals.
        args_str = ", ".join(self.args)
        return f"{prefix}{self.functor}({args_str})"


@dataclass
class GoalLiteral:
    name: str

    def __str__(self):
        return f"!{self.name}"


@dataclass
class ActionLiteral:
    """
    Represents a step in a plan body.
    Example: .say("Hello") or !achieve_goal
    """

    code: str

    def __str__(self):
        return self.code


@dataclass
class BinaryOp:
    """
    Represents logical operations.
    Example: (A & B) | C
    """

    left: "Expression | str"
    operator: typing.Literal["&", "|"]
    right: "Expression | str"

    def __str__(self):
        l_str = str(self.left)
        r_str = str(self.right)

        if isinstance(self.left, BinaryOp):
            l_str = f"({l_str})"
        if isinstance(self.right, BinaryOp):
            r_str = f"({r_str})"

        return f"{l_str} {self.operator} {r_str}"


Literal = BeliefLiteral | GoalLiteral | ActionLiteral
Expression = Literal | BinaryOp | str


@dataclass
class Rule:
    """
    Represents an inference rule.
    Example: head :- body.
    """

    head: Expression
    body: Expression | None = None

    def __str__(self):
        if not self.body:
            return f"{self.head}."
        return f"{self.head} :- {self.body}."


@dataclass
class PersistentRule:
    """
    Represents an inference rule, where the inferred belief is persistent when formed.
    """

    head: Expression
    body: Expression

    def __str__(self):
        if not self.body:
            raise Exception("Rule without body should not be persistent.")

        lines = []

        if isinstance(self.body, BinaryOp):
            lines.append(f"+{self.body.left}")
            if self.body.operator == "&":
                lines.append(f" : {self.body.right}")
            lines.append(f" <- +{self.head}.")
            if self.body.operator == "|":
                lines.append(f"+{self.body.right}")
                lines.append(f" <- +{self.head}.")

        return "\n".join(lines)


@dataclass
class Plan:
    """
    Represents a plan.
    Syntax: +trigger : context <- body.
    """

    trigger: BeliefLiteral | GoalLiteral
    context: list[Expression] = field(default_factory=list)
    body: list[ActionLiteral] = field(default_factory=list)

    def __str__(self):
        # Indentation settings
        INDENT = " "
        ARROW = "\n <- "
        COLON = "\n : "

        # Build Header
        header = f"+{self.trigger}"
        if self.context:
            ctx_str = f" &\n{INDENT}".join(str(c) for c in self.context)
            header += f"{COLON}{ctx_str}"

        # Case 1: Empty body
        if not self.body:
            return f"{header}."

        # Case 2: Short body (optional optimization, keeping it uniform usually better)
        header += ARROW

        lines = []
        # We start the first action on the same line or next line.
        # Let's put it on the next line for readability if there are multiple.

        if len(self.body) == 1:
            return f"{header}{self.body[0]}."

        # First item
        lines.append(f"{header}{self.body[0]};")
        # Middle items
        for item in self.body[1:-1]:
            lines.append(f"{INDENT}{item};")
        # Last item
        lines.append(f"{INDENT}{self.body[-1]}.")

        return "\n".join(lines)


@dataclass
class AgentSpeakFile:
    """
    Root element representing the entire generated file.
    """

    initial_beliefs: list[Rule] = field(default_factory=list)
    inference_rules: list[Rule | PersistentRule] = field(default_factory=list)
    plans: list[Plan] = field(default_factory=list)

    def __str__(self):
        sections = []

        if self.initial_beliefs:
            sections.append("// --- Initial Beliefs & Facts ---")
            sections.extend(str(rule) for rule in self.initial_beliefs)
            sections.append("")

        if self.inference_rules:
            sections.append("// --- Inference Rules ---")
            sections.extend(str(rule) for rule in self.inference_rules if isinstance(rule, Rule))
            sections.append("")
            sections.extend(
                str(rule) for rule in self.inference_rules if isinstance(rule, PersistentRule)
            )
            sections.append("")

        if self.plans:
            sections.append("// --- Plans ---")
            # Separate plans by a newline for readability
            sections.extend(str(plan) + "\n" for plan in self.plans)

        return "\n".join(sections)
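For reference, a small sketch of how these removed dataclasses rendered to AgentSpeak; the expected output in the comment is derived from the `__str__` implementations above:

```python
plan = Plan(
    trigger=BeliefLiteral("user_said", ["Message"]),
    context=[BeliefLiteral("phase", ['"intro"'])],
    body=[ActionLiteral('.say("Hello")'), ActionLiteral("!transition_phase")],
)
print(plan)
# +user_said(Message)
#  : phase("intro")
#  <- .say("Hello");
#  !transition_phase.
```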
@@ -1,425 +0,0 @@
import asyncio
import time
from functools import singledispatchmethod

from slugify import slugify

from control_backend.agents.bdi import BDICoreAgent
from control_backend.agents.bdi.asl_ast import (
    ActionLiteral,
    AgentSpeakFile,
    BeliefLiteral,
    BinaryOp,
    Expression,
    GoalLiteral,
    PersistentRule,
    Plan,
    Rule,
)
from control_backend.agents.bdi.bdi_program_manager import test_program
from control_backend.schemas.program import (
    BasicBelief,
    Belief,
    ConditionalNorm,
    GestureAction,
    Goal,
    InferredBelief,
    KeywordBelief,
    LLMAction,
    LogicalOperator,
    Phase,
    Program,
    ProgramElement,
    SemanticBelief,
    SpeechAction,
)


async def do_things():
    res = input("Wanna generate")
    if res == "y":
        program = AgentSpeakGenerator().generate(test_program)
        filename = f"{int(time.time())}.asl"
        with open(filename, "w") as f:
            f.write(program)
    else:
        # filename = "0test.asl"
        filename = "1766062491.asl"
    bdi_agent = BDICoreAgent("BDICoreAgent", filename)
    flag = asyncio.Event()
    await bdi_agent.start()
    await flag.wait()


def do_other_things():
    print(AgentSpeakGenerator().generate(test_program))


class AgentSpeakGenerator:
    """
    Converts a Pydantic Program behavior model into an AgentSpeak(L) AST,
    then renders it to a string.
    """

    def generate(self, program: Program) -> str:
        asl = AgentSpeakFile()

        self._generate_startup(program, asl)

        for i, phase in enumerate(program.phases):
            next_phase = program.phases[i + 1] if i < len(program.phases) - 1 else None

            self._generate_phase_flow(phase, next_phase, asl)

            self._generate_norms(phase, asl)

            self._generate_goals(phase, asl)

            self._generate_triggers(phase, asl)

        self._generate_fallbacks(program, asl)

        return str(asl)

    # --- Section: Startup & Phase Management ---

    def _generate_startup(self, program: Program, asl: AgentSpeakFile):
        if not program.phases:
            return

        # Initial belief: phase(start).
        asl.initial_beliefs.append(Rule(head=BeliefLiteral("phase", ['"start"'])))

        # Startup plan: +started : phase(start) <- -phase(start); +phase(first_id).
        asl.plans.append(
            Plan(
                trigger=BeliefLiteral("started"),
                context=[BeliefLiteral("phase", ['"start"'])],
                body=[
                    ActionLiteral('-phase("start")'),
                    ActionLiteral(f'+phase("{program.phases[0].id}")'),
                ],
            )
        )

        # Initial plans:
        asl.plans.append(
            Plan(
                trigger=GoalLiteral("generate_response_with_goal(Goal)"),
                context=[BeliefLiteral("user_said", ["Message"])],
                body=[
                    ActionLiteral("+responded_this_turn"),
                    ActionLiteral(".findall(Norm, norm(Norm), Norms)"),
                    ActionLiteral(".reply_with_goal(Message, Norms, Goal)"),
                ],
            )
        )

    def _generate_phase_flow(self, phase: Phase, next_phase: Phase | None, asl: AgentSpeakFile):
        """Generates the main loop listener and the transition logic for this phase."""

        # +user_said(Message) : phase(ID) <- !goal1; !goal2; !transition_phase.
        goal_actions = [ActionLiteral("-responded_this_turn")]
        goal_actions += [
            ActionLiteral(f"!check_{self._slugify_str(keyword)}")
            for keyword in self._get_keyword_conditionals(phase)
        ]
        goal_actions += [ActionLiteral(f"!{self._slugify(g)}") for g in phase.goals]
        goal_actions.append(ActionLiteral("!transition_phase"))

        asl.plans.append(
            Plan(
                trigger=BeliefLiteral("user_said", ["Message"]),
                context=[BeliefLiteral("phase", [f'"{phase.id}"'])],
                body=goal_actions,
            )
        )

        # +!transition_phase : phase(ID) <- -phase(ID); +(NEXT_ID).
        next_id = str(next_phase.id) if next_phase else "end"

        transition_context = [BeliefLiteral("phase", [f'"{phase.id}"'])]
        if phase.goals:
            transition_context.append(BeliefLiteral(f"achieved_{self._slugify(phase.goals[-1])}"))

        asl.plans.append(
            Plan(
                trigger=GoalLiteral("transition_phase"),
                context=transition_context,
                body=[
                    ActionLiteral(f'-phase("{phase.id}")'),
                    ActionLiteral(f'+phase("{next_id}")'),
                    ActionLiteral("user_said(Anything)"),
                    ActionLiteral("-+user_said(Anything)"),
                ],
            )
        )

    def _get_keyword_conditionals(self, phase: Phase) -> list[str]:
        res = []
        for belief in self._extract_basic_beliefs_from_phase(phase):
            if isinstance(belief, KeywordBelief):
                res.append(belief.keyword)

        return res

    # --- Section: Norms & Beliefs ---

    def _generate_norms(self, phase: Phase, asl: AgentSpeakFile):
        for norm in phase.norms:
            norm_slug = f'"{norm.norm}"'
            head = BeliefLiteral("norm", [norm_slug])

            # Base context is the phase
            phase_lit = BeliefLiteral("phase", [f'"{phase.id}"'])

            if isinstance(norm, ConditionalNorm):
                self._ensure_belief_inference(norm.condition, asl)

                condition_expr = self._belief_to_expr(norm.condition)
                body = BinaryOp(phase_lit, "&", condition_expr)
            else:
                body = phase_lit

            asl.inference_rules.append(Rule(head=head, body=body))

    def _ensure_belief_inference(self, belief: Belief, asl: AgentSpeakFile):
        """
        Recursively adds rules to infer beliefs.
        Checks strictly to avoid duplicates if necessary,
        though ASL engines often handle redefinition or we can use a set to track processed IDs.
        """
        if isinstance(belief, KeywordBelief):
            pass
            # # Rule: keyword_said("word") :- user_said(M) & .substring("word", M, P) & P >= 0.
            # kwd_slug = f'"{belief.keyword}"'
            # head = BeliefLiteral("keyword_said", [kwd_slug])
            #
            # # Avoid duplicates
            # if any(str(r.head) == str(head) for r in asl.inference_rules):
            #     return
            #
            # body = BinaryOp(
            #     BeliefLiteral("user_said", ["Message"]),
            #     "&",
            #     BinaryOp(f".substring({kwd_slug}, Message, Pos)", "&", "Pos >= 0"),
            # )
            #
            # asl.inference_rules.append(Rule(head=head, body=body))

        elif isinstance(belief, InferredBelief):
            self._ensure_belief_inference(belief.left, asl)
            self._ensure_belief_inference(belief.right, asl)

            slug = self._slugify(belief)
            head = BeliefLiteral(slug)

            if any(str(r.head) == str(head) for r in asl.inference_rules):
                return

            op_char = "&" if belief.operator == LogicalOperator.AND else "|"
            body = BinaryOp(
                self._belief_to_expr(belief.left), op_char, self._belief_to_expr(belief.right)
            )
            asl.inference_rules.append(PersistentRule(head=head, body=body))

    def _belief_to_expr(self, belief: Belief) -> Expression:
        if isinstance(belief, KeywordBelief):
            return BeliefLiteral("keyword_said", [f'"{belief.keyword}"'])
        else:
            return BeliefLiteral(self._slugify(belief))

    # --- Section: Goals ---

    def _generate_goals(self, phase: Phase, asl: AgentSpeakFile):
        previous_goal: Goal | None = None
        for goal in phase.goals:
            self._generate_goal_plan_recursive(goal, str(phase.id), previous_goal, asl)
            previous_goal = goal

    def _generate_goal_plan_recursive(
        self,
        goal: Goal,
        phase_id: str,
        previous_goal: Goal | None,
        asl: AgentSpeakFile,
        responded_needed: bool = True,
        can_fail: bool = True,
    ):
        goal_slug = self._slugify(goal)

        # phase(ID) & not responded_this_turn & not achieved_goal
        context = [
            BeliefLiteral("phase", [f'"{phase_id}"']),
        ]

        if responded_needed:
            context.append(BeliefLiteral("responded_this_turn", negated=True))
        if can_fail:
            context.append(BeliefLiteral(f"achieved_{goal_slug}", negated=True))

        if previous_goal:
            prev_slug = self._slugify(previous_goal)
            context.append(BeliefLiteral(f"achieved_{prev_slug}"))

        body_actions = []
        sub_goals_to_process = []

        for step in goal.plan.steps:
            if isinstance(step, Goal):
                sub_slug = self._slugify(step)
                body_actions.append(ActionLiteral(f"!{sub_slug}"))
                sub_goals_to_process.append(step)
            elif isinstance(step, SpeechAction):
                body_actions.append(ActionLiteral(f'.say("{step.text}")'))
            elif isinstance(step, GestureAction):
                body_actions.append(ActionLiteral(f'.gesture("{step.gesture}")'))
            elif isinstance(step, LLMAction):
                body_actions.append(ActionLiteral(f'!generate_response_with_goal("{step.goal}")'))

        # Mark achievement
        if not goal.can_fail:
            body_actions.append(ActionLiteral(f"+achieved_{goal_slug}"))

        asl.plans.append(Plan(trigger=GoalLiteral(goal_slug), context=context, body=body_actions))
        asl.plans.append(
            Plan(trigger=GoalLiteral(goal_slug), context=[], body=[ActionLiteral("true")])
        )

        prev_sub = None
        for sub_goal in sub_goals_to_process:
            self._generate_goal_plan_recursive(sub_goal, phase_id, prev_sub, asl)
            prev_sub = sub_goal

    # --- Section: Triggers ---

    def _generate_triggers(self, phase: Phase, asl: AgentSpeakFile):
        for keyword in self._get_keyword_conditionals(phase):
            asl.plans.append(
                Plan(
                    trigger=GoalLiteral(f"check_{self._slugify_str(keyword)}"),
                    context=[
                        ActionLiteral(
                            f'user_said(Message) & .substring("{keyword}", Message, Pos) & Pos >= 0'
                        )
                    ],
                    body=[
                        ActionLiteral(f'+keyword_said("{keyword}")'),
                        ActionLiteral(f'-keyword_said("{keyword}")'),
                    ],
                )
            )
            asl.plans.append(
                Plan(
                    trigger=GoalLiteral(f"check_{self._slugify_str(keyword)}"),
                    body=[ActionLiteral("true")],
                )
            )

        for trigger in phase.triggers:
            self._ensure_belief_inference(trigger.condition, asl)

            trigger_belief_slug = self._belief_to_expr(trigger.condition)

            body_actions = []
            sub_goals = []

            for step in trigger.plan.steps:
                if isinstance(step, Goal):
                    sub_slug = self._slugify(step)
                    body_actions.append(ActionLiteral(f"!{sub_slug}"))
                    sub_goals.append(step)
                elif isinstance(step, SpeechAction):
                    body_actions.append(ActionLiteral(f'.say("{step.text}")'))
                elif isinstance(step, GestureAction):
                    body_actions.append(
                        ActionLiteral(f'.gesture("{step.gesture.type}", "{step.gesture.name}")')
                    )
                elif isinstance(step, LLMAction):
                    body_actions.append(
                        ActionLiteral(f'!generate_response_with_goal("{step.goal}")')
                    )

            asl.plans.append(
                Plan(
                    trigger=BeliefLiteral(trigger_belief_slug),
                    context=[BeliefLiteral("phase", [f'"{phase.id}"'])],
                    body=body_actions,
                )
            )

            # Recurse for triggered goals
            prev_sub = None
            for sub_goal in sub_goals:
                self._generate_goal_plan_recursive(
                    sub_goal, str(phase.id), prev_sub, asl, False, False
                )
                prev_sub = sub_goal

    # --- Section: Fallbacks ---

    def _generate_fallbacks(self, program: Program, asl: AgentSpeakFile):
        asl.plans.append(
            Plan(trigger=GoalLiteral("transition_phase"), context=[], body=[ActionLiteral("true")])
        )

    # --- Helpers ---

    @singledispatchmethod
    def _slugify(self, element: ProgramElement) -> str:
        if element.name:
            raise NotImplementedError("Cannot slugify this element.")
        return self._slugify_str(element.name)

    @_slugify.register
    def _(self, goal: Goal) -> str:
        if goal.name:
            return self._slugify_str(goal.name)
        return f"goal_{goal.id.hex}"

    @_slugify.register
    def _(self, kwb: KeywordBelief) -> str:
        return f"keyword_said({kwb.keyword})"

    @_slugify.register
    def _(self, sb: SemanticBelief) -> str:
        return self._slugify_str(sb.description)

    @_slugify.register
    def _(self, ib: InferredBelief) -> str:
        return self._slugify_str(ib.name)

    def _slugify_str(self, text: str) -> str:
        return slugify(text, separator="_", stopwords=["a", "an", "the", "we", "you", "I"])

    def _extract_basic_beliefs_from_program(self, program: Program) -> list[BasicBelief]:
        beliefs = []

        for phase in program.phases:
            beliefs.extend(self._extract_basic_beliefs_from_phase(phase))

        return beliefs

    def _extract_basic_beliefs_from_phase(self, phase: Phase) -> list[BasicBelief]:
        beliefs = []

        for norm in phase.norms:
            if isinstance(norm, ConditionalNorm):
                beliefs += self._extract_basic_beliefs_from_belief(norm.condition)

        for trigger in phase.triggers:
            beliefs += self._extract_basic_beliefs_from_belief(trigger.condition)

        return beliefs

    def _extract_basic_beliefs_from_belief(self, belief: Belief) -> list[BasicBelief]:
        if isinstance(belief, InferredBelief):
            return self._extract_basic_beliefs_from_belief(
                belief.left
            ) + self._extract_basic_beliefs_from_belief(belief.right)
        return [belief]


if __name__ == "__main__":
    asyncio.run(do_things())
    # do_other_things()y
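To illustrate what this removed generator emitted, `_generate_startup` alone contributes roughly the following prefix to `str(asl)` (phase id abbreviated here; the actual id is the first phase's UUID):

```python
# Illustration only, derived from the implementations above.
EXPECTED_PREFIX = '''\
// --- Initial Beliefs & Facts ---
phase("start").

// --- Plans ---
+started
 : phase("start")
 <- -phase("start");
 +phase("<first-phase-uuid>").
'''
```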
@@ -1,272 +0,0 @@
from __future__ import annotations

from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from enum import StrEnum


class AstNode(ABC):
    """
    Abstract base class for all elements of an AgentSpeak program.
    """

    @abstractmethod
    def _to_agentspeak(self) -> str:
        """
        Generates the AgentSpeak code string.
        """
        pass

    def __str__(self) -> str:
        return self._to_agentspeak()


class AstExpression(AstNode, ABC):
    """
    Intermediate class for anything that can be used in a logical expression.
    """

    def __and__(self, other: ExprCoalescible) -> AstBinaryOp:
        return AstBinaryOp(self, BinaryOperatorType.AND, _coalesce_expr(other))

    def __or__(self, other: ExprCoalescible) -> AstBinaryOp:
        return AstBinaryOp(self, BinaryOperatorType.OR, _coalesce_expr(other))

    def __invert__(self) -> AstLogicalExpression:
        if isinstance(self, AstLogicalExpression):
            self.negated = not self.negated
            return self
        return AstLogicalExpression(self, negated=True)


type ExprCoalescible = AstExpression | str | int | float


def _coalesce_expr(value: ExprCoalescible) -> AstExpression:
    if isinstance(value, AstExpression):
        return value
    if isinstance(value, str):
        return AstString(value)
    if isinstance(value, (int, float)):
        return AstNumber(value)
    raise TypeError(f"Cannot coalesce type {type(value)} into an AstTerm.")


@dataclass
class AstTerm(AstExpression, ABC):
    """
    Base class for terms appearing inside literals.
    """

    def __ge__(self, other: ExprCoalescible) -> AstBinaryOp:
        return AstBinaryOp(self, BinaryOperatorType.GREATER_EQUALS, _coalesce_expr(other))

    def __gt__(self, other: ExprCoalescible) -> AstBinaryOp:
        return AstBinaryOp(self, BinaryOperatorType.GREATER_THAN, _coalesce_expr(other))

    def __le__(self, other: ExprCoalescible) -> AstBinaryOp:
        return AstBinaryOp(self, BinaryOperatorType.LESS_EQUALS, _coalesce_expr(other))

    def __lt__(self, other: ExprCoalescible) -> AstBinaryOp:
        return AstBinaryOp(self, BinaryOperatorType.LESS_THAN, _coalesce_expr(other))

    def __eq__(self, other: ExprCoalescible) -> AstBinaryOp:
        return AstBinaryOp(self, BinaryOperatorType.EQUALS, _coalesce_expr(other))

    def __ne__(self, other: ExprCoalescible) -> AstBinaryOp:
        return AstBinaryOp(self, BinaryOperatorType.NOT_EQUALS, _coalesce_expr(other))


@dataclass
class AstAtom(AstTerm):
    """
    Grounded expression in all lowercase.
    """

    value: str

    def _to_agentspeak(self) -> str:
        return self.value.lower()


@dataclass
class AstVar(AstTerm):
    """
    Ungrounded variable expression. First letter capitalized.
    """

    name: str

    def _to_agentspeak(self) -> str:
        return self.name.capitalize()


@dataclass
class AstNumber(AstTerm):
    value: int | float

    def _to_agentspeak(self) -> str:
        return str(self.value)


@dataclass
class AstString(AstTerm):
    value: str

    def _to_agentspeak(self) -> str:
        return f'"{self.value}"'


@dataclass
class AstLiteral(AstTerm):
    functor: str
    terms: list[AstTerm] = field(default_factory=list)

    def _to_agentspeak(self) -> str:
        if not self.terms:
            return self.functor
        args = ", ".join(map(str, self.terms))
        return f"{self.functor}({args})"


class BinaryOperatorType(StrEnum):
    AND = "&"
    OR = "|"
    GREATER_THAN = ">"
    LESS_THAN = "<"
    EQUALS = "=="
    NOT_EQUALS = "\\=="
    GREATER_EQUALS = ">="
    LESS_EQUALS = "<="


@dataclass
class AstBinaryOp(AstExpression):
    left: AstExpression
    operator: BinaryOperatorType
    right: AstExpression

    def __post_init__(self):
        self.left = _as_logical(self.left)
        self.right = _as_logical(self.right)

    def _to_agentspeak(self) -> str:
        l_str = str(self.left)
        r_str = str(self.right)

        assert isinstance(self.left, AstLogicalExpression)
        assert isinstance(self.right, AstLogicalExpression)

        if isinstance(self.left.expression, AstBinaryOp) or self.left.negated:
            l_str = f"({l_str})"
        if isinstance(self.right.expression, AstBinaryOp) or self.right.negated:
            r_str = f"({r_str})"

        return f"{l_str} {self.operator.value} {r_str}"


@dataclass
class AstLogicalExpression(AstExpression):
    expression: AstExpression
    negated: bool = False

    def _to_agentspeak(self) -> str:
        expr_str = str(self.expression)
        if isinstance(self.expression, AstBinaryOp) and self.negated:
            expr_str = f"({expr_str})"
        return f"{'not ' if self.negated else ''}{expr_str}"


def _as_logical(expr: AstExpression) -> AstLogicalExpression:
    if isinstance(expr, AstLogicalExpression):
        return expr
    return AstLogicalExpression(expr)


class StatementType(StrEnum):
    EMPTY = ""
    DO_ACTION = "."
    ACHIEVE_GOAL = "!"
    # TEST_GOAL = "?" # TODO
    ADD_BELIEF = "+"
    REMOVE_BELIEF = "-"


@dataclass
class AstStatement(AstNode):
    """
    A statement that can appear inside a plan.
    """

    type: StatementType
    expression: AstExpression

    def _to_agentspeak(self) -> str:
        return f"{self.type.value}{self.expression}"


@dataclass
class AstRule(AstNode):
    result: AstExpression
    condition: AstExpression | None = None

    def __post_init__(self):
        if self.condition is not None:
            self.condition = _as_logical(self.condition)

    def _to_agentspeak(self) -> str:
        if not self.condition:
            return f"{self.result}."
        return f"{self.result} :- {self.condition}."


class TriggerType(StrEnum):
    ADDED_BELIEF = "+"
    # REMOVED_BELIEF = "-" # TODO
    # MODIFIED_BELIEF = "^" # TODO
    ADDED_GOAL = "+!"
    # REMOVED_GOAL = "-!" # TODO


@dataclass
class AstPlan(AstNode):
    type: TriggerType
    trigger_literal: AstExpression
    context: list[AstExpression]
    body: list[AstStatement]

    def _to_agentspeak(self) -> str:
        assert isinstance(self.trigger_literal, AstLiteral)

        indent = " " * 6
        colon = " : "
        arrow = " <- "

        lines = []

        lines.append(f"{self.type.value}{self.trigger_literal}")

        if self.context:
            lines.append(colon + f" &\n{indent}".join(str(c) for c in self.context))

        if self.body:
            lines.append(arrow + f";\n{indent}".join(str(s) for s in self.body) + ".")

        lines.append("")

        return "\n".join(lines)


@dataclass
class AstProgram(AstNode):
    rules: list[AstRule] = field(default_factory=list)
    plans: list[AstPlan] = field(default_factory=list)

    def _to_agentspeak(self) -> str:
        lines = []
        lines.extend(map(str, self.rules))

        lines.extend(["", ""])
        lines.extend(map(str, self.plans))

        return "\n".join(lines)
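A brief sketch of how the removed expression classes composed via operator overloading; the expected strings in the comments follow from the `_to_agentspeak` implementations above:

```python
expr = AstLiteral("user_said", [AstVar("message")]) & (AstVar("pos") >= 0)
print(expr)  # user_said(Message) & (Pos >= 0)

negated = AstLiteral("phase", [AstString("intro")]) & ~AstLiteral("responded_this_turn")
print(negated)  # phase("intro") & (not responded_this_turn)
```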
@@ -11,7 +11,7 @@ from pydantic import ValidationError
from control_backend.agents.base import BaseAgent
from control_backend.core.agent_system import InternalMessage
from control_backend.core.config import settings
-from control_backend.schemas.belief_message import BeliefMessage
+from control_backend.schemas.belief_message import Belief, BeliefMessage
from control_backend.schemas.llm_prompt_message import LLMPromptMessage
from control_backend.schemas.ri_message import SpeechCommand
@@ -124,8 +124,8 @@ class BDICoreAgent(BaseAgent):

        if msg.thread == "beliefs":
            try:
-                belief_changes = BeliefMessage.model_validate_json(msg.body)
-                self._apply_belief_changes(belief_changes)
+                beliefs = BeliefMessage.model_validate_json(msg.body).beliefs
+                self._apply_beliefs(beliefs)
            except ValidationError:
                self.logger.exception("Error processing belief.")
                return
@@ -145,29 +145,22 @@ class BDICoreAgent(BaseAgent):
            )
            await self.send(out_msg)

-    def _apply_belief_changes(self, belief_changes: BeliefMessage):
+    def _apply_beliefs(self, beliefs: list[Belief]):
        """
        Update the belief base with a list of new beliefs.

-        For beliefs in ``belief_changes.replace``, it removes all existing beliefs with that name
-        before adding one new one.
-
-        :param belief_changes: The changes in beliefs to apply.
+        If ``replace=True`` is set on a belief, it removes all existing beliefs with that name
+        before adding the new one.
        """
-        if not belief_changes.create and not belief_changes.replace and not belief_changes.delete:
+        if not beliefs:
            return

-        for belief in belief_changes.create:
+        for belief in beliefs:
+            if belief.replace:
+                self._remove_all_with_name(belief.name)
            self._add_belief(belief.name, belief.arguments)

-        for belief in belief_changes.replace:
-            self._remove_all_with_name(belief.name)
-            self._add_belief(belief.name, belief.arguments)
-
-        for belief in belief_changes.delete:
-            self._remove_belief(belief.name, belief.arguments)
-
-    def _add_belief(self, name: str, args: list[str] = None):
+    def _add_belief(self, name: str, args: Iterable[str] = []):
        """
        Add a single belief to the BDI agent.
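A hypothetical message sketch illustrating the new belief semantics: one flat list of beliefs, each carrying its own replace flag, instead of separate create/replace/delete lists. The field names follow the attributes used above (`name`, `arguments`, `replace`); the exact schema lives in `control_backend.schemas.belief_message`:

```python
from control_backend.schemas.belief_message import Belief, BeliefMessage

msg = BeliefMessage(
    beliefs=[
        Belief(name="user_said", arguments=["hello there"], replace=True),
        Belief(name="started", arguments=[], replace=False),
    ]
)
payload = msg.model_dump_json()  # what arrives on the "beliefs" thread
```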
@@ -175,13 +168,9 @@ class BDICoreAgent(BaseAgent):
        :param args: Arguments for the belief.
        """
        # new_args = (agentspeak.Literal(arg) for arg in args) # TODO: Eventually support multiple
-        args = args or []
-        if args:
-            merged_args = DELIMITER.join(arg for arg in args)
-            new_args = (agentspeak.Literal(merged_args),)
-            term = agentspeak.Literal(name, new_args)
-        else:
-            term = agentspeak.Literal(name)
+        merged_args = DELIMITER.join(arg for arg in args)
+        new_args = (agentspeak.Literal(merged_args),)
+        term = agentspeak.Literal(name, new_args)

        self.bdi_agent.call(
            agentspeak.Trigger.addition,
@@ -249,7 +238,8 @@ class BDICoreAgent(BaseAgent):
        @self.actions.add(".reply", 3)
        def _reply(agent: "BDICoreAgent", term, intention):
            """
-            Let the LLM generate a response to a user's utterance with the current norms and goals.
+            Sends text to the LLM (AgentSpeak action).
+            Example: .reply("Hello LLM!", "Some norm", "Some goal")
            """
            message_text = agentspeak.grounded(term.args[0], intention.scope)
            norms = agentspeak.grounded(term.args[1], intention.scope)
@@ -262,71 +252,15 @@ class BDICoreAgent(BaseAgent):
            asyncio.create_task(self._send_to_llm(str(message_text), str(norms), str(goals)))
            yield

-        @self.actions.add(".reply_with_goal", 3)
-        def _reply_with_goal(agent: "BDICoreAgent", term, intention):
-            """
-            Let the LLM generate a response to a user's utterance with the current norms and a
-            specific goal.
-            """
-            message_text = agentspeak.grounded(term.args[0], intention.scope)
-            norms = agentspeak.grounded(term.args[1], intention.scope)
-            goal = agentspeak.grounded(term.args[2], intention.scope)
-
-            self.logger.debug(
-                '"reply_with_goal" action called with message=%s, norms=%s, goal=%s',
-                message_text,
-                norms,
-                goal,
-            )
-            # asyncio.create_task(self._send_to_llm(str(message_text), norms, str(goal)))
-            yield
-
-        @self.actions.add(".say", 1)
-        def _say(agent: "BDICoreAgent", term, intention):
-            """
-            Make the robot say the given text instantly.
-            """
-            message_text = agentspeak.grounded(term.args[0], intention.scope)
-
-            self.logger.debug('"say" action called with text=%s', message_text)
-
-            # speech_command = SpeechCommand(data=message_text)
-            # speech_message = InternalMessage(
-            #     to=settings.agent_settings.robot_speech_name,
-            #     sender=settings.agent_settings.bdi_core_name,
-            #     body=speech_command.model_dump_json(),
-            # )
-            # asyncio.create_task(agent.send(speech_message))
-            yield
-
-        @self.actions.add(".gesture", 2)
-        def _gesture(agent: "BDICoreAgent", term, intention):
-            """
-            Make the robot perform the given gesture instantly.
-            """
-            gesture_type = agentspeak.grounded(term.args[0], intention.scope)
-            gesture_name = agentspeak.grounded(term.args[1], intention.scope)
-
-            self.logger.debug(
-                '"gesture" action called with type=%s, name=%s',
-                gesture_type,
-                gesture_name,
-            )
-
-            # gesture = Gesture(type=gesture_type, name=gesture_name)
-            # gesture_message = InternalMessage(
-            #     to=settings.agent_settings.robot_gesture_name,
-            #     sender=settings.agent_settings.bdi_core_name,
-            #     body=gesture.model_dump_json(),
-            # )
-            # asyncio.create_task(agent.send(gesture_message))
-            yield
-
-    async def _send_to_llm(self, text: str, norms: str, goals: str):
+    async def _send_to_llm(self, text: str, norms: str = None, goals: str = None):
        """
        Sends a text query to the LLM agent asynchronously.
        """
-        prompt = LLMPromptMessage(text=text, norms=norms.split("\n"), goals=goals.split("\n"))
+        prompt = LLMPromptMessage(
+            text=text,
+            norms=norms.split("\n") if norms else [],
+            goals=goals.split("\n") if norms else [],
+        )
        msg = InternalMessage(
            to=settings.agent_settings.llm_name,
            sender=self.name,
@@ -1,598 +1,12 @@
-import uuid
-from collections.abc import Iterable
-
 import zmq
 from pydantic import ValidationError
-from slugify import slugify
 from zmq.asyncio import Context

 from control_backend.agents import BaseAgent
+from control_backend.core.agent_system import InternalMessage
 from control_backend.core.config import settings
-from control_backend.schemas.program import (
-    Action,
-    BasicBelief,
-    BasicNorm,
-    Belief,
-    ConditionalNorm,
-    GestureAction,
-    Goal,
-    InferredBelief,
-    KeywordBelief,
-    LLMAction,
-    LogicalOperator,
-    Phase,
-    Plan,
-    Program,
-    ProgramElement,
-    SemanticBelief,
-    SpeechAction,
-    Trigger,
-)
+from control_backend.schemas.belief_message import Belief, BeliefMessage
+from control_backend.schemas.program import Program
-
-
-test_program = Program(
-    phases=[
-        Phase(
-            norms=[
-                BasicNorm(norm="Talk like a pirate", id=uuid.uuid4()),
-                ConditionalNorm(
-                    condition=InferredBelief(
-                        left=KeywordBelief(keyword="Arr", id=uuid.uuid4()),
-                        right=SemanticBelief(
-                            description="testing", name="semantic belief", id=uuid.uuid4()
-                        ),
-                        operator=LogicalOperator.OR,
-                        name="Talking to a pirate",
-                        id=uuid.uuid4(),
-                    ),
-                    norm="Use nautical terms",
-                    id=uuid.uuid4(),
-                ),
-                ConditionalNorm(
-                    condition=SemanticBelief(
-                        description="We are talking to a child",
-                        name="talking to child",
-                        id=uuid.uuid4(),
-                    ),
-                    norm="Do not use cuss words",
-                    id=uuid.uuid4(),
-                ),
-            ],
-            triggers=[
-                Trigger(
-                    condition=InferredBelief(
-                        left=KeywordBelief(keyword="key", id=uuid.uuid4()),
-                        right=InferredBelief(
-                            left=KeywordBelief(keyword="key2", id=uuid.uuid4()),
-                            right=SemanticBelief(
-                                description="Decode this", name="semantic belief 2", id=uuid.uuid4()
-                            ),
-                            operator=LogicalOperator.OR,
-                            name="test trigger inferred inner",
-                            id=uuid.uuid4(),
-                        ),
-                        operator=LogicalOperator.OR,
-                        name="test trigger inferred outer",
-                        id=uuid.uuid4(),
-                    ),
-                    plan=Plan(
-                        steps=[
-                            SpeechAction(text="Testing trigger", id=uuid.uuid4()),
-                            Goal(
-                                name="Testing trigger",
-                                plan=Plan(
-                                    steps=[LLMAction(goal="Do something", id=uuid.uuid4())],
-                                    id=uuid.uuid4(),
-                                ),
-                                id=uuid.uuid4(),
-                            ),
-                        ],
-                        id=uuid.uuid4(),
-                    ),
-                    id=uuid.uuid4(),
-                )
-            ],
-            goals=[
-                Goal(
-                    name="Determine user age",
-                    plan=Plan(
-                        steps=[LLMAction(goal="Determine the age of the user.", id=uuid.uuid4())],
-                        id=uuid.uuid4(),
-                    ),
-                    id=uuid.uuid4(),
-                ),
-                Goal(
-                    name="Find the user's name",
-                    plan=Plan(
-                        steps=[
-                            Goal(
-                                name="Greet the user",
-                                plan=Plan(
-                                    steps=[LLMAction(goal="Greet the user.", id=uuid.uuid4())],
-                                    id=uuid.uuid4(),
-                                ),
-                                can_fail=False,
-                                id=uuid.uuid4(),
-                            ),
-                            Goal(
-                                name="Ask for name",
-                                plan=Plan(
-                                    steps=[
-                                        LLMAction(goal="Obtain the user's name.", id=uuid.uuid4())
-                                    ],
-                                    id=uuid.uuid4(),
-                                ),
-                                id=uuid.uuid4(),
-                            ),
-                        ],
-                        id=uuid.uuid4(),
-                    ),
-                    id=uuid.uuid4(),
-                ),
-                Goal(
-                    name="Tell a joke",
-                    plan=Plan(
-                        steps=[LLMAction(goal="Tell a joke.", id=uuid.uuid4())], id=uuid.uuid4()
-                    ),
-                    id=uuid.uuid4(),
-                ),
-            ],
-            id=uuid.uuid4(),
-        ),
-        Phase(
-            id=uuid.uuid4(),
-            norms=[
-                BasicNorm(norm="Use very gentle speech.", id=uuid.uuid4()),
-                ConditionalNorm(
-                    condition=SemanticBelief(
-                        description="We are talking to a child",
-                        name="talking to child",
-                        id=uuid.uuid4(),
-                    ),
-                    norm="Do not use cuss words",
-                    id=uuid.uuid4(),
-                ),
-            ],
-            triggers=[
-                Trigger(
-                    condition=InferredBelief(
-                        left=KeywordBelief(keyword="help", id=uuid.uuid4()),
-                        right=SemanticBelief(
-                            description="User is stuck", name="stuck", id=uuid.uuid4()
-                        ),
-                        operator=LogicalOperator.OR,
-                        name="help_or_stuck",
-                        id=uuid.uuid4(),
-                    ),
-                    plan=Plan(
-                        steps=[
-                            Goal(
-                                name="Unblock user",
-                                plan=Plan(
-                                    steps=[
-                                        LLMAction(
-                                            goal="Provide a step-by-step path to "
-                                            "resolve the user's issue.",
-                                            id=uuid.uuid4(),
-                                        )
-                                    ],
-                                    id=uuid.uuid4(),
-                                ),
-                                id=uuid.uuid4(),
-                            ),
-                        ],
-                        id=uuid.uuid4(),
-                    ),
-                    id=uuid.uuid4(),
-                ),
-            ],
-            goals=[
-                Goal(
-                    name="Clarify intent",
-                    plan=Plan(
-                        steps=[
-                            LLMAction(
-                                goal="Ask 1-2 targeted questions to clarify the "
-                                "user's intent, then proceed.",
-                                id=uuid.uuid4(),
-                            )
-                        ],
-                        id=uuid.uuid4(),
-                    ),
-                    id=uuid.uuid4(),
-                ),
-                Goal(
-                    name="Provide solution",
-                    plan=Plan(
-                        steps=[
-                            LLMAction(
-                                goal="Deliver a solution to complete the user's goal.",
-                                id=uuid.uuid4(),
-                            )
-                        ],
-                        id=uuid.uuid4(),
-                    ),
-                    id=uuid.uuid4(),
-                ),
-                Goal(
-                    name="Summarize next steps",
-                    plan=Plan(
-                        steps=[
-                            LLMAction(
-                                goal="Summarize what the user should do next.", id=uuid.uuid4()
-                            )
-                        ],
-                        id=uuid.uuid4(),
-                    ),
-                    id=uuid.uuid4(),
-                ),
-            ],
-        ),
-    ]
-)
-
-
-def do_things():
-    print(AgentSpeakGenerator().generate(test_program))
-
-
-class AgentSpeakGenerator:
-    """
-    Converts Pydantic representation of behavior programs into AgentSpeak(L) code string.
-    """
-
-    arrow_prefix = f"{' ' * 2}<-{' ' * 2}"
-    colon_prefix = f"{' ' * 2}:{' ' * 3}"
-    indent_prefix = " " * 6
-
-    def generate(self, program: Program) -> str:
-        lines = []
-        lines.append("")
-
-        lines += self._generate_initial_beliefs(program)
-
-        lines += self._generate_basic_flow(program)
-
-        lines += self._generate_phase_transitions(program)
-
-        lines += self._generate_norms(program)
-
-        lines += self._generate_belief_inference(program)
-
-        lines += self._generate_goals(program)
-
-        lines += self._generate_triggers(program)
-
-        return "\n".join(lines)
-
-    def _generate_initial_beliefs(self, program: Program) -> Iterable[str]:
-        yield "// --- Initial beliefs and agent startup ---"
-
-        yield "phase(start)."
-
-        yield ""
-
-        yield "+started"
-        yield f"{self.colon_prefix}phase(start)"
-        yield f"{self.arrow_prefix}phase({program.phases[0].id if program.phases else 'end'})."
-
-        yield from ["", ""]
-
-    def _generate_basic_flow(self, program: Program) -> Iterable[str]:
-        yield "// --- Basic flow ---"
-
-        for phase in program.phases:
-            yield from self._generate_basic_flow_per_phase(phase)
-
-        yield from ["", ""]
-
-    def _generate_basic_flow_per_phase(self, phase: Phase) -> Iterable[str]:
-        yield "+user_said(Message)"
-        yield f"{self.colon_prefix}phase({phase.id})"
-
-        goals = phase.goals
-        if goals:
-            yield f"{self.arrow_prefix}{self._slugify(goals[0], include_prefix=True)}"
-            for goal in goals[1:]:
-                yield f"{self.indent_prefix}{self._slugify(goal, include_prefix=True)}"
-
-        yield f"{self.indent_prefix if goals else self.arrow_prefix}!transition_phase."
-
-    def _generate_phase_transitions(self, program: Program) -> Iterable[str]:
-        yield "// --- Phase transitions ---"
-
-        if len(program.phases) == 0:
-            yield from ["", ""]
-            return
-
-        # TODO: remove outdated things
-
-        for i in range(-1, len(program.phases)):
-            predecessor = program.phases[i] if i >= 0 else None
-            successor = program.phases[i + 1] if i < len(program.phases) - 1 else None
-            yield from self._generate_phase_transition(predecessor, successor)
-
-        yield from self._generate_phase_transition(None, None)  # to avoid failing plan
-
-        yield from ["", ""]
-
-    def _generate_phase_transition(
-        self, phase: Phase | None = None, next_phase: Phase | None = None
-    ) -> Iterable[str]:
-        yield "+!transition_phase"
-
-        if phase is None and next_phase is None:  # base case true to avoid failing plan
-            yield f"{self.arrow_prefix}true."
-            return
-
-        yield f"{self.colon_prefix}phase({phase.id if phase else 'start'})"
-        yield f"{self.arrow_prefix}-+phase({next_phase.id if next_phase else 'end'})."
-
-    def _generate_norms(self, program: Program) -> Iterable[str]:
-        yield "// --- Norms ---"
-
-        for phase in program.phases:
-            for norm in phase.norms:
-                if type(norm) is BasicNorm:
-                    yield f"{self._slugify(norm)} :- phase({phase.id})."
-                if type(norm) is ConditionalNorm:
-                    yield (
-                        f"{self._slugify(norm)} :- phase({phase.id}) & "
-                        f"{self._slugify(norm.condition)}."
-                    )
-
-        yield from ["", ""]
-
-    def _generate_belief_inference(self, program: Program) -> Iterable[str]:
-        yield "// --- Belief inference rules ---"
-
-        for phase in program.phases:
-            for norm in phase.norms:
-                if not isinstance(norm, ConditionalNorm):
-                    continue
-
-                yield from self._belief_inference_recursive(norm.condition)
-
-            for trigger in phase.triggers:
-                yield from self._belief_inference_recursive(trigger.condition)
-
-        yield from ["", ""]
-
-    def _belief_inference_recursive(self, belief: Belief) -> Iterable[str]:
-        if type(belief) is KeywordBelief:
-            yield (
-                f"{self._slugify(belief)} :- user_said(Message) & "
-                f'.substring(Message, "{belief.keyword}", Pos) & Pos >= 0.'
-            )
-        if type(belief) is InferredBelief:
-            yield (
-                f"{self._slugify(belief)} :- {self._slugify(belief.left)} "
-                f"{'&' if belief.operator == LogicalOperator.AND else '|'} "
-                f"{self._slugify(belief.right)}."
-            )
-
-            yield from self._belief_inference_recursive(belief.left)
-            yield from self._belief_inference_recursive(belief.right)
-
-    def _generate_goals(self, program: Program) -> Iterable[str]:
-        yield "// --- Goals ---"
-
-        for phase in program.phases:
-            previous_goal: Goal | None = None
-            for goal in phase.goals:
-                yield from self._generate_goal_plan_recursive(goal, phase, previous_goal)
-                previous_goal = goal
-
-        yield from ["", ""]
-
-    def _generate_goal_plan_recursive(
-        self, goal: Goal, phase: Phase, previous_goal: Goal | None = None
-    ) -> Iterable[str]:
-        yield f"+{self._slugify(goal, include_prefix=True)}"
-
-        # Context
-        yield f"{self.colon_prefix}phase({phase.id}) &"
-        yield f"{self.indent_prefix}not responded_this_turn &"
-        yield f"{self.indent_prefix}not achieved_{self._slugify(goal)} &"
-        if previous_goal:
-            yield f"{self.indent_prefix}achieved_{self._slugify(previous_goal)}"
-        else:
-            yield f"{self.indent_prefix}true"
-
-        extra_goals_to_generate = []
-
-        steps = goal.plan.steps
-
-        if len(steps) == 0:
-            yield f"{self.arrow_prefix}true."
-            return
-
-        first_step = steps[0]
-        yield (
-            f"{self.arrow_prefix}{self._slugify(first_step, include_prefix=True)}"
-            f"{'.' if len(steps) == 1 and goal.can_fail else ';'}"
-        )
-        if isinstance(first_step, Goal):
-            extra_goals_to_generate.append(first_step)
-
-        for step in steps[1:-1]:
-            yield f"{self.indent_prefix}{self._slugify(step, include_prefix=True)};"
-            if isinstance(step, Goal):
-                extra_goals_to_generate.append(step)
-
-        if len(steps) > 1:
-            last_step = steps[-1]
-            yield (
-                f"{self.indent_prefix}{self._slugify(last_step, include_prefix=True)}"
-                f"{'.' if goal.can_fail else ';'}"
-            )
-            if isinstance(last_step, Goal):
-                extra_goals_to_generate.append(last_step)
-
-        if not goal.can_fail:
-            yield f"{self.indent_prefix}+achieved_{self._slugify(goal)}."
-
-        yield f"+{self._slugify(goal, include_prefix=True)}"
-        yield f"{self.arrow_prefix}true."
-
-        yield ""
-
-        extra_previous_goal: Goal | None = None
-        for extra_goal in extra_goals_to_generate:
-            yield from self._generate_goal_plan_recursive(extra_goal, phase, extra_previous_goal)
-            extra_previous_goal = extra_goal
-
-    def _generate_triggers(self, program: Program) -> Iterable[str]:
-        yield "// --- Triggers ---"
-
-        for phase in program.phases:
-            for trigger in phase.triggers:
-                yield from self._generate_trigger_plan(trigger, phase)
-
-        yield from ["", ""]
-
-    def _generate_trigger_plan(self, trigger: Trigger, phase: Phase) -> Iterable[str]:
-        belief_name = self._slugify(trigger.condition)
-
-        yield f"+{belief_name}"
-        yield f"{self.colon_prefix}phase({phase.id})"
-
-        extra_goals_to_generate = []
-
-        steps = trigger.plan.steps
-
-        if len(steps) == 0:
-            yield f"{self.arrow_prefix}true."
|
|
||||||
return
|
|
||||||
|
|
||||||
first_step = steps[0]
|
|
||||||
yield (
|
|
||||||
f"{self.arrow_prefix}{self._slugify(first_step, include_prefix=True)}"
|
|
||||||
f"{'.' if len(steps) == 1 else ';'}"
|
|
||||||
)
|
|
||||||
if isinstance(first_step, Goal):
|
|
||||||
extra_goals_to_generate.append(first_step)
|
|
||||||
|
|
||||||
for step in steps[1:-1]:
|
|
||||||
yield f"{self.indent_prefix}{self._slugify(step, include_prefix=True)};"
|
|
||||||
if isinstance(step, Goal):
|
|
||||||
extra_goals_to_generate.append(step)
|
|
||||||
|
|
||||||
if len(steps) > 1:
|
|
||||||
last_step = steps[-1]
|
|
||||||
yield f"{self.indent_prefix}{self._slugify(last_step, include_prefix=True)}."
|
|
||||||
if isinstance(last_step, Goal):
|
|
||||||
extra_goals_to_generate.append(last_step)
|
|
||||||
|
|
||||||
yield ""
|
|
||||||
|
|
||||||
extra_previous_goal: Goal | None = None
|
|
||||||
for extra_goal in extra_goals_to_generate:
|
|
||||||
yield from self._generate_trigger_plan_recursive(extra_goal, phase, extra_previous_goal)
|
|
||||||
extra_previous_goal = extra_goal
|
|
||||||
|
|
||||||
def _generate_trigger_plan_recursive(
|
|
||||||
self, goal: Goal, phase: Phase, previous_goal: Goal | None = None
|
|
||||||
) -> Iterable[str]:
|
|
||||||
yield f"+{self._slugify(goal, include_prefix=True)}"
|
|
||||||
|
|
||||||
extra_goals_to_generate = []
|
|
||||||
|
|
||||||
steps = goal.plan.steps
|
|
||||||
|
|
||||||
if len(steps) == 0:
|
|
||||||
yield f"{self.arrow_prefix}true."
|
|
||||||
return
|
|
||||||
|
|
||||||
first_step = steps[0]
|
|
||||||
yield (
|
|
||||||
f"{self.arrow_prefix}{self._slugify(first_step, include_prefix=True)}"
|
|
||||||
f"{'.' if len(steps) == 1 and goal.can_fail else ';'}"
|
|
||||||
)
|
|
||||||
if isinstance(first_step, Goal):
|
|
||||||
extra_goals_to_generate.append(first_step)
|
|
||||||
|
|
||||||
for step in steps[1:-1]:
|
|
||||||
yield f"{self.indent_prefix}{self._slugify(step, include_prefix=True)};"
|
|
||||||
if isinstance(step, Goal):
|
|
||||||
extra_goals_to_generate.append(step)
|
|
||||||
|
|
||||||
if len(steps) > 1:
|
|
||||||
last_step = steps[-1]
|
|
||||||
yield (
|
|
||||||
f"{self.indent_prefix}{self._slugify(last_step, include_prefix=True)}"
|
|
||||||
f"{'.' if goal.can_fail else ';'}"
|
|
||||||
)
|
|
||||||
if isinstance(last_step, Goal):
|
|
||||||
extra_goals_to_generate.append(last_step)
|
|
||||||
|
|
||||||
if not goal.can_fail:
|
|
||||||
yield f"{self.indent_prefix}+achieved_{self._slugify(goal)}."
|
|
||||||
|
|
||||||
yield f"+{self._slugify(goal, include_prefix=True)}"
|
|
||||||
yield f"{self.arrow_prefix}true."
|
|
||||||
|
|
||||||
yield ""
|
|
||||||
|
|
||||||
extra_previous_goal: Goal | None = None
|
|
||||||
for extra_goal in extra_goals_to_generate:
|
|
||||||
yield from self._generate_goal_plan_recursive(extra_goal, phase, extra_previous_goal)
|
|
||||||
extra_previous_goal = extra_goal
|
|
||||||
|
|
||||||
def _slugify(self, element: ProgramElement, include_prefix: bool = False) -> str:
|
|
||||||
def base_slugify_call(text: str):
|
|
||||||
return slugify(text, separator="_", stopwords=["a", "the"])
|
|
||||||
|
|
||||||
if type(element) is KeywordBelief:
|
|
||||||
return f'keyword_said("{element.keyword}")'
|
|
||||||
|
|
||||||
if type(element) is SemanticBelief:
|
|
||||||
name = element.name
|
|
||||||
return f"semantic_{base_slugify_call(name if name else element.description)}"
|
|
||||||
|
|
||||||
if isinstance(element, BasicNorm):
|
|
||||||
return f'norm("{element.norm}")'
|
|
||||||
|
|
||||||
if isinstance(element, Goal):
|
|
||||||
return f"{'!' if include_prefix else ''}{base_slugify_call(element.name)}"
|
|
||||||
|
|
||||||
if isinstance(element, SpeechAction):
|
|
||||||
return f'.say("{element.text}")'
|
|
||||||
|
|
||||||
if isinstance(element, GestureAction):
|
|
||||||
return f'.gesture("{element.gesture}")'
|
|
||||||
|
|
||||||
if isinstance(element, LLMAction):
|
|
||||||
return f'!generate_response_with_goal("{element.goal}")'
|
|
||||||
|
|
||||||
if isinstance(element, Action.__value__):
|
|
||||||
raise NotImplementedError(
|
|
||||||
"Have not implemented an ASL string representation for this action."
|
|
||||||
)
|
|
||||||
|
|
||||||
if element.name == "":
|
|
||||||
raise ValueError("Name must be initialized for this type of ProgramElement.")
|
|
||||||
|
|
||||||
return base_slugify_call(element.name)
|
|
||||||
|
|
||||||
def _extract_basic_beliefs_from_program(self, program: Program) -> list[BasicBelief]:
|
|
||||||
beliefs = []
|
|
||||||
|
|
||||||
for phase in program.phases:
|
|
||||||
for norm in phase.norms:
|
|
||||||
if isinstance(norm, ConditionalNorm):
|
|
||||||
beliefs += self._extract_basic_beliefs_from_belief(norm.condition)
|
|
||||||
|
|
||||||
for trigger in phase.triggers:
|
|
||||||
beliefs += self._extract_basic_beliefs_from_belief(trigger.condition)
|
|
||||||
|
|
||||||
return beliefs
|
|
||||||
|
|
||||||
def _extract_basic_beliefs_from_belief(self, belief: Belief) -> list[BasicBelief]:
|
|
||||||
if isinstance(belief, InferredBelief):
|
|
||||||
return self._extract_basic_beliefs_from_belief(
|
|
||||||
belief.left
|
|
||||||
) + self._extract_basic_beliefs_from_belief(belief.right)
|
|
||||||
return [belief]
|
|
||||||
|
|
||||||
|
|
||||||
class BDIProgramManager(BaseAgent):
|
class BDIProgramManager(BaseAgent):
|
||||||
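For orientation, the goal plans that the removed `_generate_goal_plan_recursive` emits look roughly like the sketch below. This is a hand-written illustration, not repository output: the goal name, the phase id, and the `arrow_prefix`/`colon_prefix`/`indent_prefix` values are assumptions.

```python
# Rough illustration only: assumes colon_prefix="    :  ", indent_prefix="       "
# and arrow_prefix="    <- "; the real values live in the generator class above.
example_goal_plan = "\n".join(
    [
        "+!greet_participant",                  # plan triggered by adopting the goal
        "    :  phase(intro) &",                # context: we are in the right phase ...
        "       not responded_this_turn &",     # ... the robot has not answered yet ...
        "       not achieved_greet_participant &",
        "       true",                          # ... and there is no previous goal required
        '    <- .say("Hello!");',               # plan body: the goal\'s steps ...
        "       +achieved_greet_participant.",  # ... then mark the goal as achieved
        "",
        "+!greet_participant",                  # fallback plan so the intention never fails
        "    <- true.",
    ]
)
print(example_goal_plan)
```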
@@ -611,40 +25,55 @@ class BDIProgramManager(BaseAgent):
         super().__init__(**kwargs)
         self.sub_socket = None
 
-    # async def _send_to_bdi(self, program: Program):
-    #     """
-    #     Convert a received program into BDI beliefs and send them to the BDI Core Agent.
-    #
-    #     Currently, it takes the **first phase** of the program and extracts:
-    #     - **Norms**: Constraints or rules the agent must follow.
-    #     - **Goals**: Objectives the agent must achieve.
-    #
-    #     These are sent as a ``BeliefMessage`` with ``replace=True``, meaning they will
-    #     overwrite any existing norms/goals of the same name in the BDI agent.
-    #
-    #     :param program: The program object received from the API.
-    #     """
-    #     first_phase = program.phases[0]
-    #     norms_belief = Belief(
-    #         name="norms",
-    #         arguments=[norm.norm for norm in first_phase.norms],
-    #         replace=True,
-    #     )
-    #     goals_belief = Belief(
-    #         name="goals",
-    #         arguments=[goal.description for goal in first_phase.goals],
-    #         replace=True,
-    #     )
-    #     program_beliefs = BeliefMessage(beliefs=[norms_belief, goals_belief])
-    #
-    #     message = InternalMessage(
-    #         to=settings.agent_settings.bdi_core_name,
-    #         sender=self.name,
-    #         body=program_beliefs.model_dump_json(),
-    #         thread="beliefs",
-    #     )
-    #     await self.send(message)
-    #     self.logger.debug("Sent new norms and goals to the BDI agent.")
+    async def _send_to_bdi(self, program: Program):
+        """
+        Convert a received program into BDI beliefs and send them to the BDI Core Agent.
+
+        Currently, it takes the **first phase** of the program and extracts:
+        - **Norms**: Constraints or rules the agent must follow.
+        - **Goals**: Objectives the agent must achieve.
+
+        These are sent as a ``BeliefMessage`` with ``replace=True``, meaning they will
+        overwrite any existing norms/goals of the same name in the BDI agent.
+
+        :param program: The program object received from the API.
+        """
+        first_phase = program.phases[0]
+        norms_belief = Belief(
+            name="norms",
+            arguments=[norm.norm for norm in first_phase.norms],
+            replace=True,
+        )
+        goals_belief = Belief(
+            name="goals",
+            arguments=[goal.description for goal in first_phase.goals],
+            replace=True,
+        )
+        program_beliefs = BeliefMessage(beliefs=[norms_belief, goals_belief])
+
+        message = InternalMessage(
+            to=settings.agent_settings.bdi_core_name,
+            sender=self.name,
+            body=program_beliefs.model_dump_json(),
+            thread="beliefs",
+        )
+        await self.send(message)
+        self.logger.debug("Sent new norms and goals to the BDI agent.")
 
+    async def _send_clear_llm_history(self):
+        """
+        Clear the LLM Agent's conversation history.
+
+        Sends an empty history to the LLM Agent to reset its state.
+        """
+        message = InternalMessage(
+            to=settings.agent_settings.llm_name,
+            sender=self.name,
+            body="clear_history",
+            thread="clear history message",
+        )
+        await self.send(message)
+        self.logger.debug("Sent message to LLM agent to clear history.")
 
     async def _receive_programs(self):
         """
@@ -652,18 +81,20 @@ class BDIProgramManager(BaseAgent):
 
         It listens to the ``program`` topic on the internal ZMQ SUB socket.
         When a program is received, it is validated and forwarded to BDI via :meth:`_send_to_bdi`.
+        Additionally, the LLM history is cleared via :meth:`_send_clear_llm_history`.
         """
         while True:
             topic, body = await self.sub_socket.recv_multipart()
 
            try:
                 program = Program.model_validate_json(body)
+                await self._send_to_bdi(program)
+                await self._send_clear_llm_history()
 
             except ValidationError:
                 self.logger.exception("Received an invalid program.")
                 continue
 
-            await self._send_to_bdi(program)
 
     async def setup(self):
         """
         Initialize the agent.
@@ -678,7 +109,3 @@ class BDIProgramManager(BaseAgent):
         self.sub_socket.subscribe("program")
 
         self.add_behavior(self._receive_programs())
 
 
-if __name__ == "__main__":
-    do_things()
@@ -144,7 +144,7 @@ class BDIBeliefCollectorAgent(BaseAgent):
         msg = InternalMessage(
             to=settings.agent_settings.bdi_core_name,
             sender=self.name,
-            body=BeliefMessage(create=beliefs).model_dump_json(),
+            body=BeliefMessage(beliefs=beliefs).model_dump_json(),
             thread="beliefs",
         )
 
@@ -1,23 +1,8 @@
-import asyncio
 import json
 
-import httpx
-from pydantic import ValidationError
-from slugify import slugify
-
 from control_backend.agents.base import BaseAgent
 from control_backend.core.agent_system import InternalMessage
 from control_backend.core.config import settings
-from control_backend.schemas.belief_message import Belief as InternalBelief
-from control_backend.schemas.belief_message import BeliefMessage
-from control_backend.schemas.chat_history import ChatHistory, ChatMessage
-from control_backend.schemas.program import (
-    Belief,
-    ConditionalNorm,
-    InferredBelief,
-    Program,
-    SemanticBelief,
-)
 
 
 class TextBeliefExtractorAgent(BaseAgent):
@@ -27,110 +12,46 @@ class TextBeliefExtractorAgent(BaseAgent):
     This agent is responsible for processing raw text (e.g., from speech transcription) and
     extracting semantic beliefs from it.
 
-    It uses the available beliefs received from the program manager to try to extract beliefs from a
-    user's message, sends any updated beliefs to the BDI core, and forms a ``user_said`` belief from
-    the message itself.
+    In the current demonstration version, it performs a simple wrapping of the user's input
+    into a ``user_said`` belief. In a full implementation, this agent would likely interact
+    with an LLM or NLU engine to extract intent, entities, and other structured information.
     """
 
-    def __init__(self, name: str):
-        super().__init__(name)
-        self.beliefs: dict[str, bool] = {}
-        self.available_beliefs: list[SemanticBelief] = []
-        self.conversation = ChatHistory(messages=[])
-
     async def setup(self):
         """
         Initialize the agent and its resources.
         """
-        self.logger.info("Setting up %s.", self.name)
+        self.logger.info("Settting up %s.", self.name)
+        # Setup LLM belief context if needed (currently demo is just passthrough)
+        self.beliefs = {"mood": ["X"], "car": ["Y"]}
 
     async def handle_message(self, msg: InternalMessage):
         """
-        Handle incoming messages. Expect messages from the Transcriber agent, LLM agent, and the
-        Program manager agent.
-
-        :param msg: The received message.
+        Handle incoming messages, primarily from the Transcription Agent.
+
+        :param msg: The received message containing transcribed text.
         """
         sender = msg.sender
+        if sender == settings.agent_settings.transcription_name:
+            self.logger.debug("Received text from transcriber: %s", msg.body)
+            await self._process_transcription_demo(msg.body)
+        else:
+            self.logger.info("Discarding message from %s", sender)
 
-        match sender:
-            case settings.agent_settings.transcription_name:
-                self.logger.debug("Received text from transcriber: %s", msg.body)
-                self._apply_conversation_message(ChatMessage(role="user", content=msg.body))
-                await self._infer_new_beliefs()
-                await self._user_said(msg.body)
-            case settings.agent_settings.llm_name:
-                self.logger.debug("Received text from LLM: %s", msg.body)
-                self._apply_conversation_message(ChatMessage(role="assistant", content=msg.body))
-            case settings.agent_settings.bdi_program_manager_name:
-                self._handle_program_manager_message(msg)
-            case _:
-                self.logger.info("Discarding message from %s", sender)
-                return
+    async def _process_transcription_demo(self, txt: str):
 
-    def _apply_conversation_message(self, message: ChatMessage):
         """
-        Save the chat message to our conversation history, taking into account the conversation
-        length limit.
+        Process the transcribed text and generate beliefs.
 
-        :param message: The chat message to add to the conversation history.
+        **Demo Implementation:**
+        Currently, this method takes the raw text ``txt`` and wraps it into a belief structure:
+        ``user_said("txt")``.
+
+        This belief is then sent to the :class:`BDIBeliefCollectorAgent`.
+
+        :param txt: The raw transcribed text string.
         """
-        length_limit = settings.behaviour_settings.conversation_history_length_limit
-        self.conversation.messages = (self.conversation.messages + [message])[-length_limit:]
+        # For demo, just wrapping user text as user_said belief
+        belief = {"beliefs": {"user_said": [txt]}, "type": "belief_extraction_text"}
 
-    def _handle_program_manager_message(self, msg: InternalMessage):
-        """
-        Handle a message from the program manager: extract available beliefs from it.
-
-        :param msg: The received message from the program manager.
-        """
-        try:
-            program = Program.model_validate_json(msg.body)
-        except ValidationError:
-            self.logger.warning(
-                "Received message from program manager but it is not a valid program."
-            )
-            return
-
-        self.logger.debug("Received a program from the program manager.")
-
-        self.available_beliefs = self._extract_basic_beliefs_from_program(program)
-
-    # TODO Copied from an incomplete version of the program manager. Use that one instead.
-    @staticmethod
-    def _extract_basic_beliefs_from_program(program: Program) -> list[SemanticBelief]:
-        beliefs = []
-
-        for phase in program.phases:
-            for norm in phase.norms:
-                if isinstance(norm, ConditionalNorm):
-                    beliefs += TextBeliefExtractorAgent._extract_basic_beliefs_from_belief(
-                        norm.condition
-                    )
-
-            for trigger in phase.triggers:
-                beliefs += TextBeliefExtractorAgent._extract_basic_beliefs_from_belief(
-                    trigger.condition
-                )
-
-        return beliefs
-
-    # TODO Copied from an incomplete version of the program manager. Use that one instead.
-    @staticmethod
-    def _extract_basic_beliefs_from_belief(belief: Belief) -> list[SemanticBelief]:
-        if isinstance(belief, InferredBelief):
-            return TextBeliefExtractorAgent._extract_basic_beliefs_from_belief(
-                belief.left
-            ) + TextBeliefExtractorAgent._extract_basic_beliefs_from_belief(belief.right)
-        return [belief]
-
-    async def _user_said(self, text: str):
-        """
-        Create a belief for the user's full speech.
-
-        :param text: User's transcribed text.
-        """
-        belief = {"beliefs": {"user_said": [text]}, "type": "belief_extraction_text"}
         payload = json.dumps(belief)
 
         belief_msg = InternalMessage(
@@ -139,207 +60,6 @@ class TextBeliefExtractorAgent(BaseAgent):
             body=payload,
             thread="beliefs",
         )
 
         await self.send(belief_msg)
+        self.logger.info("Sent %d beliefs to the belief collector.", len(belief["beliefs"]))
-    async def _infer_new_beliefs(self):
-        """
-        Process conversation history to extract beliefs, semantically. Any changed beliefs are sent
-        to the BDI core.
-        """
-        # Return instantly if there are no beliefs to infer
-        if not self.available_beliefs:
-            return
-
-        candidate_beliefs = await self._infer_turn()
-        belief_changes = BeliefMessage()
-        for belief_key, belief_value in candidate_beliefs.items():
-            if belief_value is None:
-                continue
-            old_belief_value = self.beliefs.get(belief_key)
-            if belief_value == old_belief_value:
-                continue
-
-            self.beliefs[belief_key] = belief_value
-
-            belief = InternalBelief(name=belief_key, arguments=None)
-            if belief_value:
-                belief_changes.create.append(belief)
-            else:
-                belief_changes.delete.append(belief)
-
-        # Return if there were no changes in beliefs
-        if not belief_changes.has_values():
-            return
-
-        beliefs_message = InternalMessage(
-            to=settings.agent_settings.bdi_core_name,
-            sender=self.name,
-            body=belief_changes.model_dump_json(),
-            thread="beliefs",
-        )
-        await self.send(beliefs_message)
-
-    @staticmethod
-    def _split_into_chunks[T](items: list[T], n: int) -> list[list[T]]:
-        k, m = divmod(len(items), n)
-        return [items[i * k + min(i, m) : (i + 1) * k + min(i + 1, m)] for i in range(n)]
-
-    async def _infer_turn(self) -> dict:
-        """
-        Process the stored conversation history to extract semantic beliefs. Returns a list of
-        beliefs that have been set to ``True``, ``False`` or ``None``.
-
-        :return: A dict mapping belief names to a value ``True``, ``False`` or ``None``.
-        """
-        n_parallel = max(1, min(settings.llm_settings.n_parallel - 1, len(self.available_beliefs)))
-        all_beliefs = await asyncio.gather(
-            *[
-                self._infer_beliefs(self.conversation, beliefs)
-                for beliefs in self._split_into_chunks(self.available_beliefs, n_parallel)
-            ]
-        )
-        retval = {}
-        for beliefs in all_beliefs:
-            if beliefs is None:
-                continue
-            retval.update(beliefs)
-        return retval
-
-    @staticmethod
-    def _create_belief_schema(belief: SemanticBelief) -> tuple[str, dict]:
-        # TODO: use real belief names
-        return belief.name or slugify(belief.description), {
-            "type": ["boolean", "null"],
-            "description": belief.description,
-        }
-
-    @staticmethod
-    def _create_beliefs_schema(beliefs: list[SemanticBelief]) -> dict:
-        belief_schemas = [
-            TextBeliefExtractorAgent._create_belief_schema(belief) for belief in beliefs
-        ]
-
-        return {
-            "type": "object",
-            "properties": dict(belief_schemas),
-            "required": [name for name, _ in belief_schemas],
-        }
-
-    @staticmethod
-    def _format_message(message: ChatMessage):
-        return f"{message.role.upper()}:\n{message.content}"
-
-    @staticmethod
-    def _format_conversation(conversation: ChatHistory):
-        return "\n\n".join(
-            [TextBeliefExtractorAgent._format_message(message) for message in conversation.messages]
-        )
-
-    @staticmethod
-    def _format_beliefs(beliefs: list[SemanticBelief]):
-        # TODO: use real belief names
-        return "\n".join(
-            [
-                f"- {belief.name or slugify(belief.description)}: {belief.description}"
-                for belief in beliefs
-            ]
-        )
-
-    async def _infer_beliefs(
-        self,
-        conversation: ChatHistory,
-        beliefs: list[SemanticBelief],
-    ) -> dict | None:
-        """
-        Infer given beliefs based on the given conversation.
-        :param conversation: The conversation to infer beliefs from.
-        :param beliefs: The beliefs to infer.
-        :return: A dict containing belief names and a boolean whether they hold, or None if the
-            belief cannot be inferred based on the given conversation.
-        """
-        example = {
-            "example_belief": True,
-        }
-
-        prompt = f"""{self._format_conversation(conversation)}
-
-Given the above conversation, what beliefs can be inferred?
-If there is no relevant information about a belief, give null.
-In case messages conflict, prefer using the most recent messages for inference.
-
-Choose from the following list of beliefs, formatted as (belief_name, description):
-{self._format_beliefs(beliefs)}
-
-Respond with a JSON similar to the following, but with the property names as given above:
-{json.dumps(example, indent=2)}
-"""
-
-        schema = self._create_beliefs_schema(beliefs)
-
-        return await self._retry_query_llm(prompt, schema)
-
-    async def _retry_query_llm(self, prompt: str, schema: dict, tries: int = 3) -> dict | None:
-        """
-        Query the LLM with the given prompt and schema, return an instance of a dict conforming
-        to this schema. Try ``tries`` times, or return None.
-
-        :param prompt: Prompt to be queried.
-        :param schema: Schema to be queried.
-        :return: An instance of a dict conforming to this schema, or None if failed.
-        """
-        try_count = 0
-        while try_count < tries:
-            try_count += 1
-
-            try:
-                return await self._query_llm(prompt, schema)
-            except (httpx.HTTPError, json.JSONDecodeError, KeyError) as e:
-                if try_count < tries:
-                    continue
-                self.logger.exception(
-                    "Failed to get LLM response after %d tries.",
-                    try_count,
-                    exc_info=e,
-                )
-
-        return None
-
-    @staticmethod
-    async def _query_llm(prompt: str, schema: dict) -> dict:
-        """
-        Query an LLM with the given prompt and schema, return an instance of a dict conforming to
-        that schema.
-
-        :param prompt: The prompt to be queried.
-        :param schema: Schema to use during response.
-        :return: A dict conforming to this schema.
-        :raises httpx.HTTPStatusError: If the LLM server responded with an error.
-        :raises json.JSONDecodeError: If the LLM response was not valid JSON. May happen if the
-            response was cut off early due to length limitations.
-        :raises KeyError: If the LLM server responded with no error, but the response was invalid.
-        """
-        async with httpx.AsyncClient() as client:
-            response = await client.post(
-                settings.llm_settings.local_llm_url,
-                json={
-                    "model": settings.llm_settings.local_llm_model,
-                    "messages": [{"role": "user", "content": prompt}],
-                    "response_format": {
-                        "type": "json_schema",
-                        "json_schema": {
-                            "name": "Beliefs",
-                            "strict": True,
-                            "schema": schema,
-                        },
-                    },
-                    "reasoning_effort": "low",
-                    "temperature": settings.llm_settings.code_temperature,
-                    "stream": False,
-                },
-                timeout=None,
-            )
-            response.raise_for_status()
-
-            response_json = response.json()
-            json_message = response_json["choices"][0]["message"]["content"]
-            return json.loads(json_message)
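The removed `_create_beliefs_schema` builds the JSON schema that constrains the LLM's structured output. For reference, a hand-written sketch of the kind of schema it would produce; the belief names and descriptions here are invented:

```python
# Sketch only: two hypothetical semantic beliefs, mirroring the shape built by
# _create_belief_schema / _create_beliefs_schema above.
import json

beliefs_schema = {
    "type": "object",
    "properties": {
        "user_is_frustrated": {
            "type": ["boolean", "null"],  # null means "cannot be inferred yet"
            "description": "The user sounds frustrated or annoyed.",
        },
        "user_wants_to_stop": {
            "type": ["boolean", "null"],
            "description": "The user asked to end the session.",
        },
    },
    "required": ["user_is_frustrated", "user_wants_to_stop"],
}

print(json.dumps(beliefs_schema, indent=2))
```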
@@ -3,14 +3,11 @@ import json
 
 import zmq
 import zmq.asyncio as azmq
-from pydantic import ValidationError
 from zmq.asyncio import Context
 
 from control_backend.agents import BaseAgent
 from control_backend.agents.actuation.robot_gesture_agent import RobotGestureAgent
-from control_backend.core.agent_system import InternalMessage
 from control_backend.core.config import settings
-from control_backend.schemas.ri_message import PauseCommand
 
 from ..actuation.robot_speech_agent import RobotSpeechAgent
 from ..perception import VADAgent
@@ -41,7 +38,7 @@ class RICommunicationAgent(BaseAgent):
     def __init__(
         self,
         name: str,
-        address=settings.zmq_settings.ri_command_address,
+        address=settings.zmq_settings.ri_communication_address,
         bind=False,
     ):
         super().__init__(name)
@@ -171,7 +168,7 @@ class RICommunicationAgent(BaseAgent):
             bind = port_data["bind"]
 
             if not bind:
-                addr = f"tcp://localhost:{port}"
+                addr = f"tcp://{settings.ri_host}:{port}"
             else:
                 addr = f"tcp://*:{port}"
 
@@ -301,11 +298,3 @@ class RICommunicationAgent(BaseAgent):
         self.logger.debug("Restarting communication negotiation.")
         if await self._negotiate_connection(max_retries=1):
             self.connected = True
-
-    async def handle_message(self, msg: InternalMessage):
-        try:
-            pause_command = PauseCommand.model_validate_json(msg.body)
-            self._req_socket.send_json(pause_command.model_dump())
-            self.logger.debug(self._req_socket.recv_json())
-        except ValidationError:
-            self.logger.warning("Incorrect message format for PauseCommand.")
@@ -52,6 +52,10 @@ class LLMAgent(BaseAgent):
                 await self._process_bdi_message(prompt_message)
             except ValidationError:
                 self.logger.debug("Prompt message from BDI core is invalid.")
+        elif msg.sender == settings.agent_settings.bdi_program_manager_name:
+            if msg.body == "clear_history":
+                self.logger.debug("Clearing conversation history.")
+                self.history.clear()
         else:
             self.logger.debug("Message ignored (not from BDI core).")
 
@@ -64,12 +68,11 @@ class LLMAgent(BaseAgent):
 
         :param message: The parsed prompt message containing text, norms, and goals.
         """
-        full_message = ""
         async for chunk in self._query_llm(message.text, message.norms, message.goals):
             await self._send_reply(chunk)
-            full_message += chunk
-        self.logger.debug("Finished processing BDI message. Response sent in chunks to BDI core.")
-        await self._send_full_reply(full_message)
+        self.logger.debug(
+            "Finished processing BDI message. Response sent in chunks to BDI core."
+        )
 
     async def _send_reply(self, msg: str):
         """
@@ -84,19 +87,6 @@ class LLMAgent(BaseAgent):
         )
         await self.send(reply)
 
-    async def _send_full_reply(self, msg: str):
-        """
-        Sends a response message (full) to agents that need it.
-
-        :param msg: The text content of the message.
-        """
-        message = InternalMessage(
-            to=settings.agent_settings.text_belief_extractor_name,
-            sender=self.name,
-            body=msg,
-        )
-        await self.send(message)
-
     async def _query_llm(
         self, prompt: str, norms: list[str], goals: list[str]
     ) -> AsyncGenerator[str]:
@@ -186,7 +176,7 @@ class LLMAgent(BaseAgent):
                 json={
                     "model": settings.llm_settings.local_llm_model,
                     "messages": messages,
-                    "temperature": settings.llm_settings.chat_temperature,
+                    "temperature": 0.3,
                     "stream": True,
                 },
             ) as response:
@@ -1,68 +0,0 @@
-import asyncio
-import json
-
-import zmq
-from zmq.asyncio import Context
-
-from control_backend.agents.base import BaseAgent
-from control_backend.core.agent_system import InternalMessage
-from control_backend.core.config import settings
-
-
-class TestPauseAgent(BaseAgent):
-    def __init__(self, name: str):
-        super().__init__(name)
-
-    async def setup(self):
-        context = Context.instance()
-        self.pub_socket = context.socket(zmq.PUB)
-        self.pub_socket.connect(settings.zmq_settings.internal_pub_address)
-
-        self.add_behavior(self._pause_command_loop())
-        self.logger.debug("TestPauseAgent setup complete.")
-
-    async def _pause_command_loop(self):
-        print("Starting Pause command test loop.")
-        while True:
-            pause_command = {
-                "endpoint": "pause",
-                "data": True,
-            }
-
-            message = InternalMessage(
-                to="ri_communication_agent",
-                sender=self.name,
-                body=json.dumps(pause_command),
-            )
-            await self.send(message)
-
-            # User interrupt message
-            data = {
-                "type": "pause",
-                "context": True,
-            }
-            await self.pub_socket.send_multipart([b"button_pressed", json.dumps(data).encode()])
-
-            self.logger.info("Pausing robot actions.")
-            await asyncio.sleep(15)  # Simulate delay between messages
-
-            pause_command = {
-                "endpoint": "pause",
-                "data": False,
-            }
-            message = InternalMessage(
-                to="ri_communication_agent",
-                sender=self.name,
-                body=json.dumps(pause_command),
-            )
-            await self.send(message)
-
-            # User interrupt message
-            data = {
-                "type": "pause",
-                "context": False,
-            }
-            await self.pub_socket.send_multipart([b"button_pressed", json.dumps(data).encode()])
-
-            self.logger.info("Resuming robot actions.")
-            await asyncio.sleep(15)  # Simulate delay between messages
@@ -7,7 +7,6 @@ import zmq.asyncio as azmq
 
 from control_backend.agents import BaseAgent
 from control_backend.core.config import settings
-from control_backend.schemas.internal_message import InternalMessage
 
 from ...schemas.program_status import PROGRAM_STATUS, ProgramStatus
 from .transcription_agent.transcription_agent import TranscriptionAgent
@@ -87,12 +86,6 @@ class VADAgent(BaseAgent):
         self.audio_buffer = np.array([], dtype=np.float32)
         self.i_since_speech = settings.behaviour_settings.vad_initial_since_speech
         self._ready = asyncio.Event()
 
-        # Pause control
-        self._reset_needed = False
-        self._paused = asyncio.Event()
-        self._paused.set()  # Not paused at start
-
         self.model = None
 
     async def setup(self):
@@ -110,12 +103,11 @@ class VADAgent(BaseAgent):
 
         self._connect_audio_in_socket()
 
-        audio_out_port = self._connect_audio_out_socket()
-        if audio_out_port is None:
+        audio_out_address = self._connect_audio_out_socket()
+        if audio_out_address is None:
             self.logger.error("Could not bind output socket, stopping.")
             await self.stop()
             return
-        audio_out_address = f"tcp://localhost:{audio_out_port}"
 
         # Connect to internal communication socket
         self.program_sub_socket = azmq.Context.instance().socket(zmq.SUB)
@@ -168,13 +160,14 @@ class VADAgent(BaseAgent):
         self.audio_in_socket.connect(self.audio_in_address)
         self.audio_in_poller = SocketPoller[bytes](self.audio_in_socket)
 
-    def _connect_audio_out_socket(self) -> int | None:
+    def _connect_audio_out_socket(self) -> str | None:
         """
-        Returns the port bound, or None if binding failed.
+        Returns the address that was bound to, or None if binding failed.
         """
         try:
             self.audio_out_socket = azmq.Context.instance().socket(zmq.PUB)
-            return self.audio_out_socket.bind_to_random_port("tcp://localhost", max_tries=100)
+            self.audio_out_socket.bind(settings.zmq_settings.vad_pub_address)
+            return settings.zmq_settings.vad_pub_address
         except zmq.ZMQBindError:
             self.logger.error("Failed to bind an audio output socket after 100 tries.")
             self.audio_out_socket = None
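The VAD agent now publishes on the in-process endpoint `inproc://vad_stream` instead of binding a random TCP port. A minimal standalone sketch (not repository code) of how such an inproc PUB/SUB pair behaves; inproc endpoints only work between sockets created from the same shared Context, which is why the agents use `Context.instance()`:

```python
# Standalone sketch of an inproc PUB/SUB pair, illustrating the new vad_pub_address setting.
import time
import zmq

ctx = zmq.Context.instance()

pub = ctx.socket(zmq.PUB)
pub.bind("inproc://vad_stream")      # publisher side, as the VAD agent does

sub = ctx.socket(zmq.SUB)
sub.connect("inproc://vad_stream")   # consumer side, e.g. a transcription worker
sub.subscribe(b"")                    # receive every topic

time.sleep(0.1)                       # let the subscription propagate (PUB/SUB slow-joiner)
pub.send(b"audio-segment-bytes")      # placeholder payload
print(sub.recv())
```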
@@ -220,16 +213,6 @@ class VADAgent(BaseAgent):
         """
         await self._ready.wait()
         while self._running:
-            await self._paused.wait()
-
-            # After being unpaused, reset stream and buffers
-            if self._reset_needed:
-                self.logger.debug("Resuming: resetting stream and buffers.")
-                await self._reset_stream()
-                self.audio_buffer = np.array([], dtype=np.float32)
-                self.i_since_speech = settings.behaviour_settings.vad_initial_since_speech
-                self._reset_needed = False
-
             assert self.audio_in_poller is not None
             data = await self.audio_in_poller.poll()
             if data is None:
@@ -271,27 +254,3 @@ class VADAgent(BaseAgent):
         # At this point, we know that the speech has ended.
         # Prepend the last chunk that had no speech, for a more fluent boundary
         self.audio_buffer = chunk
-
-    async def handle_message(self, msg: InternalMessage):
-        """
-        Handle incoming messages.
-
-        Expects messages to pause or resume the VAD processing from User Interrupt Agent.
-
-        :param msg: The received internal message.
-        """
-        sender = msg.sender
-
-        if sender == settings.agent_settings.user_interrupt_name:
-            if msg.body == "PAUSE":
-                self.logger.info("Pausing VAD processing.")
-                self._paused.clear()
-                # If the robot needs to pick up speaking where it left off, do not set _reset_needed
-                self._reset_needed = True
-            elif msg.body == "RESUME":
-                self.logger.info("Resuming VAD processing.")
-                self._paused.set()
-            else:
-                self.logger.warning(f"Unknown command from User Interrupt Agent: {msg.body}")
-        else:
-            self.logger.debug(f"Ignoring message from unknown sender: {sender}")
@@ -6,12 +6,7 @@ from zmq.asyncio import Context
 from control_backend.agents import BaseAgent
 from control_backend.core.agent_system import InternalMessage
 from control_backend.core.config import settings
-from control_backend.schemas.ri_message import (
-    GestureCommand,
-    PauseCommand,
-    RIEndpoint,
-    SpeechCommand,
-)
+from control_backend.schemas.ri_message import GestureCommand, RIEndpoint, SpeechCommand
 
 
 class UserInterruptAgent(BaseAgent):
@@ -76,12 +71,6 @@ class UserInterruptAgent(BaseAgent):
                 "Forwarded button press (override) with context '%s' to BDIProgramManager.",
                 event_context,
             )
-        elif event_type == "pause":
-            await self._send_pause_command(event_context)
-            if event_context:
-                self.logger.info("Sent pause command.")
-            else:
-                self.logger.info("Sent resume command.")
         else:
             self.logger.warning(
                 "Received button press with unknown type '%s' (context: '%s').",
@@ -141,38 +130,6 @@ class UserInterruptAgent(BaseAgent):
                 belief_id,
             )
 
-    async def _send_pause_command(self, pause: bool):
-        """
-        Send a pause command to the Robot Interface via the RI Communication Agent.
-        Send a pause command to the other internal agents; for now just VAD agent.
-        """
-        cmd = PauseCommand(data=pause)
-        message = InternalMessage(
-            to=settings.agent_settings.ri_communication_name,
-            sender=self.name,
-            body=cmd.model_dump_json(),
-        )
-        await self.send(message)
-
-        if pause:
-            # Send pause to VAD agent
-            vad_message = InternalMessage(
-                to=settings.agent_settings.vad_name,
-                sender=self.name,
-                body="PAUSE",
-            )
-            await self.send(vad_message)
-            self.logger.info("Sent pause command to VAD Agent and RI Communication Agent.")
-        else:
-            # Send resume to VAD agent
-            vad_message = InternalMessage(
-                to=settings.agent_settings.vad_name,
-                sender=self.name,
-                body="RESUME",
-            )
-            await self.send(vad_message)
-            self.logger.info("Sent resume command to VAD Agent and RI Communication Agent.")
-
     async def setup(self):
         """
         Initialize the agent.
@@ -1,3 +1,12 @@
+"""
+An exhaustive overview of configurable options. All of these can be set using environment variables
+by nesting with double underscores (__). Start from the ``Settings`` class.
+
+For example, ``settings.ri_host`` becomes ``RI_HOST``, and
+``settings.zmq_settings.ri_communication_address`` becomes
+``ZMQ_SETTINGS__RI_COMMUNICATION_ADDRESS``.
+"""
+
 from pydantic import BaseModel
 from pydantic_settings import BaseSettings, SettingsConfigDict
 
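The new module docstring describes the double-underscore nesting. A minimal sketch of the pydantic-settings mechanism behind it; the `Demo*` classes and field values are illustrative, and the real `Settings` class below may wire up the delimiter differently:

```python
# Sketch (not the repository's Settings class): nested env vars with a "__" delimiter.
from pydantic import BaseModel
from pydantic_settings import BaseSettings, SettingsConfigDict


class DemoZMQSettings(BaseModel):
    ri_communication_address: str = "tcp://*:5555"


class DemoSettings(BaseSettings):
    model_config = SettingsConfigDict(env_nested_delimiter="__")

    ri_host: str = "localhost"
    zmq_settings: DemoZMQSettings = DemoZMQSettings()


# RI_HOST=nao.local ZMQ_SETTINGS__RI_COMMUNICATION_ADDRESS=tcp://*:6000 python demo.py
settings = DemoSettings()
print(settings.ri_host, settings.zmq_settings.ri_communication_address)
```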
@@ -8,16 +17,17 @@ class ZMQSettings(BaseModel):
|
|||||||
|
|
||||||
:ivar internal_pub_address: Address for the internal PUB socket.
|
:ivar internal_pub_address: Address for the internal PUB socket.
|
||||||
:ivar internal_sub_address: Address for the internal SUB socket.
|
:ivar internal_sub_address: Address for the internal SUB socket.
|
||||||
:ivar ri_command_address: Address for sending commands to the Robot Interface.
|
:ivar ri_communication_address: Address for the endpoint that the Robot Interface connects to.
|
||||||
:ivar ri_communication_address: Address for receiving communication from the Robot Interface.
|
:ivar vad_pub_address: Address that the VAD agent binds to and publishes audio segments to.
|
||||||
:ivar vad_agent_address: Address for the Voice Activity Detection (VAD) agent.
|
|
||||||
"""
|
"""
|
||||||
|
|
||||||
|
# ATTENTION: When adding/removing settings, make sure to update the .env.example file
|
||||||
|
|
||||||
internal_pub_address: str = "tcp://localhost:5560"
|
internal_pub_address: str = "tcp://localhost:5560"
|
||||||
internal_sub_address: str = "tcp://localhost:5561"
|
internal_sub_address: str = "tcp://localhost:5561"
|
||||||
ri_command_address: str = "tcp://localhost:0000"
|
|
||||||
ri_communication_address: str = "tcp://*:5555"
|
ri_communication_address: str = "tcp://*:5555"
|
||||||
internal_gesture_rep_adress: str = "tcp://localhost:7788"
|
internal_gesture_rep_adress: str = "tcp://localhost:7788"
|
||||||
|
vad_pub_address: str = "inproc://vad_stream"
|
||||||
|
|
||||||
|
|
||||||
class AgentSettings(BaseModel):
|
class AgentSettings(BaseModel):
|
||||||
@@ -36,6 +46,8 @@ class AgentSettings(BaseModel):
|
|||||||
:ivar robot_speech_name: Name of the Robot Speech Agent.
|
:ivar robot_speech_name: Name of the Robot Speech Agent.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
|
# ATTENTION: When adding/removing settings, make sure to update the .env.example file
|
||||||
|
|
||||||
# agent names
|
# agent names
|
||||||
bdi_core_name: str = "bdi_core_agent"
|
bdi_core_name: str = "bdi_core_agent"
|
||||||
bdi_belief_collector_name: str = "belief_collector_agent"
|
bdi_belief_collector_name: str = "belief_collector_agent"
|
||||||
@@ -65,9 +77,10 @@ class BehaviourSettings(BaseModel):
|
|||||||
:ivar transcription_words_per_minute: Estimated words per minute for transcription timing.
|
:ivar transcription_words_per_minute: Estimated words per minute for transcription timing.
|
||||||
:ivar transcription_words_per_token: Estimated words per token for transcription timing.
|
:ivar transcription_words_per_token: Estimated words per token for transcription timing.
|
||||||
:ivar transcription_token_buffer: Buffer for transcription tokens.
|
:ivar transcription_token_buffer: Buffer for transcription tokens.
|
||||||
:ivar conversation_history_length_limit: The maximum amount of messages to extract beliefs from.
|
|
||||||
"""
|
"""
|
||||||
|
|
||||||
|
# ATTENTION: When adding/removing settings, make sure to update the .env.example file
|
||||||
|
|
||||||
sleep_s: float = 1.0
|
sleep_s: float = 1.0
|
||||||
comm_setup_max_retries: int = 5
|
comm_setup_max_retries: int = 5
|
||||||
socket_poller_timeout_ms: int = 100
|
socket_poller_timeout_ms: int = 100
|
||||||
@@ -83,9 +96,6 @@ class BehaviourSettings(BaseModel):
|
|||||||
transcription_words_per_token: float = 0.75 # (3 words = 4 tokens)
|
transcription_words_per_token: float = 0.75 # (3 words = 4 tokens)
|
||||||
transcription_token_buffer: int = 10
|
transcription_token_buffer: int = 10
|
||||||
|
|
||||||
# Text belief extractor settings
|
|
||||||
conversation_history_length_limit: int = 10
|
|
||||||
|
|
||||||
|
|
||||||
class LLMSettings(BaseModel):
|
class LLMSettings(BaseModel):
|
||||||
"""
|
"""
|
||||||
@@ -93,17 +103,12 @@ class LLMSettings(BaseModel):
|
|||||||
|
|
||||||
:ivar local_llm_url: URL for the local LLM API.
|
:ivar local_llm_url: URL for the local LLM API.
|
||||||
:ivar local_llm_model: Name of the local LLM model to use.
|
:ivar local_llm_model: Name of the local LLM model to use.
|
||||||
:ivar chat_temperature: The temperature to use while generating chat responses.
|
|
||||||
:ivar code_temperature: The temperature to use while generating code-like responses like during
|
|
||||||
belief inference.
|
|
||||||
:ivar n_parallel: The number of parallel calls allowed to be made to the LLM.
|
|
||||||
"""
|
"""
|
||||||
|
|
||||||
|
# ATTENTION: When adding/removing settings, make sure to update the .env.example file
|
||||||
|
|
||||||
local_llm_url: str = "http://localhost:1234/v1/chat/completions"
|
local_llm_url: str = "http://localhost:1234/v1/chat/completions"
|
||||||
local_llm_model: str = "gpt-oss"
|
local_llm_model: str = "gpt-oss"
|
||||||
chat_temperature: float = 1.0
|
|
||||||
code_temperature: float = 0.3
|
|
||||||
n_parallel: int = 4
|
|
||||||
|
|
||||||
|
|
||||||
class VADSettings(BaseModel):
|
class VADSettings(BaseModel):
|
||||||
@@ -115,6 +120,8 @@ class VADSettings(BaseModel):
|
|||||||
:ivar sample_rate_hz: Sample rate in Hz for the VAD model.
|
:ivar sample_rate_hz: Sample rate in Hz for the VAD model.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
|
# ATTENTION: When adding/removing settings, make sure to update the .env.example file
|
||||||
|
|
||||||
repo_or_dir: str = "snakers4/silero-vad"
|
repo_or_dir: str = "snakers4/silero-vad"
|
||||||
model_name: str = "silero_vad"
|
model_name: str = "silero_vad"
|
||||||
sample_rate_hz: int = 16000
|
sample_rate_hz: int = 16000
|
||||||
@@ -128,6 +135,8 @@ class SpeechModelSettings(BaseModel):
|
|||||||
:ivar openai_model_name: Model name for OpenAI-based speech recognition.
|
:ivar openai_model_name: Model name for OpenAI-based speech recognition.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
|
# ATTENTION: When adding/removing settings, make sure to update the .env.example file
|
||||||
|
|
||||||
# model identifiers for speech recognition
|
# model identifiers for speech recognition
|
||||||
mlx_model_name: str = "mlx-community/whisper-small.en-mlx"
|
mlx_model_name: str = "mlx-community/whisper-small.en-mlx"
|
||||||
openai_model_name: str = "small.en"
|
openai_model_name: str = "small.en"
|
||||||
@@ -139,6 +148,7 @@ class Settings(BaseSettings):
|
|||||||
|
|
||||||
:ivar app_title: Title of the application.
|
:ivar app_title: Title of the application.
|
||||||
:ivar ui_url: URL of the frontend UI.
|
:ivar ui_url: URL of the frontend UI.
|
||||||
|
:ivar ri_host: The hostname of the Robot Interface.
|
||||||
:ivar zmq_settings: ZMQ configuration.
|
:ivar zmq_settings: ZMQ configuration.
|
||||||
:ivar agent_settings: Agent name configuration.
|
:ivar agent_settings: Agent name configuration.
|
||||||
:ivar behaviour_settings: Behavior configuration.
|
:ivar behaviour_settings: Behavior configuration.
|
||||||
@@ -151,6 +161,8 @@ class Settings(BaseSettings):
|
|||||||
|
|
||||||
ui_url: str = "http://localhost:5173"
|
ui_url: str = "http://localhost:5173"
|
||||||
|
|
||||||
|
ri_host: str = "localhost"
|
||||||
|
|
||||||
zmq_settings: ZMQSettings = ZMQSettings()
|
zmq_settings: ZMQSettings = ZMQSettings()
|
||||||
|
|
||||||
agent_settings: AgentSettings = AgentSettings()
|
agent_settings: AgentSettings = AgentSettings()
|
||||||
|
|||||||
@@ -39,11 +39,10 @@ from control_backend.agents.communication import RICommunicationAgent
|
|||||||
# LLM Agents
|
# LLM Agents
|
||||||
from control_backend.agents.llm import LLMAgent
|
from control_backend.agents.llm import LLMAgent
|
||||||
|
|
||||||
# Other backend imports
|
|
||||||
from control_backend.agents.mock_agents.test_pause_ri import TestPauseAgent
|
|
||||||
|
|
||||||
# User Interrupt Agent
|
# User Interrupt Agent
|
||||||
from control_backend.agents.user_interrupt.user_interrupt_agent import UserInterruptAgent
|
from control_backend.agents.user_interrupt.user_interrupt_agent import UserInterruptAgent
|
||||||
|
|
||||||
|
# Other backend imports
|
||||||
from control_backend.api.v1.router import api_router
|
from control_backend.api.v1.router import api_router
|
||||||
from control_backend.core.config import settings
|
from control_backend.core.config import settings
|
||||||
from control_backend.logging import setup_logging
|
from control_backend.logging import setup_logging
|
||||||
@@ -142,12 +141,6 @@ async def lifespan(app: FastAPI):
|
|||||||
"name": settings.agent_settings.bdi_program_manager_name,
|
"name": settings.agent_settings.bdi_program_manager_name,
|
||||||
},
|
},
|
||||||
),
|
),
|
||||||
"TestPauseAgent": (
|
|
||||||
TestPauseAgent,
|
|
||||||
{
|
|
||||||
"name": "pause_test_agent",
|
|
||||||
},
|
|
||||||
),
|
|
||||||
"UserInterruptAgent": (
|
"UserInterruptAgent": (
|
||||||
UserInterruptAgent,
|
UserInterruptAgent,
|
||||||
{
|
{
|
||||||
|
|||||||
@@ -6,27 +6,18 @@ class Belief(BaseModel):
Represents a single belief in the BDI system.

:ivar name: The functor or name of the belief (e.g., 'user_said').
- :ivar arguments: A list of string arguments for the belief, or None if the belief has no
- arguments.
+ :ivar arguments: A list of string arguments for the belief.
+ :ivar replace: If True, existing beliefs with this name should be replaced by this one.
"""

name: str
- arguments: list[str] | None
+ arguments: list[str]
+ replace: bool = False


class BeliefMessage(BaseModel):
"""
- A container for communicating beliefs between agents.
+ A container for transporting a list of beliefs between agents.

- :ivar create: Beliefs to create.
- :ivar delete: Beliefs to delete.
- :ivar replace: Beliefs to replace. Deletes all beliefs with the same name, replacing them with
- one new belief.
"""

- create: list[Belief] = []
- delete: list[Belief] = []
- replace: list[Belief] = []
+ beliefs: list[Belief]

- def has_values(self) -> bool:
- return len(self.create) > 0 or len(self.delete) > 0 or len(self.replace) > 0
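To make the reshaped belief schema concrete, here is a small usage sketch: one flat list of `Belief` objects, with a per-belief `replace` flag instead of the old create/delete/replace buckets. The class bodies are restated only so the snippet runs on its own; they mirror the definitions in this hunk.

```python
# Self-contained sketch of the new shape (mirrors the models in this hunk).
from pydantic import BaseModel


class Belief(BaseModel):
    name: str
    arguments: list[str]
    replace: bool = False


class BeliefMessage(BaseModel):
    beliefs: list[Belief]


msg = BeliefMessage(beliefs=[Belief(name="user_said", arguments=["Hello"], replace=True)])
body = msg.model_dump_json()          # what would travel in an InternalMessage body
restored = BeliefMessage.model_validate_json(body)
assert restored.beliefs[0].replace    # existing "user_said" beliefs get replaced
```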
@@ -1,10 +0,0 @@
- from pydantic import BaseModel

- class ChatMessage(BaseModel):
- role: str
- content: str

- class ChatHistory(BaseModel):
- messages: list[ChatMessage]
@@ -1,202 +1,64 @@
- from enum import Enum
+ from pydantic import BaseModel
- from typing import Literal

- from pydantic import UUID4, BaseModel

- class ProgramElement(BaseModel):
+ class Norm(BaseModel):
- """
- Represents a basic element of our behavior program.

- :ivar name: The researcher-assigned name of the element.
- :ivar id: Unique identifier.
- """

- name: str
- id: UUID4

- class LogicalOperator(Enum):
- AND = "AND"
- OR = "OR"

- type Belief = KeywordBelief | SemanticBelief | InferredBelief
- type BasicBelief = KeywordBelief | SemanticBelief

- class KeywordBelief(ProgramElement):
- """
- Represents a belief that is set when the user spoken text contains a certain keyword.

- :ivar keyword: The keyword on which this belief gets set.
- """

- name: str = ""
- keyword: str

- class SemanticBelief(ProgramElement):
- """
- Represents a belief that is set by semantic LLM validation.

- :ivar description: Description of how to form the belief, used by the LLM.
- """

- name: str = ""
- description: str

- class InferredBelief(ProgramElement):
- """
- Represents a belief that gets formed by combining two beliefs with a logical AND or OR.

- These beliefs can also be :class:`InferredBelief`, leading to arbitrarily deep nesting.

- :ivar operator: The logical operator to apply.
- :ivar left: The left part of the logical expression.
- :ivar right: The right part of the logical expression.
- """

- name: str = ""
- operator: LogicalOperator
- left: Belief
- right: Belief

- class Norm(ProgramElement):
- name: str = ""
- norm: str
- critical: bool = False

- class BasicNorm(Norm):
"""
Represents a behavioral norm.

+ :ivar id: Unique identifier.
+ :ivar label: Human-readable label.
:ivar norm: The actual norm text describing the behavior.
- :ivar critical: When true, this norm should absolutely not be violated (checked separately).
"""

- pass
+ id: str
+ label: str
+ norm: str

- class ConditionalNorm(Norm):
+ class Goal(BaseModel):
"""
- Represents a norm that is only active when a condition is met (i.e., a certain belief holds).
+ Represents an objective to be achieved.

- :ivar condition: When to activate this norm.
+ :ivar id: Unique identifier.
+ :ivar label: Human-readable label.
+ :ivar description: Detailed description of the goal.
+ :ivar achieved: Status flag indicating if the goal has been met.
"""

- condition: Belief
+ id: str
+ label: str
+ description: str
+ achieved: bool

- type PlanElement = Goal | Action
+ class TriggerKeyword(BaseModel):
+ id: str
+ keyword: str

- class Plan(ProgramElement):
+ class KeywordTrigger(BaseModel):
- """
+ id: str
- Represents a list of steps to execute. Each of these steps can be a goal (with its own plan)
+ label: str
- or a simple action.
+ type: str
+ keywords: list[TriggerKeyword]
- :ivar steps: The actions or subgoals to execute, in order.
- """

- name: str = ""
- steps: list[PlanElement]

- class Goal(ProgramElement):
+ class Phase(BaseModel):
- """
- Represents an objective to be achieved. To reach the goal, we should execute
- the corresponding plan. If we can fail to achieve a goal after executing the plan,
- for example when the achieving of the goal is dependent on the user's reply, this means
- that the achieved status will be set from somewhere else in the program.

- :ivar plan: The plan to execute.
- :ivar can_fail: Whether we can fail to achieve the goal after executing the plan.
- """

- plan: Plan
- can_fail: bool = True

- type Action = SpeechAction | GestureAction | LLMAction

- class SpeechAction(ProgramElement):
- """
- Represents the action of the robot speaking a literal text.

- :ivar text: The text to speak.
- """

- name: str = ""
- text: str

- class Gesture(BaseModel):
- """
- Represents a gesture to be performed. Can be either a single gesture,
- or a random gesture from a category (tag).

- :ivar type: The type of the gesture, "tag" or "single".
- :ivar name: The name of the single gesture or tag.
- """

- type: Literal["tag", "single"]
- name: str

- class GestureAction(ProgramElement):
- """
- Represents the action of the robot performing a gesture.

- :ivar gesture: The gesture to perform.
- """

- name: str = ""
- gesture: Gesture

- class LLMAction(ProgramElement):
- """
- Represents the action of letting an LLM generate a reply based on its chat history
- and an additional goal added in the prompt.

- :ivar goal: The extra (temporary) goal to add to the LLM.
- """

- name: str = ""
- goal: str

- class Trigger(ProgramElement):
- """
- Represents a belief-based trigger. When a belief is set, the corresponding plan is executed.

- :ivar condition: When to activate the trigger.
- :ivar plan: The plan to execute.
- """

- name: str = ""
- condition: Belief
- plan: Plan

- class Phase(ProgramElement):
"""
A distinct phase within a program, containing norms, goals, and triggers.

+ :ivar id: Unique identifier.
+ :ivar label: Human-readable label.
:ivar norms: List of norms active in this phase.
:ivar goals: List of goals to pursue in this phase.
:ivar triggers: List of triggers that define transitions out of this phase.
"""

- name: str = ""
+ id: str
- norms: list[BasicNorm | ConditionalNorm]
+ label: str
+ norms: list[Norm]
goals: list[Goal]
- triggers: list[Trigger]
+ triggers: list[KeywordTrigger]

class Program(BaseModel):
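A minimal document that validates against the flattened schema (Norm, Goal, TriggerKeyword, KeywordTrigger, Phase), matching the shape the new tests feed in further down. The field values are illustrative, and it assumes `Program` keeps a single `phases` list.

```python
# Illustrative only; assumes Program exposes a "phases" list as before.
from control_backend.schemas.program import Program

program = Program.model_validate(
    {
        "phases": [
            {
                "id": "phase1",
                "label": "Introduction",
                "norms": [{"id": "n1", "label": "politeness", "norm": "Be nice."}],
                "goals": [
                    {"id": "g1", "label": "greet", "description": "Greet the user.", "achieved": False}
                ],
                "triggers": [
                    {
                        "id": "t1",
                        "label": "stop words",
                        "type": "keywords",
                        "keywords": [{"id": "kw1", "keyword": "stop"}],
                    }
                ],
            }
        ]
    }
)
print(program.phases[0].goals[0].description)
```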
@@ -14,7 +14,6 @@ class RIEndpoint(str, Enum):
GESTURE_TAG = "actuate/gesture/tag"
PING = "ping"
NEGOTIATE_PORTS = "negotiate/ports"
- PAUSE = "pause"

class RIMessage(BaseModel):

@@ -65,14 +64,3 @@ class GestureCommand(RIMessage):
if self.endpoint not in allowed:
raise ValueError("endpoint must be GESTURE_SINGLE or GESTURE_TAG")
return self

- class PauseCommand(RIMessage):
- """
- A specific command to pause or unpause the robot's actions.

- :ivar endpoint: Fixed to ``RIEndpoint.PAUSE``.
- :ivar data: A boolean indicating whether to pause (True) or unpause (False).
- """

- endpoint: RIEndpoint = RIEndpoint(RIEndpoint.PAUSE)
- data: bool
@@ -91,7 +91,7 @@ def test_out_socket_creation(zmq_context):
assert per_vad_agent.audio_out_socket is not None

zmq_context.return_value.socket.assert_called_once_with(zmq.PUB)
- zmq_context.return_value.socket.return_value.bind_to_random_port.assert_called_once()
+ zmq_context.return_value.socket.return_value.bind.assert_called_once_with("inproc://vad_stream")

@pytest.mark.asyncio
@@ -73,7 +73,7 @@ async def test_setup_connect(zmq_context, mocker):
async def test_handle_message_sends_valid_gesture_command():
"""Internal message with valid gesture tag is forwarded to robot pub socket."""
pubsocket = AsyncMock()
- agent = RobotGestureAgent("robot_gesture", gesture_data=["hello", "yes", "no"])
+ agent = RobotGestureAgent("robot_gesture", gesture_data=["hello", "yes", "no"], address="")
agent.pubsocket = pubsocket

payload = {

@@ -91,7 +91,7 @@ async def test_handle_message_sends_valid_gesture_command():
async def test_handle_message_sends_non_gesture_command():
"""Internal message with non-gesture endpoint is not forwarded by this agent."""
pubsocket = AsyncMock()
- agent = RobotGestureAgent("robot_gesture", gesture_data=["hello", "yes", "no"])
+ agent = RobotGestureAgent("robot_gesture", gesture_data=["hello", "yes", "no"], address="")
agent.pubsocket = pubsocket

payload = {"endpoint": "some_other_endpoint", "data": "invalid_tag_not_in_list"}

@@ -107,7 +107,7 @@ async def test_handle_message_sends_non_gesture_command():
async def test_handle_message_rejects_invalid_gesture_tag():
"""Internal message with invalid gesture tag is not forwarded."""
pubsocket = AsyncMock()
- agent = RobotGestureAgent("robot_gesture", gesture_data=["hello", "yes", "no"])
+ agent = RobotGestureAgent("robot_gesture", gesture_data=["hello", "yes", "no"], address="")
agent.pubsocket = pubsocket

# Use a tag that's not in gesture_data

@@ -123,7 +123,7 @@ async def test_handle_message_rejects_invalid_gesture_tag():
async def test_handle_message_invalid_payload():
"""Invalid payload is caught and does not send."""
pubsocket = AsyncMock()
- agent = RobotGestureAgent("robot_gesture", gesture_data=["hello", "yes", "no"])
+ agent = RobotGestureAgent("robot_gesture", gesture_data=["hello", "yes", "no"], address="")
agent.pubsocket = pubsocket

msg = InternalMessage(to="robot", sender="tester", body=json.dumps({"bad": "data"}))

@@ -142,12 +142,12 @@ async def test_zmq_command_loop_valid_gesture_payload():
async def recv_once():
# stop after first iteration
agent._running = False
- return (b"command", json.dumps(command).encode("utf-8"))
+ return b"command", json.dumps(command).encode("utf-8")

fake_socket.recv_multipart = recv_once
fake_socket.send_json = AsyncMock()

- agent = RobotGestureAgent("robot_gesture", gesture_data=["hello", "yes", "no"])
+ agent = RobotGestureAgent("robot_gesture", gesture_data=["hello", "yes", "no"], address="")
agent.subsocket = fake_socket
agent.pubsocket = fake_socket
agent._running = True

@@ -165,12 +165,12 @@ async def test_zmq_command_loop_valid_non_gesture_payload():

async def recv_once():
agent._running = False
- return (b"command", json.dumps(command).encode("utf-8"))
+ return b"command", json.dumps(command).encode("utf-8")

fake_socket.recv_multipart = recv_once
fake_socket.send_json = AsyncMock()

- agent = RobotGestureAgent("robot_gesture", gesture_data=["hello", "yes", "no"])
+ agent = RobotGestureAgent("robot_gesture", gesture_data=["hello", "yes", "no"], address="")
agent.subsocket = fake_socket
agent.pubsocket = fake_socket
agent._running = True

@@ -188,12 +188,12 @@ async def test_zmq_command_loop_invalid_gesture_tag():

async def recv_once():
agent._running = False
- return (b"command", json.dumps(command).encode("utf-8"))
+ return b"command", json.dumps(command).encode("utf-8")

fake_socket.recv_multipart = recv_once
fake_socket.send_json = AsyncMock()

- agent = RobotGestureAgent("robot_gesture", gesture_data=["hello", "yes", "no"])
+ agent = RobotGestureAgent("robot_gesture", gesture_data=["hello", "yes", "no"], address="")
agent.subsocket = fake_socket
agent.pubsocket = fake_socket
agent._running = True

@@ -210,12 +210,12 @@ async def test_zmq_command_loop_invalid_json():

async def recv_once():
agent._running = False
- return (b"command", b"{not_json}")
+ return b"command", b"{not_json}"

fake_socket.recv_multipart = recv_once
fake_socket.send_json = AsyncMock()

- agent = RobotGestureAgent("robot_gesture", gesture_data=["hello", "yes", "no"])
+ agent = RobotGestureAgent("robot_gesture", gesture_data=["hello", "yes", "no"], address="")
agent.subsocket = fake_socket
agent.pubsocket = fake_socket
agent._running = True

@@ -232,12 +232,12 @@ async def test_zmq_command_loop_ignores_send_gestures_topic():

async def recv_once():
agent._running = False
- return (b"send_gestures", b"{}")
+ return b"send_gestures", b"{}"

fake_socket.recv_multipart = recv_once
fake_socket.send_json = AsyncMock()

- agent = RobotGestureAgent("robot_gesture", gesture_data=["hello", "yes", "no"])
+ agent = RobotGestureAgent("robot_gesture", gesture_data=["hello", "yes", "no"], address="")
agent.subsocket = fake_socket
agent.pubsocket = fake_socket
agent._running = True

@@ -259,7 +259,9 @@ async def test_fetch_gestures_loop_without_amount():
fake_repsocket.recv = recv_once
fake_repsocket.send = AsyncMock()

- agent = RobotGestureAgent("robot_gesture", gesture_data=["hello", "yes", "no", "wave", "point"])
+ agent = RobotGestureAgent(
+ "robot_gesture", gesture_data=["hello", "yes", "no", "wave", "point"], address=""
+ )
agent.repsocket = fake_repsocket
agent._running = True

@@ -287,7 +289,9 @@ async def test_fetch_gestures_loop_with_amount():
fake_repsocket.recv = recv_once
fake_repsocket.send = AsyncMock()

- agent = RobotGestureAgent("robot_gesture", gesture_data=["hello", "yes", "no", "wave", "point"])
+ agent = RobotGestureAgent(
+ "robot_gesture", gesture_data=["hello", "yes", "no", "wave", "point"], address=""
+ )
agent.repsocket = fake_repsocket
agent._running = True

@@ -315,7 +319,7 @@ async def test_fetch_gestures_loop_with_integer_request():
fake_repsocket.recv = recv_once
fake_repsocket.send = AsyncMock()

- agent = RobotGestureAgent("robot_gesture", gesture_data=["hello", "yes", "no"])
+ agent = RobotGestureAgent("robot_gesture", gesture_data=["hello", "yes", "no"], address="")
agent.repsocket = fake_repsocket
agent._running = True

@@ -340,7 +344,7 @@ async def test_fetch_gestures_loop_with_invalid_json():
fake_repsocket.recv = recv_once
fake_repsocket.send = AsyncMock()

- agent = RobotGestureAgent("robot_gesture", gesture_data=["hello", "yes", "no"])
+ agent = RobotGestureAgent("robot_gesture", gesture_data=["hello", "yes", "no"], address="")
agent.repsocket = fake_repsocket
agent._running = True

@@ -365,7 +369,7 @@ async def test_fetch_gestures_loop_with_non_integer_json():
fake_repsocket.recv = recv_once
fake_repsocket.send = AsyncMock()

- agent = RobotGestureAgent("robot_gesture", gesture_data=["hello", "yes", "no"])
+ agent = RobotGestureAgent("robot_gesture", gesture_data=["hello", "yes", "no"], address="")
agent.repsocket = fake_repsocket
agent._running = True

@@ -381,7 +385,7 @@ async def test_fetch_gestures_loop_with_non_integer_json():
def test_gesture_data_attribute():
"""Test that gesture_data returns the expected list."""
gesture_data = ["hello", "yes", "no", "wave"]
- agent = RobotGestureAgent("robot_gesture", gesture_data=gesture_data)
+ agent = RobotGestureAgent("robot_gesture", gesture_data=gesture_data, address="")

assert agent.gesture_data == gesture_data
assert isinstance(agent.gesture_data, list)

@@ -398,7 +402,7 @@ async def test_stop_closes_sockets():
pubsocket = MagicMock()
subsocket = MagicMock()
repsocket = MagicMock()
- agent = RobotGestureAgent("robot_gesture")
+ agent = RobotGestureAgent("robot_gesture", address="")
agent.pubsocket = pubsocket
agent.subsocket = subsocket
agent.repsocket = repsocket

@@ -415,7 +419,7 @@ async def test_stop_closes_sockets():
async def test_initialization_with_custom_gesture_data():
"""Agent can be initialized with custom gesture data."""
custom_gestures = ["custom1", "custom2", "custom3"]
- agent = RobotGestureAgent("robot_gesture", gesture_data=custom_gestures)
+ agent = RobotGestureAgent("robot_gesture", gesture_data=custom_gestures, address="")

assert agent.gesture_data == custom_gestures

@@ -432,7 +436,7 @@ async def test_fetch_gestures_loop_handles_exception():
fake_repsocket.recv = recv_once
fake_repsocket.send = AsyncMock()

- agent = RobotGestureAgent("robot_gesture", gesture_data=["hello", "yes", "no"])
+ agent = RobotGestureAgent("robot_gesture", gesture_data=["hello", "yes", "no"], address="")
agent.repsocket = fake_repsocket
agent.logger = MagicMock()
agent._running = True
@@ -51,7 +51,7 @@ async def test_handle_belief_collector_message(agent, mock_settings):
msg = InternalMessage(
to="bdi_agent",
sender=mock_settings.agent_settings.bdi_belief_collector_name,
- body=BeliefMessage(create=beliefs).model_dump_json(),
+ body=BeliefMessage(beliefs=beliefs).model_dump_json(),
thread="beliefs",
)

@@ -64,26 +64,6 @@ async def test_handle_belief_collector_message(agent, mock_settings):
assert args[2] == agentspeak.Literal("user_said", (agentspeak.Literal("Hello"),))

- @pytest.mark.asyncio
- async def test_handle_delete_belief_message(agent, mock_settings):
- """Test that incoming beliefs to be deleted are removed from the BDI agent"""
- beliefs = [Belief(name="user_said", arguments=["Hello"])]

- msg = InternalMessage(
- to="bdi_agent",
- sender=mock_settings.agent_settings.bdi_belief_collector_name,
- body=BeliefMessage(delete=beliefs).model_dump_json(),
- thread="beliefs",
- )
- await agent.handle_message(msg)

- # Expect bdi_agent.call to be triggered to remove belief
- args = agent.bdi_agent.call.call_args.args
- assert args[0] == agentspeak.Trigger.removal
- assert args[1] == agentspeak.GoalType.belief
- assert args[2] == agentspeak.Literal("user_said", (agentspeak.Literal("Hello"),))

@pytest.mark.asyncio
async def test_incorrect_belief_collector_message(agent, mock_settings):
"""Test that incorrect message format triggers an exception."""

@@ -148,8 +128,7 @@ def test_add_belief_sets_event(agent):
agent._wake_bdi_loop = MagicMock()

belief = Belief(name="test_belief", arguments=["a", "b"])
- belief_changes = BeliefMessage(replace=[belief])
- agent._apply_belief_changes(belief_changes)
+ agent._apply_beliefs([belief])

assert agent.bdi_agent.call.called
agent._wake_bdi_loop.set.assert_called()

@@ -158,7 +137,7 @@ def test_add_belief_sets_event(agent):
def test_apply_beliefs_empty_returns(agent):
"""Line: if not beliefs: return"""
agent._wake_bdi_loop = MagicMock()
- agent._apply_belief_changes(BeliefMessage())
+ agent._apply_beliefs([])
agent.bdi_agent.call.assert_not_called()
agent._wake_bdi_loop.set.assert_not_called()

@@ -241,9 +220,8 @@ def test_replace_belief_calls_remove_all(agent):
agent._remove_all_with_name = MagicMock()
agent._wake_bdi_loop = MagicMock()

- belief = Belief(name="user_said", arguments=["Hello"])
+ belief = Belief(name="user_said", arguments=["Hello"], replace=True)
- belief_changes = BeliefMessage(replace=[belief])
- agent._apply_belief_changes(belief_changes)
+ agent._apply_beliefs([belief])

agent._remove_all_with_name.assert_called_with("user_said")
@@ -1,6 +1,6 @@
import asyncio
+ import json
import sys
- import uuid
from unittest.mock import AsyncMock

import pytest

@@ -8,45 +8,31 @@ import pytest
from control_backend.agents.bdi.bdi_program_manager import BDIProgramManager
from control_backend.core.agent_system import InternalMessage
from control_backend.schemas.belief_message import BeliefMessage
- from control_backend.schemas.program import BasicNorm, Goal, Phase, Plan, Program
+ from control_backend.schemas.program import Program

# Fix Windows Proactor loop for zmq
if sys.platform.startswith("win"):
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())

- def make_valid_program_json(norm="N1", goal="G1") -> str:
+ def make_valid_program_json(norm="N1", goal="G1"):
- return Program(
+ return json.dumps(
- phases=[
+ {
- Phase(
+ "phases": [
- id=uuid.uuid4(),
+ {
- name="Basic Phase",
+ "id": "phase1",
- norms=[
+ "label": "Phase 1",
- BasicNorm(
+ "triggers": [],
- id=uuid.uuid4(),
+ "norms": [{"id": "n1", "label": "Norm 1", "norm": norm}],
- name=norm,
+ "goals": [
- norm=norm,
+ {"id": "g1", "label": "Goal 1", "description": goal, "achieved": False}
- ),
+ ],
- ],
+ }
- goals=[
+ ]
- Goal(
+ }
- id=uuid.uuid4(),
+ )
- name=goal,
- plan=Plan(
- id=uuid.uuid4(),
- name="Goal Plan",
- steps=[],
- ),
- can_fail=False,
- ),
- ],
- triggers=[],
- ),
- ],
- ).model_dump_json()

- @pytest.mark.skip(reason="Functionality being rebuilt.")
@pytest.mark.asyncio
async def test_send_to_bdi():
manager = BDIProgramManager(name="program_manager_test")

@@ -77,6 +63,7 @@ async def test_receive_programs_valid_and_invalid():
manager = BDIProgramManager(name="program_manager_test")
manager.sub_socket = sub
manager._send_to_bdi = AsyncMock()
+ manager._send_clear_llm_history = AsyncMock()

try:
# Will give StopAsyncIteration when the predefined `sub.recv_multipart` side-effects run out

@@ -87,5 +74,26 @@ async def test_receive_programs_valid_and_invalid():
# Only valid Program should have triggered _send_to_bdi
assert manager._send_to_bdi.await_count == 1
forwarded: Program = manager._send_to_bdi.await_args[0][0]
- assert forwarded.phases[0].norms[0].name == "N1"
+ assert forwarded.phases[0].norms[0].norm == "N1"
- assert forwarded.phases[0].goals[0].name == "G1"
+ assert forwarded.phases[0].goals[0].description == "G1"

+ # Verify history clear was triggered
+ assert manager._send_clear_llm_history.await_count == 1

+ @pytest.mark.asyncio
+ async def test_send_clear_llm_history(mock_settings):
+ # Ensure the mock returns a string for the agent name (just like in your LLM tests)
+ mock_settings.agent_settings.llm_agent_name = "llm_agent"

+ manager = BDIProgramManager(name="program_manager_test")
+ manager.send = AsyncMock()

+ await manager._send_clear_llm_history()

+ assert manager.send.await_count == 1
+ msg: InternalMessage = manager.send.await_args[0][0]

+ # Verify the content and recipient
+ assert msg.body == "clear_history"
+ assert msg.to == "llm_agent"
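The tests above only pin down the observable behaviour of `_send_clear_llm_history`: one `InternalMessage` with body `"clear_history"` addressed to the LLM agent. A hedged sketch of a helper that would satisfy them; the `settings.agent_settings.llm_agent_name` lookup and the `self.name` attribute are assumptions, not confirmed implementation details.

```python
# Hypothetical sketch of the helper exercised by test_send_clear_llm_history.
from control_backend.core.agent_system import InternalMessage
from control_backend.core.config import settings


async def _send_clear_llm_history(self) -> None:
    # Tell the LLM agent to drop its chat history when a new program is loaded.
    await self.send(
        InternalMessage(
            to=settings.agent_settings.llm_agent_name,  # "llm_agent" in the mocked settings
            sender=self.name,                           # assumed manager attribute
            body="clear_history",
        )
    )
```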
@@ -86,7 +86,7 @@ async def test_send_beliefs_to_bdi(agent):
sent: InternalMessage = agent.send.call_args.args[0]
assert sent.to == settings.agent_settings.bdi_core_name
assert sent.thread == "beliefs"
- assert json.loads(sent.body)["create"] == [belief.model_dump() for belief in beliefs]
+ assert json.loads(sent.body)["beliefs"] == [belief.model_dump() for belief in beliefs]

@pytest.mark.asyncio
@@ -1,346 +0,0 @@
- import json
- import uuid
- from unittest.mock import AsyncMock, MagicMock, patch

- import httpx
- import pytest

- from control_backend.agents.bdi import TextBeliefExtractorAgent
- from control_backend.core.agent_system import InternalMessage
- from control_backend.core.config import settings
- from control_backend.schemas.belief_message import BeliefMessage
- from control_backend.schemas.program import (
- ConditionalNorm,
- LLMAction,
- Phase,
- Plan,
- Program,
- SemanticBelief,
- Trigger,
- )

- @pytest.fixture
- def agent():
- agent = TextBeliefExtractorAgent("text_belief_agent")
- agent.send = AsyncMock()
- agent._query_llm = AsyncMock()
- return agent

- @pytest.fixture
- def sample_program():
- return Program(
- phases=[
- Phase(
- name="Some phase",
- id=uuid.uuid4(),
- norms=[
- ConditionalNorm(
- name="Some norm",
- id=uuid.uuid4(),
- norm="Use nautical terms.",
- critical=False,
- condition=SemanticBelief(
- name="is_pirate",
- id=uuid.uuid4(),
- description="The user is a pirate. Perhaps because they say "
- "they are, or because they speak like a pirate "
- 'with terms like "arr".',
- ),
- ),
- ],
- goals=[],
- triggers=[
- Trigger(
- name="Some trigger",
- id=uuid.uuid4(),
- condition=SemanticBelief(
- name="no_more_booze",
- id=uuid.uuid4(),
- description="There is no more alcohol.",
- ),
- plan=Plan(
- name="Some plan",
- id=uuid.uuid4(),
- steps=[
- LLMAction(
- name="Some action",
- id=uuid.uuid4(),
- goal="Suggest eating chocolate instead.",
- ),
- ],
- ),
- ),
- ],
- ),
- ],
- )

- def make_msg(sender: str, body: str, thread: str | None = None) -> InternalMessage:
- return InternalMessage(to="unused", sender=sender, body=body, thread=thread)

- @pytest.mark.asyncio
- async def test_handle_message_ignores_other_agents(agent):
- msg = make_msg("unknown", "some data", None)

- await agent.handle_message(msg)

- agent.send.assert_not_called()  # noqa # `agent.send` has no such property, but we mock it.

- @pytest.mark.asyncio
- async def test_handle_message_from_transcriber(agent, mock_settings):
- transcription = "hello world"
- msg = make_msg(mock_settings.agent_settings.transcription_name, transcription, None)

- await agent.handle_message(msg)

- agent.send.assert_awaited_once()  # noqa # `agent.send` has no such property, but we mock it.
- sent: InternalMessage = agent.send.call_args.args[0]  # noqa
- assert sent.to == mock_settings.agent_settings.bdi_belief_collector_name
- assert sent.thread == "beliefs"
- parsed = json.loads(sent.body)
- assert parsed == {"beliefs": {"user_said": [transcription]}, "type": "belief_extraction_text"}

- @pytest.mark.asyncio
- async def test_process_user_said(agent, mock_settings):
- transcription = "this is a test"

- await agent._user_said(transcription)

- agent.send.assert_awaited_once()  # noqa # `agent.send` has no such property, but we mock it.
- sent: InternalMessage = agent.send.call_args.args[0]  # noqa
- assert sent.to == mock_settings.agent_settings.bdi_belief_collector_name
- assert sent.thread == "beliefs"
- parsed = json.loads(sent.body)
- assert parsed["beliefs"]["user_said"] == [transcription]

- @pytest.mark.asyncio
- async def test_query_llm():
- mock_response = MagicMock()
- mock_response.json.return_value = {
- "choices": [
- {
- "message": {
- "content": "null",
- }
- }
- ]
- }
- mock_client = AsyncMock()
- mock_client.post.return_value = mock_response
- mock_async_client = MagicMock()
- mock_async_client.__aenter__.return_value = mock_client
- mock_async_client.__aexit__.return_value = None

- with patch(
- "control_backend.agents.bdi.text_belief_extractor_agent.httpx.AsyncClient",
- return_value=mock_async_client,
- ):
- agent = TextBeliefExtractorAgent("text_belief_agent")

- res = await agent._query_llm("hello world", {"type": "null"})
- # Response content was set as "null", so should be deserialized as None
- assert res is None

- @pytest.mark.asyncio
- async def test_retry_query_llm_success(agent):
- agent._query_llm.return_value = None
- res = await agent._retry_query_llm("hello world", {"type": "null"})

- agent._query_llm.assert_called_once()
- assert res is None

- @pytest.mark.asyncio
- async def test_retry_query_llm_success_after_failure(agent):
- agent._query_llm.side_effect = [KeyError(), "real value"]
- res = await agent._retry_query_llm("hello world", {"type": "string"})

- assert agent._query_llm.call_count == 2
- assert res == "real value"

- @pytest.mark.asyncio
- async def test_retry_query_llm_failures(agent):
- agent._query_llm.side_effect = [KeyError(), KeyError(), KeyError(), "real value"]
- res = await agent._retry_query_llm("hello world", {"type": "string"})

- assert agent._query_llm.call_count == 3
- assert res is None

- @pytest.mark.asyncio
- async def test_retry_query_llm_fail_immediately(agent):
- agent._query_llm.side_effect = [KeyError(), "real value"]
- res = await agent._retry_query_llm("hello world", {"type": "string"}, tries=1)

- assert agent._query_llm.call_count == 1
- assert res is None

- @pytest.mark.asyncio
- async def test_extracting_beliefs_from_program(agent, sample_program):
- assert len(agent.available_beliefs) == 0
- await agent.handle_message(
- InternalMessage(
- to=settings.agent_settings.text_belief_extractor_name,
- sender=settings.agent_settings.bdi_program_manager_name,
- body=sample_program.model_dump_json(),
- ),
- )
- assert len(agent.available_beliefs) == 2

- @pytest.mark.asyncio
- async def test_handle_invalid_program(agent, sample_program):
- agent.available_beliefs.append(sample_program.phases[0].norms[0].condition)
- agent.available_beliefs.append(sample_program.phases[0].triggers[0].condition)
- assert len(agent.available_beliefs) == 2

- await agent.handle_message(
- InternalMessage(
- to=settings.agent_settings.text_belief_extractor_name,
- sender=settings.agent_settings.bdi_program_manager_name,
- body=json.dumps({"phases": "Invalid"}),
- ),
- )

- assert len(agent.available_beliefs) == 2

- @pytest.mark.asyncio
- async def test_handle_robot_response(agent):
- initial_length = len(agent.conversation.messages)
- response = "Hi, I'm Pepper. What's your name?"

- await agent.handle_message(
- InternalMessage(
- to=settings.agent_settings.text_belief_extractor_name,
- sender=settings.agent_settings.llm_name,
- body=response,
- ),
- )

- assert len(agent.conversation.messages) == initial_length + 1
- assert agent.conversation.messages[-1].role == "assistant"
- assert agent.conversation.messages[-1].content == response

- @pytest.mark.asyncio
- async def test_simulated_real_turn_with_beliefs(agent, sample_program):
- """Test sending user message to extract beliefs from."""
- agent.available_beliefs.append(sample_program.phases[0].norms[0].condition)
- agent.available_beliefs.append(sample_program.phases[0].triggers[0].condition)

- # Send a user message with the belief that there's no more booze
- agent._query_llm.return_value = {"is_pirate": None, "no_more_booze": True}
- assert len(agent.conversation.messages) == 0
- await agent.handle_message(
- InternalMessage(
- to=settings.agent_settings.text_belief_extractor_name,
- sender=settings.agent_settings.transcription_name,
- body="We're all out of schnaps.",
- ),
- )
- assert len(agent.conversation.messages) == 1

- # There should be a belief set and sent to the BDI core, as well as the user_said belief
- assert agent.send.call_count == 2

- # First should be the beliefs message
- message: InternalMessage = agent.send.call_args_list[0].args[0]
- beliefs = BeliefMessage.model_validate_json(message.body)
- assert len(beliefs.create) == 1
- assert beliefs.create[0].name == "no_more_booze"

- @pytest.mark.asyncio
- async def test_simulated_real_turn_no_beliefs(agent, sample_program):
- """Test a user message to extract beliefs from, but no beliefs are formed."""
- agent.available_beliefs.append(sample_program.phases[0].norms[0].condition)
- agent.available_beliefs.append(sample_program.phases[0].triggers[0].condition)

- # Send a user message with no new beliefs
- agent._query_llm.return_value = {"is_pirate": None, "no_more_booze": None}
- await agent.handle_message(
- InternalMessage(
- to=settings.agent_settings.text_belief_extractor_name,
- sender=settings.agent_settings.transcription_name,
- body="Hello there!",
- ),
- )

- # Only the user_said belief should've been sent
- agent.send.assert_called_once()

- @pytest.mark.asyncio
- async def test_simulated_real_turn_no_new_beliefs(agent, sample_program):
- """
- Test a user message to extract beliefs from, but no new beliefs are formed because they already
- existed.
- """
- agent.available_beliefs.append(sample_program.phases[0].norms[0].condition)
- agent.available_beliefs.append(sample_program.phases[0].triggers[0].condition)
- agent.beliefs["is_pirate"] = True

- # Send a user message with the belief the user is a pirate, still
- agent._query_llm.return_value = {"is_pirate": True, "no_more_booze": None}
- await agent.handle_message(
- InternalMessage(
- to=settings.agent_settings.text_belief_extractor_name,
- sender=settings.agent_settings.transcription_name,
- body="Arr, nice to meet you, matey.",
- ),
- )

- # Only the user_said belief should've been sent, as no beliefs have changed
- agent.send.assert_called_once()

- @pytest.mark.asyncio
- async def test_simulated_real_turn_remove_belief(agent, sample_program):
- """
- Test a user message to extract beliefs from, but an existing belief is determined no longer to
- hold.
- """
- agent.available_beliefs.append(sample_program.phases[0].norms[0].condition)
- agent.available_beliefs.append(sample_program.phases[0].triggers[0].condition)
- agent.beliefs["no_more_booze"] = True

- # Send a user message with the belief the user is a pirate, still
- agent._query_llm.return_value = {"is_pirate": None, "no_more_booze": False}
- await agent.handle_message(
- InternalMessage(
- to=settings.agent_settings.text_belief_extractor_name,
- sender=settings.agent_settings.transcription_name,
- body="I found an untouched barrel of wine!",
- ),
- )

- # Both user_said and belief change should've been sent
- assert agent.send.call_count == 2

- # Agent's current beliefs should've changed
- assert not agent.beliefs["no_more_booze"]

- @pytest.mark.asyncio
- async def test_llm_failure_handling(agent, sample_program):
- """
- Check that the agent handles failures gracefully without crashing.
- """
- agent._query_llm.side_effect = httpx.HTTPError("")
- agent.available_beliefs.append(sample_program.phases[0].norms[0].condition)
- agent.available_beliefs.append(sample_program.phases[0].triggers[0].condition)

- belief_changes = await agent._infer_turn()

- assert len(belief_changes) == 0
65
test/unit/agents/bdi/test_text_extractor.py
Normal file

@@ -0,0 +1,65 @@
+ import json
+ from unittest.mock import AsyncMock

+ import pytest

+ from control_backend.agents.bdi import (
+ TextBeliefExtractorAgent,
+ )
+ from control_backend.core.agent_system import InternalMessage

+ @pytest.fixture
+ def agent():
+ agent = TextBeliefExtractorAgent("text_belief_agent")
+ agent.send = AsyncMock()
+ return agent

+ def make_msg(sender: str, body: str, thread: str | None = None) -> InternalMessage:
+ return InternalMessage(to="unused", sender=sender, body=body, thread=thread)

+ @pytest.mark.asyncio
+ async def test_handle_message_ignores_other_agents(agent):
+ msg = make_msg("unknown", "some data", None)

+ await agent.handle_message(msg)

+ agent.send.assert_not_called()  # noqa # `agent.send` has no such property, but we mock it.

+ @pytest.mark.asyncio
+ async def test_handle_message_from_transcriber(agent, mock_settings):
+ transcription = "hello world"
+ msg = make_msg(mock_settings.agent_settings.transcription_name, transcription, None)

+ await agent.handle_message(msg)

+ agent.send.assert_awaited_once()  # noqa # `agent.send` has no such property, but we mock it.
+ sent: InternalMessage = agent.send.call_args.args[0]  # noqa
+ assert sent.to == mock_settings.agent_settings.bdi_belief_collector_name
+ assert sent.thread == "beliefs"
+ parsed = json.loads(sent.body)
+ assert parsed == {"beliefs": {"user_said": [transcription]}, "type": "belief_extraction_text"}

+ @pytest.mark.asyncio
+ async def test_process_transcription_demo(agent, mock_settings):
+ transcription = "this is a test"

+ await agent._process_transcription_demo(transcription)

+ agent.send.assert_awaited_once()  # noqa # `agent.send` has no such property, but we mock it.
+ sent: InternalMessage = agent.send.call_args.args[0]  # noqa
+ assert sent.to == mock_settings.agent_settings.bdi_belief_collector_name
+ assert sent.thread == "beliefs"
+ parsed = json.loads(sent.body)
+ assert parsed["beliefs"]["user_said"] == [transcription]

+ @pytest.mark.asyncio
+ async def test_setup_initializes_beliefs(agent):
+ """Covers the setup method and ensures beliefs are initialized."""
+ await agent.setup()
+ assert agent.beliefs == {"mood": ["X"], "car": ["Y"]}
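These tests constrain `_process_transcription_demo` only through its output message, so for orientation, a sketch that would satisfy the assertions above. The payload shape is copied from the asserts; the `self.name` attribute and the exact settings lookup are assumptions.

```python
# Hypothetical sketch matching the assertions in test_process_transcription_demo.
import json

from control_backend.core.agent_system import InternalMessage
from control_backend.core.config import settings


async def _process_transcription_demo(self, transcription: str) -> None:
    payload = {"beliefs": {"user_said": [transcription]}, "type": "belief_extraction_text"}
    await self.send(
        InternalMessage(
            to=settings.agent_settings.bdi_belief_collector_name,
            sender=self.name,          # assumed agent attribute
            body=json.dumps(payload),
            thread="beliefs",
        )
    )
```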
@@ -66,7 +66,7 @@ async def test_llm_processing_success(mock_httpx_client, mock_settings):
# "Hello world." constitutes one sentence/chunk based on punctuation split
# The agent should call send once with the full sentence
assert agent.send.called
- args = agent.send.call_args_list[0][0][0]
+ args = agent.send.call_args[0][0]
assert args.to == mock_settings.agent_settings.bdi_core_name
assert "Hello world." in args.body

@@ -265,3 +265,23 @@ async def test_stream_query_llm_skips_non_data_lines(mock_httpx_client, mock_set

# Only the valid 'data:' line should yield content
assert tokens == ["Hi"]

+ @pytest.mark.asyncio
+ async def test_clear_history_command(mock_settings):
+ """Test that the 'clear_history' message clears the agent's memory."""
+ # setup LLM to have some history
+ mock_settings.agent_settings.bdi_program_manager_name = "bdi_program_manager_agent"
+ agent = LLMAgent("llm_agent")
+ agent.history = [
+ {"role": "user", "content": "Old conversation context"},
+ {"role": "assistant", "content": "Old response"},
+ ]
+ assert len(agent.history) == 2
+ msg = InternalMessage(
+ to="llm_agent",
+ sender=mock_settings.agent_settings.bdi_program_manager_name,
+ body="clear_history",
+ )
+ await agent.handle_message(msg)
+ assert len(agent.history) == 0
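For context, a sketch of the message-handling branch this new test exercises. Only the `"clear_history"` body, the program-manager sender, and the emptied `history` list are fixed by the test; everything else here is assumed.

```python
# Hypothetical sketch of the branch covered by test_clear_history_command.
from control_backend.core.agent_system import InternalMessage
from control_backend.core.config import settings


async def handle_message(self, msg: InternalMessage) -> None:
    if (
        msg.sender == settings.agent_settings.bdi_program_manager_name
        and msg.body == "clear_history"
    ):
        # A new program was loaded: forget the previous conversation.
        self.history.clear()
        return
    # ...normal prompt handling would continue here...
```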
@@ -7,6 +7,15 @@ import zmq
from control_backend.agents.perception.vad_agent import VADAgent

+ # We don't want to use real ZMQ in unit tests, for example because it can give errors when sockets
+ # aren't closed properly.
+ @pytest.fixture(autouse=True)
+ def mock_zmq():
+ with patch("zmq.asyncio.Context") as mock:
+ mock.instance.return_value = MagicMock()
+ yield mock

@pytest.fixture
def audio_out_socket():
return AsyncMock()

@@ -140,12 +149,10 @@ async def test_vad_model_load_failure_stops_agent(vad_agent):
# Patch stop to an AsyncMock so we can check it was awaited
vad_agent.stop = AsyncMock()

- result = await vad_agent.setup()
+ await vad_agent.setup()

# Assert stop was called
vad_agent.stop.assert_awaited_once()
- # Assert setup returned None
- assert result is None

@pytest.mark.asyncio

@@ -155,7 +162,7 @@ async def test_audio_out_bind_failure_sets_none_and_logs(vad_agent, caplog):
audio_out_socket is set to None, None is returned, and an error is logged.
"""
mock_socket = MagicMock()
- mock_socket.bind_to_random_port.side_effect = zmq.ZMQBindError()
+ mock_socket.bind.side_effect = zmq.ZMQBindError()
with patch("control_backend.agents.perception.vad_agent.azmq.Context.instance") as mock_ctx:
mock_ctx.return_value.socket.return_value = mock_socket
@@ -1,5 +1,4 @@
|
|||||||
import json
|
import json
|
||||||
import uuid
|
|
||||||
from unittest.mock import AsyncMock
|
from unittest.mock import AsyncMock
|
||||||
|
|
||||||
import pytest
|
import pytest
|
||||||
@@ -7,7 +6,7 @@ from fastapi import FastAPI
|
|||||||
from fastapi.testclient import TestClient
|
from fastapi.testclient import TestClient
|
||||||
|
|
||||||
from control_backend.api.v1.endpoints import program
|
from control_backend.api.v1.endpoints import program
|
||||||
from control_backend.schemas.program import BasicNorm, Goal, Phase, Plan, Program
|
from control_backend.schemas.program import Program
|
||||||
|
|
||||||
|
|
||||||
@pytest.fixture
|
@pytest.fixture
|
||||||
@@ -26,37 +25,29 @@ def client(app):
|
|||||||
|
|
||||||
def make_valid_program_dict():
|
def make_valid_program_dict():
|
||||||
"""Helper to create a valid Program JSON structure."""
|
"""Helper to create a valid Program JSON structure."""
|
||||||
# Converting to JSON using Pydantic because it knows how to convert a UUID object
|
return {
|
||||||
program_json_str = Program(
|
"phases": [
|
||||||
phases=[
|
{
|
||||||
Phase(
|
"id": "phase1",
|
||||||
id=uuid.uuid4(),
|
"label": "basephase",
|
||||||
name="Basic Phase",
|
"norms": [{"id": "n1", "label": "norm", "norm": "be nice"}],
|
||||||
norms=[
|
"goals": [
|
||||||
BasicNorm(
|
{"id": "g1", "label": "goal", "description": "test goal", "achieved": False}
|
||||||
id=uuid.uuid4(),
|
|
||||||
name="Some norm",
|
|
||||||
norm="Do normal.",
|
|
||||||
),
|
|
||||||
],
|
],
|
||||||
goals=[
|
"triggers": [
|
||||||
Goal(
|
{
|
||||||
id=uuid.uuid4(),
|
"id": "t1",
|
||||||
name="Some goal",
|
"label": "trigger",
|
||||||
plan=Plan(
|
"type": "keywords",
|
||||||
id=uuid.uuid4(),
|
"keywords": [
|
||||||
name="Goal Plan",
|
{"id": "kw1", "keyword": "keyword1"},
|
||||||
steps=[],
|
{"id": "kw2", "keyword": "keyword2"},
|
||||||
),
|
],
|
||||||
can_fail=False,
|
},
|
||||||
),
|
|
||||||
],
|
],
|
||||||
triggers=[],
|
}
|
||||||
),
|
]
|
||||||
],
|
}
|
||||||
).model_dump_json()
|
|
||||||
# Converting back to a dict because that's what's expected
|
|
||||||
return json.loads(program_json_str)
|
|
||||||
|
|
||||||
|
|
||||||
def test_receive_program_success(client):
|
def test_receive_program_success(client):
|
||||||
@@ -80,8 +71,7 @@ def test_receive_program_success(client):
|
|||||||
sent_bytes = args[0][1]
|
sent_bytes = args[0][1]
|
||||||
sent_obj = json.loads(sent_bytes.decode())
|
sent_obj = json.loads(sent_bytes.decode())
|
||||||
|
|
||||||
# Converting to JSON using Pydantic because it knows how to handle UUIDs
|
expected_obj = Program.model_validate(program_dict).model_dump()
|
||||||
expected_obj = json.loads(Program.model_validate(program_dict).model_dump_json())
|
|
||||||
assert sent_obj == expected_obj
|
assert sent_obj == expected_obj
|
||||||
|
|
||||||
|
|
||||||
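The removed comments explain why the old helper round-tripped through `model_dump_json()`: with `uuid.UUID` ids, `model_dump()` keeps UUID objects that are not directly comparable to parsed JSON, while `model_dump_json()` serialises them to strings. With the new string ids a plain `model_dump()` is enough. A small sketch of that Pydantic behaviour, using a hypothetical `Item` model rather than the project's schema:

```python
import json
import uuid

from pydantic import BaseModel


class Item(BaseModel):
    # Hypothetical model with a UUID id, like the old Program schema used.
    id: uuid.UUID


item = Item(id=uuid.uuid4())

# model_dump() keeps the id as a uuid.UUID object...
assert isinstance(item.model_dump()["id"], uuid.UUID)

# ...while model_dump_json() serialises it to a string, which json.loads gives back.
assert isinstance(json.loads(item.model_dump_json())["id"], str)
```
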
@@ -1,65 +1,49 @@
-import uuid
-
 import pytest
 from pydantic import ValidationError

 from control_backend.schemas.program import (
-    BasicNorm,
-    ConditionalNorm,
     Goal,
-    InferredBelief,
-    KeywordBelief,
-    LogicalOperator,
+    KeywordTrigger,
+    Norm,
     Phase,
-    Plan,
     Program,
-    SemanticBelief,
-    Trigger,
+    TriggerKeyword,
 )


-def base_norm() -> BasicNorm:
-    return BasicNorm(
-        id=uuid.uuid4(),
-        name="testNormName",
+def base_norm() -> Norm:
+    return Norm(
+        id="norm1",
+        label="testNorm",
         norm="testNormNorm",
-        critical=False,
     )


 def base_goal() -> Goal:
     return Goal(
-        id=uuid.uuid4(),
-        name="testGoalName",
-        plan=Plan(
-            id=uuid.uuid4(),
-            name="testGoalPlanName",
-            steps=[],
-        ),
-        can_fail=False,
+        id="goal1",
+        label="testGoal",
+        description="testGoalDescription",
+        achieved=False,
     )


-def base_trigger() -> Trigger:
-    return Trigger(
-        id=uuid.uuid4(),
-        name="testTriggerName",
-        condition=KeywordBelief(
-            id=uuid.uuid4(),
-            name="testTriggerKeywordBeliefTriggerName",
-            keyword="Keyword",
-        ),
-        plan=Plan(
-            id=uuid.uuid4(),
-            name="testTriggerPlanName",
-            steps=[],
-        ),
+def base_trigger() -> KeywordTrigger:
+    return KeywordTrigger(
+        id="trigger1",
+        label="testTrigger",
+        type="keywords",
+        keywords=[
+            TriggerKeyword(id="keyword1", keyword="testKeyword1"),
+            TriggerKeyword(id="keyword1", keyword="testKeyword2"),
+        ],
     )


 def base_phase() -> Phase:
     return Phase(
-        id=uuid.uuid4(),
+        id="phase1",
+        label="basephase",
         norms=[base_norm()],
         goals=[base_goal()],
         triggers=[base_trigger()],
@@ -74,7 +58,7 @@ def invalid_program() -> dict:
     # wrong types inside phases list (not Phase objects)
     return {
         "phases": [
-            {"id": uuid.uuid4()}, # incomplete
+            {"id": "phase1"}, # incomplete
             {"not_a_phase": True},
         ]
     }
@@ -93,112 +77,11 @@ def test_valid_deepprogram():
     # validate nested components directly
     phase = validated.phases[0]
     assert isinstance(phase.goals[0], Goal)
-    assert isinstance(phase.triggers[0], Trigger)
-    assert isinstance(phase.norms[0], BasicNorm)
+    assert isinstance(phase.triggers[0], KeywordTrigger)
+    assert isinstance(phase.norms[0], Norm)


 def test_invalid_program():
     bad = invalid_program()
     with pytest.raises(ValidationError):
         Program.model_validate(bad)
-
-
-def test_conditional_norm_parsing():
-    """
-    Check that pydantic is able to preserve the type of the norm, that it doesn't lose its
-    "condition" field when serializing and deserializing.
-    """
-    norm = ConditionalNorm(
-        name="testNormName",
-        id=uuid.uuid4(),
-        norm="testNormNorm",
-        critical=False,
-        condition=KeywordBelief(
-            name="testKeywordBelief",
-            id=uuid.uuid4(),
-            keyword="testKeywordBelief",
-        ),
-    )
-    program = Program(
-        phases=[
-            Phase(
-                name="Some phase",
-                id=uuid.uuid4(),
-                norms=[norm],
-                goals=[],
-                triggers=[],
-            ),
-        ],
-    )
-
-    parsed_program = Program.model_validate_json(program.model_dump_json())
-    parsed_norm = parsed_program.phases[0].norms[0]
-
-    assert hasattr(parsed_norm, "condition")
-    assert isinstance(parsed_norm, ConditionalNorm)
-
-
-def test_belief_type_parsing():
-    """
-    Check that pydantic is able to discern between the different types of beliefs when serializing
-    and deserializing.
-    """
-    keyword_belief = KeywordBelief(
-        name="testKeywordBelief",
-        id=uuid.uuid4(),
-        keyword="something",
-    )
-    semantic_belief = SemanticBelief(
-        name="testSemanticBelief",
-        id=uuid.uuid4(),
-        description="something",
-    )
-    inferred_belief = InferredBelief(
-        name="testInferredBelief",
-        id=uuid.uuid4(),
-        operator=LogicalOperator.OR,
-        left=keyword_belief,
-        right=semantic_belief,
-    )
-
-    program = Program(
-        phases=[
-            Phase(
-                name="Some phase",
-                id=uuid.uuid4(),
-                norms=[],
-                goals=[],
-                triggers=[
-                    Trigger(
-                        name="testTriggerKeywordTrigger",
-                        id=uuid.uuid4(),
-                        condition=keyword_belief,
-                        plan=Plan(name="testTriggerPlanName", id=uuid.uuid4(), steps=[]),
-                    ),
-                    Trigger(
-                        name="testTriggerSemanticTrigger",
-                        id=uuid.uuid4(),
-                        condition=semantic_belief,
-                        plan=Plan(name="testTriggerPlanName", id=uuid.uuid4(), steps=[]),
-                    ),
-                    Trigger(
-                        name="testTriggerInferredTrigger",
-                        id=uuid.uuid4(),
-                        condition=inferred_belief,
-                        plan=Plan(name="testTriggerPlanName", id=uuid.uuid4(), steps=[]),
-                    ),
-                ],
-            ),
-        ],
-    )
-
-    parsed_program = Program.model_validate_json(program.model_dump_json())
-
-    parsed_keyword_belief = parsed_program.phases[0].triggers[0].condition
-    assert isinstance(parsed_keyword_belief, KeywordBelief)
-
-    parsed_semantic_belief = parsed_program.phases[0].triggers[1].condition
-    assert isinstance(parsed_semantic_belief, SemanticBelief)
-
-    parsed_inferred_belief = parsed_program.phases[0].triggers[2].condition
-    assert isinstance(parsed_inferred_belief, InferredBelief)
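The two deleted tests exercised Pydantic's ability to keep the concrete norm/belief subtype across a serialise/deserialise round trip. The new schema appears to carry an explicit `type` field instead (e.g. `type="keywords"` on `KeywordTrigger`), which is the usual discriminated-union pattern. A minimal sketch of that pattern with hypothetical models, not the repository's actual schema:

```python
from typing import Annotated, Literal, Union

from pydantic import BaseModel, Field


# Hypothetical trigger models; the "type" literal tells Pydantic which class to build.
class KeywordTrigger(BaseModel):
    type: Literal["keywords"] = "keywords"
    keywords: list[str]


class SemanticTrigger(BaseModel):
    type: Literal["semantic"] = "semantic"
    description: str


Trigger = Annotated[Union[KeywordTrigger, SemanticTrigger], Field(discriminator="type")]


class Phase(BaseModel):
    triggers: list[Trigger]


phase = Phase(
    triggers=[
        KeywordTrigger(keywords=["hello"]),
        SemanticTrigger(description="participant greets the robot"),
    ]
)

# The concrete subtypes survive the JSON round trip because of the discriminator.
parsed = Phase.model_validate_json(phase.model_dump_json())
assert isinstance(parsed.triggers[0], KeywordTrigger)
assert isinstance(parsed.triggers[1], SemanticTrigger)
```
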
uv.lock (generated, 23 lines changed)
@@ -997,7 +997,6 @@ dependencies = [
     { name = "pydantic" },
     { name = "pydantic-settings" },
     { name = "python-json-logger" },
-    { name = "python-slugify" },
     { name = "pyyaml" },
     { name = "pyzmq" },
     { name = "silero-vad" },
@@ -1047,7 +1046,6 @@ requires-dist = [
     { name = "pydantic", specifier = ">=2.12.0" },
     { name = "pydantic-settings", specifier = ">=2.11.0" },
     { name = "python-json-logger", specifier = ">=4.0.0" },
-    { name = "python-slugify", specifier = ">=8.0.4" },
     { name = "pyyaml", specifier = ">=6.0.3" },
     { name = "pyzmq", specifier = ">=27.1.0" },
     { name = "silero-vad", specifier = ">=6.0.0" },
@@ -1343,18 +1341,6 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/45/58/38b5afbc1a800eeea951b9285d3912613f2603bdf897a4ab0f4bd7f405fc/python_multipart-0.0.20-py3-none-any.whl", hash = "sha256:8a62d3a8335e06589fe01f2a3e178cdcc632f3fbe0d492ad9ee0ec35aab1f104", size = 24546, upload-time = "2024-12-16T19:45:44.423Z" },
 ]

-[[package]]
-name = "python-slugify"
-version = "8.0.4"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
-    { name = "text-unidecode" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/87/c7/5e1547c44e31da50a460df93af11a535ace568ef89d7a811069ead340c4a/python-slugify-8.0.4.tar.gz", hash = "sha256:59202371d1d05b54a9e7720c5e038f928f45daaffe41dd10822f3907b937c856", size = 10921, upload-time = "2024-02-08T18:32:45.488Z" }
-wheels = [
-    { url = "https://files.pythonhosted.org/packages/a4/62/02da182e544a51a5c3ccf4b03ab79df279f9c60c5e82d5e8bec7ca26ac11/python_slugify-8.0.4-py2.py3-none-any.whl", hash = "sha256:276540b79961052b66b7d116620b36518847f52d5fd9e3a70164fc8c50faa6b8", size = 10051, upload-time = "2024-02-08T18:32:43.911Z" },
-]
-
 [[package]]
 name = "pyyaml"
 version = "6.0.3"
@@ -1878,15 +1864,6 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/a2/09/77d55d46fd61b4a135c444fc97158ef34a095e5681d0a6c10b75bf356191/sympy-1.14.0-py3-none-any.whl", hash = "sha256:e091cc3e99d2141a0ba2847328f5479b05d94a6635cb96148ccb3f34671bd8f5", size = 6299353, upload-time = "2025-04-27T18:04:59.103Z" },
 ]

-[[package]]
-name = "text-unidecode"
-version = "1.3"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/ab/e2/e9a00f0ccb71718418230718b3d900e71a5d16e701a3dae079a21e9cd8f8/text-unidecode-1.3.tar.gz", hash = "sha256:bad6603bb14d279193107714b288be206cac565dfa49aa5b105294dd5c4aab93", size = 76885, upload-time = "2019-08-30T21:36:45.405Z" }
-wheels = [
-    { url = "https://files.pythonhosted.org/packages/a6/a5/c0b6468d3824fe3fde30dbb5e1f687b291608f9473681bbf7dabbf5a87d7/text_unidecode-1.3-py2.py3-none-any.whl", hash = "sha256:1311f10e8b895935241623731c2ba64f4c455287888b18189350b67134a822e8", size = 78154, upload-time = "2019-08-30T21:37:03.543Z" },
-]
-
 [[package]]
 name = "tiktoken"
 version = "0.12.0"