Compare commits
feat/face-... → main (10 commits)

- 4855bde1a4
- ff93356a9a
- c938345fc2
- 5d90ff4c44
- b71ec5c76a
- b3caff0f90
- 2e717ec277
- b53bf872a5
- 1337b1f06b
- f79b65a6fa
@@ -24,6 +24,7 @@ dependencies = [
     "sphinx-rtd-theme>=3.0.2",
     "tf-keras>=2.20.1",
     "torch>=2.8.0",
+    "tornado ; sys_platform == 'win32'",
     "uvicorn>=0.37.0",
 ]

@@ -4,6 +4,7 @@ University within the Software Project course.

 © Copyright Utrecht University (Department of Information and Computing Sciences)
 """

+import logging
 from functools import singledispatchmethod

 from slugify import slugify

@@ -30,7 +31,6 @@ from control_backend.schemas.program import (
     BasicNorm,
     ConditionalNorm,
     EmotionBelief,
-    FaceBelief,
     GestureAction,
     Goal,
     InferredBelief,

@@ -67,6 +67,7 @@ class AgentSpeakGenerator:
     """

     _asp: AstProgram
+    logger = logging.getLogger(__name__)

     def generate(self, program: Program) -> str:
         """

@@ -106,7 +107,7 @@ class AgentSpeakGenerator:
         check if a keyword is a substring of the user's message.

         The generated rule has the form:
-        keyword_said(Keyword) :- user_said(Message) & .substring(Keyword, Message, Pos) & Pos >= 0
+        keyword_said(Keyword) :- user_said(Message) & .substring_case_insensitive(Keyword, Message, Pos) & Pos >= 0

         This enables the system to trigger behaviors based on keyword detection.
         """

@@ -118,7 +119,7 @@ class AgentSpeakGenerator:
             AstRule(
                 AstLiteral("keyword_said", [keyword]),
                 AstLiteral("user_said", [message])
-                & AstLiteral(".substring", [keyword, message, position])
+                & AstLiteral(".substring_case_insensitive", [keyword, message, position])
                 & (position >= 0),
             )
         )

@@ -134,7 +135,6 @@ class AgentSpeakGenerator:
         """
         self._add_reply_with_goal_plan()
         self._add_say_plan()
-        self._add_reply_plan()
         self._add_notify_cycle_plan()

     def _add_reply_with_goal_plan(self):

@@ -198,40 +198,6 @@ class AgentSpeakGenerator:
             )
         )

-    def _add_reply_plan(self):
-        """
-        Adds a plan for general reply actions.
-
-        This plan handles general reply actions where the agent needs to respond
-        to user input without a specific conversational goal. It:
-        1. Marks that the agent has responded this turn
-        2. Gathers all active norms
-        3. Generates a reply based on the user message and norms
-
-        Trigger: +!reply
-        Context: user_said(Message)
-        """
-        self._asp.plans.append(
-            AstPlan(
-                TriggerType.ADDED_GOAL,
-                AstLiteral("reply"),
-                [AstLiteral("user_said", [AstVar("Message")])],
-                [
-                    AstStatement(StatementType.ADD_BELIEF, AstLiteral("responded_this_turn")),
-                    AstStatement(
-                        StatementType.DO_ACTION,
-                        AstLiteral(
-                            "findall",
-                            [AstVar("Norm"), AstLiteral("norm", [AstVar("Norm")]), AstVar("Norms")],
-                        ),
-                    ),
-                    AstStatement(
-                        StatementType.DO_ACTION,
-                        AstLiteral("reply", [AstVar("Message"), AstVar("Norms")]),
-                    ),
-                ],
-            )
-        )
-
     def _add_notify_cycle_plan(self):
         """

@@ -269,6 +235,39 @@ class AgentSpeakGenerator:
             )
         )

+    def _add_stop_plan(self, phase: Phase):
+        """
+        Adds a plan to stop the program. This just skips to the end phase,
+        where there is no behavior defined.
+        """
+        self._asp.plans.append(
+            AstPlan(
+                TriggerType.ADDED_GOAL,
+                AstLiteral("stop"),
+                [AstLiteral("phase", [AstString(phase.id)])],
+                [
+                    AstStatement(
+                        StatementType.DO_ACTION,
+                        AstLiteral(
+                            "notify_transition_phase",
+                            [
+                                AstString(phase.id),
+                                AstString("end")
+                            ]
+                        )
+                    ),
+                    AstStatement(
+                        StatementType.REMOVE_BELIEF,
+                        AstLiteral("phase", [AstVar("Phase")]),
+                    ),
+                    AstStatement(
+                        StatementType.ADD_BELIEF,
+                        AstLiteral("phase", [AstString("end")])
+                    )
+                ]
+            )
+        )
+
     def _process_phases(self, phases: list[Phase]) -> None:
         """
         Processes all phases in the program and their transitions.

@@ -285,21 +284,6 @@ class AgentSpeakGenerator:
             self._process_phase(curr_phase)
             self._add_phase_transition(curr_phase, next_phase)

-        # End phase behavior
-        # When deleting this, the entire `reply` plan and action can be deleted
-        self._asp.plans.append(
-            AstPlan(
-                type=TriggerType.ADDED_BELIEF,
-                trigger_literal=AstLiteral("user_said", [AstVar("Message")]),
-                context=[AstLiteral("phase", [AstString("end")])],
-                body=[
-                    AstStatement(
-                        StatementType.DO_ACTION, AstLiteral("notify_user_said", [AstVar("Message")])
-                    ),
-                    AstStatement(StatementType.ACHIEVE_GOAL, AstLiteral("reply")),
-                ],
-            )
-        )
-
     def _process_phase(self, phase: Phase) -> None:
         """

@@ -326,6 +310,9 @@ class AgentSpeakGenerator:
         for trigger in phase.triggers:
             self._process_trigger(trigger, phase)

+        # Add force transition to end phase
+        self._add_stop_plan(phase)
+
     def _add_phase_transition(self, from_phase: Phase | None, to_phase: Phase | None) -> None:
         """
         Adds plans for transitioning between phases.

@@ -501,9 +488,13 @@ class AgentSpeakGenerator:
             if isinstance(step, Goal):
                 subgoals.append(step)

-        if not goal.can_fail and not continues_response:
+        if not goal.can_fail:
             body.append(AstStatement(StatementType.ADD_BELIEF, self._astify(goal, achieved=True)))

+        if len(body) == 0:
+            self.logger.warning("Goal with no plan detected: %s", goal.name)
+            body.append(AstStatement(StatementType.EMPTY, AstLiteral("true")))
+
         self._asp.plans.append(AstPlan(TriggerType.ADDED_GOAL, self._astify(goal), context, body))

         self._asp.plans.append(

@@ -564,10 +555,10 @@ class AgentSpeakGenerator:
                 )
             )
         for step in trigger.plan.steps:
-            body.append(self._step_to_statement(step))
             if isinstance(step, Goal):
-                step.can_fail = False  # triggers are continuous sequence
-                subgoals.append(step)
+                new_step = step.model_copy(update={"can_fail": False})  # triggers are sequence
+                subgoals.append(new_step)
+            body.append(self._step_to_statement(step))

         # Arbitrary wait for UI to display nicely
         body.append(

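The switch from mutating `step.can_fail` to `model_copy(update=...)` lines up with `ProgramElement` being hashable and frozen (`model_config = {"frozen": True}`, visible in the schema diff further down): frozen pydantic models reject attribute assignment, so a modified copy must be derived instead. A minimal sketch with a stand-in model (not the project's actual `Goal`):

```python
# Sketch: why model_copy is needed once the model is frozen.
# `Step` is a stand-in here, not the project's real Goal/ProgramElement.
from pydantic import BaseModel, ValidationError

class Step(BaseModel):
    model_config = {"frozen": True}  # hashable, like ProgramElement
    name: str
    can_fail: bool = True

step = Step(name="greet")
try:
    step.can_fail = False  # frozen instance: pydantic raises ValidationError
except ValidationError:
    pass

new_step = step.model_copy(update={"can_fail": False})  # derive a copy instead
assert step.can_fail is True and new_step.can_fail is False
```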
@@ -611,6 +602,7 @@ class AgentSpeakGenerator:
         - check_triggers: When no triggers are applicable
         - transition_phase: When phase transition conditions aren't met
         - force_transition_phase: When forced transitions aren't possible
+        - stop: When we are already in the end phase
         """
         # Trigger fallback
         self._asp.plans.append(

@@ -642,6 +634,16 @@ class AgentSpeakGenerator:
             )
         )

+        # Stop fallback
+        self._asp.plans.append(
+            AstPlan(
+                TriggerType.ADDED_GOAL,
+                AstLiteral("stop"),
+                [],
+                [AstStatement(StatementType.EMPTY, AstLiteral("true"))],
+            )
+        )
+
     @singledispatchmethod
     def _astify(self, element: ProgramElement) -> AstExpression:
         """

@@ -688,10 +690,6 @@ class AgentSpeakGenerator:
     def _(self, eb: EmotionBelief) -> AstExpression:
         return AstLiteral("emotion_detected", [AstAtom(eb.emotion)])

-    @_astify.register
-    def _(self, fb: FaceBelief) -> AstExpression:
-        return AstLiteral("face_present")
-
     @_astify.register
     def _(self, ib: InferredBelief) -> AstExpression:
         """

@@ -176,6 +176,8 @@ class BDICoreAgent(BaseAgent):
                 self._force_norm(msg.body)
             case "force_next_phase":
                 self._force_next_phase()
+            case "stop":
+                self._stop()
             case _:
                 self.logger.warning("Received unknown user interruption: %s", msg)

@@ -335,6 +337,11 @@ class BDICoreAgent(BaseAgent):

         self.logger.info("Manually forced phase transition.")

+    def _stop(self):
+        self._set_goal("stop")
+
+        self.logger.info("Stopped the program (skipped to end phase).")
+
     def _add_custom_actions(self) -> None:
         """
         Add any custom actions here. Inside `@self.actions.add()`, the first argument is

@@ -342,6 +349,28 @@ class BDICoreAgent(BaseAgent):
         the function expects (which will be located in `term.args`).
         """

+        @self.actions.add(".substring_case_insensitive", 3)
+        @agentspeak.optimizer.function_like
+        def _substring(agent, term, intention):
+            """
+            Find out if a string is a substring of another (case insensitive). Copied mostly from
+            the agentspeak library method .substring.
+            """
+            needle = agentspeak.asl_str(agentspeak.grounded(term.args[0], intention.scope)).lower()
+            haystack = agentspeak.asl_str(agentspeak.grounded(term.args[1], intention.scope)).lower()
+
+            choicepoint = object()
+
+            pos = haystack.find(needle)
+            while pos != -1:
+                intention.stack.append(choicepoint)
+
+                if agentspeak.unify(term.args[2], pos, intention.scope, intention.stack):
+                    yield
+
+                agentspeak.reroll(intention.scope, intention.stack, choicepoint)
+                pos = haystack.find(needle, pos + 1)
+
         @self.actions.add(".reply", 2)
         def _reply(agent, term, intention):
             """

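The action registered above is a Python generator: each `yield` offers one match position, and the choicepoint lets the interpreter backtrack and resume the search at the next position. With the agentspeak grounding and unification machinery stripped away, the core matching logic amounts to this sketch:

```python
# Minimal sketch of the matching behind .substring_case_insensitive,
# without the agentspeak grounding/unification/backtracking machinery.
def substring_positions_ci(needle: str, haystack: str):
    """Yield every index where needle occurs in haystack, ignoring case."""
    needle, haystack = needle.lower(), haystack.lower()
    pos = haystack.find(needle)
    while pos != -1:
        yield pos
        pos = haystack.find(needle, pos + 1)

assert list(substring_positions_ci("Hi", "hi HI hI")) == [0, 3, 6]
```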
@@ -467,7 +496,6 @@ class BDICoreAgent(BaseAgent):
                 body=str(trigger_name),
             )

-            # TODO: check with Pim
             self.add_behavior(self.send(msg))

             yield

@@ -14,7 +14,7 @@ from control_backend.agents.perception.visual_emotion_recognition_agent.visual_e
 )
 from control_backend.core.agent_system import InternalMessage
 from control_backend.core.config import settings
-from control_backend.schemas.belief_message import Belief, BeliefMessage
+from control_backend.schemas.belief_message import Belief


 class VisualEmotionRecognitionAgent(BaseAgent):

@@ -44,7 +44,6 @@ class VisualEmotionRecognitionAgent(BaseAgent):
         self.timeout_ms = timeout_ms
         self.window_duration = window_duration
         self.min_frames_required = min_frames_required
-        self._face_detected = False

         # Pause functionality
         # NOTE: flag is set when running, cleared when paused

@@ -90,9 +89,6 @@ class VisualEmotionRecognitionAgent(BaseAgent):
         # Tracks counts of detected emotions per face index
         face_stats = defaultdict(Counter)

-        # How many times a face has been detected
-        face_detection_yes_no = [0, 0]
-
         prev_dominant_emotions = set()

         while self._running:

@@ -101,8 +97,8 @@ class VisualEmotionRecognitionAgent(BaseAgent):

                 width, height, image_bytes = await self.video_in_socket.recv_multipart()

-                width = int.from_bytes(width, "little")
-                height = int.from_bytes(height, "little")
+                width = int.from_bytes(width, 'little')
+                height = int.from_bytes(height, 'little')

                 # Convert bytes to a numpy buffer
                 image_array = np.frombuffer(image_bytes, np.uint8)

@@ -111,13 +107,6 @@ class VisualEmotionRecognitionAgent(BaseAgent):

                 # Get the dominant emotion from each face
                 current_emotions = self.emotion_recognizer.sorted_dominant_emotions(frame)

-                # Update face face_detection_yes_no
-                if len(current_emotions) > 0:
-                    face_detection_yes_no[0] += 1
-                else:
-                    face_detection_yes_no[1] += 1
-
                 # Update emotion counts for each detected face
                 for i, emotion in enumerate(current_emotions):
                     face_stats[i][emotion] += 1

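What remains of the loop is the pure emotion windowing: per-frame dominant emotions are tallied per face index, each face's most frequent emotion is taken at the end of the window, and the resulting set is diffed against the previous window. A standalone sketch of that aggregation, with invented sample data:

```python
# Sketch of the windowed aggregation: tally emotions per face index,
# take each face's dominant emotion, diff against the previous window.
from collections import Counter, defaultdict

face_stats: defaultdict[int, Counter] = defaultdict(Counter)
frames = [["happy", "neutral"], ["happy", "sad"], ["happy", "sad"]]  # invented sample
for frame_emotions in frames:
    for i, emotion in enumerate(frame_emotions):
        face_stats[i][emotion] += 1

window_dominant = {counter.most_common(1)[0][0] for counter in face_stats.values()}
prev_dominant = {"happy", "neutral"}
appeared = window_dominant - prev_dominant     # {"sad"}: new belief to create
disappeared = prev_dominant - window_dominant  # {"neutral"}: belief to remove
```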
@@ -133,31 +122,18 @@ class VisualEmotionRecognitionAgent(BaseAgent):
                         dominant_emotion = counter.most_common(1)[0][0]
                         window_dominant_emotions.add(dominant_emotion)

-                    if (
-                        face_detection_yes_no[0] > face_detection_yes_no[1]
-                        and not self._face_detected
-                    ):
-                        self._face_detected = True
-                        await self._inform_face_detected()
-                    elif (
-                        face_detection_yes_no[0] <= face_detection_yes_no[1] and self._face_detected
-                    ):
-                        self._face_detected = False
-                        await self._inform_face_detected()
-
-                    face_detection_yes_no = [0, 0]
-
                     await self.update_emotions(prev_dominant_emotions, window_dominant_emotions)
                     prev_dominant_emotions = window_dominant_emotions
                     face_stats.clear()
                     next_window_time = time.time() + self.window_duration

             except zmq.Again:
                 self.logger.warning("No video frame received within timeout.")
                 pass

             except Exception as e:
                 self.logger.error(f"Error in emotion recognition loop: {e}")


     async def update_emotions(self, prev_emotions: set[str], emotions: set[str]):
         """
         Compare emotions from previous window and current emotions,

@@ -173,7 +149,9 @@ class VisualEmotionRecognitionAgent(BaseAgent):
         for emotion in emotions_to_remove:
             self.logger.info(f"Emotion '{emotion}' has disappeared.")
             try:
-                emotion_beliefs_remove.append(Belief(name="emotion_detected", arguments=[emotion]))
+                emotion_beliefs_remove.append(
+                    Belief(name="emotion_detected", arguments=[emotion], remove=True)
+                )
             except ValidationError:
                 self.logger.warning("Invalid belief for emotion removal: %s", emotion)

@@ -197,25 +175,11 @@ class VisualEmotionRecognitionAgent(BaseAgent):
         )
         await self.send(message)

-    async def _inform_face_detected(self):
-        if self._face_detected:
-            belief_message = BeliefMessage(create=[Belief(name="face_present")])
-        else:
-            belief_message = BeliefMessage(delete=[Belief(name="face_present")])
-
-        msg = InternalMessage(
-            to=settings.agent_settings.bdi_core_name,
-            thread="beliefs",
-            body=belief_message.model_dump_json(),
-        )
-
-        await self.send(msg)
-
     async def handle_message(self, msg: InternalMessage):
         """
         Handle incoming messages.

         Expects messages to pause or resume the Visual Emotion Recognition
         processing from User Interrupt Agent.

         :param msg: The received internal message.

@@ -240,3 +204,4 @@ class VisualEmotionRecognitionAgent(BaseAgent):
         """
         self.video_in_socket.close()
         await super().stop()

@@ -164,6 +164,12 @@ class UserInterruptAgent(BaseAgent):
                 else:
                     self.logger.info("Sent resume command.")

+            case "stop":
+                self.logger.debug(
+                    "Received stop command."
+                )
+                await self._send_stop_command()
+
             case "next_phase" | "reset_phase":
                 await self._send_experiment_control_to_bdi_core(event_type)
             case _:

@@ -422,4 +428,16 @@ class UserInterruptAgent(BaseAgent):
         )
         await self.send(vad_message)
         # Voice Activity Detection and Visual Emotion Recognition agents
         self.logger.info("Sent resume command to VAD and VED agents.")
+
+    async def _send_stop_command(self):
+        """
+        Send a command to the BDI to stop the program (i.e., skip to end phase).
+        """
+        msg = InternalMessage(
+            to=settings.agent_settings.bdi_core_name,
+            body="",
+            thread="stop"
+        )
+
+        await self.send(msg)

@@ -82,7 +82,7 @@ class BehaviourSettings(BaseModel):
     :ivar transcription_words_per_token: Estimated words per token for transcription timing.
     :ivar transcription_token_buffer: Buffer for transcription tokens.
     :ivar conversation_history_length_limit: The maximum amount of messages to extract beliefs from.
     :ivar visual_emotion_recognition_window_duration_s: Duration in seconds over which to aggregate
         emotions and update emotion beliefs.
     :ivar visual_emotion_recognition_min_frames_per_face: Minimum number of frames per face required
         to consider a face valid.

@@ -112,7 +112,7 @@ class BehaviourSettings(BaseModel):
     conversation_history_length_limit: int = 10

     # Visual Emotion Recognition settings
-    visual_emotion_recognition_window_duration_s: int = 3
+    visual_emotion_recognition_window_duration_s: int = 5
     visual_emotion_recognition_min_frames_per_face: int = 3
     # AgentSpeak related settings
     trigger_time_to_wait: int = 2000

@@ -7,7 +7,7 @@ University within the Software Project course.
 from enum import Enum
 from typing import Literal

-from pydantic import UUID4, BaseModel
+from pydantic import UUID4, BaseModel, field_validator


 class ProgramElement(BaseModel):

@@ -24,6 +24,13 @@ class ProgramElement(BaseModel):
     # To make program elements hashable
     model_config = {"frozen": True}

+    @field_validator("name")
+    @classmethod
+    def name_must_not_start_with_number(cls, v: str) -> str:
+        if v and v[0].isdigit():
+            raise ValueError('Field "name" must not start with a number.')
+        return v
+

 class LogicalOperator(Enum):
     """

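The new validator rejects element names that begin with a digit at construction time. A minimal sketch using a stand-in model, since `ProgramElement`'s full field set isn't shown in this diff:

```python
# Sketch of the name validator on a stand-in model.
from pydantic import BaseModel, ValidationError, field_validator

class Element(BaseModel):
    name: str

    @field_validator("name")
    @classmethod
    def name_must_not_start_with_number(cls, v: str) -> str:
        if v and v[0].isdigit():
            raise ValueError('Field "name" must not start with a number.')
        return v

Element(name="greeting")        # accepted
try:
    Element(name="2greeting")   # rejected: starts with a digit
except ValidationError as exc:
    print(exc)
```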
@@ -41,8 +48,8 @@ class LogicalOperator(Enum):
     OR = "OR"


-type Belief = KeywordBelief | SemanticBelief | InferredBelief | EmotionBelief | FaceBelief
-type BasicBelief = KeywordBelief | SemanticBelief | EmotionBelief | FaceBelief
+type Belief = KeywordBelief | SemanticBelief | InferredBelief | EmotionBelief
+type BasicBelief = KeywordBelief | SemanticBelief | EmotionBelief


 class KeywordBelief(ProgramElement):

@@ -117,15 +124,6 @@ class EmotionBelief(ProgramElement):
     emotion: str


-class FaceBelief(ProgramElement):
-    """
-    Represents the belief that at least one face is currently in view.
-    """
-
-    name: str = ""
-    face_present: bool
-
-
 class Norm(ProgramElement):
     """
     Base class for behavioral norms that guide the robot's interactions.

uv.lock (generated): 15 changes
@@ -1,5 +1,5 @@
 version = 1
-revision = 3
+revision = 2
 requires-python = ">=3.13"
 resolution-markers = [
     "python_full_version >= '3.14' and sys_platform == 'darwin'",

@@ -1524,6 +1524,7 @@ dependencies = [
     { name = "sphinx-rtd-theme" },
     { name = "tf-keras" },
     { name = "torch" },
+    { name = "tornado", marker = "sys_platform == 'win32'" },
     { name = "uvicorn" },
 ]

@@ -1579,6 +1580,7 @@ requires-dist = [
     { name = "sphinx-rtd-theme", specifier = ">=3.0.2" },
     { name = "tf-keras", specifier = ">=2.20.1" },
     { name = "torch", specifier = ">=2.8.0" },
+    { name = "tornado", marker = "sys_platform == 'win32'" },
     { name = "uvicorn", specifier = ">=0.37.0" },
 ]

@@ -2724,6 +2726,17 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/52/27/7fc2d7435af044ffbe0b9b8e98d99eac096d43f128a5cde23c04825d5dcf/torchaudio-2.8.0-cp313-cp313t-win_amd64.whl", hash = "sha256:d4a715d09ac28c920d031ee1e60ecbc91e8a5079ad8c61c0277e658436c821a6", size = 2549553, upload-time = "2025-08-06T14:59:00.019Z" },
 ]

+[[package]]
+name = "tornado"
+version = "6.5.4"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/37/1d/0a336abf618272d53f62ebe274f712e213f5a03c0b2339575430b8362ef2/tornado-6.5.4.tar.gz", hash = "sha256:a22fa9047405d03260b483980635f0b041989d8bcc9a313f8fe18b411d84b1d7", size = 513632, upload-time = "2025-12-15T19:21:03.836Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/0c/1a/d7592328d037d36f2d2462f4bc1fbb383eec9278bc786c1b111cbbd44cfa/tornado-6.5.4-cp39-abi3-win32.whl", hash = "sha256:1768110f2411d5cd281bac0a090f707223ce77fd110424361092859e089b38d1", size = 446481, upload-time = "2025-12-15T19:21:00.008Z" },
+    { url = "https://files.pythonhosted.org/packages/d6/6d/c69be695a0a64fd37a97db12355a035a6d90f79067a3cf936ec2b1dc38cd/tornado-6.5.4-cp39-abi3-win_amd64.whl", hash = "sha256:fa07d31e0cd85c60713f2b995da613588aa03e1303d75705dca6af8babc18ddc", size = 446886, upload-time = "2025-12-15T19:21:01.287Z" },
+    { url = "https://files.pythonhosted.org/packages/50/49/8dc3fd90902f70084bd2cd059d576ddb4f8bb44c2c7c0e33a11422acb17e/tornado-6.5.4-cp39-abi3-win_arm64.whl", hash = "sha256:053e6e16701eb6cbe641f308f4c1a9541f91b6261991160391bfc342e8a551a1", size = 445910, upload-time = "2025-12-15T19:21:02.571Z" },
+]
+
 [[package]]
 name = "tqdm"
 version = "4.67.1"