mirror of
https://github.com/Significant-Gravitas/AutoGPT.git
synced 2026-04-08 03:00:28 -04:00
feat(classic): add Rich interactive selector for command approval
Adds a custom Rich-based interactive selector for the command approval workflow. Features include: - Arrow key navigation for selecting approval options - Tab to add context to any selection (e.g., "Once + also check file x") - Dedicated inline feedback option with shadow placeholder text - Quick select with number keys 1-5 - Works within existing asyncio event loop (no prompt_toolkit dependency) Also adds UIProvider abstraction pattern for future UI implementations. Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
@@ -47,11 +47,13 @@ from autogpt.app.config import (
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from autogpt.agents.agent import Agent
|
||||
from autogpt.app.ui.protocol import UIProvider
|
||||
|
||||
from .configurator import apply_overrides_to_config
|
||||
from .input import clean_input
|
||||
from .setup import apply_overrides_to_ai_settings, interactively_revise_ai_settings
|
||||
from .spinner import Spinner
|
||||
from .ui import create_ui_provider
|
||||
from .utils import (
|
||||
coroutine,
|
||||
get_legal_warning,
|
||||
@@ -110,6 +112,8 @@ async def run_auto_gpt(
|
||||
def prompt_permission(cmd: str, args_str: str, args: dict) -> ApprovalScope:
|
||||
"""Prompt user for command permission.
|
||||
|
||||
Uses an interactive selector with arrow keys and a feedback option.
|
||||
|
||||
Args:
|
||||
cmd: Command name.
|
||||
args_str: Formatted arguments string.
|
||||
@@ -119,31 +123,37 @@ async def run_auto_gpt(
|
||||
ApprovalScope indicating user's choice.
|
||||
|
||||
Raises:
|
||||
UserFeedbackProvided: If user types feedback instead of choosing an option.
|
||||
UserFeedbackProvided: If user selects feedback option.
|
||||
"""
|
||||
print(f"\n{Fore.CYAN}{cmd}({args_str}){Style.RESET_ALL}")
|
||||
print(
|
||||
f" {Fore.GREEN}[1]{Style.RESET_ALL} Once "
|
||||
f"{Fore.GREEN}[2]{Style.RESET_ALL} Always (agent) "
|
||||
f"{Fore.GREEN}[3]{Style.RESET_ALL} Always (all) "
|
||||
f"{Fore.RED}[4]{Style.RESET_ALL} Deny"
|
||||
)
|
||||
response = clean_input(" Choice or feedback: ")
|
||||
from autogpt.app.ui.rich_select import RichSelect
|
||||
|
||||
if response == "1":
|
||||
return ApprovalScope.ONCE
|
||||
elif response == "2":
|
||||
return ApprovalScope.AGENT
|
||||
elif response == "3":
|
||||
return ApprovalScope.WORKSPACE
|
||||
elif response == "4":
|
||||
return ApprovalScope.DENY
|
||||
elif response.strip():
|
||||
# Any other non-empty input is feedback for the agent
|
||||
raise UserFeedbackProvided(response)
|
||||
else:
|
||||
# Empty input defaults to deny
|
||||
return ApprovalScope.DENY
|
||||
choices = [
|
||||
"Once",
|
||||
"Always (this agent)",
|
||||
"Always (all agents)",
|
||||
"Deny",
|
||||
]
|
||||
|
||||
scope_map = {
|
||||
0: ApprovalScope.ONCE,
|
||||
1: ApprovalScope.AGENT,
|
||||
2: ApprovalScope.WORKSPACE,
|
||||
3: ApprovalScope.DENY,
|
||||
}
|
||||
|
||||
selector = RichSelect(
|
||||
choices=choices,
|
||||
title="Approve command execution?",
|
||||
subtitle=f"{cmd}({args_str})",
|
||||
)
|
||||
result = selector.run()
|
||||
|
||||
# If feedback was provided, raise it
|
||||
if result.has_feedback and result.feedback:
|
||||
raise UserFeedbackProvided(result.feedback)
|
||||
|
||||
scope = scope_map.get(result.index, ApprovalScope.DENY)
|
||||
return scope
|
||||
|
||||
# Set up logging module
|
||||
if speak:
|
||||
@@ -416,9 +426,13 @@ async def run_auto_gpt(
|
||||
#################
|
||||
# Run the Agent #
|
||||
#################
|
||||
try:
|
||||
await run_interaction_loop(agent)
|
||||
except AgentTerminated:
|
||||
# Create UI provider for terminal output
|
||||
ui_provider = create_ui_provider(
|
||||
plain_output=config.logging.plain_console_output,
|
||||
)
|
||||
|
||||
async def handle_agent_termination():
|
||||
"""Handle agent termination by saving state."""
|
||||
agent_id = agent.state.agent_id
|
||||
logger.info(f"Saving state of {agent_id}...")
|
||||
|
||||
@@ -432,6 +446,11 @@ async def run_auto_gpt(
|
||||
save_as_id.strip() if not save_as_id.isspace() else None
|
||||
)
|
||||
|
||||
try:
|
||||
await run_interaction_loop(agent, ui_provider)
|
||||
except AgentTerminated:
|
||||
await handle_agent_termination()
|
||||
|
||||
|
||||
@coroutine
|
||||
async def run_auto_gpt_server(
|
||||
@@ -529,11 +548,14 @@ class UserFeedback(str, enum.Enum):
|
||||
|
||||
async def run_interaction_loop(
|
||||
agent: "Agent",
|
||||
ui_provider: Optional["UIProvider"] = None,
|
||||
) -> None:
|
||||
"""Run the main interaction loop for the agent.
|
||||
|
||||
Args:
|
||||
agent: The agent to run the interaction loop for.
|
||||
ui_provider: Optional UI provider for displaying output.
|
||||
If not provided, a terminal provider will be created.
|
||||
|
||||
Returns:
|
||||
None
|
||||
@@ -543,9 +565,17 @@ async def run_interaction_loop(
|
||||
ai_profile = agent.state.ai_profile
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Create default UI provider if not provided
|
||||
if ui_provider is None:
|
||||
ui_provider = create_ui_provider(
|
||||
plain_output=app_config.logging.plain_console_output,
|
||||
)
|
||||
assert ui_provider is not None # Satisfy type checker
|
||||
|
||||
cycle_budget = cycles_remaining = _get_cycle_budget(
|
||||
app_config.continuous_mode, app_config.continuous_limit
|
||||
)
|
||||
# Keep spinner for signal handler compatibility (but use UI provider in loop)
|
||||
spinner = Spinner(
|
||||
"Thinking...", plain_output=app_config.logging.plain_console_output
|
||||
)
|
||||
@@ -597,7 +627,7 @@ async def run_interaction_loop(
|
||||
handle_stop_signal()
|
||||
# Have the agent determine the next action to take.
|
||||
if not (_ep := agent.event_history.current_episode) or _ep.result:
|
||||
with spinner:
|
||||
async with ui_provider.show_spinner("Thinking..."):
|
||||
try:
|
||||
action_proposal = await agent.propose_action()
|
||||
except InvalidAgentResponseError as e:
|
||||
@@ -621,13 +651,19 @@ async def run_interaction_loop(
|
||||
###############
|
||||
# Update User #
|
||||
###############
|
||||
# Print the assistant's thoughts and the next command to the user.
|
||||
update_user(
|
||||
ai_profile,
|
||||
action_proposal,
|
||||
# Display the assistant's thoughts and the next command via UI provider
|
||||
await ui_provider.display_thoughts(
|
||||
ai_name=ai_profile.ai_name,
|
||||
thoughts=action_proposal.thoughts,
|
||||
speak_mode=app_config.tts_config.speak_mode,
|
||||
)
|
||||
|
||||
if action_proposal.use_tool:
|
||||
await ui_provider.display_command(
|
||||
name=action_proposal.use_tool.name,
|
||||
arguments=action_proposal.use_tool.arguments,
|
||||
)
|
||||
|
||||
# Permission manager handles per-command approval during execute()
|
||||
handle_stop_signal()
|
||||
|
||||
@@ -647,18 +683,19 @@ async def run_interaction_loop(
|
||||
cycles_remaining -= 1
|
||||
except UserFeedbackProvided as e:
|
||||
result = await agent.do_not_execute(action_proposal, e.feedback)
|
||||
logger.info(
|
||||
await ui_provider.display_message(
|
||||
f"Feedback provided: {e.feedback}",
|
||||
extra={"title": "USER:", "title_color": Fore.MAGENTA},
|
||||
title="USER:",
|
||||
)
|
||||
|
||||
if result.status == "success":
|
||||
logger.info(result, extra={"title": "SYSTEM:", "title_color": Fore.YELLOW})
|
||||
await ui_provider.display_result(str(result), is_error=False)
|
||||
elif result.status == "error":
|
||||
logger.warning(
|
||||
error_msg = (
|
||||
f"Command {action_proposal.use_tool.name} returned an error: "
|
||||
f"{result.error or result.reason}"
|
||||
)
|
||||
await ui_provider.display_result(error_msg, is_error=True)
|
||||
|
||||
|
||||
def update_user(
|
||||
|
||||
23
classic/original_autogpt/autogpt/app/ui/__init__.py
Normal file
23
classic/original_autogpt/autogpt/app/ui/__init__.py
Normal file
@@ -0,0 +1,23 @@
|
||||
"""UI providers for AutoGPT.
|
||||
|
||||
This package provides UI abstractions for the AutoGPT interaction loop,
|
||||
using Rich for beautiful terminal output.
|
||||
"""
|
||||
|
||||
from .protocol import ApprovalResult, UIProvider
|
||||
|
||||
__all__ = ["UIProvider", "ApprovalResult", "create_ui_provider"]
|
||||
|
||||
|
||||
def create_ui_provider(plain_output: bool = False) -> UIProvider:
    """Build the default UI provider.

    Args:
        plain_output: Disable spinners/colored output when True.

    Returns:
        A ready-to-use UIProvider implementation (terminal-based).
    """
    # Import kept local, as in the rest of the package — presumably to
    # avoid an import cycle at package load time; confirm before hoisting.
    from .terminal.provider import TerminalUIProvider

    return TerminalUIProvider(plain_output=plain_output)
|
||||
183
classic/original_autogpt/autogpt/app/ui/protocol.py
Normal file
183
classic/original_autogpt/autogpt/app/ui/protocol.py
Normal file
@@ -0,0 +1,183 @@
|
||||
"""Protocol defining the UI provider interface for AutoGPT."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from abc import ABC, abstractmethod
|
||||
from contextlib import asynccontextmanager
|
||||
from dataclasses import dataclass
|
||||
from enum import Enum
|
||||
from typing import TYPE_CHECKING, Any, AsyncIterator
|
||||
|
||||
from forge.permissions import ApprovalScope
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from autogpt.agents.prompt_strategies.one_shot import AssistantThoughts
|
||||
|
||||
|
||||
class MessageLevel(str, Enum):
    """Severity levels for messages shown to the user.

    Mixes in ``str`` so members compare equal to their plain string
    values (e.g. ``MessageLevel.INFO == "info"``).
    """

    DEBUG = "debug"
    INFO = "info"
    WARNING = "warning"
    ERROR = "error"
    SUCCESS = "success"
|
||||
|
||||
|
||||
@dataclass
class ApprovalResult:
    """Outcome of asking the user to approve a command.

    Attributes:
        scope: The approval scope the user picked.
        feedback: Free-text feedback, when the user typed any instead
            of (or in addition to) picking an option.
    """

    scope: ApprovalScope
    feedback: str | None = None
|
||||
|
||||
|
||||
class UIProvider(ABC):
    """Base interface for AutoGPT user-interface providers.

    A concrete provider owns every interaction with the user during the
    agent loop: showing progress, rendering thoughts/commands/results,
    and collecting input or approvals.
    """

    @abstractmethod
    @asynccontextmanager
    async def show_spinner(self, message: str) -> AsyncIterator[None]:
        """Display a progress indicator while the wrapped block runs.

        Args:
            message: Text shown next to the indicator.

        Yields:
            None
        """
        yield

    @abstractmethod
    async def prompt_input(self, prompt: str, default: str = "") -> str:
        """Ask the user for a line of text.

        Args:
            prompt: Message shown to the user.
            default: Value used when the user submits nothing.

        Returns:
            The text the user entered.
        """
        ...

    @abstractmethod
    async def prompt_permission(
        self, cmd: str, args_str: str, args: dict[str, Any]
    ) -> ApprovalResult:
        """Ask whether a command may be executed.

        Args:
            cmd: Command name.
            args_str: Arguments pre-formatted for display.
            args: Full arguments dictionary.

        Returns:
            The user's decision, optionally with feedback text.
        """
        ...

    @abstractmethod
    async def display_thoughts(
        self,
        ai_name: str,
        thoughts: "str | AssistantThoughts",
        speak_mode: bool = False,
    ) -> None:
        """Render the agent's thoughts.

        Args:
            ai_name: Name of the AI agent.
            thoughts: Raw string or structured thoughts object.
            speak_mode: Whether text-to-speech should be used.
        """
        ...

    @abstractmethod
    async def display_command(self, name: str, arguments: dict[str, Any]) -> None:
        """Render the command the agent wants to run next.

        Args:
            name: Command name.
            arguments: Command arguments.
        """
        ...

    @abstractmethod
    async def display_result(
        self, result: str, is_error: bool = False, title: str = "SYSTEM:"
    ) -> None:
        """Render the outcome of a command.

        Args:
            result: Result message.
            is_error: True when the command failed.
            title: Heading shown with the result.
        """
        ...

    @abstractmethod
    async def display_message(
        self,
        message: str,
        level: MessageLevel = MessageLevel.INFO,
        title: str | None = None,
        preserve_color: bool = False,
    ) -> None:
        """Render a general message.

        Args:
            message: Message content.
            level: Severity of the message.
            title: Optional heading/prefix.
            preserve_color: Keep embedded ANSI color codes when True.
        """
        ...

    @abstractmethod
    async def display_agent_selection(self, agents: list[str]) -> str:
        """Let the user pick one of the existing agents.

        Args:
            agents: IDs of the agents that already exist.

        Returns:
            The chosen agent ID, or an empty string to create a new one.
        """
        ...

    @abstractmethod
    async def confirm(self, message: str, default: bool = True) -> bool:
        """Ask a yes/no question.

        Args:
            message: The question to show.
            default: Answer assumed when the user just presses Enter.

        Returns:
            True for yes, False for no.
        """
        ...

    async def startup(self) -> None:
        """Hook run before the UI is used; override for initialization."""
        ...

    async def shutdown(self) -> None:
        """Hook run when the UI is torn down; override for cleanup."""
        ...
|
||||
307
classic/original_autogpt/autogpt/app/ui/rich_select.py
Normal file
307
classic/original_autogpt/autogpt/app/ui/rich_select.py
Normal file
@@ -0,0 +1,307 @@
|
||||
"""Rich-based interactive selector with inline feedback option.
|
||||
|
||||
A custom selector using Rich for display and raw terminal input for interaction.
|
||||
No prompt_toolkit dependency - works within existing async event loops.
|
||||
"""
|
||||
|
||||
import sys
|
||||
import termios
|
||||
import tty
|
||||
from dataclasses import dataclass
|
||||
|
||||
from rich.console import Console
|
||||
from rich.panel import Panel
|
||||
from rich.text import Text
|
||||
|
||||
|
||||
@dataclass
|
||||
class SelectionResult:
|
||||
"""Result of a selection with optional feedback."""
|
||||
|
||||
choice: str
|
||||
index: int
|
||||
feedback: str | None = None
|
||||
|
||||
@property
|
||||
def has_feedback(self) -> bool:
|
||||
return self.feedback is not None and self.feedback.strip() != ""
|
||||
|
||||
|
||||
def _getch() -> str:
    """Read one keypress from stdin without echo.

    Returns:
        A single character, a 3-character CSI sequence such as
        ``"\\x1b[A"`` for the arrow keys, or ``""`` on EOF.

    Reads straight from the file descriptor (bypassing Python's stdin
    buffering) so that ``select`` can reliably tell a lone Esc keypress
    apart from the start of an escape sequence. The previous
    implementation blocked forever after a lone Esc — waiting for a
    follow-up byte that never came — and silently swallowed the key
    pressed after an Esc that was not followed by "[".
    """
    # Local imports: only this low-level helper needs them.
    import os
    import select

    fd = sys.stdin.fileno()
    old_settings = termios.tcgetattr(fd)
    try:
        tty.setraw(fd)
        first = os.read(fd, 1)
        if not first:
            return ""  # EOF (e.g. stdin closed)
        if first == b"\x1b":
            # If no byte follows almost immediately, this was a lone Esc
            # keypress rather than a terminal escape sequence.
            if not select.select([fd], [], [], 0.05)[0]:
                return "\x1b"
            second = os.read(fd, 1)
            if second == b"[":
                final = os.read(fd, 1)
                return "\x1b[" + final.decode("ascii", errors="replace")
            # Other Esc-prefixed input (e.g. Alt+key): report plain Esc,
            # matching the original behavior.
            return "\x1b"
        lead = first[0]
        if lead >= 0xC0:
            # UTF-8 multi-byte character: read its continuation bytes so
            # typed non-ASCII feedback stays intact.
            n_more = 1 + (lead >= 0xE0) + (lead >= 0xF0)
            first += os.read(fd, n_more)
        return first.decode("utf-8", errors="replace")
    finally:
        termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
|
||||
|
||||
|
||||
class RichSelect:
    """Interactive selector with Rich formatting and inline feedback.

    Renders a list of options below a Rich panel header and handles raw
    keyboard input itself: arrow keys move, Enter selects, number keys
    quick-select, Tab attaches free-text "context" to the highlighted
    option, and a trailing pseudo-option captures inline feedback text.
    Redrawing works by clearing the previously printed lines with ANSI
    cursor-up/erase-line sequences, so the exact line count and print
    order below are load-bearing.
    """

    # Sentinel index reported when the feedback pseudo-option is chosen.
    FEEDBACK_INDEX = -1

    def __init__(
        self,
        choices: list[str],
        title: str = "Select an option",
        subtitle: str | None = None,
        default_index: int = 0,
        show_feedback_option: bool = True,
        feedback_placeholder: str = "Type feedback here...",
    ):
        self.choices = choices
        self.title = title
        self.subtitle = subtitle
        self.selected_index = default_index
        self.show_feedback_option = show_feedback_option
        self.feedback_placeholder = feedback_placeholder
        self.feedback_buffer = ""
        self.adding_context = False  # Tab mode for adding context to current selection
        self.context_buffer = ""
        self.console = Console()

    @property
    def _total_options(self) -> int:
        # Regular choices plus the feedback pseudo-option (when enabled).
        return len(self.choices) + (1 if self.show_feedback_option else 0)

    @property
    def _on_feedback_option(self) -> bool:
        # The feedback pseudo-option sits just past the last real choice.
        return self.show_feedback_option and self.selected_index == len(self.choices)

    def _render(self) -> str:
        """Render the selector as a plain string (raw ANSI escapes)."""
        lines = []

        # Regular choices
        for i, choice in enumerate(self.choices):
            if i == self.selected_index:
                if self.adding_context:
                    # Show choice + context being typed
                    ctx = self.context_buffer
                    lines.append(
                        f" \033[1;32m❯ {choice}\033[0m \033[33m+ {ctx}\033[5m█\033[0m"
                    )
                else:
                    lines.append(f" \033[1;32m❯ {choice}\033[0m")
            else:
                lines.append(f" \033[2m{choice}\033[0m")

        # Feedback option - inline text input
        if self.show_feedback_option:
            feedback_idx = len(self.choices)
            if self.selected_index == feedback_idx:
                if self.feedback_buffer:
                    # Show typed text with cursor
                    lines.append(f" \033[1;33m❯ {self.feedback_buffer}\033[5m█\033[0m")
                else:
                    # Show placeholder as shadow text with cursor
                    ph = self.feedback_placeholder
                    lines.append(f" \033[1;33m❯ \033[2;33m{ph}\033[0;5;33m█\033[0m")
            else:
                if self.feedback_buffer:
                    # Show typed text (not selected)
                    lines.append(f" \033[2;33m{self.feedback_buffer}\033[0m")
                else:
                    # Show placeholder (not selected)
                    lines.append(f" \033[2m{self.feedback_placeholder}\033[0m")

        # Help text
        lines.append("")
        # ANSI: \033[2m=dim, \033[1;36m=bold cyan, \033[0;2m=reset+dim, \033[0m=reset
        if self.adding_context:
            lines.append(
                " \033[2mType context, \033[1;36mEnter\033[0;2m confirm, "
                "\033[1;36mEsc\033[0;2m cancel\033[0m"
            )
        elif self._on_feedback_option:
            lines.append(
                " \033[1;36m↑↓\033[0;2m move \033[1;36mEnter\033[0;2m send "
                "\033[1;36mEsc\033[0;2m clear \033[2mjust start typing...\033[0m"
            )
        else:
            lines.append(
                " \033[1;36m↑↓\033[0;2m move \033[1;36mEnter\033[0;2m select "
                "\033[1;36mTab\033[0;2m +context \033[1;36m1-5\033[0;2m quick\033[0m"
            )

        return "\n".join(lines)

    def _clear_lines(self, n: int) -> None:
        """Clear n lines above cursor (move up + erase, n times)."""
        for _ in range(n):
            sys.stdout.write("\033[A")  # Move up
            sys.stdout.write("\033[2K")  # Clear line
        sys.stdout.flush()

    def run(self) -> SelectionResult:
        """Run the interactive selector.

        Blocks reading raw keypresses until a selection is made, then
        clears the menu, prints a one-line summary of the result, and
        returns a SelectionResult. Raises KeyboardInterrupt on Ctrl+C.
        """
        # Print header with Rich
        header = Text()
        header.append(f"{self.title}", style="bold cyan")
        if self.subtitle:
            header.append(f"\n{self.subtitle}", style="dim")

        self.console.print()
        self.console.print(Panel(header, border_style="cyan", padding=(0, 1)))
        self.console.print()

        # Must match the number of lines _render() emits, since
        # _clear_lines(num_lines) erases exactly that many on redraw.
        num_lines = self._total_options + 2  # options + blank + help

        # Initial render
        output = self._render()
        print(output, flush=True)

        while True:
            ch = _getch()

            # Handle context input mode (Tab on regular option)
            if self.adding_context:
                if ch == "\r" or ch == "\n":  # Enter - confirm with context
                    self._clear_lines(num_lines)
                    choice = self.choices[self.selected_index]
                    context = (
                        self.context_buffer if self.context_buffer.strip() else None
                    )
                    if context:
                        result_text = (
                            f" \033[1;32m✓\033[0m \033[32m{choice}\033[0m "
                            f"\033[33m+ {context}\033[0m"
                        )
                    else:
                        result_text = f" \033[1;32m✓\033[0m \033[32m{choice}\033[0m"
                    print(result_text)
                    print()
                    return SelectionResult(
                        choice=choice, index=self.selected_index, feedback=context
                    )
                elif ch == "\x1b":  # Escape - cancel context
                    self.adding_context = False
                    self.context_buffer = ""
                elif ch == "\x7f" or ch == "\x08":  # Backspace
                    self.context_buffer = self.context_buffer[:-1]
                elif ch == "\x03":  # Ctrl+C
                    raise KeyboardInterrupt()
                elif ch.isprintable():
                    self.context_buffer += ch

            # Navigation (when not in context mode)
            elif ch == "\x1b[A":  # Up arrow
                self.selected_index = (self.selected_index - 1) % self._total_options
            elif ch == "\x1b[B":  # Down arrow
                self.selected_index = (self.selected_index + 1) % self._total_options
            elif ch == "\x03":  # Ctrl+C
                raise KeyboardInterrupt()

            # Tab - add context to current selection (not on feedback option)
            elif ch == "\t" and not self._on_feedback_option:
                self.adding_context = True
                self.context_buffer = ""

            # Enter key
            elif ch == "\r" or ch == "\n":
                if self._on_feedback_option and self.feedback_buffer.strip():
                    # Submit feedback
                    self._clear_lines(num_lines)
                    fb = self.feedback_buffer
                    result_text = f" \033[1;32m✓\033[0m \033[33mFeedback: {fb}\033[0m"
                    print(result_text)
                    print()
                    return SelectionResult(
                        choice="feedback",
                        index=self.FEEDBACK_INDEX,
                        feedback=self.feedback_buffer,
                    )
                elif not self._on_feedback_option:
                    # Select regular option
                    self._clear_lines(num_lines)
                    choice = self.choices[self.selected_index]
                    result_text = f" \033[1;32m✓\033[0m \033[32m{choice}\033[0m"
                    print(result_text)
                    print()
                    return SelectionResult(
                        choice=choice,
                        index=self.selected_index,
                        feedback=None,
                    )
                # On feedback option with no text - do nothing (need to type something)

            # Escape key
            elif ch == "\x1b":
                if self._on_feedback_option and self.feedback_buffer:
                    # Clear feedback buffer
                    self.feedback_buffer = ""
                else:
                    # Exit with first option
                    self._clear_lines(num_lines)
                    choice = self.choices[0]
                    result_text = f" \033[1;32m✓\033[0m \033[32m{choice}\033[0m"
                    print(result_text)
                    print()
                    return SelectionResult(choice=choice, index=0, feedback=None)

            # Quick select numbers
            elif ch in "12345" and not self._on_feedback_option:
                # NOTE(review): `ch in "12345"` is True for ch == "" (EOF from
                # _getch) and would then crash on int("") — confirm EOF handling.
                idx = int(ch) - 1
                if idx < len(self.choices):
                    self._clear_lines(num_lines)
                    choice = self.choices[idx]
                    result_text = f" \033[1;32m✓\033[0m \033[32m{choice}\033[0m"
                    print(result_text)
                    print()
                    return SelectionResult(choice=choice, index=idx, feedback=None)
                elif idx == len(self.choices) and self.show_feedback_option:
                    # Jump to feedback option
                    self.selected_index = idx

            # Backspace (when on feedback option)
            elif (ch == "\x7f" or ch == "\x08") and self._on_feedback_option:
                self.feedback_buffer = self.feedback_buffer[:-1]

            # Printable character - if on feedback option, type directly
            elif ch.isprintable():
                if self._on_feedback_option:
                    self.feedback_buffer += ch

            # Re-render
            self._clear_lines(num_lines)
            output = self._render()
            print(output, flush=True)
|
||||
|
||||
|
||||
def select(
    choices: list[str],
    title: str = "Select an option",
    subtitle: str | None = None,
    default_index: int = 0,
    show_feedback_option: bool = True,
) -> SelectionResult:
    """Run a one-shot interactive selection and return its result.

    Thin wrapper that constructs a RichSelect with the given options
    and immediately runs it.
    """
    return RichSelect(
        choices=choices,
        title=title,
        subtitle=subtitle,
        default_index=default_index,
        show_feedback_option=show_feedback_option,
    ).run()
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Manual smoke test: run the selector standalone to exercise the
    # keyboard interactions (requires a real TTY; not run under import).
    result = select(
        choices=["Once", "Always (this agent)", "Always (all agents)", "Deny"],
        title="Approve command execution?",
        subtitle="execute_python_code(code='print(hello)')",
    )
    print(f"Result: choice={result.choice}, index={result.index}")
    if result.has_feedback:
        print(f"Feedback: {result.feedback}")
|
||||
@@ -0,0 +1,5 @@
|
||||
"""Terminal UI provider for AutoGPT."""
|
||||
|
||||
from .provider import TerminalUIProvider
|
||||
|
||||
__all__ = ["TerminalUIProvider"]
|
||||
316
classic/original_autogpt/autogpt/app/ui/terminal/provider.py
Normal file
316
classic/original_autogpt/autogpt/app/ui/terminal/provider.py
Normal file
@@ -0,0 +1,316 @@
|
||||
"""Terminal-based UI provider for AutoGPT.
|
||||
|
||||
This provider wraps the existing terminal-based interaction behavior,
|
||||
providing a seamless migration path while maintaining backward compatibility.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
from contextlib import asynccontextmanager
|
||||
from typing import TYPE_CHECKING, Any, AsyncIterator
|
||||
|
||||
import click
|
||||
from colorama import Fore, Style
|
||||
from forge.logging.utils import print_attribute, speak
|
||||
from forge.permissions import ApprovalScope
|
||||
|
||||
from ..protocol import ApprovalResult, MessageLevel, UIProvider
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from autogpt.agents.prompt_strategies.one_shot import AssistantThoughts
|
||||
|
||||
|
||||
class TerminalUIProvider(UIProvider):
|
||||
"""Terminal-based UI provider using colorama and click.
|
||||
|
||||
This provider maintains backward compatibility with the original
|
||||
AutoGPT terminal interface.
|
||||
"""
|
||||
|
||||
def __init__(self, plain_output: bool = False):
|
||||
"""Initialize the terminal UI provider.
|
||||
|
||||
Args:
|
||||
plain_output: If True, disable spinners and fancy output.
|
||||
"""
|
||||
self.plain_output = plain_output
|
||||
self.logger = logging.getLogger(__name__)
|
||||
self._spinner = None
|
||||
|
||||
@asynccontextmanager
|
||||
async def show_spinner(self, message: str) -> AsyncIterator[None]:
|
||||
"""Show a spinner/loading indicator.
|
||||
|
||||
Args:
|
||||
message: The message to display alongside the spinner.
|
||||
"""
|
||||
from autogpt.app.spinner import Spinner
|
||||
|
||||
spinner = Spinner(message, plain_output=self.plain_output)
|
||||
spinner.start()
|
||||
try:
|
||||
yield
|
||||
finally:
|
||||
spinner.stop()
|
||||
|
||||
async def prompt_input(self, prompt: str, default: str = "") -> str:
|
||||
"""Prompt the user for text input.
|
||||
|
||||
Args:
|
||||
prompt: The prompt message to display.
|
||||
default: Default value if user just presses Enter.
|
||||
|
||||
Returns:
|
||||
The user's input string.
|
||||
"""
|
||||
try:
|
||||
return click.prompt(
|
||||
text=prompt, prompt_suffix=" ", default=default, show_default=False
|
||||
)
|
||||
except KeyboardInterrupt:
|
||||
self.logger.info("You interrupted AutoGPT")
|
||||
self.logger.info("Quitting...")
|
||||
raise SystemExit(0)
|
||||
|
||||
async def prompt_permission(
|
||||
self, cmd: str, args_str: str, args: dict[str, Any]
|
||||
) -> ApprovalResult:
|
||||
"""Prompt user for command permission.
|
||||
|
||||
Uses an interactive selector with arrow keys and Tab-to-add-context.
|
||||
|
||||
Args:
|
||||
cmd: Command name.
|
||||
args_str: Formatted arguments string.
|
||||
args: Full arguments dictionary.
|
||||
|
||||
Returns:
|
||||
ApprovalResult with the user's choice and optional feedback.
|
||||
"""
|
||||
from ..rich_select import RichSelect
|
||||
|
||||
# Map choices to approval scopes
|
||||
choices = [
|
||||
"Once",
|
||||
"Always (this agent)",
|
||||
"Always (all agents)",
|
||||
"Deny",
|
||||
]
|
||||
|
||||
scope_map = {
|
||||
0: ApprovalScope.ONCE,
|
||||
1: ApprovalScope.AGENT,
|
||||
2: ApprovalScope.WORKSPACE,
|
||||
3: ApprovalScope.DENY,
|
||||
}
|
||||
|
||||
try:
|
||||
selector = RichSelect(
|
||||
choices=choices,
|
||||
title="Approve command execution?",
|
||||
subtitle=f"{cmd}({args_str})",
|
||||
)
|
||||
result = selector.run()
|
||||
|
||||
scope = scope_map.get(result.index, ApprovalScope.DENY)
|
||||
|
||||
# If feedback was provided (via Tab context or inline typing)
|
||||
if result.has_feedback:
|
||||
return ApprovalResult(scope=scope, feedback=result.feedback)
|
||||
|
||||
return ApprovalResult(scope=scope)
|
||||
|
||||
except KeyboardInterrupt:
|
||||
self.logger.info("Command approval interrupted")
|
||||
return ApprovalResult(scope=ApprovalScope.DENY)
|
||||
|
||||
    async def display_thoughts(
        self,
        ai_name: str,
        thoughts: "str | AssistantThoughts",
        speak_mode: bool = False,
    ) -> None:
        """Display the agent's thoughts.

        Prints the thoughts (plus REASONING / PLAN / CRITICISM / SPEAK
        sections for structured thoughts) via print_attribute and the
        provider's logger.

        Args:
            ai_name: The name of the AI agent (upper-cased for the title).
            thoughts: The agent's thoughts (string or structured).
            speak_mode: Whether to use text-to-speech.
        """
        from forge.models.utils import ModelWithSummary

        from autogpt.agents.prompt_strategies.one_shot import AssistantThoughts

        # Normalize the three accepted shapes into one display string.
        thoughts_text = self._remove_ansi_escape(
            thoughts.text
            if isinstance(thoughts, AssistantThoughts)
            else (
                thoughts.summary()
                if isinstance(thoughts, ModelWithSummary)
                else thoughts
            )
        )
        print_attribute(
            f"{ai_name.upper()} THOUGHTS", thoughts_text, title_color=Fore.YELLOW
        )

        if isinstance(thoughts, AssistantThoughts):
            print_attribute(
                "REASONING",
                self._remove_ansi_escape(thoughts.reasoning),
                title_color=Fore.YELLOW,
            )
            if assistant_thoughts_plan := self._remove_ansi_escape(
                "\n".join(f"- {p}" for p in thoughts.plan)
            ):
                print_attribute("PLAN", "", title_color=Fore.YELLOW)
                # If it's a list, join it into a string
                # NOTE(review): assistant_thoughts_plan is built from
                # str.join above, so these list/dict branches look
                # unreachable unless _remove_ansi_escape can return
                # non-str values — confirm against its definition.
                if isinstance(assistant_thoughts_plan, list):
                    assistant_thoughts_plan = "\n".join(assistant_thoughts_plan)
                elif isinstance(assistant_thoughts_plan, dict):
                    assistant_thoughts_plan = str(assistant_thoughts_plan)

                # Split the input_string using the newline character and dashes
                lines = assistant_thoughts_plan.split("\n")
                for line in lines:
                    line = line.lstrip("- ")
                    self.logger.info(
                        line.strip(), extra={"title": "- ", "title_color": Fore.GREEN}
                    )
            print_attribute(
                "CRITICISM",
                self._remove_ansi_escape(thoughts.self_criticism),
                title_color=Fore.YELLOW,
            )

            # Speak the assistant's thoughts
            if assistant_thoughts_speak := self._remove_ansi_escape(thoughts.speak):
                if speak_mode:
                    speak(assistant_thoughts_speak)
                else:
                    print_attribute(
                        "SPEAK", assistant_thoughts_speak, title_color=Fore.YELLOW
                    )
        else:
            # NOTE(review): plain-string thoughts are spoken even when
            # speak_mode is False, unlike the structured branch above —
            # confirm this asymmetry is intended.
            speak(thoughts_text)
|
||||
|
||||
async def display_command(self, name: str, arguments: dict[str, Any]) -> None:
|
||||
"""Display the next command to be executed.
|
||||
|
||||
Args:
|
||||
name: The command name.
|
||||
arguments: The command arguments.
|
||||
"""
|
||||
print()
|
||||
safe_name = self._remove_ansi_escape(name)
|
||||
self.logger.info(
|
||||
f"COMMAND = {Fore.CYAN}{safe_name}{Style.RESET_ALL} "
|
||||
f"ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}",
|
||||
extra={
|
||||
"title": "NEXT ACTION:",
|
||||
"title_color": Fore.CYAN,
|
||||
"preserve_color": True,
|
||||
},
|
||||
)
|
||||
|
||||
async def display_result(
    self, result: str, is_error: bool = False, title: str = "SYSTEM:"
) -> None:
    """Display a command result.

    Args:
        result: The result message.
        is_error: Whether this is an error result.
        title: The title to show with the result.
    """
    # Errors go through the warning channel without a title; everything
    # else is rendered as a titled, yellow-highlighted info line.
    if not is_error:
        self.logger.info(result, extra={"title": title, "title_color": Fore.YELLOW})
        return
    self.logger.warning(result)
async def display_message(
    self,
    message: str,
    level: MessageLevel = MessageLevel.INFO,
    title: str | None = None,
    preserve_color: bool = False,
) -> None:
    """Display a general message.

    Args:
        message: The message content.
        level: The message severity level.
        title: Optional title/prefix for the message.
        preserve_color: Whether to preserve ANSI color codes.
    """
    extra: dict[str, Any] = {}
    if title:
        extra["title"] = title
    if preserve_color:
        extra["preserve_color"] = True

    if level == MessageLevel.SUCCESS:
        # Success messages are rendered green and always carry an extra dict.
        extra["color"] = Fore.GREEN
        self.logger.info(message, extra=extra)
        return

    # An empty extra dict is passed as None so the formatter ignores it.
    payload = extra or None
    if level == MessageLevel.DEBUG:
        self.logger.debug(message, extra=payload)
    elif level == MessageLevel.INFO:
        self.logger.info(message, extra=payload)
    elif level == MessageLevel.WARNING:
        self.logger.warning(message, extra=payload)
    elif level == MessageLevel.ERROR:
        self.logger.error(message, extra=payload)
async def display_agent_selection(self, agents: list[str]) -> str:
    """Display existing agents and let the user pick one.

    Args:
        agents: List of existing agent IDs.

    Returns:
        The selected agent ID, or an empty string to create a new agent.
    """
    listing = "\n".join(f"{i} - {agent_id}" for i, agent_id in enumerate(agents, 1))
    print("Existing agents\n---------------\n" + listing)
    selection = await self.prompt_input(
        "Enter the number or name of the agent to run,"
        " or hit enter to create a new one:"
    )

    import re

    # Purely numeric input selects an agent by its 1-based position.
    if re.match(r"^\d+$", selection.strip()):
        position = int(selection)
        if 1 <= position <= len(agents):
            return agents[position - 1]

    # Otherwise the input may name an agent directly.
    if selection in agents:
        return selection

    # Anything else (including empty input) means "create a new agent".
    return ""
async def confirm(self, message: str, default: bool = True) -> bool:
    """Ask the user a yes/no question.

    Args:
        message: The confirmation prompt.
        default: Value returned when the user just presses Enter.

    Returns:
        True if the user confirms, False otherwise.
    """
    # The hint shows which answer Enter will pick.
    hint = " [Y/n]" if default else " [y/N]"
    answer = (await self.prompt_input(message + hint)).lower()
    # Empty input falls back to the default choice.
    return default if answer == "" else answer in ("y", "yes")
def _remove_ansi_escape(self, s: str) -> str:
|
||||
"""Remove ANSI escape sequences from a string."""
|
||||
return s.replace("\x1B", "")
|
||||
125
classic/original_autogpt/poetry.lock
generated
125
classic/original_autogpt/poetry.lock
generated
@@ -1,4 +1,4 @@
|
||||
# This file is automatically @generated by Poetry 2.3.0 and should not be changed by hand.
|
||||
# This file is automatically @generated by Poetry 2.1.1 and should not be changed by hand.
|
||||
|
||||
[[package]]
|
||||
name = "agbenchmark"
|
||||
@@ -599,7 +599,7 @@ description = "The Blis BLAS-like linear algebra library, as a self-contained C-
|
||||
optional = false
|
||||
python-versions = "<3.15,>=3.9"
|
||||
groups = ["main"]
|
||||
markers = "python_version < \"3.14\""
|
||||
markers = "python_version <= \"3.13\""
|
||||
files = [
|
||||
{file = "blis-1.3.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:650f1d2b28e3c875927c63deebda463a6f9d237dff30e445bfe2127718c1a344"},
|
||||
{file = "blis-1.3.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9b0d42420ddd543eec51ccb99d38364a0c0833b6895eced37127822de6ecacff"},
|
||||
@@ -1071,7 +1071,6 @@ files = [
|
||||
{file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"},
|
||||
{file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"},
|
||||
]
|
||||
markers = {dev = "platform_system == \"Windows\" or sys_platform == \"win32\""}
|
||||
|
||||
[[package]]
|
||||
name = "coloredlogs"
|
||||
@@ -2054,7 +2053,7 @@ google-auth = ">=2.14.1,<3.0.0"
|
||||
googleapis-common-protos = ">=1.56.2,<2.0.0"
|
||||
grpcio = [
|
||||
{version = ">=1.75.1,<2.0.0", optional = true, markers = "python_version >= \"3.14\" and extra == \"grpc\""},
|
||||
{version = ">=1.49.1,<2.0.0", optional = true, markers = "python_version >= \"3.11\" and extra == \"grpc\" and python_version < \"3.14\""},
|
||||
{version = ">=1.49.1,<2.0.0", optional = true, markers = "python_version >= \"3.11\" and extra == \"grpc\""},
|
||||
]
|
||||
grpcio-status = [
|
||||
{version = ">=1.75.1,<2.0.0", optional = true, markers = "python_version >= \"3.14\" and extra == \"grpc\""},
|
||||
@@ -2152,7 +2151,7 @@ google-api-core = {version = ">=1.34.1,<2.0.dev0 || >=2.11.dev0,<3.0.0", extras
|
||||
google-auth = ">=2.14.1,<2.24.0 || >2.24.0,<2.25.0 || >2.25.0,<3.0.0"
|
||||
grpcio = [
|
||||
{version = ">=1.75.1,<2.0.0", markers = "python_version >= \"3.14\""},
|
||||
{version = ">=1.33.2,<2.0.0"},
|
||||
{version = ">=1.33.2,<2.0.0", markers = "python_version < \"3.14\""},
|
||||
]
|
||||
proto-plus = [
|
||||
{version = ">=1.25.0,<2.0.0", markers = "python_version >= \"3.13\""},
|
||||
@@ -2234,15 +2233,15 @@ files = [
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
google-api-core = ">=2.15.0,<3.0.0.dev0"
|
||||
google-auth = ">=2.26.1,<3.0.dev0"
|
||||
google-cloud-core = ">=2.3.0,<3.0.dev0"
|
||||
google-crc32c = ">=1.0,<2.0.dev0"
|
||||
google-api-core = ">=2.15.0,<3.0.0dev"
|
||||
google-auth = ">=2.26.1,<3.0dev"
|
||||
google-cloud-core = ">=2.3.0,<3.0dev"
|
||||
google-crc32c = ">=1.0,<2.0dev"
|
||||
google-resumable-media = ">=2.7.2"
|
||||
requests = ">=2.18.0,<3.0.0.dev0"
|
||||
requests = ">=2.18.0,<3.0.0dev"
|
||||
|
||||
[package.extras]
|
||||
protobuf = ["protobuf (<6.0.0.dev0)"]
|
||||
protobuf = ["protobuf (<6.0.0dev)"]
|
||||
tracing = ["opentelemetry-api (>=1.1.0)"]
|
||||
|
||||
[[package]]
|
||||
@@ -2904,7 +2903,6 @@ files = [
|
||||
{file = "iniconfig-2.3.0-py3-none-any.whl", hash = "sha256:f631c04d2c48c52b84d0d0549c99ff3859c98df65b3101406327ecc7d53fbf12"},
|
||||
{file = "iniconfig-2.3.0.tar.gz", hash = "sha256:c76315c77db068650d49c5b56314774a7804df16fee4402c1f19d6d15d8c4730"},
|
||||
]
|
||||
markers = {main = "extra == \"benchmark\""}
|
||||
|
||||
[[package]]
|
||||
name = "ipython"
|
||||
@@ -3156,7 +3154,7 @@ files = [
|
||||
|
||||
[package.dependencies]
|
||||
attrs = ">=22.2.0"
|
||||
jsonschema-specifications = ">=2023.3.6"
|
||||
jsonschema-specifications = ">=2023.03.6"
|
||||
referencing = ">=0.28.4"
|
||||
rpds-py = ">=0.25.0"
|
||||
|
||||
@@ -3304,7 +3302,7 @@ files = [
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
certifi = ">=14.5.14"
|
||||
certifi = ">=14.05.14"
|
||||
durationpy = ">=0.7"
|
||||
python-dateutil = ">=2.5.3"
|
||||
pyyaml = ">=5.4.1"
|
||||
@@ -3380,27 +3378,6 @@ files = [
|
||||
{file = "lief-0.14.1-cp39-cp39-win_amd64.whl", hash = "sha256:2db3eb282a35daf51f89c6509226668a08fb6a6d1f507dd549dd9f077585db11"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "linkify-it-py"
|
||||
version = "2.0.3"
|
||||
description = "Links recognition library with FULL unicode support."
|
||||
optional = false
|
||||
python-versions = ">=3.7"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "linkify-it-py-2.0.3.tar.gz", hash = "sha256:68cda27e162e9215c17d786649d1da0021a451bdc436ef9e0fa0ba5234b9b048"},
|
||||
{file = "linkify_it_py-2.0.3-py3-none-any.whl", hash = "sha256:6bcbc417b0ac14323382aef5c5192c0075bf8a9d6b41820a2b66371eac6b6d79"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
uc-micro-py = "*"
|
||||
|
||||
[package.extras]
|
||||
benchmark = ["pytest", "pytest-benchmark"]
|
||||
dev = ["black", "flake8", "isort", "pre-commit", "pyproject-flake8"]
|
||||
doc = ["myst-parser", "sphinx", "sphinx-book-theme"]
|
||||
test = ["coverage", "pytest", "pytest-cov"]
|
||||
|
||||
[[package]]
|
||||
name = "litellm"
|
||||
version = "1.80.0"
|
||||
@@ -3617,8 +3594,6 @@ files = [
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
linkify-it-py = {version = ">=1,<3", optional = true, markers = "extra == \"linkify\""}
|
||||
mdit-py-plugins = {version = ">=0.5.0", optional = true, markers = "extra == \"plugins\""}
|
||||
mdurl = ">=0.1,<1.0"
|
||||
|
||||
[package.extras]
|
||||
@@ -3840,26 +3815,6 @@ files = [
|
||||
{file = "mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "mdit-py-plugins"
|
||||
version = "0.5.0"
|
||||
description = "Collection of plugins for markdown-it-py"
|
||||
optional = false
|
||||
python-versions = ">=3.10"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "mdit_py_plugins-0.5.0-py3-none-any.whl", hash = "sha256:07a08422fc1936a5d26d146759e9155ea466e842f5ab2f7d2266dd084c8dab1f"},
|
||||
{file = "mdit_py_plugins-0.5.0.tar.gz", hash = "sha256:f4918cb50119f50446560513a8e311d574ff6aaed72606ddae6d35716fe809c6"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
markdown-it-py = ">=2.0.0,<5.0.0"
|
||||
|
||||
[package.extras]
|
||||
code-style = ["pre-commit"]
|
||||
rtd = ["myst-parser", "sphinx-book-theme"]
|
||||
testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"]
|
||||
|
||||
[[package]]
|
||||
name = "mdurl"
|
||||
version = "0.1.2"
|
||||
@@ -4339,7 +4294,7 @@ description = "Fundamental package for array computing in Python"
|
||||
optional = false
|
||||
python-versions = ">=3.11"
|
||||
groups = ["main"]
|
||||
markers = "python_version < \"3.14\""
|
||||
markers = "python_version <= \"3.13\""
|
||||
files = [
|
||||
{file = "numpy-2.4.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0cce2a669e3c8ba02ee563c7835f92c153cf02edff1ae05e1823f1dde21b16a5"},
|
||||
{file = "numpy-2.4.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:899d2c18024984814ac7e83f8f49d8e8180e2fbe1b2e252f2e7f1d06bea92425"},
|
||||
@@ -5061,7 +5016,6 @@ files = [
|
||||
{file = "pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746"},
|
||||
{file = "pluggy-1.6.0.tar.gz", hash = "sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3"},
|
||||
]
|
||||
markers = {main = "extra == \"benchmark\""}
|
||||
|
||||
[package.extras]
|
||||
dev = ["pre-commit", "tox"]
|
||||
@@ -5195,10 +5149,9 @@ files = [
|
||||
name = "prompt-toolkit"
|
||||
version = "3.0.52"
|
||||
description = "Library for building powerful interactive command lines in Python"
|
||||
optional = true
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
groups = ["main"]
|
||||
markers = "extra == \"benchmark\""
|
||||
files = [
|
||||
{file = "prompt_toolkit-3.0.52-py3-none-any.whl", hash = "sha256:9aac639a3bbd33284347de5ad8d68ecc044b91a762dc39b7c21095fcd6a19955"},
|
||||
{file = "prompt_toolkit-3.0.52.tar.gz", hash = "sha256:28cde192929c8e7321de85de1ddbe736f1375148b02f2e17edd840042b1be855"},
|
||||
@@ -6051,7 +6004,6 @@ files = [
|
||||
{file = "pytest-7.4.4-py3-none-any.whl", hash = "sha256:b090cdf5ed60bf4c45261be03239c2c1c22df034fbffe691abe93cd80cea01d8"},
|
||||
{file = "pytest-7.4.4.tar.gz", hash = "sha256:2cf0005922c6ace4a3e2ec8b4080eb0d9753fdc93107415332f50ce9e7994280"},
|
||||
]
|
||||
markers = {main = "extra == \"benchmark\""}
|
||||
|
||||
[package.dependencies]
|
||||
colorama = {version = "*", markers = "sys_platform == \"win32\""}
|
||||
@@ -6073,7 +6025,6 @@ files = [
|
||||
{file = "pytest_asyncio-0.23.8-py3-none-any.whl", hash = "sha256:50265d892689a5faefb84df80819d1ecef566eb3549cf915dfb33569359d1ce2"},
|
||||
{file = "pytest_asyncio-0.23.8.tar.gz", hash = "sha256:759b10b33a6dc61cce40a8bd5205e302978bbbcc00e279a8b61d9a6a3c82e4d3"},
|
||||
]
|
||||
markers = {main = "extra == \"benchmark\""}
|
||||
|
||||
[package.dependencies]
|
||||
pytest = ">=7.0.0,<9"
|
||||
@@ -6738,10 +6689,10 @@ files = [
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
botocore = ">=1.37.4,<2.0a0"
|
||||
botocore = ">=1.37.4,<2.0a.0"
|
||||
|
||||
[package.extras]
|
||||
crt = ["botocore[crt] (>=1.37.4,<2.0a0)"]
|
||||
crt = ["botocore[crt] (>=1.37.4,<2.0a.0)"]
|
||||
|
||||
[[package]]
|
||||
name = "selenium"
|
||||
@@ -7012,7 +6963,7 @@ description = "Industrial-strength Natural Language Processing (NLP) in Python"
|
||||
optional = false
|
||||
python-versions = "<3.15,>=3.9"
|
||||
groups = ["main"]
|
||||
markers = "python_version < \"3.14\""
|
||||
markers = "python_version <= \"3.13\""
|
||||
files = [
|
||||
{file = "spacy-3.8.11-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f9cc7f775cfc41ccb8be63bd6258a1ec4613d4ad3859f2ba2c079f34240b21f6"},
|
||||
{file = "spacy-3.8.11-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:be9d665be8581926fba4303543ba189d34e8517803052551b000cf1a1af33b87"},
|
||||
@@ -7354,26 +7305,6 @@ files = [
|
||||
doc = ["reno", "sphinx"]
|
||||
test = ["pytest", "tornado (>=4.5)", "typeguard"]
|
||||
|
||||
[[package]]
|
||||
name = "textual"
|
||||
version = "0.52.1"
|
||||
description = "Modern Text User Interface framework"
|
||||
optional = false
|
||||
python-versions = ">=3.8,<4.0"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "textual-0.52.1-py3-none-any.whl", hash = "sha256:960a19df2319482918b4a58736d9552cdc1ab65d170ba0bc15273ce0e1922b7a"},
|
||||
{file = "textual-0.52.1.tar.gz", hash = "sha256:4232e5c2b423ed7c63baaeb6030355e14e1de1b9df096c9655b68a1e60e4de5f"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
markdown-it-py = {version = ">=2.1.0", extras = ["linkify", "plugins"]}
|
||||
rich = ">=13.3.3"
|
||||
typing-extensions = ">=4.4.0,<5.0.0"
|
||||
|
||||
[package.extras]
|
||||
syntax = ["tree-sitter (>=0.20.1,<0.21.0)", "tree_sitter_languages (>=1.7.0)"]
|
||||
|
||||
[[package]]
|
||||
name = "thinc"
|
||||
version = "8.3.2"
|
||||
@@ -7453,7 +7384,7 @@ description = "A refreshing functional take on deep learning, compatible with yo
|
||||
optional = false
|
||||
python-versions = "<3.15,>=3.10"
|
||||
groups = ["main"]
|
||||
markers = "python_version < \"3.14\""
|
||||
markers = "python_version <= \"3.13\""
|
||||
files = [
|
||||
{file = "thinc-8.3.10-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:fbe0313cb3c898f4e6a3f13b704af51f4bf8f927078deb0fe2d6eaf3c6c5b31b"},
|
||||
{file = "thinc-8.3.10-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:892ac91cf7cc8d3ac9a4527c68ead37a96e87132c9f589de56b057b50358e895"},
|
||||
@@ -7830,21 +7761,6 @@ files = [
|
||||
{file = "tzdata-2025.3.tar.gz", hash = "sha256:de39c2ca5dc7b0344f2eba86f49d614019d29f060fc4ebc8a417896a620b56a7"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "uc-micro-py"
|
||||
version = "1.0.3"
|
||||
description = "Micro subset of unicode data files for linkify-it-py projects."
|
||||
optional = false
|
||||
python-versions = ">=3.7"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "uc-micro-py-1.0.3.tar.gz", hash = "sha256:d321b92cff673ec58027c04015fcaa8bb1e005478643ff4a500882eaab88c48a"},
|
||||
{file = "uc_micro_py-1.0.3-py3-none-any.whl", hash = "sha256:db1dffff340817673d7b466ec86114a9dc0e9d4d9b5ba229d9d60e5c12600cd5"},
|
||||
]
|
||||
|
||||
[package.extras]
|
||||
test = ["coverage", "pytest", "pytest-cov"]
|
||||
|
||||
[[package]]
|
||||
name = "uritemplate"
|
||||
version = "4.2.0"
|
||||
@@ -8192,10 +8108,9 @@ anyio = ">=3.0.0"
|
||||
name = "wcwidth"
|
||||
version = "0.2.14"
|
||||
description = "Measures the displayed width of unicode strings in a terminal"
|
||||
optional = true
|
||||
optional = false
|
||||
python-versions = ">=3.6"
|
||||
groups = ["main"]
|
||||
markers = "extra == \"benchmark\""
|
||||
files = [
|
||||
{file = "wcwidth-0.2.14-py2.py3-none-any.whl", hash = "sha256:a7bb560c8aee30f9957e5f9895805edd20602f2d7f720186dfd906e82b4982e1"},
|
||||
{file = "wcwidth-0.2.14.tar.gz", hash = "sha256:4d478375d31bc5395a3c55c40ccdf3354688364cd61c4f6adacaa9215d0b3605"},
|
||||
@@ -8650,4 +8565,4 @@ benchmark = ["agbenchmark"]
|
||||
[metadata]
|
||||
lock-version = "2.1"
|
||||
python-versions = "^3.12"
|
||||
content-hash = "5f804458cae6c7f030897da39e48945a280ca800bb665ddf4078a0a663684800"
|
||||
content-hash = "954e09a70e2221c66f77d68afa618c0a916ec1f3ad8f077ef6158282009c2632"
|
||||
|
||||
@@ -45,9 +45,9 @@ pydantic = "^2.7.2"
|
||||
python-dotenv = "^1.0.0"
|
||||
requests = "*"
|
||||
sentry-sdk = "^1.40.4"
|
||||
# TUI dependencies
|
||||
textual = "^0.52.0"
|
||||
# UI dependencies
|
||||
rich = "^13.0"
|
||||
prompt-toolkit = "^3.0.0"
|
||||
|
||||
# Benchmarking
|
||||
agbenchmark = { path = "../benchmark", optional = true }
|
||||
|
||||
Reference in New Issue
Block a user