Remove V0 CLI (#11538)

Rohit Malhotra
2025-10-28 13:16:07 -04:00
committed by GitHub
parent b8f387df94
commit 297af05d53
37 changed files with 0 additions and 12181 deletions

File diff suppressed because it is too large.


@@ -1,368 +0,0 @@
"""Unit tests for CLI alias setup functionality."""
import tempfile
from pathlib import Path
from unittest.mock import patch
from openhands.cli.main import alias_setup_declined as main_alias_setup_declined
from openhands.cli.main import aliases_exist_in_shell_config, run_alias_setup_flow
from openhands.cli.shell_config import (
ShellConfigManager,
add_aliases_to_shell_config,
alias_setup_declined,
get_shell_config_path,
mark_alias_setup_declined,
)
from openhands.core.config import OpenHandsConfig
def test_get_shell_config_path_no_files_fallback():
"""Test shell config path fallback when no shell detection and no config files exist."""
with tempfile.TemporaryDirectory() as temp_dir:
with patch('openhands.cli.shell_config.Path.home', return_value=Path(temp_dir)):
# Mock shellingham to raise an exception (detection failure)
with patch(
'shellingham.detect_shell',
side_effect=Exception('Shell detection failed'),
):
profile_path = get_shell_config_path()
assert profile_path.name == '.bash_profile'
def test_get_shell_config_path_bash_fallback():
"""Test shell config path fallback to bash when it exists."""
with tempfile.TemporaryDirectory() as temp_dir:
with patch('openhands.cli.shell_config.Path.home', return_value=Path(temp_dir)):
# Create .bashrc
bashrc = Path(temp_dir) / '.bashrc'
bashrc.touch()
# Mock shellingham to raise an exception (detection failure)
with patch(
'shellingham.detect_shell',
side_effect=Exception('Shell detection failed'),
):
profile_path = get_shell_config_path()
assert profile_path.name == '.bashrc'
def test_get_shell_config_path_with_bash_detection():
"""Test shell config path when bash is detected."""
with tempfile.TemporaryDirectory() as temp_dir:
with patch('openhands.cli.shell_config.Path.home', return_value=Path(temp_dir)):
# Create .bashrc
bashrc = Path(temp_dir) / '.bashrc'
bashrc.touch()
# Mock shellingham to return bash
with patch('shellingham.detect_shell', return_value=('bash', 'bash')):
profile_path = get_shell_config_path()
assert profile_path.name == '.bashrc'
def test_get_shell_config_path_with_zsh_detection():
"""Test shell config path when zsh is detected."""
with tempfile.TemporaryDirectory() as temp_dir:
with patch('openhands.cli.shell_config.Path.home', return_value=Path(temp_dir)):
# Create .zshrc
zshrc = Path(temp_dir) / '.zshrc'
zshrc.touch()
# Mock shellingham to return zsh
with patch('shellingham.detect_shell', return_value=('zsh', 'zsh')):
profile_path = get_shell_config_path()
assert profile_path.name == '.zshrc'
def test_get_shell_config_path_with_fish_detection():
"""Test shell config path when fish is detected."""
with tempfile.TemporaryDirectory() as temp_dir:
with patch('openhands.cli.shell_config.Path.home', return_value=Path(temp_dir)):
# Create fish config directory and file
fish_config_dir = Path(temp_dir) / '.config' / 'fish'
fish_config_dir.mkdir(parents=True)
fish_config = fish_config_dir / 'config.fish'
fish_config.touch()
# Mock shellingham to return fish
with patch('shellingham.detect_shell', return_value=('fish', 'fish')):
profile_path = get_shell_config_path()
assert profile_path.name == 'config.fish'
assert 'fish' in str(profile_path)
def test_add_aliases_to_shell_config_bash():
"""Test adding aliases to bash config."""
with tempfile.TemporaryDirectory() as temp_dir:
with patch('openhands.cli.shell_config.Path.home', return_value=Path(temp_dir)):
# Mock shellingham to return bash
with patch('shellingham.detect_shell', return_value=('bash', 'bash')):
# Add aliases
success = add_aliases_to_shell_config()
assert success is True
# Get the actual path that was used
with patch('shellingham.detect_shell', return_value=('bash', 'bash')):
profile_path = get_shell_config_path()
# Check that the aliases were added
with open(profile_path, 'r') as f:
content = f.read()
assert 'alias openhands=' in content
assert 'alias oh=' in content
assert 'uvx --python 3.12 --from openhands-ai openhands' in content
def test_add_aliases_to_shell_config_zsh():
"""Test adding aliases to zsh config."""
with tempfile.TemporaryDirectory() as temp_dir:
with patch('openhands.cli.shell_config.Path.home', return_value=Path(temp_dir)):
# Mock shellingham to return zsh
with patch('shellingham.detect_shell', return_value=('zsh', 'zsh')):
# Add aliases
success = add_aliases_to_shell_config()
assert success is True
# Check that the aliases were added to .zshrc
profile_path = Path(temp_dir) / '.zshrc'
with open(profile_path, 'r') as f:
content = f.read()
assert 'alias openhands=' in content
assert 'alias oh=' in content
assert 'uvx --python 3.12 --from openhands-ai openhands' in content
def test_add_aliases_handles_existing_aliases():
"""Test that adding aliases handles existing aliases correctly."""
with tempfile.TemporaryDirectory() as temp_dir:
with patch('openhands.cli.shell_config.Path.home', return_value=Path(temp_dir)):
# Mock shellingham to return bash
with patch('shellingham.detect_shell', return_value=('bash', 'bash')):
# Add aliases first time
success = add_aliases_to_shell_config()
assert success is True
# Try adding again - should detect existing aliases
success = add_aliases_to_shell_config()
assert success is True
# Get the actual path that was used
with patch('shellingham.detect_shell', return_value=('bash', 'bash')):
profile_path = get_shell_config_path()
# Check that aliases weren't duplicated
with open(profile_path, 'r') as f:
content = f.read()
# Count occurrences of the alias
openhands_count = content.count('alias openhands=')
oh_count = content.count('alias oh=')
assert openhands_count == 1
assert oh_count == 1
def test_aliases_exist_in_shell_config_no_file():
"""Test alias detection when no shell config exists."""
with tempfile.TemporaryDirectory() as temp_dir:
with patch('openhands.cli.shell_config.Path.home', return_value=Path(temp_dir)):
# Mock shellingham to return bash
with patch('shellingham.detect_shell', return_value=('bash', 'bash')):
assert aliases_exist_in_shell_config() is False
def test_aliases_exist_in_shell_config_no_aliases():
"""Test alias detection when shell config exists but has no aliases."""
with tempfile.TemporaryDirectory() as temp_dir:
with patch('openhands.cli.shell_config.Path.home', return_value=Path(temp_dir)):
# Mock shellingham to return bash
with patch('shellingham.detect_shell', return_value=('bash', 'bash')):
# Create bash profile with other content
profile_path = get_shell_config_path()
with open(profile_path, 'w') as f:
f.write('export PATH=$PATH:/usr/local/bin\n')
assert aliases_exist_in_shell_config() is False
def test_aliases_exist_in_shell_config_with_aliases():
"""Test alias detection when aliases exist."""
with tempfile.TemporaryDirectory() as temp_dir:
with patch('openhands.cli.shell_config.Path.home', return_value=Path(temp_dir)):
# Mock shellingham to return bash
with patch('shellingham.detect_shell', return_value=('bash', 'bash')):
# Add aliases first
add_aliases_to_shell_config()
assert aliases_exist_in_shell_config() is True
def test_shell_config_manager_basic_functionality():
"""Test basic ShellConfigManager functionality."""
manager = ShellConfigManager()
# Test command customization
custom_manager = ShellConfigManager(command='custom-command')
assert custom_manager.command == 'custom-command'
# Test shell type detection from path
assert manager.get_shell_type_from_path(Path('/home/user/.bashrc')) == 'bash'
assert manager.get_shell_type_from_path(Path('/home/user/.zshrc')) == 'zsh'
assert (
manager.get_shell_type_from_path(Path('/home/user/.config/fish/config.fish'))
== 'fish'
)
def test_shell_config_manager_reload_commands():
"""Test reload command generation."""
manager = ShellConfigManager()
# Test different shell reload commands
assert 'source ~/.zshrc' in manager.get_reload_command(Path('/home/user/.zshrc'))
assert 'source ~/.bashrc' in manager.get_reload_command(Path('/home/user/.bashrc'))
assert 'source ~/.bash_profile' in manager.get_reload_command(
Path('/home/user/.bash_profile')
)
assert 'source ~/.config/fish/config.fish' in manager.get_reload_command(
Path('/home/user/.config/fish/config.fish')
)
def test_shell_config_manager_template_rendering():
"""Test that templates are properly rendered."""
manager = ShellConfigManager(command='test-command')
with tempfile.TemporaryDirectory() as temp_dir:
with patch('openhands.cli.shell_config.Path.home', return_value=Path(temp_dir)):
# Create a bash config file
bashrc = Path(temp_dir) / '.bashrc'
bashrc.touch()
# Mock shell detection
with patch.object(manager, 'detect_shell', return_value='bash'):
success = manager.add_aliases()
assert success is True
# Check that the custom command was used
with open(bashrc, 'r') as f:
content = f.read()
assert 'test-command' in content
assert 'alias openhands="test-command"' in content
assert 'alias oh="test-command"' in content
def test_alias_setup_declined_false():
"""Test alias setup declined check when marker file doesn't exist."""
with tempfile.TemporaryDirectory() as temp_dir:
with patch('openhands.cli.shell_config.Path.home', return_value=Path(temp_dir)):
assert alias_setup_declined() is False
def test_alias_setup_declined_true():
"""Test alias setup declined check when marker file exists."""
with tempfile.TemporaryDirectory() as temp_dir:
with patch('openhands.cli.shell_config.Path.home', return_value=Path(temp_dir)):
# Create the marker file
mark_alias_setup_declined()
assert alias_setup_declined() is True
def test_mark_alias_setup_declined():
"""Test marking alias setup as declined creates the marker file."""
with tempfile.TemporaryDirectory() as temp_dir:
with patch('openhands.cli.shell_config.Path.home', return_value=Path(temp_dir)):
# Initially should be False
assert alias_setup_declined() is False
# Mark as declined
mark_alias_setup_declined()
# Should now be True
assert alias_setup_declined() is True
# Verify the file exists
marker_file = Path(temp_dir) / '.openhands' / '.cli_alias_setup_declined'
assert marker_file.exists()
def test_alias_setup_declined_persisted():
"""Test that when user declines alias setup, their choice is persisted."""
config = OpenHandsConfig()
with tempfile.TemporaryDirectory() as temp_dir:
with patch('openhands.cli.shell_config.Path.home', return_value=Path(temp_dir)):
with patch('shellingham.detect_shell', return_value=('bash', 'bash')):
with patch(
'openhands.cli.shell_config.aliases_exist_in_shell_config',
return_value=False,
):
with patch(
'openhands.cli.main.cli_confirm', return_value=1
): # User chooses "No"
with patch('prompt_toolkit.print_formatted_text'):
# Initially, user hasn't declined
assert not alias_setup_declined()
# Run the alias setup flow
run_alias_setup_flow(config)
# After declining, the marker should be set
assert alias_setup_declined()
def test_alias_setup_skipped_when_previously_declined():
"""Test that alias setup is skipped when user has previously declined."""
OpenHandsConfig()
with tempfile.TemporaryDirectory() as temp_dir:
with patch('openhands.cli.shell_config.Path.home', return_value=Path(temp_dir)):
# Mark that user has previously declined
mark_alias_setup_declined()
assert alias_setup_declined()
with patch('shellingham.detect_shell', return_value=('bash', 'bash')):
with patch(
'openhands.cli.shell_config.aliases_exist_in_shell_config',
return_value=False,
):
with patch('openhands.cli.main.cli_confirm'):
with patch('prompt_toolkit.print_formatted_text'):
# This should not show the setup flow since user previously declined
# We test this by checking the main logic conditions
should_show = (
not aliases_exist_in_shell_config()
and not main_alias_setup_declined()
)
assert not should_show, (
'Alias setup should be skipped when user previously declined'
)
def test_alias_setup_accepted_does_not_set_declined_flag():
"""Test that when user accepts alias setup, no declined marker is created."""
config = OpenHandsConfig()
with tempfile.TemporaryDirectory() as temp_dir:
with patch('openhands.cli.shell_config.Path.home', return_value=Path(temp_dir)):
with patch('shellingham.detect_shell', return_value=('bash', 'bash')):
with patch(
'openhands.cli.shell_config.aliases_exist_in_shell_config',
return_value=False,
):
with patch(
'openhands.cli.main.cli_confirm', return_value=0
): # User chooses "Yes"
with patch(
'openhands.cli.shell_config.add_aliases_to_shell_config',
return_value=True,
):
with patch('prompt_toolkit.print_formatted_text'):
# Initially, user hasn't declined
assert not alias_setup_declined()
# Run the alias setup flow
run_alias_setup_flow(config)
# After accepting, the declined marker should still be False
assert not alias_setup_declined()


@@ -1,637 +0,0 @@
from unittest.mock import MagicMock, patch
import pytest
from prompt_toolkit.formatted_text import HTML
from openhands.cli.commands import (
display_mcp_servers,
handle_commands,
handle_exit_command,
handle_help_command,
handle_init_command,
handle_mcp_command,
handle_new_command,
handle_resume_command,
handle_settings_command,
handle_status_command,
)
from openhands.cli.tui import UsageMetrics
from openhands.core.config import OpenHandsConfig
from openhands.core.schema import AgentState
from openhands.events import EventSource
from openhands.events.action import ChangeAgentStateAction, MessageAction
from openhands.events.stream import EventStream
from openhands.storage.settings.file_settings_store import FileSettingsStore
class TestHandleCommands:
@pytest.fixture
def mock_dependencies(self):
event_stream = MagicMock(spec=EventStream)
usage_metrics = MagicMock(spec=UsageMetrics)
sid = 'test-session-id'
config = MagicMock(spec=OpenHandsConfig)
current_dir = '/test/dir'
settings_store = MagicMock(spec=FileSettingsStore)
agent_state = AgentState.RUNNING
return {
'event_stream': event_stream,
'usage_metrics': usage_metrics,
'sid': sid,
'config': config,
'current_dir': current_dir,
'settings_store': settings_store,
'agent_state': agent_state,
}
@pytest.mark.asyncio
@patch('openhands.cli.commands.handle_exit_command')
async def test_handle_exit_command(self, mock_handle_exit, mock_dependencies):
mock_handle_exit.return_value = True
close_repl, reload_microagents, new_session, _ = await handle_commands(
'/exit', **mock_dependencies
)
mock_handle_exit.assert_called_once_with(
mock_dependencies['config'],
mock_dependencies['event_stream'],
mock_dependencies['usage_metrics'],
mock_dependencies['sid'],
)
assert close_repl is True
assert reload_microagents is False
assert new_session is False
@pytest.mark.asyncio
@patch('openhands.cli.commands.handle_help_command')
async def test_handle_help_command(self, mock_handle_help, mock_dependencies):
mock_handle_help.return_value = (False, False, False)
close_repl, reload_microagents, new_session, _ = await handle_commands(
'/help', **mock_dependencies
)
mock_handle_help.assert_called_once()
assert close_repl is False
assert reload_microagents is False
assert new_session is False
@pytest.mark.asyncio
@patch('openhands.cli.commands.handle_init_command')
async def test_handle_init_command(self, mock_handle_init, mock_dependencies):
mock_handle_init.return_value = (True, True)
close_repl, reload_microagents, new_session, _ = await handle_commands(
'/init', **mock_dependencies
)
mock_handle_init.assert_called_once_with(
mock_dependencies['config'],
mock_dependencies['event_stream'],
mock_dependencies['current_dir'],
)
assert close_repl is True
assert reload_microagents is True
assert new_session is False
@pytest.mark.asyncio
@patch('openhands.cli.commands.handle_status_command')
async def test_handle_status_command(self, mock_handle_status, mock_dependencies):
mock_handle_status.return_value = (False, False, False)
close_repl, reload_microagents, new_session, _ = await handle_commands(
'/status', **mock_dependencies
)
mock_handle_status.assert_called_once_with(
mock_dependencies['usage_metrics'], mock_dependencies['sid']
)
assert close_repl is False
assert reload_microagents is False
assert new_session is False
@pytest.mark.asyncio
@patch('openhands.cli.commands.handle_new_command')
async def test_handle_new_command(self, mock_handle_new, mock_dependencies):
mock_handle_new.return_value = (True, True)
close_repl, reload_microagents, new_session, _ = await handle_commands(
'/new', **mock_dependencies
)
mock_handle_new.assert_called_once_with(
mock_dependencies['config'],
mock_dependencies['event_stream'],
mock_dependencies['usage_metrics'],
mock_dependencies['sid'],
)
assert close_repl is True
assert reload_microagents is False
assert new_session is True
@pytest.mark.asyncio
@patch('openhands.cli.commands.handle_settings_command')
async def test_handle_settings_command(
self, mock_handle_settings, mock_dependencies
):
close_repl, reload_microagents, new_session, _ = await handle_commands(
'/settings', **mock_dependencies
)
mock_handle_settings.assert_called_once_with(
mock_dependencies['config'],
mock_dependencies['settings_store'],
)
assert close_repl is False
assert reload_microagents is False
assert new_session is False
@pytest.mark.asyncio
@patch('openhands.cli.commands.handle_mcp_command')
async def test_handle_mcp_command(self, mock_handle_mcp, mock_dependencies):
close_repl, reload_microagents, new_session, _ = await handle_commands(
'/mcp', **mock_dependencies
)
mock_handle_mcp.assert_called_once_with(mock_dependencies['config'])
assert close_repl is False
assert reload_microagents is False
assert new_session is False
@pytest.mark.asyncio
async def test_handle_unknown_command(self, mock_dependencies):
user_message = 'Hello, this is not a command'
close_repl, reload_microagents, new_session, _ = await handle_commands(
user_message, **mock_dependencies
)
# Non-command input should be treated as a message and added to the event stream
mock_dependencies['event_stream'].add_event.assert_called_once()
# Check the first argument is a MessageAction with the right content
args, kwargs = mock_dependencies['event_stream'].add_event.call_args
assert isinstance(args[0], MessageAction)
assert args[0].content == user_message
assert args[1] == EventSource.USER
assert close_repl is True
assert reload_microagents is False
assert new_session is False
class TestHandleExitCommand:
@patch('openhands.cli.commands.cli_confirm')
@patch('openhands.cli.commands.display_shutdown_message')
def test_exit_with_confirmation(self, mock_display_shutdown, mock_cli_confirm):
config = MagicMock(spec=OpenHandsConfig)
event_stream = MagicMock(spec=EventStream)
usage_metrics = MagicMock(spec=UsageMetrics)
sid = 'test-session-id'
# Mock user confirming exit
mock_cli_confirm.return_value = 0 # First option, which is "Yes, proceed"
# Call the function under test
result = handle_exit_command(config, event_stream, usage_metrics, sid)
# Verify correct behavior
mock_cli_confirm.assert_called_once()
event_stream.add_event.assert_called_once()
# Check event is the right type
args, kwargs = event_stream.add_event.call_args
assert isinstance(args[0], ChangeAgentStateAction)
assert args[0].agent_state == AgentState.STOPPED
assert args[1] == EventSource.ENVIRONMENT
mock_display_shutdown.assert_called_once_with(usage_metrics, sid)
assert result is True
@patch('openhands.cli.commands.cli_confirm')
@patch('openhands.cli.commands.display_shutdown_message')
def test_exit_without_confirmation(self, mock_display_shutdown, mock_cli_confirm):
config = MagicMock(spec=OpenHandsConfig)
event_stream = MagicMock(spec=EventStream)
usage_metrics = MagicMock(spec=UsageMetrics)
sid = 'test-session-id'
# Mock user rejecting exit
mock_cli_confirm.return_value = 1 # Second option, which is "No, dismiss"
# Call the function under test
result = handle_exit_command(config, event_stream, usage_metrics, sid)
# Verify correct behavior
mock_cli_confirm.assert_called_once()
event_stream.add_event.assert_not_called()
mock_display_shutdown.assert_not_called()
assert result is False
class TestHandleHelpCommand:
@patch('openhands.cli.commands.display_help')
def test_help_command(self, mock_display_help):
handle_help_command()
mock_display_help.assert_called_once()
class TestDisplayMcpServers:
@patch('openhands.cli.commands.print_formatted_text')
def test_display_mcp_servers_no_servers(self, mock_print):
from openhands.core.config.mcp_config import MCPConfig
config = MagicMock(spec=OpenHandsConfig)
config.mcp = MCPConfig() # Empty config with no servers
display_mcp_servers(config)
mock_print.assert_called_once()
call_args = mock_print.call_args[0][0]
assert 'No custom MCP servers configured' in call_args
assert (
'https://docs.all-hands.dev/usage/how-to/cli-mode#using-mcp-servers'
in call_args
)
@patch('openhands.cli.commands.print_formatted_text')
def test_display_mcp_servers_with_servers(self, mock_print):
from openhands.core.config.mcp_config import (
MCPConfig,
MCPSHTTPServerConfig,
MCPSSEServerConfig,
MCPStdioServerConfig,
)
config = MagicMock(spec=OpenHandsConfig)
config.mcp = MCPConfig(
sse_servers=[MCPSSEServerConfig(url='https://example.com/sse')],
stdio_servers=[MCPStdioServerConfig(name='tavily', command='npx')],
shttp_servers=[MCPSHTTPServerConfig(url='http://localhost:3000/mcp')],
)
display_mcp_servers(config)
# Should be called multiple times for different sections
assert mock_print.call_count >= 4
# Check that the summary is printed
first_call = mock_print.call_args_list[0][0][0]
assert 'Configured MCP servers:' in first_call
assert 'SSE servers: 1' in first_call
assert 'Stdio servers: 1' in first_call
assert 'SHTTP servers: 1' in first_call
assert 'Total: 3' in first_call
class TestHandleMcpCommand:
@pytest.mark.asyncio
@patch('openhands.cli.commands.cli_confirm')
@patch('openhands.cli.commands.display_mcp_servers')
async def test_handle_mcp_command_list_action(self, mock_display, mock_cli_confirm):
config = MagicMock(spec=OpenHandsConfig)
mock_cli_confirm.return_value = 0 # List action
await handle_mcp_command(config)
mock_cli_confirm.assert_called_once_with(
config,
'MCP Server Configuration',
[
'List configured servers',
'Add new server',
'Remove server',
'View errors',
'Go back',
],
)
mock_display.assert_called_once_with(config)
class TestHandleStatusCommand:
@patch('openhands.cli.commands.display_status')
def test_status_command(self, mock_display_status):
usage_metrics = MagicMock(spec=UsageMetrics)
sid = 'test-session-id'
handle_status_command(usage_metrics, sid)
mock_display_status.assert_called_once_with(usage_metrics, sid)
class TestHandleNewCommand:
@patch('openhands.cli.commands.cli_confirm')
@patch('openhands.cli.commands.display_shutdown_message')
def test_new_with_confirmation(self, mock_display_shutdown, mock_cli_confirm):
config = MagicMock(spec=OpenHandsConfig)
event_stream = MagicMock(spec=EventStream)
usage_metrics = MagicMock(spec=UsageMetrics)
sid = 'test-session-id'
# Mock user confirming new session
mock_cli_confirm.return_value = 0 # First option, which is "Yes, proceed"
# Call the function under test
close_repl, new_session = handle_new_command(
config, event_stream, usage_metrics, sid
)
# Verify correct behavior
mock_cli_confirm.assert_called_once()
event_stream.add_event.assert_called_once()
# Check event is the right type
args, kwargs = event_stream.add_event.call_args
assert isinstance(args[0], ChangeAgentStateAction)
assert args[0].agent_state == AgentState.STOPPED
assert args[1] == EventSource.ENVIRONMENT
mock_display_shutdown.assert_called_once_with(usage_metrics, sid)
assert close_repl is True
assert new_session is True
@patch('openhands.cli.commands.cli_confirm')
@patch('openhands.cli.commands.display_shutdown_message')
def test_new_without_confirmation(self, mock_display_shutdown, mock_cli_confirm):
config = MagicMock(spec=OpenHandsConfig)
event_stream = MagicMock(spec=EventStream)
usage_metrics = MagicMock(spec=UsageMetrics)
sid = 'test-session-id'
# Mock user rejecting new session
mock_cli_confirm.return_value = 1 # Second option, which is "No, dismiss"
# Call the function under test
close_repl, new_session = handle_new_command(
config, event_stream, usage_metrics, sid
)
# Verify correct behavior
mock_cli_confirm.assert_called_once()
event_stream.add_event.assert_not_called()
mock_display_shutdown.assert_not_called()
assert close_repl is False
assert new_session is False
class TestHandleInitCommand:
@pytest.mark.asyncio
@patch('openhands.cli.commands.init_repository')
async def test_init_local_runtime_successful(self, mock_init_repository):
config = MagicMock(spec=OpenHandsConfig)
config.runtime = 'local'
event_stream = MagicMock(spec=EventStream)
current_dir = '/test/dir'
# Mock successful repository initialization
mock_init_repository.return_value = True
# Call the function under test
close_repl, reload_microagents = await handle_init_command(
config, event_stream, current_dir
)
# Verify correct behavior
mock_init_repository.assert_called_once_with(config, current_dir)
event_stream.add_event.assert_called_once()
# Check event is the right type
args, kwargs = event_stream.add_event.call_args
assert isinstance(args[0], MessageAction)
assert 'Please explore this repository' in args[0].content
assert args[1] == EventSource.USER
assert close_repl is True
assert reload_microagents is True
@pytest.mark.asyncio
@patch('openhands.cli.commands.init_repository')
async def test_init_local_runtime_unsuccessful(self, mock_init_repository):
config = MagicMock(spec=OpenHandsConfig)
config.runtime = 'local'
event_stream = MagicMock(spec=EventStream)
current_dir = '/test/dir'
# Mock unsuccessful repository initialization
mock_init_repository.return_value = False
# Call the function under test
close_repl, reload_microagents = await handle_init_command(
config, event_stream, current_dir
)
# Verify correct behavior
mock_init_repository.assert_called_once_with(config, current_dir)
event_stream.add_event.assert_not_called()
assert close_repl is False
assert reload_microagents is False
@pytest.mark.asyncio
@patch('openhands.cli.commands.print_formatted_text')
@patch('openhands.cli.commands.init_repository')
async def test_init_non_local_runtime(self, mock_init_repository, mock_print):
config = MagicMock(spec=OpenHandsConfig)
config.runtime = 'remote' # Not local
event_stream = MagicMock(spec=EventStream)
current_dir = '/test/dir'
# Call the function under test
close_repl, reload_microagents = await handle_init_command(
config, event_stream, current_dir
)
# Verify correct behavior
mock_init_repository.assert_not_called()
mock_print.assert_called_once()
event_stream.add_event.assert_not_called()
assert close_repl is False
assert reload_microagents is False
class TestHandleSettingsCommand:
@pytest.mark.asyncio
@patch('openhands.cli.commands.display_settings')
@patch('openhands.cli.commands.cli_confirm')
@patch('openhands.cli.commands.modify_llm_settings_basic')
async def test_settings_basic_with_changes(
self,
mock_modify_basic,
mock_cli_confirm,
mock_display_settings,
):
config = MagicMock(spec=OpenHandsConfig)
settings_store = MagicMock(spec=FileSettingsStore)
# Mock user selecting "Basic" settings
mock_cli_confirm.return_value = 0
# Call the function under test
await handle_settings_command(config, settings_store)
# Verify correct behavior
mock_display_settings.assert_called_once_with(config)
mock_cli_confirm.assert_called_once()
mock_modify_basic.assert_called_once_with(config, settings_store)
@pytest.mark.asyncio
@patch('openhands.cli.commands.display_settings')
@patch('openhands.cli.commands.cli_confirm')
@patch('openhands.cli.commands.modify_llm_settings_basic')
async def test_settings_basic_without_changes(
self,
mock_modify_basic,
mock_cli_confirm,
mock_display_settings,
):
config = MagicMock(spec=OpenHandsConfig)
settings_store = MagicMock(spec=FileSettingsStore)
# Mock user selecting "Basic" settings
mock_cli_confirm.return_value = 0
# Call the function under test
await handle_settings_command(config, settings_store)
# Verify correct behavior
mock_display_settings.assert_called_once_with(config)
mock_cli_confirm.assert_called_once()
mock_modify_basic.assert_called_once_with(config, settings_store)
@pytest.mark.asyncio
@patch('openhands.cli.commands.display_settings')
@patch('openhands.cli.commands.cli_confirm')
@patch('openhands.cli.commands.modify_llm_settings_advanced')
async def test_settings_advanced_with_changes(
self,
mock_modify_advanced,
mock_cli_confirm,
mock_display_settings,
):
config = MagicMock(spec=OpenHandsConfig)
settings_store = MagicMock(spec=FileSettingsStore)
# Mock user selecting "Advanced" settings
mock_cli_confirm.return_value = 1
# Call the function under test
await handle_settings_command(config, settings_store)
# Verify correct behavior
mock_display_settings.assert_called_once_with(config)
mock_cli_confirm.assert_called_once()
mock_modify_advanced.assert_called_once_with(config, settings_store)
@pytest.mark.asyncio
@patch('openhands.cli.commands.display_settings')
@patch('openhands.cli.commands.cli_confirm')
@patch('openhands.cli.commands.modify_llm_settings_advanced')
async def test_settings_advanced_without_changes(
self,
mock_modify_advanced,
mock_cli_confirm,
mock_display_settings,
):
config = MagicMock(spec=OpenHandsConfig)
settings_store = MagicMock(spec=FileSettingsStore)
# Mock user selecting "Advanced" settings
mock_cli_confirm.return_value = 1
# Call the function under test
await handle_settings_command(config, settings_store)
# Verify correct behavior
mock_display_settings.assert_called_once_with(config)
mock_cli_confirm.assert_called_once()
mock_modify_advanced.assert_called_once_with(config, settings_store)
@pytest.mark.asyncio
@patch('openhands.cli.commands.display_settings')
@patch('openhands.cli.commands.cli_confirm')
async def test_settings_go_back(self, mock_cli_confirm, mock_display_settings):
config = MagicMock(spec=OpenHandsConfig)
settings_store = MagicMock(spec=FileSettingsStore)
# Mock user selecting "Go back" (now option 4, index 3)
mock_cli_confirm.return_value = 3
# Call the function under test
await handle_settings_command(config, settings_store)
# Verify correct behavior
mock_display_settings.assert_called_once_with(config)
mock_cli_confirm.assert_called_once()
class TestHandleResumeCommand:
@pytest.mark.asyncio
@patch('openhands.cli.commands.print_formatted_text')
async def test_handle_resume_command_paused_state(self, mock_print):
"""Test that handle_resume_command works when agent is in PAUSED state."""
# Create a mock event stream
event_stream = MagicMock(spec=EventStream)
# Call the function with PAUSED state
close_repl, new_session_requested = await handle_resume_command(
'/resume', event_stream, AgentState.PAUSED
)
# Check that the event stream add_event was called with the correct message action
event_stream.add_event.assert_called_once()
args, kwargs = event_stream.add_event.call_args
message_action, source = args
assert isinstance(message_action, MessageAction)
assert message_action.content == 'continue'
assert source == EventSource.USER
# Check the return values
assert close_repl is True
assert new_session_requested is False
# Verify no error message was printed
mock_print.assert_not_called()
@pytest.mark.asyncio
@pytest.mark.parametrize(
'invalid_state', [AgentState.RUNNING, AgentState.FINISHED, AgentState.ERROR]
)
@patch('openhands.cli.commands.print_formatted_text')
async def test_handle_resume_command_invalid_states(
self, mock_print, invalid_state
):
"""Test that handle_resume_command shows error for all non-PAUSED states."""
event_stream = MagicMock(spec=EventStream)
close_repl, new_session_requested = await handle_resume_command(
'/resume', event_stream, invalid_state
)
# Check that no event was added to the stream
event_stream.add_event.assert_not_called()
# Verify print was called with the error message
assert mock_print.call_count == 1
error_call = mock_print.call_args_list[0][0][0]
assert isinstance(error_call, HTML)
assert 'Error: Agent is not paused' in str(error_call)
assert '/resume command is only available when agent is paused' in str(
error_call
)
# Check the return values
assert close_repl is False
assert new_session_requested is False
class TestMCPErrorHandling:
"""Test MCP error handling in commands."""
@patch('openhands.cli.commands.display_mcp_errors')
def test_handle_mcp_errors_command(self, mock_display_errors):
"""Test handling MCP errors command."""
from openhands.cli.commands import handle_mcp_errors_command
handle_mcp_errors_command()
mock_display_errors.assert_called_once()


@@ -1,106 +0,0 @@
"""Tests for CLI server management functionality."""
from unittest.mock import MagicMock, patch
import pytest
from openhands.cli.commands import (
display_mcp_servers,
remove_mcp_server,
)
from openhands.core.config import OpenHandsConfig
from openhands.core.config.mcp_config import (
MCPConfig,
MCPSSEServerConfig,
MCPStdioServerConfig,
)
class TestMCPServerManagement:
"""Test MCP server management functions."""
def setup_method(self):
"""Set up test fixtures."""
self.config = MagicMock(spec=OpenHandsConfig)
self.config.cli = MagicMock()
self.config.cli.vi_mode = False
@patch('openhands.cli.commands.print_formatted_text')
def test_display_mcp_servers_no_servers(self, mock_print):
"""Test displaying MCP servers when none are configured."""
self.config.mcp = MCPConfig() # Empty config
display_mcp_servers(self.config)
mock_print.assert_called_once()
call_args = mock_print.call_args[0][0]
assert 'No custom MCP servers configured' in call_args
@patch('openhands.cli.commands.print_formatted_text')
def test_display_mcp_servers_with_servers(self, mock_print):
"""Test displaying MCP servers when some are configured."""
self.config.mcp = MCPConfig(
sse_servers=[MCPSSEServerConfig(url='http://test.com')],
stdio_servers=[MCPStdioServerConfig(name='test-stdio', command='python')],
)
display_mcp_servers(self.config)
# Should be called multiple times for different sections
assert mock_print.call_count >= 2
# Check that the summary is printed
first_call = mock_print.call_args_list[0][0][0]
assert 'Configured MCP servers:' in first_call
assert 'SSE servers: 1' in first_call
assert 'Stdio servers: 1' in first_call
@pytest.mark.asyncio
@patch('openhands.cli.commands.cli_confirm')
@patch('openhands.cli.commands.print_formatted_text')
async def test_remove_mcp_server_no_servers(self, mock_print, mock_cli_confirm):
"""Test removing MCP server when none are configured."""
self.config.mcp = MCPConfig() # Empty config
await remove_mcp_server(self.config)
mock_print.assert_called_once_with('No MCP servers configured to remove.')
mock_cli_confirm.assert_not_called()
@pytest.mark.asyncio
@patch('openhands.cli.commands.cli_confirm')
@patch('openhands.cli.commands.load_config_file')
@patch('openhands.cli.commands.save_config_file')
@patch('openhands.cli.commands.print_formatted_text')
async def test_remove_mcp_server_success(
self, mock_print, mock_save, mock_load, mock_cli_confirm
):
"""Test successfully removing an MCP server."""
# Set up config with servers
self.config.mcp = MCPConfig(
sse_servers=[MCPSSEServerConfig(url='http://test.com')],
stdio_servers=[MCPStdioServerConfig(name='test-stdio', command='python')],
)
# Mock user selections
mock_cli_confirm.side_effect = [0, 0] # Select first server, confirm removal
# Mock config file operations
mock_load.return_value = {
'mcp': {
'sse_servers': [{'url': 'http://test.com'}],
'stdio_servers': [{'name': 'test-stdio', 'command': 'python'}],
}
}
await remove_mcp_server(self.config)
# Should have been called twice (select server, confirm removal)
assert mock_cli_confirm.call_count == 2
mock_save.assert_called_once()
# Check that success message was printed
success_calls = [
call for call in mock_print.call_args_list if 'removed' in str(call[0][0])
]
assert len(success_calls) >= 1


@@ -1,80 +0,0 @@
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from openhands.cli.settings import modify_llm_settings_basic
from openhands.cli.utils import VERIFIED_ANTHROPIC_MODELS
@pytest.mark.asyncio
@patch('openhands.cli.settings.get_supported_llm_models')
@patch('openhands.cli.settings.organize_models_and_providers')
@patch('openhands.cli.settings.PromptSession')
@patch('openhands.cli.settings.cli_confirm')
@patch('openhands.cli.settings.print_formatted_text')
async def test_anthropic_default_model_is_best_verified(
mock_print,
mock_confirm,
mock_session,
mock_organize,
mock_get_models,
):
"""Test that the default model for anthropic is the best verified model."""
# Setup mocks
mock_get_models.return_value = [
'anthropic/claude-sonnet-4-20250514',
'anthropic/claude-2',
]
mock_organize.return_value = {
'anthropic': {
'models': ['claude-sonnet-4-20250514', 'claude-2'],
'separator': '/',
},
}
# Mock session to avoid actual user input
session_instance = MagicMock()
session_instance.prompt_async = AsyncMock(side_effect=KeyboardInterrupt())
mock_session.return_value = session_instance
# Mock config and settings store
app_config = MagicMock()
llm_config = MagicMock()
llm_config.model = 'anthropic/claude-sonnet-4-20250514'
app_config.get_llm_config.return_value = llm_config
settings_store = AsyncMock()
# Mock cli_confirm to avoid actual user input
# We need enough values to handle all the calls in the function
mock_confirm.side_effect = [
0,
0,
0,
] # Use default provider, use default model, etc.
try:
# Call the function (it will exit early due to KeyboardInterrupt)
await modify_llm_settings_basic(app_config, settings_store)
except KeyboardInterrupt:
pass # Expected exception
# Check that the default model displayed is the best verified model
best_verified_model = VERIFIED_ANTHROPIC_MODELS[
0
] # First model in the list is the best
default_model_displayed = False
for call in mock_print.call_args_list:
args, _ = call
if (
args
and hasattr(args[0], 'value')
and f'Default model: </grey><green>{best_verified_model}</green>'
in args[0].value
):
default_model_displayed = True
break
assert default_model_displayed, (
f'Default model displayed was not {best_verified_model}'
)


@@ -1,143 +0,0 @@
"""Tests for CLI loop recovery functionality."""
from unittest.mock import MagicMock, patch
import pytest
from openhands.cli.commands import handle_resume_command
from openhands.controller.agent_controller import AgentController
from openhands.controller.stuck import StuckDetector
from openhands.core.schema import AgentState
from openhands.events import EventSource
from openhands.events.action import LoopRecoveryAction, MessageAction
from openhands.events.stream import EventStream
class TestCliLoopRecoveryIntegration:
"""Integration tests for CLI loop recovery functionality."""
@pytest.mark.asyncio
async def test_loop_recovery_resume_option_1(self):
"""Test that resume option 1 triggers loop recovery with memory truncation."""
# Create a mock agent controller with stuck analysis
mock_controller = MagicMock(spec=AgentController)
mock_controller._stuck_detector = MagicMock(spec=StuckDetector)
mock_controller._stuck_detector.stuck_analysis = MagicMock()
mock_controller._stuck_detector.stuck_analysis.loop_start_idx = 5
# Mock the loop recovery methods
mock_controller._perform_loop_recovery = MagicMock()
mock_controller._restart_with_last_user_message = MagicMock()
mock_controller.set_agent_state_to = MagicMock()
mock_controller._loop_recovery_info = None
# Create a mock event stream
event_stream = MagicMock(spec=EventStream)
# Call handle_resume_command with option 1
close_repl, new_session_requested = await handle_resume_command(
'/resume 1', event_stream, AgentState.PAUSED
)
# Verify that LoopRecoveryAction was added to the event stream
event_stream.add_event.assert_called_once()
args, kwargs = event_stream.add_event.call_args
loop_recovery_action, source = args
assert isinstance(loop_recovery_action, LoopRecoveryAction)
assert loop_recovery_action.option == 1
assert source == EventSource.USER
# Check the return values
assert close_repl is True
assert new_session_requested is False
@pytest.mark.asyncio
async def test_loop_recovery_resume_option_2(self):
"""Test that resume option 2 triggers restart with last user message."""
# Create a mock event stream
event_stream = MagicMock(spec=EventStream)
# Call handle_resume_command with option 2
close_repl, new_session_requested = await handle_resume_command(
'/resume 2', event_stream, AgentState.PAUSED
)
# Verify that LoopRecoveryAction was added to the event stream
event_stream.add_event.assert_called_once()
args, kwargs = event_stream.add_event.call_args
loop_recovery_action, source = args
assert isinstance(loop_recovery_action, LoopRecoveryAction)
assert loop_recovery_action.option == 2
assert source == EventSource.USER
# Check the return values
assert close_repl is True
assert new_session_requested is False
@pytest.mark.asyncio
async def test_regular_resume_without_loop_recovery(self):
"""Test that regular resume without option sends continue message."""
# Create a mock event stream
event_stream = MagicMock(spec=EventStream)
# Call handle_resume_command without loop recovery option
close_repl, new_session_requested = await handle_resume_command(
'/resume', event_stream, AgentState.PAUSED
)
# Verify that MessageAction was added to the event stream
event_stream.add_event.assert_called_once()
args, kwargs = event_stream.add_event.call_args
message_action, source = args
assert isinstance(message_action, MessageAction)
assert message_action.content == 'continue'
assert source == EventSource.USER
# Check the return values
assert close_repl is True
assert new_session_requested is False
@pytest.mark.asyncio
async def test_handle_commands_with_loop_recovery_resume(self):
"""Test that handle_commands properly routes loop recovery resume commands."""
from openhands.cli.commands import handle_commands
# Create mock dependencies
event_stream = MagicMock(spec=EventStream)
usage_metrics = MagicMock()
sid = 'test-session-id'
config = MagicMock()
current_dir = '/test/dir'
settings_store = MagicMock()
agent_state = AgentState.PAUSED
# Mock handle_resume_command
with patch(
'openhands.cli.commands.handle_resume_command'
) as mock_handle_resume:
mock_handle_resume.return_value = (False, False)
# Call handle_commands with loop recovery resume
close_repl, reload_microagents, new_session, _ = await handle_commands(
'/resume 1',
event_stream,
usage_metrics,
sid,
config,
current_dir,
settings_store,
agent_state,
)
# Check that handle_resume_command was called with correct args
mock_handle_resume.assert_called_once_with(
'/resume 1', event_stream, agent_state
)
# Check the return values
assert close_repl is False
assert reload_microagents is False
assert new_session is False


@@ -1,205 +0,0 @@
import asyncio
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
import pytest_asyncio
from litellm.exceptions import AuthenticationError
from pydantic import SecretStr
from openhands.cli import main as cli
from openhands.core.config.llm_config import LLMConfig
from openhands.events import EventSource
from openhands.events.action import MessageAction
@pytest_asyncio.fixture
def mock_agent():
agent = AsyncMock()
agent.reset = MagicMock()
return agent
@pytest_asyncio.fixture
def mock_runtime():
runtime = AsyncMock()
runtime.close = MagicMock()
runtime.event_stream = MagicMock()
return runtime
@pytest_asyncio.fixture
def mock_controller():
controller = AsyncMock()
controller.close = AsyncMock()
# Setup for get_state() and the returned state's save_to_session()
mock_state = MagicMock()
mock_state.save_to_session = MagicMock()
controller.get_state = MagicMock(return_value=mock_state)
return controller
@pytest_asyncio.fixture
def mock_config():
config = MagicMock()
config.runtime = 'local'
config.cli_multiline_input = False
config.workspace_base = '/test/dir'
# Set up LLM config to use OpenHands provider
llm_config = LLMConfig(model='openhands/o3', api_key=SecretStr('invalid-api-key'))
llm_config.model = 'openhands/o3' # Use OpenHands provider with o3 model
config.get_llm_config.return_value = llm_config
config.get_llm_config_from_agent.return_value = llm_config
# Mock search_api_key with get_secret_value method
search_api_key_mock = MagicMock()
search_api_key_mock.get_secret_value.return_value = (
'' # Empty string, not starting with 'tvly-'
)
config.search_api_key = search_api_key_mock
# Mock sandbox with volumes attribute to prevent finalize_config issues
config.sandbox = MagicMock()
config.sandbox.volumes = (
None # This prevents finalize_config from overriding workspace_base
)
return config
@pytest_asyncio.fixture
def mock_settings_store():
settings_store = AsyncMock()
return settings_store
@pytest.mark.asyncio
@patch('openhands.cli.main.display_runtime_initialization_message')
@patch('openhands.cli.main.display_initialization_animation')
@patch('openhands.cli.main.create_agent')
@patch('openhands.cli.main.add_mcp_tools_to_agent')
@patch('openhands.cli.main.create_runtime')
@patch('openhands.cli.main.create_controller')
@patch('openhands.cli.main.create_memory')
@patch('openhands.cli.main.run_agent_until_done')
@patch('openhands.cli.main.cleanup_session')
@patch('openhands.cli.main.initialize_repository_for_runtime')
@patch('openhands.llm.llm.litellm_completion')
async def test_openhands_provider_authentication_error(
mock_litellm_completion,
mock_initialize_repo,
mock_cleanup_session,
mock_run_agent_until_done,
mock_create_memory,
mock_create_controller,
mock_create_runtime,
mock_add_mcp_tools,
mock_create_agent,
mock_display_animation,
mock_display_runtime_init,
mock_config,
mock_settings_store,
):
"""Test that authentication errors with the OpenHands provider are handled correctly.
This test reproduces the error seen in the CLI when using the OpenHands provider:
```
litellm.exceptions.AuthenticationError: litellm.AuthenticationError: AuthenticationError: Litellm_proxyException -
Authentication Error, Invalid proxy server token passed. Received API Key = sk-...7hlQ,
Key Hash (Token) =e316fa114498880be11f2e236d6f482feee5e324a4a148b98af247eded5290c4.
Unable to find token in cache or `LiteLLM_VerificationTokenTable`
18:38:53 - openhands:ERROR: loop.py:25 - STATUS$ERROR_LLM_AUTHENTICATION
```
The test mocks the litellm_completion function to raise an AuthenticationError
with the OpenHands provider and verifies that the CLI handles the error gracefully.
"""
loop = asyncio.get_running_loop()
# Mock initialize_repository_for_runtime to return a valid path
mock_initialize_repo.return_value = '/test/dir'
# Mock objects returned by the setup functions
mock_agent = AsyncMock()
mock_create_agent.return_value = mock_agent
mock_runtime = AsyncMock()
mock_runtime.event_stream = MagicMock()
mock_create_runtime.return_value = mock_runtime
mock_controller = AsyncMock()
mock_controller_task = MagicMock()
mock_create_controller.return_value = (mock_controller, mock_controller_task)
# Create a regular MagicMock for memory to avoid coroutine issues
mock_memory = MagicMock()
mock_create_memory.return_value = mock_memory
# Mock the litellm_completion function to raise an AuthenticationError
# This simulates the exact error seen in the user's issue
auth_error_message = (
'litellm.AuthenticationError: AuthenticationError: Litellm_proxyException - '
'Authentication Error, Invalid proxy server token passed. Received API Key = sk-...7hlQ, '
'Key Hash (Token) =e316fa114498880be11f2e236d6f482feee5e324a4a148b98af247eded5290c4. '
'Unable to find token in cache or `LiteLLM_VerificationTokenTable`'
)
mock_litellm_completion.side_effect = AuthenticationError(
message=auth_error_message, llm_provider='litellm_proxy', model='o3'
)
with patch(
'openhands.cli.main.read_prompt_input', new_callable=AsyncMock
) as mock_read_prompt:
# Set up read_prompt_input to return a string that will trigger the command handler
mock_read_prompt.return_value = '/exit'
# Mock handle_commands to return values that will exit the loop
with patch(
'openhands.cli.main.handle_commands', new_callable=AsyncMock
) as mock_handle_commands:
mock_handle_commands.return_value = (
True,
False,
False,
) # close_repl, reload_microagents, new_session_requested
# Mock logger.error to capture the error message
with patch('openhands.core.logger.openhands_logger.error'):
# Run the function with an initial action that will trigger the OpenHands provider
initial_action_content = 'Hello, I need help with a task'
# Run the function
result = await cli.run_session(
loop,
mock_config,
mock_settings_store,
'/test/dir',
initial_action_content,
)
# Check that an event was added to the event stream
mock_runtime.event_stream.add_event.assert_called_once()
call_args = mock_runtime.event_stream.add_event.call_args[0]
assert isinstance(call_args[0], MessageAction)
# The CLI might modify the initial message, so we don't check the exact content
assert call_args[1] == EventSource.USER
# Check that run_agent_until_done was called
mock_run_agent_until_done.assert_called_once()
# Since we're mocking the litellm_completion function to raise an AuthenticationError,
# we can verify that the error was handled by checking that the run_agent_until_done
# function was called and the session was cleaned up properly
# We can't directly check the error message in the test since the logger.error
# method isn't being called in our mocked environment. In a real environment,
# the error would be logged and the user would see the improved error message.
# Check that cleanup_session was called
mock_cleanup_session.assert_called_once()
# Check that the function returns the expected value
assert result is False


@@ -1,416 +0,0 @@
import asyncio
from unittest.mock import MagicMock, call, patch
import pytest
from prompt_toolkit.formatted_text import HTML
from prompt_toolkit.keys import Keys
from openhands.cli.tui import process_agent_pause
from openhands.core.schema import AgentState
from openhands.events import EventSource
from openhands.events.action import ChangeAgentStateAction
from openhands.events.observation import AgentStateChangedObservation
class TestProcessAgentPause:
@pytest.mark.asyncio
@patch('openhands.cli.tui.create_input')
@patch('openhands.cli.tui.print_formatted_text')
async def test_process_agent_pause_ctrl_p(self, mock_print, mock_create_input):
"""Test that process_agent_pause sets the done event when Ctrl+P is pressed."""
# Create the done event
done = asyncio.Event()
# Set up the mock input
mock_input = MagicMock()
mock_create_input.return_value = mock_input
# Mock the context managers
mock_raw_mode = MagicMock()
mock_input.raw_mode.return_value = mock_raw_mode
mock_raw_mode.__enter__ = MagicMock()
mock_raw_mode.__exit__ = MagicMock()
mock_attach = MagicMock()
mock_input.attach.return_value = mock_attach
mock_attach.__enter__ = MagicMock()
mock_attach.__exit__ = MagicMock()
# Capture the keys_ready function
keys_ready_func = None
def fake_attach(callback):
nonlocal keys_ready_func
keys_ready_func = callback
return mock_attach
mock_input.attach.side_effect = fake_attach
# Create a task to run process_agent_pause
task = asyncio.create_task(process_agent_pause(done, event_stream=MagicMock()))
# Give it a moment to start and capture the callback
await asyncio.sleep(0.1)
# Make sure we captured the callback
assert keys_ready_func is not None
# Create a key press that simulates Ctrl+P
key_press = MagicMock()
key_press.key = Keys.ControlP
mock_input.read_keys.return_value = [key_press]
# Manually call the callback to simulate key press
keys_ready_func()
# Verify done was set
assert done.is_set()
# Verify print was called with the pause message
assert mock_print.call_count == 2
assert mock_print.call_args_list[0] == call('')
# Check that the second call contains the pause message HTML
second_call = mock_print.call_args_list[1][0][0]
assert isinstance(second_call, HTML)
assert 'Pausing the agent' in str(second_call)
# Cancel the task
task.cancel()
try:
await task
except asyncio.CancelledError:
pass
class TestCliPauseResumeInRunSession:
@pytest.mark.asyncio
async def test_on_event_async_pause_processing(self):
"""Test that on_event_async processes the pause event when is_paused is set."""
# Create a mock event
event = MagicMock()
# Create mock dependencies
event_stream = MagicMock()
is_paused = asyncio.Event()
reload_microagents = False
config = MagicMock()
# Patch the display_event function
with (
patch('openhands.cli.main.display_event') as mock_display_event,
patch('openhands.cli.main.update_usage_metrics') as mock_update_metrics,
):
# Create a closure to capture the current context
async def test_func():
# Set the pause event
is_paused.set()
# Create a context similar to run_session to call on_event_async
# We're creating a function that mimics the environment of on_event_async
async def on_event_async_test(event):
nonlocal reload_microagents, is_paused
mock_display_event(event, config)
mock_update_metrics(event, usage_metrics=MagicMock())
# Pause the agent if the pause event is set (through Ctrl-P)
if is_paused.is_set():
event_stream.add_event(
ChangeAgentStateAction(AgentState.PAUSED),
EventSource.USER,
)
# The pause event is not cleared here because we want to simulate
# the PAUSED event processing in a future event
# Call on_event_async_test
await on_event_async_test(event)
# Check that event_stream.add_event was called with the correct action
event_stream.add_event.assert_called_once()
args, kwargs = event_stream.add_event.call_args
action, source = args
assert isinstance(action, ChangeAgentStateAction)
assert action.agent_state == AgentState.PAUSED
assert source == EventSource.USER
# Check that is_paused is still set (will be cleared when PAUSED state is processed)
assert is_paused.is_set()
# Run the test function
await test_func()
@pytest.mark.asyncio
async def test_awaiting_user_input_paused_skip(self):
"""Test that when is_paused is set, awaiting user input events do not trigger prompting."""
# Create a mock event with AgentStateChangedObservation
event = MagicMock()
event.observation = AgentStateChangedObservation(
agent_state=AgentState.AWAITING_USER_INPUT, content='Agent awaiting input'
)
# Create mock dependencies
is_paused = asyncio.Event()
reload_microagents = False
# Mock function that would be called if code reaches that point
mock_prompt_task = MagicMock()
# Create a closure to capture the current context
async def test_func():
# Set the pause event
is_paused.set()
# Create a context similar to run_session to call on_event_async
async def on_event_async_test(event):
nonlocal reload_microagents, is_paused
if isinstance(event.observation, AgentStateChangedObservation):
if event.observation.agent_state in [
AgentState.AWAITING_USER_INPUT,
AgentState.FINISHED,
]:
# If the agent is paused, do not prompt for input
if is_paused.is_set():
return
# This code should not be reached if is_paused is set
mock_prompt_task()
# Call on_event_async_test
await on_event_async_test(event)
# Verify that mock_prompt_task was not called
mock_prompt_task.assert_not_called()
# Run the test
await test_func()
@pytest.mark.asyncio
async def test_awaiting_confirmation_paused_skip(self):
"""Test that when is_paused is set, awaiting confirmation events do not trigger prompting."""
# Create a mock event with AgentStateChangedObservation
event = MagicMock()
event.observation = AgentStateChangedObservation(
agent_state=AgentState.AWAITING_USER_CONFIRMATION,
content='Agent awaiting confirmation',
)
# Create mock dependencies
is_paused = asyncio.Event()
# Mock function that would be called if code reaches that point
mock_confirmation = MagicMock()
# Create a closure to capture the current context
async def test_func():
# Set the pause event
is_paused.set()
# Create a context similar to run_session to call on_event_async
async def on_event_async_test(event):
nonlocal is_paused
if isinstance(event.observation, AgentStateChangedObservation):
if (
event.observation.agent_state
== AgentState.AWAITING_USER_CONFIRMATION
):
if is_paused.is_set():
return
# This code should not be reached if is_paused is set
mock_confirmation()
# Call on_event_async_test
await on_event_async_test(event)
# Verify that confirmation function was not called
mock_confirmation.assert_not_called()
# Run the test
await test_func()
class TestCliCommandsPauseResume:
@pytest.mark.asyncio
@patch('openhands.cli.commands.handle_resume_command')
async def test_handle_commands_resume(self, mock_handle_resume):
"""Test that the handle_commands function properly calls handle_resume_command."""
# Import here to avoid circular imports in test
from openhands.cli.commands import handle_commands
# Create mocks
message = '/resume'
event_stream = MagicMock()
usage_metrics = MagicMock()
sid = 'test-session-id'
config = MagicMock()
current_dir = '/test/dir'
settings_store = MagicMock()
agent_state = AgentState.PAUSED
# Mock return value
mock_handle_resume.return_value = (False, False)
# Call handle_commands
(
close_repl,
reload_microagents,
new_session_requested,
_,
) = await handle_commands(
message,
event_stream,
usage_metrics,
sid,
config,
current_dir,
settings_store,
agent_state,
)
# Check that handle_resume_command was called with correct args
mock_handle_resume.assert_called_once_with(message, event_stream, agent_state)
# Check the return values
assert close_repl is False
assert reload_microagents is False
assert new_session_requested is False
class TestAgentStatePauseResume:
@pytest.mark.asyncio
@patch('openhands.cli.main.display_agent_running_message')
@patch('openhands.cli.tui.process_agent_pause')
async def test_agent_running_enables_pause(
self, mock_process_agent_pause, mock_display_message
):
"""Test that when the agent is running, pause functionality is enabled."""
# Create a mock event and event stream
event = MagicMock()
event.observation = AgentStateChangedObservation(
agent_state=AgentState.RUNNING, content='Agent is running'
)
event_stream = MagicMock()
# Create mock dependencies
is_paused = asyncio.Event()
loop = MagicMock()
reload_microagents = False
# Create a closure to capture the current context
async def test_func():
# Create a context similar to run_session to call on_event_async
async def on_event_async_test(event):
nonlocal reload_microagents
if isinstance(event.observation, AgentStateChangedObservation):
if event.observation.agent_state == AgentState.RUNNING:
mock_display_message()
loop.create_task(
mock_process_agent_pause(is_paused, event_stream)
)
# Call on_event_async_test
await on_event_async_test(event)
# Check that display_agent_running_message was called
mock_display_message.assert_called_once()
# Check that loop.create_task was called
loop.create_task.assert_called_once()
# Run the test function
await test_func()
@pytest.mark.asyncio
@patch('openhands.cli.main.display_event')
@patch('openhands.cli.main.update_usage_metrics')
async def test_pause_event_changes_agent_state(
self, mock_update_metrics, mock_display_event
):
"""Test that when is_paused is set, a PAUSED state change event is added to the stream."""
# Create mock dependencies
event = MagicMock()
event_stream = MagicMock()
is_paused = asyncio.Event()
config = MagicMock()
reload_microagents = False
# Set the pause event
is_paused.set()
# Create a closure to capture the current context
async def test_func():
# Create a context similar to run_session to call on_event_async
async def on_event_async_test(event):
nonlocal reload_microagents
mock_display_event(event, config)
mock_update_metrics(event, MagicMock())
# Pause the agent if the pause event is set (through Ctrl-P)
if is_paused.is_set():
event_stream.add_event(
ChangeAgentStateAction(AgentState.PAUSED),
EventSource.USER,
)
is_paused.clear()
# Call the function
await on_event_async_test(event)
# Check that the event_stream.add_event was called with the correct action
event_stream.add_event.assert_called_once()
args, kwargs = event_stream.add_event.call_args
action, source = args
assert isinstance(action, ChangeAgentStateAction)
assert action.agent_state == AgentState.PAUSED
assert source == EventSource.USER
# Check that is_paused was cleared
assert not is_paused.is_set()
# Run the test
await test_func()
@pytest.mark.asyncio
async def test_paused_agent_awaits_input(self):
"""Test that when the agent is paused, it awaits user input."""
# Create mock dependencies
event = MagicMock()
# AgentStateChangedObservation requires a content parameter
event.observation = AgentStateChangedObservation(
agent_state=AgentState.PAUSED, content='Agent state changed to PAUSED'
)
is_paused = asyncio.Event()
# Mock function that would be called for prompting
mock_prompt_task = MagicMock()
# Create a closure to capture the current context
async def test_func():
# Create a simplified version of on_event_async
async def on_event_async_test(event):
nonlocal is_paused
if isinstance(event.observation, AgentStateChangedObservation):
if event.observation.agent_state == AgentState.PAUSED:
is_paused.clear() # Revert the event state before prompting for user input
mock_prompt_task(event.observation.agent_state)
# Set is_paused to test that it gets cleared
is_paused.set()
# Call the function
await on_event_async_test(event)
# Check that is_paused was cleared
assert not is_paused.is_set()
# Check that prompt task was called with the correct state
mock_prompt_task.assert_called_once_with(AgentState.PAUSED)
# Run the test
await test_func()

View File

@@ -1,161 +0,0 @@
"""Tests for CLI Runtime MCP functionality."""
from unittest.mock import MagicMock, patch
import pytest
from openhands.core.config import OpenHandsConfig
from openhands.core.config.mcp_config import (
MCPConfig,
MCPSSEServerConfig,
MCPStdioServerConfig,
)
from openhands.events.action.mcp import MCPAction
from openhands.events.observation import ErrorObservation
from openhands.events.observation.mcp import MCPObservation
from openhands.llm.llm_registry import LLMRegistry
from openhands.runtime.impl.cli.cli_runtime import CLIRuntime
class TestCLIRuntimeMCP:
"""Test MCP functionality in CLI Runtime."""
def setup_method(self):
"""Set up test fixtures."""
self.config = OpenHandsConfig()
self.event_stream = MagicMock()
llm_registry = LLMRegistry(config=OpenHandsConfig())
self.runtime = CLIRuntime(
config=self.config,
event_stream=self.event_stream,
sid='test-session',
llm_registry=llm_registry,
)
@pytest.mark.asyncio
async def test_call_tool_mcp_no_servers_configured(self):
"""Test MCP call with no servers configured."""
# Set up empty MCP config
self.runtime.config.mcp = MCPConfig()
action = MCPAction(name='test_tool', arguments={'arg1': 'value1'})
with patch('sys.platform', 'linux'):
result = await self.runtime.call_tool_mcp(action)
assert isinstance(result, ErrorObservation)
assert 'No MCP servers configured' in result.content
@pytest.mark.asyncio
@patch('openhands.mcp.utils.create_mcp_clients')
async def test_call_tool_mcp_no_clients_created(self, mock_create_clients):
"""Test MCP call when no clients can be created."""
# Set up MCP config with servers
self.runtime.config.mcp = MCPConfig(
sse_servers=[MCPSSEServerConfig(url='http://test.com')]
)
# Mock create_mcp_clients to return empty list
mock_create_clients.return_value = []
action = MCPAction(name='test_tool', arguments={'arg1': 'value1'})
with patch('sys.platform', 'linux'):
result = await self.runtime.call_tool_mcp(action)
assert isinstance(result, ErrorObservation)
assert 'No MCP clients could be created' in result.content
mock_create_clients.assert_called_once()
@pytest.mark.asyncio
@patch('openhands.mcp.utils.create_mcp_clients')
@patch('openhands.mcp.utils.call_tool_mcp')
async def test_call_tool_mcp_success(self, mock_call_tool, mock_create_clients):
"""Test successful MCP tool call."""
# Set up MCP config with servers
self.runtime.config.mcp = MCPConfig(
sse_servers=[MCPSSEServerConfig(url='http://test.com')],
stdio_servers=[MCPStdioServerConfig(name='test-stdio', command='python')],
)
# Mock successful client creation
mock_client = MagicMock()
mock_create_clients.return_value = [mock_client]
# Mock successful tool call
expected_observation = MCPObservation(
content='{"result": "success"}',
name='test_tool',
arguments={'arg1': 'value1'},
)
mock_call_tool.return_value = expected_observation
action = MCPAction(name='test_tool', arguments={'arg1': 'value1'})
with patch('sys.platform', 'linux'):
result = await self.runtime.call_tool_mcp(action)
assert result == expected_observation
mock_create_clients.assert_called_once_with(
self.runtime.config.mcp.sse_servers,
self.runtime.config.mcp.shttp_servers,
self.runtime.sid,
self.runtime.config.mcp.stdio_servers,
)
mock_call_tool.assert_called_once_with([mock_client], action)
@pytest.mark.asyncio
@patch('openhands.mcp.utils.create_mcp_clients')
async def test_call_tool_mcp_exception_handling(self, mock_create_clients):
"""Test exception handling in MCP tool call."""
# Set up MCP config with servers
self.runtime.config.mcp = MCPConfig(
sse_servers=[MCPSSEServerConfig(url='http://test.com')]
)
# Mock create_mcp_clients to raise an exception
mock_create_clients.side_effect = Exception('Connection error')
action = MCPAction(name='test_tool', arguments={'arg1': 'value1'})
with patch('sys.platform', 'linux'):
result = await self.runtime.call_tool_mcp(action)
assert isinstance(result, ErrorObservation)
assert 'Error executing MCP tool test_tool' in result.content
assert 'Connection error' in result.content
def test_get_mcp_config_basic(self):
"""Test basic MCP config retrieval."""
# Set up MCP config
expected_config = MCPConfig(
sse_servers=[MCPSSEServerConfig(url='http://test.com')],
stdio_servers=[MCPStdioServerConfig(name='test-stdio', command='python')],
)
self.runtime.config.mcp = expected_config
with patch('sys.platform', 'linux'):
result = self.runtime.get_mcp_config()
assert result == expected_config
def test_get_mcp_config_with_extra_stdio_servers(self):
"""Test MCP config with extra stdio servers."""
# Set up initial MCP config
initial_stdio_server = MCPStdioServerConfig(name='initial', command='python')
self.runtime.config.mcp = MCPConfig(stdio_servers=[initial_stdio_server])
# Add extra stdio servers
extra_servers = [
MCPStdioServerConfig(name='extra1', command='node'),
MCPStdioServerConfig(name='extra2', command='java'),
]
with patch('sys.platform', 'linux'):
result = self.runtime.get_mcp_config(extra_stdio_servers=extra_servers)
# Should have all three servers
assert len(result.stdio_servers) == 3
assert initial_stdio_server in result.stdio_servers
assert extra_servers[0] in result.stdio_servers
assert extra_servers[1] in result.stdio_servers

File diff suppressed because it is too large

View File

@@ -1,90 +0,0 @@
import asyncio
import unittest
from unittest.mock import AsyncMock, MagicMock, patch
from openhands.cli.main import run_setup_flow
from openhands.core.config import OpenHandsConfig
from openhands.storage.settings.file_settings_store import FileSettingsStore
class TestCLISetupFlow(unittest.TestCase):
"""Test the CLI setup flow."""
@patch('openhands.cli.settings.modify_llm_settings_basic')
@patch('openhands.cli.main.print_formatted_text')
async def test_run_setup_flow(self, mock_print, mock_modify_settings):
"""Test that the setup flow calls the modify_llm_settings_basic function."""
# Setup
config = MagicMock(spec=OpenHandsConfig)
settings_store = MagicMock(spec=FileSettingsStore)
mock_modify_settings.return_value = None
# Mock settings_store.load to return a settings object
settings = MagicMock()
settings_store.load = AsyncMock(return_value=settings)
# Execute
result = await run_setup_flow(config, settings_store)
# Verify
mock_modify_settings.assert_called_once_with(config, settings_store)
# Verify that print_formatted_text was called at least twice (for welcome message and instructions)
self.assertGreaterEqual(mock_print.call_count, 2)
# Verify that the function returns True when settings are found
self.assertTrue(result)
@patch('openhands.cli.main.print_formatted_text')
@patch('openhands.cli.main.run_setup_flow')
@patch('openhands.cli.main.FileSettingsStore.get_instance')
@patch('openhands.cli.main.setup_config_from_args')
@patch('openhands.cli.main.parse_arguments')
async def test_main_calls_setup_flow_when_no_settings(
self,
mock_parse_args,
mock_setup_config,
mock_get_instance,
mock_run_setup_flow,
mock_print,
):
"""Test that main calls run_setup_flow when no settings are found and exits."""
# Setup
mock_args = MagicMock()
mock_config = MagicMock(spec=OpenHandsConfig)
mock_settings_store = AsyncMock(spec=FileSettingsStore)
# Settings load returns None (no settings)
mock_settings_store.load = AsyncMock(return_value=None)
mock_parse_args.return_value = mock_args
mock_setup_config.return_value = mock_config
mock_get_instance.return_value = mock_settings_store
# Mock run_setup_flow to return True (settings configured successfully)
mock_run_setup_flow.return_value = True
# Import here to avoid circular imports during patching
from openhands.cli.main import main
# Execute
loop = asyncio.get_event_loop()
await main(loop)
# Verify
mock_run_setup_flow.assert_called_once_with(mock_config, mock_settings_store)
# Verify that load was called once (before setup)
self.assertEqual(mock_settings_store.load.call_count, 1)
# Verify that print_formatted_text was called for success messages
self.assertGreaterEqual(mock_print.call_count, 2)
def run_async_test(coro):
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
try:
return loop.run_until_complete(coro)
finally:
loop.close()
if __name__ == '__main__':
unittest.main()

View File

@@ -1,130 +0,0 @@
"""Test warning suppression functionality in CLI mode."""
import warnings
from io import StringIO
from unittest.mock import patch
from openhands.cli.suppress_warnings import suppress_cli_warnings
class TestWarningSuppressionCLI:
"""Test cases for CLI warning suppression."""
def test_suppress_pydantic_warnings(self):
"""Test that Pydantic serialization warnings are suppressed."""
# Apply suppression
suppress_cli_warnings()
# Capture stderr to check if warnings are printed
captured_output = StringIO()
with patch('sys.stderr', captured_output):
# Trigger Pydantic serialization warning
warnings.warn(
'Pydantic serializer warnings: PydanticSerializationUnexpectedValue',
UserWarning,
stacklevel=2,
)
# Should be suppressed (no output to stderr)
output = captured_output.getvalue()
assert 'Pydantic serializer warnings' not in output
def test_suppress_deprecated_method_warnings(self):
"""Test that deprecated method warnings are suppressed."""
# Apply suppression
suppress_cli_warnings()
# Capture stderr to check if warnings are printed
captured_output = StringIO()
with patch('sys.stderr', captured_output):
# Trigger deprecated method warning
warnings.warn(
'Call to deprecated method get_events. (Use search_events instead)',
DeprecationWarning,
stacklevel=2,
)
# Should be suppressed (no output to stderr)
output = captured_output.getvalue()
assert 'deprecated method' not in output
def test_suppress_expected_fields_warnings(self):
"""Test that expected fields warnings are suppressed."""
# Apply suppression
suppress_cli_warnings()
# Capture stderr to check if warnings are printed
captured_output = StringIO()
with patch('sys.stderr', captured_output):
# Trigger expected fields warning
warnings.warn(
'Expected 9 fields but got 5: Expected `Message`',
UserWarning,
stacklevel=2,
)
# Should be suppressed (no output to stderr)
output = captured_output.getvalue()
assert 'Expected 9 fields' not in output
def test_regular_warnings_not_suppressed(self):
"""Test that regular warnings are NOT suppressed."""
# Apply suppression
suppress_cli_warnings()
# Capture stderr to check if warnings are printed
captured_output = StringIO()
with patch('sys.stderr', captured_output):
# Trigger a regular warning that should NOT be suppressed
warnings.warn(
'This is a regular warning that should appear',
UserWarning,
stacklevel=2,
)
# Should NOT be suppressed (should appear in stderr)
output = captured_output.getvalue()
assert 'regular warning' in output
def test_module_import_applies_suppression(self):
"""Test that importing the module automatically applies suppression."""
# Reset warnings filters
warnings.resetwarnings()
# Re-import the module to trigger suppression again
import importlib
import openhands.cli.suppress_warnings
importlib.reload(openhands.cli.suppress_warnings)
# Capture stderr to check if warnings are printed
captured_output = StringIO()
with patch('sys.stderr', captured_output):
warnings.warn(
'Pydantic serializer warnings: test', UserWarning, stacklevel=2
)
# Should be suppressed (no output to stderr)
output = captured_output.getvalue()
assert 'Pydantic serializer warnings' not in output
def test_warning_filters_are_applied(self):
"""Test that warning filters are properly applied."""
# Reset warnings filters
warnings.resetwarnings()
# Apply suppression
suppress_cli_warnings()
# Check that filters are in place
filters = warnings.filters
# Should have filters for the specific warning patterns
filter_messages = [f[1] for f in filters if f[1] is not None]
# Check that our specific patterns are in the filters
assert any(
'Pydantic serializer warnings' in str(msg) for msg in filter_messages
)
assert any('deprecated method' in str(msg) for msg in filter_messages)

View File

@@ -1,246 +0,0 @@
"""Tests for CLI thought display order fix.
This ensures that agent thoughts are displayed before commands, not after.
"""
from unittest.mock import MagicMock, patch
from openhands.cli.tui import display_event
from openhands.core.config import OpenHandsConfig
from openhands.events import EventSource
from openhands.events.action import Action, ActionConfirmationStatus, CmdRunAction
from openhands.events.action.message import MessageAction
class TestThoughtDisplayOrder:
"""Test that thoughts are displayed in the correct order relative to commands."""
@patch('openhands.cli.tui.display_thought_if_new')
@patch('openhands.cli.tui.display_command')
def test_cmd_run_action_thought_before_command(
self, mock_display_command, mock_display_thought_if_new
):
"""Test that for CmdRunAction, thought is displayed before command."""
config = MagicMock(spec=OpenHandsConfig)
# Create a CmdRunAction with a thought awaiting confirmation
cmd_action = CmdRunAction(
command='npm install',
thought='I need to install the dependencies first before running the tests.',
)
cmd_action.confirmation_state = ActionConfirmationStatus.AWAITING_CONFIRMATION
display_event(cmd_action, config)
# Verify that display_thought_if_new (for thought) was called before display_command
mock_display_thought_if_new.assert_called_once_with(
'I need to install the dependencies first before running the tests.'
)
mock_display_command.assert_called_once_with(cmd_action)
# Check the call order by examining the mock call history
all_calls = []
all_calls.extend(
[
('display_thought_if_new', call)
for call in mock_display_thought_if_new.call_args_list
]
)
all_calls.extend(
[('display_command', call) for call in mock_display_command.call_args_list]
)
# Note: call_args_list does not record ordering across different mocks, so this
# test only asserts that both helpers were invoked; the actual call order is
# verified in TestThoughtDisplayIntegration below
assert mock_display_thought_if_new.called
assert mock_display_command.called
@patch('openhands.cli.tui.display_thought_if_new')
@patch('openhands.cli.tui.display_command')
def test_cmd_run_action_no_thought(
self, mock_display_command, mock_display_thought_if_new
):
"""Test that CmdRunAction without thought only displays command."""
config = MagicMock(spec=OpenHandsConfig)
# Create a CmdRunAction without a thought
cmd_action = CmdRunAction(command='npm install')
cmd_action.confirmation_state = ActionConfirmationStatus.AWAITING_CONFIRMATION
display_event(cmd_action, config)
# Verify that display_thought_if_new was not called (no thought)
mock_display_thought_if_new.assert_not_called()
mock_display_command.assert_called_once_with(cmd_action)
@patch('openhands.cli.tui.display_thought_if_new')
@patch('openhands.cli.tui.display_command')
def test_cmd_run_action_empty_thought(
self, mock_display_command, mock_display_thought_if_new
):
"""Test that CmdRunAction with empty thought only displays command."""
config = MagicMock(spec=OpenHandsConfig)
# Create a CmdRunAction with empty thought
cmd_action = CmdRunAction(command='npm install', thought='')
cmd_action.confirmation_state = ActionConfirmationStatus.AWAITING_CONFIRMATION
display_event(cmd_action, config)
# Verify that display_thought_if_new was not called (empty thought)
mock_display_thought_if_new.assert_not_called()
mock_display_command.assert_called_once_with(cmd_action)
@patch('openhands.cli.tui.display_thought_if_new')
@patch('openhands.cli.tui.display_command')
@patch('openhands.cli.tui.initialize_streaming_output')
def test_cmd_run_action_confirmed_no_display(
self, mock_init_streaming, mock_display_command, mock_display_thought_if_new
):
"""Test that confirmed CmdRunAction doesn't display command again but initializes streaming."""
config = MagicMock(spec=OpenHandsConfig)
# Create a confirmed CmdRunAction with thought
cmd_action = CmdRunAction(
command='npm install',
thought='I need to install the dependencies first before running the tests.',
)
cmd_action.confirmation_state = ActionConfirmationStatus.CONFIRMED
display_event(cmd_action, config)
# Verify that thought is still displayed
mock_display_thought_if_new.assert_called_once_with(
'I need to install the dependencies first before running the tests.'
)
# But command should not be displayed again (already shown when awaiting confirmation)
mock_display_command.assert_not_called()
# Streaming should be initialized
mock_init_streaming.assert_called_once()
@patch('openhands.cli.tui.display_thought_if_new')
def test_other_action_thought_display(self, mock_display_thought_if_new):
"""Test that other Action types still display thoughts normally."""
config = MagicMock(spec=OpenHandsConfig)
# Create a generic Action with thought
action = Action()
action.thought = 'This is a thought for a generic action.'
display_event(action, config)
# Verify that thought is displayed
mock_display_thought_if_new.assert_called_once_with(
'This is a thought for a generic action.'
)
@patch('openhands.cli.tui.display_message')
def test_other_action_final_thought_display(self, mock_display_message):
"""Test that other Action types display final thoughts as agent messages."""
config = MagicMock(spec=OpenHandsConfig)
# Create a generic Action with final thought
action = Action()
action.final_thought = 'This is a final thought.'
display_event(action, config)
# Verify that final thought is displayed as an agent message
mock_display_message.assert_called_once_with(
'This is a final thought.', is_agent_message=True
)
@patch('openhands.cli.tui.display_thought_if_new')
def test_message_action_from_agent(self, mock_display_thought_if_new):
"""Test that MessageAction from agent is displayed."""
config = MagicMock(spec=OpenHandsConfig)
# Create a MessageAction from agent
message_action = MessageAction(content='Hello from agent')
message_action._source = EventSource.AGENT
display_event(message_action, config)
# Verify that agent message is displayed with agent styling
mock_display_thought_if_new.assert_called_once_with(
'Hello from agent', is_agent_message=True
)
@patch('openhands.cli.tui.display_thought_if_new')
def test_message_action_from_user_not_displayed(self, mock_display_thought_if_new):
"""Test that MessageAction from user is not displayed."""
config = MagicMock(spec=OpenHandsConfig)
# Create a MessageAction from user
message_action = MessageAction(content='Hello from user')
message_action._source = EventSource.USER
display_event(message_action, config)
# Verify that message is not displayed (only agent messages are shown)
mock_display_thought_if_new.assert_not_called()
@patch('openhands.cli.tui.display_thought_if_new')
@patch('openhands.cli.tui.display_command')
def test_cmd_run_action_with_both_thoughts(
self, mock_display_command, mock_display_thought_if_new
):
"""Test CmdRunAction with both thought and final_thought."""
config = MagicMock(spec=OpenHandsConfig)
# Create a CmdRunAction with both thoughts
cmd_action = CmdRunAction(command='npm install', thought='Initial thought')
cmd_action.final_thought = 'Final thought'
cmd_action.confirmation_state = ActionConfirmationStatus.AWAITING_CONFIRMATION
display_event(cmd_action, config)
# For CmdRunAction, only the regular thought should be displayed
# (final_thought is handled by the general Action case, but CmdRunAction is handled first)
mock_display_thought_if_new.assert_called_once_with('Initial thought')
mock_display_command.assert_called_once_with(cmd_action)
class TestThoughtDisplayIntegration:
"""Integration tests for the thought display order fix."""
def test_realistic_scenario_order(self):
"""Test a realistic scenario to ensure proper order."""
config = MagicMock(spec=OpenHandsConfig)
# Track the order of calls
call_order = []
def track_display_message(message, is_agent_message=False):
call_order.append(f'THOUGHT: {message}')
def track_display_command(event):
call_order.append(f'COMMAND: {event.command}')
with (
patch(
'openhands.cli.tui.display_message', side_effect=track_display_message
),
patch(
'openhands.cli.tui.display_command', side_effect=track_display_command
),
):
# Create the scenario from the issue
cmd_action = CmdRunAction(
command='npm install',
thought='I need to install the dependencies first before running the tests.',
)
cmd_action.confirmation_state = (
ActionConfirmationStatus.AWAITING_CONFIRMATION
)
display_event(cmd_action, config)
# Verify the correct order
expected_order = [
'THOUGHT: I need to install the dependencies first before running the tests.',
'COMMAND: npm install',
]
assert call_order == expected_order, (
f'Expected {expected_order}, but got {call_order}'
)

View File

@@ -1,513 +0,0 @@
from unittest.mock import MagicMock, Mock, patch
import pytest
from openhands.cli.tui import (
CustomDiffLexer,
UsageMetrics,
UserCancelledError,
_render_basic_markdown,
display_banner,
display_command,
display_event,
display_mcp_action,
display_mcp_errors,
display_mcp_observation,
display_message,
display_runtime_initialization_message,
display_shutdown_message,
display_status,
display_usage_metrics,
display_welcome_message,
get_session_duration,
read_confirmation_input,
)
from openhands.core.config import OpenHandsConfig
from openhands.events import EventSource
from openhands.events.action import (
Action,
ActionConfirmationStatus,
CmdRunAction,
MCPAction,
MessageAction,
)
from openhands.events.observation import (
CmdOutputObservation,
FileEditObservation,
FileReadObservation,
MCPObservation,
)
from openhands.llm.metrics import Metrics
from openhands.mcp.error_collector import MCPError
class TestDisplayFunctions:
@patch('openhands.cli.tui.print_formatted_text')
def test_display_runtime_initialization_message_local(self, mock_print):
display_runtime_initialization_message('local')
assert mock_print.call_count == 3
# Check the second call has the local runtime message
args, kwargs = mock_print.call_args_list[1]
assert 'Starting local runtime' in str(args[0])
@patch('openhands.cli.tui.print_formatted_text')
def test_display_runtime_initialization_message_docker(self, mock_print):
display_runtime_initialization_message('docker')
assert mock_print.call_count == 3
# Check the second call has the docker runtime message
args, kwargs = mock_print.call_args_list[1]
assert 'Starting Docker runtime' in str(args[0])
@patch('openhands.cli.tui.print_formatted_text')
def test_display_banner(self, mock_print):
session_id = 'test-session-id'
display_banner(session_id)
# Verify banner calls
assert mock_print.call_count >= 3
# Check the last call has the session ID
args, kwargs = mock_print.call_args_list[-2]
assert session_id in str(args[0])
assert 'Initialized conversation' in str(args[0])
@patch('openhands.cli.tui.print_formatted_text')
def test_display_welcome_message(self, mock_print):
display_welcome_message()
assert mock_print.call_count == 2
# Check the first call contains the welcome message
args, kwargs = mock_print.call_args_list[0]
assert "Let's start building" in str(args[0])
@patch('openhands.cli.tui.print_formatted_text')
def test_display_welcome_message_with_message(self, mock_print):
message = 'Test message'
display_welcome_message(message)
assert mock_print.call_count == 2
# Check the first call contains the welcome message
args, kwargs = mock_print.call_args_list[0]
message_text = str(args[0])
assert "Let's start building" in message_text
# Check the second call contains the custom message
args, kwargs = mock_print.call_args_list[1]
message_text = str(args[0])
assert 'Test message' in message_text
assert 'Type /help for help' in message_text
@patch('openhands.cli.tui.print_formatted_text')
def test_display_welcome_message_without_message(self, mock_print):
display_welcome_message()
assert mock_print.call_count == 2
# Check the first call contains the welcome message
args, kwargs = mock_print.call_args_list[0]
message_text = str(args[0])
assert "Let's start building" in message_text
# Check the second call contains the default message
args, kwargs = mock_print.call_args_list[1]
message_text = str(args[0])
assert 'What do you want to build?' in message_text
assert 'Type /help for help' in message_text
def test_display_event_message_action(self):
config = MagicMock(spec=OpenHandsConfig)
message = MessageAction(content='Test message')
message._source = EventSource.AGENT
# Directly test the function without mocking
display_event(message, config)
@patch('openhands.cli.tui.display_command')
def test_display_event_cmd_action(self, mock_display_command):
config = MagicMock(spec=OpenHandsConfig)
# Test that commands awaiting confirmation are displayed
cmd_action = CmdRunAction(command='echo test')
cmd_action.confirmation_state = ActionConfirmationStatus.AWAITING_CONFIRMATION
display_event(cmd_action, config)
mock_display_command.assert_called_once_with(cmd_action)
@patch('openhands.cli.tui.display_command')
@patch('openhands.cli.tui.initialize_streaming_output')
def test_display_event_cmd_action_confirmed(
self, mock_init_streaming, mock_display_command
):
config = MagicMock(spec=OpenHandsConfig)
# Test that confirmed commands don't display the command but do initialize streaming
cmd_action = CmdRunAction(command='echo test')
cmd_action.confirmation_state = ActionConfirmationStatus.CONFIRMED
display_event(cmd_action, config)
# Command should not be displayed (since it was already shown when awaiting confirmation)
mock_display_command.assert_not_called()
# But streaming should be initialized
mock_init_streaming.assert_called_once()
@patch('openhands.cli.tui.display_command_output')
def test_display_event_cmd_output(self, mock_display_output):
config = MagicMock(spec=OpenHandsConfig)
cmd_output = CmdOutputObservation(content='Test output', command='echo test')
display_event(cmd_output, config)
mock_display_output.assert_called_once_with('Test output')
@patch('openhands.cli.tui.display_file_edit')
def test_display_event_file_edit_observation(self, mock_display_file_edit):
config = MagicMock(spec=OpenHandsConfig)
file_edit_obs = FileEditObservation(path='test.py', content="print('hello')")
display_event(file_edit_obs, config)
mock_display_file_edit.assert_called_once_with(file_edit_obs)
@patch('openhands.cli.tui.display_file_read')
def test_display_event_file_read(self, mock_display_file_read):
config = MagicMock(spec=OpenHandsConfig)
file_read = FileReadObservation(path='test.py', content="print('hello')")
display_event(file_read, config)
mock_display_file_read.assert_called_once_with(file_read)
def test_display_event_thought(self):
config = MagicMock(spec=OpenHandsConfig)
action = Action()
action.thought = 'Thinking about this...'
# Directly test the function without mocking
display_event(action, config)
@patch('openhands.cli.tui.display_mcp_action')
def test_display_event_mcp_action(self, mock_display_mcp_action):
config = MagicMock(spec=OpenHandsConfig)
mcp_action = MCPAction(name='test_tool', arguments={'param': 'value'})
display_event(mcp_action, config)
mock_display_mcp_action.assert_called_once_with(mcp_action)
@patch('openhands.cli.tui.display_mcp_observation')
def test_display_event_mcp_observation(self, mock_display_mcp_observation):
config = MagicMock(spec=OpenHandsConfig)
mcp_observation = MCPObservation(
content='Tool result', name='test_tool', arguments={'param': 'value'}
)
display_event(mcp_observation, config)
mock_display_mcp_observation.assert_called_once_with(mcp_observation)
@patch('openhands.cli.tui.print_container')
def test_display_mcp_action(self, mock_print_container):
mcp_action = MCPAction(name='test_tool', arguments={'param': 'value'})
display_mcp_action(mcp_action)
mock_print_container.assert_called_once()
container = mock_print_container.call_args[0][0]
assert 'test_tool' in container.body.text
assert 'param' in container.body.text
@patch('openhands.cli.tui.print_container')
def test_display_mcp_action_no_args(self, mock_print_container):
mcp_action = MCPAction(name='test_tool')
display_mcp_action(mcp_action)
mock_print_container.assert_called_once()
container = mock_print_container.call_args[0][0]
assert 'test_tool' in container.body.text
assert 'Arguments' not in container.body.text
@patch('openhands.cli.tui.print_container')
def test_display_mcp_observation(self, mock_print_container):
mcp_observation = MCPObservation(
content='Tool result', name='test_tool', arguments={'param': 'value'}
)
display_mcp_observation(mcp_observation)
mock_print_container.assert_called_once()
container = mock_print_container.call_args[0][0]
assert 'test_tool' in container.body.text
assert 'Tool result' in container.body.text
@patch('openhands.cli.tui.print_container')
def test_display_mcp_observation_no_content(self, mock_print_container):
mcp_observation = MCPObservation(content='', name='test_tool')
display_mcp_observation(mcp_observation)
mock_print_container.assert_called_once()
container = mock_print_container.call_args[0][0]
assert 'No output' in container.body.text
@patch('openhands.cli.tui.print_formatted_text')
def test_display_message(self, mock_print):
message = 'Test message'
display_message(message)
mock_print.assert_called()
args, kwargs = mock_print.call_args
assert message in str(args[0])
@patch('openhands.cli.tui.print_container')
def test_display_command_awaiting_confirmation(self, mock_print_container):
cmd_action = CmdRunAction(command='echo test')
cmd_action.confirmation_state = ActionConfirmationStatus.AWAITING_CONFIRMATION
display_command(cmd_action)
mock_print_container.assert_called_once()
container = mock_print_container.call_args[0][0]
assert 'echo test' in container.body.text
class TestInteractiveCommandFunctions:
@patch('openhands.cli.tui.print_container')
def test_display_usage_metrics(self, mock_print_container):
metrics = UsageMetrics()
metrics.total_cost = 1.25
metrics.total_input_tokens = 1000
metrics.total_output_tokens = 2000
display_usage_metrics(metrics)
mock_print_container.assert_called_once()
def test_get_session_duration(self):
import time
current_time = time.time()
one_hour_ago = current_time - 3600
# Test for a 1-hour session
duration = get_session_duration(one_hour_ago)
assert '1h' in duration
assert '0m' in duration
assert '0s' in duration
@patch('openhands.cli.tui.print_formatted_text')
@patch('openhands.cli.tui.get_session_duration')
def test_display_shutdown_message(self, mock_get_duration, mock_print):
mock_get_duration.return_value = '1 hour 5 minutes'
metrics = UsageMetrics()
metrics.total_cost = 1.25
session_id = 'test-session-id'
display_shutdown_message(metrics, session_id)
assert mock_print.call_count >= 3 # At least 3 print calls
assert mock_get_duration.call_count == 1
@patch('openhands.cli.tui.display_usage_metrics')
def test_display_status(self, mock_display_metrics):
metrics = UsageMetrics()
session_id = 'test-session-id'
display_status(metrics, session_id)
mock_display_metrics.assert_called_once_with(metrics)
class TestCustomDiffLexer:
def test_custom_diff_lexer_plus_line(self):
lexer = CustomDiffLexer()
document = Mock()
document.lines = ['+added line']
line_style = lexer.lex_document(document)(0)
assert line_style[0][0] == 'ansigreen' # Green for added lines
assert line_style[0][1] == '+added line'
def test_custom_diff_lexer_minus_line(self):
lexer = CustomDiffLexer()
document = Mock()
document.lines = ['-removed line']
line_style = lexer.lex_document(document)(0)
assert line_style[0][0] == 'ansired' # Red for removed lines
assert line_style[0][1] == '-removed line'
def test_custom_diff_lexer_metadata_line(self):
lexer = CustomDiffLexer()
document = Mock()
document.lines = ['[Existing file]']
line_style = lexer.lex_document(document)(0)
assert line_style[0][0] == 'bold' # Bold for metadata lines
assert line_style[0][1] == '[Existing file]'
def test_custom_diff_lexer_normal_line(self):
lexer = CustomDiffLexer()
document = Mock()
document.lines = ['normal line']
line_style = lexer.lex_document(document)(0)
assert line_style[0][0] == '' # Default style for other lines
assert line_style[0][1] == 'normal line'
class TestUsageMetrics:
def test_usage_metrics_initialization(self):
metrics = UsageMetrics()
# Only test the attributes that are actually initialized
assert isinstance(metrics.metrics, Metrics)
assert metrics.session_init_time > 0 # Should have a valid timestamp
class TestUserCancelledError:
def test_user_cancelled_error(self):
error = UserCancelledError()
assert isinstance(error, Exception)
class TestReadConfirmationInput:
@pytest.mark.asyncio
@patch('openhands.cli.tui.cli_confirm')
async def test_read_confirmation_input_yes(self, mock_confirm):
mock_confirm.return_value = 0 # user picked first menu item
cfg = MagicMock() # <- no spec for simplicity
cfg.cli = MagicMock(vi_mode=False)
result = await read_confirmation_input(config=cfg, security_risk='LOW')
assert result == 'yes'
@pytest.mark.asyncio
@patch('openhands.cli.tui.cli_confirm')
async def test_read_confirmation_input_no(self, mock_confirm):
mock_confirm.return_value = 1 # user picked second menu item
cfg = MagicMock() # <- no spec for simplicity
cfg.cli = MagicMock(vi_mode=False)
result = await read_confirmation_input(config=cfg, security_risk='MEDIUM')
assert result == 'no'
@pytest.mark.asyncio
@patch('openhands.cli.tui.cli_confirm')
async def test_read_confirmation_input_smart(self, mock_confirm):
mock_confirm.return_value = 2 # user picked third menu item
class TestMarkdownRendering:
def test_empty_string(self):
assert _render_basic_markdown('') == ''
def test_plain_text(self):
assert _render_basic_markdown('hello world') == 'hello world'
def test_bold(self):
assert _render_basic_markdown('**bold**') == '<b>bold</b>'
def test_underline(self):
assert _render_basic_markdown('__under__') == '<u>under</u>'
def test_combined(self):
assert (
_render_basic_markdown('mix **bold** and __under__ here')
== 'mix <b>bold</b> and <u>under</u> here'
)
def test_html_is_escaped(self):
assert _render_basic_markdown('<script>alert(1)</script>') == (
'&lt;script&gt;alert(1)&lt;/script&gt;'
)
def test_bold_with_special_chars(self):
assert _render_basic_markdown('**a < b & c > d**') == (
'<b>a &lt; b &amp; c &gt; d</b>'
)
"""Tests for CLI TUI MCP functionality."""
class TestMCPTUIDisplay:
"""Test MCP TUI display functions."""
@patch('openhands.cli.tui.print_container')
def test_display_mcp_action_with_arguments(self, mock_print_container):
"""Test displaying MCP action with arguments."""
mcp_action = MCPAction(
name='test_tool', arguments={'param1': 'value1', 'param2': 42}
)
display_mcp_action(mcp_action)
mock_print_container.assert_called_once()
container = mock_print_container.call_args[0][0]
assert 'test_tool' in container.body.text
assert 'param1' in container.body.text
assert 'value1' in container.body.text
@patch('openhands.cli.tui.print_container')
def test_display_mcp_observation_with_content(self, mock_print_container):
"""Test displaying MCP observation with content."""
mcp_observation = MCPObservation(
content='Tool execution successful',
name='test_tool',
arguments={'param': 'value'},
)
display_mcp_observation(mcp_observation)
mock_print_container.assert_called_once()
container = mock_print_container.call_args[0][0]
assert 'test_tool' in container.body.text
assert 'Tool execution successful' in container.body.text
@patch('openhands.cli.tui.print_formatted_text')
@patch('openhands.cli.tui.mcp_error_collector')
def test_display_mcp_errors_no_errors(self, mock_collector, mock_print):
"""Test displaying MCP errors when none exist."""
mock_collector.get_errors.return_value = []
display_mcp_errors()
mock_print.assert_called_once()
call_args = mock_print.call_args[0][0]
assert 'No MCP errors detected' in str(call_args)
@patch('openhands.cli.tui.print_container')
@patch('openhands.cli.tui.print_formatted_text')
@patch('openhands.cli.tui.mcp_error_collector')
def test_display_mcp_errors_with_errors(
self, mock_collector, mock_print, mock_print_container
):
"""Test displaying MCP errors when some exist."""
# Create mock errors
error1 = MCPError(
timestamp=1234567890.0,
server_name='test-server-1',
server_type='stdio',
error_message='Connection failed',
exception_details='Socket timeout',
)
error2 = MCPError(
timestamp=1234567891.0,
server_name='test-server-2',
server_type='sse',
error_message='Server unreachable',
)
mock_collector.get_errors.return_value = [error1, error2]
display_mcp_errors()
# Should print error count header
assert mock_print.call_count >= 1
header_call = mock_print.call_args_list[0][0][0]
assert '2 MCP error(s) detected' in str(header_call)
# Should print containers for each error
assert mock_print_container.call_count == 2

View File

@@ -1,473 +0,0 @@
from pathlib import Path
from unittest.mock import MagicMock, PropertyMock, mock_open, patch
import toml
from openhands.cli.tui import UsageMetrics
from openhands.cli.utils import (
add_local_config_trusted_dir,
extract_model_and_provider,
get_local_config_trusted_dirs,
is_number,
organize_models_and_providers,
read_file,
split_is_actually_version,
update_usage_metrics,
write_to_file,
)
from openhands.events.event import Event
from openhands.llm.metrics import Metrics, TokenUsage
class TestGetLocalConfigTrustedDirs:
@patch('openhands.cli.utils._LOCAL_CONFIG_FILE_PATH')
def test_config_file_does_not_exist(self, mock_config_path):
mock_config_path.exists.return_value = False
result = get_local_config_trusted_dirs()
assert result == []
mock_config_path.exists.assert_called_once()
@patch('openhands.cli.utils._LOCAL_CONFIG_FILE_PATH')
@patch('builtins.open', new_callable=mock_open, read_data='invalid toml')
@patch(
'openhands.cli.utils.toml.load',
side_effect=toml.TomlDecodeError('error', 'doc', 0),
)
def test_config_file_invalid_toml(
self, mock_toml_load, mock_open_file, mock_config_path
):
mock_config_path.exists.return_value = True
result = get_local_config_trusted_dirs()
assert result == []
mock_config_path.exists.assert_called_once()
mock_open_file.assert_called_once_with(mock_config_path, 'r')
mock_toml_load.assert_called_once()
@patch('openhands.cli.utils._LOCAL_CONFIG_FILE_PATH')
@patch(
'builtins.open',
new_callable=mock_open,
read_data=toml.dumps({'sandbox': {'trusted_dirs': ['/path/one']}}),
)
@patch('openhands.cli.utils.toml.load')
def test_config_file_valid(self, mock_toml_load, mock_open_file, mock_config_path):
mock_config_path.exists.return_value = True
mock_toml_load.return_value = {'sandbox': {'trusted_dirs': ['/path/one']}}
result = get_local_config_trusted_dirs()
assert result == ['/path/one']
mock_config_path.exists.assert_called_once()
mock_open_file.assert_called_once_with(mock_config_path, 'r')
mock_toml_load.assert_called_once()
@patch('openhands.cli.utils._LOCAL_CONFIG_FILE_PATH')
@patch(
'builtins.open',
new_callable=mock_open,
read_data=toml.dumps({'other_section': {}}),
)
@patch('openhands.cli.utils.toml.load')
def test_config_file_missing_sandbox(
self, mock_toml_load, mock_open_file, mock_config_path
):
mock_config_path.exists.return_value = True
mock_toml_load.return_value = {'other_section': {}}
result = get_local_config_trusted_dirs()
assert result == []
mock_config_path.exists.assert_called_once()
mock_open_file.assert_called_once_with(mock_config_path, 'r')
mock_toml_load.assert_called_once()
@patch('openhands.cli.utils._LOCAL_CONFIG_FILE_PATH')
@patch(
'builtins.open',
new_callable=mock_open,
read_data=toml.dumps({'sandbox': {'other_key': []}}),
)
@patch('openhands.cli.utils.toml.load')
def test_config_file_missing_trusted_dirs(
self, mock_toml_load, mock_open_file, mock_config_path
):
mock_config_path.exists.return_value = True
mock_toml_load.return_value = {'sandbox': {'other_key': []}}
result = get_local_config_trusted_dirs()
assert result == []
mock_config_path.exists.assert_called_once()
mock_open_file.assert_called_once_with(mock_config_path, 'r')
mock_toml_load.assert_called_once()
class TestAddLocalConfigTrustedDir:
@patch('openhands.cli.utils._LOCAL_CONFIG_FILE_PATH')
@patch('builtins.open', new_callable=mock_open)
@patch('openhands.cli.utils.toml.dump')
@patch('openhands.cli.utils.toml.load')
def test_add_to_non_existent_file(
self, mock_toml_load, mock_toml_dump, mock_open_file, mock_config_path
):
mock_config_path.exists.return_value = False
mock_parent = MagicMock(spec=Path)
mock_config_path.parent = mock_parent
add_local_config_trusted_dir('/new/path')
mock_config_path.exists.assert_called_once()
mock_parent.mkdir.assert_called_once_with(parents=True, exist_ok=True)
mock_open_file.assert_called_once_with(mock_config_path, 'w')
expected_config = {'sandbox': {'trusted_dirs': ['/new/path']}}
mock_toml_dump.assert_called_once_with(expected_config, mock_open_file())
mock_toml_load.assert_not_called()
@patch('openhands.cli.utils._LOCAL_CONFIG_FILE_PATH')
@patch(
'builtins.open',
new_callable=mock_open,
read_data=toml.dumps({'sandbox': {'trusted_dirs': ['/old/path']}}),
)
@patch('openhands.cli.utils.toml.dump')
@patch('openhands.cli.utils.toml.load')
def test_add_to_existing_file(
self, mock_toml_load, mock_toml_dump, mock_open_file, mock_config_path
):
mock_config_path.exists.return_value = True
mock_toml_load.return_value = {'sandbox': {'trusted_dirs': ['/old/path']}}
add_local_config_trusted_dir('/new/path')
mock_config_path.exists.assert_called_once()
assert mock_open_file.call_count == 2 # Once for read, once for write
mock_open_file.assert_any_call(mock_config_path, 'r')
mock_open_file.assert_any_call(mock_config_path, 'w')
mock_toml_load.assert_called_once()
expected_config = {'sandbox': {'trusted_dirs': ['/old/path', '/new/path']}}
mock_toml_dump.assert_called_once_with(expected_config, mock_open_file())
@patch('openhands.cli.utils._LOCAL_CONFIG_FILE_PATH')
@patch(
'builtins.open',
new_callable=mock_open,
read_data=toml.dumps({'sandbox': {'trusted_dirs': ['/old/path']}}),
)
@patch('openhands.cli.utils.toml.dump')
@patch('openhands.cli.utils.toml.load')
def test_add_existing_dir(
self, mock_toml_load, mock_toml_dump, mock_open_file, mock_config_path
):
mock_config_path.exists.return_value = True
mock_toml_load.return_value = {'sandbox': {'trusted_dirs': ['/old/path']}}
add_local_config_trusted_dir('/old/path')
mock_config_path.exists.assert_called_once()
mock_toml_load.assert_called_once()
expected_config = {
'sandbox': {'trusted_dirs': ['/old/path']}
} # Should not change
mock_toml_dump.assert_called_once_with(expected_config, mock_open_file())
@patch('openhands.cli.utils._LOCAL_CONFIG_FILE_PATH')
@patch('builtins.open', new_callable=mock_open, read_data='invalid toml')
@patch('openhands.cli.utils.toml.dump')
@patch(
'openhands.cli.utils.toml.load',
side_effect=toml.TomlDecodeError('error', 'doc', 0),
)
def test_add_to_invalid_toml(
self, mock_toml_load, mock_toml_dump, mock_open_file, mock_config_path
):
mock_config_path.exists.return_value = True
add_local_config_trusted_dir('/new/path')
mock_config_path.exists.assert_called_once()
mock_toml_load.assert_called_once()
expected_config = {
'sandbox': {'trusted_dirs': ['/new/path']}
} # Should reset to default + new path
mock_toml_dump.assert_called_once_with(expected_config, mock_open_file())
@patch('openhands.cli.utils._LOCAL_CONFIG_FILE_PATH')
@patch(
'builtins.open',
new_callable=mock_open,
read_data=toml.dumps({'other_section': {}}),
)
@patch('openhands.cli.utils.toml.dump')
@patch('openhands.cli.utils.toml.load')
def test_add_to_missing_sandbox(
self, mock_toml_load, mock_toml_dump, mock_open_file, mock_config_path
):
mock_config_path.exists.return_value = True
mock_toml_load.return_value = {'other_section': {}}
add_local_config_trusted_dir('/new/path')
mock_config_path.exists.assert_called_once()
mock_toml_load.assert_called_once()
expected_config = {
'other_section': {},
'sandbox': {'trusted_dirs': ['/new/path']},
}
mock_toml_dump.assert_called_once_with(expected_config, mock_open_file())
@patch('openhands.cli.utils._LOCAL_CONFIG_FILE_PATH')
@patch(
'builtins.open',
new_callable=mock_open,
read_data=toml.dumps({'sandbox': {'other_key': []}}),
)
@patch('openhands.cli.utils.toml.dump')
@patch('openhands.cli.utils.toml.load')
def test_add_to_missing_trusted_dirs(
self, mock_toml_load, mock_toml_dump, mock_open_file, mock_config_path
):
mock_config_path.exists.return_value = True
mock_toml_load.return_value = {'sandbox': {'other_key': []}}
add_local_config_trusted_dir('/new/path')
mock_config_path.exists.assert_called_once()
mock_toml_load.assert_called_once()
expected_config = {'sandbox': {'other_key': [], 'trusted_dirs': ['/new/path']}}
mock_toml_dump.assert_called_once_with(expected_config, mock_open_file())
class TestUpdateUsageMetrics:
def test_update_usage_metrics_no_llm_metrics(self):
event = Event()
usage_metrics = UsageMetrics()
# Store original metrics object for comparison
original_metrics = usage_metrics.metrics
update_usage_metrics(event, usage_metrics)
# Metrics should remain unchanged
assert usage_metrics.metrics is original_metrics # Same object reference
assert usage_metrics.metrics.accumulated_cost == 0.0 # Default value
def test_update_usage_metrics_with_cost(self):
event = Event()
# Create a mock Metrics object
metrics = MagicMock(spec=Metrics)
# Mock the accumulated_cost property
type(metrics).accumulated_cost = PropertyMock(return_value=1.25)
event.llm_metrics = metrics
usage_metrics = UsageMetrics()
update_usage_metrics(event, usage_metrics)
# Test that the metrics object was updated to the one from the event
assert usage_metrics.metrics is metrics # Should be the same object reference
# Test that we can access the accumulated_cost through the metrics property
assert usage_metrics.metrics.accumulated_cost == 1.25
def test_update_usage_metrics_with_tokens(self):
event = Event()
# Create mock token usage
token_usage = MagicMock(spec=TokenUsage)
token_usage.prompt_tokens = 100
token_usage.completion_tokens = 50
token_usage.cache_read_tokens = 20
token_usage.cache_write_tokens = 30
# Create mock metrics
metrics = MagicMock(spec=Metrics)
# Set the mock properties
type(metrics).accumulated_cost = PropertyMock(return_value=1.5)
type(metrics).accumulated_token_usage = PropertyMock(return_value=token_usage)
event.llm_metrics = metrics
usage_metrics = UsageMetrics()
update_usage_metrics(event, usage_metrics)
# Test that the metrics object was updated to the one from the event
assert usage_metrics.metrics is metrics # Should be the same object reference
# Test we can access metrics values through the metrics property
assert usage_metrics.metrics.accumulated_cost == 1.5
assert usage_metrics.metrics.accumulated_token_usage is token_usage
assert usage_metrics.metrics.accumulated_token_usage.prompt_tokens == 100
assert usage_metrics.metrics.accumulated_token_usage.completion_tokens == 50
assert usage_metrics.metrics.accumulated_token_usage.cache_read_tokens == 20
assert usage_metrics.metrics.accumulated_token_usage.cache_write_tokens == 30
def test_update_usage_metrics_with_invalid_types(self):
event = Event()
# Create mock token usage with invalid types
token_usage = MagicMock(spec=TokenUsage)
token_usage.prompt_tokens = 'not an int'
token_usage.completion_tokens = 'not an int'
token_usage.cache_read_tokens = 'not an int'
token_usage.cache_write_tokens = 'not an int'
# Create mock metrics
metrics = MagicMock(spec=Metrics)
# Set the mock properties
type(metrics).accumulated_cost = PropertyMock(return_value='not a float')
type(metrics).accumulated_token_usage = PropertyMock(return_value=token_usage)
event.llm_metrics = metrics
usage_metrics = UsageMetrics()
update_usage_metrics(event, usage_metrics)
# Test that the metrics object was still updated to the one from the event
# Even though the values are invalid types, the metrics object reference should be updated
assert usage_metrics.metrics is metrics # Should be the same object reference
# The invalid values are preserved because update_usage_metrics simply assigns
# the metrics object without validating its contents
assert usage_metrics.metrics.accumulated_cost == 'not a float'
assert usage_metrics.metrics.accumulated_token_usage is token_usage
class TestModelAndProviderFunctions:
def test_extract_model_and_provider_slash_format(self):
model = 'openai/gpt-4o'
result = extract_model_and_provider(model)
assert result['provider'] == 'openai'
assert result['model'] == 'gpt-4o'
assert result['separator'] == '/'
def test_extract_model_and_provider_dot_format(self):
model = 'anthropic.claude-3-7'
result = extract_model_and_provider(model)
assert result['provider'] == 'anthropic'
assert result['model'] == 'claude-3-7'
assert result['separator'] == '.'
def test_extract_model_and_provider_openai_implicit(self):
model = 'gpt-4o'
result = extract_model_and_provider(model)
assert result['provider'] == 'openai'
assert result['model'] == 'gpt-4o'
assert result['separator'] == '/'
def test_extract_model_and_provider_anthropic_implicit(self):
model = 'claude-sonnet-4-20250514'
result = extract_model_and_provider(model)
assert result['provider'] == 'anthropic'
assert result['model'] == 'claude-sonnet-4-20250514'
assert result['separator'] == '/'
def test_extract_model_and_provider_mistral_implicit(self):
model = 'devstral-small-2505'
result = extract_model_and_provider(model)
assert result['provider'] == 'mistral'
assert result['model'] == 'devstral-small-2505'
assert result['separator'] == '/'
def test_extract_model_and_provider_o4_mini(self):
model = 'o4-mini'
result = extract_model_and_provider(model)
assert result['provider'] == 'openai'
assert result['model'] == 'o4-mini'
assert result['separator'] == '/'
def test_extract_model_and_provider_versioned(self):
model = 'deepseek.deepseek-coder-1.3b'
result = extract_model_and_provider(model)
assert result['provider'] == 'deepseek'
assert result['model'] == 'deepseek-coder-1.3b'
assert result['separator'] == '.'
def test_extract_model_and_provider_unknown(self):
model = 'unknown-model'
result = extract_model_and_provider(model)
assert result['provider'] == ''
assert result['model'] == 'unknown-model'
assert result['separator'] == ''
def test_organize_models_and_providers(self):
models = [
'openai/gpt-4o',
'anthropic/claude-sonnet-4-20250514',
'o3',
'o4-mini',
'devstral-small-2505',
'mistral/devstral-small-2505',
'anthropic.claude-3-5', # Should be ignored as it uses dot separator for anthropic
'unknown-model',
]
result = organize_models_and_providers(models)
assert 'openai' in result
assert 'anthropic' in result
assert 'mistral' in result
assert 'other' in result
assert len(result['openai']['models']) == 3
assert 'gpt-4o' in result['openai']['models']
assert 'o3' in result['openai']['models']
assert 'o4-mini' in result['openai']['models']
assert len(result['anthropic']['models']) == 1
assert 'claude-sonnet-4-20250514' in result['anthropic']['models']
assert len(result['mistral']['models']) == 2
assert 'devstral-small-2505' in result['mistral']['models']
assert len(result['other']['models']) == 1
assert 'unknown-model' in result['other']['models']
class TestUtilityFunctions:
def test_is_number_with_digit(self):
assert is_number('1') is True
assert is_number('9') is True
def test_is_number_with_letter(self):
assert is_number('a') is False
assert is_number('Z') is False
def test_is_number_with_special_char(self):
assert is_number('.') is False
assert is_number('-') is False
def test_split_is_actually_version_true(self):
split = ['model', '1.0']
assert split_is_actually_version(split) is True
def test_split_is_actually_version_false(self):
split = ['model', 'version']
assert split_is_actually_version(split) is False
def test_split_is_actually_version_single_item(self):
split = ['model']
assert split_is_actually_version(split) is False
class TestFileOperations:
def test_read_file(self):
mock_content = 'test file content'
with patch('builtins.open', mock_open(read_data=mock_content)):
result = read_file('test.txt')
assert result == mock_content
def test_write_to_file(self):
mock_content = 'test file content'
mock_file = mock_open()
with patch('builtins.open', mock_file):
write_to_file('test.txt', mock_content)
mock_file.assert_called_once_with('test.txt', 'w')
handle = mock_file()
handle.write.assert_called_once_with(mock_content)

View File

@@ -1,89 +0,0 @@
import os
from unittest.mock import ANY, MagicMock, patch
from openhands.core.config import CLIConfig, OpenHandsConfig
class TestCliViMode:
"""Test the VI mode feature."""
@patch('openhands.cli.tui.PromptSession')
def test_create_prompt_session_vi_mode_enabled(self, mock_prompt_session):
"""Test that vi_mode can be enabled."""
from openhands.cli.tui import create_prompt_session
config = OpenHandsConfig(cli=CLIConfig(vi_mode=True))
create_prompt_session(config)
mock_prompt_session.assert_called_with(
style=ANY,
vi_mode=True,
)
@patch('openhands.cli.tui.PromptSession')
def test_create_prompt_session_vi_mode_disabled(self, mock_prompt_session):
"""Test that vi_mode is disabled by default."""
from openhands.cli.tui import create_prompt_session
config = OpenHandsConfig(cli=CLIConfig(vi_mode=False))
create_prompt_session(config)
mock_prompt_session.assert_called_with(
style=ANY,
vi_mode=False,
)
@patch('openhands.cli.tui.Application')
def test_cli_confirm_vi_keybindings_are_added(self, mock_app_class):
"""Test that vi keybindings are added to the KeyBindings object."""
from openhands.cli.tui import cli_confirm
config = OpenHandsConfig(cli=CLIConfig(vi_mode=True))
with patch('openhands.cli.tui.KeyBindings', MagicMock()) as mock_key_bindings:
cli_confirm(
config, 'Test question', choices=['Choice 1', 'Choice 2', 'Choice 3']
)
# the KeyBindings container should be created exactly once
assert mock_key_bindings.call_count == 1
# and key bindings should have been registered on it
mock_kb_instance = mock_key_bindings.return_value
assert mock_kb_instance.add.call_count > 0
@patch('openhands.cli.tui.Application')
def test_cli_confirm_vi_keybindings_are_not_added(self, mock_app_class):
"""Test that vi keybindings are not added when vi_mode is False."""
from openhands.cli.tui import cli_confirm
config = OpenHandsConfig(cli=CLIConfig(vi_mode=False))
with patch('openhands.cli.tui.KeyBindings', MagicMock()) as mock_key_bindings:
cli_confirm(
config, 'Test question', choices=['Choice 1', 'Choice 2', 'Choice 3']
)
# the KeyBindings container should still be created exactly once
assert mock_key_bindings.call_count == 1
# then inspect which bindings were registered
mock_kb_instance = mock_key_bindings.return_value
# the vi-specific 'j'/'k' bindings must not be among them
for call in mock_kb_instance.add.call_args_list:
assert call[0][0] not in ('j', 'k')
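# (A hedged sketch of the navigation bindings these two tests toggle: KeyBindings
# and kb.add are real prompt_toolkit APIs, but the handler bodies and the keys other
# than 'j'/'k' are illustrative assumptions, not the deleted code.)
from prompt_toolkit.key_binding import KeyBindings


def build_confirm_bindings_sketch(vi_mode: bool) -> KeyBindings:
    kb = KeyBindings()

    @kb.add('up')
    def _move_up(event):  # move the highlighted choice up
        pass

    @kb.add('down')
    def _move_down(event):  # move the highlighted choice down
        pass

    if vi_mode:
        # vi-style aliases are registered only when vi_mode is enabled
        kb.add('k')(_move_up)
        kb.add('j')(_move_down)
    return kb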
@patch.dict(os.environ, {}, clear=True)
def test_vi_mode_disabled_by_default(self):
"""Test that vi_mode is disabled by default when no env var is set."""
from openhands.core.config.utils import load_from_env
config = OpenHandsConfig()
load_from_env(config, os.environ)
assert config.cli.vi_mode is False, 'vi_mode should be False by default'
@patch.dict(os.environ, {'CLI_VI_MODE': 'True'})
def test_vi_mode_enabled_from_env(self):
"""Test that vi_mode can be enabled from an environment variable."""
from openhands.core.config.utils import load_from_env
config = OpenHandsConfig()
load_from_env(config, os.environ)
assert config.cli.vi_mode is True, (
'vi_mode should be True when CLI_VI_MODE is set'
)
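# (Hedged sketch of the factory these tests call, based only on the asserted
# PromptSession(style=..., vi_mode=...) invocation; _STYLE is an assumed placeholder,
# not the real style object from the deleted module.)
from prompt_toolkit import PromptSession
from prompt_toolkit.styles import Style

_STYLE = Style.from_dict({})


def create_prompt_session_sketch(config: OpenHandsConfig) -> PromptSession:
    # vi_mode is forwarded straight from the [cli] config section.
    return PromptSession(style=_STYLE, vi_mode=config.cli.vi_mode)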


@@ -1,90 +0,0 @@
"""Test CLIRuntime class."""
import os
import tempfile
import pytest
from openhands.core.config import OpenHandsConfig
from openhands.events import EventStream
# Mock LLMRegistry
from openhands.runtime.impl.cli.cli_runtime import CLIRuntime
from openhands.storage import get_file_store
# Create a mock LLMRegistry class
class MockLLMRegistry:
def __init__(self, config):
self.config = config
@pytest.fixture
def temp_dir():
"""Create a temporary directory for testing."""
with tempfile.TemporaryDirectory() as temp_dir:
yield temp_dir
@pytest.fixture
def cli_runtime(temp_dir):
"""Create a CLIRuntime instance for testing."""
file_store = get_file_store('local', temp_dir)
event_stream = EventStream('test', file_store)
config = OpenHandsConfig()
config.workspace_base = temp_dir
llm_registry = MockLLMRegistry(config)
runtime = CLIRuntime(config, event_stream, llm_registry)
runtime._runtime_initialized = True # Skip initialization
return runtime
def test_sanitize_filename_valid_path(cli_runtime):
"""Test _sanitize_filename with a valid path."""
test_path = os.path.join(cli_runtime._workspace_path, 'test.txt')
sanitized_path = cli_runtime._sanitize_filename(test_path)
assert sanitized_path == os.path.realpath(test_path)
def test_sanitize_filename_relative_path(cli_runtime):
"""Test _sanitize_filename with a relative path."""
test_path = 'test.txt'
expected_path = os.path.join(cli_runtime._workspace_path, test_path)
sanitized_path = cli_runtime._sanitize_filename(test_path)
assert sanitized_path == os.path.realpath(expected_path)
def test_sanitize_filename_outside_workspace(cli_runtime):
"""Test _sanitize_filename with a path outside the workspace."""
test_path = '/tmp/test.txt' # Path outside workspace
with pytest.raises(PermissionError) as exc_info:
cli_runtime._sanitize_filename(test_path)
assert 'Invalid path:' in str(exc_info.value)
assert 'You can only work with files in' in str(exc_info.value)
def test_sanitize_filename_path_traversal(cli_runtime):
"""Test _sanitize_filename with path traversal attempt."""
test_path = os.path.join(cli_runtime._workspace_path, '..', 'test.txt')
with pytest.raises(PermissionError) as exc_info:
cli_runtime._sanitize_filename(test_path)
assert 'Invalid path traversal:' in str(exc_info.value)
assert 'Path resolves outside the workspace' in str(exc_info.value)
def test_sanitize_filename_absolute_path_with_dots(cli_runtime):
"""Test _sanitize_filename with absolute path containing dots."""
test_path = os.path.join(cli_runtime._workspace_path, 'subdir', '..', 'test.txt')
# Create the parent directory
os.makedirs(os.path.join(cli_runtime._workspace_path, 'subdir'), exist_ok=True)
sanitized_path = cli_runtime._sanitize_filename(test_path)
assert sanitized_path == os.path.join(cli_runtime._workspace_path, 'test.txt')
def test_sanitize_filename_nested_path(cli_runtime):
"""Test _sanitize_filename with a nested path."""
nested_dir = os.path.join(cli_runtime._workspace_path, 'dir1', 'dir2')
os.makedirs(nested_dir, exist_ok=True)
test_path = os.path.join(nested_dir, 'test.txt')
sanitized_path = cli_runtime._sanitize_filename(test_path)
assert sanitized_path == os.path.realpath(test_path)
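# (Hedged sketch of the path check these tests exercise -- not the removed
# CLIRuntime method.  Relative paths are resolved against the workspace, absolute
# paths must already point inside it, and anything that resolves outside raises.)
def sanitize_filename_sketch(workspace_path: str, filename: str) -> str:
    workspace = os.path.realpath(workspace_path)
    if not os.path.isabs(filename):
        filename = os.path.join(workspace, filename)
    elif not filename.startswith(workspace):
        raise PermissionError(
            f'Invalid path: {filename}. You can only work with files in {workspace}.'
        )
    resolved = os.path.realpath(filename)
    if not resolved.startswith(workspace):
        raise PermissionError(
            f'Invalid path traversal: {filename}. Path resolves outside the workspace.'
        )
    return resolved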


@@ -1,858 +0,0 @@
import os
import pathlib
import subprocess
from unittest import mock
import pytest
from openhands.cli import vscode_extension
@pytest.fixture
def mock_env_and_dependencies():
"""A fixture to mock all external dependencies and manage the environment."""
with (
mock.patch.dict(os.environ, {}, clear=True),
mock.patch('pathlib.Path.home') as mock_home,
mock.patch('pathlib.Path.exists') as mock_exists,
mock.patch('pathlib.Path.touch') as mock_touch,
mock.patch('pathlib.Path.mkdir') as mock_mkdir,
mock.patch('subprocess.run') as mock_subprocess,
mock.patch('importlib.resources.as_file') as mock_as_file,
mock.patch(
'openhands.cli.vscode_extension.download_latest_vsix_from_github'
) as mock_download,
mock.patch('builtins.print') as mock_print,
mock.patch('openhands.cli.vscode_extension.logger.debug') as mock_logger,
):
# Setup a temporary directory for home
temp_dir = pathlib.Path.cwd() / 'temp_test_home'
temp_dir.mkdir(exist_ok=True)
mock_home.return_value = temp_dir
try:
yield {
'home': mock_home,
'exists': mock_exists,
'touch': mock_touch,
'mkdir': mock_mkdir,
'subprocess': mock_subprocess,
'as_file': mock_as_file,
'download': mock_download,
'print': mock_print,
'logger': mock_logger,
}
finally:
# Teardown the temporary directory, ignoring errors if files don't exist
openhands_dir = temp_dir / '.openhands'
if openhands_dir.exists():
for f in openhands_dir.glob('*'):
if f.is_file():
f.unlink()
try:
openhands_dir.rmdir()
except FileNotFoundError:
pass
try:
temp_dir.rmdir()
except (FileNotFoundError, OSError):
pass
def test_not_in_vscode_environment(mock_env_and_dependencies):
"""Should not attempt any installation if not in a VSCode-like environment."""
os.environ['TERM_PROGRAM'] = 'not_vscode'
vscode_extension.attempt_vscode_extension_install()
mock_env_and_dependencies['download'].assert_not_called()
mock_env_and_dependencies['subprocess'].assert_not_called()
def test_already_attempted_flag_prevents_execution(mock_env_and_dependencies):
"""Should do nothing if the installation flag file already exists."""
os.environ['TERM_PROGRAM'] = 'vscode'
mock_env_and_dependencies['exists'].return_value = True # Simulate flag file exists
vscode_extension.attempt_vscode_extension_install()
mock_env_and_dependencies['download'].assert_not_called()
mock_env_and_dependencies['subprocess'].assert_not_called()
def test_extension_already_installed_detected(mock_env_and_dependencies):
"""Should detect already installed extension and create flag."""
os.environ['TERM_PROGRAM'] = 'vscode'
mock_env_and_dependencies['exists'].return_value = False
# Mock subprocess call for --list-extensions (returns extension as installed)
mock_env_and_dependencies['subprocess'].return_value = subprocess.CompletedProcess(
returncode=0,
args=[],
stdout='openhands.openhands-vscode\nother.extension',
stderr='',
)
vscode_extension.attempt_vscode_extension_install()
# Should only call --list-extensions, no installation attempts
assert mock_env_and_dependencies['subprocess'].call_count == 1
mock_env_and_dependencies['subprocess'].assert_called_with(
['code', '--list-extensions'],
capture_output=True,
text=True,
check=False,
)
mock_env_and_dependencies['print'].assert_any_call(
'INFO: OpenHands VS Code extension is already installed.'
)
mock_env_and_dependencies['touch'].assert_called_once()
mock_env_and_dependencies['download'].assert_not_called()
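# (The installer code under test was removed with the V0 CLI.  As a hedged sketch,
# the "already installed" probe asserted above is just an exact extension-ID match
# over the `<cli> --list-extensions` output, roughly:)
def extension_already_installed_sketch(cli_cmd: str = 'code') -> bool:
    result = subprocess.run(
        [cli_cmd, '--list-extensions'], capture_output=True, text=True, check=False
    )
    if result.returncode != 0:
        return False  # probe failed; callers fall through and try to install anyway
    return 'openhands.openhands-vscode' in result.stdout.splitlines()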
def test_extension_detection_in_middle_of_list(mock_env_and_dependencies):
"""Should detect extension even when it's not the first in the list."""
os.environ['TERM_PROGRAM'] = 'vscode'
mock_env_and_dependencies['exists'].return_value = False
# Extension is in the middle of the list
mock_env_and_dependencies['subprocess'].return_value = subprocess.CompletedProcess(
returncode=0,
args=[],
stdout='first.extension\nopenhands.openhands-vscode\nlast.extension',
stderr='',
)
vscode_extension.attempt_vscode_extension_install()
mock_env_and_dependencies['print'].assert_any_call(
'INFO: OpenHands VS Code extension is already installed.'
)
mock_env_and_dependencies['touch'].assert_called_once()
def test_extension_detection_partial_match_ignored(mock_env_and_dependencies):
"""Should not match partial extension IDs."""
os.environ['TERM_PROGRAM'] = 'vscode'
mock_env_and_dependencies['exists'].return_value = False
# Partial match should not trigger detection
mock_env_and_dependencies['subprocess'].side_effect = [
subprocess.CompletedProcess(
returncode=0,
args=[],
stdout='other.openhands-vscode-fork\nsome.extension',
stderr='',
),
subprocess.CompletedProcess(
returncode=0, args=[], stdout='', stderr=''
), # Bundled install succeeds
]
# Mock bundled VSIX to succeed
mock_vsix_path = mock.MagicMock()
mock_vsix_path.exists.return_value = True
mock_vsix_path.__str__.return_value = '/fake/path/to/bundled.vsix'
mock_env_and_dependencies[
'as_file'
].return_value.__enter__.return_value = mock_vsix_path
vscode_extension.attempt_vscode_extension_install()
# Should proceed with installation since exact match not found
assert mock_env_and_dependencies['subprocess'].call_count == 2
mock_env_and_dependencies['as_file'].assert_called_once()
# GitHub download should not be attempted since bundled install succeeds
mock_env_and_dependencies['download'].assert_not_called()
def test_list_extensions_fails_continues_installation(mock_env_and_dependencies):
"""Should continue with installation if --list-extensions fails."""
os.environ['TERM_PROGRAM'] = 'vscode'
mock_env_and_dependencies['exists'].return_value = False
# --list-extensions fails, but bundled install succeeds
mock_env_and_dependencies['subprocess'].side_effect = [
subprocess.CompletedProcess(
returncode=1, args=[], stdout='', stderr='Command failed'
),
subprocess.CompletedProcess(
returncode=0, args=[], stdout='', stderr=''
), # Bundled install succeeds
]
# Mock bundled VSIX to succeed
mock_vsix_path = mock.MagicMock()
mock_vsix_path.exists.return_value = True
mock_vsix_path.__str__.return_value = '/fake/path/to/bundled.vsix'
mock_env_and_dependencies[
'as_file'
].return_value.__enter__.return_value = mock_vsix_path
vscode_extension.attempt_vscode_extension_install()
# Should proceed with installation
assert mock_env_and_dependencies['subprocess'].call_count == 2
mock_env_and_dependencies['as_file'].assert_called_once()
# GitHub download should not be attempted since bundled install succeeds
mock_env_and_dependencies['download'].assert_not_called()
def test_list_extensions_exception_continues_installation(mock_env_and_dependencies):
"""Should continue with installation if --list-extensions throws exception."""
os.environ['TERM_PROGRAM'] = 'vscode'
mock_env_and_dependencies['exists'].return_value = False
# --list-extensions throws exception, but bundled install succeeds
mock_env_and_dependencies['subprocess'].side_effect = [
FileNotFoundError('code command not found'),
subprocess.CompletedProcess(
returncode=0, args=[], stdout='', stderr=''
), # Bundled install succeeds
]
# Mock bundled VSIX to succeed
mock_vsix_path = mock.MagicMock()
mock_vsix_path.exists.return_value = True
mock_vsix_path.__str__.return_value = '/fake/path/to/bundled.vsix'
mock_env_and_dependencies[
'as_file'
].return_value.__enter__.return_value = mock_vsix_path
vscode_extension.attempt_vscode_extension_install()
# Should proceed with installation
assert mock_env_and_dependencies['subprocess'].call_count == 2
mock_env_and_dependencies['as_file'].assert_called_once()
# GitHub download should not be attempted since bundled install succeeds
mock_env_and_dependencies['download'].assert_not_called()
def test_mark_installation_successful_os_error(mock_env_and_dependencies):
"""Should log error but continue if flag file creation fails."""
os.environ['TERM_PROGRAM'] = 'vscode'
mock_env_and_dependencies['exists'].return_value = False
# Mock bundled VSIX to succeed
mock_vsix_path = mock.MagicMock()
mock_vsix_path.exists.return_value = True
mock_vsix_path.__str__.return_value = '/fake/path/to/bundled.vsix'
mock_env_and_dependencies[
'as_file'
].return_value.__enter__.return_value = mock_vsix_path
mock_env_and_dependencies['subprocess'].side_effect = [
subprocess.CompletedProcess(
returncode=0, args=[], stdout='', stderr=''
), # --list-extensions (empty)
subprocess.CompletedProcess(
returncode=0, args=[], stdout='', stderr=''
), # Bundled install succeeds
]
mock_env_and_dependencies['touch'].side_effect = OSError('Permission denied')
vscode_extension.attempt_vscode_extension_install()
# Should still complete installation
mock_env_and_dependencies['as_file'].assert_called_once()
# GitHub download should not be attempted since bundled install succeeds
mock_env_and_dependencies['download'].assert_not_called()
mock_env_and_dependencies['touch'].assert_called_once()
# Should log the error
mock_env_and_dependencies['logger'].assert_any_call(
'Could not create VS Code extension success flag file: Permission denied'
)
def test_installation_failure_no_flag_created(mock_env_and_dependencies):
"""Should NOT create flag when all installation methods fail (allow retry)."""
os.environ['TERM_PROGRAM'] = 'vscode'
mock_env_and_dependencies['exists'].return_value = False
mock_env_and_dependencies['subprocess'].return_value = subprocess.CompletedProcess(
returncode=0,
args=[],
stdout='',
stderr='', # --list-extensions (empty)
)
mock_env_and_dependencies['download'].return_value = None # GitHub fails
mock_env_and_dependencies[
'as_file'
].side_effect = FileNotFoundError # Bundled fails
vscode_extension.attempt_vscode_extension_install()
# Should NOT create flag file - this is the key behavior change
mock_env_and_dependencies['touch'].assert_not_called()
mock_env_and_dependencies['print'].assert_any_call(
'INFO: Will retry installation next time you run OpenHands in VS Code.'
)
def test_install_succeeds_from_bundled(mock_env_and_dependencies):
"""Should successfully install from bundled VSIX on the first try."""
os.environ['TERM_PROGRAM'] = 'vscode'
mock_env_and_dependencies['exists'].return_value = False
mock_vsix_path = mock.MagicMock()
mock_vsix_path.exists.return_value = True
mock_vsix_path.__str__.return_value = '/fake/path/to/bundled.vsix'
mock_env_and_dependencies[
'as_file'
].return_value.__enter__.return_value = mock_vsix_path
# Mock subprocess calls: first --list-extensions (returns empty), then install
mock_env_and_dependencies['subprocess'].side_effect = [
subprocess.CompletedProcess(
returncode=0, args=[], stdout='', stderr=''
), # --list-extensions
subprocess.CompletedProcess(
returncode=0, args=[], stdout='', stderr=''
), # --install-extension
]
vscode_extension.attempt_vscode_extension_install()
mock_env_and_dependencies['as_file'].assert_called_once()
# Should have two subprocess calls: list-extensions and install-extension
assert mock_env_and_dependencies['subprocess'].call_count == 2
mock_env_and_dependencies['subprocess'].assert_any_call(
['code', '--list-extensions'],
capture_output=True,
text=True,
check=False,
)
mock_env_and_dependencies['subprocess'].assert_any_call(
['code', '--install-extension', '/fake/path/to/bundled.vsix', '--force'],
capture_output=True,
text=True,
check=False,
)
mock_env_and_dependencies['print'].assert_any_call(
'INFO: Bundled VS Code extension installed successfully.'
)
mock_env_and_dependencies['touch'].assert_called_once()
# GitHub download should not be attempted
mock_env_and_dependencies['download'].assert_not_called()
def test_bundled_fails_falls_back_to_github(mock_env_and_dependencies):
"""Should fall back to GitHub if bundled VSIX installation fails."""
os.environ['TERM_PROGRAM'] = 'vscode'
mock_env_and_dependencies['exists'].return_value = False
mock_env_and_dependencies['download'].return_value = '/fake/path/to/github.vsix'
# Mock bundled VSIX to fail
mock_env_and_dependencies['as_file'].side_effect = FileNotFoundError
# Mock subprocess calls: first --list-extensions (returns empty), then install
mock_env_and_dependencies['subprocess'].side_effect = [
subprocess.CompletedProcess(
returncode=0, args=[], stdout='', stderr=''
), # --list-extensions
subprocess.CompletedProcess(
returncode=0, args=[], stdout='', stderr=''
), # --install-extension
]
with (
mock.patch('os.remove') as mock_os_remove,
mock.patch('os.path.exists', return_value=True),
):
vscode_extension.attempt_vscode_extension_install()
mock_env_and_dependencies['as_file'].assert_called_once()
mock_env_and_dependencies['download'].assert_called_once()
# Should have two subprocess calls: list-extensions and install-extension
assert mock_env_and_dependencies['subprocess'].call_count == 2
mock_env_and_dependencies['subprocess'].assert_any_call(
['code', '--list-extensions'],
capture_output=True,
text=True,
check=False,
)
mock_env_and_dependencies['subprocess'].assert_any_call(
['code', '--install-extension', '/fake/path/to/github.vsix', '--force'],
capture_output=True,
text=True,
check=False,
)
mock_env_and_dependencies['print'].assert_any_call(
'INFO: OpenHands VS Code extension installed successfully from GitHub.'
)
mock_os_remove.assert_called_once_with('/fake/path/to/github.vsix')
mock_env_and_dependencies['touch'].assert_called_once()
def test_all_methods_fail(mock_env_and_dependencies):
"""Should show a final failure message if all installation methods fail."""
os.environ['TERM_PROGRAM'] = 'vscode'
mock_env_and_dependencies['exists'].return_value = False
mock_env_and_dependencies['download'].return_value = None
mock_env_and_dependencies['as_file'].side_effect = FileNotFoundError
# Mock subprocess call for --list-extensions (returns empty, extension not installed)
mock_env_and_dependencies['subprocess'].return_value = subprocess.CompletedProcess(
returncode=0, args=[], stdout='', stderr=''
)
vscode_extension.attempt_vscode_extension_install()
mock_env_and_dependencies['download'].assert_called_once()
mock_env_and_dependencies['as_file'].assert_called_once()
# Only one subprocess call for --list-extensions, no installation attempts
assert mock_env_and_dependencies['subprocess'].call_count == 1
mock_env_and_dependencies['subprocess'].assert_called_with(
['code', '--list-extensions'],
capture_output=True,
text=True,
check=False,
)
mock_env_and_dependencies['print'].assert_any_call(
'INFO: Automatic installation failed. Please check the OpenHands documentation for manual installation instructions.'
)
mock_env_and_dependencies['print'].assert_any_call(
'INFO: Will retry installation next time you run OpenHands in VS Code.'
)
# Should NOT create flag file on failure - that's the point of our new approach
mock_env_and_dependencies['touch'].assert_not_called()
def test_windsurf_detection_and_install(mock_env_and_dependencies):
"""Should correctly detect Windsurf but not attempt marketplace installation."""
os.environ['__CFBundleIdentifier'] = 'com.exafunction.windsurf'
mock_env_and_dependencies['exists'].return_value = False
mock_env_and_dependencies['download'].return_value = None
mock_env_and_dependencies['as_file'].side_effect = FileNotFoundError
# Mock subprocess call for --list-extensions (returns empty, extension not installed)
mock_env_and_dependencies['subprocess'].return_value = subprocess.CompletedProcess(
returncode=0, args=[], stdout='', stderr=''
)
vscode_extension.attempt_vscode_extension_install()
# Only one subprocess call for --list-extensions, no installation attempts
assert mock_env_and_dependencies['subprocess'].call_count == 1
mock_env_and_dependencies['subprocess'].assert_called_with(
['surf', '--list-extensions'],
capture_output=True,
text=True,
check=False,
)
mock_env_and_dependencies['print'].assert_any_call(
'INFO: Automatic installation failed. Please check the OpenHands documentation for manual installation instructions.'
)
mock_env_and_dependencies['print'].assert_any_call(
'INFO: Will retry installation next time you run OpenHands in Windsurf.'
)
# Should NOT create flag file on failure
mock_env_and_dependencies['touch'].assert_not_called()
def test_os_error_on_mkdir(mock_env_and_dependencies):
"""Should log a debug message if creating the flag directory fails."""
os.environ['TERM_PROGRAM'] = 'vscode'
mock_env_and_dependencies['mkdir'].side_effect = OSError('Permission denied')
vscode_extension.attempt_vscode_extension_install()
mock_env_and_dependencies['logger'].assert_called_once_with(
'Could not create or check VS Code extension flag directory: Permission denied'
)
mock_env_and_dependencies['download'].assert_not_called()
def test_os_error_on_touch(mock_env_and_dependencies):
"""Should log a debug message if creating the flag file fails."""
os.environ['TERM_PROGRAM'] = 'vscode'
mock_env_and_dependencies['exists'].return_value = False
mock_env_and_dependencies['download'].return_value = None
mock_env_and_dependencies['as_file'].side_effect = FileNotFoundError
# Mock subprocess call for --list-extensions (returns empty, extension not installed)
mock_env_and_dependencies['subprocess'].return_value = subprocess.CompletedProcess(
returncode=0, args=[], stdout='', stderr=''
)
mock_env_and_dependencies['touch'].side_effect = OSError('Permission denied')
vscode_extension.attempt_vscode_extension_install()
# Should NOT create flag file on failure - this is the new behavior
mock_env_and_dependencies['touch'].assert_not_called()
mock_env_and_dependencies['print'].assert_any_call(
'INFO: Will retry installation next time you run OpenHands in VS Code.'
)
def test_flag_file_exists_windsurf(mock_env_and_dependencies):
"""Should not attempt install if flag file already exists (Windsurf)."""
os.environ['__CFBundleIdentifier'] = 'com.exafunction.windsurf'
mock_env_and_dependencies['exists'].return_value = True
vscode_extension.attempt_vscode_extension_install()
mock_env_and_dependencies['download'].assert_not_called()
mock_env_and_dependencies['subprocess'].assert_not_called()
def test_successful_install_attempt_vscode(mock_env_and_dependencies):
"""Test that VS Code is detected but marketplace installation is not attempted."""
os.environ['TERM_PROGRAM'] = 'vscode'
mock_env_and_dependencies['exists'].return_value = False
mock_env_and_dependencies['download'].return_value = None
mock_env_and_dependencies['as_file'].side_effect = FileNotFoundError
# Mock subprocess call for --list-extensions (returns empty, extension not installed)
mock_env_and_dependencies['subprocess'].return_value = subprocess.CompletedProcess(
returncode=0, args=[], stdout='', stderr=''
)
vscode_extension.attempt_vscode_extension_install()
# One subprocess call for --list-extensions, no installation attempts
assert mock_env_and_dependencies['subprocess'].call_count == 1
mock_env_and_dependencies['subprocess'].assert_called_with(
['code', '--list-extensions'],
capture_output=True,
text=True,
check=False,
)
mock_env_and_dependencies['print'].assert_any_call(
'INFO: Automatic installation failed. Please check the OpenHands documentation for manual installation instructions.'
)
def test_successful_install_attempt_windsurf(mock_env_and_dependencies):
"""Test that Windsurf is detected but marketplace installation is not attempted."""
os.environ['__CFBundleIdentifier'] = 'com.exafunction.windsurf'
mock_env_and_dependencies['exists'].return_value = False
mock_env_and_dependencies['download'].return_value = None
mock_env_and_dependencies['as_file'].side_effect = FileNotFoundError
# Mock subprocess call for --list-extensions (returns empty, extension not installed)
mock_env_and_dependencies['subprocess'].return_value = subprocess.CompletedProcess(
returncode=0, args=[], stdout='', stderr=''
)
vscode_extension.attempt_vscode_extension_install()
# One subprocess call for --list-extensions, no installation attempts
assert mock_env_and_dependencies['subprocess'].call_count == 1
mock_env_and_dependencies['subprocess'].assert_called_with(
['surf', '--list-extensions'],
capture_output=True,
text=True,
check=False,
)
mock_env_and_dependencies['print'].assert_any_call(
'INFO: Automatic installation failed. Please check the OpenHands documentation for manual installation instructions.'
)
def test_install_attempt_code_command_fails(mock_env_and_dependencies):
"""Test that VS Code is detected but marketplace installation is not attempted."""
os.environ['TERM_PROGRAM'] = 'vscode'
mock_env_and_dependencies['exists'].return_value = False
mock_env_and_dependencies['download'].return_value = None
mock_env_and_dependencies['as_file'].side_effect = FileNotFoundError
# Mock subprocess call for --list-extensions (returns empty, extension not installed)
mock_env_and_dependencies['subprocess'].return_value = subprocess.CompletedProcess(
returncode=0, args=[], stdout='', stderr=''
)
vscode_extension.attempt_vscode_extension_install()
# One subprocess call for --list-extensions, no installation attempts
assert mock_env_and_dependencies['subprocess'].call_count == 1
mock_env_and_dependencies['print'].assert_any_call(
'INFO: Automatic installation failed. Please check the OpenHands documentation for manual installation instructions.'
)
def test_install_attempt_code_not_found(mock_env_and_dependencies):
"""Test that VS Code is detected but marketplace installation is not attempted."""
os.environ['TERM_PROGRAM'] = 'vscode'
mock_env_and_dependencies['exists'].return_value = False
mock_env_and_dependencies['download'].return_value = None
mock_env_and_dependencies['as_file'].side_effect = FileNotFoundError
# Mock subprocess call for --list-extensions (returns empty, extension not installed)
mock_env_and_dependencies['subprocess'].return_value = subprocess.CompletedProcess(
returncode=0, args=[], stdout='', stderr=''
)
vscode_extension.attempt_vscode_extension_install()
# One subprocess call for --list-extensions, no installation attempts
assert mock_env_and_dependencies['subprocess'].call_count == 1
mock_env_and_dependencies['print'].assert_any_call(
'INFO: Automatic installation failed. Please check the OpenHands documentation for manual installation instructions.'
)
def test_flag_dir_creation_os_error_windsurf(mock_env_and_dependencies):
"""Test OSError during flag directory creation (Windsurf)."""
os.environ['__CFBundleIdentifier'] = 'com.exafunction.windsurf'
mock_env_and_dependencies['mkdir'].side_effect = OSError('Permission denied')
vscode_extension.attempt_vscode_extension_install()
mock_env_and_dependencies['logger'].assert_called_once_with(
'Could not create or check Windsurf extension flag directory: Permission denied'
)
mock_env_and_dependencies['download'].assert_not_called()
def test_flag_file_touch_os_error_vscode(mock_env_and_dependencies):
"""Test OSError during flag file touch (VS Code)."""
os.environ['TERM_PROGRAM'] = 'vscode'
mock_env_and_dependencies['exists'].return_value = False
mock_env_and_dependencies['download'].return_value = None
mock_env_and_dependencies['as_file'].side_effect = FileNotFoundError
# Mock subprocess call for --list-extensions (returns empty, extension not installed)
mock_env_and_dependencies['subprocess'].return_value = subprocess.CompletedProcess(
returncode=0, args=[], stdout='', stderr=''
)
mock_env_and_dependencies['touch'].side_effect = OSError('Permission denied')
vscode_extension.attempt_vscode_extension_install()
# Should NOT create flag file on failure - this is the new behavior
mock_env_and_dependencies['touch'].assert_not_called()
mock_env_and_dependencies['print'].assert_any_call(
'INFO: Will retry installation next time you run OpenHands in VS Code.'
)
def test_flag_file_touch_os_error_windsurf(mock_env_and_dependencies):
"""Test OSError during flag file touch (Windsurf)."""
os.environ['__CFBundleIdentifier'] = 'com.exafunction.windsurf'
mock_env_and_dependencies['exists'].return_value = False
mock_env_and_dependencies['download'].return_value = None
mock_env_and_dependencies['as_file'].side_effect = FileNotFoundError
# Mock subprocess call for --list-extensions (returns empty, extension not installed)
mock_env_and_dependencies['subprocess'].return_value = subprocess.CompletedProcess(
returncode=0, args=[], stdout='', stderr=''
)
mock_env_and_dependencies['touch'].side_effect = OSError('Permission denied')
vscode_extension.attempt_vscode_extension_install()
# Should NOT create flag file on failure - this is the new behavior
mock_env_and_dependencies['touch'].assert_not_called()
mock_env_and_dependencies['print'].assert_any_call(
'INFO: Will retry installation next time you run OpenHands in Windsurf.'
)
def test_bundled_vsix_installation_failure_fallback_to_marketplace(
mock_env_and_dependencies,
):
"""Test bundled VSIX failure shows appropriate message."""
os.environ['TERM_PROGRAM'] = 'vscode'
mock_env_and_dependencies['exists'].return_value = False
mock_env_and_dependencies['download'].return_value = None
mock_vsix_path = mock.MagicMock()
mock_vsix_path.exists.return_value = True
mock_vsix_path.__str__.return_value = '/mock/path/openhands-vscode-0.0.1.vsix'
mock_env_and_dependencies[
'as_file'
].return_value.__enter__.return_value = mock_vsix_path
# Mock subprocess calls: first --list-extensions (empty), then bundled install (fails)
mock_env_and_dependencies['subprocess'].side_effect = [
subprocess.CompletedProcess(
returncode=0, args=[], stdout='', stderr=''
), # --list-extensions
subprocess.CompletedProcess(
args=[
'code',
'--install-extension',
'/mock/path/openhands-vscode-0.0.1.vsix',
'--force',
],
returncode=1,
stdout='Installation failed',
stderr='Error installing extension',
),
]
vscode_extension.attempt_vscode_extension_install()
# Two subprocess calls: --list-extensions and bundled VSIX install
assert mock_env_and_dependencies['subprocess'].call_count == 2
mock_env_and_dependencies['print'].assert_any_call(
'INFO: Automatic installation failed. Please check the OpenHands documentation for manual installation instructions.'
)
def test_bundled_vsix_not_found_fallback_to_marketplace(mock_env_and_dependencies):
"""Test bundled VSIX not found shows appropriate message."""
os.environ['TERM_PROGRAM'] = 'vscode'
mock_env_and_dependencies['exists'].return_value = False
mock_env_and_dependencies['download'].return_value = None
mock_vsix_path = mock.MagicMock()
mock_vsix_path.exists.return_value = False
mock_env_and_dependencies[
'as_file'
].return_value.__enter__.return_value = mock_vsix_path
# Mock subprocess call for --list-extensions (returns empty, extension not installed)
mock_env_and_dependencies['subprocess'].return_value = subprocess.CompletedProcess(
returncode=0, args=[], stdout='', stderr=''
)
vscode_extension.attempt_vscode_extension_install()
# One subprocess call for --list-extensions, no installation attempts
assert mock_env_and_dependencies['subprocess'].call_count == 1
mock_env_and_dependencies['print'].assert_any_call(
'INFO: Automatic installation failed. Please check the OpenHands documentation for manual installation instructions.'
)
def test_importlib_resources_exception_fallback_to_marketplace(
mock_env_and_dependencies,
):
"""Test importlib.resources exception shows appropriate message."""
os.environ['TERM_PROGRAM'] = 'vscode'
mock_env_and_dependencies['exists'].return_value = False
mock_env_and_dependencies['download'].return_value = None
mock_env_and_dependencies['as_file'].side_effect = FileNotFoundError(
'Resource not found'
)
# Mock subprocess call for --list-extensions (returns empty, extension not installed)
mock_env_and_dependencies['subprocess'].return_value = subprocess.CompletedProcess(
returncode=0, args=[], stdout='', stderr=''
)
vscode_extension.attempt_vscode_extension_install()
# One subprocess call for --list-extensions, no installation attempts
assert mock_env_and_dependencies['subprocess'].call_count == 1
mock_env_and_dependencies['print'].assert_any_call(
'INFO: Automatic installation failed. Please check the OpenHands documentation for manual installation instructions.'
)
def test_comprehensive_windsurf_detection_path_based(mock_env_and_dependencies):
"""Test Windsurf detection via PATH environment variable but no marketplace installation."""
os.environ['PATH'] = (
'/usr/local/bin:/Applications/Windsurf.app/Contents/Resources/app/bin:/usr/bin'
)
mock_env_and_dependencies['exists'].return_value = False
mock_env_and_dependencies['download'].return_value = None
mock_env_and_dependencies['as_file'].side_effect = FileNotFoundError
# Mock subprocess call for --list-extensions (returns empty, extension not installed)
mock_env_and_dependencies['subprocess'].return_value = subprocess.CompletedProcess(
returncode=0, args=[], stdout='', stderr=''
)
vscode_extension.attempt_vscode_extension_install()
# One subprocess call for --list-extensions, no installation attempts
assert mock_env_and_dependencies['subprocess'].call_count == 1
mock_env_and_dependencies['subprocess'].assert_called_with(
['surf', '--list-extensions'],
capture_output=True,
text=True,
check=False,
)
mock_env_and_dependencies['print'].assert_any_call(
'INFO: Automatic installation failed. Please check the OpenHands documentation for manual installation instructions.'
)
def test_comprehensive_windsurf_detection_env_value_based(mock_env_and_dependencies):
"""Test Windsurf detection via environment variable values but no marketplace installation."""
os.environ['SOME_APP_PATH'] = '/Applications/Windsurf.app/Contents/MacOS/Windsurf'
mock_env_and_dependencies['exists'].return_value = False
mock_env_and_dependencies['download'].return_value = None
mock_env_and_dependencies['as_file'].side_effect = FileNotFoundError
# Mock subprocess call for --list-extensions (returns empty, extension not installed)
mock_env_and_dependencies['subprocess'].return_value = subprocess.CompletedProcess(
returncode=0, args=[], stdout='', stderr=''
)
vscode_extension.attempt_vscode_extension_install()
# One subprocess call for --list-extensions, no installation attempts
assert mock_env_and_dependencies['subprocess'].call_count == 1
mock_env_and_dependencies['print'].assert_any_call(
'INFO: Automatic installation failed. Please check the OpenHands documentation for manual installation instructions.'
)
def test_comprehensive_windsurf_detection_multiple_indicators(
mock_env_and_dependencies,
):
"""Test Windsurf detection with multiple environment indicators."""
os.environ['__CFBundleIdentifier'] = 'com.exafunction.windsurf'
os.environ['PATH'] = (
'/usr/local/bin:/Applications/Windsurf.app/Contents/Resources/app/bin:/usr/bin'
)
os.environ['WINDSURF_CONFIG'] = '/Users/test/.windsurf/config'
mock_env_and_dependencies['exists'].return_value = False
mock_env_and_dependencies['download'].return_value = None
mock_env_and_dependencies['as_file'].side_effect = FileNotFoundError
# Mock subprocess call for --list-extensions (returns empty, extension not installed)
mock_env_and_dependencies['subprocess'].return_value = subprocess.CompletedProcess(
returncode=0, args=[], stdout='', stderr=''
)
vscode_extension.attempt_vscode_extension_install()
# One subprocess call for --list-extensions, no installation attempts
assert mock_env_and_dependencies['subprocess'].call_count == 1
mock_env_and_dependencies['print'].assert_any_call(
'INFO: Automatic installation failed. Please check the OpenHands documentation for manual installation instructions.'
)
def test_no_editor_detection_skips_installation(mock_env_and_dependencies):
"""Test that no installation is attempted when no supported editor is detected."""
os.environ['TERM_PROGRAM'] = 'iTerm.app'
os.environ['PATH'] = '/usr/local/bin:/usr/bin:/bin'
vscode_extension.attempt_vscode_extension_install()
mock_env_and_dependencies['exists'].assert_not_called()
mock_env_and_dependencies['touch'].assert_not_called()
mock_env_and_dependencies['subprocess'].assert_not_called()
mock_env_and_dependencies['print'].assert_not_called()
def test_both_bundled_and_marketplace_fail(mock_env_and_dependencies):
"""Test when bundled VSIX installation fails."""
os.environ['TERM_PROGRAM'] = 'vscode'
mock_env_and_dependencies['exists'].return_value = False
mock_env_and_dependencies['download'].return_value = None
mock_vsix_path = mock.MagicMock()
mock_vsix_path.exists.return_value = True
mock_vsix_path.__str__.return_value = '/mock/path/openhands-vscode-0.0.1.vsix'
mock_env_and_dependencies[
'as_file'
].return_value.__enter__.return_value = mock_vsix_path
# Mock subprocess calls: first --list-extensions (empty), then bundled install (fails)
mock_env_and_dependencies['subprocess'].side_effect = [
subprocess.CompletedProcess(
returncode=0, args=[], stdout='', stderr=''
), # --list-extensions
subprocess.CompletedProcess(
args=[
'code',
'--install-extension',
'/mock/path/openhands-vscode-0.0.1.vsix',
'--force',
],
returncode=1,
stdout='Bundled installation failed',
stderr='Error installing bundled extension',
),
]
vscode_extension.attempt_vscode_extension_install()
# Two subprocess calls: --list-extensions and bundled VSIX install
assert mock_env_and_dependencies['subprocess'].call_count == 2
mock_env_and_dependencies['print'].assert_any_call(
'INFO: Automatic installation failed. Please check the OpenHands documentation for manual installation instructions.'
)
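# (High-level flow the suite above pins down, written as a hypothetical sketch
# rather than the deleted implementation: try the bundled VSIX first, fall back to
# a GitHub-downloaded VSIX, and create the "don't try again" flag file only on
# success so that a failed attempt is retried on the next run.  The vsix-path
# arguments stand in for the bundled-resource lookup and the GitHub download,
# which are assumed to happen elsewhere.)
def attempt_install_sketch(
    cli_cmd: str,
    flag_file: pathlib.Path,
    bundled_vsix: str | None,
    github_vsix: str | None,
) -> None:
    def install(path: str) -> bool:
        result = subprocess.run(
            [cli_cmd, '--install-extension', path, '--force'],
            capture_output=True,
            text=True,
            check=False,
        )
        return result.returncode == 0

    if bundled_vsix and install(bundled_vsix):
        print('INFO: Bundled VS Code extension installed successfully.')
        flag_file.touch()
        return
    if github_vsix and install(github_vsix):
        print('INFO: OpenHands VS Code extension installed successfully from GitHub.')
        os.remove(github_vsix)
        flag_file.touch()
        return
    print(
        'INFO: Automatic installation failed. Please check the OpenHands '
        'documentation for manual installation instructions.'
    )
    # Deliberately no flag_file.touch() here, so the next run retries the install.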


@@ -153,165 +153,6 @@ def test_get_llm_config_arg_precedence(mock_expanduser, temp_config_files):
assert llm_config is None
@patch('openhands.core.config.utils.os.path.expanduser')
@patch('openhands.cli.main.FileSettingsStore.get_instance')
@patch('openhands.cli.main.FileSettingsStore.load')
def test_cli_main_settings_precedence(
mock_load, mock_get_instance, mock_expanduser, temp_config_files
):
"""Test that the CLI main.py correctly applies settings precedence."""
from openhands.cli.main import setup_config_from_args
mock_expanduser.side_effect = lambda path: path.replace(
'~', temp_config_files['home_dir']
)
# Create mock settings
mock_settings = MagicMock()
mock_settings.llm_model = 'settings-store-model'
mock_settings.llm_api_key = 'settings-store-api-key'
mock_settings.llm_base_url = None
mock_settings.agent = 'CodeActAgent'
mock_settings.confirmation_mode = False
mock_settings.enable_default_condenser = True
# Setup mocks
mock_load.return_value = mock_settings
mock_get_instance.return_value = MagicMock()
# Create mock args with config file pointing to current directory config
mock_args = MagicMock()
mock_args.config_file = temp_config_files['current_dir_toml']
mock_args.llm_config = None # No CLI parameter
mock_args.agent_cls = None
mock_args.max_iterations = None
mock_args.max_budget_per_task = None
mock_args.selected_repo = None
# Load config using the actual CLI code path
with patch('os.path.exists', return_value=True):
config = setup_config_from_args(mock_args)
# Verify that config.toml values take precedence over settings.json
assert config.get_llm_config().model == 'current-dir-model'
assert config.get_llm_config().api_key.get_secret_value() == 'current-dir-api-key'
@patch('openhands.core.config.utils.os.path.expanduser')
@patch('openhands.cli.main.FileSettingsStore.get_instance')
@patch('openhands.cli.main.FileSettingsStore.load')
def test_cli_with_l_parameter_precedence(
mock_load, mock_get_instance, mock_expanduser, temp_config_files
):
"""Test that CLI -l parameter has highest precedence in CLI mode."""
from openhands.cli.main import setup_config_from_args
mock_expanduser.side_effect = lambda path: path.replace(
'~', temp_config_files['home_dir']
)
# Create mock settings
mock_settings = MagicMock()
mock_settings.llm_model = 'settings-store-model'
mock_settings.llm_api_key = 'settings-store-api-key'
mock_settings.llm_base_url = None
mock_settings.agent = 'CodeActAgent'
mock_settings.confirmation_mode = False
mock_settings.enable_default_condenser = True
# Setup mocks
mock_load.return_value = mock_settings
mock_get_instance.return_value = MagicMock()
# Create mock args with -l parameter
mock_args = MagicMock()
mock_args.config_file = temp_config_files['current_dir_toml']
mock_args.llm_config = 'current-dir-llm' # Specify LLM via CLI
mock_args.agent_cls = None
mock_args.max_iterations = None
mock_args.max_budget_per_task = None
mock_args.selected_repo = None
# Load config using the actual CLI code path
with patch('os.path.exists', return_value=True):
config = setup_config_from_args(mock_args)
# Verify that -l parameter takes precedence over everything
assert config.get_llm_config().model == 'current-dir-specific-model'
assert (
config.get_llm_config().api_key.get_secret_value()
== 'current-dir-specific-api-key'
)
@patch('openhands.core.config.utils.os.path.expanduser')
@patch('openhands.cli.main.FileSettingsStore.get_instance')
@patch('openhands.cli.main.FileSettingsStore.load')
def test_cli_settings_json_not_override_config_toml(
mock_load, mock_get_instance, mock_expanduser, temp_config_files
):
"""Test that settings.json doesn't override config.toml in CLI mode."""
import importlib
import sys
from unittest.mock import patch
# First, ensure we can import the CLI main module
if 'openhands.cli.main' in sys.modules:
importlib.reload(sys.modules['openhands.cli.main'])
# Now import the specific function we want to test
from openhands.cli.main import setup_config_from_args
mock_expanduser.side_effect = lambda path: path.replace(
'~', temp_config_files['home_dir']
)
# Create mock settings with different values than config.toml
mock_settings = MagicMock()
mock_settings.llm_model = 'settings-json-model'
mock_settings.llm_api_key = 'settings-json-api-key'
mock_settings.llm_base_url = None
mock_settings.agent = 'CodeActAgent'
mock_settings.confirmation_mode = False
mock_settings.enable_default_condenser = True
# Setup mocks
mock_load.return_value = mock_settings
mock_get_instance.return_value = MagicMock()
# Create mock args with config file pointing to current directory config
mock_args = MagicMock()
mock_args.config_file = temp_config_files['current_dir_toml']
mock_args.llm_config = None # No CLI parameter
mock_args.agent_cls = None
mock_args.max_iterations = None
mock_args.max_budget_per_task = None
mock_args.selected_repo = None
# Load config using the actual CLI code path
with patch('os.path.exists', return_value=True):
setup_config_from_args(mock_args)
# Create a test LLM config to simulate the fix in CLI main.py
test_config = OpenHandsConfig()
test_llm_config = test_config.get_llm_config()
test_llm_config.model = 'config-toml-model'
test_llm_config.api_key = 'config-toml-api-key'
# Simulate the CLI main.py logic that we fixed
if not mock_args.llm_config and (test_llm_config.model or test_llm_config.api_key):
# Should NOT apply settings from settings.json
pass
else:
# This branch should not be taken in our test
test_llm_config.model = mock_settings.llm_model
test_llm_config.api_key = mock_settings.llm_api_key
# Verify that settings.json did not override config.toml
assert test_llm_config.model == 'config-toml-model'
assert test_llm_config.api_key == 'config-toml-api-key'
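# (Hedged sketch of the precedence these tests encode -- not the removed
# setup_config_from_args: an explicit -l/--llm-config selection wins, values that
# config.toml already set are left alone, and the settings store only fills gaps.)
def merge_llm_settings_sketch(llm_config, settings, explicit_llm_config_name=None):
    if explicit_llm_config_name:
        # -l already selected a named [llm.<name>] block from config.toml
        return llm_config
    if llm_config.model or llm_config.api_key:
        # config.toml values take precedence over settings.json
        return llm_config
    llm_config.model = settings.llm_model
    llm_config.api_key = settings.llm_api_key
    return llm_config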
def test_default_values_applied_when_none():
"""Test that default values are applied when config values are None."""
# Create mock args with None values for agent_cls and max_iterations


@@ -1,10 +1,3 @@
import time
from unittest.mock import MagicMock
import pytest
from openhands.cli.commands import handle_commands
from openhands.core.schema import AgentState
from openhands.core.schema.exit_reason import ExitReason
@@ -23,36 +16,3 @@ def test_exit_reason_enum_names():
def test_exit_reason_str_representation():
assert str(ExitReason.INTENTIONAL) == 'ExitReason.INTENTIONAL'
assert repr(ExitReason.ERROR) == "<ExitReason.ERROR: 'error'>"
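# (Hedged sketch of the enum shape these two assertions imply: a plain Enum with
# lowercase string values.  The real module was removed with the V0 CLI.)
from enum import Enum


class ExitReasonSketch(Enum):
    INTENTIONAL = 'intentional'
    ERROR = 'error'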
@pytest.mark.asyncio
async def test_handle_exit_command_returns_intentional(monkeypatch):
monkeypatch.setattr('openhands.cli.commands.cli_confirm', lambda *a, **k: 0)
mock_usage_metrics = MagicMock()
mock_usage_metrics.session_init_time = time.time() - 3600
mock_usage_metrics.metrics.accumulated_cost = 0.123456
# Mock all token counts used in display formatting
mock_usage_metrics.metrics.accumulated_token_usage.prompt_tokens = 1234
mock_usage_metrics.metrics.accumulated_token_usage.cache_read_tokens = 5678
mock_usage_metrics.metrics.accumulated_token_usage.cache_write_tokens = 9012
mock_usage_metrics.metrics.accumulated_token_usage.completion_tokens = 3456
(
close_repl,
reload_microagents,
new_session_requested,
exit_reason,
) = await handle_commands(
'/exit',
MagicMock(),
mock_usage_metrics,
'test-session',
MagicMock(),
'/tmp/test',
MagicMock(),
AgentState.RUNNING,
)
assert exit_reason == ExitReason.INTENTIONAL


@@ -11,24 +11,6 @@ import sys
import pytest
def test_cli_import_with_broken_third_party_runtime():
"""Test that CLI can be imported even with broken third-party runtime dependencies."""
# Clear any cached modules to ensure fresh import
modules_to_clear = [
k for k in sys.modules.keys() if 'openhands' in k or 'third_party' in k
]
for module in modules_to_clear:
del sys.modules[module]
# This should not raise an exception even if third-party runtimes have broken dependencies
try:
import openhands.cli.main # noqa: F401
assert True
except Exception as e:
pytest.fail(f'CLI import failed: {e}')
def test_runtime_import_robustness():
"""Test that runtime import system is robust against broken dependencies."""
# Clear any cached runtime modules