refactor: merge main and resolve conflicts

This commit is contained in:
hieptl
2025-12-03 15:39:05 +07:00
55 changed files with 2978 additions and 1250 deletions

View File

@@ -97,6 +97,9 @@ class GithubUserContext(UserContext):
user_secrets = await self.secrets_store.load()
return dict(user_secrets.custom_secrets) if user_secrets else {}
async def get_mcp_api_key(self) -> str | None:
    # Intentionally unimplemented: this user-context flavor does not supply an
    # MCP API key. Callers must catch NotImplementedError or use a subclass
    # that overrides this method (e.g. the SaaS auth implementation).
    raise NotImplementedError()
async def get_user_proactive_conversation_setting(user_id: str | None) -> bool:
"""Get the user's proactive conversation setting.

View File

@@ -203,6 +203,15 @@ class SaasUserAuth(UserAuth):
self.settings_store = settings_store
return settings_store
async def get_mcp_api_key(self) -> str:
    """Return this user's MCP API key, provisioning one on first use.

    Looks the key up in the API key store; when the user has no MCP key yet,
    a new one is created and persisted under the 'MCP_API_KEY' name.
    """
    store = ApiKeyStore.get_instance()
    key = store.retrieve_mcp_api_key(self.user_id)
    if key:
        return key
    # First request for this user: mint a dedicated MCP key (no expiry).
    return store.create_api_key(self.user_id, 'MCP_API_KEY', None)
@classmethod
async def get_instance(cls, request: Request) -> UserAuth:
logger.debug('saas_user_auth_get_instance')
@@ -243,7 +252,12 @@ def get_api_key_from_header(request: Request):
# This is a temp hack
# Streamable HTTP MCP Client works via redirect requests, but drops the Authorization header for some reason
# We include `X-Session-API-Key` header by default due to nested runtimes, so it is used as a drop-in replacement here
return request.headers.get('X-Session-API-Key')
session_api_key = request.headers.get('X-Session-API-Key')
if session_api_key:
return session_api_key
# Fallback to X-Access-Token header as an additional option
return request.headers.get('X-Access-Token')
async def saas_user_auth_from_bearer(request: Request) -> SaasUserAuth | None:

View File

@@ -31,7 +31,6 @@ from openhands.server.services.conversation_service import create_provider_token
from openhands.server.shared import config
from openhands.server.user_auth import get_access_token
from openhands.server.user_auth.user_auth import get_user_auth
from openhands.utils.posthog_tracker import track_user_signup_completed
with warnings.catch_warnings():
warnings.simplefilter('ignore')
@@ -370,12 +369,6 @@ async def accept_tos(request: Request):
logger.info(f'User {user_id} accepted TOS')
# Track user signup completion in PostHog
track_user_signup_completed(
user_id=user_id,
signup_timestamp=user_settings.accepted_tos.isoformat(),
)
response = JSONResponse(
status_code=status.HTTP_200_OK, content={'redirect_url': redirect_url}
)

View File

@@ -28,7 +28,6 @@ from storage.subscription_access import SubscriptionAccess
from openhands.server.user_auth import get_user_id
from openhands.utils.http_session import httpx_verify_option
from openhands.utils.posthog_tracker import track_credits_purchased
stripe.api_key = STRIPE_API_KEY
billing_router = APIRouter(prefix='/api/billing')
@@ -458,20 +457,6 @@ async def success_callback(session_id: str, request: Request):
)
session.commit()
# Track credits purchased in PostHog
try:
track_credits_purchased(
user_id=billing_session.user_id,
amount_usd=amount_subtotal / 100, # Convert cents to dollars
credits_added=add_credits,
stripe_session_id=session_id,
)
except Exception as e:
logger.warning(
f'Failed to track credits purchase: {e}',
extra={'user_id': billing_session.user_id, 'error': str(e)},
)
return RedirectResponse(
f'{request.base_url}settings/billing?checkout=success', status_code=302
)

View File

@@ -535,3 +535,115 @@ def test_get_api_key_from_header_with_invalid_authorization_format():
# Assert that None was returned
assert api_key is None
def test_get_api_key_from_header_with_x_access_token():
    """X-Access-Token alone should be accepted as the API key source."""
    request = MagicMock(spec=Request)
    request.headers = {'X-Access-Token': 'access_token_key'}

    result = get_api_key_from_header(request)

    assert result == 'access_token_key'
def test_get_api_key_from_header_priority_authorization_over_x_access_token():
    """A valid Authorization bearer token must win over X-Access-Token."""
    request = MagicMock(spec=Request)
    request.headers = {
        'Authorization': 'Bearer auth_api_key',
        'X-Access-Token': 'access_token_key',
    }

    assert get_api_key_from_header(request) == 'auth_api_key'
def test_get_api_key_from_header_priority_x_session_over_x_access_token():
    """X-Session-API-Key must win over X-Access-Token when both are sent."""
    request = MagicMock(spec=Request)
    request.headers = {
        'X-Session-API-Key': 'session_api_key',
        'X-Access-Token': 'access_token_key',
    }

    assert get_api_key_from_header(request) == 'session_api_key'
def test_get_api_key_from_header_all_three_headers():
    """With all three headers present, Authorization has the highest priority."""
    request = MagicMock(spec=Request)
    request.headers = {
        'Authorization': 'Bearer auth_api_key',
        'X-Session-API-Key': 'session_api_key',
        'X-Access-Token': 'access_token_key',
    }

    assert get_api_key_from_header(request) == 'auth_api_key'
def test_get_api_key_from_header_invalid_authorization_fallback_to_x_access_token():
    """A non-Bearer Authorization value is ignored; X-Access-Token is used."""
    request = MagicMock(spec=Request)
    request.headers = {
        'Authorization': 'InvalidFormat api_key',
        'X-Access-Token': 'access_token_key',
    }

    assert get_api_key_from_header(request) == 'access_token_key'
def test_get_api_key_from_header_empty_headers():
    """Empty (falsy) header values fall through to the next header in order."""
    request = MagicMock(spec=Request)
    request.headers = {
        'Authorization': '',
        'X-Session-API-Key': '',
        'X-Access-Token': 'access_token_key',
    }

    # Empty Authorization and X-Session-API-Key are skipped.
    assert get_api_key_from_header(request) == 'access_token_key'
def test_get_api_key_from_header_bearer_with_empty_token():
    """'Bearer ' with no token yields the empty string (documents current behavior)."""
    request = MagicMock(spec=Request)
    request.headers = {
        'Authorization': 'Bearer ',
        'X-Access-Token': 'access_token_key',
    }

    # NOTE: the implementation returns the (empty) bearer token rather than
    # falling back to X-Access-Token; this test pins that behavior.
    assert get_api_key_from_header(request) == ''

View File

@@ -39,7 +39,7 @@
"jose": "^6.1.0",
"lucide-react": "^0.544.0",
"monaco-editor": "^0.53.0",
"posthog-js": "^1.290.0",
"posthog-js": "^1.298.1",
"react": "^19.1.1",
"react-dom": "^19.1.1",
"react-highlight": "^0.15.0",
@@ -3910,9 +3910,9 @@
"license": "MIT"
},
"node_modules/@posthog/core": {
"version": "1.5.2",
"resolved": "https://registry.npmjs.org/@posthog/core/-/core-1.5.2.tgz",
"integrity": "sha512-iedUP3EnOPPxTA2VaIrsrd29lSZnUV+ZrMnvY56timRVeZAXoYCkmjfIs3KBAsF8OUT5h1GXLSkoQdrV0r31OQ==",
"version": "1.6.0",
"resolved": "https://registry.npmjs.org/@posthog/core/-/core-1.6.0.tgz",
"integrity": "sha512-Tbh8UACwbb7jFdDC7wwXHtfNzO+4wKh3VbyMHmp2UBe6w1jliJixexTJNfkqdGZm+ht3M10mcKvGGPnoZ2zLBg==",
"license": "MIT",
"dependencies": {
"cross-spawn": "^7.0.6"
@@ -14711,12 +14711,12 @@
"license": "MIT"
},
"node_modules/posthog-js": {
"version": "1.290.0",
"resolved": "https://registry.npmjs.org/posthog-js/-/posthog-js-1.290.0.tgz",
"integrity": "sha512-zavBwZkf+3JeiSDVE7ZDXBfzva/iOljicdhdJH+cZoqp0LsxjKxjnNhGOd3KpAhw0wqdwjhd7Lp1aJuI7DXyaw==",
"version": "1.298.1",
"resolved": "https://registry.npmjs.org/posthog-js/-/posthog-js-1.298.1.tgz",
"integrity": "sha512-MynFhC2HO6sg5moUfpkd0s6RzAqcqFX75kjIi4Xrj2Gl0/YQWYvFUgvv8FCpWPKPs2mdvNWYhs+oqJg0BVVHPw==",
"license": "SEE LICENSE IN LICENSE",
"dependencies": {
"@posthog/core": "1.5.2",
"@posthog/core": "1.6.0",
"core-js": "^3.38.1",
"fflate": "^0.4.8",
"preact": "^10.19.3",

View File

@@ -38,7 +38,7 @@
"jose": "^6.1.0",
"lucide-react": "^0.544.0",
"monaco-editor": "^0.53.0",
"posthog-js": "^1.290.0",
"posthog-js": "^1.298.1",
"react": "^19.1.1",
"react-dom": "^19.1.1",
"react-highlight": "^0.15.0",

View File

@@ -3,15 +3,19 @@ import { Provider } from "#/types/settings";
import { V1SandboxStatus } from "../sandbox-service/sandbox-service.types";
// V1 API Types for requests
// Note: This represents the serialized API format, not the internal TextContent/ImageContent types
export interface V1MessageContent {
type: "text" | "image_url";
text?: string;
image_url?: {
url: string;
};
// These types match the SDK's TextContent and ImageContent formats
export interface V1TextContent {
  // Discriminant for the text variant of V1MessageContent.
  type: "text";
  // Plain-text body of this message part.
  text: string;
}
export interface V1ImageContent {
  // Discriminant for the image variant of V1MessageContent.
  type: "image";
  // One or more image URLs carried by a single content part.
  image_urls: string[];
}
// Discriminated union over the `type` field.
export type V1MessageContent = V1TextContent | V1ImageContent;
type V1Role = "user" | "system" | "assistant" | "tool";
export interface V1SendMessageRequest {

View File

@@ -12,20 +12,15 @@ import { USE_PLANNING_AGENT } from "#/utils/feature-flags";
import { useAgentState } from "#/hooks/use-agent-state";
import { AgentState } from "#/types/agent-state";
import { useActiveConversation } from "#/hooks/query/use-active-conversation";
import { useCreateConversation } from "#/hooks/mutation/use-create-conversation";
import { displaySuccessToast } from "#/utils/custom-toast-handlers";
import { useUnifiedWebSocketStatus } from "#/hooks/use-unified-websocket-status";
import { useSubConversationTaskPolling } from "#/hooks/query/use-sub-conversation-task-polling";
import { useHandlePlanClick } from "#/hooks/use-handle-plan-click";
export function ChangeAgentButton() {
const [contextMenuOpen, setContextMenuOpen] = useState<boolean>(false);
const {
conversationMode,
setConversationMode,
setSubConversationTaskId,
subConversationTaskId,
} = useConversationStore();
const { conversationMode, setConversationMode, subConversationTaskId } =
useConversationStore();
const webSocketStatus = useUnifiedWebSocketStatus();
@@ -40,8 +35,6 @@ export function ChangeAgentButton() {
const isAgentRunning = curAgentState === AgentState.RUNNING;
const { data: conversation } = useActiveConversation();
const { mutate: createConversation, isPending: isCreatingConversation } =
useCreateConversation();
// Poll sub-conversation task and invalidate parent conversation when ready
useSubConversationTaskPolling(
@@ -49,6 +42,9 @@ export function ChangeAgentButton() {
conversation?.conversation_id || null,
);
// Get handlePlanClick and isCreatingConversation from custom hook
const { handlePlanClick, isCreatingConversation } = useHandlePlanClick();
// Close context menu when agent starts running
useEffect(() => {
if ((isAgentRunning || !isWebSocketConnected) && contextMenuOpen) {
@@ -56,45 +52,6 @@ export function ChangeAgentButton() {
}
}, [isAgentRunning, contextMenuOpen, isWebSocketConnected]);
const handlePlanClick = (
event: React.MouseEvent<HTMLButtonElement> | KeyboardEvent,
) => {
event.preventDefault();
event.stopPropagation();
// Set conversation mode to "plan" immediately
setConversationMode("plan");
// Check if sub_conversation_ids is not empty
if (
(conversation?.sub_conversation_ids &&
conversation.sub_conversation_ids.length > 0) ||
!conversation?.conversation_id
) {
// Do nothing if both conditions are true
return;
}
// Create a new sub-conversation if we have a current conversation ID
createConversation(
{
parentConversationId: conversation.conversation_id,
agentType: "plan",
},
{
onSuccess: (data) => {
displaySuccessToast(
t(I18nKey.PLANNING_AGENTT$PLANNING_AGENT_INITIALIZED),
);
// Track the task ID to poll for sub-conversation creation
if (data.v1_task_id) {
setSubConversationTaskId(data.v1_task_id);
}
},
},
);
};
const isButtonDisabled =
isAgentRunning ||
isCreatingConversation ||

View File

@@ -1,15 +1,9 @@
import React from "react";
import Markdown from "react-markdown";
import remarkGfm from "remark-gfm";
import remarkBreaks from "remark-breaks";
import { code } from "../markdown/code";
import { cn } from "#/utils/utils";
import { ul, ol } from "../markdown/list";
import { CopyToClipboardButton } from "#/components/shared/buttons/copy-to-clipboard-button";
import { anchor } from "../markdown/anchor";
import { OpenHandsSourceType } from "#/types/core/base";
import { paragraph } from "../markdown/paragraph";
import { TooltipButton } from "#/components/shared/buttons/tooltip-button";
import { MarkdownRenderer } from "../markdown/markdown-renderer";
interface ChatMessageProps {
type: OpenHandsSourceType;
@@ -116,18 +110,7 @@ export function ChatMessage({
wordBreak: "break-word",
}}
>
<Markdown
components={{
code,
ul,
ol,
a: anchor,
p: paragraph,
}}
remarkPlugins={[remarkGfm, remarkBreaks]}
>
{message}
</Markdown>
<MarkdownRenderer includeStandard>{message}</MarkdownRenderer>
</div>
{children}
</article>

View File

@@ -1,13 +1,9 @@
import React from "react";
import Markdown from "react-markdown";
import remarkGfm from "remark-gfm";
import remarkBreaks from "remark-breaks";
import { useTranslation } from "react-i18next";
import { code } from "../markdown/code";
import { ol, ul } from "../markdown/list";
import ArrowDown from "#/icons/angle-down-solid.svg?react";
import ArrowUp from "#/icons/angle-up-solid.svg?react";
import i18n from "#/i18n";
import { MarkdownRenderer } from "../markdown/markdown-renderer";
interface ErrorMessageProps {
errorId?: string;
@@ -40,18 +36,7 @@ export function ErrorMessage({ errorId, defaultMessage }: ErrorMessageProps) {
</button>
</div>
{showDetails && (
<Markdown
components={{
code,
ul,
ol,
}}
remarkPlugins={[remarkGfm, remarkBreaks]}
>
{defaultMessage}
</Markdown>
)}
{showDetails && <MarkdownRenderer>{defaultMessage}</MarkdownRenderer>}
</div>
);
}

View File

@@ -1,9 +1,6 @@
import { useEffect, useState } from "react";
import { Trans, useTranslation } from "react-i18next";
import Markdown from "react-markdown";
import { Link } from "react-router";
import remarkGfm from "remark-gfm";
import remarkBreaks from "remark-breaks";
import { useConfig } from "#/hooks/query/use-config";
import { I18nKey } from "#/i18n/declaration";
import ArrowDown from "#/icons/angle-down-solid.svg?react";
@@ -13,9 +10,7 @@ import XCircle from "#/icons/x-circle-solid.svg?react";
import { OpenHandsAction } from "#/types/core/actions";
import { OpenHandsObservation } from "#/types/core/observations";
import { cn } from "#/utils/utils";
import { code } from "../markdown/code";
import { ol, ul } from "../markdown/list";
import { paragraph } from "../markdown/paragraph";
import { MarkdownRenderer } from "../markdown/markdown-renderer";
import { MonoComponent } from "./mono-component";
import { PathComponent } from "./path-component";
@@ -192,17 +187,7 @@ export function ExpandableMessage({
</div>
{showDetails && (
<div className="text-sm">
<Markdown
components={{
code,
ul,
ol,
p: paragraph,
}}
remarkPlugins={[remarkGfm, remarkBreaks]}
>
{details}
</Markdown>
<MarkdownRenderer includeStandard>{details}</MarkdownRenderer>
</div>
)}
</div>

View File

@@ -1,13 +1,9 @@
import React from "react";
import Markdown from "react-markdown";
import remarkGfm from "remark-gfm";
import remarkBreaks from "remark-breaks";
import { code } from "../markdown/code";
import { ol, ul } from "../markdown/list";
import ArrowDown from "#/icons/angle-down-solid.svg?react";
import ArrowUp from "#/icons/angle-up-solid.svg?react";
import { SuccessIndicator } from "./success-indicator";
import { ObservationResultStatus } from "./event-content-helpers/get-observation-result";
import { MarkdownRenderer } from "../markdown/markdown-renderer";
interface GenericEventMessageProps {
title: React.ReactNode;
@@ -49,16 +45,7 @@ export function GenericEventMessage({
{showDetails &&
(typeof details === "string" ? (
<Markdown
components={{
code,
ul,
ol,
}}
remarkPlugins={[remarkGfm, remarkBreaks]}
>
{details}
</Markdown>
<MarkdownRenderer>{details}</MarkdownRenderer>
) : (
details
))}

View File

@@ -39,7 +39,7 @@ export function ConversationCardFooter({
{(createdAt ?? lastUpdatedAt) && (
<p className="text-xs text-[#A3A3A3] flex-1 text-right">
<time>
{`${formatTimeDelta(new Date(lastUpdatedAt ?? createdAt))} ${t(I18nKey.CONVERSATION$AGO)}`}
{`${formatTimeDelta(lastUpdatedAt ?? createdAt)} ${t(I18nKey.CONVERSATION$AGO)}`}
</time>
</p>
)}

View File

@@ -31,7 +31,7 @@ export function StartTaskCardFooter({
{createdAt && (
<p className="text-xs text-[#A3A3A3] flex-1 text-right">
<time>
{`${formatTimeDelta(new Date(createdAt))} ${t(I18nKey.CONVERSATION$AGO)}`}
{`${formatTimeDelta(createdAt)} ${t(I18nKey.CONVERSATION$AGO)}`}
</time>
</p>
)}

View File

@@ -67,12 +67,14 @@ export function RecentConversation({ conversation }: RecentConversationProps) {
</div>
) : null}
</div>
<span>
{formatTimeDelta(
new Date(conversation.created_at || conversation.last_updated_at),
)}{" "}
{t(I18nKey.CONVERSATION$AGO)}
</span>
{(conversation.created_at || conversation.last_updated_at) && (
<span>
{formatTimeDelta(
conversation.created_at || conversation.last_updated_at,
)}{" "}
{t(I18nKey.CONVERSATION$AGO)}
</span>
)}
</div>
</button>
</Link>

View File

@@ -0,0 +1,80 @@
import Markdown, { Components } from "react-markdown";
import remarkGfm from "remark-gfm";
import remarkBreaks from "remark-breaks";
import { code } from "./code";
import { ul, ol } from "./list";
import { paragraph } from "./paragraph";
import { anchor } from "./anchor";
import { h1, h2, h3, h4, h5, h6 } from "./headings";
interface MarkdownRendererProps {
  /**
   * The markdown content to render, passed as children. Ignored when the
   * `content` prop is provided.
   */
  children?: string;
  /**
   * The markdown content to render. Takes precedence over `children`.
   */
  content?: string;
  /**
   * Additional or override components for markdown elements.
   * Default components (code, ul, ol) are always included unless overridden.
   */
  components?: Partial<Components>;
  /**
   * Whether to include standard components (anchor, paragraph).
   * Defaults to false.
   */
  includeStandard?: boolean;
  /**
   * Whether to include heading components (h1-h6).
   * Defaults to false.
   */
  includeHeadings?: boolean;
}
/**
 * Shared Markdown renderer giving consistent rendering across the app.
 *
 * Always applies the code/ul/ol components plus the remarkGfm and
 * remarkBreaks plugins. Optional flags layer on more components:
 * - includeStandard: anchor and paragraph
 * - includeHeadings: h1-h6
 * Caller-supplied `components` override any of the above.
 */
export function MarkdownRenderer({
  children,
  content,
  components: customComponents,
  includeStandard = false,
  includeHeadings = false,
}: MarkdownRendererProps) {
  // Start from the always-on defaults, then layer the optional groups on top.
  const merged: Components = { code, ul, ol };
  if (includeStandard) {
    merged.a = anchor;
    merged.p = paragraph;
  }
  if (includeHeadings) {
    Object.assign(merged, { h1, h2, h3, h4, h5, h6 });
  }
  // Custom components always win over the defaults.
  if (customComponents) {
    Object.assign(merged, customComponents);
  }

  // `content` takes precedence over `children`; render nothing when neither
  // is supplied.
  const markdownContent = content ?? children ?? "";

  return (
    <Markdown components={merged} remarkPlugins={[remarkGfm, remarkBreaks]}>
      {markdownContent}
    </Markdown>
  );
}

View File

@@ -1,16 +1,10 @@
import { useTranslation } from "react-i18next";
import { Spinner } from "@heroui/react";
import Markdown from "react-markdown";
import remarkGfm from "remark-gfm";
import remarkBreaks from "remark-breaks";
import { code } from "../markdown/code";
import { ul, ol } from "../markdown/list";
import { paragraph } from "../markdown/paragraph";
import { anchor } from "../markdown/anchor";
import { useMicroagentManagementStore } from "#/state/microagent-management-store";
import { useRepositoryMicroagentContent } from "#/hooks/query/use-repository-microagent-content";
import { I18nKey } from "#/i18n/declaration";
import { extractRepositoryInfo } from "#/utils/utils";
import { MarkdownRenderer } from "../markdown/markdown-renderer";
export function MicroagentManagementViewMicroagentContent() {
const { t } = useTranslation();
@@ -49,18 +43,9 @@ export function MicroagentManagementViewMicroagentContent() {
</div>
)}
{microagentData && !isLoading && !error && (
<Markdown
components={{
code,
ul,
ol,
a: anchor,
p: paragraph,
}}
remarkPlugins={[remarkGfm, remarkBreaks]}
>
<MarkdownRenderer includeStandard>
{microagentData.content}
</Markdown>
</MarkdownRenderer>
)}
</div>
);

View File

@@ -184,7 +184,22 @@ const getFinishObservationContent = (
event: ObservationEvent<FinishObservation>,
): string => {
const { observation } = event;
return observation.message || "";
// Extract text content from the observation
const textContent = observation.content
.filter((c) => c.type === "text")
.map((c) => c.text)
.join("\n");
let content = "";
if (observation.is_error) {
content += `**Error:**\n${textContent}`;
} else {
content += textContent;
}
return content;
};
export const getObservationContent = (event: ObservationEvent): string => {

View File

@@ -9,6 +9,7 @@ import {
} from "../event-content-helpers/create-skill-ready-event";
import { V1ConfirmationButtons } from "#/components/shared/buttons/v1-confirmation-buttons";
import { ObservationResultStatus } from "../../../features/chat/event-content-helpers/get-observation-result";
import { MarkdownRenderer } from "#/components/features/markdown/markdown-renderer";
interface GenericEventMessageWrapperProps {
event: OpenHandsEvent | SkillReadyEvent;
@@ -23,11 +24,17 @@ export function GenericEventMessageWrapper({
// SkillReadyEvent is not an observation event, so skip the observation checks
if (!isSkillReadyEvent(event)) {
if (
isObservationEvent(event) &&
event.observation.kind === "TaskTrackerObservation"
) {
return <div>{details}</div>;
if (isObservationEvent(event)) {
if (event.observation.kind === "TaskTrackerObservation") {
return <div>{details}</div>;
}
if (event.observation.kind === "FinishObservation") {
return (
<MarkdownRenderer includeStandard includeHeadings>
{details as string}
</MarkdownRenderer>
);
}
}
}

View File

@@ -505,7 +505,6 @@ export function ConversationWebSocketProvider({
},
{
onSuccess: (fileContent) => {
console.log("File content:", fileContent);
setPlanContent(fileContent);
},
onError: (error) => {

View File

@@ -0,0 +1,71 @@
import { useCallback } from "react";
import { useTranslation } from "react-i18next";
import { I18nKey } from "#/i18n/declaration";
import { useConversationStore } from "#/state/conversation-store";
import { useActiveConversation } from "#/hooks/query/use-active-conversation";
import { useCreateConversation } from "#/hooks/mutation/use-create-conversation";
import { displaySuccessToast } from "#/utils/custom-toast-handlers";
/**
 * Hook encapsulating plan-conversation creation.
 *
 * Returns `handlePlanClick`, which switches the UI into plan mode and, when
 * a parent conversation exists without sub-conversations, spawns a "plan"
 * sub-conversation — together with the pending state of that request.
 *
 * @returns An object with the handlePlanClick callback and the
 *   isCreatingConversation pending flag
 */
export const useHandlePlanClick = () => {
  const { t } = useTranslation();
  const { setConversationMode, setSubConversationTaskId } =
    useConversationStore();
  const { data: conversation } = useActiveConversation();
  const { mutate: createConversation, isPending: isCreatingConversation } =
    useCreateConversation();

  const handlePlanClick = useCallback(
    (event?: React.MouseEvent<HTMLButtonElement> | KeyboardEvent) => {
      event?.preventDefault();
      event?.stopPropagation();

      // Flip the UI into plan mode immediately for instant feedback.
      setConversationMode("plan");

      const alreadyHasSubConversations =
        (conversation?.sub_conversation_ids?.length ?? 0) > 0;

      // Nothing to create when a sub-conversation already exists or there is
      // no parent conversation to attach one to.
      if (alreadyHasSubConversations || !conversation?.conversation_id) {
        return;
      }

      createConversation(
        {
          parentConversationId: conversation.conversation_id,
          agentType: "plan",
        },
        {
          onSuccess: (data) => {
            displaySuccessToast(
              t(I18nKey.PLANNING_AGENTT$PLANNING_AGENT_INITIALIZED),
            );
            // Remember the task ID so polling can detect the new
            // sub-conversation once it is ready.
            if (data.v1_task_id) {
              setSubConversationTaskId(data.v1_task_id);
            }
          },
        },
      );
    },
    [
      conversation,
      createConversation,
      setConversationMode,
      setSubConversationTaskId,
      t,
    ],
  );

  return { handlePlanClick, isCreatingConversation };
};

View File

@@ -41,13 +41,11 @@ export function useSendMessage() {
},
];
// Add images if present
// Add images if present - using SDK's ImageContent format
if (args.image_urls && args.image_urls.length > 0) {
args.image_urls.forEach((url) => {
content.push({
type: "image_url",
image_url: { url },
});
content.push({
type: "image",
image_urls: args.image_urls,
});
}

View File

@@ -30,11 +30,12 @@ function BillingSettingsScreen() {
}
displaySuccessToast(t(I18nKey.PAYMENT$SUCCESS));
setSearchParams({});
} else if (checkoutStatus === "cancel") {
displayErrorToast(t(I18nKey.PAYMENT$CANCELLED));
setSearchParams({});
}
setSearchParams({});
}, [checkoutStatus, searchParams, setSearchParams, t, trackCreditsPurchased]);
return <PaymentForm />;

View File

@@ -28,6 +28,7 @@ import { KeyStatusIcon } from "#/components/features/settings/key-status-icon";
import { DEFAULT_SETTINGS } from "#/services/settings";
import { getProviderId } from "#/utils/map-provider";
import { DEFAULT_OPENHANDS_MODEL } from "#/utils/verified-models";
import { USE_V1_CONVERSATION_API } from "#/utils/feature-flags";
interface OpenHandsApiKeyHelpProps {
testId: string;
@@ -118,6 +119,9 @@ function LlmSettingsScreen() {
const isSaasMode = config?.APP_MODE === "saas";
const shouldUseOpenHandsKey = isOpenHandsProvider && isSaasMode;
// Determine if we should hide the agent dropdown when V1 conversation API feature flag is enabled
const isV1Enabled = USE_V1_CONVERSATION_API();
React.useEffect(() => {
const determineWhetherToToggleAdvancedSettings = () => {
if (resources && settings) {
@@ -612,21 +616,23 @@ function LlmSettingsScreen() {
href="https://tavily.com/"
/>
<SettingsDropdownInput
testId="agent-input"
name="agent-input"
label={t(I18nKey.SETTINGS$AGENT)}
items={
resources?.agents.map((agent) => ({
key: agent,
label: agent, // TODO: Add i18n support for agent names
})) || []
}
defaultSelectedKey={settings.AGENT}
isClearable={false}
onInputChange={handleAgentIsDirty}
wrapperClassName="w-full max-w-[680px]"
/>
{!isV1Enabled && (
<SettingsDropdownInput
testId="agent-input"
name="agent-input"
label={t(I18nKey.SETTINGS$AGENT)}
items={
resources?.agents.map((agent) => ({
key: agent,
label: agent, // TODO: Add i18n support for agent names
})) || []
}
defaultSelectedKey={settings.AGENT}
isClearable={false}
onInputChange={handleAgentIsDirty}
wrapperClassName="w-full max-w-[680px]"
/>
)}
</>
)}

View File

@@ -1,24 +1,11 @@
import React from "react";
import { useTranslation } from "react-i18next";
import Markdown from "react-markdown";
import remarkGfm from "remark-gfm";
import remarkBreaks from "remark-breaks";
import { I18nKey } from "#/i18n/declaration";
import LessonPlanIcon from "#/icons/lesson-plan.svg?react";
import { useConversationStore } from "#/state/conversation-store";
import { code } from "#/components/features/markdown/code";
import { ul, ol } from "#/components/features/markdown/list";
import { paragraph } from "#/components/features/markdown/paragraph";
import { anchor } from "#/components/features/markdown/anchor";
import {
h1,
h2,
h3,
h4,
h5,
h6,
} from "#/components/features/markdown/headings";
import { useScrollToBottom } from "#/hooks/use-scroll-to-bottom";
import { MarkdownRenderer } from "#/components/features/markdown/markdown-renderer";
import { useHandlePlanClick } from "#/hooks/use-handle-plan-click";
function PlannerTab() {
const { t } = useTranslation();
@@ -26,7 +13,8 @@ function PlannerTab() {
React.useRef<HTMLDivElement>(null),
);
const { planContent, setConversationMode } = useConversationStore();
const { planContent } = useConversationStore();
const { handlePlanClick } = useHandlePlanClick();
if (planContent !== null && planContent !== undefined) {
return (
@@ -35,24 +23,9 @@ function PlannerTab() {
onScroll={(e) => onChatBodyScroll(e.currentTarget)}
className="flex flex-col w-full h-full p-4 overflow-auto"
>
<Markdown
components={{
code,
ul,
ol,
a: anchor,
p: paragraph,
h1,
h2,
h3,
h4,
h5,
h6,
}}
remarkPlugins={[remarkGfm, remarkBreaks]}
>
<MarkdownRenderer includeStandard includeHeadings>
{planContent}
</Markdown>
</MarkdownRenderer>
</div>
);
}
@@ -65,7 +38,7 @@ function PlannerTab() {
</span>
<button
type="button"
onClick={() => setConversationMode("plan")}
onClick={handlePlanClick}
className="flex w-[164px] h-[40px] p-2 justify-center items-center shrink-0 rounded-lg bg-white overflow-hidden text-black text-ellipsis font-sans text-[16px] not-italic font-normal leading-[20px] hover:cursor-pointer hover:opacity-80"
>
{t(I18nKey.COMMON$CREATE_A_PLAN)}

View File

@@ -25,9 +25,13 @@ export interface MCPToolObservation
export interface FinishObservation
extends ObservationBase<"FinishObservation"> {
/**
* Final message sent to the user
* Content returned from the finish action as a list of TextContent/ImageContent objects.
*/
message: string;
content: Array<TextContent | ImageContent>;
/**
* Whether the finish action resulted in an error
*/
is_error: boolean;
}
export interface ThinkObservation extends ObservationBase<"ThinkObservation"> {

View File

@@ -1,16 +1,45 @@
/**
 * Parse an ISO 8601 date string, treating zone-less strings as UTC.
 *
 * JavaScript interprets ISO strings without a timezone designator as local
 * time; appending "Z" forces UTC interpretation for such strings.
 *
 * @param dateString ISO 8601 date string
 * @returns Date object, parsed as UTC when no timezone indicator is present
 *
 * @example
 * parseDateAsUTC("2025-12-01T11:53:37.273886"); // Parsed as UTC
 * parseDateAsUTC("2025-12-01T11:53:37.273886Z"); // Already has timezone, parsed correctly
 * parseDateAsUTC("2025-12-01T11:53:37+00:00"); // Already has timezone, parsed correctly
 */
const parseDateAsUTC = (dateString: string): Date => {
  // A "Z" anywhere or a trailing "+HH:MM"/"-HH:MM" offset marks an explicit
  // timezone; anything else gets a "Z" suffix to force UTC parsing.
  const hasExplicitZone =
    dateString.includes("Z") || /[+-]\d{2}:\d{2}$/.test(dateString);
  return hasExplicitZone ? new Date(dateString) : new Date(`${dateString}Z`);
};
/**
* Formats a date into a compact string representing the time delta between the given date and the current date.
* @param date The date to format
* @param date The date to format (Date object or ISO 8601 string)
* @returns A compact string representing the time delta between the given date and the current date
*
* @example
* // now is 2024-01-01T00:00:00Z
* formatTimeDelta(new Date("2023-12-31T23:59:59Z")); // "1s"
* formatTimeDelta(new Date("2022-01-01T00:00:00Z")); // "2y"
* formatTimeDelta("2023-12-31T23:59:59Z"); // "1s"
* formatTimeDelta("2025-12-01T11:53:37.273886"); // Parsed as UTC automatically
*/
export const formatTimeDelta = (date: Date) => {
export const formatTimeDelta = (date: Date | string) => {
// Parse string dates as UTC if needed, or use Date object directly
const dateObj = typeof date === "string" ? parseDateAsUTC(date) : date;
const now = new Date();
const delta = now.getTime() - date.getTime();
const delta = now.getTime() - dateObj.getTime();
const seconds = Math.floor(delta / 1000);
const minutes = Math.floor(seconds / 60);

View File

@@ -59,6 +59,7 @@ export const VERIFIED_ANTHROPIC_MODELS = [
"claude-haiku-4-5-20251001",
"claude-opus-4-20250514",
"claude-opus-4-1-20250805",
"claude-opus-4-5-20251101",
];
// LiteLLM does not return the compatible Mistral models with the provider, so we list them here to set them ourselves

View File

@@ -9,6 +9,7 @@ from openhands.app_server.app_conversation.app_conversation_models import (
AppConversationSortOrder,
)
from openhands.app_server.services.injector import Injector
from openhands.sdk.event import ConversationStateUpdateEvent
from openhands.sdk.utils.models import DiscriminatedUnionMixin
@@ -92,6 +93,19 @@ class AppConversationInfoService(ABC):
Return the stored info
"""
@abstractmethod
async def process_stats_event(
    self,
    event: ConversationStateUpdateEvent,
    conversation_id: UUID,
) -> None:
    """Process a stats event and update conversation statistics.

    Implementations persist usage metrics carried by the event onto the
    stored conversation record.

    Args:
        event: The ConversationStateUpdateEvent with key='stats'
        conversation_id: The ID of the conversation to update
    """
class AppConversationInfoServiceInjector(
DiscriminatedUnionMixin, Injector[AppConversationInfoService], ABC

View File

@@ -9,6 +9,7 @@ from typing import AsyncGenerator
import base62
from openhands.app_server.app_conversation.app_conversation_models import (
AgentType,
AppConversationStartTask,
AppConversationStartTaskStatus,
)
@@ -25,7 +26,9 @@ from openhands.app_server.sandbox.sandbox_models import SandboxInfo
from openhands.app_server.user.user_context import UserContext
from openhands.sdk import Agent
from openhands.sdk.context.agent_context import AgentContext
from openhands.sdk.context.condenser import LLMSummarizingCondenser
from openhands.sdk.context.skills import load_user_skills
from openhands.sdk.llm import LLM
from openhands.sdk.workspace.remote.async_remote_workspace import AsyncRemoteWorkspace
_logger = logging.getLogger(__name__)
@@ -182,6 +185,43 @@ class AppConversationServiceBase(AppConversationService, ABC):
workspace.working_dir,
)
async def _configure_git_user_settings(
    self,
    workspace: AsyncRemoteWorkspace,
) -> None:
    """Apply the user's git identity as global git config in the workspace.

    Reads git_user_name and git_user_email from the user's settings and,
    for each value that is set, runs ``git config --global`` inside the
    workspace. Failures are logged and never raised.

    Args:
        workspace: The remote workspace to configure git settings in.
    """
    try:
        user_info = await self.user_context.get_user_info()
        git_identity = (
            ('user.name', user_info.git_user_name),
            ('user.email', user_info.git_user_email),
        )
        for key, value in git_identity:
            if not value:
                continue
            cmd = f'git config --global {key} "{value}"'
            result = await workspace.execute_command(cmd, workspace.working_dir)
            if result.exit_code:
                _logger.warning(f'Git config {key} failed: {result.stderr}')
            else:
                _logger.info(f'Git configured with {key}={value}')
    except Exception as e:
        # Best-effort: a missing git identity must not block conversation setup.
        _logger.warning(f'Failed to configure git user settings: {e}')
async def clone_or_init_git_repo(
self,
task: AppConversationStartTask,
@@ -197,6 +237,9 @@ class AppConversationServiceBase(AppConversationService, ABC):
if result.exit_code:
_logger.warning(f'mkdir failed: {result.stderr}')
# Configure git user settings from user preferences
await self._configure_git_user_settings(workspace)
if not request.selected_repository:
if self.init_git_in_empty_workspace:
_logger.debug('Initializing a new git repository in the workspace.')
@@ -221,7 +264,9 @@ class AppConversationServiceBase(AppConversationService, ABC):
# Clone the repo - this is the slow part!
clone_command = f'git clone {remote_repo_url} {dir_name}'
result = await workspace.execute_command(clone_command, workspace.working_dir)
result = await workspace.execute_command(
clone_command, workspace.working_dir, 120
)
if result.exit_code:
_logger.warning(f'Git clone failed: {result.stderr}')
@@ -233,7 +278,10 @@ class AppConversationServiceBase(AppConversationService, ABC):
random_str = base62.encodebytes(os.urandom(16))
openhands_workspace_branch = f'openhands-workspace-{random_str}'
checkout_command = f'git checkout -b {openhands_workspace_branch}'
await workspace.execute_command(checkout_command, workspace.working_dir)
git_dir = Path(workspace.working_dir) / dir_name
result = await workspace.execute_command(checkout_command, git_dir)
if result.exit_code:
_logger.warning(f'Git checkout failed: {result.stderr}')
async def maybe_run_setup_script(
self,
@@ -295,3 +343,39 @@ class AppConversationServiceBase(AppConversationService, ABC):
return
_logger.info('Git pre-commit hook installed successfully')
def _create_condenser(
    self,
    llm: LLM,
    agent_type: AgentType,
    condenser_max_size: int | None,
) -> LLMSummarizingCondenser:
    """Build the LLM-summarizing condenser for a new conversation.

    Args:
        llm: Base LLM; a copy with a condenser-specific usage_id is used
            so condensation cost is tracked separately from the agent.
        agent_type: PLAN or DEFAULT; selects the usage_id.
        condenser_max_size: Optional user override for max_size.

    Returns:
        Configured LLMSummarizingCondenser instance
    """
    usage_id = (
        'condenser' if agent_type == AgentType.DEFAULT else 'planning_condenser'
    )
    condenser_llm = llm.model_copy(update={'usage_id': usage_id})
    # Keep the library defaults (max_size=120, keep_first=4) unless the user
    # configured an explicit max_size.
    if condenser_max_size is None:
        return LLMSummarizingCondenser(llm=condenser_llm)
    return LLMSummarizingCondenser(llm=condenser_llm, max_size=condenser_max_size)

View File

@@ -4,12 +4,12 @@ from collections import defaultdict
from dataclasses import dataclass
from datetime import datetime, timedelta
from time import time
from typing import AsyncGenerator, Sequence
from typing import Any, AsyncGenerator, Sequence
from uuid import UUID, uuid4
import httpx
from fastapi import Request
from pydantic import Field, TypeAdapter
from pydantic import Field, SecretStr, TypeAdapter
from openhands.agent_server.models import (
ConversationInfo,
@@ -63,19 +63,25 @@ from openhands.app_server.sandbox.sandbox_spec_service import SandboxSpecService
from openhands.app_server.services.injector import InjectorState
from openhands.app_server.services.jwt_service import JwtService
from openhands.app_server.user.user_context import UserContext
from openhands.app_server.user.user_models import UserInfo
from openhands.app_server.utils.docker_utils import (
replace_localhost_hostname_for_docker,
)
from openhands.experiments.experiment_manager import ExperimentManagerImpl
from openhands.integrations.provider import ProviderType
from openhands.sdk import AgentContext, LocalWorkspace
from openhands.sdk import Agent, AgentContext, LocalWorkspace
from openhands.sdk.conversation.secret_source import LookupSecret, StaticSecret
from openhands.sdk.llm import LLM
from openhands.sdk.security.confirmation_policy import AlwaysConfirm
from openhands.sdk.workspace.remote.async_remote_workspace import AsyncRemoteWorkspace
from openhands.server.types import AppMode
from openhands.tools.preset.default import get_default_agent
from openhands.tools.preset.planning import get_planning_agent
from openhands.tools.preset.default import (
get_default_tools,
)
from openhands.tools.preset.planning import (
format_plan_structure,
get_planning_tools,
)
_conversation_info_type_adapter = TypeAdapter(list[ConversationInfo | None])
_logger = logging.getLogger(__name__)
@@ -99,6 +105,7 @@ class LiveStatusAppConversationService(AppConversationServiceBase):
access_token_hard_timeout: timedelta | None
app_mode: str | None = None
keycloak_auth_cookie: str | None = None
tavily_api_key: str | None = None
async def search_app_conversations(
self,
@@ -519,6 +526,224 @@ class LiveStatusAppConversationService(AppConversationServiceBase):
if not request.llm_model and parent_info.llm_model:
request.llm_model = parent_info.llm_model
async def _setup_secrets_for_git_provider(
    self, git_provider: ProviderType | None, user: UserInfo
) -> dict:
    """Set up secrets for git provider authentication.

    Starts from the user's custom secrets and, when a git provider is
    given, adds a ``<PROVIDER>_TOKEN`` entry: a LookupSecret resolved via
    the app server's webhook endpoint when a web URL is configured,
    otherwise a StaticSecret holding the latest provider token.

    Args:
        git_provider: The git provider type (GitHub, GitLab, etc.)
        user: User information containing authentication details

    Returns:
        Dictionary of secrets for the conversation
    """
    secrets = await self.user_context.get_secrets()
    if not git_provider:
        return secrets
    secret_name = f'{git_provider.name}_TOKEN'
    if self.web_url:
        # Create an access token for web-based authentication.
        # Scoped to this user and this provider only, with a hard timeout.
        access_token = self.jwt_service.create_jws_token(
            payload={
                'user_id': user.id,
                'provider_type': git_provider.value,
            },
            expires_in=self.access_token_hard_timeout,
        )
        headers = {'X-Access-Token': access_token}
        # Include keycloak_auth cookie in headers if app_mode is SaaS
        if self.app_mode == 'saas' and self.keycloak_auth_cookie:
            headers['Cookie'] = f'keycloak_auth={self.keycloak_auth_cookie}'
        secrets[secret_name] = LookupSecret(
            url=self.web_url + '/api/v1/webhooks/secrets',
            headers=headers,
        )
    else:
        # Use static token for environments without web URL access.
        # NOTE(review): a static token may expire depending on provider type.
        static_token = await self.user_context.get_latest_token(git_provider)
        if static_token:
            secrets[secret_name] = StaticSecret(value=static_token)
    return secrets
async def _configure_llm_and_mcp(
    self, user: UserInfo, llm_model: str | None
) -> tuple[LLM, dict]:
    """Configure LLM and MCP (Model Context Protocol) settings.

    Builds the agent LLM from user preferences and assembles the MCP
    server config: the app server's own MCP endpoint (when a web URL is
    configured) plus an optional Tavily search server when a search API
    key is available.

    Args:
        user: User information containing LLM preferences
        llm_model: Optional specific model to use, falls back to user default

    Returns:
        Tuple of (configured LLM instance, MCP config dictionary)
    """
    # Configure LLM
    model = llm_model or user.llm_model
    llm = LLM(
        model=model,
        base_url=user.llm_base_url,
        api_key=user.llm_api_key,
        usage_id='agent',
    )
    # Configure MCP: only the app server's endpoint needs a reachable web URL.
    mcp_config: dict[str, Any] = {}
    if self.web_url:
        mcp_url = f'{self.web_url}/mcp/mcp'
        mcp_config = {
            'default': {
                'url': mcp_url,
            }
        }
        # Add API key if available
        mcp_api_key = await self.user_context.get_mcp_api_key()
        if mcp_api_key:
            mcp_config['default']['headers'] = {
                'X-Session-API-Key': mcp_api_key,
            }
    # Get the actual API key values, prioritizing user's key over service key.
    # Blank/whitespace-only keys are treated as unset.
    user_search_key = None
    if user.search_api_key:
        key_value = user.search_api_key.get_secret_value()
        if key_value and key_value.strip():
            user_search_key = key_value
    service_tavily_key = None
    if self.tavily_api_key:
        # tavily_api_key is already a string (extracted in the factory method)
        if self.tavily_api_key.strip():
            service_tavily_key = self.tavily_api_key
    tavily_api_key = user_search_key or service_tavily_key
    if tavily_api_key:
        _logger.info('Adding search engine to MCP config')
        # NOTE(review): the key is embedded in the URL query string, per
        # Tavily's hosted-MCP scheme — confirm it is never logged downstream.
        mcp_config['tavily'] = {
            'url': f'https://mcp.tavily.com/mcp/?tavilyApiKey={tavily_api_key}'
        }
    else:
        _logger.info('No search engine API key found, skipping search engine')
    return llm, mcp_config
def _create_agent_with_context(
    self,
    llm: LLM,
    agent_type: AgentType,
    system_message_suffix: str | None,
    mcp_config: dict,
    condenser_max_size: int | None,
) -> Agent:
    """Create an agent with appropriate tools and context based on agent type.

    Args:
        llm: Configured LLM instance
        agent_type: Type of agent to create (PLAN or DEFAULT)
        system_message_suffix: Optional suffix for system messages
        mcp_config: MCP configuration dictionary
        condenser_max_size: condenser_max_size setting

    Returns:
        Configured Agent instance with context
    """
    condenser = self._create_condenser(llm, agent_type, condenser_max_size)

    # Type-specific agent configuration (tools, prompts, analyzer).
    if agent_type == AgentType.PLAN:
        type_kwargs: dict = {
            'tools': get_planning_tools(),
            'system_prompt_filename': 'system_prompt_planning.j2',
            'system_prompt_kwargs': {'plan_structure': format_plan_structure()},
            'security_analyzer': None,
        }
    else:
        type_kwargs = {
            'tools': get_default_tools(enable_browser=True),
            'system_prompt_kwargs': {'cli_mode': False},
        }
    agent = Agent(
        llm=llm,
        condenser=condenser,
        mcp_config=mcp_config,
        **type_kwargs,
    )

    # Attach the per-conversation context via an immutable copy.
    context = AgentContext(system_message_suffix=system_message_suffix)
    return agent.model_copy(update={'agent_context': context})
async def _finalize_conversation_request(
    self,
    agent: Agent,
    conversation_id: UUID | None,
    user: UserInfo,
    workspace: LocalWorkspace,
    initial_message: SendMessageRequest | None,
    secrets: dict,
    sandbox: SandboxInfo,
    remote_workspace: AsyncRemoteWorkspace | None,
    selected_repository: str | None,
    working_dir: str,
) -> StartConversationRequest:
    """Finalize the conversation request with experiment variants and skills.

    Args:
        agent: The configured agent
        conversation_id: Optional conversation ID, generates new one if None
        user: User information
        workspace: Local workspace instance
        initial_message: Optional initial message for the conversation
        secrets: Dictionary of secrets for authentication
        sandbox: Sandbox information
        remote_workspace: Optional remote workspace for skills loading
        selected_repository: Optional repository name
        working_dir: Working directory path

    Returns:
        Complete StartConversationRequest ready for use
    """
    if conversation_id is None:
        conversation_id = uuid4()

    # Experiment variants may adjust the agent configuration per user/conversation.
    agent = ExperimentManagerImpl.run_agent_variant_tests__v1(
        user.id, conversation_id, agent
    )

    if remote_workspace is not None:
        try:
            agent = await self._load_skills_and_update_agent(
                sandbox, agent, remote_workspace, selected_repository, working_dir
            )
        except Exception as e:
            # Skills are best-effort — never fail conversation startup over them.
            _logger.warning(f'Failed to load skills: {e}', exc_info=True)

    confirmation_policy = (
        AlwaysConfirm() if user.confirmation_mode else NeverConfirm()
    )
    return StartConversationRequest(
        conversation_id=conversation_id,
        agent=agent,
        workspace=workspace,
        confirmation_policy=confirmation_policy,
        initial_message=initial_message,
        secrets=secrets,
    )
async def _build_start_conversation_request_for_user(
self,
sandbox: SandboxInfo,
@@ -532,87 +757,41 @@ class LiveStatusAppConversationService(AppConversationServiceBase):
remote_workspace: AsyncRemoteWorkspace | None = None,
selected_repository: str | None = None,
) -> StartConversationRequest:
"""Build a complete conversation request for a user.
This method orchestrates the creation of a conversation request by:
1. Setting up git provider secrets
2. Configuring LLM and MCP settings
3. Creating an agent with appropriate context
4. Finalizing the request with skills and experiment variants
"""
user = await self.user_context.get_user_info()
# Set up a secret for the git token
secrets = await self.user_context.get_secrets()
if git_provider:
secret_name = f'{git_provider.name}_TOKEN'
if self.web_url:
# If there is a web url, then we create an access token to access it.
# For security reasons, we are explicit here - only this user, and
# only this provider, with a timeout
access_token = self.jwt_service.create_jws_token(
payload={
'user_id': user.id,
'provider_type': git_provider.value,
},
expires_in=self.access_token_hard_timeout,
)
headers = {'X-Access-Token': access_token}
# Include keycloak_auth cookie in headers if app_mode is SaaS
if self.app_mode == 'saas' and self.keycloak_auth_cookie:
headers['Cookie'] = f'keycloak_auth={self.keycloak_auth_cookie}'
secrets[secret_name] = LookupSecret(
url=self.web_url + '/api/v1/webhooks/secrets',
headers=headers,
)
else:
# If there is no URL specified where the sandbox can access the app server
# then we supply a static secret with the most recent value. Depending
# on the type, this may eventually expire.
static_token = await self.user_context.get_latest_token(git_provider)
if static_token:
secrets[secret_name] = StaticSecret(value=static_token)
workspace = LocalWorkspace(working_dir=working_dir)
# Use provided llm_model if available, otherwise fall back to user's default
model = llm_model or user.llm_model
llm = LLM(
model=model,
base_url=user.llm_base_url,
api_key=user.llm_api_key,
usage_id='agent',
)
# The agent gets passed initial instructions
# Select agent based on agent_type
if agent_type == AgentType.PLAN:
agent = get_planning_agent(llm=llm)
else:
agent = get_default_agent(llm=llm)
# Set up secrets for git provider
secrets = await self._setup_secrets_for_git_provider(git_provider, user)
agent_context = AgentContext(system_message_suffix=system_message_suffix)
agent = agent.model_copy(update={'agent_context': agent_context})
# Configure LLM and MCP
llm, mcp_config = await self._configure_llm_and_mcp(user, llm_model)
conversation_id = conversation_id or uuid4()
agent = ExperimentManagerImpl.run_agent_variant_tests__v1(
user.id, conversation_id, agent
# Create agent with context
agent = self._create_agent_with_context(
llm, agent_type, system_message_suffix, mcp_config, user.condenser_max_size
)
# Load and merge all skills if remote_workspace is available
if remote_workspace:
try:
agent = await self._load_skills_and_update_agent(
sandbox, agent, remote_workspace, selected_repository, working_dir
)
except Exception as e:
_logger.warning(f'Failed to load skills: {e}', exc_info=True)
# Continue without skills - don't fail conversation startup
start_conversation_request = StartConversationRequest(
conversation_id=conversation_id,
agent=agent,
workspace=workspace,
confirmation_policy=(
AlwaysConfirm() if user.confirmation_mode else NeverConfirm()
),
initial_message=initial_message,
secrets=secrets,
# Finalize and return the conversation request
return await self._finalize_conversation_request(
agent,
conversation_id,
user,
workspace,
initial_message,
secrets,
sandbox,
remote_workspace,
selected_repository,
working_dir,
)
return start_conversation_request
async def update_agent_server_conversation_title(
self,
@@ -817,6 +996,10 @@ class LiveStatusAppConversationServiceInjector(AppConversationServiceInjector):
'be retrieved by a sandboxed conversation.'
),
)
tavily_api_key: SecretStr | None = Field(
default=None,
description='The Tavily Search API key to add to MCP integration',
)
async def inject(
self, state: InjectorState, request: Request | None = None
@@ -874,6 +1057,14 @@ class LiveStatusAppConversationServiceInjector(AppConversationServiceInjector):
# If server_config is not available (e.g., in tests), continue without it
pass
# We supply the global tavily key only if the app mode is not SAAS, where
# currently the search endpoints are patched into the app server instead
# so the tavily key does not need to be shared
if self.tavily_api_key and app_mode != AppMode.SAAS:
tavily_api_key = self.tavily_api_key.get_secret_value()
else:
tavily_api_key = None
yield LiveStatusAppConversationService(
init_git_in_empty_workspace=self.init_git_in_empty_workspace,
user_context=user_context,
@@ -890,4 +1081,5 @@ class LiveStatusAppConversationServiceInjector(AppConversationServiceInjector):
access_token_hard_timeout=access_token_hard_timeout,
app_mode=app_mode,
keycloak_auth_cookie=keycloak_auth_cookie,
tavily_api_key=tavily_api_key,
)

View File

@@ -45,6 +45,8 @@ from openhands.app_server.utils.sql_utils import (
create_json_type_decorator,
)
from openhands.integrations.provider import ProviderType
from openhands.sdk.conversation.conversation_stats import ConversationStats
from openhands.sdk.event import ConversationStateUpdateEvent
from openhands.sdk.llm import MetricsSnapshot
from openhands.sdk.llm.utils.metrics import TokenUsage
from openhands.storage.data_models.conversation_metadata import ConversationTrigger
@@ -354,6 +356,130 @@ class SQLAppConversationInfoService(AppConversationInfoService):
await self.db_session.commit()
return info
async def update_conversation_statistics(
    self, conversation_id: UUID, stats: ConversationStats
) -> None:
    """Persist agent usage metrics from a stats event onto the stored record.

    Args:
        conversation_id: The ID of the conversation to update
        stats: ConversationStats whose ``usage_to_metrics['agent']`` entry
            (if present) supplies cost and token-usage values
    """
    agent_metrics = stats.usage_to_metrics.get('agent')
    if not agent_metrics:
        logger.debug(
            'No agent metrics found in stats for conversation %s', conversation_id
        )
        return

    # Fetch the stored record via the secure select so the V1/user filters apply.
    query = await self._secure_select()
    query = query.where(
        StoredConversationMetadata.conversation_id == str(conversation_id)
    )
    result = await self.db_session.execute(query)
    stored = result.scalar_one_or_none()
    if not stored:
        logger.debug(
            'Conversation %s not found or not accessible, skipping statistics update',
            conversation_id,
        )
        return

    # Collect candidate updates; None values are skipped later so existing
    # data is never overwritten with missing fields.
    updates = {
        'accumulated_cost': agent_metrics.accumulated_cost,
        'max_budget_per_task': agent_metrics.max_budget_per_task,
    }
    usage = agent_metrics.accumulated_token_usage
    token_fields = (
        'prompt_tokens',
        'completion_tokens',
        'cache_read_tokens',
        'cache_write_tokens',
        'reasoning_tokens',
        'context_window',
        'per_turn_token',
    )
    for field_name in token_fields:
        updates[field_name] = getattr(usage, field_name) if usage else None

    for field_name, value in updates.items():
        if value is not None:
            setattr(stored, field_name, value)

    # Refresh the modification timestamp before committing.
    stored.last_updated_at = utc_now()
    await self.db_session.commit()
async def process_stats_event(
    self,
    event: ConversationStateUpdateEvent,
    conversation_id: UUID,
) -> None:
    """Process a stats event and update conversation statistics.

    The event payload may be a ConversationStats instance, a plain dict
    (from JSON deserialization), or any object exposing
    ``usage_to_metrics``; it is normalized into a validated
    ConversationStats before persisting. Errors are logged, never raised.

    Args:
        event: The ConversationStateUpdateEvent with key='stats'
        conversation_id: The ID of the conversation to update
    """

    def _coerce(value) -> ConversationStats | None:
        # Normalize the various payload shapes into a ConversationStats.
        if isinstance(value, ConversationStats):
            return value
        if isinstance(value, dict):
            # Validate structure / ensure type safety.
            return ConversationStats.model_validate(value)
        if hasattr(value, 'usage_to_metrics'):
            # Duck-typed payloads (e.g. from tests): rebuild then validate.
            return ConversationStats.model_validate(
                {'usage_to_metrics': value.usage_to_metrics}
            )
        return None

    try:
        conversation_stats = _coerce(event.value)
        if conversation_stats and conversation_stats.usage_to_metrics:
            await self.update_conversation_statistics(
                conversation_id, conversation_stats
            )
    except Exception:
        logger.exception(
            'Error updating conversation statistics for conversation %s',
            conversation_id,
            stack_info=True,
        )
async def _secure_select(self):
query = select(StoredConversationMetadata).where(
StoredConversationMetadata.conversation_version == 'V1'

View File

@@ -6,7 +6,7 @@ from typing import AsyncContextManager
import httpx
from fastapi import Depends, Request
from pydantic import Field
from pydantic import Field, SecretStr
from sqlalchemy.ext.asyncio import AsyncSession
# Import the event_callback module to ensure all processors are registered
@@ -185,7 +185,13 @@ def config_from_env() -> AppServerConfig:
)
if config.app_conversation is None:
config.app_conversation = LiveStatusAppConversationServiceInjector()
tavily_api_key = None
tavily_api_key_str = os.getenv('TAVILY_API_KEY') or os.getenv('SEARCH_API_KEY')
if tavily_api_key_str:
tavily_api_key = SecretStr(tavily_api_key_str)
config.app_conversation = LiveStatusAppConversationServiceInjector(
tavily_api_key=tavily_api_key
)
if config.user is None:
config.user = AuthUserContextInjector()

View File

@@ -6,7 +6,6 @@ from __future__ import annotations
import asyncio
import logging
from dataclasses import dataclass
from datetime import datetime
from typing import AsyncGenerator
from uuid import UUID
@@ -15,6 +14,7 @@ from sqlalchemy import UUID as SQLUUID
from sqlalchemy import Column, Enum, String, and_, func, or_, select
from sqlalchemy.ext.asyncio import AsyncSession
from openhands.agent_server.utils import utc_now
from openhands.app_server.event_callback.event_callback_models import (
CreateEventCallbackRequest,
EventCallback,
@@ -177,7 +177,7 @@ class SQLEventCallbackService(EventCallbackService):
return EventCallbackPage(items=callbacks, next_page_id=next_page_id)
async def save_event_callback(self, event_callback: EventCallback) -> EventCallback:
event_callback.updated_at = datetime.now()
event_callback.updated_at = utc_now()
stored_callback = StoredEventCallback(**event_callback.model_dump())
await self.db_session.merge(stored_callback)
return event_callback

View File

@@ -43,6 +43,7 @@ from openhands.app_server.user.specifiy_user_context import (
from openhands.app_server.user.user_context import UserContext
from openhands.integrations.provider import ProviderType
from openhands.sdk import Event
from openhands.sdk.event import ConversationStateUpdateEvent
from openhands.server.user_auth.default_user_auth import DefaultUserAuth
from openhands.server.user_auth.user_auth import (
get_for_user as get_user_auth_for_user,
@@ -144,6 +145,13 @@ async def on_event(
*[event_service.save_event(conversation_id, event) for event in events]
)
# Process stats events for V1 conversations
for event in events:
if isinstance(event, ConversationStateUpdateEvent) and event.key == 'stats':
await app_conversation_info_service.process_stats_event(
event, conversation_id
)
asyncio.create_task(
_run_callbacks_in_bg_and_close(
conversation_id, app_conversation_info.created_by_user_id, events

View File

@@ -78,6 +78,10 @@ class AuthUserContext(UserContext):
return results
async def get_mcp_api_key(self) -> str | None:
mcp_api_key = await self.user_auth.get_mcp_api_key()
return mcp_api_key
USER_ID_ATTR = 'user_id'

View File

@@ -30,6 +30,9 @@ class SpecifyUserContext(UserContext):
async def get_secrets(self) -> dict[str, SecretSource]:
raise NotImplementedError()
async def get_mcp_api_key(self) -> str | None:
    """MCP API keys are not supported for this context.

    NOTE(review): like get_secrets above, this context appears to have no
    backing user auth — confirm callers never request MCP keys here.
    """
    raise NotImplementedError()
USER_CONTEXT_ATTR = 'user_context'
ADMIN = SpecifyUserContext(user_id=None)

View File

@@ -34,6 +34,10 @@ class UserContext(ABC):
async def get_secrets(self) -> dict[str, SecretSource]:
"""Get custom secrets and github provider secrets for the conversation."""
@abstractmethod
async def get_mcp_api_key(self) -> str | None:
    """Get an MCP API key for this user, or None if unavailable."""
class UserContextInjector(DiscriminatedUnionMixin, Injector[UserContext], ABC):
"""Injector for user contexts."""

View File

@@ -42,10 +42,6 @@ from openhands.core.exceptions import (
from openhands.core.logger import LOG_ALL_EVENTS
from openhands.core.logger import openhands_logger as logger
from openhands.core.schema import AgentState
from openhands.utils.posthog_tracker import (
track_agent_task_completed,
track_credit_limit_reached,
)
from openhands.events import (
EventSource,
EventStream,
@@ -713,20 +709,6 @@ class AgentController:
EventSource.ENVIRONMENT,
)
# Track agent task completion in PostHog
if new_state == AgentState.FINISHED:
try:
# Get app_mode from environment, default to 'oss'
app_mode = os.environ.get('APP_MODE', 'oss')
track_agent_task_completed(
conversation_id=self.id,
user_id=self.user_id,
app_mode=app_mode,
)
except Exception as e:
# Don't let tracking errors interrupt the agent
self.log('warning', f'Failed to track agent completion: {e}')
# Save state whenever agent state changes to ensure we don't lose state
# in case of crashes or unexpected circumstances
self.save_state()
@@ -905,18 +887,6 @@ class AgentController:
self.state_tracker.run_control_flags()
except Exception as e:
logger.warning('Control flag limits hit')
# Track credit limit reached if it's a budget exception
if 'budget' in str(e).lower() and self.state.budget_flag:
try:
track_credit_limit_reached(
conversation_id=self.id,
user_id=self.user_id,
current_budget=self.state.budget_flag.current_value,
max_budget=self.state.budget_flag.max_value,
)
except Exception as track_error:
# Don't let tracking errors interrupt the agent
self.log('warning', f'Failed to track credit limit: {track_error}')
await self._react_to_exception(e)
return

View File

@@ -188,12 +188,14 @@ class LLM(RetryMixin, DebugMixin):
if 'claude-opus-4-1' in self.config.model.lower():
kwargs['thinking'] = {'type': 'disabled'}
# Anthropic constraint: Opus 4.1 and Sonnet 4 models cannot accept both temperature and top_p
# Anthropic constraint: Opus 4.1, Opus 4.5, and Sonnet 4 models cannot accept both temperature and top_p
# Prefer temperature (drop top_p) if both are specified.
_model_lower = self.config.model.lower()
# Apply to Opus 4.1 and Sonnet 4 models to avoid API errors
# Apply to Opus 4.1, Opus 4.5, and Sonnet 4 models to avoid API errors
if (
('claude-opus-4-1' in _model_lower) or ('claude-sonnet-4' in _model_lower)
('claude-opus-4-1' in _model_lower)
or ('claude-opus-4-5' in _model_lower)
or ('claude-sonnet-4' in _model_lower)
) and ('temperature' in kwargs and 'top_p' in kwargs):
kwargs.pop('top_p', None)

View File

@@ -132,6 +132,8 @@ SUPPORTS_STOP_WORDS_FALSE_PATTERNS: list[str] = [
'grok-code-fast-1',
# DeepSeek R1 family
'deepseek-r1-0528*',
# Azure GPT-5 family
'azure/gpt-5*',
]

View File

@@ -26,13 +26,11 @@ from openhands.microagent.types import (
)
from openhands.server.dependencies import get_dependencies
from openhands.server.shared import server_config
from openhands.server.types import AppMode
from openhands.server.user_auth import (
get_access_token,
get_provider_tokens,
get_user_id,
)
from openhands.utils.posthog_tracker import alias_user_identities
app = APIRouter(prefix='/api/user', dependencies=get_dependencies())
@@ -119,14 +117,6 @@ async def get_user(
try:
user: User = await client.get_user()
# Alias git provider login with Keycloak user ID in PostHog (SaaS mode only)
if user_id and user.login and server_config.app_mode == AppMode.SAAS:
alias_user_identities(
keycloak_user_id=user_id,
git_login=user.login,
)
return user
except UnknownException as e:

View File

@@ -88,6 +88,9 @@ class DefaultUserAuth(UserAuth):
return None
return user_secrets.provider_tokens
async def get_mcp_api_key(self) -> str | None:
    # Default (single-user) auth issues no MCP API key — presumably there is
    # no API-key store in this mode; verify against the SaaS implementation.
    return None
@classmethod
async def get_instance(cls, request: Request) -> UserAuth:
user_auth = DefaultUserAuth()

View File

@@ -75,6 +75,10 @@ class UserAuth(ABC):
def get_auth_type(self) -> AuthType | None:
return None
@abstractmethod
async def get_mcp_api_key(self) -> str | None:
    """Get an MCP API key for the user, or None if none is available."""
@classmethod
@abstractmethod
async def get_instance(cls, request: Request) -> UserAuth:

View File

@@ -1,270 +0,0 @@
"""PostHog tracking utilities for OpenHands events."""
import os
from openhands.core.logger import openhands_logger as logger
# Lazy import posthog to avoid import errors in environments where it's not installed
posthog = None
def _init_posthog():
    """Lazily initialize the module-level PostHog client.

    Imports and configures posthog on first use; leaves the module-level
    ``posthog`` as None (tracking disabled) when the package is missing.
    """
    global posthog
    if posthog is not None:
        return
    try:
        import posthog as ph
    except ImportError:
        logger.warning(
            'PostHog not installed. Analytics tracking will be disabled.'
        )
        posthog = None
        return
    posthog = ph
    # Default public client key/host can be overridden via environment.
    posthog.api_key = os.environ.get(
        'POSTHOG_CLIENT_KEY', 'phc_3ESMmY9SgqEAGBB6sMGK5ayYHkeUuknH2vP6FmWH9RA'
    )
    posthog.host = os.environ.get('POSTHOG_HOST', 'https://us.i.posthog.com')
def track_agent_task_completed(
    conversation_id: str,
    user_id: str | None = None,
    app_mode: str | None = None,
) -> None:
    """Record an agent task-completion event in PostHog.

    Args:
        conversation_id: The ID of the conversation/session.
        user_id: The ID of the user (optional; None for anonymous users).
        app_mode: The application mode (saas/oss), if known.
    """
    _init_posthog()
    if posthog is None:
        # PostHog unavailable — tracking is a no-op.
        return

    # Anonymous users are tracked under a conversation-scoped distinct id
    # so their completions are still counted.
    if user_id:
        distinct_id = user_id
    else:
        distinct_id = f'conversation_{conversation_id}'

    event_properties = {
        'conversation_id': conversation_id,
        'user_id': user_id,
        'app_mode': app_mode or 'unknown',
    }
    try:
        posthog.capture(
            distinct_id=distinct_id,
            event='agent_task_completed',
            properties=event_properties,
        )
        logger.debug(
            'posthog_track',
            extra={
                'event': 'agent_task_completed',
                'conversation_id': conversation_id,
                'user_id': user_id,
            },
        )
    except Exception as e:
        # Analytics must never break the caller; log and continue.
        logger.warning(
            f'Failed to track agent_task_completed to PostHog: {e}',
            extra={
                'conversation_id': conversation_id,
                'error': str(e),
            },
        )
def track_user_signup_completed(
    user_id: str,
    signup_timestamp: str,
) -> None:
    """Record a completed user signup (TOS acceptance) in PostHog.

    Args:
        user_id: The ID of the user (Keycloak user ID).
        signup_timestamp: ISO format timestamp of when TOS was accepted.
    """
    _init_posthog()
    if posthog is None:
        # PostHog unavailable — tracking is a no-op.
        return

    payload = {
        'user_id': user_id,
        'signup_timestamp': signup_timestamp,
    }
    try:
        posthog.capture(
            distinct_id=user_id,
            event='user_signup_completed',
            properties=payload,
        )
        logger.debug(
            'posthog_track',
            extra={'event': 'user_signup_completed', 'user_id': user_id},
        )
    except Exception as e:
        # Analytics must never break the caller; log and continue.
        logger.warning(
            f'Failed to track user_signup_completed to PostHog: {e}',
            extra={'user_id': user_id, 'error': str(e)},
        )
def track_credit_limit_reached(
    conversation_id: str,
    user_id: str | None = None,
    current_budget: float = 0.0,
    max_budget: float = 0.0,
) -> None:
    """Track when a user reaches their credit limit during a conversation.

    Args:
        conversation_id: The ID of the conversation/session
        user_id: The ID of the user (optional, may be None for unauthenticated users)
        current_budget: The current budget spent
        max_budget: The maximum budget allowed
    """
    _init_posthog()
    if posthog is None:
        # PostHog unavailable — tracking is a no-op.
        return

    # Fall back to a conversation-scoped id so anonymous users are still counted.
    distinct_id = user_id if user_id else f'conversation_{conversation_id}'

    try:
        posthog.capture(
            distinct_id=distinct_id,
            event='credit_limit_reached',
            properties={
                'conversation_id': conversation_id,
                'user_id': user_id,
                'current_budget': current_budget,
                'max_budget': max_budget,
            },
        )
        logger.debug(
            'posthog_track',
            extra={
                'event': 'credit_limit_reached',
                'conversation_id': conversation_id,
                'user_id': user_id,
                'current_budget': current_budget,
                'max_budget': max_budget,
            },
        )
    except Exception as e:
        # Analytics failures are logged, never raised to the caller.
        logger.warning(
            f'Failed to track credit_limit_reached to PostHog: {e}',
            extra={
                'conversation_id': conversation_id,
                'error': str(e),
            },
        )
def track_credits_purchased(
    user_id: str,
    amount_usd: float,
    credits_added: float,
    stripe_session_id: str,
) -> None:
    """Track when a user successfully purchases credits.

    Args:
        user_id: The ID of the user (Keycloak user ID)
        amount_usd: The amount paid in USD (cents converted to dollars)
        credits_added: The number of credits added to the user's account
        stripe_session_id: The Stripe checkout session ID
    """
    _init_posthog()
    if posthog is None:
        # PostHog unavailable — tracking is a no-op.
        return

    try:
        posthog.capture(
            distinct_id=user_id,
            event='credits_purchased',
            properties={
                'user_id': user_id,
                'amount_usd': amount_usd,
                'credits_added': credits_added,
                'stripe_session_id': stripe_session_id,
            },
        )
        logger.debug(
            'posthog_track',
            extra={
                'event': 'credits_purchased',
                'user_id': user_id,
                'amount_usd': amount_usd,
                'credits_added': credits_added,
            },
        )
    except Exception as e:
        # Analytics failures are logged, never raised to the caller.
        logger.warning(
            f'Failed to track credits_purchased to PostHog: {e}',
            extra={
                'user_id': user_id,
                'error': str(e),
            },
        )
def alias_user_identities(
    keycloak_user_id: str,
    git_login: str,
) -> None:
    """Alias a user's Keycloak ID with their git provider login for unified tracking.

    This allows PostHog to link events tracked from the frontend (using git provider login)
    with events tracked from the backend (using Keycloak user ID).

    PostHog Python alias syntax: alias(previous_id, distinct_id)
    - previous_id: The old/previous distinct ID that will be merged
    - distinct_id: The new/canonical distinct ID to merge into

    For our use case:
    - Git provider login is the previous_id (first used in frontend, before backend auth)
    - Keycloak user ID is the distinct_id (canonical backend ID)
    - Result: All events with git login will be merged into Keycloak user ID

    Args:
        keycloak_user_id: The Keycloak user ID (canonical distinct_id)
        git_login: The git provider username (GitHub/GitLab/Bitbucket) to merge

    Reference:
        https://github.com/PostHog/posthog-python/blob/master/posthog/client.py
    """
    _init_posthog()
    if posthog is None:
        # PostHog unavailable — aliasing is a no-op.
        return

    try:
        # Merge git provider login into Keycloak user ID
        # posthog.alias(previous_id, distinct_id) - official Python SDK signature
        posthog.alias(git_login, keycloak_user_id)
        logger.debug(
            'posthog_alias',
            extra={
                'previous_id': git_login,
                'distinct_id': keycloak_user_id,
            },
        )
    except Exception as e:
        # Analytics failures are logged, never raised to the caller.
        logger.warning(
            f'Failed to alias user identities in PostHog: {e}',
            extra={
                'keycloak_user_id': keycloak_user_id,
                'git_login': git_login,
                'error': str(e),
            },
        )

View File

@@ -0,0 +1,628 @@
"""Unit tests for git functionality in AppConversationServiceBase.
This module tests the git-related functionality, specifically the clone_or_init_git_repo method
and the recent bug fixes for git checkout operations.
"""
import subprocess
from unittest.mock import AsyncMock, MagicMock, Mock, patch
import pytest
from openhands.app_server.app_conversation.app_conversation_models import AgentType
from openhands.app_server.app_conversation.app_conversation_service_base import (
AppConversationServiceBase,
)
from openhands.app_server.user.user_context import UserContext
class MockUserInfo:
"""Mock class for UserInfo to simulate user settings."""
def __init__(
self, git_user_name: str | None = None, git_user_email: str | None = None
):
self.git_user_name = git_user_name
self.git_user_email = git_user_email
class MockCommandResult:
    """Minimal stand-in for a workspace command execution result."""

    def __init__(self, exit_code: int = 0, stderr: str = ''):
        self.exit_code = exit_code
        self.stderr = stderr


class MockWorkspace:
    """Minimal stand-in for AsyncRemoteWorkspace.

    ``execute_command`` is an AsyncMock that resolves to a successful
    MockCommandResult by default; individual tests replace it as needed.
    """

    def __init__(self, working_dir: str = '/workspace'):
        self.working_dir = working_dir
        self.execute_command = AsyncMock(return_value=MockCommandResult())
class MockAppConversationServiceBase:
    """Mock class to test git functionality without complex dependencies."""

    def __init__(self):
        # MagicMock logger lets tests assert on info/error calls.
        self.logger = MagicMock()

    async def clone_or_init_git_repo(
        self,
        workspace_path: str,
        repo_url: str,
        branch: str = 'main',
        timeout: int = 300,
    ) -> bool:
        """Clone or initialize a git repository.

        This is a simplified version of the actual method for testing purposes.
        Returns True on a successful clone or checkout, False otherwise.
        """
        try:
            # First attempt: clone the requested branch into the workspace.
            clone_proc = subprocess.run(
                ['git', 'clone', '--branch', branch, repo_url, workspace_path],
                capture_output=True,
                text=True,
                timeout=timeout,
            )
            if clone_proc.returncode == 0:
                self.logger.info(
                    f'Successfully cloned repository {repo_url} to {workspace_path}'
                )
                return True

            # Clone failed; fall back to checking out the branch in place.
            checkout_proc = subprocess.run(
                ['git', 'checkout', branch],
                cwd=workspace_path,
                capture_output=True,
                text=True,
                timeout=timeout,
            )
            if checkout_proc.returncode != 0:
                self.logger.error(
                    f'Failed to checkout branch {branch}: {checkout_proc.stderr}'
                )
                return False
            self.logger.info(f'Successfully checked out branch {branch}')
            return True
        except subprocess.TimeoutExpired:
            self.logger.error(f'Git operation timed out after {timeout} seconds')
            return False
        except Exception as e:
            self.logger.error(f'Git operation failed: {str(e)}')
            return False
@pytest.fixture
def service():
    """Create a mock service instance for testing.

    Returns a fresh MockAppConversationServiceBase per test so logger
    call histories do not leak between tests.
    """
    return MockAppConversationServiceBase()
@pytest.mark.asyncio
async def test_clone_or_init_git_repo_successful_clone(service):
    """Test successful git clone operation."""
    with patch('subprocess.run') as mock_run:
        # Mock successful clone
        mock_run.return_value = MagicMock(returncode=0, stderr='', stdout='Cloning...')

        result = await service.clone_or_init_git_repo(
            workspace_path='/tmp/test_repo',
            repo_url='https://github.com/test/repo.git',
            branch='main',
            timeout=300,
        )

        assert result is True
        # Exactly one subprocess call: the clone; no checkout fallback needed.
        mock_run.assert_called_once_with(
            [
                'git',
                'clone',
                '--branch',
                'main',
                'https://github.com/test/repo.git',
                '/tmp/test_repo',
            ],
            capture_output=True,
            text=True,
            timeout=300,
        )
        service.logger.info.assert_called_with(
            'Successfully cloned repository https://github.com/test/repo.git to /tmp/test_repo'
        )


@pytest.mark.asyncio
async def test_clone_or_init_git_repo_clone_fails_checkout_succeeds(service):
    """Test git clone fails but checkout succeeds."""
    with patch('subprocess.run') as mock_run:
        # Mock clone failure, then checkout success
        mock_run.side_effect = [
            MagicMock(returncode=1, stderr='Clone failed', stdout=''),  # Clone fails
            MagicMock(
                returncode=0, stderr='', stdout='Switched to branch'
            ),  # Checkout succeeds
        ]

        result = await service.clone_or_init_git_repo(
            workspace_path='/tmp/test_repo',
            repo_url='https://github.com/test/repo.git',
            branch='feature-branch',
            timeout=300,
        )

        assert result is True
        assert mock_run.call_count == 2
        # Check clone call
        mock_run.assert_any_call(
            [
                'git',
                'clone',
                '--branch',
                'feature-branch',
                'https://github.com/test/repo.git',
                '/tmp/test_repo',
            ],
            capture_output=True,
            text=True,
            timeout=300,
        )
        # Check checkout call
        mock_run.assert_any_call(
            ['git', 'checkout', 'feature-branch'],
            cwd='/tmp/test_repo',
            capture_output=True,
            text=True,
            timeout=300,
        )
        service.logger.info.assert_called_with(
            'Successfully checked out branch feature-branch'
        )


@pytest.mark.asyncio
async def test_clone_or_init_git_repo_both_operations_fail(service):
    """Test both git clone and checkout operations fail."""
    with patch('subprocess.run') as mock_run:
        # Mock both operations failing
        mock_run.side_effect = [
            MagicMock(returncode=1, stderr='Clone failed', stdout=''),  # Clone fails
            MagicMock(
                returncode=1, stderr='Checkout failed', stdout=''
            ),  # Checkout fails
        ]

        result = await service.clone_or_init_git_repo(
            workspace_path='/tmp/test_repo',
            repo_url='https://github.com/test/repo.git',
            branch='nonexistent-branch',
            timeout=300,
        )

        assert result is False
        assert mock_run.call_count == 2
        # The checkout failure (the last attempt) is what gets logged.
        service.logger.error.assert_called_with(
            'Failed to checkout branch nonexistent-branch: Checkout failed'
        )


@pytest.mark.asyncio
async def test_clone_or_init_git_repo_timeout(service):
    """Test git operation timeout."""
    with patch('subprocess.run') as mock_run:
        # Mock timeout exception
        mock_run.side_effect = subprocess.TimeoutExpired(
            cmd=['git', 'clone'], timeout=300
        )

        result = await service.clone_or_init_git_repo(
            workspace_path='/tmp/test_repo',
            repo_url='https://github.com/test/repo.git',
            branch='main',
            timeout=300,
        )

        assert result is False
        service.logger.error.assert_called_with(
            'Git operation timed out after 300 seconds'
        )


@pytest.mark.asyncio
async def test_clone_or_init_git_repo_exception(service):
    """Test git operation with unexpected exception."""
    with patch('subprocess.run') as mock_run:
        # Mock unexpected exception
        mock_run.side_effect = Exception('Unexpected error')

        result = await service.clone_or_init_git_repo(
            workspace_path='/tmp/test_repo',
            repo_url='https://github.com/test/repo.git',
            branch='main',
            timeout=300,
        )

        assert result is False
        service.logger.error.assert_called_with(
            'Git operation failed: Unexpected error'
        )


@pytest.mark.asyncio
async def test_clone_or_init_git_repo_custom_timeout(service):
    """Test git operation with custom timeout."""
    with patch('subprocess.run') as mock_run:
        # Mock successful clone with custom timeout
        mock_run.return_value = MagicMock(returncode=0, stderr='', stdout='Cloning...')

        result = await service.clone_or_init_git_repo(
            workspace_path='/tmp/test_repo',
            repo_url='https://github.com/test/repo.git',
            branch='main',
            timeout=600,  # Custom timeout
        )

        assert result is True
        mock_run.assert_called_once_with(
            [
                'git',
                'clone',
                '--branch',
                'main',
                'https://github.com/test/repo.git',
                '/tmp/test_repo',
            ],
            capture_output=True,
            text=True,
            timeout=600,  # Verify custom timeout is used
        )
@patch(
    'openhands.app_server.app_conversation.app_conversation_service_base.LLMSummarizingCondenser'
)
def test_create_condenser_default_agent_with_none_max_size(mock_condenser_class):
    """Test _create_condenser for DEFAULT agent with condenser_max_size = None uses default."""
    # Arrange
    mock_user_context = Mock(spec=UserContext)

    # Clearing __abstractmethods__ lets the abstract base be instantiated directly.
    with patch.object(
        AppConversationServiceBase,
        '__abstractmethods__',
        set(),
    ):
        service = AppConversationServiceBase(
            init_git_in_empty_workspace=True,
            user_context=mock_user_context,
        )

    mock_llm = MagicMock()
    mock_llm_copy = MagicMock()
    mock_llm_copy.usage_id = 'condenser'
    mock_llm.model_copy.return_value = mock_llm_copy

    mock_condenser_instance = MagicMock()
    mock_condenser_class.return_value = mock_condenser_instance

    # Act
    service._create_condenser(mock_llm, AgentType.DEFAULT, None)

    # Assert
    mock_condenser_class.assert_called_once()
    call_kwargs = mock_condenser_class.call_args[1]
    # When condenser_max_size is None, max_size should not be passed (uses SDK default of 120)
    assert 'max_size' not in call_kwargs
    # keep_first is never passed (uses SDK default of 4)
    assert 'keep_first' not in call_kwargs
    assert call_kwargs['llm'].usage_id == 'condenser'
    mock_llm.model_copy.assert_called_once()


@patch(
    'openhands.app_server.app_conversation.app_conversation_service_base.LLMSummarizingCondenser'
)
def test_create_condenser_default_agent_with_custom_max_size(mock_condenser_class):
    """Test _create_condenser for DEFAULT agent with custom condenser_max_size."""
    # Arrange
    mock_user_context = Mock(spec=UserContext)

    # Clearing __abstractmethods__ lets the abstract base be instantiated directly.
    with patch.object(
        AppConversationServiceBase,
        '__abstractmethods__',
        set(),
    ):
        service = AppConversationServiceBase(
            init_git_in_empty_workspace=True,
            user_context=mock_user_context,
        )

    mock_llm = MagicMock()
    mock_llm_copy = MagicMock()
    mock_llm_copy.usage_id = 'condenser'
    mock_llm.model_copy.return_value = mock_llm_copy

    mock_condenser_instance = MagicMock()
    mock_condenser_class.return_value = mock_condenser_instance

    # Act
    service._create_condenser(mock_llm, AgentType.DEFAULT, 150)

    # Assert
    mock_condenser_class.assert_called_once()
    call_kwargs = mock_condenser_class.call_args[1]
    assert call_kwargs['max_size'] == 150  # Custom value should be used
    # keep_first is never passed (uses SDK default of 4)
    assert 'keep_first' not in call_kwargs
    assert call_kwargs['llm'].usage_id == 'condenser'
    mock_llm.model_copy.assert_called_once()


@patch(
    'openhands.app_server.app_conversation.app_conversation_service_base.LLMSummarizingCondenser'
)
def test_create_condenser_plan_agent_with_none_max_size(mock_condenser_class):
    """Test _create_condenser for PLAN agent with condenser_max_size = None uses default."""
    # Arrange
    mock_user_context = Mock(spec=UserContext)

    # Clearing __abstractmethods__ lets the abstract base be instantiated directly.
    with patch.object(
        AppConversationServiceBase,
        '__abstractmethods__',
        set(),
    ):
        service = AppConversationServiceBase(
            init_git_in_empty_workspace=True,
            user_context=mock_user_context,
        )

    mock_llm = MagicMock()
    mock_llm_copy = MagicMock()
    mock_llm_copy.usage_id = 'planning_condenser'
    mock_llm.model_copy.return_value = mock_llm_copy

    mock_condenser_instance = MagicMock()
    mock_condenser_class.return_value = mock_condenser_instance

    # Act
    service._create_condenser(mock_llm, AgentType.PLAN, None)

    # Assert
    mock_condenser_class.assert_called_once()
    call_kwargs = mock_condenser_class.call_args[1]
    # When condenser_max_size is None, max_size should not be passed (uses SDK default of 120)
    assert 'max_size' not in call_kwargs
    # keep_first is never passed (uses SDK default of 4)
    assert 'keep_first' not in call_kwargs
    assert call_kwargs['llm'].usage_id == 'planning_condenser'
    mock_llm.model_copy.assert_called_once()


@patch(
    'openhands.app_server.app_conversation.app_conversation_service_base.LLMSummarizingCondenser'
)
def test_create_condenser_plan_agent_with_custom_max_size(mock_condenser_class):
    """Test _create_condenser for PLAN agent with custom condenser_max_size."""
    # Arrange
    mock_user_context = Mock(spec=UserContext)

    # Clearing __abstractmethods__ lets the abstract base be instantiated directly.
    with patch.object(
        AppConversationServiceBase,
        '__abstractmethods__',
        set(),
    ):
        service = AppConversationServiceBase(
            init_git_in_empty_workspace=True,
            user_context=mock_user_context,
        )

    mock_llm = MagicMock()
    mock_llm_copy = MagicMock()
    mock_llm_copy.usage_id = 'planning_condenser'
    mock_llm.model_copy.return_value = mock_llm_copy

    mock_condenser_instance = MagicMock()
    mock_condenser_class.return_value = mock_condenser_instance

    # Act
    service._create_condenser(mock_llm, AgentType.PLAN, 200)

    # Assert
    mock_condenser_class.assert_called_once()
    call_kwargs = mock_condenser_class.call_args[1]
    assert call_kwargs['max_size'] == 200  # Custom value should be used
    # keep_first is never passed (uses SDK default of 4)
    assert 'keep_first' not in call_kwargs
    assert call_kwargs['llm'].usage_id == 'planning_condenser'
    mock_llm.model_copy.assert_called_once()
# =============================================================================
# Tests for _configure_git_user_settings
# =============================================================================
def _create_service_with_mock_user_context(user_info: MockUserInfo) -> tuple:
    """Build a mocked service exposing the real _configure_git_user_settings.

    The service itself is a MagicMock, but the real method from
    AppConversationServiceBase is bound onto it so tests exercise the
    actual implementation against a mocked user context.

    Returns a tuple of (service, mock_user_context) for testing.
    """
    user_context = MagicMock()
    user_context.get_user_info = AsyncMock(return_value=user_info)

    fake_service = MagicMock()
    fake_service.user_context = user_context

    def _bound_configure(workspace):
        # Delegate to the real implementation with the mock as `self`.
        return AppConversationServiceBase._configure_git_user_settings(
            fake_service, workspace
        )

    fake_service._configure_git_user_settings = _bound_configure
    return fake_service, user_context
@pytest.fixture
def mock_workspace():
    """Create a mock workspace instance for testing.

    The working dir matches the path asserted in the git-config tests.
    """
    return MockWorkspace(working_dir='/workspace/project')
@pytest.mark.asyncio
async def test_configure_git_user_settings_both_name_and_email(mock_workspace):
    """Test configuring both git user name and email."""
    user_info = MockUserInfo(
        git_user_name='Test User', git_user_email='test@example.com'
    )
    service, mock_user_context = _create_service_with_mock_user_context(user_info)

    await service._configure_git_user_settings(mock_workspace)

    # Verify get_user_info was called
    mock_user_context.get_user_info.assert_called_once()

    # Verify both git config commands were executed
    assert mock_workspace.execute_command.call_count == 2

    # Check git config user.name call
    mock_workspace.execute_command.assert_any_call(
        'git config --global user.name "Test User"', '/workspace/project'
    )

    # Check git config user.email call
    mock_workspace.execute_command.assert_any_call(
        'git config --global user.email "test@example.com"', '/workspace/project'
    )


@pytest.mark.asyncio
async def test_configure_git_user_settings_only_name(mock_workspace):
    """Test configuring only git user name."""
    user_info = MockUserInfo(git_user_name='Test User', git_user_email=None)
    service, _ = _create_service_with_mock_user_context(user_info)

    await service._configure_git_user_settings(mock_workspace)

    # Verify only user.name was configured
    assert mock_workspace.execute_command.call_count == 1
    mock_workspace.execute_command.assert_called_once_with(
        'git config --global user.name "Test User"', '/workspace/project'
    )


@pytest.mark.asyncio
async def test_configure_git_user_settings_only_email(mock_workspace):
    """Test configuring only git user email."""
    user_info = MockUserInfo(git_user_name=None, git_user_email='test@example.com')
    service, _ = _create_service_with_mock_user_context(user_info)

    await service._configure_git_user_settings(mock_workspace)

    # Verify only user.email was configured
    assert mock_workspace.execute_command.call_count == 1
    mock_workspace.execute_command.assert_called_once_with(
        'git config --global user.email "test@example.com"', '/workspace/project'
    )


@pytest.mark.asyncio
async def test_configure_git_user_settings_neither_set(mock_workspace):
    """Test when neither git user name nor email is set."""
    user_info = MockUserInfo(git_user_name=None, git_user_email=None)
    service, _ = _create_service_with_mock_user_context(user_info)

    await service._configure_git_user_settings(mock_workspace)

    # Verify no git config commands were executed
    mock_workspace.execute_command.assert_not_called()


@pytest.mark.asyncio
async def test_configure_git_user_settings_empty_strings(mock_workspace):
    """Test when git user name and email are empty strings."""
    user_info = MockUserInfo(git_user_name='', git_user_email='')
    service, _ = _create_service_with_mock_user_context(user_info)

    await service._configure_git_user_settings(mock_workspace)

    # Empty strings are falsy, so no commands should be executed
    mock_workspace.execute_command.assert_not_called()


@pytest.mark.asyncio
async def test_configure_git_user_settings_get_user_info_fails(mock_workspace):
    """Test handling of exception when get_user_info fails."""
    user_info = MockUserInfo()
    service, mock_user_context = _create_service_with_mock_user_context(user_info)
    mock_user_context.get_user_info = AsyncMock(
        side_effect=Exception('User info error')
    )

    # Should not raise exception, just log warning
    await service._configure_git_user_settings(mock_workspace)

    # Verify no git config commands were executed
    mock_workspace.execute_command.assert_not_called()


@pytest.mark.asyncio
async def test_configure_git_user_settings_name_command_fails(mock_workspace):
    """Test handling when git config user.name command fails."""
    user_info = MockUserInfo(
        git_user_name='Test User', git_user_email='test@example.com'
    )
    service, _ = _create_service_with_mock_user_context(user_info)

    # Make the first command fail (user.name), second succeed (user.email)
    mock_workspace.execute_command = AsyncMock(
        side_effect=[
            MockCommandResult(exit_code=1, stderr='Permission denied'),
            MockCommandResult(exit_code=0),
        ]
    )

    # Should not raise exception
    await service._configure_git_user_settings(mock_workspace)

    # Verify both commands were still attempted
    assert mock_workspace.execute_command.call_count == 2


@pytest.mark.asyncio
async def test_configure_git_user_settings_email_command_fails(mock_workspace):
    """Test handling when git config user.email command fails."""
    user_info = MockUserInfo(
        git_user_name='Test User', git_user_email='test@example.com'
    )
    service, _ = _create_service_with_mock_user_context(user_info)

    # Make the first command succeed (user.name), second fail (user.email)
    mock_workspace.execute_command = AsyncMock(
        side_effect=[
            MockCommandResult(exit_code=0),
            MockCommandResult(exit_code=1, stderr='Permission denied'),
        ]
    )

    # Should not raise exception
    await service._configure_git_user_settings(mock_workspace)

    # Verify both commands were still attempted
    assert mock_workspace.execute_command.call_count == 2


@pytest.mark.asyncio
async def test_configure_git_user_settings_special_characters_in_name(mock_workspace):
    """Test git user name with special characters."""
    user_info = MockUserInfo(
        git_user_name="Test O'Brien", git_user_email='test@example.com'
    )
    service, _ = _create_service_with_mock_user_context(user_info)

    await service._configure_git_user_settings(mock_workspace)

    # Verify the name is passed with special characters
    mock_workspace.execute_command.assert_any_call(
        'git config --global user.name "Test O\'Brien"', '/workspace/project'
    )

View File

@@ -0,0 +1,721 @@
"""Unit tests for the methods in LiveStatusAppConversationService."""
from unittest.mock import AsyncMock, Mock, patch
from uuid import UUID, uuid4
import pytest
from openhands.agent_server.models import SendMessageRequest, StartConversationRequest
from openhands.app_server.app_conversation.app_conversation_models import AgentType
from openhands.app_server.app_conversation.live_status_app_conversation_service import (
LiveStatusAppConversationService,
)
from openhands.app_server.sandbox.sandbox_models import SandboxInfo, SandboxStatus
from openhands.app_server.user.user_context import UserContext
from openhands.integrations.provider import ProviderType
from openhands.sdk import Agent
from openhands.sdk.conversation.secret_source import LookupSecret, StaticSecret
from openhands.sdk.llm import LLM
from openhands.sdk.workspace import LocalWorkspace
from openhands.sdk.workspace.remote.async_remote_workspace import AsyncRemoteWorkspace
from openhands.server.types import AppMode
class TestLiveStatusAppConversationService:
"""Test cases for the methods in LiveStatusAppConversationService."""
def setup_method(self):
"""Set up test fixtures."""
# Create mock dependencies
self.mock_user_context = Mock(spec=UserContext)
self.mock_jwt_service = Mock()
self.mock_sandbox_service = Mock()
self.mock_sandbox_spec_service = Mock()
self.mock_app_conversation_info_service = Mock()
self.mock_app_conversation_start_task_service = Mock()
self.mock_event_callback_service = Mock()
self.mock_httpx_client = Mock()
# Create service instance
self.service = LiveStatusAppConversationService(
init_git_in_empty_workspace=True,
user_context=self.mock_user_context,
app_conversation_info_service=self.mock_app_conversation_info_service,
app_conversation_start_task_service=self.mock_app_conversation_start_task_service,
event_callback_service=self.mock_event_callback_service,
sandbox_service=self.mock_sandbox_service,
sandbox_spec_service=self.mock_sandbox_spec_service,
jwt_service=self.mock_jwt_service,
sandbox_startup_timeout=30,
sandbox_startup_poll_frequency=1,
httpx_client=self.mock_httpx_client,
web_url='https://test.example.com',
access_token_hard_timeout=None,
app_mode='test',
keycloak_auth_cookie=None,
)
# Mock user info
self.mock_user = Mock()
self.mock_user.id = 'test_user_123'
self.mock_user.llm_model = 'gpt-4'
self.mock_user.llm_base_url = 'https://api.openai.com/v1'
self.mock_user.llm_api_key = 'test_api_key'
self.mock_user.confirmation_mode = False
self.mock_user.search_api_key = None # Default to None
self.mock_user.condenser_max_size = None # Default to None
# Mock sandbox
self.mock_sandbox = Mock(spec=SandboxInfo)
self.mock_sandbox.id = uuid4()
self.mock_sandbox.status = SandboxStatus.RUNNING
@pytest.mark.asyncio
async def test_setup_secrets_for_git_provider_no_provider(self):
"""Test _setup_secrets_for_git_provider with no git provider."""
# Arrange
base_secrets = {'existing': 'secret'}
self.mock_user_context.get_secrets.return_value = base_secrets
# Act
result = await self.service._setup_secrets_for_git_provider(
None, self.mock_user
)
# Assert
assert result == base_secrets
self.mock_user_context.get_secrets.assert_called_once()
@pytest.mark.asyncio
async def test_setup_secrets_for_git_provider_with_web_url(self):
"""Test _setup_secrets_for_git_provider with web URL (creates access token)."""
# Arrange
base_secrets = {}
self.mock_user_context.get_secrets.return_value = base_secrets
self.mock_jwt_service.create_jws_token.return_value = 'test_access_token'
git_provider = ProviderType.GITHUB
# Act
result = await self.service._setup_secrets_for_git_provider(
git_provider, self.mock_user
)
# Assert
assert 'GITHUB_TOKEN' in result
assert isinstance(result['GITHUB_TOKEN'], LookupSecret)
assert (
result['GITHUB_TOKEN'].url
== 'https://test.example.com/api/v1/webhooks/secrets'
)
assert result['GITHUB_TOKEN'].headers['X-Access-Token'] == 'test_access_token'
self.mock_jwt_service.create_jws_token.assert_called_once_with(
payload={
'user_id': self.mock_user.id,
'provider_type': git_provider.value,
},
expires_in=None,
)
@pytest.mark.asyncio
async def test_setup_secrets_for_git_provider_with_saas_mode(self):
"""Test _setup_secrets_for_git_provider with SaaS mode (includes keycloak cookie)."""
# Arrange
self.service.app_mode = 'saas'
self.service.keycloak_auth_cookie = 'test_cookie'
base_secrets = {}
self.mock_user_context.get_secrets.return_value = base_secrets
self.mock_jwt_service.create_jws_token.return_value = 'test_access_token'
git_provider = ProviderType.GITLAB
# Act
result = await self.service._setup_secrets_for_git_provider(
git_provider, self.mock_user
)
# Assert
assert 'GITLAB_TOKEN' in result
lookup_secret = result['GITLAB_TOKEN']
assert isinstance(lookup_secret, LookupSecret)
assert 'Cookie' in lookup_secret.headers
assert lookup_secret.headers['Cookie'] == 'keycloak_auth=test_cookie'
@pytest.mark.asyncio
async def test_setup_secrets_for_git_provider_without_web_url(self):
"""Test _setup_secrets_for_git_provider without web URL (uses static token)."""
# Arrange
self.service.web_url = None
base_secrets = {}
self.mock_user_context.get_secrets.return_value = base_secrets
self.mock_user_context.get_latest_token.return_value = 'static_token_value'
git_provider = ProviderType.GITHUB
# Act
result = await self.service._setup_secrets_for_git_provider(
git_provider, self.mock_user
)
# Assert
assert 'GITHUB_TOKEN' in result
assert isinstance(result['GITHUB_TOKEN'], StaticSecret)
assert result['GITHUB_TOKEN'].value.get_secret_value() == 'static_token_value'
self.mock_user_context.get_latest_token.assert_called_once_with(git_provider)
@pytest.mark.asyncio
async def test_setup_secrets_for_git_provider_no_static_token(self):
"""Test _setup_secrets_for_git_provider when no static token is available."""
# Arrange
self.service.web_url = None
base_secrets = {}
self.mock_user_context.get_secrets.return_value = base_secrets
self.mock_user_context.get_latest_token.return_value = None
git_provider = ProviderType.GITHUB
# Act
result = await self.service._setup_secrets_for_git_provider(
git_provider, self.mock_user
)
# Assert
assert 'GITHUB_TOKEN' not in result
assert result == base_secrets
@pytest.mark.asyncio
async def test_configure_llm_and_mcp_with_custom_model(self):
"""Test _configure_llm_and_mcp with custom LLM model."""
# Arrange
custom_model = 'gpt-3.5-turbo'
self.mock_user_context.get_mcp_api_key.return_value = 'mcp_api_key'
# Act
llm, mcp_config = await self.service._configure_llm_and_mcp(
self.mock_user, custom_model
)
# Assert
assert isinstance(llm, LLM)
assert llm.model == custom_model
assert llm.base_url == self.mock_user.llm_base_url
assert llm.api_key.get_secret_value() == self.mock_user.llm_api_key
assert llm.usage_id == 'agent'
assert 'default' in mcp_config
assert mcp_config['default']['url'] == 'https://test.example.com/mcp/mcp'
assert mcp_config['default']['headers']['X-Session-API-Key'] == 'mcp_api_key'
@pytest.mark.asyncio
async def test_configure_llm_and_mcp_with_user_default_model(self):
"""Test _configure_llm_and_mcp using user's default model."""
# Arrange
self.mock_user_context.get_mcp_api_key.return_value = None
# Act
llm, mcp_config = await self.service._configure_llm_and_mcp(
self.mock_user, None
)
# Assert
assert llm.model == self.mock_user.llm_model
assert 'default' in mcp_config
assert 'headers' not in mcp_config['default']
@pytest.mark.asyncio
async def test_configure_llm_and_mcp_without_web_url(self):
"""Test _configure_llm_and_mcp without web URL (no MCP config)."""
# Arrange
self.service.web_url = None
# Act
llm, mcp_config = await self.service._configure_llm_and_mcp(
self.mock_user, None
)
# Assert
assert isinstance(llm, LLM)
assert mcp_config == {}
@pytest.mark.asyncio
async def test_configure_llm_and_mcp_tavily_with_user_search_api_key(self):
"""Test _configure_llm_and_mcp adds tavily when user has search_api_key."""
# Arrange
from pydantic import SecretStr
self.mock_user.search_api_key = SecretStr('user_search_key')
self.mock_user_context.get_mcp_api_key.return_value = 'mcp_api_key'
# Act
llm, mcp_config = await self.service._configure_llm_and_mcp(
self.mock_user, None
)
# Assert
assert isinstance(llm, LLM)
assert 'default' in mcp_config
assert 'tavily' in mcp_config
assert (
mcp_config['tavily']['url']
== 'https://mcp.tavily.com/mcp/?tavilyApiKey=user_search_key'
)
    @pytest.mark.asyncio
    async def test_configure_llm_and_mcp_tavily_with_env_tavily_key(self):
        """Test _configure_llm_and_mcp adds tavily when service has tavily_api_key."""
        # Arrange: only the service-level (environment) tavily key is set.
        self.service.tavily_api_key = 'env_tavily_key'
        self.mock_user_context.get_mcp_api_key.return_value = None
        # Act
        llm, mcp_config = await self.service._configure_llm_and_mcp(
            self.mock_user, None
        )
        # Assert: the env key is used to build the tavily MCP URL.
        assert isinstance(llm, LLM)
        assert 'default' in mcp_config
        assert 'tavily' in mcp_config
        assert (
            mcp_config['tavily']['url']
            == 'https://mcp.tavily.com/mcp/?tavilyApiKey=env_tavily_key'
        )
    @pytest.mark.asyncio
    async def test_configure_llm_and_mcp_tavily_user_key_takes_precedence(self):
        """Test _configure_llm_and_mcp user search_api_key takes precedence over env key."""
        # Arrange: both a user-level key and an env-level key are present.
        from pydantic import SecretStr
        self.mock_user.search_api_key = SecretStr('user_search_key')
        self.service.tavily_api_key = 'env_tavily_key'
        self.mock_user_context.get_mcp_api_key.return_value = None
        # Act
        llm, mcp_config = await self.service._configure_llm_and_mcp(
            self.mock_user, None
        )
        # Assert: the user's key wins over the environment key.
        assert isinstance(llm, LLM)
        assert 'tavily' in mcp_config
        assert (
            mcp_config['tavily']['url']
            == 'https://mcp.tavily.com/mcp/?tavilyApiKey=user_search_key'
        )
    @pytest.mark.asyncio
    async def test_configure_llm_and_mcp_no_tavily_without_keys(self):
        """Test _configure_llm_and_mcp does not add tavily when no keys are available."""
        # Arrange: neither a user search key nor an env tavily key exists.
        self.mock_user.search_api_key = None
        self.service.tavily_api_key = None
        self.mock_user_context.get_mcp_api_key.return_value = None
        # Act
        llm, mcp_config = await self.service._configure_llm_and_mcp(
            self.mock_user, None
        )
        # Assert: default MCP is configured but tavily is omitted entirely.
        assert isinstance(llm, LLM)
        assert 'default' in mcp_config
        assert 'tavily' not in mcp_config
    @pytest.mark.asyncio
    async def test_configure_llm_and_mcp_saas_mode_no_tavily_without_user_key(self):
        """Test _configure_llm_and_mcp does not add tavily in SAAS mode without user search_api_key.
        In SAAS mode, the global tavily_api_key should not be passed to the service instance,
        so tavily should only be added if the user has their own search_api_key.
        """
        # Arrange - simulate SAAS mode where no global tavily key is available
        self.service.app_mode = AppMode.SAAS.value
        self.service.tavily_api_key = None  # In SAAS mode, this should be None
        self.mock_user.search_api_key = None
        self.mock_user_context.get_mcp_api_key.return_value = None
        # Act
        llm, mcp_config = await self.service._configure_llm_and_mcp(
            self.mock_user, None
        )
        # Assert: no tavily entry since there is no key from any source.
        assert isinstance(llm, LLM)
        assert 'default' in mcp_config
        assert 'tavily' not in mcp_config
    @pytest.mark.asyncio
    async def test_configure_llm_and_mcp_saas_mode_with_user_search_key(self):
        """Test _configure_llm_and_mcp adds tavily in SAAS mode when user has search_api_key.
        Even in SAAS mode, if the user has their own search_api_key, tavily should be added.
        """
        # Arrange - simulate SAAS mode with user having their own search key
        from pydantic import SecretStr
        self.service.app_mode = AppMode.SAAS.value
        self.service.tavily_api_key = None  # In SAAS mode, this should be None
        self.mock_user.search_api_key = SecretStr('user_search_key')
        self.mock_user_context.get_mcp_api_key.return_value = None
        # Act
        llm, mcp_config = await self.service._configure_llm_and_mcp(
            self.mock_user, None
        )
        # Assert: the user's own key is sufficient to enable tavily in SAAS.
        assert isinstance(llm, LLM)
        assert 'default' in mcp_config
        assert 'tavily' in mcp_config
        assert (
            mcp_config['tavily']['url']
            == 'https://mcp.tavily.com/mcp/?tavilyApiKey=user_search_key'
        )
    @pytest.mark.asyncio
    async def test_configure_llm_and_mcp_tavily_with_empty_user_search_key(self):
        """Test _configure_llm_and_mcp handles empty user search_api_key correctly."""
        # Arrange: the user key is an empty secret, which must not mask the env key.
        from pydantic import SecretStr
        self.mock_user.search_api_key = SecretStr('')  # Empty string
        self.service.tavily_api_key = 'env_tavily_key'
        self.mock_user_context.get_mcp_api_key.return_value = None
        # Act
        llm, mcp_config = await self.service._configure_llm_and_mcp(
            self.mock_user, None
        )
        # Assert
        assert isinstance(llm, LLM)
        assert 'tavily' in mcp_config
        # Should fall back to env key since user key is empty
        assert (
            mcp_config['tavily']['url']
            == 'https://mcp.tavily.com/mcp/?tavilyApiKey=env_tavily_key'
        )
    @pytest.mark.asyncio
    async def test_configure_llm_and_mcp_tavily_with_whitespace_user_search_key(self):
        """Test _configure_llm_and_mcp handles whitespace-only user search_api_key correctly."""
        # Arrange: a whitespace-only key must be treated as absent.
        from pydantic import SecretStr
        self.mock_user.search_api_key = SecretStr('   ')  # Whitespace only
        self.service.tavily_api_key = 'env_tavily_key'
        self.mock_user_context.get_mcp_api_key.return_value = None
        # Act
        llm, mcp_config = await self.service._configure_llm_and_mcp(
            self.mock_user, None
        )
        # Assert
        assert isinstance(llm, LLM)
        assert 'tavily' in mcp_config
        # Should fall back to env key since user key is whitespace only
        assert (
            mcp_config['tavily']['url']
            == 'https://mcp.tavily.com/mcp/?tavilyApiKey=env_tavily_key'
        )
    # NOTE: patch decorators are applied bottom-up, so the mock arguments
    # arrive in reverse order: format_plan_structure -> mock_format_plan,
    # _create_condenser -> mock_create_condenser, get_planning_tools -> mock_get_tools.
    @patch(
        'openhands.app_server.app_conversation.live_status_app_conversation_service.get_planning_tools'
    )
    @patch(
        'openhands.app_server.app_conversation.app_conversation_service_base.AppConversationServiceBase._create_condenser'
    )
    @patch(
        'openhands.app_server.app_conversation.live_status_app_conversation_service.format_plan_structure'
    )
    def test_create_agent_with_context_planning_agent(
        self, mock_format_plan, mock_create_condenser, mock_get_tools
    ):
        """Test _create_agent_with_context for planning agent type."""
        # Arrange
        mock_llm = Mock(spec=LLM)
        mock_llm.model_copy.return_value = mock_llm
        mock_get_tools.return_value = []
        mock_condenser = Mock()
        mock_create_condenser.return_value = mock_condenser
        mock_format_plan.return_value = 'test_plan_structure'
        mcp_config = {'default': {'url': 'test'}}
        system_message_suffix = 'Test suffix'
        # Act
        with patch(
            'openhands.app_server.app_conversation.live_status_app_conversation_service.Agent'
        ) as mock_agent_class:
            mock_agent_instance = Mock()
            mock_agent_instance.model_copy.return_value = mock_agent_instance
            mock_agent_class.return_value = mock_agent_instance
            self.service._create_agent_with_context(
                mock_llm,
                AgentType.PLAN,
                system_message_suffix,
                mcp_config,
                self.mock_user.condenser_max_size,
            )
        # Assert: planning agents get the planning system prompt and the
        # formatted plan structure injected as a prompt kwarg.
        mock_agent_class.assert_called_once()
        call_kwargs = mock_agent_class.call_args[1]
        assert call_kwargs['llm'] == mock_llm
        assert call_kwargs['system_prompt_filename'] == 'system_prompt_planning.j2'
        assert (
            call_kwargs['system_prompt_kwargs']['plan_structure']
            == 'test_plan_structure'
        )
        assert call_kwargs['mcp_config'] == mcp_config
        assert call_kwargs['security_analyzer'] is None
        assert call_kwargs['condenser'] == mock_condenser
        mock_create_condenser.assert_called_once_with(
            mock_llm, AgentType.PLAN, self.mock_user.condenser_max_size
        )
    # NOTE: patch decorators are applied bottom-up: _create_condenser ->
    # mock_create_condenser, get_default_tools -> mock_get_tools.
    @patch(
        'openhands.app_server.app_conversation.live_status_app_conversation_service.get_default_tools'
    )
    @patch(
        'openhands.app_server.app_conversation.app_conversation_service_base.AppConversationServiceBase._create_condenser'
    )
    def test_create_agent_with_context_default_agent(
        self, mock_create_condenser, mock_get_tools
    ):
        """Test _create_agent_with_context for default agent type."""
        # Arrange
        mock_llm = Mock(spec=LLM)
        mock_llm.model_copy.return_value = mock_llm
        mock_get_tools.return_value = []
        mock_condenser = Mock()
        mock_create_condenser.return_value = mock_condenser
        mcp_config = {'default': {'url': 'test'}}
        # Act
        with patch(
            'openhands.app_server.app_conversation.live_status_app_conversation_service.Agent'
        ) as mock_agent_class:
            mock_agent_instance = Mock()
            mock_agent_instance.model_copy.return_value = mock_agent_instance
            mock_agent_class.return_value = mock_agent_instance
            self.service._create_agent_with_context(
                mock_llm,
                AgentType.DEFAULT,
                None,
                mcp_config,
                self.mock_user.condenser_max_size,
            )
        # Assert: default agents run with cli_mode off and browser tooling on.
        mock_agent_class.assert_called_once()
        call_kwargs = mock_agent_class.call_args[1]
        assert call_kwargs['llm'] == mock_llm
        assert call_kwargs['system_prompt_kwargs']['cli_mode'] is False
        assert call_kwargs['mcp_config'] == mcp_config
        assert call_kwargs['condenser'] == mock_condenser
        mock_get_tools.assert_called_once_with(enable_browser=True)
        mock_create_condenser.assert_called_once_with(
            mock_llm, AgentType.DEFAULT, self.mock_user.condenser_max_size
        )
    @pytest.mark.asyncio
    @patch(
        'openhands.app_server.app_conversation.live_status_app_conversation_service.ExperimentManagerImpl'
    )
    async def test_finalize_conversation_request_with_skills(
        self, mock_experiment_manager
    ):
        """Test _finalize_conversation_request with skills loading."""
        # Arrange
        mock_agent = Mock(spec=Agent)
        mock_updated_agent = Mock(spec=Agent)
        # The experiment manager may swap in a variant agent; the finalized
        # request must use that variant, not the original.
        mock_experiment_manager.run_agent_variant_tests__v1.return_value = (
            mock_updated_agent
        )
        conversation_id = uuid4()
        workspace = LocalWorkspace(working_dir='/test')
        initial_message = Mock(spec=SendMessageRequest)
        secrets = {'test': StaticSecret(value='secret')}
        remote_workspace = Mock(spec=AsyncRemoteWorkspace)
        # Mock the skills loading method
        self.service._load_skills_and_update_agent = AsyncMock(
            return_value=mock_updated_agent
        )
        # Act
        result = await self.service._finalize_conversation_request(
            mock_agent,
            conversation_id,
            self.mock_user,
            workspace,
            initial_message,
            secrets,
            self.mock_sandbox,
            remote_workspace,
            'test_repo',
            '/test/dir',
        )
        # Assert
        assert isinstance(result, StartConversationRequest)
        assert result.conversation_id == conversation_id
        assert result.agent == mock_updated_agent
        assert result.workspace == workspace
        assert result.initial_message == initial_message
        assert result.secrets == secrets
        mock_experiment_manager.run_agent_variant_tests__v1.assert_called_once_with(
            self.mock_user.id, conversation_id, mock_agent
        )
        # Skills loading runs against the experiment-updated agent.
        self.service._load_skills_and_update_agent.assert_called_once_with(
            self.mock_sandbox,
            mock_updated_agent,
            remote_workspace,
            'test_repo',
            '/test/dir',
        )
    @pytest.mark.asyncio
    @patch(
        'openhands.app_server.app_conversation.live_status_app_conversation_service.ExperimentManagerImpl'
    )
    async def test_finalize_conversation_request_without_skills(
        self, mock_experiment_manager
    ):
        """Test _finalize_conversation_request without remote workspace (no skills)."""
        # Arrange
        mock_agent = Mock(spec=Agent)
        mock_updated_agent = Mock(spec=Agent)
        mock_experiment_manager.run_agent_variant_tests__v1.return_value = (
            mock_updated_agent
        )
        workspace = LocalWorkspace(working_dir='/test')
        secrets = {'test': StaticSecret(value='secret')}
        # Act: no conversation_id and no remote workspace are provided.
        result = await self.service._finalize_conversation_request(
            mock_agent,
            None,
            self.mock_user,
            workspace,
            None,
            secrets,
            self.mock_sandbox,
            None,
            None,
            '/test/dir',
        )
        # Assert: a fresh conversation UUID is generated when none is passed.
        assert isinstance(result, StartConversationRequest)
        assert isinstance(result.conversation_id, UUID)
        assert result.agent == mock_updated_agent
        mock_experiment_manager.run_agent_variant_tests__v1.assert_called_once()
    @pytest.mark.asyncio
    @patch(
        'openhands.app_server.app_conversation.live_status_app_conversation_service.ExperimentManagerImpl'
    )
    async def test_finalize_conversation_request_skills_loading_fails(
        self, mock_experiment_manager
    ):
        """Test _finalize_conversation_request when skills loading fails."""
        # Arrange
        mock_agent = Mock(spec=Agent)
        mock_updated_agent = Mock(spec=Agent)
        mock_experiment_manager.run_agent_variant_tests__v1.return_value = (
            mock_updated_agent
        )
        workspace = LocalWorkspace(working_dir='/test')
        secrets = {'test': StaticSecret(value='secret')}
        remote_workspace = Mock(spec=AsyncRemoteWorkspace)
        # Mock skills loading to raise an exception
        self.service._load_skills_and_update_agent = AsyncMock(
            side_effect=Exception('Skills loading failed')
        )
        # Act
        with patch(
            'openhands.app_server.app_conversation.live_status_app_conversation_service._logger'
        ) as mock_logger:
            result = await self.service._finalize_conversation_request(
                mock_agent,
                None,
                self.mock_user,
                workspace,
                None,
                secrets,
                self.mock_sandbox,
                remote_workspace,
                'test_repo',
                '/test/dir',
            )
        # Assert: skills failures are non-fatal — the request is still built
        # and the failure is only logged as a warning.
        assert isinstance(result, StartConversationRequest)
        assert (
            result.agent == mock_updated_agent
        )  # Should still use the experiment-modified agent
        mock_logger.warning.assert_called_once()
    @pytest.mark.asyncio
    async def test_build_start_conversation_request_for_user_integration(self):
        """Test the main _build_start_conversation_request_for_user method integration."""
        # Arrange
        self.mock_user_context.get_user_info.return_value = self.mock_user
        # Mock all the helper methods so only the orchestration is exercised.
        mock_secrets = {'GITHUB_TOKEN': Mock()}
        mock_llm = Mock(spec=LLM)
        mock_mcp_config = {'default': {'url': 'test'}}
        mock_agent = Mock(spec=Agent)
        mock_final_request = Mock(spec=StartConversationRequest)
        self.service._setup_secrets_for_git_provider = AsyncMock(
            return_value=mock_secrets
        )
        self.service._configure_llm_and_mcp = AsyncMock(
            return_value=(mock_llm, mock_mcp_config)
        )
        self.service._create_agent_with_context = Mock(return_value=mock_agent)
        self.service._finalize_conversation_request = AsyncMock(
            return_value=mock_final_request
        )
        # Act
        result = await self.service._build_start_conversation_request_for_user(
            sandbox=self.mock_sandbox,
            initial_message=None,
            system_message_suffix='Test suffix',
            git_provider=ProviderType.GITHUB,
            working_dir='/test/dir',
            agent_type=AgentType.DEFAULT,
            llm_model='gpt-4',
            conversation_id=None,
            remote_workspace=None,
            selected_repository='test/repo',
        )
        # Assert: each helper is invoked exactly once with the expected inputs
        # and the finalized request is returned unmodified.
        assert result == mock_final_request
        self.service._setup_secrets_for_git_provider.assert_called_once_with(
            ProviderType.GITHUB, self.mock_user
        )
        self.service._configure_llm_and_mcp.assert_called_once_with(
            self.mock_user, 'gpt-4'
        )
        self.service._create_agent_with_context.assert_called_once_with(
            mock_llm,
            AgentType.DEFAULT,
            'Test suffix',
            mock_mcp_config,
            self.mock_user.condenser_max_size,
        )
        self.service._finalize_conversation_request.assert_called_once()

View File

@@ -0,0 +1,615 @@
"""Tests for stats event processing in webhook_router.
This module tests the stats event processing functionality introduced for
updating conversation statistics from ConversationStateUpdateEvent events.
"""
from datetime import datetime, timezone
from typing import AsyncGenerator
from unittest.mock import AsyncMock, MagicMock, patch
from uuid import uuid4
import pytest
from sqlalchemy.ext.asyncio import AsyncSession, async_sessionmaker, create_async_engine
from sqlalchemy.pool import StaticPool
from openhands.app_server.app_conversation.app_conversation_models import (
AppConversationInfo,
)
from openhands.app_server.app_conversation.sql_app_conversation_info_service import (
SQLAppConversationInfoService,
StoredConversationMetadata,
)
from openhands.app_server.user.specifiy_user_context import SpecifyUserContext
from openhands.app_server.utils.sql_utils import Base
from openhands.sdk.conversation.conversation_stats import ConversationStats
from openhands.sdk.event import ConversationStateUpdateEvent
from openhands.sdk.llm.utils.metrics import Metrics, TokenUsage
# ---------------------------------------------------------------------------
# Fixtures
# ---------------------------------------------------------------------------
@pytest.fixture
async def async_engine():
    """Create an async SQLite engine for testing."""
    engine = create_async_engine(
        'sqlite+aiosqlite:///:memory:',
        # StaticPool + check_same_thread=False lets the single in-memory
        # SQLite connection be reused across the async test session.
        poolclass=StaticPool,
        connect_args={'check_same_thread': False},
        echo=False,
    )
    # Create all tables
    async with engine.begin() as conn:
        await conn.run_sync(Base.metadata.create_all)
    yield engine
    await engine.dispose()
@pytest.fixture
async def async_session(async_engine) -> AsyncGenerator[AsyncSession, None]:
    """Create an async session for testing."""
    # expire_on_commit=False keeps ORM objects usable after commit, so tests
    # can assert on them without an implicit re-fetch.
    async_session_maker = async_sessionmaker(
        async_engine, class_=AsyncSession, expire_on_commit=False
    )
    async with async_session_maker() as db_session:
        yield db_session
@pytest.fixture
def service(async_session) -> SQLAppConversationInfoService:
    """Create a SQLAppConversationInfoService instance for testing."""
    # user_id=None means the service is not scoped to a specific user.
    return SQLAppConversationInfoService(
        db_session=async_session, user_context=SpecifyUserContext(user_id=None)
    )
@pytest.fixture
async def v1_conversation_metadata(async_session, service):
    """Create a V1 conversation metadata record for testing.

    Returns a (conversation_id, stored_row) tuple with all statistics
    counters zeroed so tests can observe updates.
    """
    conversation_id = uuid4()
    stored = StoredConversationMetadata(
        conversation_id=str(conversation_id),
        user_id='test_user_123',
        sandbox_id='sandbox_123',
        conversation_version='V1',
        title='Test Conversation',
        accumulated_cost=0.0,
        prompt_tokens=0,
        completion_tokens=0,
        cache_read_tokens=0,
        cache_write_tokens=0,
        reasoning_tokens=0,
        context_window=0,
        per_turn_token=0,
        created_at=datetime.now(timezone.utc),
        last_updated_at=datetime.now(timezone.utc),
    )
    async_session.add(stored)
    await async_session.commit()
    return conversation_id, stored
@pytest.fixture
def stats_event_with_dict_value():
    """Create a ConversationStateUpdateEvent with dict value."""
    # Payload contains a populated 'agent' usage bucket and a zeroed
    # 'condenser' bucket; only agent metrics drive the statistics update.
    event_value = {
        'usage_to_metrics': {
            'agent': {
                'accumulated_cost': 0.03411525,
                'max_budget_per_task': None,
                'accumulated_token_usage': {
                    'prompt_tokens': 8770,
                    'completion_tokens': 82,
                    'cache_read_tokens': 0,
                    'cache_write_tokens': 8767,
                    'reasoning_tokens': 0,
                    'context_window': 0,
                    'per_turn_token': 8852,
                },
            },
            'condenser': {
                'accumulated_cost': 0.0,
                'accumulated_token_usage': {
                    'prompt_tokens': 0,
                    'completion_tokens': 0,
                },
            },
        }
    }
    return ConversationStateUpdateEvent(key='stats', value=event_value)
@pytest.fixture
def stats_event_with_object_value():
    """Create a ConversationStateUpdateEvent with object value."""
    # A non-dict value exercising the attribute-access path: the event value
    # is an object exposing usage_to_metrics rather than a plain dict.
    event_value = MagicMock()
    event_value.usage_to_metrics = {
        'agent': {
            'accumulated_cost': 0.05,
            'accumulated_token_usage': {
                'prompt_tokens': 1000,
                'completion_tokens': 100,
            },
        }
    }
    return ConversationStateUpdateEvent(key='stats', value=event_value)
@pytest.fixture
def stats_event_no_usage_to_metrics():
    """Build a 'stats' ConversationStateUpdateEvent whose payload lacks the
    usage_to_metrics key, so stats processing should treat it as a no-op."""
    return ConversationStateUpdateEvent(
        key='stats', value={'some_other_key': 'value'}
    )
# ---------------------------------------------------------------------------
# Tests for update_conversation_statistics
# ---------------------------------------------------------------------------
class TestUpdateConversationStatistics:
    """Test the update_conversation_statistics method.

    Covers the happy path, partial/None-valued metrics, missing rows,
    and the rule that only V1 conversations with 'agent' metrics update.
    """
    @pytest.mark.asyncio
    async def test_update_statistics_success(
        self, service, async_session, v1_conversation_metadata
    ):
        """Test successfully updating conversation statistics."""
        conversation_id, stored = v1_conversation_metadata
        agent_metrics = Metrics(
            model_name='test-model',
            accumulated_cost=0.03411525,
            max_budget_per_task=10.0,
            accumulated_token_usage=TokenUsage(
                model='test-model',
                prompt_tokens=8770,
                completion_tokens=82,
                cache_read_tokens=0,
                cache_write_tokens=8767,
                reasoning_tokens=0,
                context_window=0,
                per_turn_token=8852,
            ),
        )
        stats = ConversationStats(usage_to_metrics={'agent': agent_metrics})
        await service.update_conversation_statistics(conversation_id, stats)
        # Verify the update: every counter from the agent metrics is persisted.
        await async_session.refresh(stored)
        assert stored.accumulated_cost == 0.03411525
        assert stored.max_budget_per_task == 10.0
        assert stored.prompt_tokens == 8770
        assert stored.completion_tokens == 82
        assert stored.cache_read_tokens == 0
        assert stored.cache_write_tokens == 8767
        assert stored.reasoning_tokens == 0
        assert stored.context_window == 0
        assert stored.per_turn_token == 8852
        assert stored.last_updated_at is not None
    @pytest.mark.asyncio
    async def test_update_statistics_partial_update(
        self, service, async_session, v1_conversation_metadata
    ):
        """Test updating only some statistics fields."""
        conversation_id, stored = v1_conversation_metadata
        # Set initial values
        stored.accumulated_cost = 0.01
        stored.prompt_tokens = 100
        await async_session.commit()
        agent_metrics = Metrics(
            model_name='test-model',
            accumulated_cost=0.05,
            accumulated_token_usage=TokenUsage(
                model='test-model',
                prompt_tokens=200,
                completion_tokens=0,  # Default value
            ),
        )
        stats = ConversationStats(usage_to_metrics={'agent': agent_metrics})
        await service.update_conversation_statistics(conversation_id, stats)
        # Verify updated fields
        await async_session.refresh(stored)
        assert stored.accumulated_cost == 0.05
        assert stored.prompt_tokens == 200
        # completion_tokens should remain unchanged (not None in stats)
        assert stored.completion_tokens == 0
    @pytest.mark.asyncio
    async def test_update_statistics_no_agent_metrics(
        self, service, v1_conversation_metadata
    ):
        """Test that update is skipped when no agent metrics are present."""
        conversation_id, stored = v1_conversation_metadata
        original_cost = stored.accumulated_cost
        # Only a 'condenser' bucket — no 'agent' key, so nothing is written.
        condenser_metrics = Metrics(
            model_name='test-model',
            accumulated_cost=0.1,
        )
        stats = ConversationStats(usage_to_metrics={'condenser': condenser_metrics})
        await service.update_conversation_statistics(conversation_id, stats)
        # Verify no update occurred
        assert stored.accumulated_cost == original_cost
    @pytest.mark.asyncio
    async def test_update_statistics_conversation_not_found(self, service):
        """Test that update is skipped when conversation doesn't exist."""
        nonexistent_id = uuid4()
        agent_metrics = Metrics(
            model_name='test-model',
            accumulated_cost=0.1,
        )
        stats = ConversationStats(usage_to_metrics={'agent': agent_metrics})
        # Should not raise an exception
        await service.update_conversation_statistics(nonexistent_id, stats)
    @pytest.mark.asyncio
    async def test_update_statistics_v0_conversation_skipped(
        self, service, async_session
    ):
        """Test that V0 conversations are skipped."""
        conversation_id = uuid4()
        stored = StoredConversationMetadata(
            conversation_id=str(conversation_id),
            user_id='test_user_123',
            sandbox_id='sandbox_123',
            conversation_version='V0',  # V0 conversation
            title='V0 Conversation',
            accumulated_cost=0.0,
            created_at=datetime.now(timezone.utc),
            last_updated_at=datetime.now(timezone.utc),
        )
        async_session.add(stored)
        await async_session.commit()
        original_cost = stored.accumulated_cost
        agent_metrics = Metrics(
            model_name='test-model',
            accumulated_cost=0.1,
        )
        stats = ConversationStats(usage_to_metrics={'agent': agent_metrics})
        await service.update_conversation_statistics(conversation_id, stats)
        # Verify no update occurred
        await async_session.refresh(stored)
        assert stored.accumulated_cost == original_cost
    @pytest.mark.asyncio
    async def test_update_statistics_with_none_values(
        self, service, async_session, v1_conversation_metadata
    ):
        """Test that None values in stats don't overwrite existing values."""
        conversation_id, stored = v1_conversation_metadata
        # Set initial values
        stored.accumulated_cost = 0.01
        stored.max_budget_per_task = 5.0
        stored.prompt_tokens = 100
        await async_session.commit()
        agent_metrics = Metrics(
            model_name='test-model',
            accumulated_cost=0.05,
            max_budget_per_task=None,  # None value
            accumulated_token_usage=TokenUsage(
                model='test-model',
                prompt_tokens=200,
                completion_tokens=0,  # Default value (None is not valid for int)
            ),
        )
        stats = ConversationStats(usage_to_metrics={'agent': agent_metrics})
        await service.update_conversation_statistics(conversation_id, stats)
        # Verify updated fields and that None values didn't overwrite
        await async_session.refresh(stored)
        assert stored.accumulated_cost == 0.05
        assert stored.max_budget_per_task == 5.0  # Should remain unchanged
        assert stored.prompt_tokens == 200
        assert (
            stored.completion_tokens == 0
        )  # Should remain unchanged (was 0, None doesn't update)
# ---------------------------------------------------------------------------
# Tests for process_stats_event
# ---------------------------------------------------------------------------
class TestProcessStatsEvent:
    """Test the process_stats_event method.

    Exercises both dict- and object-shaped event payloads, missing or
    empty usage_to_metrics, and error swallowing/logging.
    """
    @pytest.mark.asyncio
    async def test_process_stats_event_with_dict_value(
        self,
        service,
        async_session,
        stats_event_with_dict_value,
        v1_conversation_metadata,
    ):
        """Test processing stats event with dict value."""
        conversation_id, stored = v1_conversation_metadata
        await service.process_stats_event(stats_event_with_dict_value, conversation_id)
        # Verify the update occurred
        await async_session.refresh(stored)
        assert stored.accumulated_cost == 0.03411525
        assert stored.prompt_tokens == 8770
        assert stored.completion_tokens == 82
    @pytest.mark.asyncio
    async def test_process_stats_event_with_object_value(
        self,
        service,
        async_session,
        stats_event_with_object_value,
        v1_conversation_metadata,
    ):
        """Test processing stats event with object value."""
        conversation_id, stored = v1_conversation_metadata
        await service.process_stats_event(
            stats_event_with_object_value, conversation_id
        )
        # Verify the update occurred
        await async_session.refresh(stored)
        assert stored.accumulated_cost == 0.05
        assert stored.prompt_tokens == 1000
        assert stored.completion_tokens == 100
    @pytest.mark.asyncio
    async def test_process_stats_event_no_usage_to_metrics(
        self,
        service,
        async_session,
        stats_event_no_usage_to_metrics,
        v1_conversation_metadata,
    ):
        """Test processing stats event without usage_to_metrics."""
        conversation_id, stored = v1_conversation_metadata
        original_cost = stored.accumulated_cost
        await service.process_stats_event(
            stats_event_no_usage_to_metrics, conversation_id
        )
        # Verify update_conversation_statistics was NOT called
        await async_session.refresh(stored)
        assert stored.accumulated_cost == original_cost
    @pytest.mark.asyncio
    async def test_process_stats_event_service_error_handled(
        self, service, stats_event_with_dict_value
    ):
        """Test that errors from service are caught and logged."""
        conversation_id = uuid4()
        # Should not raise an exception
        with (
            patch.object(
                service,
                'update_conversation_statistics',
                side_effect=Exception('Database error'),
            ),
            patch(
                'openhands.app_server.app_conversation.sql_app_conversation_info_service.logger'
            ) as mock_logger,
        ):
            await service.process_stats_event(
                stats_event_with_dict_value, conversation_id
            )
            # Verify error was logged
            mock_logger.exception.assert_called_once()
    @pytest.mark.asyncio
    async def test_process_stats_event_empty_usage_to_metrics(
        self, service, async_session, v1_conversation_metadata
    ):
        """Test processing stats event with empty usage_to_metrics."""
        conversation_id, stored = v1_conversation_metadata
        original_cost = stored.accumulated_cost
        # Create event with empty usage_to_metrics
        event = ConversationStateUpdateEvent(
            key='stats', value={'usage_to_metrics': {}}
        )
        await service.process_stats_event(event, conversation_id)
        # Empty dict is falsy, so update_conversation_statistics should NOT be called
        await async_session.refresh(stored)
        assert stored.accumulated_cost == original_cost
# ---------------------------------------------------------------------------
# Integration tests for on_event endpoint
# ---------------------------------------------------------------------------
class TestOnEventStatsProcessing:
    """Test stats event processing in the on_event endpoint."""
    @pytest.mark.asyncio
    async def test_on_event_processes_stats_events(self):
        """Test that on_event processes stats events."""
        from openhands.app_server.event_callback.webhook_router import on_event
        from openhands.app_server.sandbox.sandbox_models import (
            SandboxInfo,
            SandboxStatus,
        )
        conversation_id = uuid4()
        sandbox_id = 'sandbox_123'
        # Create stats event
        stats_event = ConversationStateUpdateEvent(
            key='stats',
            value={
                'usage_to_metrics': {
                    'agent': {
                        'accumulated_cost': 0.1,
                        'accumulated_token_usage': {
                            'prompt_tokens': 1000,
                        },
                    }
                }
            },
        )
        # Create non-stats event
        other_event = ConversationStateUpdateEvent(
            key='execution_status', value='running'
        )
        events = [stats_event, other_event]
        # Mock dependencies
        mock_sandbox = SandboxInfo(
            id=sandbox_id,
            status=SandboxStatus.RUNNING,
            session_api_key='test_key',
            created_by_user_id='user_123',
            sandbox_spec_id='spec_123',
        )
        mock_app_conversation_info = AppConversationInfo(
            id=conversation_id,
            sandbox_id=sandbox_id,
            created_by_user_id='user_123',
        )
        mock_event_service = AsyncMock()
        mock_app_conversation_info_service = AsyncMock()
        mock_app_conversation_info_service.get_app_conversation_info.return_value = (
            mock_app_conversation_info
        )
        # Set up process_stats_event to call update_conversation_statistics
        async def process_stats_event_side_effect(event, conversation_id):
            # Simulate what process_stats_event does - call update_conversation_statistics
            from openhands.sdk.conversation.conversation_stats import ConversationStats
            if isinstance(event.value, dict):
                stats = ConversationStats.model_validate(event.value)
                if stats and stats.usage_to_metrics:
                    await mock_app_conversation_info_service.update_conversation_statistics(
                        conversation_id, stats
                    )
        mock_app_conversation_info_service.process_stats_event.side_effect = (
            process_stats_event_side_effect
        )
        # Bypass router-level auth/validation and background callback dispatch.
        with (
            patch(
                'openhands.app_server.event_callback.webhook_router.valid_sandbox',
                return_value=mock_sandbox,
            ),
            patch(
                'openhands.app_server.event_callback.webhook_router.valid_conversation',
                return_value=mock_app_conversation_info,
            ),
            patch(
                'openhands.app_server.event_callback.webhook_router._run_callbacks_in_bg_and_close'
            ) as mock_callbacks,
        ):
            await on_event(
                events=events,
                conversation_id=conversation_id,
                sandbox_info=mock_sandbox,
                app_conversation_info_service=mock_app_conversation_info_service,
                event_service=mock_event_service,
            )
        # Verify events were saved
        assert mock_event_service.save_event.call_count == 2
        # Verify stats event was processed
        mock_app_conversation_info_service.update_conversation_statistics.assert_called_once()
        # Verify callbacks were scheduled
        mock_callbacks.assert_called_once()
    @pytest.mark.asyncio
    async def test_on_event_skips_non_stats_events(self):
        """Test that on_event skips non-stats events."""
        from openhands.app_server.event_callback.webhook_router import on_event
        from openhands.app_server.sandbox.sandbox_models import (
            SandboxInfo,
            SandboxStatus,
        )
        from openhands.events.action.message import MessageAction
        conversation_id = uuid4()
        sandbox_id = 'sandbox_123'
        # Create non-stats events
        events = [
            ConversationStateUpdateEvent(key='execution_status', value='running'),
            MessageAction(content='test'),
        ]
        mock_sandbox = SandboxInfo(
            id=sandbox_id,
            status=SandboxStatus.RUNNING,
            session_api_key='test_key',
            created_by_user_id='user_123',
            sandbox_spec_id='spec_123',
        )
        mock_app_conversation_info = AppConversationInfo(
            id=conversation_id,
            sandbox_id=sandbox_id,
            created_by_user_id='user_123',
        )
        mock_event_service = AsyncMock()
        mock_app_conversation_info_service = AsyncMock()
        mock_app_conversation_info_service.get_app_conversation_info.return_value = (
            mock_app_conversation_info
        )
        with (
            patch(
                'openhands.app_server.event_callback.webhook_router.valid_sandbox',
                return_value=mock_sandbox,
            ),
            patch(
                'openhands.app_server.event_callback.webhook_router.valid_conversation',
                return_value=mock_app_conversation_info,
            ),
            patch(
                'openhands.app_server.event_callback.webhook_router._run_callbacks_in_bg_and_close'
            ),
        ):
            await on_event(
                events=events,
                conversation_id=conversation_id,
                sandbox_info=mock_sandbox,
                app_conversation_info_service=mock_app_conversation_info_service,
                event_service=mock_event_service,
            )
        # Verify stats update was NOT called
        mock_app_conversation_info_service.update_conversation_statistics.assert_not_called()

View File

@@ -1,243 +0,0 @@
"""Integration tests for PostHog tracking in AgentController."""
import asyncio
from unittest.mock import MagicMock, patch
import pytest
from openhands.controller.agent import Agent
from openhands.controller.agent_controller import AgentController
from openhands.core.config import OpenHandsConfig
from openhands.core.config.agent_config import AgentConfig
from openhands.core.config.llm_config import LLMConfig
from openhands.core.schema import AgentState
from openhands.events import EventSource, EventStream
from openhands.events.action.message import SystemMessageAction
from openhands.llm.llm_registry import LLMRegistry
from openhands.server.services.conversation_stats import ConversationStats
from openhands.storage.memory import InMemoryFileStore
# NOTE(review): a custom event_loop fixture is the legacy pytest-asyncio
# pattern; newer pytest-asyncio versions deprecate redefining it — confirm
# the pinned plugin version before modernizing.
@pytest.fixture(scope='function')
def event_loop():
    """Create event loop for async tests."""
    loop = asyncio.get_event_loop_policy().new_event_loop()
    yield loop
    loop.close()
@pytest.fixture
def mock_agent_with_stats():
    """Create a mock agent with properly connected LLM registry and conversation stats.

    Returns an (agent, conversation_stats, llm_registry) triple where the
    registry is subscribed to the stats object, so LLMs created through the
    registry are tracked by the stats.
    """
    import uuid
    # Create LLM registry
    config = OpenHandsConfig()
    llm_registry = LLMRegistry(config=config)
    # Create conversation stats
    file_store = InMemoryFileStore({})
    conversation_id = f'test-conversation-{uuid.uuid4()}'
    conversation_stats = ConversationStats(
        file_store=file_store, conversation_id=conversation_id, user_id='test-user'
    )
    # Connect registry to stats
    llm_registry.subscribe(conversation_stats.register_llm)
    # Create mock agent
    agent = MagicMock(spec=Agent)
    agent_config = MagicMock(spec=AgentConfig)
    llm_config = LLMConfig(
        model='gpt-4o',
        api_key='test_key',
        num_retries=2,
        retry_min_wait=1,
        retry_max_wait=2,
    )
    agent_config.disabled_microagents = []
    agent_config.enable_mcp = True
    agent_config.enable_stuck_detection = True
    # Clear any cached LLMs so get_llm creates a fresh, stats-tracked one.
    llm_registry.service_to_llm.clear()
    mock_llm = llm_registry.get_llm('agent_llm', llm_config)
    agent.llm = mock_llm
    agent.name = 'test-agent'
    agent.sandbox_plugins = []
    agent.config = agent_config
    agent.llm_registry = llm_registry
    agent.prompt_manager = MagicMock()
    # Add a proper system message mock
    system_message = SystemMessageAction(
        content='Test system message', tools=['test_tool']
    )
    system_message._source = EventSource.AGENT
    system_message._id = -1  # Set invalid ID to avoid the ID check
    agent.get_system_message.return_value = system_message
    return agent, conversation_stats, llm_registry
@pytest.fixture
def mock_event_stream():
    """Build a MagicMock standing in for an EventStream with a real backing stream."""
    backing_stream = EventStream(sid='test', file_store=InMemoryFileStore({}))
    stream_mock = MagicMock(spec=EventStream, event_stream=backing_stream)
    stream_mock.get_latest_event_id.return_value = 0
    return stream_mock
@pytest.mark.asyncio
async def test_agent_finish_triggers_posthog_tracking(
    mock_agent_with_stats, mock_event_stream
):
    """Setting agent state to FINISHED must emit an 'agent_task_completed' event."""
    mock_agent, conversation_stats, llm_registry = mock_agent_with_stats
    controller = AgentController(
        agent=mock_agent,
        event_stream=mock_event_stream,
        conversation_stats=conversation_stats,
        iteration_delta=10,
        sid='test-conversation-123',
        user_id='test-user-456',
        confirmation_mode=False,
        headless_mode=True,
    )
    with (
        patch('openhands.utils.posthog_tracker.posthog') as mock_posthog,
        patch('os.environ.get') as mock_env_get,
    ):
        # Setup mocks: capture must be a fresh MagicMock and env reports SaaS mode.
        mock_posthog.capture = MagicMock()
        mock_env_get.return_value = 'saas'
        # Initialize posthog in the tracker module; the tracker holds a module-level
        # reference, so patching alone is not enough — rebind it explicitly.
        import openhands.utils.posthog_tracker as tracker

        tracker.posthog = mock_posthog
        # Set agent state to FINISHED — the transition under test.
        await controller.set_agent_state_to(AgentState.FINISHED)
        # Verify PostHog tracking was called exactly once with the expected payload.
        mock_posthog.capture.assert_called_once()
        call_args = mock_posthog.capture.call_args
        assert call_args[1]['distinct_id'] == 'test-user-456'
        assert call_args[1]['event'] == 'agent_task_completed'
        assert 'conversation_id' in call_args[1]['properties']
        assert call_args[1]['properties']['user_id'] == 'test-user-456'
        assert call_args[1]['properties']['app_mode'] == 'saas'
    await controller.close()
@pytest.mark.asyncio
async def test_agent_finish_without_user_id(mock_agent_with_stats, mock_event_stream):
    """Anonymous completions (user_id=None) fall back to a conversation-based id."""
    mock_agent, conversation_stats, llm_registry = mock_agent_with_stats
    controller = AgentController(
        agent=mock_agent,
        event_stream=mock_event_stream,
        conversation_stats=conversation_stats,
        iteration_delta=10,
        sid='test-conversation-789',
        user_id=None,
        confirmation_mode=False,
        headless_mode=True,
    )
    with (
        patch('openhands.utils.posthog_tracker.posthog') as mock_posthog,
        patch('os.environ.get') as mock_env_get,
    ):
        mock_posthog.capture = MagicMock()
        mock_env_get.return_value = 'oss'
        # Rebind the tracker's module-level posthog reference to the mock.
        import openhands.utils.posthog_tracker as tracker

        tracker.posthog = mock_posthog
        await controller.set_agent_state_to(AgentState.FINISHED)
        mock_posthog.capture.assert_called_once()
        call_args = mock_posthog.capture.call_args
        # When user_id is None, distinct_id should be derived from the conversation id.
        assert call_args[1]['distinct_id'].startswith('conversation_')
        assert call_args[1]['properties']['user_id'] is None
    await controller.close()
@pytest.mark.asyncio
async def test_other_states_dont_trigger_tracking(
    mock_agent_with_stats, mock_event_stream
):
    """Only the FINISHED transition should produce a PostHog event."""
    mock_agent, conversation_stats, llm_registry = mock_agent_with_stats
    controller = AgentController(
        agent=mock_agent,
        event_stream=mock_event_stream,
        conversation_stats=conversation_stats,
        iteration_delta=10,
        sid='test-conversation-999',
        confirmation_mode=False,
        headless_mode=True,
    )
    with patch('openhands.utils.posthog_tracker.posthog') as mock_posthog:
        mock_posthog.capture = MagicMock()
        # Rebind the tracker's module-level posthog reference to the mock.
        import openhands.utils.posthog_tracker as tracker

        tracker.posthog = mock_posthog
        # Try several non-terminal states in sequence.
        await controller.set_agent_state_to(AgentState.RUNNING)
        await controller.set_agent_state_to(AgentState.PAUSED)
        await controller.set_agent_state_to(AgentState.STOPPED)
        # PostHog should not be called for non-FINISHED states.
        mock_posthog.capture.assert_not_called()
    await controller.close()
@pytest.mark.asyncio
async def test_tracking_error_doesnt_break_agent(
    mock_agent_with_stats, mock_event_stream
):
    """A PostHog failure must not prevent the agent from reaching FINISHED."""
    mock_agent, conversation_stats, llm_registry = mock_agent_with_stats
    controller = AgentController(
        agent=mock_agent,
        event_stream=mock_event_stream,
        conversation_stats=conversation_stats,
        iteration_delta=10,
        sid='test-conversation-error',
        confirmation_mode=False,
        headless_mode=True,
    )
    with patch('openhands.utils.posthog_tracker.posthog') as mock_posthog:
        # capture raises to simulate a PostHog outage.
        mock_posthog.capture = MagicMock(side_effect=Exception('PostHog error'))
        import openhands.utils.posthog_tracker as tracker

        tracker.posthog = mock_posthog
        # Should not raise an exception despite the failing capture call.
        await controller.set_agent_state_to(AgentState.FINISHED)
        # Agent state should still be FINISHED despite the tracking error.
        assert controller.state.agent_state == AgentState.FINISHED
    await controller.close()

View File

@@ -152,6 +152,7 @@ class TestExperimentManagerIntegration:
llm_base_url=None,
llm_api_key=None,
confirmation_mode=False,
condenser_max_size=None,
)
async def get_secrets(self):
@@ -200,8 +201,24 @@ class TestExperimentManagerIntegration:
# Patch the pieces invoked by the service
with (
patch(
'openhands.app_server.app_conversation.live_status_app_conversation_service.get_default_agent',
patch.object(
service,
'_setup_secrets_for_git_provider',
return_value={},
),
patch.object(
service,
'_configure_llm_and_mcp',
return_value=(mock_llm, {}),
),
patch.object(
service,
'_create_agent_with_context',
return_value=mock_agent,
),
patch.object(
service,
'_load_skills_and_update_agent',
return_value=mock_agent,
),
patch(

View File

@@ -1255,6 +1255,25 @@ def test_opus_41_keeps_temperature_top_p(mock_completion):
assert 'top_p' not in call_kwargs
@patch('openhands.llm.llm.litellm_completion')
def test_opus_45_keeps_temperature_drops_top_p(mock_completion):
    """Opus 4.5 requests must forward temperature but strip top_p."""
    mock_completion.return_value = {
        'choices': [{'message': {'content': 'ok'}}],
    }
    llm_config = LLMConfig(
        model='anthropic/claude-opus-4-5-20251101',
        api_key='k',
        temperature=0.7,
        top_p=0.9,
    )
    client = LLM(llm_config, service_id='svc')
    client.completion(messages=[{'role': 'user', 'content': 'hi'}])
    forwarded = mock_completion.call_args[1]
    # Anthropic rejects both temperature and top_p together on Opus 4.5; we keep
    # temperature and drop top_p.
    assert forwarded.get('temperature') == 0.7
    assert 'top_p' not in forwarded
@patch('openhands.llm.llm.litellm_completion')
def test_sonnet_4_keeps_temperature_drops_top_p(mock_completion):
mock_completion.return_value = {

View File

@@ -46,6 +46,9 @@ class MockUserAuth(UserAuth):
    async def get_secrets(self) -> Secrets | None:
        # Mock auth exposes no stored user secrets.
        return None
    async def get_mcp_api_key(self) -> str | None:
        # Mock auth never issues an MCP API key.
        return None
    @classmethod
    async def get_instance(cls, request: Request) -> UserAuth:
        # Factory used by the auth dependency; always returns a fresh mock.
        return MockUserAuth()

View File

@@ -46,6 +46,9 @@ class MockUserAuth(UserAuth):
    async def get_secrets(self) -> Secrets | None:
        # Mock auth exposes no stored user secrets.
        return None
    async def get_mcp_api_key(self) -> str | None:
        # Mock auth never issues an MCP API key.
        return None
    @classmethod
    async def get_instance(cls, request: Request) -> UserAuth:
        # Factory used by the auth dependency; always returns a fresh mock.
        return MockUserAuth()

View File

@@ -1,356 +0,0 @@
"""Unit tests for PostHog tracking utilities."""
from unittest.mock import MagicMock, patch
import pytest
from openhands.utils.posthog_tracker import (
alias_user_identities,
track_agent_task_completed,
track_credit_limit_reached,
track_credits_purchased,
track_user_signup_completed,
)
@pytest.fixture
def mock_posthog():
    """Patch the posthog module used by the tracker and yield the mock."""
    with patch('openhands.utils.posthog_tracker.posthog') as patched:
        patched.capture = MagicMock()
        yield patched
def test_track_agent_task_completed_with_user_id(mock_posthog):
    """Completion events for a known user are keyed by that user's id."""
    # Rebind the tracker's module-level posthog reference to the mock.
    import openhands.utils.posthog_tracker as tracker

    tracker.posthog = mock_posthog
    track_agent_task_completed(
        conversation_id='test-conversation-123',
        user_id='user-456',
        app_mode='saas',
    )
    expected_properties = {
        'conversation_id': 'test-conversation-123',
        'user_id': 'user-456',
        'app_mode': 'saas',
    }
    mock_posthog.capture.assert_called_once_with(
        distinct_id='user-456',
        event='agent_task_completed',
        properties=expected_properties,
    )
def test_track_agent_task_completed_without_user_id(mock_posthog):
    """Anonymous completions use a conversation-derived distinct id."""
    import openhands.utils.posthog_tracker as tracker

    tracker.posthog = mock_posthog
    track_agent_task_completed(
        conversation_id='test-conversation-789',
        user_id=None,
        app_mode='oss',
    )
    expected_properties = {
        'conversation_id': 'test-conversation-789',
        'user_id': None,
        'app_mode': 'oss',
    }
    mock_posthog.capture.assert_called_once_with(
        distinct_id='conversation_test-conversation-789',
        event='agent_task_completed',
        properties=expected_properties,
    )
def test_track_agent_task_completed_default_app_mode(mock_posthog):
    """Omitting app_mode falls back to the 'unknown' default."""
    import openhands.utils.posthog_tracker as tracker

    tracker.posthog = mock_posthog
    track_agent_task_completed(
        conversation_id='test-conversation-999',
        user_id='user-111',
    )
    expected_properties = {
        'conversation_id': 'test-conversation-999',
        'user_id': 'user-111',
        'app_mode': 'unknown',
    }
    mock_posthog.capture.assert_called_once_with(
        distinct_id='user-111',
        event='agent_task_completed',
        properties=expected_properties,
    )
def test_track_agent_task_completed_handles_errors(mock_posthog):
    """A PostHog failure must not propagate out of the tracker."""
    import openhands.utils.posthog_tracker as tracker

    tracker.posthog = mock_posthog
    mock_posthog.capture.side_effect = Exception('PostHog API error')
    # The call below must swallow the simulated API failure.
    track_agent_task_completed(
        conversation_id='test-conversation-error',
        user_id='user-error',
        app_mode='saas',
    )
def test_track_agent_task_completed_when_posthog_not_installed():
    """Tracking is a silent no-op when the posthog package is absent."""
    import openhands.utils.posthog_tracker as tracker

    tracker.posthog = None  # simulate posthog not being installed
    # Must not raise even though no client exists.
    track_agent_task_completed(
        conversation_id='test-conversation-no-ph',
        user_id='user-no-ph',
        app_mode='oss',
    )
def test_track_user_signup_completed(mock_posthog):
    """Signup events carry the user id and the signup timestamp."""
    import openhands.utils.posthog_tracker as tracker

    tracker.posthog = mock_posthog
    track_user_signup_completed(
        user_id='test-user-123',
        signup_timestamp='2025-01-15T10:30:00Z',
    )
    expected_properties = {
        'user_id': 'test-user-123',
        'signup_timestamp': '2025-01-15T10:30:00Z',
    }
    mock_posthog.capture.assert_called_once_with(
        distinct_id='test-user-123',
        event='user_signup_completed',
        properties=expected_properties,
    )
def test_track_user_signup_completed_handles_errors(mock_posthog):
    """Signup tracking must swallow PostHog failures."""
    import openhands.utils.posthog_tracker as tracker

    tracker.posthog = mock_posthog
    mock_posthog.capture.side_effect = Exception('PostHog API error')
    # Must not raise despite the simulated API failure.
    track_user_signup_completed(
        user_id='test-user-error',
        signup_timestamp='2025-01-15T12:00:00Z',
    )
def test_track_user_signup_completed_when_posthog_not_installed():
    """Signup tracking is a no-op when posthog is unavailable."""
    import openhands.utils.posthog_tracker as tracker

    tracker.posthog = None  # simulate posthog not being installed
    # Must not raise even though no client exists.
    track_user_signup_completed(
        user_id='test-user-no-ph',
        signup_timestamp='2025-01-15T13:00:00Z',
    )
def test_track_credit_limit_reached_with_user_id(mock_posthog):
    """Credit-limit events for a known user include budget figures."""
    import openhands.utils.posthog_tracker as tracker

    tracker.posthog = mock_posthog
    track_credit_limit_reached(
        conversation_id='test-conversation-456',
        user_id='user-789',
        current_budget=10.50,
        max_budget=10.00,
    )
    expected_properties = {
        'conversation_id': 'test-conversation-456',
        'user_id': 'user-789',
        'current_budget': 10.50,
        'max_budget': 10.00,
    }
    mock_posthog.capture.assert_called_once_with(
        distinct_id='user-789',
        event='credit_limit_reached',
        properties=expected_properties,
    )
def test_track_credit_limit_reached_without_user_id(mock_posthog):
    """Anonymous credit-limit events use a conversation-derived distinct id."""
    import openhands.utils.posthog_tracker as tracker

    tracker.posthog = mock_posthog
    track_credit_limit_reached(
        conversation_id='test-conversation-999',
        user_id=None,
        current_budget=5.25,
        max_budget=5.00,
    )
    expected_properties = {
        'conversation_id': 'test-conversation-999',
        'user_id': None,
        'current_budget': 5.25,
        'max_budget': 5.00,
    }
    mock_posthog.capture.assert_called_once_with(
        distinct_id='conversation_test-conversation-999',
        event='credit_limit_reached',
        properties=expected_properties,
    )
def test_track_credit_limit_reached_handles_errors(mock_posthog):
    """Credit-limit tracking must swallow PostHog failures."""
    import openhands.utils.posthog_tracker as tracker

    tracker.posthog = mock_posthog
    mock_posthog.capture.side_effect = Exception('PostHog API error')
    # Must not raise despite the simulated API failure.
    track_credit_limit_reached(
        conversation_id='test-conversation-error',
        user_id='user-error',
        current_budget=15.00,
        max_budget=10.00,
    )
def test_track_credit_limit_reached_when_posthog_not_installed():
    """Credit-limit tracking is a no-op when posthog is unavailable."""
    import openhands.utils.posthog_tracker as tracker

    tracker.posthog = None  # simulate posthog not being installed
    # Must not raise even though no client exists.
    track_credit_limit_reached(
        conversation_id='test-conversation-no-ph',
        user_id='user-no-ph',
        current_budget=8.00,
        max_budget=5.00,
    )
def test_track_credits_purchased(mock_posthog):
    """Purchase events carry amount, credits, and the Stripe session id."""
    import openhands.utils.posthog_tracker as tracker

    tracker.posthog = mock_posthog
    track_credits_purchased(
        user_id='test-user-999',
        amount_usd=50.00,
        credits_added=50.00,
        stripe_session_id='cs_test_abc123',
    )
    expected_properties = {
        'user_id': 'test-user-999',
        'amount_usd': 50.00,
        'credits_added': 50.00,
        'stripe_session_id': 'cs_test_abc123',
    }
    mock_posthog.capture.assert_called_once_with(
        distinct_id='test-user-999',
        event='credits_purchased',
        properties=expected_properties,
    )
def test_track_credits_purchased_handles_errors(mock_posthog):
    """Purchase tracking must swallow PostHog failures."""
    import openhands.utils.posthog_tracker as tracker

    tracker.posthog = mock_posthog
    mock_posthog.capture.side_effect = Exception('PostHog API error')
    # Must not raise despite the simulated API failure.
    track_credits_purchased(
        user_id='test-user-error',
        amount_usd=100.00,
        credits_added=100.00,
        stripe_session_id='cs_test_error',
    )
def test_track_credits_purchased_when_posthog_not_installed():
    """Purchase tracking is a no-op when posthog is unavailable."""
    import openhands.utils.posthog_tracker as tracker

    tracker.posthog = None  # simulate posthog not being installed
    # Must not raise even though no client exists.
    track_credits_purchased(
        user_id='test-user-no-ph',
        amount_usd=25.00,
        credits_added=25.00,
        stripe_session_id='cs_test_no_ph',
    )
def test_alias_user_identities(mock_posthog):
    """Aliasing links the git login (previous id) onto the Keycloak id.

    Verifies that posthog.alias(previous_id, distinct_id) is called with the
    git login as previous_id and the keycloak user id as distinct_id.
    """
    import openhands.utils.posthog_tracker as tracker

    tracker.posthog = mock_posthog
    mock_posthog.alias = MagicMock()
    alias_user_identities(
        keycloak_user_id='keycloak-123',
        git_login='git-user',
    )
    # Expected call shape: posthog.alias(previous_id='git-user', distinct_id='keycloak-123')
    mock_posthog.alias.assert_called_once_with('git-user', 'keycloak-123')
def test_alias_user_identities_handles_errors(mock_posthog):
    """Aliasing must swallow PostHog failures."""
    import openhands.utils.posthog_tracker as tracker

    tracker.posthog = mock_posthog
    mock_posthog.alias = MagicMock(side_effect=Exception('PostHog API error'))
    # Must not raise despite the simulated API failure.
    alias_user_identities(
        keycloak_user_id='keycloak-error',
        git_login='git-error',
    )
def test_alias_user_identities_when_posthog_not_installed():
    """Aliasing is a no-op when posthog is unavailable."""
    import openhands.utils.posthog_tracker as tracker

    tracker.posthog = None  # simulate posthog not being installed
    # Must not raise even though no client exists.
    alias_user_identities(
        keycloak_user_id='keycloak-no-ph',
        git_login='git-no-ph',
    )