pr comments

Author: Swifty
Date: 2026-02-09 12:29:53 +01:00
parent 17cafff60c
commit 46c65cb567

2 changed files with 23 additions and 40 deletions

File 1 of 2: graph update endpoint

@@ -7,7 +7,6 @@ from collections import defaultdict
 from datetime import datetime, timezone
 from typing import Annotated, Any, Sequence, get_args
 
-import prisma.models
 import pydantic
 import stripe
 from autogpt_libs.auth import get_user_id, requires_user
@@ -829,43 +828,27 @@ async def update_graph(
     existing_versions = await graph_db.get_graph_all_versions(graph_id, user_id=user_id)
     if not existing_versions:
         # User doesn't own this graph -- check if they have it in their library
-        # (e.g. added from the marketplace). If so, fork it with their edits applied.
-        library_agent = await prisma.models.LibraryAgent.prisma().find_first(
-            where={
-                "userId": user_id,
-                "agentGraphId": graph_id,
-                "isDeleted": False,
-            }
+        # (e.g. added from the marketplace). If so, fork it and apply their edits.
+        library_agent = await library_db.get_library_agent_by_graph_id(
+            user_id=user_id, graph_id=graph_id
         )
         if not library_agent:
             raise HTTPException(404, detail=f"Graph #{graph_id} not found")
 
-        # Fork: apply the user's edits to a new user-owned graph
-        graph.version = 1
-        graph.is_active = True
-        forked = graph_db.make_graph_model(graph, user_id)
-        forked.forked_from_id = graph_id
-        forked.forked_from_version = library_agent.agentGraphVersion
-        forked.reassign_ids(user_id=user_id, reassign_graph_id=True)
-        forked.validate_graph(for_run=False)
-        new_graph_version = await graph_db.create_graph(forked, user_id=user_id)
-        new_graph_version = await on_graph_activate(new_graph_version, user_id=user_id)
-
+        # Fork the marketplace agent to create a user-owned copy
+        forked = await graph_db.fork_graph(
+            graph_id, library_agent.graph_version, user_id
+        )
+        forked = await on_graph_activate(forked, user_id=user_id)
         await graph_db.set_graph_active_version(
-            graph_id=new_graph_version.id,
-            version=new_graph_version.version,
-            user_id=user_id,
+            graph_id=forked.id, version=forked.version, user_id=user_id
         )
-        await library_db.create_library_agent(new_graph_version, user_id)
+        await library_db.create_library_agent(forked, user_id)
 
-        new_graph_with_subgraphs = await graph_db.get_graph(
-            new_graph_version.id,
-            new_graph_version.version,
-            user_id=user_id,
-            include_subgraphs=True,
-        )
-        assert new_graph_with_subgraphs
-        return new_graph_with_subgraphs
+        # Apply the user's edits on top of the fork via the normal update path
+        graph_id = forked.id
+        graph.id = forked.id
+        existing_versions = [forked]
 
     graph.version = max(g.version for g in existing_versions) + 1
     current_active_version = next((v for v in existing_versions if v.is_active), None)
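
Note on the refactor: the old inline code baked the user's edits directly into the fork and returned early, whereas the new code forks a clean copy via graph_db.fork_graph and then falls through to the normal update path (graph.version = max(...) + 1 above), so the edits land as a new version on top of the fork. Below is a rough sketch of what fork_graph presumably encapsulates, inferred from the inline logic this commit removes; helper names and signatures beyond those visible in the diff are assumptions, not the project's actual code.

    # Hypothetical sketch of graph_db.fork_graph, reconstructed from the
    # removed inline logic above; load_graph/save_graph are stand-in names.
    async def fork_graph(graph_id: str, graph_version: int, user_id: str):
        # Load the source graph at the version the user's library points to
        source = await load_graph(graph_id, graph_version)  # assumed loader

        # Record provenance, then restart versioning under the new owner
        source.forked_from_id = graph_id
        source.forked_from_version = graph_version
        source.version = 1
        source.is_active = True

        # Fresh IDs so the copy doesn't collide with the marketplace original
        source.reassign_ids(user_id=user_id, reassign_graph_id=True)
        source.validate_graph(for_run=False)

        # Persist the copy as a brand-new user-owned graph
        return await save_graph(source, user_id=user_id)  # assumed writer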

File 2 of 2: LLM block helpers

@@ -531,12 +531,12 @@ class LLMResponse(BaseModel):
 
 def convert_openai_tool_fmt_to_anthropic(
     openai_tools: list[dict] | None = None,
-) -> Iterable[ToolParam] | anthropic.Omit:
+) -> Iterable[ToolParam] | anthropic.NotGiven:
     """
     Convert OpenAI tool format to Anthropic tool format.
     """
     if not openai_tools or len(openai_tools) == 0:
-        return anthropic.omit
+        return anthropic.NOT_GIVEN
 
     anthropic_tools = []
     for tool in openai_tools:
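
For reference, these are the two formats the function maps between (the conversion loop continues past the end of the hunk): OpenAI nests the tool name and its JSON Schema under a "function" key, while Anthropic's ToolParam is flat and calls the schema input_schema. A minimal before/after pair with a made-up get_weather tool:

    # OpenAI tool format (input): name and schema nested under "function"
    openai_tool = {
        "type": "function",
        "function": {
            "name": "get_weather",
            "description": "Get the current weather for a city.",
            "parameters": {
                "type": "object",
                "properties": {"city": {"type": "string"}},
                "required": ["city"],
            },
        },
    }

    # Anthropic ToolParam (output): flat, schema renamed to "input_schema"
    anthropic_tool = {
        "name": "get_weather",
        "description": "Get the current weather for a city.",
        "input_schema": {
            "type": "object",
            "properties": {"city": {"type": "string"}},
            "required": ["city"],
        },
    }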
@@ -596,10 +596,10 @@ def extract_openai_tool_calls(response) -> list[ToolContentBlock] | None:
 
 def get_parallel_tool_calls_param(
     llm_model: LlmModel, parallel_tool_calls: bool | None
-) -> bool | openai.Omit:
+) -> bool | openai.NotGiven:
     """Get the appropriate parallel_tool_calls parameter for OpenAI-compatible APIs."""
     if llm_model.startswith("o") or parallel_tool_calls is None:
-        return openai.omit
+        return openai.NOT_GIVEN
     return parallel_tool_calls
 
 
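
Both SDKs treat NOT_GIVEN as "leave this field out of the request body entirely", which is not the same as passing None (an explicit JSON null). That is the point of this helper: for models whose names start with "o", the parallel_tool_calls field is withheld rather than sent. A small usage sketch; the model name is a placeholder:

    from openai import OpenAI, NOT_GIVEN

    client = OpenAI()

    # NOT_GIVEN drops the field from the serialized request, unlike None,
    # which would be sent as an explicit null and can be rejected.
    resp = client.chat.completions.create(
        model="gpt-4o",  # placeholder model name
        messages=[{"role": "user", "content": "Hello"}],
        parallel_tool_calls=NOT_GIVEN,  # same as not passing the argument
    )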
@@ -676,7 +676,7 @@ async def llm_call(
             response_format=response_format,  # type: ignore
             max_completion_tokens=max_tokens,
             tools=tools_param,  # type: ignore
-            parallel_tool_calls=parallel_tool_calls,
+            parallel_tool_calls=parallel_tool_calls,  # type: ignore
         )
 
         tool_calls = extract_openai_tool_calls(response)
@@ -722,7 +722,7 @@ async def llm_call(
             system=sysprompt,
             messages=messages,
             max_tokens=max_tokens,
-            tools=an_tools,
+            tools=an_tools,  # type: ignore
             timeout=600,
         )
 
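
The Anthropic client treats its NOT_GIVEN the same way, which is what lets the empty-input return value of convert_openai_tool_fmt_to_anthropic (from the hunk above) flow straight into messages.create. A sketch under that assumption, with a placeholder model name:

    import anthropic

    client = anthropic.Anthropic()

    # With no tools configured, the converter returns anthropic.NOT_GIVEN
    # and the SDK omits the "tools" field from the request entirely.
    msg = client.messages.create(
        model="claude-sonnet-4-20250514",  # placeholder model name
        max_tokens=1024,
        messages=[{"role": "user", "content": "Hello"}],
        tools=convert_openai_tool_fmt_to_anthropic(None),
    )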
@@ -838,7 +838,7 @@ async def llm_call(
             messages=prompt,  # type: ignore
             max_tokens=max_tokens,
             tools=tools_param,  # type: ignore
-            parallel_tool_calls=parallel_tool_calls_param,
+            parallel_tool_calls=parallel_tool_calls_param,  # type: ignore
         )
 
         # If there's no response, raise an error
@@ -880,7 +880,7 @@ async def llm_call(
             messages=prompt,  # type: ignore
             max_tokens=max_tokens,
             tools=tools_param,  # type: ignore
-            parallel_tool_calls=parallel_tool_calls_param,
+            parallel_tool_calls=parallel_tool_calls_param,  # type: ignore
         )
 
         # If there's no response, raise an error
@@ -951,7 +951,7 @@ async def llm_call(
             response_format=response_format,  # type: ignore
             max_tokens=max_tokens,
             tools=tools_param,  # type: ignore
-            parallel_tool_calls=parallel_tool_calls_param,
+            parallel_tool_calls=parallel_tool_calls_param,  # type: ignore
         )
 
         tool_calls = extract_openai_tool_calls(response)