Mirror of https://github.com/Significant-Gravitas/AutoGPT.git (synced 2026-01-10 07:38:04 -05:00)
Merge remote-tracking branch 'origin/dev' into lluisagusti/open-2925-inconsistent-buttonicon-styling-in-action-button-bar
@@ -39,7 +39,7 @@ import backend.data.user
 import backend.integrations.webhooks.utils
 import backend.util.service
 import backend.util.settings
-from backend.blocks.llm import LlmModel
+from backend.blocks.llm import DEFAULT_LLM_MODEL
 from backend.data.model import Credentials
 from backend.integrations.providers import ProviderName
 from backend.monitoring.instrumentation import instrument_fastapi
@@ -113,7 +113,7 @@ async def lifespan_context(app: fastapi.FastAPI):
 
     await backend.data.user.migrate_and_encrypt_user_integrations()
     await backend.data.graph.fix_llm_provider_credentials()
-    await backend.data.graph.migrate_llm_models(LlmModel.GPT4O)
+    await backend.data.graph.migrate_llm_models(DEFAULT_LLM_MODEL)
     await backend.integrations.webhooks.utils.migrate_legacy_triggered_graphs()
 
     with launch_darkly_context():
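
For context, migrate_llm_models() runs in the startup lifespan and repoints stored graphs at the new default; its implementation is not part of this diff. A hypothetical sketch of such a migration, with helper names assumed for illustration only:

    # Hypothetical sketch -- not the repo's backend.data.graph.migrate_llm_models.
    # Assumes LLM nodes keep their chosen model in an input_default mapping.
    async def migrate_llm_models(default_model: LlmModel) -> None:
        valid_values = {m.value for m in LlmModel}
        for node in await get_all_llm_nodes():  # assumed helper
            if node.input_default.get("model") not in valid_values:
                node.input_default["model"] = default_model.value
                await save_node(node)  # assumed helper
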
@@ -1,6 +1,7 @@
 from typing import Any
 
 from backend.blocks.llm import (
+    DEFAULT_LLM_MODEL,
     TEST_CREDENTIALS,
     TEST_CREDENTIALS_INPUT,
     AIBlockBase,
@@ -49,7 +50,7 @@ class AIConditionBlock(AIBlockBase):
         )
         model: LlmModel = SchemaField(
             title="LLM Model",
-            default=LlmModel.GPT4O,
+            default=DEFAULT_LLM_MODEL,
             description="The language model to use for evaluating the condition.",
             advanced=False,
         )
@@ -81,7 +82,7 @@ class AIConditionBlock(AIBlockBase):
                 "condition": "the input is an email address",
                 "yes_value": "Valid email",
                 "no_value": "Not an email",
-                "model": LlmModel.GPT4O,
+                "model": DEFAULT_LLM_MODEL,
                 "credentials": TEST_CREDENTIALS_INPUT,
             },
             test_credentials=TEST_CREDENTIALS,
autogpt_platform/backend/backend/blocks/google/docs.py (new file, 2896 lines)
File diff suppressed because it is too large
@@ -92,8 +92,9 @@ class LlmModel(str, Enum, metaclass=LlmModelMeta):
     O1 = "o1"
     O1_MINI = "o1-mini"
     # GPT-5 models
-    GPT5 = "gpt-5-2025-08-07"
+    GPT5_2 = "gpt-5.2-2025-12-11"
+    GPT5_1 = "gpt-5.1-2025-11-13"
+    GPT5 = "gpt-5-2025-08-07"
     GPT5_MINI = "gpt-5-mini-2025-08-07"
     GPT5_NANO = "gpt-5-nano-2025-08-07"
     GPT5_CHAT = "gpt-5-chat-latest"
@@ -194,8 +195,9 @@ MODEL_METADATA = {
     LlmModel.O1: ModelMetadata("openai", 200000, 100000),  # o1-2024-12-17
     LlmModel.O1_MINI: ModelMetadata("openai", 128000, 65536),  # o1-mini-2024-09-12
     # GPT-5 models
-    LlmModel.GPT5: ModelMetadata("openai", 400000, 128000),
+    LlmModel.GPT5_2: ModelMetadata("openai", 400000, 128000),
+    LlmModel.GPT5_1: ModelMetadata("openai", 400000, 128000),
+    LlmModel.GPT5: ModelMetadata("openai", 400000, 128000),
     LlmModel.GPT5_MINI: ModelMetadata("openai", 400000, 128000),
     LlmModel.GPT5_NANO: ModelMetadata("openai", 400000, 128000),
     LlmModel.GPT5_CHAT: ModelMetadata("openai", 400000, 16384),
@@ -303,6 +305,8 @@ MODEL_METADATA = {
     LlmModel.V0_1_0_MD: ModelMetadata("v0", 128000, 64000),
 }
 
+DEFAULT_LLM_MODEL = LlmModel.GPT5_2
+
 for model in LlmModel:
     if model not in MODEL_METADATA:
         raise ValueError(f"Missing MODEL_METADATA metadata for model: {model}")
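
This constant is the pivot of the whole change: every SchemaField default and test input now reads DEFAULT_LLM_MODEL instead of hard-coding LlmModel.GPT4O, so the next default bump is a one-line edit here. A minimal self-contained sketch of the pattern (metadata trimmed; the NamedTuple field names are assumptions):

    from enum import Enum
    from typing import NamedTuple

    class ModelMetadata(NamedTuple):
        provider: str
        context_window: int
        max_output_tokens: int

    class LlmModel(str, Enum):
        GPT5_2 = "gpt-5.2-2025-12-11"
        GPT5 = "gpt-5-2025-08-07"

    MODEL_METADATA = {
        LlmModel.GPT5_2: ModelMetadata("openai", 400000, 128000),
        LlmModel.GPT5: ModelMetadata("openai", 400000, 128000),
    }

    # Single source of truth for the platform default.
    DEFAULT_LLM_MODEL = LlmModel.GPT5_2

    # Import-time guard: a new enum member without metadata fails fast.
    for model in LlmModel:
        if model not in MODEL_METADATA:
            raise ValueError(f"Missing MODEL_METADATA metadata for model: {model}")
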
@@ -790,7 +794,7 @@ class AIStructuredResponseGeneratorBlock(AIBlockBase):
         )
         model: LlmModel = SchemaField(
             title="LLM Model",
-            default=LlmModel.GPT4O,
+            default=DEFAULT_LLM_MODEL,
             description="The language model to use for answering the prompt.",
             advanced=False,
         )
@@ -855,7 +859,7 @@ class AIStructuredResponseGeneratorBlock(AIBlockBase):
             input_schema=AIStructuredResponseGeneratorBlock.Input,
             output_schema=AIStructuredResponseGeneratorBlock.Output,
             test_input={
-                "model": LlmModel.GPT4O,
+                "model": DEFAULT_LLM_MODEL,
                 "credentials": TEST_CREDENTIALS_INPUT,
                 "expected_format": {
                     "key1": "value1",
@@ -1221,7 +1225,7 @@ class AITextGeneratorBlock(AIBlockBase):
         )
         model: LlmModel = SchemaField(
             title="LLM Model",
-            default=LlmModel.GPT4O,
+            default=DEFAULT_LLM_MODEL,
             description="The language model to use for answering the prompt.",
             advanced=False,
         )
@@ -1317,7 +1321,7 @@ class AITextSummarizerBlock(AIBlockBase):
         )
         model: LlmModel = SchemaField(
             title="LLM Model",
-            default=LlmModel.GPT4O,
+            default=DEFAULT_LLM_MODEL,
             description="The language model to use for summarizing the text.",
         )
         focus: str = SchemaField(
@@ -1534,7 +1538,7 @@ class AIConversationBlock(AIBlockBase):
         )
         model: LlmModel = SchemaField(
             title="LLM Model",
-            default=LlmModel.GPT4O,
+            default=DEFAULT_LLM_MODEL,
             description="The language model to use for the conversation.",
         )
         credentials: AICredentials = AICredentialsField()
@@ -1572,7 +1576,7 @@ class AIConversationBlock(AIBlockBase):
                     },
                     {"role": "user", "content": "Where was it played?"},
                 ],
-                "model": LlmModel.GPT4O,
+                "model": DEFAULT_LLM_MODEL,
                 "credentials": TEST_CREDENTIALS_INPUT,
             },
             test_credentials=TEST_CREDENTIALS,
@@ -1635,7 +1639,7 @@ class AIListGeneratorBlock(AIBlockBase):
         )
         model: LlmModel = SchemaField(
             title="LLM Model",
-            default=LlmModel.GPT4O,
+            default=DEFAULT_LLM_MODEL,
             description="The language model to use for generating the list.",
             advanced=True,
         )
@@ -1692,7 +1696,7 @@ class AIListGeneratorBlock(AIBlockBase):
                     "drawing explorers to uncover its mysteries. Each planet showcases the limitless possibilities of "
                     "fictional worlds."
                 ),
-                "model": LlmModel.GPT4O,
+                "model": DEFAULT_LLM_MODEL,
                 "credentials": TEST_CREDENTIALS_INPUT,
                 "max_retries": 3,
                 "force_json_output": False,
@@ -226,7 +226,7 @@ class SmartDecisionMakerBlock(Block):
         )
         model: llm.LlmModel = SchemaField(
             title="LLM Model",
-            default=llm.LlmModel.GPT4O,
+            default=llm.DEFAULT_LLM_MODEL,
             description="The language model to use for answering the prompt.",
             advanced=False,
         )
@@ -28,7 +28,7 @@ class TestLLMStatsTracking:
 
         response = await llm.llm_call(
             credentials=llm.TEST_CREDENTIALS,
-            llm_model=llm.LlmModel.GPT4O,
+            llm_model=llm.DEFAULT_LLM_MODEL,
             prompt=[{"role": "user", "content": "Hello"}],
             max_tokens=100,
         )
@@ -65,7 +65,7 @@ class TestLLMStatsTracking:
         input_data = llm.AIStructuredResponseGeneratorBlock.Input(
             prompt="Test prompt",
             expected_format={"key1": "desc1", "key2": "desc2"},
-            model=llm.LlmModel.GPT4O,
+            model=llm.DEFAULT_LLM_MODEL,
             credentials=llm.TEST_CREDENTIALS_INPUT,  # type: ignore
         )
@@ -109,7 +109,7 @@ class TestLLMStatsTracking:
         # Run the block
         input_data = llm.AITextGeneratorBlock.Input(
             prompt="Generate text",
-            model=llm.LlmModel.GPT4O,
+            model=llm.DEFAULT_LLM_MODEL,
             credentials=llm.TEST_CREDENTIALS_INPUT,  # type: ignore
         )
@@ -170,7 +170,7 @@ class TestLLMStatsTracking:
         input_data = llm.AIStructuredResponseGeneratorBlock.Input(
             prompt="Test prompt",
             expected_format={"key1": "desc1", "key2": "desc2"},
-            model=llm.LlmModel.GPT4O,
+            model=llm.DEFAULT_LLM_MODEL,
             credentials=llm.TEST_CREDENTIALS_INPUT,  # type: ignore
             retry=2,
         )
@@ -228,7 +228,7 @@ class TestLLMStatsTracking:
 
         input_data = llm.AITextSummarizerBlock.Input(
             text=long_text,
-            model=llm.LlmModel.GPT4O,
+            model=llm.DEFAULT_LLM_MODEL,
             credentials=llm.TEST_CREDENTIALS_INPUT,  # type: ignore
             max_tokens=100,  # Small chunks
             chunk_overlap=10,
@@ -299,7 +299,7 @@ class TestLLMStatsTracking:
         # Test with very short text (should only need 1 chunk + 1 final summary)
         input_data = llm.AITextSummarizerBlock.Input(
             text="This is a short text.",
-            model=llm.LlmModel.GPT4O,
+            model=llm.DEFAULT_LLM_MODEL,
             credentials=llm.TEST_CREDENTIALS_INPUT,  # type: ignore
             max_tokens=1000,  # Large enough to avoid chunking
         )
@@ -346,7 +346,7 @@ class TestLLMStatsTracking:
                 {"role": "assistant", "content": "Hi there!"},
                 {"role": "user", "content": "How are you?"},
             ],
-            model=llm.LlmModel.GPT4O,
+            model=llm.DEFAULT_LLM_MODEL,
             credentials=llm.TEST_CREDENTIALS_INPUT,  # type: ignore
         )
@@ -387,7 +387,7 @@ class TestLLMStatsTracking:
         # Run the block
         input_data = llm.AIListGeneratorBlock.Input(
             focus="test items",
-            model=llm.LlmModel.GPT4O,
+            model=llm.DEFAULT_LLM_MODEL,
             credentials=llm.TEST_CREDENTIALS_INPUT,  # type: ignore
             max_retries=3,
         )
@@ -469,7 +469,7 @@ class TestLLMStatsTracking:
         input_data = llm.AIStructuredResponseGeneratorBlock.Input(
             prompt="Test",
             expected_format={"result": "desc"},
-            model=llm.LlmModel.GPT4O,
+            model=llm.DEFAULT_LLM_MODEL,
             credentials=llm.TEST_CREDENTIALS_INPUT,  # type: ignore
         )
@@ -513,7 +513,7 @@ class TestAITextSummarizerValidation:
         # Create input data
         input_data = llm.AITextSummarizerBlock.Input(
             text="Some text to summarize",
-            model=llm.LlmModel.GPT4O,
+            model=llm.DEFAULT_LLM_MODEL,
             credentials=llm.TEST_CREDENTIALS_INPUT,  # type: ignore
             style=llm.SummaryStyle.BULLET_POINTS,
         )
@@ -558,7 +558,7 @@ class TestAITextSummarizerValidation:
         # Create input data
         input_data = llm.AITextSummarizerBlock.Input(
             text="Some text to summarize",
-            model=llm.LlmModel.GPT4O,
+            model=llm.DEFAULT_LLM_MODEL,
             credentials=llm.TEST_CREDENTIALS_INPUT,  # type: ignore
             style=llm.SummaryStyle.BULLET_POINTS,
             max_tokens=1000,
@@ -593,7 +593,7 @@ class TestAITextSummarizerValidation:
         # Create input data
         input_data = llm.AITextSummarizerBlock.Input(
             text="Some text to summarize",
-            model=llm.LlmModel.GPT4O,
+            model=llm.DEFAULT_LLM_MODEL,
             credentials=llm.TEST_CREDENTIALS_INPUT,  # type: ignore
         )
@@ -623,7 +623,7 @@ class TestAITextSummarizerValidation:
         # Create input data
         input_data = llm.AITextSummarizerBlock.Input(
             text="Some text to summarize",
-            model=llm.LlmModel.GPT4O,
+            model=llm.DEFAULT_LLM_MODEL,
             credentials=llm.TEST_CREDENTIALS_INPUT,  # type: ignore
             max_tokens=1000,
         )
@@ -654,7 +654,7 @@ class TestAITextSummarizerValidation:
         # Create input data
         input_data = llm.AITextSummarizerBlock.Input(
             text="Some text to summarize",
-            model=llm.LlmModel.GPT4O,
+            model=llm.DEFAULT_LLM_MODEL,
             credentials=llm.TEST_CREDENTIALS_INPUT,  # type: ignore
         )
@@ -233,7 +233,7 @@ async def test_smart_decision_maker_tracks_llm_stats():
     # Create test input
     input_data = SmartDecisionMakerBlock.Input(
         prompt="Should I continue with this task?",
-        model=llm_module.LlmModel.GPT4O,
+        model=llm_module.DEFAULT_LLM_MODEL,
         credentials=llm_module.TEST_CREDENTIALS_INPUT,  # type: ignore
         agent_mode_max_iterations=0,
     )
@@ -335,7 +335,7 @@ async def test_smart_decision_maker_parameter_validation():
 
     input_data = SmartDecisionMakerBlock.Input(
         prompt="Search for keywords",
-        model=llm_module.LlmModel.GPT4O,
+        model=llm_module.DEFAULT_LLM_MODEL,
         credentials=llm_module.TEST_CREDENTIALS_INPUT,  # type: ignore
         retry=2,  # Set retry to 2 for testing
         agent_mode_max_iterations=0,
@@ -402,7 +402,7 @@ async def test_smart_decision_maker_parameter_validation():
 
     input_data = SmartDecisionMakerBlock.Input(
         prompt="Search for keywords",
-        model=llm_module.LlmModel.GPT4O,
+        model=llm_module.DEFAULT_LLM_MODEL,
         credentials=llm_module.TEST_CREDENTIALS_INPUT,  # type: ignore
         agent_mode_max_iterations=0,
     )
@@ -462,7 +462,7 @@ async def test_smart_decision_maker_parameter_validation():
 
     input_data = SmartDecisionMakerBlock.Input(
         prompt="Search for keywords",
-        model=llm_module.LlmModel.GPT4O,
+        model=llm_module.DEFAULT_LLM_MODEL,
         credentials=llm_module.TEST_CREDENTIALS_INPUT,  # type: ignore
         agent_mode_max_iterations=0,
     )
@@ -526,7 +526,7 @@ async def test_smart_decision_maker_parameter_validation():
 
     input_data = SmartDecisionMakerBlock.Input(
         prompt="Search for keywords",
-        model=llm_module.LlmModel.GPT4O,
+        model=llm_module.DEFAULT_LLM_MODEL,
         credentials=llm_module.TEST_CREDENTIALS_INPUT,  # type: ignore
         agent_mode_max_iterations=0,
     )
@@ -648,7 +648,7 @@ async def test_smart_decision_maker_raw_response_conversion():
 
     input_data = SmartDecisionMakerBlock.Input(
         prompt="Test prompt",
-        model=llm_module.LlmModel.GPT4O,
+        model=llm_module.DEFAULT_LLM_MODEL,
         credentials=llm_module.TEST_CREDENTIALS_INPUT,  # type: ignore
         retry=2,
         agent_mode_max_iterations=0,
@@ -722,7 +722,7 @@ async def test_smart_decision_maker_raw_response_conversion():
     ):
         input_data = SmartDecisionMakerBlock.Input(
             prompt="Simple prompt",
-            model=llm_module.LlmModel.GPT4O,
+            model=llm_module.DEFAULT_LLM_MODEL,
             credentials=llm_module.TEST_CREDENTIALS_INPUT,  # type: ignore
             agent_mode_max_iterations=0,
         )
@@ -778,7 +778,7 @@ async def test_smart_decision_maker_raw_response_conversion():
     ):
         input_data = SmartDecisionMakerBlock.Input(
             prompt="Another test",
-            model=llm_module.LlmModel.GPT4O,
+            model=llm_module.DEFAULT_LLM_MODEL,
             credentials=llm_module.TEST_CREDENTIALS_INPUT,  # type: ignore
             agent_mode_max_iterations=0,
         )
@@ -931,7 +931,7 @@ async def test_smart_decision_maker_agent_mode():
     # Test agent mode with max_iterations = 3
     input_data = SmartDecisionMakerBlock.Input(
         prompt="Complete this task using tools",
-        model=llm_module.LlmModel.GPT4O,
+        model=llm_module.DEFAULT_LLM_MODEL,
         credentials=llm_module.TEST_CREDENTIALS_INPUT,  # type: ignore
         agent_mode_max_iterations=3,  # Enable agent mode with 3 max iterations
     )
@@ -1020,7 +1020,7 @@ async def test_smart_decision_maker_traditional_mode_default():
     # Test default behavior (traditional mode)
     input_data = SmartDecisionMakerBlock.Input(
         prompt="Test prompt",
-        model=llm_module.LlmModel.GPT4O,
+        model=llm_module.DEFAULT_LLM_MODEL,
         credentials=llm_module.TEST_CREDENTIALS_INPUT,  # type: ignore
         agent_mode_max_iterations=0,  # Traditional mode
     )
@@ -373,7 +373,7 @@ async def test_output_yielding_with_dynamic_fields():
     input_data = block.input_schema(
         prompt="Create a user dictionary",
         credentials=llm.TEST_CREDENTIALS_INPUT,
-        model=llm.LlmModel.GPT4O,
+        model=llm.DEFAULT_LLM_MODEL,
         agent_mode_max_iterations=0,  # Use traditional mode to test output yielding
     )
@@ -594,7 +594,7 @@ async def test_validation_errors_dont_pollute_conversation():
     input_data = block.input_schema(
         prompt="Test prompt",
         credentials=llm.TEST_CREDENTIALS_INPUT,
-        model=llm.LlmModel.GPT4O,
+        model=llm.DEFAULT_LLM_MODEL,
         retry=3,  # Allow retries
         agent_mode_max_iterations=1,
     )
@@ -111,6 +111,8 @@ class TranscribeYoutubeVideoBlock(Block):
             return parsed_url.path.split("/")[2]
         if parsed_url.path[:3] == "/v/":
             return parsed_url.path.split("/")[2]
+        if parsed_url.path.startswith("/shorts/"):
+            return parsed_url.path.split("/")[2]
         raise ValueError(f"Invalid YouTube URL: {url}")
 
     def get_transcript(
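
The two added lines teach the parser about Shorts URLs. A compact, runnable sketch of the dispatch this hunk extends (simplified: the real block also handles youtu.be and other URL variants):

    from urllib.parse import parse_qs, urlparse

    def extract_video_id(url: str) -> str:
        parsed_url = urlparse(url)
        if parsed_url.path == "/watch":
            return parse_qs(parsed_url.query)["v"][0]
        if parsed_url.path.startswith("/embed/") or parsed_url.path[:3] == "/v/":
            return parsed_url.path.split("/")[2]
        if parsed_url.path.startswith("/shorts/"):
            # /shorts/<id>?feature=share -- query params live in parsed_url.query,
            # so split("/")[2] yields a clean video ID.
            return parsed_url.path.split("/")[2]
        raise ValueError(f"Invalid YouTube URL: {url}")

    assert extract_video_id("https://www.youtube.com/shorts/dtUqwMu3e-g?feature=share") == "dtUqwMu3e-g"
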
@@ -59,12 +59,13 @@ from backend.integrations.credentials_store import (
 
 MODEL_COST: dict[LlmModel, int] = {
     LlmModel.O3: 4,
-    LlmModel.O3_MINI: 2,  # $1.10 / $4.40
-    LlmModel.O1: 16,  # $15 / $60
+    LlmModel.O3_MINI: 2,
+    LlmModel.O1: 16,
     LlmModel.O1_MINI: 4,
     # GPT-5 models
-    LlmModel.GPT5: 2,
+    LlmModel.GPT5_2: 6,
+    LlmModel.GPT5_1: 5,
+    LlmModel.GPT5: 2,
     LlmModel.GPT5_MINI: 1,
     LlmModel.GPT5_NANO: 1,
     LlmModel.GPT5_CHAT: 5,
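
Note that MODEL_COST is a second table keyed by the same enum, so each new model (GPT5_2 at 6 credits, GPT5_1 at 5 here) must land in both MODEL_METADATA and MODEL_COST. The diff only shows an import-time guard for the metadata table; an analogous guard for the cost table would look like the sketch below (a suggestion, not code shown in this diff):

    for model in LlmModel:
        if model not in MODEL_COST:
            raise ValueError(f"Missing MODEL_COST entry for model: {model}")
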
@@ -87,7 +88,7 @@ MODEL_COST: dict[LlmModel, int] = {
     LlmModel.AIML_API_LLAMA3_3_70B: 1,
     LlmModel.AIML_API_META_LLAMA_3_1_70B: 1,
     LlmModel.AIML_API_LLAMA_3_2_3B: 1,
-    LlmModel.LLAMA3_3_70B: 1,  # $0.59 / $0.79
+    LlmModel.LLAMA3_3_70B: 1,
     LlmModel.LLAMA3_1_8B: 1,
     LlmModel.OLLAMA_LLAMA3_3: 1,
     LlmModel.OLLAMA_LLAMA3_2: 1,
autogpt_platform/backend/poetry.lock (generated, 18 lines changed)
@@ -1906,6 +1906,22 @@ httpx = {version = ">=0.26,<0.29", extras = ["http2"]}
 pydantic = ">=1.10,<3"
 pyjwt = ">=2.10.1,<3.0.0"
 
+[[package]]
+name = "gravitas-md2gdocs"
+version = "0.1.0"
+description = "Convert Markdown to Google Docs API requests"
+optional = false
+python-versions = ">=3.10"
+groups = ["main"]
+files = [
+    {file = "gravitas_md2gdocs-0.1.0-py3-none-any.whl", hash = "sha256:0cb0627779fdd65c1604818af4142eea1b25d055060183363de1bae4d9e46508"},
+    {file = "gravitas_md2gdocs-0.1.0.tar.gz", hash = "sha256:bb3122fe9fa35c528f3f00b785d3f1398d350082d5d03f60f56c895bdcc68033"},
+]
+
+[package.extras]
+dev = ["google-auth-oauthlib (>=1.0.0)", "pytest (>=7.0.0)", "pytest-cov (>=4.0.0)", "python-dotenv (>=1.0.0)", "ruff (>=0.1.0)"]
+google = ["google-api-python-client (>=2.0.0)", "google-auth (>=2.0.0)"]
+
 [[package]]
 name = "gravitasml"
 version = "0.1.3"
@@ -7279,4 +7295,4 @@ cffi = ["cffi (>=1.11)"]
 [metadata]
 lock-version = "2.1"
 python-versions = ">=3.10,<3.14"
-content-hash = "13b191b2a1989d3321ff713c66ff6f5f4f3b82d15df4d407e0e5dbf87d7522c4"
+content-hash = "b762806d5d58fcf811220890c4705a16dc62b33387af43e3a29399c62a641098"
@@ -82,6 +82,7 @@ firecrawl-py = "^4.3.6"
 exa-py = "^1.14.20"
 croniter = "^6.0.0"
 stagehand = "^0.5.1"
+gravitas-md2gdocs = "^0.1.0"
 
 [tool.poetry.group.dev.dependencies]
 aiohappyeyeballs = "^2.6.1"
@@ -0,0 +1,113 @@
+from unittest.mock import Mock
+
+from backend.blocks.google.docs import GoogleDocsFormatTextBlock
+
+
+def _make_mock_docs_service() -> Mock:
+    service = Mock()
+    # Ensure chained call exists: service.documents().batchUpdate(...).execute()
+    service.documents.return_value.batchUpdate.return_value.execute.return_value = {}
+    return service
+
+
+def test_format_text_parses_shorthand_hex_color():
+    block = GoogleDocsFormatTextBlock()
+    service = _make_mock_docs_service()
+
+    result = block._format_text(
+        service,
+        document_id="doc_1",
+        start_index=1,
+        end_index=2,
+        bold=False,
+        italic=False,
+        underline=False,
+        font_size=0,
+        foreground_color="#FFF",
+    )
+
+    assert result["success"] is True
+
+    # Verify request body contains correct rgbColor for white.
+    _, kwargs = service.documents.return_value.batchUpdate.call_args
+    requests = kwargs["body"]["requests"]
+    rgb = requests[0]["updateTextStyle"]["textStyle"]["foregroundColor"]["color"][
+        "rgbColor"
+    ]
+    assert rgb == {"red": 1.0, "green": 1.0, "blue": 1.0}
+
+
+def test_format_text_parses_full_hex_color():
+    block = GoogleDocsFormatTextBlock()
+    service = _make_mock_docs_service()
+
+    result = block._format_text(
+        service,
+        document_id="doc_1",
+        start_index=1,
+        end_index=2,
+        bold=False,
+        italic=False,
+        underline=False,
+        font_size=0,
+        foreground_color="#FF0000",
+    )
+
+    assert result["success"] is True
+
+    _, kwargs = service.documents.return_value.batchUpdate.call_args
+    requests = kwargs["body"]["requests"]
+    rgb = requests[0]["updateTextStyle"]["textStyle"]["foregroundColor"]["color"][
+        "rgbColor"
+    ]
+    assert rgb == {"red": 1.0, "green": 0.0, "blue": 0.0}
+
+
+def test_format_text_ignores_invalid_color_when_other_fields_present():
+    block = GoogleDocsFormatTextBlock()
+    service = _make_mock_docs_service()
+
+    result = block._format_text(
+        service,
+        document_id="doc_1",
+        start_index=1,
+        end_index=2,
+        bold=True,
+        italic=False,
+        underline=False,
+        font_size=0,
+        foreground_color="#GGG",
+    )
+
+    assert result["success"] is True
+    assert "warning" in result
+
+    # Should still apply bold, but should NOT include foregroundColor in textStyle.
+    _, kwargs = service.documents.return_value.batchUpdate.call_args
+    requests = kwargs["body"]["requests"]
+    text_style = requests[0]["updateTextStyle"]["textStyle"]
+    fields = requests[0]["updateTextStyle"]["fields"]
+
+    assert text_style == {"bold": True}
+    assert fields == "bold"
+
+
+def test_format_text_invalid_color_only_does_not_call_api():
+    block = GoogleDocsFormatTextBlock()
+    service = _make_mock_docs_service()
+
+    result = block._format_text(
+        service,
+        document_id="doc_1",
+        start_index=1,
+        end_index=2,
+        bold=False,
+        italic=False,
+        underline=False,
+        font_size=0,
+        foreground_color="#F",
+    )
+
+    assert result["success"] is False
+    assert "Invalid foreground_color" in result["message"]
+    service.documents.return_value.batchUpdate.assert_not_called()
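
These tests pin down the color handling: "#FFF" expands to white, "#FF0000" parses as red, "#GGG" degrades to a warning when other styles are present, and "#F" alone is a hard failure. The parser itself lives in the suppressed 2,896-line docs.py; a sketch of logic that would satisfy the tests, with a hypothetical helper name:

    import re

    def parse_hex_color(value: str) -> dict[str, float] | None:
        """Hypothetical sketch: '#FFF' or '#FF0000' -> Docs API rgbColor dict."""
        value = value.lstrip("#")
        if re.fullmatch(r"[0-9a-fA-F]{3}", value):
            value = "".join(ch * 2 for ch in value)  # FFF -> FFFFFF
        elif not re.fullmatch(r"[0-9a-fA-F]{6}", value):
            return None  # caller decides: warn (other styles set) or fail outright
        r, g, b = (int(value[i : i + 2], 16) / 255 for i in (0, 2, 4))
        return {"red": r, "green": g, "blue": b}

    assert parse_hex_color("#FFF") == {"red": 1.0, "green": 1.0, "blue": 1.0}
    assert parse_hex_color("#FF0000") == {"red": 1.0, "green": 0.0, "blue": 0.0}
    assert parse_hex_color("#GGG") is None
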
@@ -37,6 +37,18 @@ class TestTranscribeYoutubeVideoBlock:
         video_id = self.youtube_block.extract_video_id(url)
         assert video_id == "dQw4w9WgXcQ"
 
+    def test_extract_video_id_shorts_url(self):
+        """Test extracting video ID from YouTube Shorts URL."""
+        url = "https://www.youtube.com/shorts/dtUqwMu3e-g"
+        video_id = self.youtube_block.extract_video_id(url)
+        assert video_id == "dtUqwMu3e-g"
+
+    def test_extract_video_id_shorts_url_with_params(self):
+        """Test extracting video ID from YouTube Shorts URL with query parameters."""
+        url = "https://www.youtube.com/shorts/dtUqwMu3e-g?feature=share"
+        video_id = self.youtube_block.extract_video_id(url)
+        assert video_id == "dtUqwMu3e-g"
+
     @patch("backend.blocks.youtube.YouTubeTranscriptApi")
     def test_get_transcript_english_available(self, mock_api_class):
         """Test getting transcript when English is available."""
@@ -1,27 +1,27 @@
-import { ReactFlow, Background } from "@xyflow/react";
-import NewControlPanel from "../../NewControlPanel/NewControlPanel";
-import CustomEdge from "../edges/CustomEdge";
-import { useFlow } from "./useFlow";
-import { useShallow } from "zustand/react/shallow";
-import { useNodeStore } from "../../../stores/nodeStore";
-import { useMemo, useCallback } from "react";
-import { CustomNode } from "../nodes/CustomNode/CustomNode";
-import { useCustomEdge } from "../edges/useCustomEdge";
-import { useFlowRealtime } from "./useFlowRealtime";
-import { GraphLoadingBox } from "./components/GraphLoadingBox";
-import { BuilderActions } from "../../BuilderActions/BuilderActions";
-import { RunningBackground } from "./components/RunningBackground";
-import { useGraphStore } from "../../../stores/graphStore";
-import { useCopyPaste } from "./useCopyPaste";
-import { FloatingReviewsPanel } from "@/components/organisms/FloatingReviewsPanel/FloatingReviewsPanel";
-import { parseAsString, useQueryStates } from "nuqs";
-import { CustomControls } from "./components/CustomControl";
-import { useGetV1GetSpecificGraph } from "@/app/api/__generated__/endpoints/graphs/graphs";
-import { okData } from "@/app/api/helpers";
+import { FloatingReviewsPanel } from "@/components/organisms/FloatingReviewsPanel/FloatingReviewsPanel";
+import { Background, ReactFlow } from "@xyflow/react";
+import { parseAsString, useQueryStates } from "nuqs";
+import { useCallback, useMemo } from "react";
+import { useShallow } from "zustand/react/shallow";
+import { useGraphStore } from "../../../stores/graphStore";
+import { useNodeStore } from "../../../stores/nodeStore";
+import { BuilderActions } from "../../BuilderActions/BuilderActions";
+import { DraftRecoveryPopup } from "../../DraftRecoveryDialog/DraftRecoveryPopup";
+import { FloatingSafeModeToggle } from "../../FloatingSafeModeToogle";
+import NewControlPanel from "../../NewControlPanel/NewControlPanel";
+import CustomEdge from "../edges/CustomEdge";
+import { useCustomEdge } from "../edges/useCustomEdge";
+import { CustomNode } from "../nodes/CustomNode/CustomNode";
+import { CustomControls } from "./components/CustomControl";
+import { GraphLoadingBox } from "./components/GraphLoadingBox";
+import { RunningBackground } from "./components/RunningBackground";
+import { TriggerAgentBanner } from "./components/TriggerAgentBanner";
+import { resolveCollisions } from "./helpers/resolve-collision";
+import { useCopyPaste } from "./useCopyPaste";
+import { useFlow } from "./useFlow";
+import { useFlowRealtime } from "./useFlowRealtime";
 
 export const Flow = () => {
   const [{ flowID, flowExecutionID }] = useQueryStates({
@@ -42,14 +42,18 @@ export const Flow = () => {
 
   const nodes = useNodeStore(useShallow((state) => state.nodes));
   const setNodes = useNodeStore(useShallow((state) => state.setNodes));
 
   const onNodesChange = useNodeStore(
     useShallow((state) => state.onNodesChange),
   );
 
+  const hasWebhookNodes = useNodeStore(
+    useShallow((state) => state.hasWebhookNodes()),
+  );
+
   const nodeTypes = useMemo(() => ({ custom: CustomNode }), []);
   const edgeTypes = useMemo(() => ({ custom: CustomEdge }), []);
 
   const onNodeDragStop = useCallback(() => {
     setNodes(
       resolveCollisions(nodes, {
@@ -80,6 +84,7 @@ export const Flow = () => {
   const isGraphRunning = useGraphStore(
     useShallow((state) => state.isGraphRunning),
   );
 
   return (
     <div className="flex h-full w-full dark:bg-slate-900">
       <div className="relative flex-1">
@@ -99,6 +104,7 @@ export const Flow = () => {
           nodesDraggable={!isLocked}
           nodesConnectable={!isLocked}
           elementsSelectable={!isLocked}
+          deleteKeyCode={["Backspace", "Delete"]}
         >
           <Background />
           <CustomControls setIsLocked={setIsLocked} isLocked={isLocked} />
@@ -2,6 +2,7 @@ import {
   ConnectionData,
   CustomNodeData,
 } from "@/app/(platform)/build/components/legacy-builder/CustomNode/CustomNode";
+import { NodeTableInput } from "@/app/(platform)/build/components/legacy-builder/NodeTableInput";
 import { CredentialsInput } from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/CredentialsInputs";
 import { Button } from "@/components/__legacy__/ui/button";
 import { Calendar } from "@/components/__legacy__/ui/calendar";
@@ -28,7 +29,6 @@ import {
 } from "@/components/__legacy__/ui/select";
 import { Switch } from "@/components/atoms/Switch/Switch";
 import { GoogleDrivePickerInput } from "@/components/contextual/GoogleDrivePicker/GoogleDrivePickerInput";
-import { NodeTableInput } from "@/components/node-table-input";
 import {
   BlockIOArraySubSchema,
   BlockIOBooleanSubSchema,
@@ -1,15 +1,15 @@
-import React, { FC, useCallback, useEffect, useState } from "react";
+import { FC, useCallback, useEffect, useState } from "react";
 
-import { PlusIcon, XIcon } from "@phosphor-icons/react";
-import { cn } from "@/lib/utils";
 import NodeHandle from "@/app/(platform)/build/components/legacy-builder/NodeHandle";
 import {
   BlockIOTableSubSchema,
-  TableRow,
   TableCellValue,
+  TableRow,
 } from "@/lib/autogpt-server-api/types";
-import { Input } from "./atoms/Input/Input";
-import { Button } from "./atoms/Button/Button";
+import { cn } from "@/lib/utils";
+import { PlusIcon, XIcon } from "@phosphor-icons/react";
+import { Button } from "../../../../../components/atoms/Button/Button";
+import { Input } from "../../../../../components/atoms/Input/Input";
 
 interface NodeTableInputProps {
   /** Unique identifier for the node in the builder graph */
@@ -115,10 +115,11 @@ export const useEdgeStore = create<EdgeStore>((set, get) => ({
           edge.data?.beadData ??
           new Map<string, NodeExecutionResult["status"]>();
 
-        if (
-          edge.targetHandle &&
-          edge.targetHandle in executionResult.input_data
-        ) {
+        const inputValue = edge.targetHandle
+          ? executionResult.input_data[edge.targetHandle]
+          : undefined;
+
+        if (inputValue !== undefined && inputValue !== null) {
           beadData.set(executionResult.node_exec_id, executionResult.status);
         }
@@ -1,15 +1,15 @@
-import { Metadata } from "next";
-import { getServerUser } from "@/lib/supabase/server/getServerUser";
 import { prefetchGetV2GetAgentByStoreIdQuery } from "@/app/api/__generated__/endpoints/library/library";
 import {
   getV2GetSpecificAgent,
   prefetchGetV2GetSpecificAgentQuery,
   prefetchGetV2ListStoreAgentsQuery,
 } from "@/app/api/__generated__/endpoints/store/store";
 import { StoreAgentDetails } from "@/app/api/__generated__/models/storeAgentDetails";
-import { MainAgentPage } from "../../../components/MainAgentPage/MainAgentPage";
 import { getQueryClient } from "@/lib/react-query/queryClient";
+import { getServerUser } from "@/lib/supabase/server/getServerUser";
+import { dehydrate, HydrationBoundary } from "@tanstack/react-query";
+import { Metadata } from "next";
+import { MainAgentPage } from "../../../components/MainAgentPage/MainAgentPage";
 
 export const dynamic = "force-dynamic";
@@ -1,13 +1,13 @@
-import { getQueryClient } from "@/lib/react-query/queryClient";
 import {
   getV2GetCreatorDetails,
   prefetchGetV2GetCreatorDetailsQuery,
   prefetchGetV2ListStoreAgentsQuery,
 } from "@/app/api/__generated__/endpoints/store/store";
-import { dehydrate, HydrationBoundary } from "@tanstack/react-query";
-import { MainCreatorPage } from "../../components/MainCreatorPage/MainCreatorPage";
-import { Metadata } from "next";
 import { CreatorDetails } from "@/app/api/__generated__/models/creatorDetails";
+import { getQueryClient } from "@/lib/react-query/queryClient";
+import { dehydrate, HydrationBoundary } from "@tanstack/react-query";
+import { Metadata } from "next";
+import { MainCreatorPage } from "../../components/MainCreatorPage/MainCreatorPage";
 
 export const dynamic = "force-dynamic";
@@ -1,16 +1,15 @@
 "use client";
 
-import * as Sentry from "@sentry/nextjs";
 import { ErrorCard } from "@/components/molecules/ErrorCard/ErrorCard";
+import * as Sentry from "@sentry/nextjs";
 import { useEffect } from "react";
 
-export default function GlobalError({
-  error,
-  reset,
-}: {
+interface Props {
   error: Error & { digest?: string };
   reset: () => void;
-}) {
+}
+
+export default function GlobalError({ error, reset }: Props) {
   useEffect(() => {
     Sentry.captureException(error);
   }, [error]);
@@ -64,7 +64,9 @@ export const useAnyOfField = (
 
   const [selectedType, setSelectedType] = useState<string>(initialSelectedType);
 
-  const isEnabled = formData !== null && formData !== undefined;
+  // Only check for explicit null (set by toggle off), not undefined (empty input)
+  // This allows users to clear number inputs without the field disappearing
+  const isEnabled = formData !== null;
 
   const handleTypeChange = (t: string) => {
     setSelectedType(t);
@@ -79,7 +81,13 @@ export const useAnyOfField = (
     }
   };
 
-  const handleValueChange = (value: any) => onChange(value);
+  const handleValueChange = (value: any) => {
+    if (isNullableType && value === null) {
+      onChange(undefined);
+      return;
+    }
+    onChange(value);
+  };
 
   const currentTypeOption = typeOptions.find((o) => o.type === selectedType);
@@ -51,7 +51,7 @@ export const TextInputWidget = (props: WidgetProps) => {
       handleChange: (v: string) => (v === "" ? undefined : Number(v)),
     },
     [InputType.INTEGER]: {
-      htmlType: "account",
+      htmlType: "number",
       placeholder: "Enter integer value...",
       handleChange: (v: string) => (v === "" ? undefined : Number(v)),
     },