feat(rnd): Add block description and categories (#7463)

### Background

Blocks currently expose no human-readable description and no category metadata. This change adds both to the `Block` base class so each block can describe itself and be grouped by category.

### Changes 🏗️

* Add `description` and `categories` metadata to the `Block` base class, backed by a new `BlockCategory` enum (`LLM`, `SOCIAL`, `TEXT`, `SEARCH`, `BASIC`), and expose both in the block's serialized metadata (see the sketch below).
* Initialize the description and categories on all existing blocks.
* Split `LlmCallBlock` into `ObjectLlmCallBlock` (structured output) and `TextLlmCallBlock` (plain-text output) and update the sample graphs accordingly.
* Support string-key lookups over lists in `ObjectLookupBlock`.
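
For reference, the pattern applied across the updated blocks is sketched below; `EchoBlock` and its UUID are hypothetical and only serve to illustrate the new `description`/`categories` arguments and the `BlockCategory` enum introduced here:

```python
# Hypothetical example block; only the description/categories arguments and
# the BlockCategory enum come from this change.
from autogpt_server.data.block import Block, BlockCategory, BlockOutput, BlockSchema


class EchoBlock(Block):
    class Input(BlockSchema):
        text: str

    class Output(BlockSchema):
        text: str

    def __init__(self):
        super().__init__(
            id="00000000-0000-0000-0000-000000000000",  # placeholder UUID
            description="Echo the given text back to the caller.",
            categories={BlockCategory.TEXT},
            input_schema=EchoBlock.Input,
            output_schema=EchoBlock.Output,
        )

    def run(self, input_data: Input) -> BlockOutput:
        yield "text", input_data.text
```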
Author: Zamil Majdy
Committed: 2024-07-19 20:10:16 +04:00 (via GitHub)
Commit: 82fd3166ef (parent f833fa3624)
15 changed files with 182 additions and 31 deletions


@@ -0,0 +1,10 @@
# LLM
OPENAI_API_KEY=
ANTHROPIC_API_KEY=
GROQ_API_KEY=
# Reddit
REDDIT_CLIENT_ID=
REDDIT_CLIENT_SECRET=
REDDIT_USERNAME=
REDDIT_PASSWORD=


@@ -19,7 +19,7 @@ from forge.llm.providers.schema import ModelProviderName
from forge.models.json_schema import JSONSchema
from pydantic import Field, SecretStr
from autogpt_server.data.block import Block, BlockOutput, BlockSchema
from autogpt_server.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from autogpt_server.data.model import BlockSecret, SchemaField, SecretField
if TYPE_CHECKING:
@@ -107,6 +107,8 @@ class AutoGPTAgentBlock(Block):
def __init__(self):
super().__init__(
id="d2e2ecd2-9ae6-422d-8dfe-ceca500ce6a6",
description="AutoGPT agent, it utilizes a Large Language Model and enabled components/tools to perform a task.",
categories={BlockCategory.LLM},
input_schema=AutoGPTAgentBlock.Input,
output_schema=AutoGPTAgentBlock.Output,
test_input={


@@ -2,7 +2,7 @@ from typing import Any
from pydantic import Field
from autogpt_server.data.block import Block, BlockOutput, BlockSchema
from autogpt_server.data.block import Block, BlockCategory, BlockOutput, BlockSchema
class ValueBlock(Block):
@@ -42,6 +42,11 @@ class ValueBlock(Block):
def __init__(self):
super().__init__(
id="1ff065e9-88e8-4358-9d82-8dc91f622ba9",
description="This block forwards the `input` pin to `output` pin. "
"If the `data` is provided, it will prioritize forwarding `data` "
"over `input`. By connecting the `output` pin to `data` pin, "
"you can retain a constant value for the next executions.",
categories={BlockCategory.BASIC},
input_schema=ValueBlock.Input,
output_schema=ValueBlock.Output,
test_input=[
@@ -68,6 +73,8 @@ class PrintingBlock(Block):
def __init__(self):
super().__init__(
id="f3b1c1b2-4c4f-4f0d-8d2f-4c4f0d8d2f4c",
description="Print the given text to the console, this is used for a debugging purpose.",
categories={BlockCategory.BASIC},
input_schema=PrintingBlock.Input,
output_schema=PrintingBlock.Output,
test_input={"text": "Hello, World!"},
@@ -91,6 +98,8 @@ class ObjectLookupBlock(Block):
def __init__(self):
super().__init__(
id="b2g2c3d4-5e6f-7g8h-9i0j-k1l2m3n4o5p6",
description="Lookup the given key in the input dictionary/object/list and return the value.",
categories={BlockCategory.BASIC},
input_schema=ObjectLookupBlock.Input,
output_schema=ObjectLookupBlock.Output,
test_input=[
@@ -99,6 +108,7 @@ class ObjectLookupBlock(Block):
{"input": [1, 2, 3], "key": 1},
{"input": [1, 2, 3], "key": 3},
{"input": ObjectLookupBlock.Input(input="!!", key="key"), "key": "key"},
{"input": [{"k1": "v1"}, {"k2": "v2"}, {"k1": "v3"}], "key": "k1"},
],
test_output=[
("output", 2),
@@ -106,6 +116,7 @@ class ObjectLookupBlock(Block):
("output", 2),
("missing", [1, 2, 3]),
("output", "key"),
("output", ["v1", "v3"]),
],
)
@@ -117,6 +128,13 @@ class ObjectLookupBlock(Block):
yield "output", obj[key]
elif isinstance(obj, list) and isinstance(key, int) and 0 <= key < len(obj):
yield "output", obj[key]
elif isinstance(obj, list) and isinstance(key, str):
if len(obj) == 0:
yield "output", []
elif isinstance(obj[0], dict) and key in obj[0]:
yield "output", [item[key] for item in obj if key in item]
else:
yield "output", [getattr(val, key) for val in obj if hasattr(val, key)]
elif isinstance(obj, object) and isinstance(key, str) and hasattr(obj, key):
yield "output", getattr(obj, key)
else:

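The new list branch in `ObjectLookupBlock.run` effectively turns a string key applied to a list into a projection over its items. A minimal standalone sketch of that behaviour (the `lookup_in_list` helper is illustrative, not part of the block API), matching the test case added in this hunk:

```python
# Standalone sketch of the projection behaviour added for list inputs
# with a string key (mirrors the branch shown in the diff above).
def lookup_in_list(obj: list, key: str) -> list:
    if len(obj) == 0:
        return []
    if isinstance(obj[0], dict) and key in obj[0]:
        return [item[key] for item in obj if key in item]
    return [getattr(val, key) for val in obj if hasattr(val, key)]


# Matches the new test case: items missing the key are skipped rather than erroring.
assert lookup_in_list([{"k1": "v1"}, {"k2": "v2"}, {"k1": "v3"}], "k1") == ["v1", "v3"]
```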

@@ -2,7 +2,7 @@ import os
import re
from typing import Type
from autogpt_server.data.block import Block, BlockOutput, BlockSchema
from autogpt_server.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from autogpt_server.util.test import execute_block_test
@@ -25,6 +25,8 @@ class BlockInstallationBlock(Block):
def __init__(self):
super().__init__(
id="45e78db5-03e9-447f-9395-308d712f5f08",
description="Given a code string, this block allows the verification and installation of a block code into the system.",
categories={BlockCategory.BASIC},
input_schema=BlockInstallationBlock.Input,
output_schema=BlockInstallationBlock.Output,
)


@@ -39,10 +39,12 @@ class CreateMediumPostBlock(Block):
placeholder="public",
)
license: str = SchemaField(
default="all-rights-reserved",
description="The license of the post: 'all-rights-reserved', 'cc-40-by', 'cc-40-by-sa', 'cc-40-by-nd', 'cc-40-by-nc', 'cc-40-by-nc-nd', 'cc-40-by-nc-sa', 'cc-40-zero', 'public-domain'",
placeholder="all-rights-reserved",
)
notify_followers: bool = SchemaField(
default=False,
description="Whether to notify followers that the user has published",
placeholder="False",
)


@@ -2,7 +2,7 @@ from enum import Enum
import requests
from autogpt_server.data.block import Block, BlockOutput, BlockSchema
from autogpt_server.data.block import Block, BlockCategory, BlockOutput, BlockSchema
class HttpMethod(Enum):
@@ -30,6 +30,8 @@ class HttpRequestBlock(Block):
def __init__(self):
super().__init__(
id="6595ae1f-b924-42cb-9a41-551a0611c4b4",
description="This block makes an HTTP request to the given URL.",
categories={BlockCategory.BASIC},
input_schema=HttpRequestBlock.Input,
output_schema=HttpRequestBlock.Output,
)


@@ -7,7 +7,7 @@ import ollama
import openai
from groq import Groq
from autogpt_server.data.block import Block, BlockOutput, BlockSchema
from autogpt_server.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from autogpt_server.data.model import BlockSecret, SecretField
from autogpt_server.util import json
@@ -65,13 +65,13 @@ MODEL_METADATA = {
}
class LlmCallBlock(Block):
class ObjectLlmCallBlock(Block):
class Input(BlockSchema):
prompt: str
expected_format: dict[str, str]
model: LlmModel = LlmModel.GPT4_TURBO
api_key: BlockSecret = SecretField(key="openai_api_key")
api_key: BlockSecret = SecretField(value="")
sys_prompt: str = ""
expected_format: dict[str, str] = {}
retry: int = 3
class Output(BlockSchema):
@@ -81,8 +81,10 @@ class LlmCallBlock(Block):
def __init__(self):
super().__init__(
id="ed55ac19-356e-4243-a6cb-bc599e9b716f",
input_schema=LlmCallBlock.Input,
output_schema=LlmCallBlock.Output,
description="Call a Large Language Model (LLM) to generate formatted object based on the given prompt.",
categories={BlockCategory.LLM},
input_schema=ObjectLlmCallBlock.Input,
output_schema=ObjectLlmCallBlock.Output,
test_input={
"model": LlmModel.GPT4_TURBO,
"api_key": "fake-api",
@@ -232,6 +234,51 @@ class LlmCallBlock(Block):
yield "error", retry_prompt
class TextLlmCallBlock(Block):
class Input(BlockSchema):
prompt: str
model: LlmModel = LlmModel.GPT4_TURBO
api_key: BlockSecret = SecretField(value="")
sys_prompt: str = ""
retry: int = 3
class Output(BlockSchema):
response: str
error: str
def __init__(self):
super().__init__(
id="1f292d4a-41a4-4977-9684-7c8d560b9f91",
description="Call a Large Language Model (LLM) to generate a string based on the given prompt.",
categories={BlockCategory.LLM},
input_schema=TextLlmCallBlock.Input,
output_schema=TextLlmCallBlock.Output,
test_input={"prompt": "User prompt"},
test_output=("response", "Response text"),
test_mock={"llm_call": lambda *args, **kwargs: "Response text"},
)
@staticmethod
def llm_call(input_data: ObjectLlmCallBlock.Input) -> str:
object_block = ObjectLlmCallBlock()
for output_name, output_data in object_block.run(input_data):
if output_name == "response":
return output_data["response"]
else:
raise output_data
raise ValueError("Failed to get a response from the LLM.")
def run(self, input_data: Input) -> BlockOutput:
try:
object_input_data = ObjectLlmCallBlock.Input(
**{attr: getattr(input_data, attr) for attr in input_data.model_fields},
expected_format={},
)
yield "response", self.llm_call(object_input_data)
except Exception as e:
yield "error", str(e)
class TextSummarizerBlock(Block):
class Input(BlockSchema):
text: str
@@ -248,6 +295,8 @@ class TextSummarizerBlock(Block):
def __init__(self):
super().__init__(
id="c3d4e5f6-7g8h-9i0j-1k2l-m3n4o5p6q7r8",
description="Utilize a Large Language Model (LLM) to summarize a long text.",
categories={BlockCategory.LLM, BlockCategory.TEXT},
input_schema=TextSummarizerBlock.Input,
output_schema=TextSummarizerBlock.Output,
test_input={"text": "Lorem ipsum..." * 100},
@@ -294,8 +343,8 @@ class TextSummarizerBlock(Block):
return chunks
@staticmethod
def llm_call(input_data: LlmCallBlock.Input) -> dict[str, str]:
llm_block = LlmCallBlock()
def llm_call(input_data: ObjectLlmCallBlock.Input) -> dict[str, str]:
llm_block = ObjectLlmCallBlock()
for output_name, output_data in llm_block.run(input_data):
if output_name == "response":
return output_data
@@ -305,7 +354,7 @@ class TextSummarizerBlock(Block):
prompt = f"Summarize the following text concisely:\n\n{chunk}"
llm_response = self.llm_call(
LlmCallBlock.Input(
ObjectLlmCallBlock.Input(
prompt=prompt,
api_key=input_data.api_key,
model=input_data.model,
@@ -325,7 +374,7 @@ class TextSummarizerBlock(Block):
)
llm_response = self.llm_call(
LlmCallBlock.Input(
ObjectLlmCallBlock.Input(
prompt=prompt,
api_key=input_data.api_key,
model=input_data.model,

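With the split, `ObjectLlmCallBlock` keeps the structured `expected_format` behaviour of the old `LlmCallBlock`, while `TextLlmCallBlock` wraps it and returns a plain string. A rough usage sketch, reusing the mock from the block's own test data so it can run without a real API key:

```python
from autogpt_server.blocks.llm import TextLlmCallBlock

block = TextLlmCallBlock()
# Replace the LLM call with the same mock used in the block's test data,
# so this sketch does not need a real API key.
block.llm_call = lambda *args, **kwargs: "Response text"

for output_name, output_value in block.run(TextLlmCallBlock.Input(prompt="User prompt")):
    print(output_name, output_value)  # -> response Response text
```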

@@ -4,7 +4,7 @@ from typing import Iterator
import praw
from pydantic import BaseModel, ConfigDict, Field
from autogpt_server.data.block import Block, BlockOutput, BlockSchema
from autogpt_server.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from autogpt_server.data.model import BlockSecret, SecretField
from autogpt_server.util.mock import MockObject
@@ -71,6 +71,8 @@ class RedditGetPostsBlock(Block):
def __init__(self):
super().__init__(
id="c6731acb-4285-4ee1-bc9b-03d0766c370f",
description="This block fetches Reddit posts from a defined subreddit name.",
categories={BlockCategory.SOCIAL},
input_schema=RedditGetPostsBlock.Input,
output_schema=RedditGetPostsBlock.Output,
test_input={
@@ -149,6 +151,8 @@ class RedditPostCommentBlock(Block):
def __init__(self):
super().__init__(
id="4a92261b-701e-4ffb-8970-675fd28e261f",
description="This block posts a Reddit comment on a specified Reddit post.",
categories={BlockCategory.SOCIAL},
input_schema=RedditPostCommentBlock.Input,
output_schema=RedditPostCommentBlock.Output,
test_input={"data": {"post_id": "id", "comment": "comment"}},


@@ -3,7 +3,7 @@ from urllib.parse import quote
import requests
from autogpt_server.data.block import Block, BlockOutput, BlockSchema
from autogpt_server.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from autogpt_server.data.model import BlockSecret, SecretField
@@ -26,6 +26,8 @@ class WikipediaSummaryBlock(Block, GetRequest):
def __init__(self):
super().__init__(
id="h5e7f8g9-1b2c-3d4e-5f6g-7h8i9j0k1l2m",
description="This block fetches the summary of a given topic from Wikipedia.",
categories={BlockCategory.SEARCH},
input_schema=WikipediaSummaryBlock.Input,
output_schema=WikipediaSummaryBlock.Output,
test_input={"topic": "Artificial Intelligence"},
@@ -61,6 +63,8 @@ class WebSearchBlock(Block, GetRequest):
def __init__(self):
super().__init__(
id="b2c3d4e5-6f7g-8h9i-0j1k-l2m3n4o5p6q7",
description="This block searches the internet for the given search query.",
categories={BlockCategory.SEARCH},
input_schema=WebSearchBlock.Input,
output_schema=WebSearchBlock.Output,
test_input={"query": "Artificial Intelligence"},
@@ -100,6 +104,8 @@ class WebScraperBlock(Block, GetRequest):
def __init__(self):
super().__init__(
id="a1b2c3d4-5e6f-7g8h-9i0j-k1l2m3n4o5p6", # Unique ID for the block
description="This block scrapes the content from the given web URL.",
categories={BlockCategory.SEARCH},
input_schema=WebScraperBlock.Input,
output_schema=WebScraperBlock.Output,
test_input={"url": "https://en.wikipedia.org/wiki/Artificial_intelligence"},


@@ -4,7 +4,7 @@ from typing import Any
from pydantic import Field
from autogpt_server.data.block import Block, BlockOutput, BlockSchema
from autogpt_server.data.block import Block, BlockCategory, BlockOutput, BlockSchema
class TextMatcherBlock(Block):
@@ -22,6 +22,10 @@ class TextMatcherBlock(Block):
def __init__(self):
super().__init__(
id="3060088f-6ed9-4928-9ba7-9c92823a7ccd",
description="This block matches the given text with the pattern (regex) and"
" forwards the provided data to positive (if matching) or"
" negative (if not matching) output.",
categories={BlockCategory.TEXT},
input_schema=TextMatcherBlock.Input,
output_schema=TextMatcherBlock.Output,
test_input=[
@@ -72,6 +76,8 @@ class TextParserBlock(Block):
def __init__(self):
super().__init__(
id="3146e4fe-2cdd-4f29-bd12-0c9d5bb4deb0",
description="This block extracts the text from the given text using the pattern (regex).",
categories={BlockCategory.TEXT},
input_schema=TextParserBlock.Input,
output_schema=TextParserBlock.Output,
test_input=[
@@ -123,6 +129,8 @@ class TextFormatterBlock(Block):
def __init__(self):
super().__init__(
id="db7d8f02-2f44-4c55-ab7a-eae0941f0c30",
description="This block formats the given texts using the format template.",
categories={BlockCategory.TEXT},
input_schema=TextFormatterBlock.Input,
output_schema=TextFormatterBlock.Output,
test_input=[


@@ -1,4 +1,5 @@
from abc import ABC, abstractmethod
from enum import Enum
from typing import Any, ClassVar, Generator, Generic, Type, TypeVar, cast
import jsonref
@@ -13,6 +14,17 @@ BlockData = tuple[str, Any]
BlockOutput = Generator[BlockData, None, None]
class BlockCategory(Enum):
LLM = "Block that leverages the Large Language Model to perform a task."
SOCIAL = "Block that interacts with social media platforms."
TEXT = "Block that processes text data."
SEARCH = "Block that searches or extracts information from the internet."
BASIC = "Block that performs basic operations."
def dict(self) -> dict[str, str]:
return {"category": self.name, "description": self.value}
class BlockSchema(BaseModel):
cached_jsonschema: ClassVar[dict[str, Any]] = {}
@@ -101,6 +113,8 @@ class Block(ABC, Generic[BlockSchemaInputType, BlockSchemaOutputType]):
def __init__(
self,
id: str = "",
description: str = "",
categories: set[BlockCategory] | None = None,
input_schema: Type[BlockSchemaInputType] = EmptySchema,
output_schema: Type[BlockSchemaOutputType] = EmptySchema,
test_input: BlockInput | list[BlockInput] | None = None,
@@ -126,6 +140,8 @@ class Block(ABC, Generic[BlockSchemaInputType, BlockSchemaOutputType]):
self.test_input = test_input
self.test_output = test_output
self.test_mock = test_mock
self.description = description
self.categories = categories or set()
@abstractmethod
def run(self, input_data: BlockSchemaInputType) -> BlockOutput:
@@ -150,6 +166,8 @@ class Block(ABC, Generic[BlockSchemaInputType, BlockSchemaOutputType]):
"name": self.name,
"inputSchema": self.input_schema.jsonschema(),
"outputSchema": self.output_schema.jsonschema(),
"description": self.description,
"categories": [category.dict() for category in self.categories],
}
def execute(self, input_data: BlockInput) -> BlockOutput:

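With the two new fields wired into the serialized block metadata above, each category serializes itself through `BlockCategory.dict()`; a quick illustration:

```python
from autogpt_server.data.block import BlockCategory

# Each category serializes to a name/description pair for the block metadata.
print(BlockCategory.BASIC.dict())
# {'category': 'BASIC', 'description': 'Block that performs basic operations.'}
```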

@@ -3,13 +3,13 @@ from pathlib import Path
from autogpt_server.blocks.basic import ValueBlock
from autogpt_server.blocks.block import BlockInstallationBlock
from autogpt_server.blocks.http import HttpRequestBlock
from autogpt_server.blocks.llm import LlmCallBlock
from autogpt_server.blocks.llm import TextLlmCallBlock
from autogpt_server.blocks.text import TextFormatterBlock, TextParserBlock
from autogpt_server.data.graph import Graph, Link, Node, create_graph
from autogpt_server.util.test import SpinTestServer, wait_execution
sample_block_modules = {
"ai": "Block that calls the AI model to generate text.",
"llm": "Block that calls the AI model to generate text.",
"basic": "Block that does basic operations.",
"text": "Blocks that do text operations.",
"reddit": "Blocks that interacts with Reddit.",
@@ -40,7 +40,7 @@ def create_test_graph() -> Graph:
| ||
| ||
| v
| LlmCallBlock <===== TextFormatterBlock (query)
| TextLlmCallBlock <===== TextFormatterBlock (query)
| || ^
| v ||
| TextParserBlock ||
@@ -50,6 +50,10 @@ def create_test_graph() -> Graph:
"""
# ======= Nodes ========= #
input_data = Node(block_id=ValueBlock().id)
input_query_constant = Node(
block_id=ValueBlock().id,
input_default={"data": None},
)
input_text_formatter = Node(
block_id=TextFormatterBlock().id,
input_default={
@@ -84,7 +88,7 @@ Here is your previous attempt:
},
)
code_gen_llm_call = Node(
block_id=LlmCallBlock().id,
block_id=TextLlmCallBlock().id,
input_default={
"sys_prompt": f"""
You are a software engineer and you are asked to write the full class implementation.
@@ -123,6 +127,7 @@ Here are a couple of sample of the Block class implementation:
)
nodes = [
input_data,
input_query_constant,
input_text_formatter,
search_http_request,
search_result_constant,
@@ -134,12 +139,24 @@ Here are a couple of sample of the Block class implementation:
# ======= Links ========= #
links = [
Link(
source_id=input_data.id,
sink_id=input_query_constant.id,
source_name="output",
sink_name="input",
),
Link(
source_id=input_data.id,
sink_id=input_text_formatter.id,
source_name="output",
sink_name="named_texts_#_query",
),
Link(
source_id=input_query_constant.id,
sink_id=input_query_constant.id,
source_name="output",
sink_name="data",
),
Link(
source_id=input_text_formatter.id,
sink_id=search_http_request.id,
@@ -165,7 +182,7 @@ Here are a couple of sample of the Block class implementation:
sink_name="named_texts_#_search_result",
),
Link(
source_id=input_data.id,
source_id=input_query_constant.id,
sink_id=prompt_text_formatter.id,
source_name="output",
sink_name="named_texts_#_query",
@@ -179,7 +196,7 @@ Here are a couple of sample of the Block class implementation:
Link(
source_id=code_gen_llm_call.id,
sink_id=code_text_parser.id,
source_name="response_#_response",
source_name="response",
sink_name="text",
),
Link(
@@ -200,6 +217,12 @@ Here are a couple of sample of the Block class implementation:
source_name="error",
sink_name="input",
),
Link( # Re-trigger search result.
source_id=block_installation.id,
sink_id=input_query_constant.id,
source_name="error",
sink_name="input",
),
]
# ======= Graph ========= #


@@ -1,4 +1,4 @@
from autogpt_server.blocks.llm import LlmCallBlock
from autogpt_server.blocks.llm import ObjectLlmCallBlock
from autogpt_server.blocks.reddit import RedditGetPostsBlock, RedditPostCommentBlock
from autogpt_server.blocks.text import TextFormatterBlock, TextMatcherBlock
from autogpt_server.data.graph import Graph, Link, Node, create_graph
@@ -18,7 +18,7 @@ def create_test_graph() -> Graph:
TextFormatterBlock (format)
||
v
LlmCallBlock / TextRelevancy
ObjectLlmCallBlock / TextRelevancy
|| || ||
post_id is_relevant marketing_text
|| || ||
@@ -68,7 +68,7 @@ Make sure to only comment on a relevant post.
block_id=TextFormatterBlock().id,
input_default=text_formatter_input,
)
llm_call_node = Node(block_id=LlmCallBlock().id, input_default=llm_call_input)
llm_call_node = Node(block_id=ObjectLlmCallBlock().id, input_default=llm_call_input)
text_matcher_node = Node(
block_id=TextMatcherBlock().id,
input_default=text_matcher_input,

View File

@@ -82,7 +82,10 @@ def execute_block_test(block: Block):
for mock_name, mock_obj in (block.test_mock or {}).items():
log(f"{prefix} mocking {mock_name}...")
setattr(block, mock_name, mock_obj)
if hasattr(block, mock_name):
setattr(block, mock_name, mock_obj)
else:
log(f"{prefix} mock {mock_name} not found in block")
for input_data in block.test_input:
log(f"{prefix} in: {input_data}")


@@ -10,10 +10,14 @@ def run(*command: str) -> None:
def lint():
run("ruff", "check", ".", "--exit-zero")
run("isort", "--diff", "--check", "--profile", "black", ".")
run("black", "--diff", "--check", ".")
run("pyright")
try:
run("ruff", "check", ".", "--exit-zero")
run("isort", "--diff", "--check", "--profile", "black", ".")
run("black", "--diff", "--check", ".")
run("pyright")
except subprocess.CalledProcessError as e:
print("Lint failed, try running `poetry run format` to fix the issues: ", e)
raise e
def format():