fix(agent, forge): Fix Pydantic v2 protected namespace model_ warnings (#7340)
Rename `model_*` fields to `llm_*`
committed by GitHub
parent 2ecce27653
commit bffb92bfbc
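For context, a minimal sketch of the warning this commit silences (illustrative, not code from this diff): Pydantic v2 reserves the `model_` prefix for its own namespace (`model_dump`, `model_validate`, ...) and emits a `UserWarning` at class-definition time for any field starting with it.

```python
from pydantic import BaseModel, ConfigDict


class Before(BaseModel):
    model_name: str = "gpt-3.5-turbo"
    # UserWarning: Field "model_name" has conflict with
    # protected namespace "model_".


class After(BaseModel):
    llm_name: str = "gpt-3.5-turbo"  # renamed field, no warning


# Alternative not taken by this PR: keep the old name and clear the
# protected namespaces on the model instead.
class OptOut(BaseModel):
    model_config = ConfigDict(protected_namespaces=())
    model_name: str = "gpt-3.5-turbo"
```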
@@ -19,7 +19,7 @@ logger = logging.getLogger(__name__)


 class AgentProfileGeneratorConfiguration(SystemConfiguration):
-    model_classification: LanguageModelClassification = UserConfigurable(
+    llm_classification: LanguageModelClassification = UserConfigurable(
         default=LanguageModelClassification.SMART_MODEL
     )
     _example_call: object = {
@@ -148,12 +148,12 @@ class AgentProfileGenerator(PromptStrategy):

     def __init__(
         self,
-        model_classification: LanguageModelClassification,
+        llm_classification: LanguageModelClassification,
         system_prompt: str,
         user_prompt_template: str,
         create_agent_function: dict,
     ):
-        self._model_classification = model_classification
+        self._llm_classification = llm_classification
         self._system_prompt_message = system_prompt
         self._user_prompt_template = user_prompt_template
         self._create_agent_function = CompletionModelFunction.model_validate(
@@ -161,8 +161,8 @@ class AgentProfileGenerator(PromptStrategy):
         )

     @property
-    def model_classification(self) -> LanguageModelClassification:
-        return self._model_classification
+    def llm_classification(self) -> LanguageModelClassification:
+        return self._llm_classification

     def build_prompt(self, user_objective: str = "", **kwargs) -> ChatPrompt:
         system_message = ChatMessage.system(self._system_prompt_message)
@@ -119,7 +119,7 @@ class Agent(BaseAgent[OneShotAgentActionProposal], Configurable[AgentSettings]):
                 lambda x: self.llm_provider.count_tokens(x, self.llm.name),
                 llm_provider,
                 ActionHistoryConfiguration(
-                    model_name=app_config.fast_llm, max_tokens=self.send_token_limit
+                    llm_name=app_config.fast_llm, max_tokens=self.send_token_limit
                 ),
             )
             .run_after(WatchdogComponent)
@@ -100,7 +100,7 @@ class OneShotAgentPromptStrategy(PromptStrategy):
         self.logger = logger

     @property
-    def model_classification(self) -> LanguageModelClassification:
+    def llm_classification(self) -> LanguageModelClassification:
         return LanguageModelClassification.FAST_MODEL  # FIXME: dynamic switching

     def build_prompt(
@@ -40,7 +40,7 @@ Necessary for saving and loading agent's state (preserving session).

 | Config variable  | Details                                | Type  | Default                            |
 | ---------------- | -------------------------------------- | ----- | ---------------------------------- |
-| `storage_path`   | Path to agent files, e.g. state        | `str` | `agents/{agent_id}/`[^1]           |
+| `storage_path`   | Path to agent files, e.g. state        | `str` | `agents/{agent_id}/`[^1]           |
 | `workspace_path` | Path to files that agent has access to | `str` | `agents/{agent_id}/workspace/`[^1] |

 [^1] This option is set dynamically during component construction as opposed to by default inside the configuration model, `{agent_id}` is replaced with the agent's unique identifier.
@@ -84,7 +84,7 @@ Keeps track of agent's actions and their outcomes. Provides their summary to the

 | Config variable        | Details                                                  | Type        | Default            |
 | ---------------------- | -------------------------------------------------------- | ----------- | ------------------ |
-| `model_name`           | Name of the llm model used to compress the history       | `ModelName` | `"gpt-3.5-turbo"`  |
+| `llm_name`             | Name of the llm model used to compress the history       | `ModelName` | `"gpt-3.5-turbo"`  |
 | `max_tokens`           | Maximum number of tokens to use for the history summary  | `int`       | `1024`             |
 | `spacy_language_model` | Language model used for summary chunking using spacy     | `str`       | `"en_core_web_sm"` |
 | `full_message_count`   | Number of cycles to include unsummarized in the prompt   | `int`       | `4`                |
@@ -178,7 +178,7 @@ Allows agent to read websites using Selenium.

 | Config variable | Details                                     | Type                                          | Default           |
 | --------------- | ------------------------------------------- | --------------------------------------------- | ----------------- |
-| `model_name`    | Name of the llm model used to read websites | `ModelName`                                   | `"gpt-3.5-turbo"` |
+| `llm_name`      | Name of the llm model used to read websites | `ModelName`                                   | `"gpt-3.5-turbo"` |
 | `web_browser`   | Web browser used by Selenium                | `"chrome" \| "firefox" \| "safari" \| "edge"` | `"chrome"`        |
 | `headless`      | Run browser in headless mode                | `bool`                                        | `True`            |
 | `user_agent`    | User agent used by the browser              | `str`                                         | `"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36"` |
@@ -116,7 +116,7 @@ You can set sensitive variables in the `.json` file as well but it's recommended
         "github_username": null
     },
     "ActionHistoryConfiguration": {
-        "model_name": "gpt-3.5-turbo",
+        "llm_name": "gpt-3.5-turbo",
         "max_tokens": 1024,
         "spacy_language_model": "en_core_web_sm"
     },
@@ -129,7 +129,7 @@ You can set sensitive variables in the `.json` file as well but it's recommended
         "duckduckgo_max_attempts": 3
     },
     "WebSeleniumConfiguration": {
-        "model_name": "gpt-3.5-turbo",
+        "llm_name": "gpt-3.5-turbo",
         "web_browser": "chrome",
         "headless": true,
         "user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36",
@@ -16,7 +16,7 @@ from .model import ActionResult, AnyProposal, Episode, EpisodicActionHistory


 class ActionHistoryConfiguration(BaseModel):
-    model_name: ModelName = OpenAIModelName.GPT3
+    llm_name: ModelName = OpenAIModelName.GPT3
     """Name of the llm model used to compress the history"""
     max_tokens: int = 1024
     """Maximum number of tokens to use up with generated history messages"""
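A hedged sketch of constructing the renamed config (the class and defaults come from the hunk above; the import paths and the instantiation itself are assumptions, not part of this diff):

```python
# Assumed import paths, based on the modules shown in this diff:
from forge.components.action_history import ActionHistoryConfiguration
from forge.llm.providers import OpenAIModelName

# Illustrative only: with Pydantic v2's default extra="ignore", the old
# keyword model_name= would now be silently dropped rather than set, so
# call sites must be updated along with the field.
config = ActionHistoryConfiguration(
    llm_name=OpenAIModelName.GPT3,  # renamed from model_name
    max_tokens=1024,
)
```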
@@ -97,7 +97,7 @@ class ActionHistoryComponent(
     async def after_execute(self, result: ActionResult) -> None:
         self.event_history.register_result(result)
         await self.event_history.handle_compression(
-            self.llm_provider, self.config.model_name, self.config.spacy_language_model
+            self.llm_provider, self.config.llm_name, self.config.spacy_language_model
         )

     @staticmethod
@@ -55,7 +55,7 @@ class BrowsingError(CommandExecutionError):


 class WebSeleniumConfiguration(BaseModel):
-    model_name: ModelName = OpenAIModelName.GPT3
+    llm_name: ModelName = OpenAIModelName.GPT3
     """Name of the llm model used to read websites"""
     web_browser: Literal["chrome", "firefox", "safari", "edge"] = "chrome"
     """Web browser used by Selenium"""
@@ -164,7 +164,7 @@ class WebSeleniumComponent(
         elif get_raw_content:
             if (
                 output_tokens := self.llm_provider.count_tokens(
-                    text, self.config.model_name
+                    text, self.config.llm_name
                 )
             ) > MAX_RAW_CONTENT_LENGTH:
                 oversize_factor = round(output_tokens / MAX_RAW_CONTENT_LENGTH, 1)
@@ -382,7 +382,7 @@ class WebSeleniumComponent(
             text,
             topics_of_interest=topics_of_interest,
             llm_provider=self.llm_provider,
-            model_name=self.config.model_name,
+            model_name=self.config.llm_name,
             spacy_model=self.config.browse_spacy_language_model,
         )
         return "\n".join(f"* {i}" for i in information)
@@ -391,7 +391,7 @@ class WebSeleniumComponent(
             text,
             question=question,
             llm_provider=self.llm_provider,
-            model_name=self.config.model_name,
+            model_name=self.config.llm_name,
             spacy_model=self.config.browse_spacy_language_model,
         )
         return result
@@ -10,7 +10,7 @@ from .schema import ChatPrompt, LanguageModelClassification
 class PromptStrategy(abc.ABC):
     @property
     @abc.abstractmethod
-    def model_classification(self) -> LanguageModelClassification:
+    def llm_classification(self) -> LanguageModelClassification:
         ...

     @abc.abstractmethod
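The renamed abstract property changes the interface contract for every strategy: a subclass still overriding `model_classification` would leave the abstract `llm_classification` unimplemented and could no longer be instantiated. A minimal sketch of a conforming subclass (hypothetical class, assumed import paths):

```python
# Assumed import paths, based on the hunk above:
from forge.llm.prompting.base import PromptStrategy
from forge.llm.prompting.schema import LanguageModelClassification


# Hypothetical strategy conforming to the renamed interface
# (other abstract methods of PromptStrategy omitted for brevity).
class ExamplePromptStrategy(PromptStrategy):
    @property
    def llm_classification(self) -> LanguageModelClassification:
        return LanguageModelClassification.FAST_MODEL
```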
@@ -224,7 +224,7 @@ class BaseOpenAIChatProvider(
                 tool_calls=tool_calls or None,
             ),
             parsed_result=parsed_result,
-            model_info=self.CHAT_MODELS[model_name],
+            llm_info=self.CHAT_MODELS[model_name],
             prompt_tokens_used=t_input,
             completion_tokens_used=t_output,
         )
@@ -457,7 +457,7 @@ class BaseOpenAIEmbeddingProvider(

         return EmbeddingModelResponse(
             embedding=embedding_parser(response.data[0].embedding),
-            model_info=self.EMBEDDING_MODELS[model_name],
+            llm_info=self.EMBEDDING_MODELS[model_name],
             prompt_tokens_used=response.usage.prompt_tokens,
         )

@@ -309,7 +309,7 @@ class AnthropicProvider(BaseChatModelProvider[AnthropicModelName, AnthropicSetti
         return ChatModelResponse(
             response=assistant_msg,
             parsed_result=parsed_result,
-            model_info=ANTHROPIC_CHAT_MODELS[model_name],
+            llm_info=ANTHROPIC_CHAT_MODELS[model_name],
             prompt_tokens_used=t_input,
             completion_tokens_used=t_output,
         )
@@ -186,7 +186,7 @@ class ModelResponse(BaseModel):

     prompt_tokens_used: int
     completion_tokens_used: int
-    model_info: ModelInfo
+    llm_info: ModelInfo


 class ModelProviderConfiguration(SystemConfiguration):
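Since `ModelResponse` is the shared base of the provider responses above, any caller that read `response.model_info` must now read `response.llm_info`. A hedged sketch of an updated consumer (the import path and the `ModelInfo.name` attribute are assumptions drawn from the surrounding forge code):

```python
# Assumed import path for the schema shown above:
from forge.llm.providers.schema import ChatModelResponse


def log_usage(response: ChatModelResponse) -> None:
    info = response.llm_info  # was: response.model_info
    print(f"{info.name}: {response.prompt_tokens_used} prompt tokens used")
```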