From f722c70c5071f942611e98d90c93237b97dd664c Mon Sep 17 00:00:00 2001
From: Nicholas Tindle
Date: Wed, 19 Feb 2025 10:30:46 -0600
Subject: [PATCH 1/5] feat(blocks): add base for smartlead, apollo, and
zerobounce blocks (#9387)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
We want to support some more advanced search-specific actions. These are
the base API layers and sample blocks for some of the services we need.
### Changes 🏗️
- support pydantic models as an output format
- add apollo
- add smartlead
- add zerobounce
### Checklist 📋
#### For code changes:
- [x] I have clearly listed my changes in the PR description
- [x] I have made a test plan
- [x] I have tested my changes according to the test plan:
- [x] Built agents to test
---------
Co-authored-by: Krzysztof Czerwinski <34861343+kcze@users.noreply.github.com>
Co-authored-by: Bently
---
autogpt_platform/backend/.env.example | 9 +
.../backend/backend/blocks/apollo/_api.py | 108 ++++
.../backend/backend/blocks/apollo/_auth.py | 35 ++
.../backend/backend/blocks/apollo/models.py | 543 ++++++++++++++++++
.../backend/blocks/apollo/organization.py | 219 +++++++
.../backend/backend/blocks/apollo/people.py | 394 +++++++++++++
.../backend/backend/blocks/smartlead/_api.py | 97 ++++
.../backend/backend/blocks/smartlead/_auth.py | 35 ++
.../backend/blocks/smartlead/campaign.py | 326 +++++++++++
.../backend/blocks/smartlead/models.py | 147 +++++
.../backend/backend/blocks/zerobounce/_api.py | 10 +
.../backend/blocks/zerobounce/_auth.py | 35 ++
.../blocks/zerobounce/validate_emails.py | 175 ++++++
.../backend/backend/executor/manager.py | 1 +
.../backend/integrations/credentials_store.py | 32 ++
.../backend/backend/integrations/providers.py | 3 +
autogpt_platform/backend/backend/util/json.py | 17 +-
.../backend/backend/util/settings.py | 4 +
autogpt_platform/backend/poetry.lock | 17 +-
autogpt_platform/backend/pyproject.toml | 3 +-
.../app/profile/(user)/integrations/page.tsx | 3 +
.../integrations/credentials-input.tsx | 3 +
.../integrations/credentials-provider.tsx | 5 +-
.../src/lib/autogpt-server-api/types.ts | 5 +-
24 files changed, 2221 insertions(+), 5 deletions(-)
create mode 100644 autogpt_platform/backend/backend/blocks/apollo/_api.py
create mode 100644 autogpt_platform/backend/backend/blocks/apollo/_auth.py
create mode 100644 autogpt_platform/backend/backend/blocks/apollo/models.py
create mode 100644 autogpt_platform/backend/backend/blocks/apollo/organization.py
create mode 100644 autogpt_platform/backend/backend/blocks/apollo/people.py
create mode 100644 autogpt_platform/backend/backend/blocks/smartlead/_api.py
create mode 100644 autogpt_platform/backend/backend/blocks/smartlead/_auth.py
create mode 100644 autogpt_platform/backend/backend/blocks/smartlead/campaign.py
create mode 100644 autogpt_platform/backend/backend/blocks/smartlead/models.py
create mode 100644 autogpt_platform/backend/backend/blocks/zerobounce/_api.py
create mode 100644 autogpt_platform/backend/backend/blocks/zerobounce/_auth.py
create mode 100644 autogpt_platform/backend/backend/blocks/zerobounce/validate_emails.py
diff --git a/autogpt_platform/backend/.env.example b/autogpt_platform/backend/.env.example
index 11383b9589..8b28244f42 100644
--- a/autogpt_platform/backend/.env.example
+++ b/autogpt_platform/backend/.env.example
@@ -165,6 +165,15 @@ MEM0_API_KEY=
# Nvidia
NVIDIA_API_KEY=
+# Apollo
+APOLLO_API_KEY=
+
+# SmartLead
+SMARTLEAD_API_KEY=
+
+# ZeroBounce
+ZEROBOUNCE_API_KEY=
+
# Logging Configuration
LOG_LEVEL=INFO
ENABLE_CLOUD_LOGGING=false
diff --git a/autogpt_platform/backend/backend/blocks/apollo/_api.py b/autogpt_platform/backend/backend/blocks/apollo/_api.py
new file mode 100644
index 0000000000..157235ff0f
--- /dev/null
+++ b/autogpt_platform/backend/backend/blocks/apollo/_api.py
@@ -0,0 +1,108 @@
+import logging
+from typing import List
+
+from backend.blocks.apollo._auth import ApolloCredentials
+from backend.blocks.apollo.models import (
+ Contact,
+ Organization,
+ SearchOrganizationsRequest,
+ SearchOrganizationsResponse,
+ SearchPeopleRequest,
+ SearchPeopleResponse,
+)
+from backend.util.request import Requests
+
+logger = logging.getLogger(name=__name__)
+
+
+class ApolloClient:
+ """Client for the Apollo API"""
+
+ API_URL = "https://api.apollo.io/api/v1"
+
+ def __init__(self, credentials: ApolloCredentials):
+ self.credentials = credentials
+ self.requests = Requests()
+
+ def _get_headers(self) -> dict[str, str]:
+ return {"x-api-key": self.credentials.api_key.get_secret_value()}
+
+ def search_people(self, query: SearchPeopleRequest) -> List[Contact]:
+ """Search for people in Apollo"""
+ response = self.requests.get(
+ f"{self.API_URL}/mixed_people/search",
+ headers=self._get_headers(),
+ params=query.model_dump(exclude={"credentials", "max_results"}),
+ )
+ parsed_response = SearchPeopleResponse(**response.json())
+ if parsed_response.pagination.total_entries == 0:
+ return []
+
+ people = parsed_response.people
+
+ # handle pagination
+ if (
+ query.max_results is not None
+ and query.max_results < parsed_response.pagination.total_entries
+ and len(people) < query.max_results
+ ):
+ while (
+ len(people) < query.max_results
+ and query.page < parsed_response.pagination.total_pages
+ and len(parsed_response.people) > 0
+ ):
+ query.page += 1
+ response = self.requests.get(
+ f"{self.API_URL}/mixed_people/search",
+ headers=self._get_headers(),
+ params=query.model_dump(exclude={"credentials", "max_results"}),
+ )
+ parsed_response = SearchPeopleResponse(**response.json())
+ people.extend(parsed_response.people[: query.max_results - len(people)])
+
+ logger.info(f"Found {len(people)} people")
+ return people[: query.max_results] if query.max_results else people
+
+ def search_organizations(
+ self, query: SearchOrganizationsRequest
+ ) -> List[Organization]:
+ """Search for organizations in Apollo"""
+ response = self.requests.get(
+ f"{self.API_URL}/mixed_companies/search",
+ headers=self._get_headers(),
+ params=query.model_dump(exclude={"credentials", "max_results"}),
+ )
+ parsed_response = SearchOrganizationsResponse(**response.json())
+ if parsed_response.pagination.total_entries == 0:
+ return []
+
+ organizations = parsed_response.organizations
+
+ # handle pagination
+ if (
+ query.max_results is not None
+ and query.max_results < parsed_response.pagination.total_entries
+ and len(organizations) < query.max_results
+ ):
+ while (
+ len(organizations) < query.max_results
+ and query.page < parsed_response.pagination.total_pages
+ and len(parsed_response.organizations) > 0
+ ):
+ query.page += 1
+ response = self.requests.get(
+ f"{self.API_URL}/mixed_companies/search",
+ headers=self._get_headers(),
+ params=query.model_dump(exclude={"credentials", "max_results"}),
+ )
+ parsed_response = SearchOrganizationsResponse(**response.json())
+ organizations.extend(
+ parsed_response.organizations[
+ : query.max_results - len(organizations)
+ ]
+ )
+
+ logger.info(f"Found {len(organizations)} organizations")
+ return (
+ organizations[: query.max_results] if query.max_results else organizations
+ )
diff --git a/autogpt_platform/backend/backend/blocks/apollo/_auth.py b/autogpt_platform/backend/backend/blocks/apollo/_auth.py
new file mode 100644
index 0000000000..c813c72d99
--- /dev/null
+++ b/autogpt_platform/backend/backend/blocks/apollo/_auth.py
@@ -0,0 +1,35 @@
+from typing import Literal
+
+from pydantic import SecretStr
+
+from backend.data.model import APIKeyCredentials, CredentialsField, CredentialsMetaInput
+from backend.integrations.providers import ProviderName
+
+ApolloCredentials = APIKeyCredentials
+ApolloCredentialsInput = CredentialsMetaInput[
+ Literal[ProviderName.APOLLO],
+ Literal["api_key"],
+]
+
+TEST_CREDENTIALS = APIKeyCredentials(
+ id="01234567-89ab-cdef-0123-456789abcdef",
+ provider="apollo",
+ api_key=SecretStr("mock-apollo-api-key"),
+ title="Mock Apollo API key",
+ expires_at=None,
+)
+TEST_CREDENTIALS_INPUT = {
+ "provider": TEST_CREDENTIALS.provider,
+ "id": TEST_CREDENTIALS.id,
+ "type": TEST_CREDENTIALS.type,
+ "title": TEST_CREDENTIALS.title,
+}
+
+
+def ApolloCredentialsField() -> ApolloCredentialsInput:
+ """
+    Creates an Apollo credentials input on a block.
+ """
+ return CredentialsField(
+ description="The Apollo integration can be used with an API Key.",
+ )
diff --git a/autogpt_platform/backend/backend/blocks/apollo/models.py b/autogpt_platform/backend/backend/blocks/apollo/models.py
new file mode 100644
index 0000000000..b9fa621ddc
--- /dev/null
+++ b/autogpt_platform/backend/backend/blocks/apollo/models.py
@@ -0,0 +1,543 @@
+from enum import Enum
+from typing import Any, Optional
+
+from pydantic import BaseModel
+
+from backend.data.model import SchemaField
+
+
+class PrimaryPhone(BaseModel):
+ """A primary phone in Apollo"""
+
+ number: str
+ source: str
+ sanitized_number: str
+
+
+class SenorityLevels(str, Enum):
+ """Seniority levels in Apollo"""
+
+ OWNER = "owner"
+ FOUNDER = "founder"
+ C_SUITE = "c_suite"
+ PARTNER = "partner"
+ VP = "vp"
+ HEAD = "head"
+ DIRECTOR = "director"
+ MANAGER = "manager"
+ SENIOR = "senior"
+ ENTRY = "entry"
+ INTERN = "intern"
+
+
+class ContactEmailStatuses(str, Enum):
+ """Contact email statuses in Apollo"""
+
+ VERIFIED = "verified"
+ UNVERIFIED = "unverified"
+ LIKELY_TO_ENGAGE = "likely_to_engage"
+ UNAVAILABLE = "unavailable"
+
+
+class RuleConfigStatus(BaseModel):
+ """A rule config status in Apollo"""
+
+ _id: str
+ created_at: str
+ rule_action_config_id: str
+ rule_config_id: str
+ status_cd: str
+ updated_at: str
+ id: str
+ key: str
+
+
+class ContactCampaignStatus(BaseModel):
+ """A contact campaign status in Apollo"""
+
+ id: str
+ emailer_campaign_id: str
+ send_email_from_user_id: str
+ inactive_reason: str
+ status: str
+ added_at: str
+ added_by_user_id: str
+ finished_at: str
+ paused_at: str
+ auto_unpause_at: str
+ send_email_from_email_address: str
+ send_email_from_email_account_id: str
+ manually_set_unpause: str
+ failure_reason: str
+ current_step_id: str
+ in_response_to_emailer_message_id: str
+ cc_emails: str
+ bcc_emails: str
+ to_emails: str
+
+
+class Account(BaseModel):
+ """An account in Apollo"""
+
+ id: str
+ name: str
+ website_url: str
+ blog_url: str
+ angellist_url: str
+ linkedin_url: str
+ twitter_url: str
+ facebook_url: str
+ primary_phone: PrimaryPhone
+ languages: list[str]
+ alexa_ranking: int
+ phone: str
+ linkedin_uid: str
+ founded_year: int
+ publicly_traded_symbol: str
+ publicly_traded_exchange: str
+ logo_url: str
+ chrunchbase_url: str
+ primary_domain: str
+ domain: str
+ team_id: str
+ organization_id: str
+ account_stage_id: str
+ source: str
+ original_source: str
+ creator_id: str
+ owner_id: str
+ created_at: str
+ phone_status: str
+ hubspot_id: str
+ salesforce_id: str
+ crm_owner_id: str
+ parent_account_id: str
+ sanitized_phone: str
+ # no listed type on the API docs
+ account_playbook_statues: list[Any]
+ account_rule_config_statuses: list[RuleConfigStatus]
+ existence_level: str
+ label_ids: list[str]
+ typed_custom_fields: Any
+ custom_field_errors: Any
+ modality: str
+ source_display_name: str
+ salesforce_record_id: str
+ crm_record_url: str
+
+
+class ContactEmail(BaseModel):
+ """A contact email in Apollo"""
+
+ email: str = ""
+ email_md5: str = ""
+ email_sha256: str = ""
+ email_status: str = ""
+ email_source: str = ""
+ extrapolated_email_confidence: str = ""
+ position: int = 0
+ email_from_customer: str = ""
+ free_domain: bool = True
+
+
+class EmploymentHistory(BaseModel):
+ """An employment history in Apollo"""
+
+ class Config:
+ extra = "allow"
+ arbitrary_types_allowed = True
+ from_attributes = True
+ populate_by_name = True
+
+ _id: Optional[str] = None
+ created_at: Optional[str] = None
+ current: Optional[bool] = None
+ degree: Optional[str] = None
+ description: Optional[str] = None
+ emails: Optional[str] = None
+ end_date: Optional[str] = None
+ grade_level: Optional[str] = None
+ kind: Optional[str] = None
+ major: Optional[str] = None
+ organization_id: Optional[str] = None
+ organization_name: Optional[str] = None
+ raw_address: Optional[str] = None
+ start_date: Optional[str] = None
+ title: Optional[str] = None
+ updated_at: Optional[str] = None
+ id: Optional[str] = None
+ key: Optional[str] = None
+
+
+class Breadcrumb(BaseModel):
+ """A breadcrumb in Apollo"""
+
+ label: Optional[str] = "N/A"
+ signal_field_name: Optional[str] = "N/A"
+ value: str | list | None = "N/A"
+ display_name: Optional[str] = "N/A"
+
+
+class TypedCustomField(BaseModel):
+ """A typed custom field in Apollo"""
+
+ id: Optional[str] = "N/A"
+ value: Optional[str] = "N/A"
+
+
+class Pagination(BaseModel):
+ """Pagination in Apollo"""
+
+ class Config:
+ extra = "allow" # Allow extra fields
+ arbitrary_types_allowed = True # Allow any type
+ from_attributes = True # Allow from_orm
+ populate_by_name = True # Allow field aliases to work both ways
+
+ page: int = 0
+ per_page: int = 0
+ total_entries: int = 0
+ total_pages: int = 0
+
+
+class DialerFlags(BaseModel):
+    """Dialer flags in Apollo"""
+
+ country_name: str
+ country_enabled: bool
+ high_risk_calling_enabled: bool
+ potential_high_risk_number: bool
+
+
+class PhoneNumber(BaseModel):
+ """A phone number in Apollo"""
+
+ raw_number: str = ""
+ sanitized_number: str = ""
+ type: str = ""
+ position: int = 0
+ status: str = ""
+ dnc_status: str = ""
+ dnc_other_info: str = ""
+ dailer_flags: DialerFlags = DialerFlags(
+ country_name="",
+ country_enabled=True,
+ high_risk_calling_enabled=True,
+ potential_high_risk_number=True,
+ )
+
+
+class Organization(BaseModel):
+ """An organization in Apollo"""
+
+ class Config:
+ extra = "allow"
+ arbitrary_types_allowed = True
+ from_attributes = True
+ populate_by_name = True
+
+ id: Optional[str] = "N/A"
+ name: Optional[str] = "N/A"
+ website_url: Optional[str] = "N/A"
+ blog_url: Optional[str] = "N/A"
+ angellist_url: Optional[str] = "N/A"
+ linkedin_url: Optional[str] = "N/A"
+ twitter_url: Optional[str] = "N/A"
+ facebook_url: Optional[str] = "N/A"
+ primary_phone: Optional[PrimaryPhone] = PrimaryPhone(
+ number="N/A", source="N/A", sanitized_number="N/A"
+ )
+ languages: list[str] = []
+ alexa_ranking: Optional[int] = 0
+ phone: Optional[str] = "N/A"
+ linkedin_uid: Optional[str] = "N/A"
+ founded_year: Optional[int] = 0
+ publicly_traded_symbol: Optional[str] = "N/A"
+ publicly_traded_exchange: Optional[str] = "N/A"
+ logo_url: Optional[str] = "N/A"
+ chrunchbase_url: Optional[str] = "N/A"
+ primary_domain: Optional[str] = "N/A"
+ sanitized_phone: Optional[str] = "N/A"
+ owned_by_organization_id: Optional[str] = "N/A"
+ intent_strength: Optional[str] = "N/A"
+ show_intent: bool = True
+ has_intent_signal_account: Optional[bool] = True
+ intent_signal_account: Optional[str] = "N/A"
+
+
+class Contact(BaseModel):
+ """A contact in Apollo"""
+
+ class Config:
+ extra = "allow"
+ arbitrary_types_allowed = True
+ from_attributes = True
+ populate_by_name = True
+
+ contact_roles: list[Any] = []
+ id: Optional[str] = None
+ first_name: Optional[str] = None
+ last_name: Optional[str] = None
+ name: Optional[str] = None
+ linkedin_url: Optional[str] = None
+ title: Optional[str] = None
+ contact_stage_id: Optional[str] = None
+ owner_id: Optional[str] = None
+ creator_id: Optional[str] = None
+ person_id: Optional[str] = None
+ email_needs_tickling: bool = True
+ organization_name: Optional[str] = None
+ source: Optional[str] = None
+ original_source: Optional[str] = None
+ organization_id: Optional[str] = None
+ headline: Optional[str] = None
+ photo_url: Optional[str] = None
+ present_raw_address: Optional[str] = None
+ linkededin_uid: Optional[str] = None
+ extrapolated_email_confidence: Optional[float] = None
+ salesforce_id: Optional[str] = None
+ salesforce_lead_id: Optional[str] = None
+ salesforce_contact_id: Optional[str] = None
+ saleforce_account_id: Optional[str] = None
+ crm_owner_id: Optional[str] = None
+ created_at: Optional[str] = None
+ emailer_campaign_ids: list[str] = []
+ direct_dial_status: Optional[str] = None
+ direct_dial_enrichment_failed_at: Optional[str] = None
+ email_status: Optional[str] = None
+ email_source: Optional[str] = None
+ account_id: Optional[str] = None
+ last_activity_date: Optional[str] = None
+ hubspot_vid: Optional[str] = None
+ hubspot_company_id: Optional[str] = None
+ crm_id: Optional[str] = None
+ sanitized_phone: Optional[str] = None
+ merged_crm_ids: Optional[str] = None
+ updated_at: Optional[str] = None
+ queued_for_crm_push: bool = True
+ suggested_from_rule_engine_config_id: Optional[str] = None
+ email_unsubscribed: Optional[str] = None
+ label_ids: list[Any] = []
+ has_pending_email_arcgate_request: bool = True
+ has_email_arcgate_request: bool = True
+ existence_level: Optional[str] = None
+ email: Optional[str] = None
+ email_from_customer: Optional[str] = None
+ typed_custom_fields: list[TypedCustomField] = []
+ custom_field_errors: Any = None
+ salesforce_record_id: Optional[str] = None
+ crm_record_url: Optional[str] = None
+ email_status_unavailable_reason: Optional[str] = None
+ email_true_status: Optional[str] = None
+ updated_email_true_status: bool = True
+ contact_rule_config_statuses: list[RuleConfigStatus] = []
+ source_display_name: Optional[str] = None
+ twitter_url: Optional[str] = None
+ contact_campaign_statuses: list[ContactCampaignStatus] = []
+ state: Optional[str] = None
+ city: Optional[str] = None
+ country: Optional[str] = None
+ account: Optional[Account] = None
+ contact_emails: list[ContactEmail] = []
+ organization: Optional[Organization] = None
+ employment_history: list[EmploymentHistory] = []
+ time_zone: Optional[str] = None
+ intent_strength: Optional[str] = None
+ show_intent: bool = True
+ phone_numbers: list[PhoneNumber] = []
+ account_phone_note: Optional[str] = None
+ free_domain: bool = True
+ is_likely_to_engage: bool = True
+ email_domain_catchall: bool = True
+ contact_job_change_event: Optional[str] = None
+
+
+class SearchOrganizationsRequest(BaseModel):
+ """Request for Apollo's search organizations API"""
+
+ organization_num_empoloyees_range: list[int] = SchemaField(
+ description="""The number range of employees working for the company. This enables you to find companies based on headcount. You can add multiple ranges to expand your search results.
+
+Each range you add needs to be a string, with the upper and lower numbers of the range separated only by a comma.""",
+ default=[0, 1000000],
+ )
+
+ organization_locations: list[str] = SchemaField(
+ description="""The location of the company headquarters. You can search across cities, US states, and countries.
+
+If a company has several office locations, results are still based on the headquarters location. For example, if you search chicago but a company's HQ location is in boston, any Boston-based companies will not appear in your search results, even if they match other parameters.
+
+To exclude companies based on location, use the organization_not_locations parameter.
+""",
+ default=[],
+ )
+ organizations_not_locations: list[str] = SchemaField(
+ description="""Exclude companies from search results based on the location of the company headquarters. You can use cities, US states, and countries as locations to exclude.
+
+This parameter is useful for ensuring you do not prospect in an undesirable territory. For example, if you use ireland as a value, no Ireland-based companies will appear in your search results.
+""",
+ default=[],
+ )
+ q_organization_keyword_tags: list[str] = SchemaField(
+ description="""Filter search results based on keywords associated with companies. For example, you can enter mining as a value to return only companies that have an association with the mining industry."""
+ )
+ q_organization_name: str = SchemaField(
+ description="""Filter search results to include a specific company name.
+
+If the value you enter for this parameter does not match with a company's name, the company will not appear in search results, even if it matches other parameters. Partial matches are accepted. For example, if you filter by the value marketing, a company called NY Marketing Unlimited would still be eligible as a search result, but NY Market Analysis would not be eligible."""
+ )
+ organization_ids: list[str] = SchemaField(
+ description="""The Apollo IDs for the companies you want to include in your search results. Each company in the Apollo database is assigned a unique ID.
+
+To find IDs, identify the values for organization_id when you call this endpoint.""",
+ default=[],
+ )
+ max_results: int = SchemaField(
+ description="""The maximum number of results to return. If you don't specify this parameter, the default is 100.""",
+ default=100,
+ ge=1,
+ le=50000,
+ advanced=True,
+ )
+
+ page: int = SchemaField(
+ description="""The page number of the Apollo data that you want to retrieve.
+
+Use this parameter in combination with the per_page parameter to make search results more navigable and improve the performance of the endpoint.
+ default=1,
+ )
+ per_page: int = SchemaField(
+        description="""The number of search results that should be returned for each page. Limiting the number of results per page improves the endpoint's performance.
+
+Use the page parameter to search the different pages of data.""",
+ default=100,
+ )
+
+
+class SearchOrganizationsResponse(BaseModel):
+ """Response from Apollo's search organizations API"""
+
+ breadcrumbs: list[Breadcrumb] = []
+ partial_results_only: bool = True
+ has_join: bool = True
+ disable_eu_prospecting: bool = True
+ partial_results_limit: int = 0
+ pagination: Pagination = Pagination(
+ page=0, per_page=0, total_entries=0, total_pages=0
+ )
+ # no listed type on the API docs
+ accounts: list[Any] = []
+ organizations: list[Organization] = []
+ models_ids: list[str] = []
+ num_fetch_result: Optional[str] = "N/A"
+ derived_params: Optional[str] = "N/A"
+
+
+class SearchPeopleRequest(BaseModel):
+ """Request for Apollo's search people API"""
+
+ person_titles: list[str] = SchemaField(
+ description="""Job titles held by the people you want to find. For a person to be included in search results, they only need to match 1 of the job titles you add. Adding more job titles expands your search results.
+
+Results also include job titles with the same terms, even if they are not exact matches. For example, searching for marketing manager might return people with the job title content marketing manager.
+
+Use this parameter in combination with the person_seniorities[] parameter to find people based on specific job functions and seniority levels.
+""",
+ default=[],
+ placeholder="marketing manager",
+ )
+ person_locations: list[str] = SchemaField(
+ description="""The location where people live. You can search across cities, US states, and countries.
+
+To find people based on the headquarters locations of their current employer, use the organization_locations parameter.""",
+ default=[],
+ )
+ person_seniorities: list[SenorityLevels] = SchemaField(
+ description="""The job seniority that people hold within their current employer. This enables you to find people that currently hold positions at certain reporting levels, such as Director level or senior IC level.
+
+For a person to be included in search results, they only need to match 1 of the seniorities you add. Adding more seniorities expands your search results.
+
+Searches only return results based on their current job title, so searching for Director-level employees only returns people that currently hold a Director-level title. If someone was previously a Director, but is currently a VP, they would not be included in your search results.
+
+Use this parameter in combination with the person_titles[] parameter to find people based on specific job functions and seniority levels.""",
+ default=[],
+ )
+ organization_locations: list[str] = SchemaField(
+ description="""The location of the company headquarters for a person's current employer. You can search across cities, US states, and countries.
+
+If a company has several office locations, results are still based on the headquarters location. For example, if you search chicago but a company's HQ location is in boston, people that work for the Boston-based company will not appear in your results, even if they match other parameters.
+
+To find people based on their personal location, use the person_locations parameter.""",
+ default=[],
+ )
+ q_organization_domains: list[str] = SchemaField(
+ description="""The domain name for the person's employer. This can be the current employer or a previous employer. Do not include www., the @ symbol, or similar.
+
+You can add multiple domains to search across companies.
+
+ Examples: apollo.io and microsoft.com""",
+ default=[],
+ )
+ contact_email_statuses: list[ContactEmailStatuses] = SchemaField(
+ description="""The email statuses for the people you want to find. You can add multiple statuses to expand your search.""",
+ default=[],
+ )
+ organization_ids: list[str] = SchemaField(
+ description="""The Apollo IDs for the companies (employers) you want to include in your search results. Each company in the Apollo database is assigned a unique ID.
+
+To find IDs, call the Organization Search endpoint and identify the values for organization_id.""",
+ default=[],
+ )
+ organization_num_empoloyees_range: list[int] = SchemaField(
+ description="""The number range of employees working for the company. This enables you to find companies based on headcount. You can add multiple ranges to expand your search results.
+
+Each range you add needs to be a string, with the upper and lower numbers of the range separated only by a comma.""",
+ default=[],
+ )
+ q_keywords: str = SchemaField(
+ description="""A string of words over which we want to filter the results""",
+ default="",
+ )
+ page: int = SchemaField(
+ description="""The page number of the Apollo data that you want to retrieve.
+
+Use this parameter in combination with the per_page parameter to make search results more navigable and improve the performance of the endpoint.
+ default=1,
+ )
+ per_page: int = SchemaField(
+        description="""The number of search results that should be returned for each page. Limiting the number of results per page improves the endpoint's performance.
+
+Use the page parameter to search the different pages of data.""",
+ default=100,
+ )
+ max_results: int = SchemaField(
+ description="""The maximum number of results to return. If you don't specify this parameter, the default is 100.""",
+ default=100,
+ ge=1,
+ le=50000,
+ advanced=True,
+ )
+
+
+class SearchPeopleResponse(BaseModel):
+ """Response from Apollo's search people API"""
+
+ class Config:
+ extra = "allow" # Allow extra fields
+ arbitrary_types_allowed = True # Allow any type
+ from_attributes = True # Allow from_orm
+ populate_by_name = True # Allow field aliases to work both ways
+
+ breadcrumbs: list[Breadcrumb] = []
+ partial_results_only: bool = True
+ has_join: bool = True
+ disable_eu_prospecting: bool = True
+ partial_results_limit: int = 0
+ pagination: Pagination = Pagination(
+ page=0, per_page=0, total_entries=0, total_pages=0
+ )
+ contacts: list[Contact] = []
+ people: list[Contact] = []
+ model_ids: list[str] = []
+ num_fetch_result: Optional[str] = "N/A"
+ derived_params: Optional[str] = "N/A"
diff --git a/autogpt_platform/backend/backend/blocks/apollo/organization.py b/autogpt_platform/backend/backend/blocks/apollo/organization.py
new file mode 100644
index 0000000000..45ce769fcf
--- /dev/null
+++ b/autogpt_platform/backend/backend/blocks/apollo/organization.py
@@ -0,0 +1,219 @@
+from backend.blocks.apollo._api import ApolloClient
+from backend.blocks.apollo._auth import (
+ TEST_CREDENTIALS,
+ TEST_CREDENTIALS_INPUT,
+ ApolloCredentials,
+ ApolloCredentialsInput,
+)
+from backend.blocks.apollo.models import (
+ Organization,
+ PrimaryPhone,
+ SearchOrganizationsRequest,
+)
+from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
+from backend.data.model import SchemaField
+
+
+class SearchOrganizationsBlock(Block):
+ """Search for organizations in Apollo"""
+
+ class Input(BlockSchema):
+ organization_num_empoloyees_range: list[int] = SchemaField(
+ description="""The number range of employees working for the company. This enables you to find companies based on headcount. You can add multiple ranges to expand your search results.
+
+Each range you add needs to be a string, with the upper and lower numbers of the range separated only by a comma.""",
+ default=[0, 1000000],
+ )
+
+ organization_locations: list[str] = SchemaField(
+ description="""The location of the company headquarters. You can search across cities, US states, and countries.
+
+If a company has several office locations, results are still based on the headquarters location. For example, if you search chicago but a company's HQ location is in boston, any Boston-based companies will not appear in your search results, even if they match other parameters.
+
+To exclude companies based on location, use the organizations_not_locations parameter.
+""",
+ default=[],
+ )
+ organizations_not_locations: list[str] = SchemaField(
+ description="""Exclude companies from search results based on the location of the company headquarters. You can use cities, US states, and countries as locations to exclude.
+
+This parameter is useful for ensuring you do not prospect in an undesirable territory. For example, if you use ireland as a value, no Ireland-based companies will appear in your search results.
+""",
+ default=[],
+ )
+ q_organization_keyword_tags: list[str] = SchemaField(
+ description="""Filter search results based on keywords associated with companies. For example, you can enter mining as a value to return only companies that have an association with the mining industry.""",
+ default=[],
+ )
+ q_organization_name: str = SchemaField(
+ description="""Filter search results to include a specific company name.
+
+If the value you enter for this parameter does not match with a company's name, the company will not appear in search results, even if it matches other parameters. Partial matches are accepted. For example, if you filter by the value marketing, a company called NY Marketing Unlimited would still be eligible as a search result, but NY Market Analysis would not be eligible.""",
+ default="",
+ advanced=False,
+ )
+ organization_ids: list[str] = SchemaField(
+ description="""The Apollo IDs for the companies you want to include in your search results. Each company in the Apollo database is assigned a unique ID.
+
+To find IDs, identify the values for organization_id when you call this endpoint.""",
+ default=[],
+ )
+ max_results: int = SchemaField(
+ description="""The maximum number of results to return. If you don't specify this parameter, the default is 100.""",
+ default=100,
+ ge=1,
+ le=50000,
+ advanced=True,
+ )
+ credentials: ApolloCredentialsInput = SchemaField(
+ description="Apollo credentials",
+ )
+
+ class Output(BlockSchema):
+ organizations: list[Organization] = SchemaField(
+ description="List of organizations found",
+ default=[],
+ )
+ organization: Organization = SchemaField(
+ description="Each found organization, one at a time",
+ )
+ error: str = SchemaField(
+ description="Error message if the search failed",
+ default="",
+ )
+
+ def __init__(self):
+ super().__init__(
+ id="3d71270d-599e-4148-9b95-71b35d2f44f0",
+ description="Search for organizations in Apollo",
+ categories={BlockCategory.SEARCH},
+ input_schema=SearchOrganizationsBlock.Input,
+ output_schema=SearchOrganizationsBlock.Output,
+ test_credentials=TEST_CREDENTIALS,
+ test_input={"query": "Google", "credentials": TEST_CREDENTIALS_INPUT},
+ test_output=[
+ (
+ "organization",
+ Organization(
+ id="1",
+ name="Google",
+ website_url="https://google.com",
+ blog_url="https://google.com/blog",
+ angellist_url="https://angel.co/google",
+ linkedin_url="https://linkedin.com/company/google",
+ twitter_url="https://twitter.com/google",
+ facebook_url="https://facebook.com/google",
+ primary_phone=PrimaryPhone(
+ source="google",
+ number="1234567890",
+ sanitized_number="1234567890",
+ ),
+ languages=["en"],
+ alexa_ranking=1000,
+ phone="1234567890",
+ linkedin_uid="1234567890",
+ founded_year=2000,
+ publicly_traded_symbol="GOOGL",
+ publicly_traded_exchange="NASDAQ",
+ logo_url="https://google.com/logo.png",
+ chrunchbase_url="https://chrunchbase.com/google",
+ primary_domain="google.com",
+ sanitized_phone="1234567890",
+ owned_by_organization_id="1",
+ intent_strength="strong",
+ show_intent=True,
+ has_intent_signal_account=True,
+ intent_signal_account="1",
+ ),
+ ),
+ (
+ "organizations",
+ [
+ Organization(
+ id="1",
+ name="Google",
+ website_url="https://google.com",
+ blog_url="https://google.com/blog",
+ angellist_url="https://angel.co/google",
+ linkedin_url="https://linkedin.com/company/google",
+ twitter_url="https://twitter.com/google",
+ facebook_url="https://facebook.com/google",
+ primary_phone=PrimaryPhone(
+ source="google",
+ number="1234567890",
+ sanitized_number="1234567890",
+ ),
+ languages=["en"],
+ alexa_ranking=1000,
+ phone="1234567890",
+ linkedin_uid="1234567890",
+ founded_year=2000,
+ publicly_traded_symbol="GOOGL",
+ publicly_traded_exchange="NASDAQ",
+ logo_url="https://google.com/logo.png",
+ chrunchbase_url="https://chrunchbase.com/google",
+ primary_domain="google.com",
+ sanitized_phone="1234567890",
+ owned_by_organization_id="1",
+ intent_strength="strong",
+ show_intent=True,
+ has_intent_signal_account=True,
+ intent_signal_account="1",
+ ),
+ ],
+ ),
+ ],
+ test_mock={
+ "search_organizations": lambda *args, **kwargs: [
+ Organization(
+ id="1",
+ name="Google",
+ website_url="https://google.com",
+ blog_url="https://google.com/blog",
+ angellist_url="https://angel.co/google",
+ linkedin_url="https://linkedin.com/company/google",
+ twitter_url="https://twitter.com/google",
+ facebook_url="https://facebook.com/google",
+ primary_phone=PrimaryPhone(
+ source="google",
+ number="1234567890",
+ sanitized_number="1234567890",
+ ),
+ languages=["en"],
+ alexa_ranking=1000,
+ phone="1234567890",
+ linkedin_uid="1234567890",
+ founded_year=2000,
+ publicly_traded_symbol="GOOGL",
+ publicly_traded_exchange="NASDAQ",
+ logo_url="https://google.com/logo.png",
+ chrunchbase_url="https://chrunchbase.com/google",
+ primary_domain="google.com",
+ sanitized_phone="1234567890",
+ owned_by_organization_id="1",
+ intent_strength="strong",
+ show_intent=True,
+ has_intent_signal_account=True,
+ intent_signal_account="1",
+ )
+ ]
+ },
+ )
+
+ @staticmethod
+ def search_organizations(
+ query: SearchOrganizationsRequest, credentials: ApolloCredentials
+ ) -> list[Organization]:
+ client = ApolloClient(credentials)
+ return client.search_organizations(query)
+
+ def run(
+ self, input_data: Input, *, credentials: ApolloCredentials, **kwargs
+ ) -> BlockOutput:
+ query = SearchOrganizationsRequest(
+ **input_data.model_dump(exclude={"credentials"})
+ )
+ organizations = self.search_organizations(query, credentials)
+ for organization in organizations:
+ yield "organization", organization
+ yield "organizations", organizations
diff --git a/autogpt_platform/backend/backend/blocks/apollo/people.py b/autogpt_platform/backend/backend/blocks/apollo/people.py
new file mode 100644
index 0000000000..ac75e03d1d
--- /dev/null
+++ b/autogpt_platform/backend/backend/blocks/apollo/people.py
@@ -0,0 +1,394 @@
+from backend.blocks.apollo._api import ApolloClient
+from backend.blocks.apollo._auth import (
+ TEST_CREDENTIALS,
+ TEST_CREDENTIALS_INPUT,
+ ApolloCredentials,
+ ApolloCredentialsInput,
+)
+from backend.blocks.apollo.models import (
+ Contact,
+ ContactEmailStatuses,
+ SearchPeopleRequest,
+ SenorityLevels,
+)
+from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
+from backend.data.model import SchemaField
+
+
+class SearchPeopleBlock(Block):
+ """Search for people in Apollo"""
+
+ class Input(BlockSchema):
+ person_titles: list[str] = SchemaField(
+ description="""Job titles held by the people you want to find. For a person to be included in search results, they only need to match 1 of the job titles you add. Adding more job titles expands your search results.
+
+ Results also include job titles with the same terms, even if they are not exact matches. For example, searching for marketing manager might return people with the job title content marketing manager.
+
+ Use this parameter in combination with the person_seniorities[] parameter to find people based on specific job functions and seniority levels.
+ """,
+ default=[],
+ advanced=False,
+ )
+ person_locations: list[str] = SchemaField(
+ description="""The location where people live. You can search across cities, US states, and countries.
+
+ To find people based on the headquarters locations of their current employer, use the organization_locations parameter.""",
+ default=[],
+ advanced=False,
+ )
+ person_seniorities: list[SenorityLevels] = SchemaField(
+ description="""The job seniority that people hold within their current employer. This enables you to find people that currently hold positions at certain reporting levels, such as Director level or senior IC level.
+
+ For a person to be included in search results, they only need to match 1 of the seniorities you add. Adding more seniorities expands your search results.
+
+ Searches only return results based on their current job title, so searching for Director-level employees only returns people that currently hold a Director-level title. If someone was previously a Director, but is currently a VP, they would not be included in your search results.
+
+ Use this parameter in combination with the person_titles[] parameter to find people based on specific job functions and seniority levels.""",
+ default=[],
+ advanced=False,
+ )
+ organization_locations: list[str] = SchemaField(
+ description="""The location of the company headquarters for a person's current employer. You can search across cities, US states, and countries.
+
+ If a company has several office locations, results are still based on the headquarters location. For example, if you search chicago but a company's HQ location is in boston, people that work for the Boston-based company will not appear in your results, even if they match other parameters.
+
+ To find people based on their personal location, use the person_locations parameter.""",
+ default=[],
+ advanced=False,
+ )
+ q_organization_domains: list[str] = SchemaField(
+ description="""The domain name for the person's employer. This can be the current employer or a previous employer. Do not include www., the @ symbol, or similar.
+
+ You can add multiple domains to search across companies.
+
+ Examples: apollo.io and microsoft.com""",
+ default=[],
+ advanced=False,
+ )
+ contact_email_statuses: list[ContactEmailStatuses] = SchemaField(
+ description="""The email statuses for the people you want to find. You can add multiple statuses to expand your search.""",
+ default=[],
+ advanced=False,
+ )
+ organization_ids: list[str] = SchemaField(
+ description="""The Apollo IDs for the companies (employers) you want to include in your search results. Each company in the Apollo database is assigned a unique ID.
+
+ To find IDs, call the Organization Search endpoint and identify the values for organization_id.""",
+ default=[],
+ advanced=False,
+ )
+ organization_num_empoloyees_range: list[int] = SchemaField(
+ description="""The number range of employees working for the company. This enables you to find companies based on headcount. You can add multiple ranges to expand your search results.
+
+ Each range you add needs to be a string, with the upper and lower numbers of the range separated only by a comma.""",
+ default=[],
+ advanced=False,
+ )
+ q_keywords: str = SchemaField(
+ description="""A string of words over which we want to filter the results""",
+ default="",
+ advanced=False,
+ )
+ max_results: int = SchemaField(
+ description="""The maximum number of results to return. If you don't specify this parameter, the default is 100.""",
+ default=100,
+ ge=1,
+ le=50000,
+ advanced=True,
+ )
+
+ credentials: ApolloCredentialsInput = SchemaField(
+ description="Apollo credentials",
+ )
+
+ class Output(BlockSchema):
+ people: list[Contact] = SchemaField(
+ description="List of people found",
+ default=[],
+ )
+ person: Contact = SchemaField(
+ description="Each found person, one at a time",
+ )
+ error: str = SchemaField(
+ description="Error message if the search failed",
+ default="",
+ )
+
+ def __init__(self):
+ super().__init__(
+ id="c2adb3aa-5aae-488d-8a6e-4eb8c23e2ed6",
+ description="Search for people in Apollo",
+ categories={BlockCategory.SEARCH},
+ input_schema=SearchPeopleBlock.Input,
+ output_schema=SearchPeopleBlock.Output,
+ test_credentials=TEST_CREDENTIALS,
+ test_input={"credentials": TEST_CREDENTIALS_INPUT},
+ test_output=[
+ (
+ "person",
+ Contact(
+ contact_roles=[],
+ id="1",
+ name="John Doe",
+ first_name="John",
+ last_name="Doe",
+ linkedin_url="https://www.linkedin.com/in/johndoe",
+ title="Software Engineer",
+ organization_name="Google",
+ organization_id="123456",
+ contact_stage_id="1",
+ owner_id="1",
+ creator_id="1",
+ person_id="1",
+ email_needs_tickling=True,
+ source="apollo",
+ original_source="apollo",
+ headline="Software Engineer",
+ photo_url="https://www.linkedin.com/in/johndoe",
+ present_raw_address="123 Main St, Anytown, USA",
+ linkededin_uid="123456",
+ extrapolated_email_confidence=0.8,
+ salesforce_id="123456",
+ salesforce_lead_id="123456",
+ salesforce_contact_id="123456",
+ saleforce_account_id="123456",
+ crm_owner_id="123456",
+ created_at="2021-01-01",
+ emailer_campaign_ids=[],
+ direct_dial_status="active",
+ direct_dial_enrichment_failed_at="2021-01-01",
+ email_status="active",
+ email_source="apollo",
+ account_id="123456",
+ last_activity_date="2021-01-01",
+ hubspot_vid="123456",
+ hubspot_company_id="123456",
+ crm_id="123456",
+ sanitized_phone="123456",
+ merged_crm_ids="123456",
+ updated_at="2021-01-01",
+ queued_for_crm_push=True,
+ suggested_from_rule_engine_config_id="123456",
+ email_unsubscribed=None,
+ label_ids=[],
+ has_pending_email_arcgate_request=True,
+ has_email_arcgate_request=True,
+ existence_level=None,
+ email=None,
+ email_from_customer=None,
+ typed_custom_fields=[],
+ custom_field_errors=None,
+ salesforce_record_id=None,
+ crm_record_url=None,
+ email_status_unavailable_reason=None,
+ email_true_status=None,
+ updated_email_true_status=True,
+ contact_rule_config_statuses=[],
+ source_display_name=None,
+ twitter_url=None,
+ contact_campaign_statuses=[],
+ state=None,
+ city=None,
+ country=None,
+ account=None,
+ contact_emails=[],
+ organization=None,
+ employment_history=[],
+ time_zone=None,
+ intent_strength=None,
+ show_intent=True,
+ phone_numbers=[],
+ account_phone_note=None,
+ free_domain=True,
+ is_likely_to_engage=True,
+ email_domain_catchall=True,
+ contact_job_change_event=None,
+ ),
+ ),
+ (
+ "people",
+ [
+ Contact(
+ contact_roles=[],
+ id="1",
+ name="John Doe",
+ first_name="John",
+ last_name="Doe",
+ linkedin_url="https://www.linkedin.com/in/johndoe",
+ title="Software Engineer",
+ organization_name="Google",
+ organization_id="123456",
+ contact_stage_id="1",
+ owner_id="1",
+ creator_id="1",
+ person_id="1",
+ email_needs_tickling=True,
+ source="apollo",
+ original_source="apollo",
+ headline="Software Engineer",
+ photo_url="https://www.linkedin.com/in/johndoe",
+ present_raw_address="123 Main St, Anytown, USA",
+ linkededin_uid="123456",
+ extrapolated_email_confidence=0.8,
+ salesforce_id="123456",
+ salesforce_lead_id="123456",
+ salesforce_contact_id="123456",
+ saleforce_account_id="123456",
+ crm_owner_id="123456",
+ created_at="2021-01-01",
+ emailer_campaign_ids=[],
+ direct_dial_status="active",
+ direct_dial_enrichment_failed_at="2021-01-01",
+ email_status="active",
+ email_source="apollo",
+ account_id="123456",
+ last_activity_date="2021-01-01",
+ hubspot_vid="123456",
+ hubspot_company_id="123456",
+ crm_id="123456",
+ sanitized_phone="123456",
+ merged_crm_ids="123456",
+ updated_at="2021-01-01",
+ queued_for_crm_push=True,
+ suggested_from_rule_engine_config_id="123456",
+ email_unsubscribed=None,
+ label_ids=[],
+ has_pending_email_arcgate_request=True,
+ has_email_arcgate_request=True,
+ existence_level=None,
+ email=None,
+ email_from_customer=None,
+ typed_custom_fields=[],
+ custom_field_errors=None,
+ salesforce_record_id=None,
+ crm_record_url=None,
+ email_status_unavailable_reason=None,
+ email_true_status=None,
+ updated_email_true_status=True,
+ contact_rule_config_statuses=[],
+ source_display_name=None,
+ twitter_url=None,
+ contact_campaign_statuses=[],
+ state=None,
+ city=None,
+ country=None,
+ account=None,
+ contact_emails=[],
+ organization=None,
+ employment_history=[],
+ time_zone=None,
+ intent_strength=None,
+ show_intent=True,
+ phone_numbers=[],
+ account_phone_note=None,
+ free_domain=True,
+ is_likely_to_engage=True,
+ email_domain_catchall=True,
+ contact_job_change_event=None,
+ ),
+ ],
+ ),
+ ],
+ test_mock={
+ "search_people": lambda query, credentials: [
+ Contact(
+ id="1",
+ name="John Doe",
+ first_name="John",
+ last_name="Doe",
+ linkedin_url="https://www.linkedin.com/in/johndoe",
+ title="Software Engineer",
+ organization_name="Google",
+ organization_id="123456",
+ contact_stage_id="1",
+ owner_id="1",
+ creator_id="1",
+ person_id="1",
+ email_needs_tickling=True,
+ source="apollo",
+ original_source="apollo",
+ headline="Software Engineer",
+ photo_url="https://www.linkedin.com/in/johndoe",
+ present_raw_address="123 Main St, Anytown, USA",
+ linkededin_uid="123456",
+ extrapolated_email_confidence=0.8,
+ salesforce_id="123456",
+ salesforce_lead_id="123456",
+ salesforce_contact_id="123456",
+ saleforce_account_id="123456",
+ crm_owner_id="123456",
+ created_at="2021-01-01",
+ emailer_campaign_ids=[],
+ direct_dial_status="active",
+ direct_dial_enrichment_failed_at="2021-01-01",
+ email_status="active",
+ email_source="apollo",
+ account_id="123456",
+ last_activity_date="2021-01-01",
+ hubspot_vid="123456",
+ hubspot_company_id="123456",
+ crm_id="123456",
+ sanitized_phone="123456",
+ merged_crm_ids="123456",
+ updated_at="2021-01-01",
+ queued_for_crm_push=True,
+ suggested_from_rule_engine_config_id="123456",
+ email_unsubscribed=None,
+ label_ids=[],
+ has_pending_email_arcgate_request=True,
+ has_email_arcgate_request=True,
+ existence_level=None,
+ email=None,
+ email_from_customer=None,
+ typed_custom_fields=[],
+ custom_field_errors=None,
+ salesforce_record_id=None,
+ crm_record_url=None,
+ email_status_unavailable_reason=None,
+ email_true_status=None,
+ updated_email_true_status=True,
+ contact_rule_config_statuses=[],
+ source_display_name=None,
+ twitter_url=None,
+ contact_campaign_statuses=[],
+ state=None,
+ city=None,
+ country=None,
+ account=None,
+ contact_emails=[],
+ organization=None,
+ employment_history=[],
+ time_zone=None,
+ intent_strength=None,
+ show_intent=True,
+ phone_numbers=[],
+ account_phone_note=None,
+ free_domain=True,
+ is_likely_to_engage=True,
+ email_domain_catchall=True,
+ contact_job_change_event=None,
+ ),
+ ]
+ },
+ )
+
+ @staticmethod
+ def search_people(
+ query: SearchPeopleRequest, credentials: ApolloCredentials
+ ) -> list[Contact]:
+ client = ApolloClient(credentials)
+ return client.search_people(query)
+
+ def run(
+ self,
+ input_data: Input,
+ *,
+ credentials: ApolloCredentials,
+ **kwargs,
+ ) -> BlockOutput:
+
+ query = SearchPeopleRequest(**input_data.model_dump(exclude={"credentials"}))
+ people = self.search_people(query, credentials)
+ for person in people:
+ yield "person", person
+ yield "people", people
diff --git a/autogpt_platform/backend/backend/blocks/smartlead/_api.py b/autogpt_platform/backend/backend/blocks/smartlead/_api.py
new file mode 100644
index 0000000000..8caa266c2c
--- /dev/null
+++ b/autogpt_platform/backend/backend/blocks/smartlead/_api.py
@@ -0,0 +1,97 @@
+from backend.blocks.smartlead.models import (
+ AddLeadsRequest,
+ AddLeadsToCampaignResponse,
+ CreateCampaignRequest,
+ CreateCampaignResponse,
+ SaveSequencesRequest,
+ SaveSequencesResponse,
+)
+from backend.util.request import Requests
+
+
+class SmartLeadClient:
+ """Client for the SmartLead API"""
+
+ # This api is stupid and requires your api key in the url. DO NOT RAISE ERRORS FOR BAD REQUESTS.
+ # FILTER OUT THE API KEY FROM THE ERROR MESSAGE.
+
+ API_URL = "https://server.smartlead.ai/api/v1"
+
+ def __init__(self, api_key: str):
+ self.api_key = api_key
+ self.requests = Requests()
+
+ def _add_auth_to_url(self, url: str) -> str:
+ return f"{url}?api_key={self.api_key}"
+
+ def _handle_error(self, e: Exception) -> str:
+ return e.__str__().replace(self.api_key, "API KEY")
+
+ def create_campaign(self, request: CreateCampaignRequest) -> CreateCampaignResponse:
+ try:
+ response = self.requests.post(
+ self._add_auth_to_url(f"{self.API_URL}/campaigns/create"),
+ json=request.model_dump(),
+ )
+ response_data = response.json()
+ return CreateCampaignResponse(**response_data)
+ except ValueError as e:
+ raise ValueError(f"Invalid response format: {str(e)}")
+ except Exception as e:
+ raise ValueError(f"Failed to create campaign: {self._handle_error(e)}")
+
+ def add_leads_to_campaign(
+ self, request: AddLeadsRequest
+ ) -> AddLeadsToCampaignResponse:
+ try:
+ response = self.requests.post(
+ self._add_auth_to_url(
+ f"{self.API_URL}/campaigns/{request.campaign_id}/leads"
+ ),
+ json=request.model_dump(exclude={"campaign_id"}),
+ )
+ response_data = response.json()
+ response_parsed = AddLeadsToCampaignResponse(**response_data)
+ if not response_parsed.ok:
+ raise ValueError(
+ f"Failed to add leads to campaign: {response_parsed.error}"
+ )
+ return response_parsed
+ except ValueError as e:
+ raise ValueError(f"Invalid response format: {str(e)}")
+ except Exception as e:
+ raise ValueError(
+ f"Failed to add leads to campaign: {self._handle_error(e)}"
+ )
+
+ def save_campaign_sequences(
+ self, campaign_id: int, request: SaveSequencesRequest
+ ) -> SaveSequencesResponse:
+ """
+ Save sequences within a campaign.
+
+ Args:
+ campaign_id: ID of the campaign to save sequences for
+ request: SaveSequencesRequest containing the sequences configuration
+
+ Returns:
+ SaveSequencesResponse with the result of the operation
+
+ Note:
+ For variant_distribution_type:
+ - MANUAL_EQUAL: Equally distributes variants across leads
+ - AI_EQUAL: Requires winning_metric_property and lead_distribution_percentage
+ - MANUAL_PERCENTAGE: Requires variant_distribution_percentage in seq_variants
+ """
+ try:
+ response = self.requests.post(
+ self._add_auth_to_url(
+ f"{self.API_URL}/campaigns/{campaign_id}/sequences"
+ ),
+ json=request.model_dump(exclude_none=True),
+ )
+ return SaveSequencesResponse(**response.json())
+ except Exception as e:
+ raise ValueError(
+ f"Failed to save campaign sequences: {e.__str__().replace(self.api_key, 'API KEY')}"
+ )
diff --git a/autogpt_platform/backend/backend/blocks/smartlead/_auth.py b/autogpt_platform/backend/backend/blocks/smartlead/_auth.py
new file mode 100644
index 0000000000..219524126c
--- /dev/null
+++ b/autogpt_platform/backend/backend/blocks/smartlead/_auth.py
@@ -0,0 +1,35 @@
+from typing import Literal
+
+from pydantic import SecretStr
+
+from backend.data.model import APIKeyCredentials, CredentialsField, CredentialsMetaInput
+from backend.integrations.providers import ProviderName
+
+SmartLeadCredentials = APIKeyCredentials
+SmartLeadCredentialsInput = CredentialsMetaInput[
+ Literal[ProviderName.SMARTLEAD],
+ Literal["api_key"],
+]
+
+TEST_CREDENTIALS = APIKeyCredentials(
+ id="01234567-89ab-cdef-0123-456789abcdef",
+ provider="smartlead",
+ api_key=SecretStr("mock-smartlead-api-key"),
+ title="Mock SmartLead API key",
+ expires_at=None,
+)
+TEST_CREDENTIALS_INPUT = {
+ "provider": TEST_CREDENTIALS.provider,
+ "id": TEST_CREDENTIALS.id,
+ "type": TEST_CREDENTIALS.type,
+ "title": TEST_CREDENTIALS.title,
+}
+
+
+def SmartLeadCredentialsField() -> SmartLeadCredentialsInput:
+ """
+ Creates a SmartLead credentials input on a block.
+ """
+ return CredentialsField(
+ description="The SmartLead integration can be used with an API Key.",
+ )
diff --git a/autogpt_platform/backend/backend/blocks/smartlead/campaign.py b/autogpt_platform/backend/backend/blocks/smartlead/campaign.py
new file mode 100644
index 0000000000..942ebcd471
--- /dev/null
+++ b/autogpt_platform/backend/backend/blocks/smartlead/campaign.py
@@ -0,0 +1,326 @@
+from backend.blocks.smartlead._api import SmartLeadClient
+from backend.blocks.smartlead._auth import (
+ TEST_CREDENTIALS,
+ TEST_CREDENTIALS_INPUT,
+ SmartLeadCredentials,
+ SmartLeadCredentialsInput,
+)
+from backend.blocks.smartlead.models import (
+ AddLeadsRequest,
+ AddLeadsToCampaignResponse,
+ CreateCampaignRequest,
+ CreateCampaignResponse,
+ LeadInput,
+ LeadUploadSettings,
+ SaveSequencesRequest,
+ SaveSequencesResponse,
+ Sequence,
+)
+from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
+from backend.data.model import SchemaField
+
+
+class CreateCampaignBlock(Block):
+ """Create a campaign in SmartLead"""
+
+ class Input(BlockSchema):
+ name: str = SchemaField(
+ description="The name of the campaign",
+ )
+ credentials: SmartLeadCredentialsInput = SchemaField(
+ description="SmartLead credentials",
+ )
+
+ class Output(BlockSchema):
+ id: int = SchemaField(
+ description="The ID of the created campaign",
+ )
+ name: str = SchemaField(
+ description="The name of the created campaign",
+ )
+ created_at: str = SchemaField(
+ description="The date and time the campaign was created",
+ )
+ error: str = SchemaField(
+ description="Error message if the search failed",
+ default="",
+ )
+
+ def __init__(self):
+ super().__init__(
+ id="8865699f-9188-43c4-89b0-79c84cfaa03e",
+ description="Create a campaign in SmartLead",
+ categories={BlockCategory.CRM},
+ input_schema=CreateCampaignBlock.Input,
+ output_schema=CreateCampaignBlock.Output,
+ test_credentials=TEST_CREDENTIALS,
+ test_input={"name": "Test Campaign", "credentials": TEST_CREDENTIALS_INPUT},
+ test_output=[
+ (
+ "id",
+ 1,
+ ),
+ (
+ "name",
+ "Test Campaign",
+ ),
+ (
+ "created_at",
+ "2024-01-01T00:00:00Z",
+ ),
+ ],
+ test_mock={
+ "create_campaign": lambda name, credentials: CreateCampaignResponse(
+ ok=True,
+ id=1,
+ name=name,
+ created_at="2024-01-01T00:00:00Z",
+ )
+ },
+ )
+
+ @staticmethod
+ def create_campaign(
+ name: str, credentials: SmartLeadCredentials
+ ) -> CreateCampaignResponse:
+ client = SmartLeadClient(credentials.api_key.get_secret_value())
+ return client.create_campaign(CreateCampaignRequest(name=name))
+
+ def run(
+ self,
+ input_data: Input,
+ *,
+ credentials: SmartLeadCredentials,
+ **kwargs,
+ ) -> BlockOutput:
+ response = self.create_campaign(input_data.name, credentials)
+
+ yield "id", response.id
+ yield "name", response.name
+ yield "created_at", response.created_at
+ if not response.ok:
+ yield "error", "Failed to create campaign"
+
+
+class AddLeadToCampaignBlock(Block):
+ """Add a lead to a campaign in SmartLead"""
+
+ class Input(BlockSchema):
+ campaign_id: int = SchemaField(
+ description="The ID of the campaign to add the lead to",
+ )
+ lead_list: list[LeadInput] = SchemaField(
+ description="An array of JSON objects, each representing a lead's details. Can hold max 100 leads.",
+ max_length=100,
+ default=[],
+ advanced=False,
+ )
+ settings: LeadUploadSettings = SchemaField(
+ description="Settings for lead upload",
+ default=LeadUploadSettings(),
+ )
+ credentials: SmartLeadCredentialsInput = SchemaField(
+ description="SmartLead credentials",
+ )
+
+ class Output(BlockSchema):
+ campaign_id: int = SchemaField(
+ description="The ID of the campaign the lead was added to (passed through)",
+ )
+ upload_count: int = SchemaField(
+ description="The number of leads added to the campaign",
+ )
+ already_added_to_campaign: int = SchemaField(
+ description="The number of leads that were already added to the campaign",
+ )
+ duplicate_count: int = SchemaField(
+ description="The number of emails that were duplicates",
+ )
+ invalid_email_count: int = SchemaField(
+ description="The number of emails that were invalidly formatted",
+ )
+ is_lead_limit_exhausted: bool = SchemaField(
+ description="Whether the lead limit was exhausted",
+ )
+ lead_import_stopped_count: int = SchemaField(
+ description="The number of leads that were not added to the campaign because the lead import was stopped",
+ )
+ error: str = SchemaField(
+ description="Error message if the lead was not added to the campaign",
+ default="",
+ )
+
+ def __init__(self):
+ super().__init__(
+ id="fb8106a4-1a8f-42f9-a502-f6d07e6fe0ec",
+ description="Add a lead to a campaign in SmartLead",
+ categories={BlockCategory.CRM},
+ input_schema=AddLeadToCampaignBlock.Input,
+ output_schema=AddLeadToCampaignBlock.Output,
+ test_credentials=TEST_CREDENTIALS,
+ test_input={
+ "campaign_id": 1,
+ "lead_list": [],
+ "credentials": TEST_CREDENTIALS_INPUT,
+ },
+ test_output=[
+ (
+ "campaign_id",
+ 1,
+ ),
+ (
+ "upload_count",
+ 1,
+ ),
+ ],
+ test_mock={
+ "add_leads_to_campaign": lambda campaign_id, lead_list, credentials: AddLeadsToCampaignResponse(
+ ok=True,
+ upload_count=1,
+ already_added_to_campaign=0,
+ duplicate_count=0,
+ invalid_email_count=0,
+ is_lead_limit_exhausted=False,
+ lead_import_stopped_count=0,
+ error="",
+ total_leads=1,
+ block_count=0,
+ invalid_emails=[],
+ unsubscribed_leads=[],
+ bounce_count=0,
+ )
+ },
+ )
+
+ @staticmethod
+ def add_leads_to_campaign(
+ campaign_id: int, lead_list: list[LeadInput], credentials: SmartLeadCredentials
+ ) -> AddLeadsToCampaignResponse:
+ client = SmartLeadClient(credentials.api_key.get_secret_value())
+ return client.add_leads_to_campaign(
+ AddLeadsRequest(
+ campaign_id=campaign_id,
+ lead_list=lead_list,
+ settings=LeadUploadSettings(
+ ignore_global_block_list=False,
+ ignore_unsubscribe_list=False,
+ ignore_community_bounce_list=False,
+ ignore_duplicate_leads_in_other_campaign=False,
+ ),
+ ),
+ )
+
+ def run(
+ self,
+ input_data: Input,
+ *,
+ credentials: SmartLeadCredentials,
+ **kwargs,
+ ) -> BlockOutput:
+ response = self.add_leads_to_campaign(
+ input_data.campaign_id, input_data.lead_list, credentials
+ )
+
+ yield "campaign_id", input_data.campaign_id
+ yield "upload_count", response.upload_count
+ if response.already_added_to_campaign:
+ yield "already_added_to_campaign", response.already_added_to_campaign
+ if response.duplicate_count:
+ yield "duplicate_count", response.duplicate_count
+ if response.invalid_email_count:
+ yield "invalid_email_count", response.invalid_email_count
+ if response.is_lead_limit_exhausted:
+ yield "is_lead_limit_exhausted", response.is_lead_limit_exhausted
+ if response.lead_import_stopped_count:
+ yield "lead_import_stopped_count", response.lead_import_stopped_count
+ if response.error:
+ yield "error", response.error
+ if not response.ok:
+ yield "error", "Failed to add leads to campaign"
+
+
+class SaveCampaignSequencesBlock(Block):
+ """Save sequences within a campaign"""
+
+ class Input(BlockSchema):
+ campaign_id: int = SchemaField(
+ description="The ID of the campaign to save sequences for",
+ )
+ sequences: list[Sequence] = SchemaField(
+ description="The sequences to save",
+ default=[],
+ advanced=False,
+ )
+ credentials: SmartLeadCredentialsInput = SchemaField(
+ description="SmartLead credentials",
+ )
+
+ class Output(BlockSchema):
+ data: dict | str | None = SchemaField(
+ description="Data from the API",
+ default=None,
+ )
+ message: str = SchemaField(
+ description="Message from the API",
+ default="",
+ )
+ error: str = SchemaField(
+ description="Error message if the sequences were not saved",
+ default="",
+ )
+
+ def __init__(self):
+ super().__init__(
+ id="e7d9f41c-dc10-4f39-98ba-a432abd128c0",
+ description="Save sequences within a campaign",
+ categories={BlockCategory.CRM},
+ input_schema=SaveCampaignSequencesBlock.Input,
+ output_schema=SaveCampaignSequencesBlock.Output,
+ test_credentials=TEST_CREDENTIALS,
+ test_input={
+ "campaign_id": 1,
+ "sequences": [],
+ "credentials": TEST_CREDENTIALS_INPUT,
+ },
+ test_output=[
+ (
+ "message",
+ "Sequences saved successfully",
+ ),
+ ],
+ test_mock={
+ "save_campaign_sequences": lambda campaign_id, sequences, credentials: SaveSequencesResponse(
+ ok=True,
+ message="Sequences saved successfully",
+ )
+ },
+ )
+
+ @staticmethod
+ def save_campaign_sequences(
+ campaign_id: int, sequences: list[Sequence], credentials: SmartLeadCredentials
+ ) -> SaveSequencesResponse:
+ client = SmartLeadClient(credentials.api_key.get_secret_value())
+ return client.save_campaign_sequences(
+ campaign_id=campaign_id, request=SaveSequencesRequest(sequences=sequences)
+ )
+
+ def run(
+ self,
+ input_data: Input,
+ *,
+ credentials: SmartLeadCredentials,
+ **kwargs,
+ ) -> BlockOutput:
+ response = self.save_campaign_sequences(
+ input_data.campaign_id, input_data.sequences, credentials
+ )
+
+ if response.data:
+ yield "data", response.data
+ if response.message:
+ yield "message", response.message
+ if response.error:
+ yield "error", response.error
+ if not response.ok:
+ yield "error", "Failed to save sequences"
diff --git a/autogpt_platform/backend/backend/blocks/smartlead/models.py b/autogpt_platform/backend/backend/blocks/smartlead/models.py
new file mode 100644
index 0000000000..a41d0e6ee8
--- /dev/null
+++ b/autogpt_platform/backend/backend/blocks/smartlead/models.py
@@ -0,0 +1,147 @@
+from enum import Enum
+
+from pydantic import BaseModel
+
+from backend.data.model import SchemaField
+
+
+class CreateCampaignResponse(BaseModel):
+ ok: bool
+ id: int
+ name: str
+ created_at: str
+
+
+class CreateCampaignRequest(BaseModel):
+ name: str
+ client_id: str | None = None
+
+
+class AddLeadsToCampaignResponse(BaseModel):
+ ok: bool
+ upload_count: int
+ total_leads: int
+ block_count: int
+ duplicate_count: int
+ invalid_email_count: int
+ invalid_emails: list[str]
+ already_added_to_campaign: int
+ unsubscribed_leads: list[str]
+ is_lead_limit_exhausted: bool
+ lead_import_stopped_count: int
+ bounce_count: int
+ error: str | None = None
+
+
+class LeadCustomFields(BaseModel):
+ """Custom fields for a lead (max 20 fields)"""
+
+ fields: dict[str, str] = SchemaField(
+ description="Custom fields for a lead (max 20 fields)",
+ max_length=20,
+ default={},
+ )
+
+
+class LeadInput(BaseModel):
+ """Single lead input data"""
+
+ first_name: str
+ last_name: str
+ email: str
+ phone_number: str | None = None # Changed from int to str for phone numbers
+ company_name: str | None = None
+ website: str | None = None
+ location: str | None = None
+ custom_fields: LeadCustomFields | None = None
+ linkedin_profile: str | None = None
+ company_url: str | None = None
+
+
+class LeadUploadSettings(BaseModel):
+ """Settings for lead upload"""
+
+ ignore_global_block_list: bool = SchemaField(
+ description="Ignore the global block list",
+ default=False,
+ )
+ ignore_unsubscribe_list: bool = SchemaField(
+ description="Ignore the unsubscribe list",
+ default=False,
+ )
+ ignore_community_bounce_list: bool = SchemaField(
+ description="Ignore the community bounce list",
+ default=False,
+ )
+ ignore_duplicate_leads_in_other_campaign: bool = SchemaField(
+ description="Ignore duplicate leads in other campaigns",
+ default=False,
+ )
+
+
+class AddLeadsRequest(BaseModel):
+ """Request body for adding leads to a campaign"""
+
+ lead_list: list[LeadInput] = SchemaField(
+ description="List of leads to add to the campaign",
+ max_length=100,
+ default=[],
+ )
+ settings: LeadUploadSettings
+ campaign_id: int
+
+
+class VariantDistributionType(str, Enum):
+ MANUAL_EQUAL = "MANUAL_EQUAL"
+ MANUAL_PERCENTAGE = "MANUAL_PERCENTAGE"
+ AI_EQUAL = "AI_EQUAL"
+
+
+class WinningMetricProperty(str, Enum):
+ OPEN_RATE = "OPEN_RATE"
+ CLICK_RATE = "CLICK_RATE"
+ REPLY_RATE = "REPLY_RATE"
+ POSITIVE_REPLY_RATE = "POSITIVE_REPLY_RATE"
+
+
+class SequenceDelayDetails(BaseModel):
+ delay_in_days: int
+
+
+class SequenceVariant(BaseModel):
+ subject: str
+ email_body: str
+ variant_label: str
+ id: int | None = None # Optional for creation, required for updates
+ variant_distribution_percentage: int | None = None
+
+
+class Sequence(BaseModel):
+ seq_number: int = SchemaField(
+ description="The sequence number",
+ default=1,
+ )
+ seq_delay_details: SequenceDelayDetails
+ id: int | None = None
+ variant_distribution_type: VariantDistributionType | None = None
+ lead_distribution_percentage: int | None = SchemaField(
+ None, ge=20, le=100
+ ) # >= 20% for fair calculation
+ winning_metric_property: WinningMetricProperty | None = None
+ seq_variants: list[SequenceVariant] | None = None
+ subject: str = "" # blank makes the follow up in the same thread
+ email_body: str | None = None
+
+
+class SaveSequencesRequest(BaseModel):
+ sequences: list[Sequence]
+
+
+class SaveSequencesResponse(BaseModel):
+ ok: bool
+ message: str = SchemaField(
+ description="Message from the API",
+ default="",
+ )
+ data: dict | str | None = None
+ error: str | None = None
diff --git a/autogpt_platform/backend/backend/blocks/zerobounce/_api.py b/autogpt_platform/backend/backend/blocks/zerobounce/_api.py
new file mode 100644
index 0000000000..c78fe38c09
--- /dev/null
+++ b/autogpt_platform/backend/backend/blocks/zerobounce/_api.py
@@ -0,0 +1,10 @@
+from zerobouncesdk import ZBValidateResponse, ZeroBounce
+
+
+class ZeroBounceClient:
+ def __init__(self, api_key: str):
+ self.api_key = api_key
+ self.client = ZeroBounce(api_key)
+
+ def validate_email(self, email: str, ip_address: str) -> ZBValidateResponse:
+ return self.client.validate(email, ip_address)
diff --git a/autogpt_platform/backend/backend/blocks/zerobounce/_auth.py b/autogpt_platform/backend/backend/blocks/zerobounce/_auth.py
new file mode 100644
index 0000000000..e7125fc3c9
--- /dev/null
+++ b/autogpt_platform/backend/backend/blocks/zerobounce/_auth.py
@@ -0,0 +1,35 @@
+from typing import Literal
+
+from pydantic import SecretStr
+
+from backend.data.model import APIKeyCredentials, CredentialsField, CredentialsMetaInput
+from backend.integrations.providers import ProviderName
+
+ZeroBounceCredentials = APIKeyCredentials
+ZeroBounceCredentialsInput = CredentialsMetaInput[
+ Literal[ProviderName.ZEROBOUNCE],
+ Literal["api_key"],
+]
+
+TEST_CREDENTIALS = APIKeyCredentials(
+ id="01234567-89ab-cdef-0123-456789abcdef",
+ provider="zerobounce",
+ api_key=SecretStr("mock-zerobounce-api-key"),
+ title="Mock ZeroBounce API key",
+ expires_at=None,
+)
+TEST_CREDENTIALS_INPUT = {
+ "provider": TEST_CREDENTIALS.provider,
+ "id": TEST_CREDENTIALS.id,
+ "type": TEST_CREDENTIALS.type,
+ "title": TEST_CREDENTIALS.title,
+}
+
+
+def ZeroBounceCredentialsField() -> ZeroBounceCredentialsInput:
+ """
+ Creates a ZeroBounce credentials input on a block.
+ """
+ return CredentialsField(
+ description="The ZeroBounce integration can be used with an API Key.",
+ )
diff --git a/autogpt_platform/backend/backend/blocks/zerobounce/validate_emails.py b/autogpt_platform/backend/backend/blocks/zerobounce/validate_emails.py
new file mode 100644
index 0000000000..ee87a8f285
--- /dev/null
+++ b/autogpt_platform/backend/backend/blocks/zerobounce/validate_emails.py
@@ -0,0 +1,175 @@
+from typing import Optional
+
+from pydantic import BaseModel
+from zerobouncesdk.zb_validate_response import (
+ ZBValidateResponse,
+ ZBValidateStatus,
+ ZBValidateSubStatus,
+)
+
+from backend.blocks.zerobounce._api import ZeroBounceClient
+from backend.blocks.zerobounce._auth import (
+ TEST_CREDENTIALS,
+ TEST_CREDENTIALS_INPUT,
+ ZeroBounceCredentials,
+ ZeroBounceCredentialsInput,
+)
+from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
+from backend.data.model import SchemaField
+
+
+class Response(BaseModel):
+ address: str = SchemaField(
+ description="The email address you are validating.", default="N/A"
+ )
+ status: ZBValidateStatus = SchemaField(
+ description="The status of the email address.", default=ZBValidateStatus.unknown
+ )
+ sub_status: ZBValidateSubStatus = SchemaField(
+ description="The sub-status of the email address.",
+ default=ZBValidateSubStatus.none,
+ )
+ account: Optional[str] = SchemaField(
+ description="The portion of the email address before the '@' symbol.",
+ default="N/A",
+ )
+ domain: Optional[str] = SchemaField(
+ description="The portion of the email address after the '@' symbol."
+ )
+ did_you_mean: Optional[str] = SchemaField(
+ description="Suggestive Fix for an email typo",
+ default=None,
+ )
+ domain_age_days: Optional[str] = SchemaField(
+ description="Age of the email domain in days or [null].",
+ default=None,
+ )
+ free_email: Optional[bool] = SchemaField(
+ description="Whether the email address is a free email provider.", default=False
+ )
+ mx_found: Optional[bool] = SchemaField(
+ description="Whether the MX record was found.", default=False
+ )
+ mx_record: Optional[str] = SchemaField(
+ description="The MX record of the email address.", default=None
+ )
+ smtp_provider: Optional[str] = SchemaField(
+ description="The SMTP provider of the email address.", default=None
+ )
+ firstname: Optional[str] = SchemaField(
+ description="The first name of the email address.", default=None
+ )
+ lastname: Optional[str] = SchemaField(
+ description="The last name of the email address.", default=None
+ )
+ gender: Optional[str] = SchemaField(
+ description="The gender of the email address.", default=None
+ )
+ city: Optional[str] = SchemaField(
+ description="The city of the email address.", default=None
+ )
+ region: Optional[str] = SchemaField(
+ description="The region of the email address.", default=None
+ )
+ zipcode: Optional[str] = SchemaField(
+ description="The zipcode of the email address.", default=None
+ )
+ country: Optional[str] = SchemaField(
+ description="The country of the email address.", default=None
+ )
+
+
+class ValidateEmailsBlock(Block):
+    """Validate an email address using ZeroBounce"""
+
+ class Input(BlockSchema):
+ email: str = SchemaField(
+ description="Email to validate",
+ )
+ ip_address: str = SchemaField(
+ description="IP address to validate",
+ default="",
+ )
+ credentials: ZeroBounceCredentialsInput = SchemaField(
+ description="ZeroBounce credentials",
+ )
+
+ class Output(BlockSchema):
+ response: Response = SchemaField(
+ description="Response from ZeroBounce",
+ )
+ error: str = SchemaField(
+ description="Error message if the search failed",
+ default="",
+ )
+
+ def __init__(self):
+ super().__init__(
+ id="e3950439-fa0b-40e8-b19f-e0dca0bf5853",
+ description="Validate emails",
+ categories={BlockCategory.SEARCH},
+ input_schema=ValidateEmailsBlock.Input,
+ output_schema=ValidateEmailsBlock.Output,
+ test_credentials=TEST_CREDENTIALS,
+ test_input={
+ "credentials": TEST_CREDENTIALS_INPUT,
+ "email": "test@test.com",
+ },
+ test_output=[
+ (
+ "response",
+ Response(
+ address="test@test.com",
+ status=ZBValidateStatus.valid,
+ sub_status=ZBValidateSubStatus.allowed,
+ account="test",
+ domain="test.com",
+ did_you_mean=None,
+ domain_age_days=None,
+ free_email=False,
+ mx_found=False,
+ mx_record=None,
+ smtp_provider=None,
+ ),
+ )
+ ],
+ test_mock={
+ "validate_email": lambda email, ip_address, credentials: ZBValidateResponse(
+ data={
+ "address": email,
+ "status": ZBValidateStatus.valid,
+ "sub_status": ZBValidateSubStatus.allowed,
+ "account": "test",
+ "domain": "test.com",
+ "did_you_mean": None,
+ "domain_age_days": None,
+ "free_email": False,
+ "mx_found": False,
+ "mx_record": None,
+ "smtp_provider": None,
+ }
+ )
+ },
+ )
+
+ @staticmethod
+ def validate_email(
+ email: str, ip_address: str, credentials: ZeroBounceCredentials
+ ) -> ZBValidateResponse:
+ client = ZeroBounceClient(credentials.api_key.get_secret_value())
+ return client.validate_email(email, ip_address)
+
+ def run(
+ self,
+ input_data: Input,
+ *,
+ credentials: ZeroBounceCredentials,
+ **kwargs,
+ ) -> BlockOutput:
+ response: ZBValidateResponse = self.validate_email(
+ input_data.email, input_data.ip_address, credentials
+ )
+
+ response_model = Response(**response.__dict__)
+
+ yield "response", response_model
diff --git a/autogpt_platform/backend/backend/executor/manager.py b/autogpt_platform/backend/backend/executor/manager.py
index 65c84ee1bc..6e7d109d15 100644
--- a/autogpt_platform/backend/backend/executor/manager.py
+++ b/autogpt_platform/backend/backend/executor/manager.py
@@ -210,6 +210,7 @@ def execute_node(
for output_name, output_data in node_block.execute(
input_data, **extra_exec_kwargs
):
+ output_data = json.convert_pydantic_to_json(output_data)
output_size += len(json.dumps(output_data))
log_metadata.info("Node produced output", **{output_name: output_data})
db_client.upsert_execution_output(node_exec_id, output_name, output_data)
diff --git a/autogpt_platform/backend/backend/integrations/credentials_store.py b/autogpt_platform/backend/backend/integrations/credentials_store.py
index 5968cbe32d..3284c4b8fe 100644
--- a/autogpt_platform/backend/backend/integrations/credentials_store.py
+++ b/autogpt_platform/backend/backend/integrations/credentials_store.py
@@ -145,6 +145,29 @@ mem0_credentials = APIKeyCredentials(
expires_at=None,
)
+apollo_credentials = APIKeyCredentials(
+ id="544c62b5-1d0f-4156-8fb4-9525f11656eb",
+ provider="apollo",
+ api_key=SecretStr(settings.secrets.apollo_api_key),
+ title="Use Credits for Apollo",
+ expires_at=None,
+)
+
+smartlead_credentials = APIKeyCredentials(
+ id="3bcdbda3-84a3-46af-8fdb-bfd2472298b8",
+ provider="smartlead",
+ api_key=SecretStr(settings.secrets.smartlead_api_key),
+ title="Use Credits for SmartLead",
+ expires_at=None,
+)
+
+zerobounce_credentials = APIKeyCredentials(
+ id="63a6e279-2dc2-448e-bf57-85776f7176dc",
+ provider="zerobounce",
+ api_key=SecretStr(settings.secrets.zerobounce_api_key),
+ title="Use Credits for ZeroBounce",
+ expires_at=None,
+)
DEFAULT_CREDENTIALS = [
ollama_credentials,
@@ -164,6 +187,9 @@ DEFAULT_CREDENTIALS = [
mem0_credentials,
nvidia_credentials,
screenshotone_credentials,
+ apollo_credentials,
+ smartlead_credentials,
+ zerobounce_credentials,
]
@@ -231,6 +257,12 @@ class IntegrationCredentialsStore:
all_credentials.append(screenshotone_credentials)
if settings.secrets.mem0_api_key:
all_credentials.append(mem0_credentials)
+ if settings.secrets.apollo_api_key:
+ all_credentials.append(apollo_credentials)
+ if settings.secrets.smartlead_api_key:
+ all_credentials.append(smartlead_credentials)
+ if settings.secrets.zerobounce_api_key:
+ all_credentials.append(zerobounce_credentials)
return all_credentials
def get_creds_by_id(self, user_id: str, credentials_id: str) -> Credentials | None:
diff --git a/autogpt_platform/backend/backend/integrations/providers.py b/autogpt_platform/backend/backend/integrations/providers.py
index 6f31988d7f..eb1f513c2e 100644
--- a/autogpt_platform/backend/backend/integrations/providers.py
+++ b/autogpt_platform/backend/backend/integrations/providers.py
@@ -4,6 +4,7 @@ from enum import Enum
# --8<-- [start:ProviderName]
class ProviderName(str, Enum):
ANTHROPIC = "anthropic"
+ APOLLO = "apollo"
COMPASS = "compass"
DISCORD = "discord"
D_ID = "d_id"
@@ -32,8 +33,10 @@ class ProviderName(str, Enum):
REVID = "revid"
SCREENSHOTONE = "screenshotone"
SLANT3D = "slant3d"
+ SMARTLEAD = "smartlead"
SMTP = "smtp"
TWITTER = "twitter"
TODOIST = "todoist"
UNREAL_SPEECH = "unreal_speech"
+ ZEROBOUNCE = "zerobounce"
# --8<-- [end:ProviderName]
diff --git a/autogpt_platform/backend/backend/util/json.py b/autogpt_platform/backend/backend/util/json.py
index 7f88917414..fbec1312a9 100644
--- a/autogpt_platform/backend/backend/util/json.py
+++ b/autogpt_platform/backend/backend/util/json.py
@@ -1,8 +1,9 @@
import json
-from typing import Any, Type, TypeVar, overload
+from typing import Any, Type, TypeGuard, TypeVar, overload
import jsonschema
from fastapi.encoders import jsonable_encoder
+from pydantic import BaseModel
from .type import type_match
@@ -45,3 +46,17 @@ def validate_with_jsonschema(
return None
except jsonschema.ValidationError as e:
return str(e)
+
+
+def is_list_of_basemodels(value: object) -> TypeGuard[list[BaseModel]]:
+ return isinstance(value, list) and all(
+ isinstance(item, BaseModel) for item in value
+ )
+
+
+def convert_pydantic_to_json(output_data: Any) -> Any:
+ if isinstance(output_data, BaseModel):
+ return output_data.model_dump()
+ if is_list_of_basemodels(output_data):
+ return [item.model_dump() for item in output_data]
+ return output_data
diff --git a/autogpt_platform/backend/backend/util/settings.py b/autogpt_platform/backend/backend/util/settings.py
index d04293a8a2..bb1ffdb7ef 100644
--- a/autogpt_platform/backend/backend/util/settings.py
+++ b/autogpt_platform/backend/backend/util/settings.py
@@ -364,6 +364,10 @@ class Secrets(UpdateTrackingModel["Secrets"], BaseSettings):
screenshotone_api_key: str = Field(default="", description="ScreenshotOne API Key")
+ apollo_api_key: str = Field(default="", description="Apollo API Key")
+ smartlead_api_key: str = Field(default="", description="SmartLead API Key")
+ zerobounce_api_key: str = Field(default="", description="ZeroBounce API Key")
+
# Add more secret fields as needed
model_config = SettingsConfigDict(
diff --git a/autogpt_platform/backend/poetry.lock b/autogpt_platform/backend/poetry.lock
index 2e88aded96..463ad6ea3f 100644
--- a/autogpt_platform/backend/poetry.lock
+++ b/autogpt_platform/backend/poetry.lock
@@ -5936,6 +5936,21 @@ files = [
defusedxml = ">=0.7.1,<0.8.0"
requests = "*"
+[[package]]
+name = "zerobouncesdk"
+version = "1.1.1"
+description = "ZeroBounce Python API - https://www.zerobounce.net."
+optional = false
+python-versions = ">=3.7"
+groups = ["main"]
+files = [
+ {file = "zerobouncesdk-1.1.1-py3-none-any.whl", hash = "sha256:9fb9dfa44fe4ce35d6f2e43d5144c31ca03544a3317d75643cb9f86b0c028675"},
+ {file = "zerobouncesdk-1.1.1.tar.gz", hash = "sha256:00aa537263d5bc21534c0007dd9f94ce8e0986caa530c5a0bbe0bd917451f236"},
+]
+
+[package.dependencies]
+requests = ">=2.22.0"
+
[[package]]
name = "zipp"
version = "3.21.0"
@@ -6072,4 +6087,4 @@ cffi = ["cffi (>=1.11)"]
[metadata]
lock-version = "2.1"
python-versions = ">=3.10,<3.13"
-content-hash = "1a222b9695cc0506038d4f0b021f71ff9a443490cee021beca5365a47b0437a2"
+content-hash = "ba36ce74308bd37e19ca790e63dae387e5ad6173b2945dd63b58b3e918e85b46"
diff --git a/autogpt_platform/backend/pyproject.toml b/autogpt_platform/backend/pyproject.toml
index bcf834a8bd..31be7656fb 100644
--- a/autogpt_platform/backend/pyproject.toml
+++ b/autogpt_platform/backend/pyproject.toml
@@ -37,6 +37,7 @@ ollama = "^0.4.1"
openai = "^1.61.1"
pika = "^1.3.2"
pinecone = "^5.3.1"
+poetry = "^2.1.1"
postmarker = "^1.0"
praw = "~7.8.1"
prisma = "^0.15.0"
@@ -62,8 +63,8 @@ tweepy = "^4.14.0"
uvicorn = { extras = ["standard"], version = "^0.34.0" }
websockets = "^13.1"
youtube-transcript-api = "^0.6.2"
+zerobouncesdk = "^1.1.1"
# NOTE: please insert new dependencies in their alphabetical location
-poetry = "^2.1.1"
[tool.poetry.group.dev.dependencies]
aiohappyeyeballs = "^2.4.4"
diff --git a/autogpt_platform/frontend/src/app/profile/(user)/integrations/page.tsx b/autogpt_platform/frontend/src/app/profile/(user)/integrations/page.tsx
index 8069664017..182a6b2bb8 100644
--- a/autogpt_platform/frontend/src/app/profile/(user)/integrations/page.tsx
+++ b/autogpt_platform/frontend/src/app/profile/(user)/integrations/page.tsx
@@ -113,6 +113,9 @@ export default function PrivatePage() {
"78d19fd7-4d59-4a16-8277-3ce310acf2b7", // E2B
"96b83908-2789-4dec-9968-18f0ece4ceb3", // Nvidia
"ed55ac19-356e-4243-a6cb-bc599e9b716f", // Mem0
+ "544c62b5-1d0f-4156-8fb4-9525f11656eb", // Apollo
+ "3bcdbda3-84a3-46af-8fdb-bfd2472298b8", // SmartLead
+ "63a6e279-2dc2-448e-bf57-85776f7176dc", // ZeroBounce
],
[],
);
diff --git a/autogpt_platform/frontend/src/components/integrations/credentials-input.tsx b/autogpt_platform/frontend/src/components/integrations/credentials-input.tsx
index 9663c8cc5f..16d9f8d0a6 100644
--- a/autogpt_platform/frontend/src/components/integrations/credentials-input.tsx
+++ b/autogpt_platform/frontend/src/components/integrations/credentials-input.tsx
@@ -56,6 +56,7 @@ export const providerIcons: Record<
React.FC<{ className?: string }>
> = {
anthropic: fallbackIcon,
+ apollo: fallbackIcon,
e2b: fallbackIcon,
github: FaGithub,
google: FaGoogle,
@@ -86,7 +87,9 @@ export const providerIcons: Record<
unreal_speech: fallbackIcon,
exa: fallbackIcon,
hubspot: FaHubspot,
+ smartlead: fallbackIcon,
todoist: fallbackIcon,
+ zerobounce: fallbackIcon,
};
// --8<-- [end:ProviderIconsEmbed]
diff --git a/autogpt_platform/frontend/src/components/integrations/credentials-provider.tsx b/autogpt_platform/frontend/src/components/integrations/credentials-provider.tsx
index 990cc04f99..db98f646c7 100644
--- a/autogpt_platform/frontend/src/components/integrations/credentials-provider.tsx
+++ b/autogpt_platform/frontend/src/components/integrations/credentials-provider.tsx
@@ -18,6 +18,7 @@ const CREDENTIALS_PROVIDER_NAMES = Object.values(
// --8<-- [start:CredentialsProviderNames]
const providerDisplayNames: Record = {
anthropic: "Anthropic",
+ apollo: "Apollo",
discord: "Discord",
d_id: "D-ID",
e2b: "E2B",
@@ -40,8 +41,9 @@ const providerDisplayNames: Record = {
openweathermap: "OpenWeatherMap",
open_router: "Open Router",
pinecone: "Pinecone",
- slant3d: "Slant3D",
screenshotone: "ScreenshotOne",
+ slant3d: "Slant3D",
+ smartlead: "SmartLead",
smtp: "SMTP",
reddit: "Reddit",
replicate: "Replicate",
@@ -49,6 +51,7 @@ const providerDisplayNames: Record = {
twitter: "Twitter",
todoist: "Todoist",
unreal_speech: "Unreal Speech",
+ zerobounce: "ZeroBounce",
} as const;
// --8<-- [end:CredentialsProviderNames]
diff --git a/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts b/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts
index 9734ad2bea..6eb3199518 100644
--- a/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts
+++ b/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts
@@ -112,6 +112,7 @@ export type Credentials =
// --8<-- [start:BlockIOCredentialsSubSchema]
export const PROVIDER_NAMES = {
ANTHROPIC: "anthropic",
+ APOLLO: "apollo",
D_ID: "d_id",
DISCORD: "discord",
E2B: "e2b",
@@ -134,8 +135,9 @@ export const PROVIDER_NAMES = {
OPENWEATHERMAP: "openweathermap",
OPEN_ROUTER: "open_router",
PINECONE: "pinecone",
- SLANT3D: "slant3d",
SCREENSHOTONE: "screenshotone",
+ SLANT3D: "slant3d",
+ SMARTLEAD: "smartlead",
SMTP: "smtp",
TWITTER: "twitter",
REPLICATE: "replicate",
@@ -143,6 +145,7 @@ export const PROVIDER_NAMES = {
REVID: "revid",
UNREAL_SPEECH: "unreal_speech",
TODOIST: "todoist",
+ ZEROBOUNCE: "zerobounce",
} as const;
// --8<-- [end:BlockIOCredentialsSubSchema]
From 296eee0b4f2e056381b0497b7c873e260d5fe573 Mon Sep 17 00:00:00 2001
From: Reinier van der Leer
Date: Wed, 19 Feb 2025 23:07:03 +0100
Subject: [PATCH 2/5] feat(platform/library): Library v2 > Agent Runs page
(#9051)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
- Resolves #8780
- Part of #8774
### Changes 🏗️
- Add new UI components
- Add `/agents/[id]` page, with sub-components:
- `AgentRunsSelectorList`
- `AgentRunSummaryCard`
- `AgentRunStatusChip`
- `AgentRunDetailsView`
- `AgentRunDraftView`
- `AgentScheduleDetailsView`
Backend improvements:
- Improve output of execution-related API endpoints: return
`GraphExecution` instead of `NodeExecutionResult[]`
- Reduce log spam from Prisma in tests
General frontend improvements:
- Hide nav link names on smaller screens to prevent navbar overflow
- Clean up styling and fix sizing of `agptui/Button`
Technical frontend improvements:
- Fix tailwind config size increments
- Rename `font-poppin` -> `font-poppins`
- Clean up component implementations and usages
- Yeet all occurrences of `variant="default"`
- Remove `default` button variant as duplicate of `outline`; make
`outline` the default
- Fix minor typing issues
DX:
- Add front end type-check step to `pre-commit` config
- Fix logging setup in conftest.py
### Checklist 📋
#### For code changes:
- [x] I have clearly listed my changes in the PR description
- [x] I have made a test plan
- [x] I have tested my changes according to the test plan:
- `/agents/[id]` (new)
- Go to page -> list of runs loads
- Create new run -> runs; all I/O is visible
- Click "Run again" -> runs again with same input
- `/monitoring` (existing)
- Go to page -> everything loads
- Selecting agents and agent runs works
---------
Co-authored-by: Nicholas Tindle
Co-authored-by: Nicholas Tindle
Co-authored-by: Swifty
Co-authored-by: Zamil Majdy
---
.pre-commit-config.yaml | 20 ++
.../autogpt_libs/feature_flag/client.py | 1 -
.../backend/backend/data/graph.py | 113 +++++++--
.../backend/backend/server/rest_api.py | 6 +-
.../backend/backend/server/routers/v1.py | 45 +++-
.../backend/backend/server/v2/store/routes.py | 3 +-
autogpt_platform/backend/backend/util/test.py | 3 +-
autogpt_platform/backend/test/conftest.py | 16 +-
.../backend/test/executor/test_manager.py | 24 +-
.../frontend/src/app/agents/[id]/page.tsx | 184 +++++++++++++++
autogpt_platform/frontend/src/app/globals.css | 124 ++++++----
.../frontend/src/app/monitoring/page.tsx | 8 +-
.../src/app/profile/(user)/credits/page.tsx | 1 -
.../src/app/profile/(user)/dashboard/page.tsx | 2 -
.../agents/agent-run-details-view.tsx | 214 ++++++++++++++++++
.../agents/agent-run-draft-view.tsx | 99 ++++++++
.../agents/agent-run-status-chip.tsx | 65 ++++++
.../agents/agent-run-summary-card.tsx | 95 ++++++++
.../agents/agent-runs-selector-list.tsx | 133 +++++++++++
.../agents/agent-schedule-details-view.tsx | 141 ++++++++++++
.../src/components/agptui/AgentImageItem.tsx | 1 -
.../src/components/agptui/AgentInfo.tsx | 2 +-
.../src/components/agptui/BecomeACreator.tsx | 6 +-
.../src/components/agptui/Button.stories.tsx | 7 +-
.../frontend/src/components/agptui/Button.tsx | 25 +-
.../src/components/agptui/CreatorInfoCard.tsx | 2 +-
.../frontend/src/components/agptui/Navbar.tsx | 7 +-
.../src/components/agptui/NavbarLink.tsx | 2 +-
.../src/components/agptui/ProfileInfoForm.tsx | 1 -
.../agptui/PublishAgentAwaitingReview.tsx | 2 -
.../components/agptui/PublishAgentSelect.tsx | 11 +-
.../agptui/PublishAgentSelectInfo.tsx | 8 +-
.../src/components/agptui/StoreCard.tsx | 2 +-
.../agptui/composite/AgentsSection.tsx | 4 +-
.../agptui/composite/FeaturedCreators.tsx | 2 +-
.../agptui/composite/FeaturedSection.tsx | 2 +-
.../agptui/composite/HeroSection.tsx | 8 +-
.../agptui/composite/PublishAgentPopout.tsx | 2 +-
.../src/components/monitor/AgentFlowList.tsx | 6 +-
.../src/components/monitor/FlowInfo.tsx | 5 +-
.../src/components/monitor/FlowRunInfo.tsx | 11 +-
.../components/monitor/FlowRunStatusBadge.tsx | 4 +-
.../src/components/monitor/FlowRunsList.tsx | 8 +-
.../src/components/monitor/FlowRunsStatus.tsx | 4 +-
.../components/monitor/FlowRunsTimeline.tsx | 10 +-
.../src/components/monitor/scheduleTable.tsx | 2 +-
.../frontend/src/components/ui/card.tsx | 5 +-
.../frontend/src/hooks/useAgentGraph.tsx | 7 +-
.../src/lib/autogpt-server-api/client.ts | 49 ++--
.../src/lib/autogpt-server-api/types.ts | 48 ++--
autogpt_platform/frontend/tailwind.config.ts | 16 +-
51 files changed, 1330 insertions(+), 236 deletions(-)
create mode 100644 autogpt_platform/frontend/src/app/agents/[id]/page.tsx
create mode 100644 autogpt_platform/frontend/src/components/agents/agent-run-details-view.tsx
create mode 100644 autogpt_platform/frontend/src/components/agents/agent-run-draft-view.tsx
create mode 100644 autogpt_platform/frontend/src/components/agents/agent-run-status-chip.tsx
create mode 100644 autogpt_platform/frontend/src/components/agents/agent-run-summary-card.tsx
create mode 100644 autogpt_platform/frontend/src/components/agents/agent-runs-selector-list.tsx
create mode 100644 autogpt_platform/frontend/src/components/agents/agent-schedule-details-view.tsx
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 293a9e2d43..f262441f0e 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -170,6 +170,16 @@ repos:
files: ^classic/benchmark/(agbenchmark|tests)/((?!reports).)*[/.]
args: [--config=classic/benchmark/.flake8]
+ - repo: local
+ hooks:
+ - id: prettier
+ name: Format (Prettier) - AutoGPT Platform - Frontend
+ alias: format-platform-frontend
+ entry: bash -c 'cd autogpt_platform/frontend && npx prettier --write $(echo "$@" | sed "s|autogpt_platform/frontend/||g")' --
+ files: ^autogpt_platform/frontend/
+ types: [file]
+ language: system
+
- repo: local
# To have watertight type checking, we check *all* the files in an affected
# project. To trigger on poetry.lock we also reset the file `types` filter.
@@ -221,6 +231,16 @@ repos:
language: system
pass_filenames: false
+ - repo: local
+ hooks:
+ - id: tsc
+ name: Typecheck - AutoGPT Platform - Frontend
+ entry: bash -c 'cd autogpt_platform/frontend && npm run type-check'
+ files: ^autogpt_platform/frontend/
+ types: [file]
+ language: system
+ pass_filenames: false
+
- repo: local
hooks:
- id: pytest
diff --git a/autogpt_platform/autogpt_libs/autogpt_libs/feature_flag/client.py b/autogpt_platform/autogpt_libs/autogpt_libs/feature_flag/client.py
index dde516c1d8..9aed891706 100644
--- a/autogpt_platform/autogpt_libs/autogpt_libs/feature_flag/client.py
+++ b/autogpt_platform/autogpt_libs/autogpt_libs/feature_flag/client.py
@@ -13,7 +13,6 @@ from typing_extensions import ParamSpec
from .config import SETTINGS
logger = logging.getLogger(__name__)
-logging.basicConfig(level=logging.DEBUG)
P = ParamSpec("P")
T = TypeVar("T")
diff --git a/autogpt_platform/backend/backend/data/graph.py b/autogpt_platform/backend/backend/data/graph.py
index 5483c24bd3..52e37625cd 100644
--- a/autogpt_platform/backend/backend/data/graph.py
+++ b/autogpt_platform/backend/backend/data/graph.py
@@ -23,12 +23,15 @@ from backend.util import type
from .block import BlockInput, BlockType, get_block, get_blocks
from .db import BaseDbModel, transaction
-from .execution import ExecutionStatus
+from .execution import ExecutionResult, ExecutionStatus
from .includes import AGENT_GRAPH_INCLUDE, AGENT_NODE_INCLUDE
from .integrations import Webhook
logger = logging.getLogger(__name__)
+_INPUT_BLOCK_ID = AgentInputBlock().id
+_OUTPUT_BLOCK_ID = AgentOutputBlock().id
+
class Link(BaseDbModel):
source_id: str
@@ -105,7 +108,7 @@ class NodeModel(Node):
Webhook.model_rebuild()
-class GraphExecution(BaseDbModel):
+class GraphExecutionMeta(BaseDbModel):
execution_id: str
started_at: datetime
ended_at: datetime
@@ -114,33 +117,83 @@ class GraphExecution(BaseDbModel):
status: ExecutionStatus
graph_id: str
graph_version: int
+ preset_id: Optional[str]
@staticmethod
- def from_db(execution: AgentGraphExecution):
+ def from_db(_graph_exec: AgentGraphExecution):
now = datetime.now(timezone.utc)
- start_time = execution.startedAt or execution.createdAt
- end_time = execution.updatedAt or now
+ start_time = _graph_exec.startedAt or _graph_exec.createdAt
+ end_time = _graph_exec.updatedAt or now
duration = (end_time - start_time).total_seconds()
total_run_time = duration
try:
- stats = type.convert(execution.stats or {}, dict[str, Any])
+ stats = type.convert(_graph_exec.stats or {}, dict[str, Any])
except ValueError:
stats = {}
duration = stats.get("walltime", duration)
total_run_time = stats.get("nodes_walltime", total_run_time)
- return GraphExecution(
- id=execution.id,
- execution_id=execution.id,
+ return GraphExecutionMeta(
+ id=_graph_exec.id,
+ execution_id=_graph_exec.id,
started_at=start_time,
ended_at=end_time,
duration=duration,
total_run_time=total_run_time,
- status=ExecutionStatus(execution.executionStatus),
- graph_id=execution.agentGraphId,
- graph_version=execution.agentGraphVersion,
+ status=ExecutionStatus(_graph_exec.executionStatus),
+ graph_id=_graph_exec.agentGraphId,
+ graph_version=_graph_exec.agentGraphVersion,
+ preset_id=_graph_exec.agentPresetId,
+ )
+
+
+class GraphExecution(GraphExecutionMeta):
+ inputs: dict[str, Any]
+ outputs: dict[str, list[Any]]
+ node_executions: list[ExecutionResult]
+
+ @staticmethod
+ def from_db(_graph_exec: AgentGraphExecution):
+ if _graph_exec.AgentNodeExecutions is None:
+ raise ValueError("Node executions must be included in query")
+
+ graph_exec = GraphExecutionMeta.from_db(_graph_exec)
+
+ node_executions = [
+ ExecutionResult.from_db(ne) for ne in _graph_exec.AgentNodeExecutions
+ ]
+
+ inputs = {
+ **{
+ # inputs from Agent Input Blocks
+ exec.input_data["name"]: exec.input_data["value"]
+ for exec in node_executions
+ if exec.block_id == _INPUT_BLOCK_ID
+ },
+ **{
+ # input from webhook-triggered block
+ "payload": exec.input_data["payload"]
+ for exec in node_executions
+ if (block := get_block(exec.block_id))
+ and block.block_type in [BlockType.WEBHOOK, BlockType.WEBHOOK_MANUAL]
+ },
+ }
+
+ outputs: dict[str, list] = defaultdict(list)
+ for exec in node_executions:
+ if exec.block_id == _OUTPUT_BLOCK_ID:
+ outputs[exec.input_data["name"]].append(exec.input_data["value"])
+
+ return GraphExecution(
+ **{
+ field_name: getattr(graph_exec, field_name)
+ for field_name in graph_exec.model_fields
+ },
+ inputs=inputs,
+ outputs=outputs,
+ node_executions=node_executions,
)
@@ -514,17 +567,45 @@ async def get_graphs(
return graph_models
-async def get_executions(user_id: str) -> list[GraphExecution]:
+async def get_graphs_executions(user_id: str) -> list[GraphExecutionMeta]:
executions = await AgentGraphExecution.prisma().find_many(
where={"userId": user_id},
order={"createdAt": "desc"},
)
- return [GraphExecution.from_db(execution) for execution in executions]
+ return [GraphExecutionMeta.from_db(execution) for execution in executions]
+
+
+async def get_graph_executions(graph_id: str, user_id: str) -> list[GraphExecutionMeta]:
+ executions = await AgentGraphExecution.prisma().find_many(
+ where={"agentGraphId": graph_id, "userId": user_id},
+ order={"createdAt": "desc"},
+ )
+ return [GraphExecutionMeta.from_db(execution) for execution in executions]
+
+
+async def get_execution_meta(
+ user_id: str, execution_id: str
+) -> GraphExecutionMeta | None:
+ execution = await AgentGraphExecution.prisma().find_first(
+ where={"id": execution_id, "userId": user_id}
+ )
+ return GraphExecutionMeta.from_db(execution) if execution else None
async def get_execution(user_id: str, execution_id: str) -> GraphExecution | None:
execution = await AgentGraphExecution.prisma().find_first(
- where={"id": execution_id, "userId": user_id}
+ where={"id": execution_id, "userId": user_id},
+ include={
+ "AgentNodeExecutions": {
+ "include": {"AgentNode": True, "Input": True, "Output": True},
+ "order_by": [
+ {"queuedTime": "asc"},
+ { # Fallback: Incomplete execs have no queuedTime.
+ "addedTime": "asc"
+ },
+ ],
+ },
+ },
)
return GraphExecution.from_db(execution) if execution else None
@@ -629,7 +710,7 @@ async def create_graph(graph: Graph, user_id: str) -> GraphModel:
await __create_graph(tx, graph, user_id)
if created_graph := await get_graph(
- graph.id, graph.version, graph.is_template, user_id=user_id
+ graph.id, graph.version, template=graph.is_template, user_id=user_id
):
return created_graph
diff --git a/autogpt_platform/backend/backend/server/rest_api.py b/autogpt_platform/backend/backend/server/rest_api.py
index ae756ee71f..d5435e3615 100644
--- a/autogpt_platform/backend/backend/server/rest_api.py
+++ b/autogpt_platform/backend/backend/server/rest_api.py
@@ -155,7 +155,7 @@ class AgentServer(backend.util.service.AppProcess):
@staticmethod
async def test_get_graph_run_status(graph_exec_id: str, user_id: str):
- execution = await backend.data.graph.get_execution(
+ execution = await backend.data.graph.get_execution_meta(
user_id=user_id, execution_id=graph_exec_id
)
if not execution:
@@ -163,10 +163,10 @@ class AgentServer(backend.util.service.AppProcess):
return execution.status
@staticmethod
- async def test_get_graph_run_node_execution_results(
+ async def test_get_graph_run_results(
graph_id: str, graph_exec_id: str, user_id: str
):
- return await backend.server.routers.v1.get_graph_run_node_execution_results(
+ return await backend.server.routers.v1.get_graph_execution(
graph_id, graph_exec_id, user_id
)
diff --git a/autogpt_platform/backend/backend/server/routers/v1.py b/autogpt_platform/backend/backend/server/routers/v1.py
index 276930abd7..5e0eeea0ec 100644
--- a/autogpt_platform/backend/backend/server/routers/v1.py
+++ b/autogpt_platform/backend/backend/server/routers/v1.py
@@ -16,7 +16,6 @@ import backend.data.block
import backend.server.integrations.router
import backend.server.routers.analytics
import backend.server.v2.library.db as library_db
-from backend.data import execution as execution_db
from backend.data import graph as graph_db
from backend.data.api_key import (
APIKeyError,
@@ -546,7 +545,7 @@ async def set_graph_active_version(
)
def execute_graph(
graph_id: str,
- node_input: Annotated[dict[str, Any], Body(..., embed=True, default_factory=dict)],
+ node_input: Annotated[dict[str, Any], Body(..., default_factory=dict)],
user_id: Annotated[str, Depends(get_user_id)],
graph_version: Optional[int] = None,
) -> ExecuteGraphResponse:
@@ -567,8 +566,10 @@ def execute_graph(
)
async def stop_graph_run(
graph_exec_id: str, user_id: Annotated[str, Depends(get_user_id)]
-) -> Sequence[execution_db.ExecutionResult]:
- if not await graph_db.get_execution(user_id=user_id, execution_id=graph_exec_id):
+) -> graph_db.GraphExecution:
+ if not await graph_db.get_execution_meta(
+ user_id=user_id, execution_id=graph_exec_id
+ ):
raise HTTPException(404, detail=f"Agent execution #{graph_exec_id} not found")
await asyncio.to_thread(
@@ -576,7 +577,13 @@ async def stop_graph_run(
)
# Retrieve & return canceled graph execution in its final state
- return await execution_db.get_execution_results(graph_exec_id)
+ result = await graph_db.get_execution(execution_id=graph_exec_id, user_id=user_id)
+ if not result:
+ raise HTTPException(
+ 500,
+ detail=f"Could not fetch graph execution #{graph_exec_id} after stopping",
+ )
+ return result
@v1_router.get(
@@ -584,10 +591,22 @@ async def stop_graph_run(
tags=["graphs"],
dependencies=[Depends(auth_middleware)],
)
-async def get_executions(
+async def get_graphs_executions(
user_id: Annotated[str, Depends(get_user_id)],
-) -> list[graph_db.GraphExecution]:
- return await graph_db.get_executions(user_id=user_id)
+) -> list[graph_db.GraphExecutionMeta]:
+ return await graph_db.get_graphs_executions(user_id=user_id)
+
+
+@v1_router.get(
+ path="/graphs/{graph_id}/executions",
+ tags=["graphs"],
+ dependencies=[Depends(auth_middleware)],
+)
+async def get_graph_executions(
+ graph_id: str,
+ user_id: Annotated[str, Depends(get_user_id)],
+) -> list[graph_db.GraphExecutionMeta]:
+ return await graph_db.get_graph_executions(graph_id=graph_id, user_id=user_id)
@v1_router.get(
@@ -595,16 +614,20 @@ async def get_executions(
tags=["graphs"],
dependencies=[Depends(auth_middleware)],
)
-async def get_graph_run_node_execution_results(
+async def get_graph_execution(
graph_id: str,
graph_exec_id: str,
user_id: Annotated[str, Depends(get_user_id)],
-) -> Sequence[execution_db.ExecutionResult]:
+) -> graph_db.GraphExecution:
graph = await graph_db.get_graph(graph_id, user_id=user_id)
if not graph:
raise HTTPException(status_code=404, detail=f"Graph #{graph_id} not found.")
- return await execution_db.get_execution_results(graph_exec_id)
+ result = await graph_db.get_execution(execution_id=graph_exec_id, user_id=user_id)
+ if not result:
+ raise HTTPException(status_code=404, detail=f"Graph #{graph_id} not found.")
+
+ return result
########################################################
diff --git a/autogpt_platform/backend/backend/server/v2/store/routes.py b/autogpt_platform/backend/backend/server/v2/store/routes.py
index 65d9d9954f..0921cb650e 100644
--- a/autogpt_platform/backend/backend/server/v2/store/routes.py
+++ b/autogpt_platform/backend/backend/server/v2/store/routes.py
@@ -669,7 +669,8 @@ async def review_submission(
reviewer_id=user.user_id,
)
return submission
- except Exception:
+ except Exception as e:
+ logger.error(f"Could not create store submission review: {e}")
raise fastapi.HTTPException(
status_code=500,
detail="An error occurred while creating the store submission review",
diff --git a/autogpt_platform/backend/backend/util/test.py b/autogpt_platform/backend/backend/util/test.py
index 29b2cfdbc5..7691fec7d1 100644
--- a/autogpt_platform/backend/backend/util/test.py
+++ b/autogpt_platform/backend/backend/util/test.py
@@ -78,9 +78,10 @@ async def wait_execution(
# Wait for the executions to complete
for i in range(timeout):
if await is_execution_completed():
- return await AgentServer().test_get_graph_run_node_execution_results(
+ graph_exec = await AgentServer().test_get_graph_run_results(
graph_id, graph_exec_id, user_id
)
+ return graph_exec.node_executions
time.sleep(1)
assert False, "Execution did not complete in time."
diff --git a/autogpt_platform/backend/test/conftest.py b/autogpt_platform/backend/test/conftest.py
index d107a0b322..ec8e661a01 100644
--- a/autogpt_platform/backend/test/conftest.py
+++ b/autogpt_platform/backend/test/conftest.py
@@ -1,23 +1,25 @@
import logging
+import os
import pytest
-from backend.util.test import SpinTestServer
+from backend.util.logging import configure_logging
# NOTE: You can run tests with --log-cli-level=INFO to see the logs
# Set up logging
+configure_logging()
logger = logging.getLogger(__name__)
-# Create console handler with formatting
-ch = logging.StreamHandler()
-ch.setLevel(logging.INFO)
-formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
-ch.setFormatter(formatter)
-logger.addHandler(ch)
+# Reduce Prisma log spam unless PRISMA_DEBUG is set
+if not os.getenv("PRISMA_DEBUG"):
+ prisma_logger = logging.getLogger("prisma")
+ prisma_logger.setLevel(logging.INFO)
@pytest.fixture(scope="session")
async def server():
+ from backend.util.test import SpinTestServer
+
async with SpinTestServer() as server:
yield server
diff --git a/autogpt_platform/backend/test/executor/test_manager.py b/autogpt_platform/backend/test/executor/test_manager.py
index 3e4a7cacd2..0412a779bc 100644
--- a/autogpt_platform/backend/test/executor/test_manager.py
+++ b/autogpt_platform/backend/test/executor/test_manager.py
@@ -58,7 +58,7 @@ async def assert_sample_graph_executions(
graph_exec_id: str,
):
logger.info(f"Checking execution results for graph {test_graph.id}")
- executions = await agent_server.test_get_graph_run_node_execution_results(
+ graph_run = await agent_server.test_get_graph_run_results(
test_graph.id,
graph_exec_id,
test_user.id,
@@ -77,7 +77,7 @@ async def assert_sample_graph_executions(
]
# Executing StoreValueBlock
- exec = executions[0]
+ exec = graph_run.node_executions[0]
logger.info(f"Checking first StoreValueBlock execution: {exec}")
assert exec.status == execution.ExecutionStatus.COMPLETED
assert exec.graph_exec_id == graph_exec_id
@@ -90,7 +90,7 @@ async def assert_sample_graph_executions(
assert exec.node_id in [test_graph.nodes[0].id, test_graph.nodes[1].id]
# Executing StoreValueBlock
- exec = executions[1]
+ exec = graph_run.node_executions[1]
logger.info(f"Checking second StoreValueBlock execution: {exec}")
assert exec.status == execution.ExecutionStatus.COMPLETED
assert exec.graph_exec_id == graph_exec_id
@@ -103,7 +103,7 @@ async def assert_sample_graph_executions(
assert exec.node_id in [test_graph.nodes[0].id, test_graph.nodes[1].id]
# Executing FillTextTemplateBlock
- exec = executions[2]
+ exec = graph_run.node_executions[2]
logger.info(f"Checking FillTextTemplateBlock execution: {exec}")
assert exec.status == execution.ExecutionStatus.COMPLETED
assert exec.graph_exec_id == graph_exec_id
@@ -118,7 +118,7 @@ async def assert_sample_graph_executions(
assert exec.node_id == test_graph.nodes[2].id
# Executing PrintToConsoleBlock
- exec = executions[3]
+ exec = graph_run.node_executions[3]
logger.info(f"Checking PrintToConsoleBlock execution: {exec}")
assert exec.status == execution.ExecutionStatus.COMPLETED
assert exec.graph_exec_id == graph_exec_id
@@ -201,14 +201,14 @@ async def test_input_pin_always_waited(server: SpinTestServer):
)
logger.info("Checking execution results")
- executions = await server.agent_server.test_get_graph_run_node_execution_results(
+ graph_exec = await server.agent_server.test_get_graph_run_results(
test_graph.id, graph_exec_id, test_user.id
)
- assert len(executions) == 3
+ assert len(graph_exec.node_executions) == 3
# FindInDictionaryBlock should wait for the input pin to be provided,
# Hence executing extraction of "key" from {"key1": "value1", "key2": "value2"}
- assert executions[2].status == execution.ExecutionStatus.COMPLETED
- assert executions[2].output_data == {"output": ["value2"]}
+ assert graph_exec.node_executions[2].status == execution.ExecutionStatus.COMPLETED
+ assert graph_exec.node_executions[2].output_data == {"output": ["value2"]}
logger.info("Completed test_input_pin_always_waited")
@@ -284,12 +284,12 @@ async def test_static_input_link_on_graph(server: SpinTestServer):
server.agent_server, test_graph, test_user, {}, 8
)
logger.info("Checking execution results")
- executions = await server.agent_server.test_get_graph_run_node_execution_results(
+ graph_exec = await server.agent_server.test_get_graph_run_results(
test_graph.id, graph_exec_id, test_user.id
)
- assert len(executions) == 8
+ assert len(graph_exec.node_executions) == 8
# The last 3 executions will be a+b=4+5=9
- for i, exec_data in enumerate(executions[-3:]):
+ for i, exec_data in enumerate(graph_exec.node_executions[-3:]):
logger.info(f"Checking execution {i+1} of last 3: {exec_data}")
assert exec_data.status == execution.ExecutionStatus.COMPLETED
assert exec_data.output_data == {"result": [9]}
diff --git a/autogpt_platform/frontend/src/app/agents/[id]/page.tsx b/autogpt_platform/frontend/src/app/agents/[id]/page.tsx
new file mode 100644
index 0000000000..7967deae8a
--- /dev/null
+++ b/autogpt_platform/frontend/src/app/agents/[id]/page.tsx
@@ -0,0 +1,184 @@
+"use client";
+import React, { useCallback, useEffect, useMemo, useState } from "react";
+import { useParams, useRouter } from "next/navigation";
+
+import { useBackendAPI } from "@/lib/autogpt-server-api/context";
+import {
+ GraphExecution,
+ GraphExecutionMeta,
+ GraphMeta,
+ Schedule,
+} from "@/lib/autogpt-server-api";
+
+import AgentRunDraftView from "@/components/agents/agent-run-draft-view";
+import AgentRunDetailsView from "@/components/agents/agent-run-details-view";
+import AgentRunsSelectorList from "@/components/agents/agent-runs-selector-list";
+import AgentScheduleDetailsView from "@/components/agents/agent-schedule-details-view";
+
+export default function AgentRunsPage(): React.ReactElement {
+ const { id: agentID }: { id: string } = useParams();
+ const router = useRouter();
+ const api = useBackendAPI();
+
+ const [agent, setAgent] = useState(null);
+ const [agentRuns, setAgentRuns] = useState([]);
+ const [schedules, setSchedules] = useState([]);
+ const [selectedView, selectView] = useState<{
+ type: "run" | "schedule";
+ id?: string;
+ }>({ type: "run" });
+ const [selectedRun, setSelectedRun] = useState<
+ GraphExecution | GraphExecutionMeta | null
+ >(null);
+ const [selectedSchedule, setSelectedSchedule] = useState(
+ null,
+ );
+ const [isFirstLoad, setIsFirstLoad] = useState(true);
+
+ const openRunDraftView = useCallback(() => {
+ selectView({ type: "run" });
+ }, []);
+
+ const selectRun = useCallback((id: string) => {
+ selectView({ type: "run", id });
+ }, []);
+
+ const selectSchedule = useCallback((schedule: Schedule) => {
+ selectView({ type: "schedule", id: schedule.id });
+ setSelectedSchedule(schedule);
+ }, []);
+
+ const fetchAgents = useCallback(() => {
+ api.getGraph(agentID).then(setAgent);
+ api.getGraphExecutions(agentID).then((agentRuns) => {
+ const sortedRuns = agentRuns.toSorted(
+ (a, b) => b.started_at - a.started_at,
+ );
+ setAgentRuns(sortedRuns);
+
+ if (!selectedView.id && isFirstLoad && sortedRuns.length > 0) {
+ // only for first load or first execution
+ setIsFirstLoad(false);
+ selectView({ type: "run", id: sortedRuns[0].execution_id });
+ setSelectedRun(sortedRuns[0]);
+ }
+ });
+ if (selectedView.type == "run" && selectedView.id) {
+ api.getGraphExecutionInfo(agentID, selectedView.id).then(setSelectedRun);
+ }
+ }, [api, agentID, selectedView, isFirstLoad]);
+
+ useEffect(() => {
+ fetchAgents();
+ }, []);
+
+ // load selectedRun based on selectedView
+ useEffect(() => {
+ if (selectedView.type != "run" || !selectedView.id) return;
+
+ // pull partial data from "cache" while waiting for the rest to load
+ if (selectedView.id !== selectedRun?.execution_id) {
+ setSelectedRun(
+ agentRuns.find((r) => r.execution_id == selectedView.id) ?? null,
+ );
+ }
+
+ api.getGraphExecutionInfo(agentID, selectedView.id).then(setSelectedRun);
+ }, [api, selectedView, agentRuns, agentID]);
+
+ const fetchSchedules = useCallback(async () => {
+ // TODO: filter in backend - https://github.com/Significant-Gravitas/AutoGPT/issues/9183
+ setSchedules(
+ (await api.listSchedules()).filter((s) => s.graph_id == agentID),
+ );
+ }, [api, agentID]);
+
+ useEffect(() => {
+ fetchSchedules();
+ }, [fetchSchedules]);
+
+ const removeSchedule = useCallback(
+ async (scheduleId: string) => {
+ const removedSchedule = await api.deleteSchedule(scheduleId);
+ setSchedules(schedules.filter((s) => s.id !== removedSchedule.id));
+ },
+ [schedules, api],
+ );
+
+ /* TODO: use websockets instead of polling - https://github.com/Significant-Gravitas/AutoGPT/issues/8782 */
+ useEffect(() => {
+ const intervalId = setInterval(() => fetchAgents(), 5000);
+ return () => clearInterval(intervalId);
+ }, [fetchAgents, agent]);
+
+ const agentActions: { label: string; callback: () => void }[] = useMemo(
+ () => [
+ {
+ label: "Open in builder",
+ callback: () => agent && router.push(`/build?flowID=${agent.id}`),
+ },
+ ],
+ [agent, router],
+ );
+
+ if (!agent) {
+ /* TODO: implement loading indicators / skeleton page */
+ return Loading...;
+ }
+
+ return (
+
+ {/* Sidebar w/ list of runs */}
+ {/* TODO: render this below header in sm and md layouts */}
+
+
+
+ {/* Header */}
+
+
+ {
+ agent.name /* TODO: use dynamic/custom run title - https://github.com/Significant-Gravitas/AutoGPT/issues/9184 */
+ }
+
+
+
+ {/* Run / Schedule views */}
+ {(selectedView.type == "run" ? (
+ selectedView.id ? (
+ selectedRun && (
+
+ )
+ ) : (
+
selectRun(runID)}
+ agentActions={agentActions}
+ />
+ )
+ ) : selectedView.type == "schedule" ? (
+ selectedSchedule && (
+ selectRun(runID)}
+ agentActions={agentActions}
+ />
+ )
+ ) : null) || Loading...
}
+
+
+ );
+}
diff --git a/autogpt_platform/frontend/src/app/globals.css b/autogpt_platform/frontend/src/app/globals.css
index c2998c08d0..46b185d69e 100644
--- a/autogpt_platform/frontend/src/app/globals.css
+++ b/autogpt_platform/frontend/src/app/globals.css
@@ -2,54 +2,9 @@
@tailwind components;
@tailwind utilities;
-@layer base {
- .font-neue {
- font-family: "PP Neue Montreal TT", sans-serif;
- }
-}
-
-@layer utilities {
- .w-110 {
- width: 27.5rem;
- }
- .h-7\.5 {
- height: 1.1875rem;
- }
- .h-18 {
- height: 4.5rem;
- }
- .h-238 {
- height: 14.875rem;
- }
- .top-158 {
- top: 9.875rem;
- }
- .top-254 {
- top: 15.875rem;
- }
- .top-284 {
- top: 17.75rem;
- }
- .top-360 {
- top: 22.5rem;
- }
- .left-297 {
- left: 18.5625rem;
- }
- .left-34 {
- left: 2.125rem;
- }
-}
-
-@layer utilities {
- .text-balance {
- text-wrap: balance;
- }
-}
-
@layer base {
:root {
- --background: 0 0% 100%;
+ --background: 0 0% 99.6%; /* #FEFEFE */
--foreground: 240 10% 3.9%;
--card: 0 0% 100%;
--card-foreground: 240 10% 3.9%;
@@ -61,8 +16,8 @@
--secondary-foreground: 240 5.9% 10%;
--muted: 240 4.8% 95.9%;
--muted-foreground: 240 3.8% 46.1%;
- --accent: 240 4.8% 95.9%;
- --accent-foreground: 240 5.9% 10%;
+ --accent: 262 83% 58%;
+ --accent-foreground: 0 0% 100%;
--destructive: 0 84.2% 60.2%;
--destructive-foreground: 0 0% 98%;
--border: 240 5.9% 90%;
@@ -102,9 +57,7 @@
--chart-4: 280 65% 60%;
--chart-5: 340 75% 55%;
}
-}
-@layer base {
* {
@apply border-border;
}
@@ -112,6 +65,14 @@
@apply bg-background text-foreground;
}
+ .font-neue {
+ font-family: "PP Neue Montreal TT", sans-serif;
+ }
+}
+
+/* *** AutoGPT Design Components *** */
+
+@layer components {
.agpt-border-input {
@apply border border-input focus-visible:border-gray-400 focus-visible:outline-none;
}
@@ -119,4 +80,67 @@
.agpt-shadow-input {
@apply shadow-sm focus-visible:shadow-md;
}
+
+ .agpt-rounded-card {
+ @apply rounded-2xl;
+ }
+
+ .agpt-rounded-box {
+ @apply rounded-3xl;
+ }
+
+ .agpt-card {
+ @apply agpt-rounded-card border border-zinc-300 bg-white p-[1px];
+ }
+
+ .agpt-box {
+ @apply agpt-card agpt-rounded-box;
+ }
+
+ .agpt-div {
+ @apply border-zinc-200 p-5;
+ }
+}
+
+@layer utilities {
+ .agpt-card-selected {
+ @apply border-2 border-accent bg-violet-50/50 p-0;
+ }
+}
+
+@layer utilities {
+ /* TODO: 1. remove unused utility classes */
+ /* TODO: 2. fix naming of numbered dimensions so that the number is 4*dimension */
+ /* TODO: 3. move to tailwind.config.ts spacing config */
+ .h-7\.5 {
+ height: 1.1875rem;
+ }
+ .h-18 {
+ height: 4.5rem;
+ }
+ .h-238 {
+ height: 14.875rem;
+ }
+ .top-158 {
+ top: 9.875rem;
+ }
+ .top-254 {
+ top: 15.875rem;
+ }
+ .top-284 {
+ top: 17.75rem;
+ }
+ .top-360 {
+ top: 22.5rem;
+ }
+ .left-297 {
+ left: 18.5625rem;
+ }
+ .left-34 {
+ left: 2.125rem;
+ }
+
+ .text-balance {
+ text-wrap: balance;
+ }
}
diff --git a/autogpt_platform/frontend/src/app/monitoring/page.tsx b/autogpt_platform/frontend/src/app/monitoring/page.tsx
index 55af3cbdda..bfacf44cf4 100644
--- a/autogpt_platform/frontend/src/app/monitoring/page.tsx
+++ b/autogpt_platform/frontend/src/app/monitoring/page.tsx
@@ -2,7 +2,7 @@
import React, { useCallback, useEffect, useState } from "react";
import {
- GraphExecution,
+ GraphExecutionMeta,
Schedule,
LibraryAgent,
} from "@/lib/autogpt-server-api";
@@ -20,10 +20,12 @@ import { useBackendAPI } from "@/lib/autogpt-server-api/context";
const Monitor = () => {
const [flows, setFlows] = useState([]);
- const [executions, setExecutions] = useState([]);
+ const [executions, setExecutions] = useState([]);
const [schedules, setSchedules] = useState([]);
const [selectedFlow, setSelectedFlow] = useState(null);
- const [selectedRun, setSelectedRun] = useState(null);
+ const [selectedRun, setSelectedRun] = useState(
+ null,
+ );
const [sortColumn, setSortColumn] = useState("id");
const [sortDirection, setSortDirection] = useState<"asc" | "desc">("asc");
const api = useBackendAPI();
diff --git a/autogpt_platform/frontend/src/app/profile/(user)/credits/page.tsx b/autogpt_platform/frontend/src/app/profile/(user)/credits/page.tsx
index 5a8991acb8..3ece782495 100644
--- a/autogpt_platform/frontend/src/app/profile/(user)/credits/page.tsx
+++ b/autogpt_platform/frontend/src/app/profile/(user)/credits/page.tsx
@@ -260,7 +260,6 @@ export default function CreditsPage() {