diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 293a9e2d43..f262441f0e 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -170,6 +170,16 @@ repos: files: ^classic/benchmark/(agbenchmark|tests)/((?!reports).)*[/.] args: [--config=classic/benchmark/.flake8] + - repo: local + hooks: + - id: prettier + name: Format (Prettier) - AutoGPT Platform - Frontend + alias: format-platform-frontend + entry: bash -c 'cd autogpt_platform/frontend && npx prettier --write $(echo "$@" | sed "s|autogpt_platform/frontend/||g")' -- + files: ^autogpt_platform/frontend/ + types: [file] + language: system + - repo: local # To have watertight type checking, we check *all* the files in an affected # project. To trigger on poetry.lock we also reset the file `types` filter. @@ -221,6 +231,16 @@ repos: language: system pass_filenames: false + - repo: local + hooks: + - id: tsc + name: Typecheck - AutoGPT Platform - Frontend + entry: bash -c 'cd autogpt_platform/frontend && npm run type-check' + files: ^autogpt_platform/frontend/ + types: [file] + language: system + pass_filenames: false + - repo: local hooks: - id: pytest diff --git a/autogpt_platform/autogpt_libs/autogpt_libs/feature_flag/client.py b/autogpt_platform/autogpt_libs/autogpt_libs/feature_flag/client.py index dde516c1d8..9aed891706 100644 --- a/autogpt_platform/autogpt_libs/autogpt_libs/feature_flag/client.py +++ b/autogpt_platform/autogpt_libs/autogpt_libs/feature_flag/client.py @@ -13,7 +13,6 @@ from typing_extensions import ParamSpec from .config import SETTINGS logger = logging.getLogger(__name__) -logging.basicConfig(level=logging.DEBUG) P = ParamSpec("P") T = TypeVar("T") diff --git a/autogpt_platform/backend/.env.example b/autogpt_platform/backend/.env.example index 11383b9589..8b28244f42 100644 --- a/autogpt_platform/backend/.env.example +++ b/autogpt_platform/backend/.env.example @@ -165,6 +165,15 @@ MEM0_API_KEY= # Nvidia NVIDIA_API_KEY= +# Apollo +APOLLO_API_KEY= + +# SmartLead 
import logging
from typing import Any, List

from backend.blocks.apollo._auth import ApolloCredentials
from backend.blocks.apollo.models import (
    Contact,
    Organization,
    SearchOrganizationsRequest,
    SearchOrganizationsResponse,
    SearchPeopleRequest,
    SearchPeopleResponse,
)
from backend.util.request import Requests

logger = logging.getLogger(__name__)


class ApolloClient:
    """Client for the Apollo API.

    Wraps the two search endpoints (people and companies) and transparently
    follows Apollo's pagination until ``query.max_results`` items have been
    collected or no pages remain.
    """

    API_URL = "https://api.apollo.io/api/v1"

    def __init__(self, credentials: ApolloCredentials):
        self.credentials = credentials
        self.requests = Requests()

    def _get_headers(self) -> dict[str, str]:
        # Apollo authenticates with a static API key sent in a custom header.
        return {"x-api-key": self.credentials.api_key.get_secret_value()}

    def _get_page(self, endpoint: str, query: Any) -> dict:
        """Fetch a single result page as raw JSON.

        ``credentials`` and ``max_results`` are client-side options, not API
        parameters, so they are excluded from the request.
        """
        response = self.requests.get(
            f"{self.API_URL}/{endpoint}",
            headers=self._get_headers(),
            params=query.model_dump(exclude={"credentials", "max_results"}),
        )
        return response.json()

    def _search_paginated(
        self,
        endpoint: str,
        query: Any,
        response_model: Any,
        items_field: str,
    ) -> list:
        """Run a search against ``endpoint`` and follow pagination.

        Mutates ``query.page`` while walking pages. Returns at most
        ``query.max_results`` items (all first-page items when it is falsy).

        Fix over the previous implementation: the old guard required
        ``max_results < pagination.total_entries`` (strict ``<``), so when
        ``max_results == total_entries`` pagination was skipped entirely and
        results were silently truncated to the first page. The loop below is
        bounded by ``len(items) < max_results`` and
        ``page < total_pages`` instead, which covers that case.
        """
        parsed = response_model(**self._get_page(endpoint, query))
        if parsed.pagination.total_entries == 0:
            return []

        items = list(getattr(parsed, items_field))
        limit = query.max_results
        while (
            limit is not None
            and len(items) < limit
            # Stop when the server has no further pages...
            and query.page < parsed.pagination.total_pages
            # ...or the last page came back empty (defensive against bad totals).
            and len(getattr(parsed, items_field)) > 0
        ):
            query.page += 1
            parsed = response_model(**self._get_page(endpoint, query))
            # Never collect more than the requested maximum.
            items.extend(getattr(parsed, items_field)[: limit - len(items)])

        return items[:limit] if limit else items

    def search_people(self, query: SearchPeopleRequest) -> List[Contact]:
        """Search for people in Apollo."""
        people = self._search_paginated(
            "mixed_people/search", query, SearchPeopleResponse, "people"
        )
        logger.info(f"Found {len(people)} people")
        return people

    def search_organizations(
        self, query: SearchOrganizationsRequest
    ) -> List[Organization]:
        """Search for organizations in Apollo."""
        organizations = self._search_paginated(
            "mixed_companies/search",
            query,
            SearchOrganizationsResponse,
            "organizations",
        )
        logger.info(f"Found {len(organizations)} organizations")
        return organizations
import SecretStr + +from backend.data.model import APIKeyCredentials, CredentialsField, CredentialsMetaInput +from backend.integrations.providers import ProviderName + +ApolloCredentials = APIKeyCredentials +ApolloCredentialsInput = CredentialsMetaInput[ + Literal[ProviderName.APOLLO], + Literal["api_key"], +] + +TEST_CREDENTIALS = APIKeyCredentials( + id="01234567-89ab-cdef-0123-456789abcdef", + provider="apollo", + api_key=SecretStr("mock-apollo-api-key"), + title="Mock Apollo API key", + expires_at=None, +) +TEST_CREDENTIALS_INPUT = { + "provider": TEST_CREDENTIALS.provider, + "id": TEST_CREDENTIALS.id, + "type": TEST_CREDENTIALS.type, + "title": TEST_CREDENTIALS.title, +} + + +def ApolloCredentialsField() -> ApolloCredentialsInput: + """ + Creates a Apollo credentials input on a block. + """ + return CredentialsField( + description="The Apollo integration can be used with an API Key.", + ) diff --git a/autogpt_platform/backend/backend/blocks/apollo/models.py b/autogpt_platform/backend/backend/blocks/apollo/models.py new file mode 100644 index 0000000000..b9fa621ddc --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/apollo/models.py @@ -0,0 +1,543 @@ +from enum import Enum +from typing import Any, Optional + +from pydantic import BaseModel + +from backend.data.model import SchemaField + + +class PrimaryPhone(BaseModel): + """A primary phone in Apollo""" + + number: str + source: str + sanitized_number: str + + +class SenorityLevels(str, Enum): + """Seniority levels in Apollo""" + + OWNER = "owner" + FOUNDER = "founder" + C_SUITE = "c_suite" + PARTNER = "partner" + VP = "vp" + HEAD = "head" + DIRECTOR = "director" + MANAGER = "manager" + SENIOR = "senior" + ENTRY = "entry" + INTERN = "intern" + + +class ContactEmailStatuses(str, Enum): + """Contact email statuses in Apollo""" + + VERIFIED = "verified" + UNVERIFIED = "unverified" + LIKELY_TO_ENGAGE = "likely_to_engage" + UNAVAILABLE = "unavailable" + + +class RuleConfigStatus(BaseModel): + """A rule 
config status in Apollo""" + + _id: str + created_at: str + rule_action_config_id: str + rule_config_id: str + status_cd: str + updated_at: str + id: str + key: str + + +class ContactCampaignStatus(BaseModel): + """A contact campaign status in Apollo""" + + id: str + emailer_campaign_id: str + send_email_from_user_id: str + inactive_reason: str + status: str + added_at: str + added_by_user_id: str + finished_at: str + paused_at: str + auto_unpause_at: str + send_email_from_email_address: str + send_email_from_email_account_id: str + manually_set_unpause: str + failure_reason: str + current_step_id: str + in_response_to_emailer_message_id: str + cc_emails: str + bcc_emails: str + to_emails: str + + +class Account(BaseModel): + """An account in Apollo""" + + id: str + name: str + website_url: str + blog_url: str + angellist_url: str + linkedin_url: str + twitter_url: str + facebook_url: str + primary_phone: PrimaryPhone + languages: list[str] + alexa_ranking: int + phone: str + linkedin_uid: str + founded_year: int + publicly_traded_symbol: str + publicly_traded_exchange: str + logo_url: str + chrunchbase_url: str + primary_domain: str + domain: str + team_id: str + organization_id: str + account_stage_id: str + source: str + original_source: str + creator_id: str + owner_id: str + created_at: str + phone_status: str + hubspot_id: str + salesforce_id: str + crm_owner_id: str + parent_account_id: str + sanitized_phone: str + # no listed type on the API docs + account_playbook_statues: list[Any] + account_rule_config_statuses: list[RuleConfigStatus] + existence_level: str + label_ids: list[str] + typed_custom_fields: Any + custom_field_errors: Any + modality: str + source_display_name: str + salesforce_record_id: str + crm_record_url: str + + +class ContactEmail(BaseModel): + """A contact email in Apollo""" + + email: str = "" + email_md5: str = "" + email_sha256: str = "" + email_status: str = "" + email_source: str = "" + extrapolated_email_confidence: str = "" + 
position: int = 0 + email_from_customer: str = "" + free_domain: bool = True + + +class EmploymentHistory(BaseModel): + """An employment history in Apollo""" + + class Config: + extra = "allow" + arbitrary_types_allowed = True + from_attributes = True + populate_by_name = True + + _id: Optional[str] = None + created_at: Optional[str] = None + current: Optional[bool] = None + degree: Optional[str] = None + description: Optional[str] = None + emails: Optional[str] = None + end_date: Optional[str] = None + grade_level: Optional[str] = None + kind: Optional[str] = None + major: Optional[str] = None + organization_id: Optional[str] = None + organization_name: Optional[str] = None + raw_address: Optional[str] = None + start_date: Optional[str] = None + title: Optional[str] = None + updated_at: Optional[str] = None + id: Optional[str] = None + key: Optional[str] = None + + +class Breadcrumb(BaseModel): + """A breadcrumb in Apollo""" + + label: Optional[str] = "N/A" + signal_field_name: Optional[str] = "N/A" + value: str | list | None = "N/A" + display_name: Optional[str] = "N/A" + + +class TypedCustomField(BaseModel): + """A typed custom field in Apollo""" + + id: Optional[str] = "N/A" + value: Optional[str] = "N/A" + + +class Pagination(BaseModel): + """Pagination in Apollo""" + + class Config: + extra = "allow" # Allow extra fields + arbitrary_types_allowed = True # Allow any type + from_attributes = True # Allow from_orm + populate_by_name = True # Allow field aliases to work both ways + + page: int = 0 + per_page: int = 0 + total_entries: int = 0 + total_pages: int = 0 + + +class DialerFlags(BaseModel): + """A dialer flags in Apollo""" + + country_name: str + country_enabled: bool + high_risk_calling_enabled: bool + potential_high_risk_number: bool + + +class PhoneNumber(BaseModel): + """A phone number in Apollo""" + + raw_number: str = "" + sanitized_number: str = "" + type: str = "" + position: int = 0 + status: str = "" + dnc_status: str = "" + dnc_other_info: str 
= "" + dailer_flags: DialerFlags = DialerFlags( + country_name="", + country_enabled=True, + high_risk_calling_enabled=True, + potential_high_risk_number=True, + ) + + +class Organization(BaseModel): + """An organization in Apollo""" + + class Config: + extra = "allow" + arbitrary_types_allowed = True + from_attributes = True + populate_by_name = True + + id: Optional[str] = "N/A" + name: Optional[str] = "N/A" + website_url: Optional[str] = "N/A" + blog_url: Optional[str] = "N/A" + angellist_url: Optional[str] = "N/A" + linkedin_url: Optional[str] = "N/A" + twitter_url: Optional[str] = "N/A" + facebook_url: Optional[str] = "N/A" + primary_phone: Optional[PrimaryPhone] = PrimaryPhone( + number="N/A", source="N/A", sanitized_number="N/A" + ) + languages: list[str] = [] + alexa_ranking: Optional[int] = 0 + phone: Optional[str] = "N/A" + linkedin_uid: Optional[str] = "N/A" + founded_year: Optional[int] = 0 + publicly_traded_symbol: Optional[str] = "N/A" + publicly_traded_exchange: Optional[str] = "N/A" + logo_url: Optional[str] = "N/A" + chrunchbase_url: Optional[str] = "N/A" + primary_domain: Optional[str] = "N/A" + sanitized_phone: Optional[str] = "N/A" + owned_by_organization_id: Optional[str] = "N/A" + intent_strength: Optional[str] = "N/A" + show_intent: bool = True + has_intent_signal_account: Optional[bool] = True + intent_signal_account: Optional[str] = "N/A" + + +class Contact(BaseModel): + """A contact in Apollo""" + + class Config: + extra = "allow" + arbitrary_types_allowed = True + from_attributes = True + populate_by_name = True + + contact_roles: list[Any] = [] + id: Optional[str] = None + first_name: Optional[str] = None + last_name: Optional[str] = None + name: Optional[str] = None + linkedin_url: Optional[str] = None + title: Optional[str] = None + contact_stage_id: Optional[str] = None + owner_id: Optional[str] = None + creator_id: Optional[str] = None + person_id: Optional[str] = None + email_needs_tickling: bool = True + organization_name: 
Optional[str] = None + source: Optional[str] = None + original_source: Optional[str] = None + organization_id: Optional[str] = None + headline: Optional[str] = None + photo_url: Optional[str] = None + present_raw_address: Optional[str] = None + linkededin_uid: Optional[str] = None + extrapolated_email_confidence: Optional[float] = None + salesforce_id: Optional[str] = None + salesforce_lead_id: Optional[str] = None + salesforce_contact_id: Optional[str] = None + saleforce_account_id: Optional[str] = None + crm_owner_id: Optional[str] = None + created_at: Optional[str] = None + emailer_campaign_ids: list[str] = [] + direct_dial_status: Optional[str] = None + direct_dial_enrichment_failed_at: Optional[str] = None + email_status: Optional[str] = None + email_source: Optional[str] = None + account_id: Optional[str] = None + last_activity_date: Optional[str] = None + hubspot_vid: Optional[str] = None + hubspot_company_id: Optional[str] = None + crm_id: Optional[str] = None + sanitized_phone: Optional[str] = None + merged_crm_ids: Optional[str] = None + updated_at: Optional[str] = None + queued_for_crm_push: bool = True + suggested_from_rule_engine_config_id: Optional[str] = None + email_unsubscribed: Optional[str] = None + label_ids: list[Any] = [] + has_pending_email_arcgate_request: bool = True + has_email_arcgate_request: bool = True + existence_level: Optional[str] = None + email: Optional[str] = None + email_from_customer: Optional[str] = None + typed_custom_fields: list[TypedCustomField] = [] + custom_field_errors: Any = None + salesforce_record_id: Optional[str] = None + crm_record_url: Optional[str] = None + email_status_unavailable_reason: Optional[str] = None + email_true_status: Optional[str] = None + updated_email_true_status: bool = True + contact_rule_config_statuses: list[RuleConfigStatus] = [] + source_display_name: Optional[str] = None + twitter_url: Optional[str] = None + contact_campaign_statuses: list[ContactCampaignStatus] = [] + state: 
Optional[str] = None + city: Optional[str] = None + country: Optional[str] = None + account: Optional[Account] = None + contact_emails: list[ContactEmail] = [] + organization: Optional[Organization] = None + employment_history: list[EmploymentHistory] = [] + time_zone: Optional[str] = None + intent_strength: Optional[str] = None + show_intent: bool = True + phone_numbers: list[PhoneNumber] = [] + account_phone_note: Optional[str] = None + free_domain: bool = True + is_likely_to_engage: bool = True + email_domain_catchall: bool = True + contact_job_change_event: Optional[str] = None + + +class SearchOrganizationsRequest(BaseModel): + """Request for Apollo's search organizations API""" + + organization_num_empoloyees_range: list[int] = SchemaField( + description="""The number range of employees working for the company. This enables you to find companies based on headcount. You can add multiple ranges to expand your search results. + +Each range you add needs to be a string, with the upper and lower numbers of the range separated only by a comma.""", + default=[0, 1000000], + ) + + organization_locations: list[str] = SchemaField( + description="""The location of the company headquarters. You can search across cities, US states, and countries. + +If a company has several office locations, results are still based on the headquarters location. For example, if you search chicago but a company's HQ location is in boston, any Boston-based companies will not appearch in your search results, even if they match other parameters. + +To exclude companies based on location, use the organization_not_locations parameter. +""", + default=[], + ) + organizations_not_locations: list[str] = SchemaField( + description="""Exclude companies from search results based on the location of the company headquarters. You can use cities, US states, and countries as locations to exclude. + +This parameter is useful for ensuring you do not prospect in an undesirable territory. 
For example, if you use ireland as a value, no Ireland-based companies will appear in your search results. +""", + default=[], + ) + q_organization_keyword_tags: list[str] = SchemaField( + description="""Filter search results based on keywords associated with companies. For example, you can enter mining as a value to return only companies that have an association with the mining industry.""" + ) + q_organization_name: str = SchemaField( + description="""Filter search results to include a specific company name. + +If the value you enter for this parameter does not match with a company's name, the company will not appear in search results, even if it matches other parameters. Partial matches are accepted. For example, if you filter by the value marketing, a company called NY Marketing Unlimited would still be eligible as a search result, but NY Market Analysis would not be eligible.""" + ) + organization_ids: list[str] = SchemaField( + description="""The Apollo IDs for the companies you want to include in your search results. Each company in the Apollo database is assigned a unique ID. + +To find IDs, identify the values for organization_id when you call this endpoint.""", + default=[], + ) + max_results: int = SchemaField( + description="""The maximum number of results to return. If you don't specify this parameter, the default is 100.""", + default=100, + ge=1, + le=50000, + advanced=True, + ) + + page: int = SchemaField( + description="""The page number of the Apollo data that you want to retrieve. + +Use this parameter in combination with the per_page parameter to make search results for navigable and improve the performance of the endpoint.""", + default=1, + ) + per_page: int = SchemaField( + description="""The number of search results that should be returned for each page. Limited the number of results per page improves the endpoint's performance. 
+ +Use the page parameter to search the different pages of data.""", + default=100, + ) + + +class SearchOrganizationsResponse(BaseModel): + """Response from Apollo's search organizations API""" + + breadcrumbs: list[Breadcrumb] = [] + partial_results_only: bool = True + has_join: bool = True + disable_eu_prospecting: bool = True + partial_results_limit: int = 0 + pagination: Pagination = Pagination( + page=0, per_page=0, total_entries=0, total_pages=0 + ) + # no listed type on the API docs + accounts: list[Any] = [] + organizations: list[Organization] = [] + models_ids: list[str] = [] + num_fetch_result: Optional[str] = "N/A" + derived_params: Optional[str] = "N/A" + + +class SearchPeopleRequest(BaseModel): + """Request for Apollo's search people API""" + + person_titles: list[str] = SchemaField( + description="""Job titles held by the people you want to find. For a person to be included in search results, they only need to match 1 of the job titles you add. Adding more job titles expands your search results. + +Results also include job titles with the same terms, even if they are not exact matches. For example, searching for marketing manager might return people with the job title content marketing manager. + +Use this parameter in combination with the person_seniorities[] parameter to find people based on specific job functions and seniority levels. +""", + default=[], + placeholder="marketing manager", + ) + person_locations: list[str] = SchemaField( + description="""The location where people live. You can search across cities, US states, and countries. + +To find people based on the headquarters locations of their current employer, use the organization_locations parameter.""", + default=[], + ) + person_seniorities: list[SenorityLevels] = SchemaField( + description="""The job seniority that people hold within their current employer. 
This enables you to find people that currently hold positions at certain reporting levels, such as Director level or senior IC level. + +For a person to be included in search results, they only need to match 1 of the seniorities you add. Adding more seniorities expands your search results. + +Searches only return results based on their current job title, so searching for Director-level employees only returns people that currently hold a Director-level title. If someone was previously a Director, but is currently a VP, they would not be included in your search results. + +Use this parameter in combination with the person_titles[] parameter to find people based on specific job functions and seniority levels.""", + default=[], + ) + organization_locations: list[str] = SchemaField( + description="""The location of the company headquarters for a person's current employer. You can search across cities, US states, and countries. + +If a company has several office locations, results are still based on the headquarters location. For example, if you search chicago but a company's HQ location is in boston, people that work for the Boston-based company will not appear in your results, even if they match other parameters. + +To find people based on their personal location, use the person_locations parameter.""", + default=[], + ) + q_organization_domains: list[str] = SchemaField( + description="""The domain name for the person's employer. This can be the current employer or a previous employer. Do not include www., the @ symbol, or similar. + +You can add multiple domains to search across companies. + + Examples: apollo.io and microsoft.com""", + default=[], + ) + contact_email_statuses: list[ContactEmailStatuses] = SchemaField( + description="""The email statuses for the people you want to find. 
You can add multiple statuses to expand your search.""", + default=[], + ) + organization_ids: list[str] = SchemaField( + description="""The Apollo IDs for the companies (employers) you want to include in your search results. Each company in the Apollo database is assigned a unique ID. + +To find IDs, call the Organization Search endpoint and identify the values for organization_id.""", + default=[], + ) + organization_num_empoloyees_range: list[int] = SchemaField( + description="""The number range of employees working for the company. This enables you to find companies based on headcount. You can add multiple ranges to expand your search results. + +Each range you add needs to be a string, with the upper and lower numbers of the range separated only by a comma.""", + default=[], + ) + q_keywords: str = SchemaField( + description="""A string of words over which we want to filter the results""", + default="", + ) + page: int = SchemaField( + description="""The page number of the Apollo data that you want to retrieve. + +Use this parameter in combination with the per_page parameter to make search results for navigable and improve the performance of the endpoint.""", + default=1, + ) + per_page: int = SchemaField( + description="""The number of search results that should be returned for each page. Limited the number of results per page improves the endpoint's performance. + +Use the page parameter to search the different pages of data.""", + default=100, + ) + max_results: int = SchemaField( + description="""The maximum number of results to return. 
from backend.blocks.apollo._api import ApolloClient
from backend.blocks.apollo._auth import (
    TEST_CREDENTIALS,
    TEST_CREDENTIALS_INPUT,
    ApolloCredentials,
    ApolloCredentialsInput,
)
from backend.blocks.apollo.models import (
    Organization,
    PrimaryPhone,
    SearchOrganizationsRequest,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField


def _sample_organization() -> Organization:
    """Shared test fixture: one fully-populated Organization.

    Used by both ``test_output`` and ``test_mock`` below; previously this
    25-field literal was duplicated three times.
    """
    return Organization(
        id="1",
        name="Google",
        website_url="https://google.com",
        blog_url="https://google.com/blog",
        angellist_url="https://angel.co/google",
        linkedin_url="https://linkedin.com/company/google",
        twitter_url="https://twitter.com/google",
        facebook_url="https://facebook.com/google",
        primary_phone=PrimaryPhone(
            source="google",
            number="1234567890",
            sanitized_number="1234567890",
        ),
        languages=["en"],
        alexa_ranking=1000,
        phone="1234567890",
        linkedin_uid="1234567890",
        founded_year=2000,
        publicly_traded_symbol="GOOGL",
        publicly_traded_exchange="NASDAQ",
        logo_url="https://google.com/logo.png",
        chrunchbase_url="https://chrunchbase.com/google",
        primary_domain="google.com",
        sanitized_phone="1234567890",
        owned_by_organization_id="1",
        intent_strength="strong",
        show_intent=True,
        has_intent_signal_account=True,
        intent_signal_account="1",
    )


class SearchOrganizationsBlock(Block):
    """Search for organizations in Apollo."""

    class Input(BlockSchema):
        organization_num_empoloyees_range: list[int] = SchemaField(
            description="""The number range of employees working for the company. This enables you to find companies based on headcount. You can add multiple ranges to expand your search results.

Each range you add needs to be a string, with the upper and lower numbers of the range separated only by a comma.""",
            default=[0, 1000000],
        )

        organization_locations: list[str] = SchemaField(
            description="""The location of the company headquarters. You can search across cities, US states, and countries.

If a company has several office locations, results are still based on the headquarters location. For example, if you search chicago but a company's HQ location is in boston, any Boston-based companies will not appear in your search results, even if they match other parameters.

To exclude companies based on location, use the organization_not_locations parameter.
""",
            default=[],
        )
        organizations_not_locations: list[str] = SchemaField(
            description="""Exclude companies from search results based on the location of the company headquarters. You can use cities, US states, and countries as locations to exclude.

This parameter is useful for ensuring you do not prospect in an undesirable territory. For example, if you use ireland as a value, no Ireland-based companies will appear in your search results.
""",
            default=[],
        )
        q_organization_keyword_tags: list[str] = SchemaField(
            description="""Filter search results based on keywords associated with companies. For example, you can enter mining as a value to return only companies that have an association with the mining industry.""",
            default=[],
        )
        q_organization_name: str = SchemaField(
            description="""Filter search results to include a specific company name.

If the value you enter for this parameter does not match with a company's name, the company will not appear in search results, even if it matches other parameters. Partial matches are accepted. For example, if you filter by the value marketing, a company called NY Marketing Unlimited would still be eligible as a search result, but NY Market Analysis would not be eligible.""",
            default="",
            advanced=False,
        )
        organization_ids: list[str] = SchemaField(
            description="""The Apollo IDs for the companies you want to include in your search results. Each company in the Apollo database is assigned a unique ID.

To find IDs, identify the values for organization_id when you call this endpoint.""",
            default=[],
        )
        max_results: int = SchemaField(
            description="""The maximum number of results to return. If you don't specify this parameter, the default is 100.""",
            default=100,
            ge=1,
            le=50000,
            advanced=True,
        )
        credentials: ApolloCredentialsInput = SchemaField(
            description="Apollo credentials",
        )

    class Output(BlockSchema):
        organizations: list[Organization] = SchemaField(
            description="List of organizations found",
            default=[],
        )
        organization: Organization = SchemaField(
            description="Each found organization, one at a time",
        )
        error: str = SchemaField(
            description="Error message if the search failed",
            default="",
        )

    def __init__(self):
        super().__init__(
            id="3d71270d-599e-4148-9b95-71b35d2f44f0",
            description="Search for organizations in Apollo",
            categories={BlockCategory.SEARCH},
            input_schema=SearchOrganizationsBlock.Input,
            output_schema=SearchOrganizationsBlock.Output,
            test_credentials=TEST_CREDENTIALS,
            # Fix: the previous test_input used a non-existent "query" field;
            # the Input schema's name filter is `q_organization_name`.
            test_input={
                "q_organization_name": "Google",
                "credentials": TEST_CREDENTIALS_INPUT,
            },
            test_output=[
                ("organization", _sample_organization()),
                ("organizations", [_sample_organization()]),
            ],
            test_mock={
                "search_organizations": lambda *args, **kwargs: [
                    _sample_organization()
                ]
            },
        )

    @staticmethod
    def search_organizations(
        query: SearchOrganizationsRequest, credentials: ApolloCredentials
    ) -> list[Organization]:
        """Execute the search via the Apollo client (mocked in block tests)."""
        client = ApolloClient(credentials)
        return client.search_organizations(query)

    def run(
        self, input_data: Input, *, credentials: ApolloCredentials, **kwargs
    ) -> BlockOutput:
        """Yield each organization individually, then the full list."""
        query = SearchOrganizationsRequest(
            **input_data.model_dump(exclude={"credentials"})
        )
        organizations = self.search_organizations(query, credentials)
        for organization in organizations:
            yield "organization", organization
        yield "organizations", organizations
For a person to be included in search results, they only need to match 1 of the job titles you add. Adding more job titles expands your search results. + + Results also include job titles with the same terms, even if they are not exact matches. For example, searching for marketing manager might return people with the job title content marketing manager. + + Use this parameter in combination with the person_seniorities[] parameter to find people based on specific job functions and seniority levels. + """, + default=[], + advanced=False, + ) + person_locations: list[str] = SchemaField( + description="""The location where people live. You can search across cities, US states, and countries. + + To find people based on the headquarters locations of their current employer, use the organization_locations parameter.""", + default=[], + advanced=False, + ) + person_seniorities: list[SenorityLevels] = SchemaField( + description="""The job seniority that people hold within their current employer. This enables you to find people that currently hold positions at certain reporting levels, such as Director level or senior IC level. + + For a person to be included in search results, they only need to match 1 of the seniorities you add. Adding more seniorities expands your search results. + + Searches only return results based on their current job title, so searching for Director-level employees only returns people that currently hold a Director-level title. If someone was previously a Director, but is currently a VP, they would not be included in your search results. + + Use this parameter in combination with the person_titles[] parameter to find people based on specific job functions and seniority levels.""", + default=[], + advanced=False, + ) + organization_locations: list[str] = SchemaField( + description="""The location of the company headquarters for a person's current employer. You can search across cities, US states, and countries. 
+ + If a company has several office locations, results are still based on the headquarters location. For example, if you search chicago but a company's HQ location is in boston, people that work for the Boston-based company will not appear in your results, even if they match other parameters. + + To find people based on their personal location, use the person_locations parameter.""", + default=[], + advanced=False, + ) + q_organization_domains: list[str] = SchemaField( + description="""The domain name for the person's employer. This can be the current employer or a previous employer. Do not include www., the @ symbol, or similar. + + You can add multiple domains to search across companies. + + Examples: apollo.io and microsoft.com""", + default=[], + advanced=False, + ) + contact_email_statuses: list[ContactEmailStatuses] = SchemaField( + description="""The email statuses for the people you want to find. You can add multiple statuses to expand your search.""", + default=[], + advanced=False, + ) + organization_ids: list[str] = SchemaField( + description="""The Apollo IDs for the companies (employers) you want to include in your search results. Each company in the Apollo database is assigned a unique ID. + + To find IDs, call the Organization Search endpoint and identify the values for organization_id.""", + default=[], + advanced=False, + ) + organization_num_empoloyees_range: list[int] = SchemaField( + description="""The number range of employees working for the company. This enables you to find companies based on headcount. You can add multiple ranges to expand your search results. 
+ + Each range you add needs to be a string, with the upper and lower numbers of the range separated only by a comma.""", + default=[], + advanced=False, + ) + q_keywords: str = SchemaField( + description="""A string of words over which we want to filter the results""", + default="", + advanced=False, + ) + max_results: int = SchemaField( + description="""The maximum number of results to return. If you don't specify this parameter, the default is 100.""", + default=100, + ge=1, + le=50000, + advanced=True, + ) + + credentials: ApolloCredentialsInput = SchemaField( + description="Apollo credentials", + ) + + class Output(BlockSchema): + people: list[Contact] = SchemaField( + description="List of people found", + default=[], + ) + person: Contact = SchemaField( + description="Each found person, one at a time", + ) + error: str = SchemaField( + description="Error message if the search failed", + default="", + ) + + def __init__(self): + super().__init__( + id="c2adb3aa-5aae-488d-8a6e-4eb8c23e2ed6", + description="Search for people in Apollo", + categories={BlockCategory.SEARCH}, + input_schema=SearchPeopleBlock.Input, + output_schema=SearchPeopleBlock.Output, + test_credentials=TEST_CREDENTIALS, + test_input={"credentials": TEST_CREDENTIALS_INPUT}, + test_output=[ + ( + "person", + Contact( + contact_roles=[], + id="1", + name="John Doe", + first_name="John", + last_name="Doe", + linkedin_url="https://www.linkedin.com/in/johndoe", + title="Software Engineer", + organization_name="Google", + organization_id="123456", + contact_stage_id="1", + owner_id="1", + creator_id="1", + person_id="1", + email_needs_tickling=True, + source="apollo", + original_source="apollo", + headline="Software Engineer", + photo_url="https://www.linkedin.com/in/johndoe", + present_raw_address="123 Main St, Anytown, USA", + linkededin_uid="123456", + extrapolated_email_confidence=0.8, + salesforce_id="123456", + salesforce_lead_id="123456", + salesforce_contact_id="123456", + 
saleforce_account_id="123456", + crm_owner_id="123456", + created_at="2021-01-01", + emailer_campaign_ids=[], + direct_dial_status="active", + direct_dial_enrichment_failed_at="2021-01-01", + email_status="active", + email_source="apollo", + account_id="123456", + last_activity_date="2021-01-01", + hubspot_vid="123456", + hubspot_company_id="123456", + crm_id="123456", + sanitized_phone="123456", + merged_crm_ids="123456", + updated_at="2021-01-01", + queued_for_crm_push=True, + suggested_from_rule_engine_config_id="123456", + email_unsubscribed=None, + label_ids=[], + has_pending_email_arcgate_request=True, + has_email_arcgate_request=True, + existence_level=None, + email=None, + email_from_customer=None, + typed_custom_fields=[], + custom_field_errors=None, + salesforce_record_id=None, + crm_record_url=None, + email_status_unavailable_reason=None, + email_true_status=None, + updated_email_true_status=True, + contact_rule_config_statuses=[], + source_display_name=None, + twitter_url=None, + contact_campaign_statuses=[], + state=None, + city=None, + country=None, + account=None, + contact_emails=[], + organization=None, + employment_history=[], + time_zone=None, + intent_strength=None, + show_intent=True, + phone_numbers=[], + account_phone_note=None, + free_domain=True, + is_likely_to_engage=True, + email_domain_catchall=True, + contact_job_change_event=None, + ), + ), + ( + "people", + [ + Contact( + contact_roles=[], + id="1", + name="John Doe", + first_name="John", + last_name="Doe", + linkedin_url="https://www.linkedin.com/in/johndoe", + title="Software Engineer", + organization_name="Google", + organization_id="123456", + contact_stage_id="1", + owner_id="1", + creator_id="1", + person_id="1", + email_needs_tickling=True, + source="apollo", + original_source="apollo", + headline="Software Engineer", + photo_url="https://www.linkedin.com/in/johndoe", + present_raw_address="123 Main St, Anytown, USA", + linkededin_uid="123456", + 
extrapolated_email_confidence=0.8, + salesforce_id="123456", + salesforce_lead_id="123456", + salesforce_contact_id="123456", + saleforce_account_id="123456", + crm_owner_id="123456", + created_at="2021-01-01", + emailer_campaign_ids=[], + direct_dial_status="active", + direct_dial_enrichment_failed_at="2021-01-01", + email_status="active", + email_source="apollo", + account_id="123456", + last_activity_date="2021-01-01", + hubspot_vid="123456", + hubspot_company_id="123456", + crm_id="123456", + sanitized_phone="123456", + merged_crm_ids="123456", + updated_at="2021-01-01", + queued_for_crm_push=True, + suggested_from_rule_engine_config_id="123456", + email_unsubscribed=None, + label_ids=[], + has_pending_email_arcgate_request=True, + has_email_arcgate_request=True, + existence_level=None, + email=None, + email_from_customer=None, + typed_custom_fields=[], + custom_field_errors=None, + salesforce_record_id=None, + crm_record_url=None, + email_status_unavailable_reason=None, + email_true_status=None, + updated_email_true_status=True, + contact_rule_config_statuses=[], + source_display_name=None, + twitter_url=None, + contact_campaign_statuses=[], + state=None, + city=None, + country=None, + account=None, + contact_emails=[], + organization=None, + employment_history=[], + time_zone=None, + intent_strength=None, + show_intent=True, + phone_numbers=[], + account_phone_note=None, + free_domain=True, + is_likely_to_engage=True, + email_domain_catchall=True, + contact_job_change_event=None, + ), + ], + ), + ], + test_mock={ + "search_people": lambda query, credentials: [ + Contact( + id="1", + name="John Doe", + first_name="John", + last_name="Doe", + linkedin_url="https://www.linkedin.com/in/johndoe", + title="Software Engineer", + organization_name="Google", + organization_id="123456", + contact_stage_id="1", + owner_id="1", + creator_id="1", + person_id="1", + email_needs_tickling=True, + source="apollo", + original_source="apollo", + headline="Software Engineer", + 
photo_url="https://www.linkedin.com/in/johndoe", + present_raw_address="123 Main St, Anytown, USA", + linkededin_uid="123456", + extrapolated_email_confidence=0.8, + salesforce_id="123456", + salesforce_lead_id="123456", + salesforce_contact_id="123456", + saleforce_account_id="123456", + crm_owner_id="123456", + created_at="2021-01-01", + emailer_campaign_ids=[], + direct_dial_status="active", + direct_dial_enrichment_failed_at="2021-01-01", + email_status="active", + email_source="apollo", + account_id="123456", + last_activity_date="2021-01-01", + hubspot_vid="123456", + hubspot_company_id="123456", + crm_id="123456", + sanitized_phone="123456", + merged_crm_ids="123456", + updated_at="2021-01-01", + queued_for_crm_push=True, + suggested_from_rule_engine_config_id="123456", + email_unsubscribed=None, + label_ids=[], + has_pending_email_arcgate_request=True, + has_email_arcgate_request=True, + existence_level=None, + email=None, + email_from_customer=None, + typed_custom_fields=[], + custom_field_errors=None, + salesforce_record_id=None, + crm_record_url=None, + email_status_unavailable_reason=None, + email_true_status=None, + updated_email_true_status=True, + contact_rule_config_statuses=[], + source_display_name=None, + twitter_url=None, + contact_campaign_statuses=[], + state=None, + city=None, + country=None, + account=None, + contact_emails=[], + organization=None, + employment_history=[], + time_zone=None, + intent_strength=None, + show_intent=True, + phone_numbers=[], + account_phone_note=None, + free_domain=True, + is_likely_to_engage=True, + email_domain_catchall=True, + contact_job_change_event=None, + ), + ] + }, + ) + + @staticmethod + def search_people( + query: SearchPeopleRequest, credentials: ApolloCredentials + ) -> list[Contact]: + client = ApolloClient(credentials) + return client.search_people(query) + + def run( + self, + input_data: Input, + *, + credentials: ApolloCredentials, + **kwargs, + ) -> BlockOutput: + + query = 
SearchPeopleRequest(**input_data.model_dump(exclude={"credentials"})) + people = self.search_people(query, credentials) + for person in people: + yield "person", person + yield "people", people diff --git a/autogpt_platform/backend/backend/blocks/smartlead/_api.py b/autogpt_platform/backend/backend/blocks/smartlead/_api.py new file mode 100644 index 0000000000..8caa266c2c --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/smartlead/_api.py @@ -0,0 +1,97 @@ +from backend.blocks.smartlead.models import ( + AddLeadsRequest, + AddLeadsToCampaignResponse, + CreateCampaignRequest, + CreateCampaignResponse, + SaveSequencesRequest, + SaveSequencesResponse, +) +from backend.util.request import Requests + + +class SmartLeadClient: + """Client for the SmartLead API""" + + # This api is stupid and requires your api key in the url. DO NOT RAISE ERRORS FOR BAD REQUESTS. + # FILTER OUT THE API KEY FROM THE ERROR MESSAGE. + + API_URL = "https://server.smartlead.ai/api/v1" + + def __init__(self, api_key: str): + self.api_key = api_key + self.requests = Requests() + + def _add_auth_to_url(self, url: str) -> str: + return f"{url}?api_key={self.api_key}" + + def _handle_error(self, e: Exception) -> str: + return e.__str__().replace(self.api_key, "API KEY") + + def create_campaign(self, request: CreateCampaignRequest) -> CreateCampaignResponse: + try: + response = self.requests.post( + self._add_auth_to_url(f"{self.API_URL}/campaigns/create"), + json=request.model_dump(), + ) + response_data = response.json() + return CreateCampaignResponse(**response_data) + except ValueError as e: + raise ValueError(f"Invalid response format: {str(e)}") + except Exception as e: + raise ValueError(f"Failed to create campaign: {self._handle_error(e)}") + + def add_leads_to_campaign( + self, request: AddLeadsRequest + ) -> AddLeadsToCampaignResponse: + try: + response = self.requests.post( + self._add_auth_to_url( + f"{self.API_URL}/campaigns/{request.campaign_id}/leads" + ), + 
class SmartLeadClient:
    """Client for the SmartLead API.

    SmartLead authenticates by putting the API key into the request URL as a
    query parameter, so exception text that echoes the URL may contain the
    key.  Every error raised from this client is therefore passed through
    `_handle_error`, which redacts the key before re-raising.
    """

    API_URL = "https://server.smartlead.ai/api/v1"

    def __init__(self, api_key: str):
        self.api_key = api_key
        self.requests = Requests()

    def _add_auth_to_url(self, url: str) -> str:
        # SmartLead expects the key as an `api_key` query parameter.
        return f"{url}?api_key={self.api_key}"

    def _handle_error(self, e: Exception) -> str:
        # Redact the API key from exception text (it can appear in echoed URLs).
        return str(e).replace(self.api_key, "API KEY")

    def create_campaign(self, request: CreateCampaignRequest) -> CreateCampaignResponse:
        """Create a campaign.

        Raises:
            ValueError: if the request fails or the response cannot be parsed.
        """
        try:
            response = self.requests.post(
                self._add_auth_to_url(f"{self.API_URL}/campaigns/create"),
                json=request.model_dump(),
            )
            payload = response.json()
        except Exception as e:
            raise ValueError(f"Failed to create campaign: {self._handle_error(e)}")
        try:
            return CreateCampaignResponse(**payload)
        except Exception as e:
            # Redact here too; this path previously re-raised unredacted text.
            raise ValueError(f"Invalid response format: {self._handle_error(e)}")

    def add_leads_to_campaign(
        self, request: AddLeadsRequest
    ) -> AddLeadsToCampaignResponse:
        """Add leads to an existing campaign.

        Raises:
            ValueError: if the request fails, the response cannot be parsed,
                or the API reports ``ok == False``.
        """
        try:
            response = self.requests.post(
                self._add_auth_to_url(
                    f"{self.API_URL}/campaigns/{request.campaign_id}/leads"
                ),
                # campaign_id travels in the URL path, not the body.
                json=request.model_dump(exclude={"campaign_id"}),
            )
            payload = response.json()
        except Exception as e:
            raise ValueError(
                f"Failed to add leads to campaign: {self._handle_error(e)}"
            )
        try:
            parsed = AddLeadsToCampaignResponse(**payload)
        except Exception as e:
            raise ValueError(f"Invalid response format: {self._handle_error(e)}")
        # Checked OUTSIDE the try blocks: previously this ValueError was caught
        # by the method's own `except ValueError` and re-raised with a
        # misleading "Invalid response format" prefix.
        if not parsed.ok:
            raise ValueError(f"Failed to add leads to campaign: {parsed.error}")
        return parsed

    def save_campaign_sequences(
        self, campaign_id: int, request: SaveSequencesRequest
    ) -> SaveSequencesResponse:
        """
        Save sequences within a campaign.

        Args:
            campaign_id: ID of the campaign to save sequences for
            request: SaveSequencesRequest containing the sequences configuration

        Returns:
            SaveSequencesResponse with the result of the operation

        Note:
            For variant_distribution_type:
            - MANUAL_EQUAL: Equally distributes variants across leads
            - AI_EQUAL: Requires winning_metric_property and lead_distribution_percentage
            - MANUAL_PERCENTAGE: Requires variant_distribution_percentage in seq_variants
        """
        try:
            response = self.requests.post(
                self._add_auth_to_url(
                    f"{self.API_URL}/campaigns/{campaign_id}/sequences"
                ),
                # exclude_none keeps unset optional fields out of the payload.
                json=request.model_dump(exclude_none=True),
            )
            return SaveSequencesResponse(**response.json())
        except Exception as e:
            # Use the shared redaction helper (was previously inlined).
            raise ValueError(
                f"Failed to save campaign sequences: {self._handle_error(e)}"
            )
# SmartLead authenticates with a plain API key.
SmartLeadCredentials = APIKeyCredentials
SmartLeadCredentialsInput = CredentialsMetaInput[
    Literal[ProviderName.SMARTLEAD],
    Literal["api_key"],
]

# Deterministic credentials used by block unit tests.
TEST_CREDENTIALS = APIKeyCredentials(
    id="01234567-89ab-cdef-0123-456789abcdef",
    provider="smartlead",
    api_key=SecretStr("mock-smartlead-api-key"),
    title="Mock SmartLead API key",
    expires_at=None,
)
# Meta-input mirror of the test credential's identifying fields.
TEST_CREDENTIALS_INPUT = {
    field: getattr(TEST_CREDENTIALS, field)
    for field in ("provider", "id", "type", "title")
}


def SmartLeadCredentialsField() -> SmartLeadCredentialsInput:
    """Build the credentials input field used by SmartLead blocks."""
    return CredentialsField(
        description="The SmartLead integration can be used with an API Key.",
    )
class CreateCampaignBlock(Block):
    """Create a campaign in SmartLead."""

    class Input(BlockSchema):
        name: str = SchemaField(
            description="The name of the campaign",
        )
        credentials: SmartLeadCredentialsInput = SchemaField(
            description="SmartLead credentials",
        )

    class Output(BlockSchema):
        id: int = SchemaField(
            description="The ID of the created campaign",
        )
        name: str = SchemaField(
            description="The name of the created campaign",
        )
        created_at: str = SchemaField(
            description="The date and time the campaign was created",
        )
        error: str = SchemaField(
            # Fixed: previously read "if the search failed" — a copy-paste
            # from a search block; this block creates campaigns.
            description="Error message if the campaign creation failed",
            default="",
        )

    def __init__(self):
        super().__init__(
            id="8865699f-9188-43c4-89b0-79c84cfaa03e",
            description="Create a campaign in SmartLead",
            categories={BlockCategory.CRM},
            input_schema=CreateCampaignBlock.Input,
            output_schema=CreateCampaignBlock.Output,
            test_credentials=TEST_CREDENTIALS,
            test_input={"name": "Test Campaign", "credentials": TEST_CREDENTIALS_INPUT},
            test_output=[
                ("id", 1),
                ("name", "Test Campaign"),
                ("created_at", "2024-01-01T00:00:00Z"),
            ],
            test_mock={
                "create_campaign": lambda name, credentials: CreateCampaignResponse(
                    ok=True,
                    id=1,
                    name=name,
                    created_at="2024-01-01T00:00:00Z",
                )
            },
        )

    @staticmethod
    def create_campaign(
        name: str, credentials: SmartLeadCredentials
    ) -> CreateCampaignResponse:
        """Create the campaign via the SmartLead API client."""
        client = SmartLeadClient(credentials.api_key.get_secret_value())
        return client.create_campaign(CreateCampaignRequest(name=name))

    def run(
        self,
        input_data: Input,
        *,
        credentials: SmartLeadCredentials,
        **kwargs,
    ) -> BlockOutput:
        response = self.create_campaign(input_data.name, credentials)

        # NOTE(review): outputs are emitted before the ok check (preserved
        # behavior) — a not-ok response still yields id/name/created_at.
        yield "id", response.id
        yield "name", response.name
        yield "created_at", response.created_at
        if not response.ok:
            yield "error", "Failed to create campaign"
class AddLeadToCampaignBlock(Block):
    """Add leads to a campaign in SmartLead.

    Fix: the block's `settings` input was previously ignored — the API call
    always used hard-coded all-False upload settings.  It is now passed
    through to the request.
    """

    class Input(BlockSchema):
        campaign_id: int = SchemaField(
            description="The ID of the campaign to add the lead to",
        )
        lead_list: list[LeadInput] = SchemaField(
            description="An array of JSON objects, each representing a lead's details. Can hold max 100 leads.",
            max_length=100,
            default=[],
            advanced=False,
        )
        settings: LeadUploadSettings = SchemaField(
            description="Settings for lead upload",
            default=LeadUploadSettings(),
        )
        credentials: SmartLeadCredentialsInput = SchemaField(
            description="SmartLead credentials",
        )

    class Output(BlockSchema):
        campaign_id: int = SchemaField(
            description="The ID of the campaign the lead was added to (passed through)",
        )
        upload_count: int = SchemaField(
            description="The number of leads added to the campaign",
        )
        already_added_to_campaign: int = SchemaField(
            description="The number of leads that were already added to the campaign",
        )
        duplicate_count: int = SchemaField(
            description="The number of emails that were duplicates",
        )
        invalid_email_count: int = SchemaField(
            description="The number of emails that were invalidly formatted",
        )
        is_lead_limit_exhausted: bool = SchemaField(
            description="Whether the lead limit was exhausted",
        )
        lead_import_stopped_count: int = SchemaField(
            description="The number of leads that were not added to the campaign because the lead import was stopped",
        )
        error: str = SchemaField(
            description="Error message if the lead was not added to the campaign",
            default="",
        )

    def __init__(self):
        super().__init__(
            id="fb8106a4-1a8f-42f9-a502-f6d07e6fe0ec",
            description="Add a lead to a campaign in SmartLead",
            categories={BlockCategory.CRM},
            input_schema=AddLeadToCampaignBlock.Input,
            output_schema=AddLeadToCampaignBlock.Output,
            test_credentials=TEST_CREDENTIALS,
            test_input={
                "campaign_id": 1,
                "lead_list": [],
                "credentials": TEST_CREDENTIALS_INPUT,
            },
            test_output=[
                ("campaign_id", 1),
                ("upload_count", 1),
            ],
            test_mock={
                # Mock signature matches the new optional `settings` argument.
                "add_leads_to_campaign": lambda campaign_id, lead_list, credentials, settings=None: AddLeadsToCampaignResponse(
                    ok=True,
                    upload_count=1,
                    already_added_to_campaign=0,
                    duplicate_count=0,
                    invalid_email_count=0,
                    is_lead_limit_exhausted=False,
                    lead_import_stopped_count=0,
                    error="",
                    total_leads=1,
                    block_count=0,
                    invalid_emails=[],
                    unsubscribed_leads=[],
                    bounce_count=0,
                )
            },
        )

    @staticmethod
    def add_leads_to_campaign(
        campaign_id: int,
        lead_list: list[LeadInput],
        credentials: SmartLeadCredentials,
        settings: LeadUploadSettings | None = None,
    ) -> AddLeadsToCampaignResponse:
        """Upload `lead_list` to the campaign, honouring upload `settings`.

        `settings` defaults to None (→ all-False `LeadUploadSettings()`) so
        existing three-argument callers keep working.
        """
        client = SmartLeadClient(credentials.api_key.get_secret_value())
        return client.add_leads_to_campaign(
            AddLeadsRequest(
                campaign_id=campaign_id,
                lead_list=lead_list,
                # Bug fix: previously hard-coded all-False settings, silently
                # discarding the block's `settings` input.
                settings=settings if settings is not None else LeadUploadSettings(),
            ),
        )

    def run(
        self,
        input_data: Input,
        *,
        credentials: SmartLeadCredentials,
        **kwargs,
    ) -> BlockOutput:
        response = self.add_leads_to_campaign(
            input_data.campaign_id,
            input_data.lead_list,
            credentials,
            input_data.settings,
        )

        yield "campaign_id", input_data.campaign_id
        yield "upload_count", response.upload_count
        # Only emit the optional counters the API actually reported non-zero.
        if response.already_added_to_campaign:
            yield "already_added_to_campaign", response.already_added_to_campaign
        if response.duplicate_count:
            yield "duplicate_count", response.duplicate_count
        if response.invalid_email_count:
            yield "invalid_email_count", response.invalid_email_count
        if response.is_lead_limit_exhausted:
            yield "is_lead_limit_exhausted", response.is_lead_limit_exhausted
        if response.lead_import_stopped_count:
            yield "lead_import_stopped_count", response.lead_import_stopped_count
        if response.error:
            yield "error", response.error
        if not response.ok:
            yield "error", "Failed to add leads to campaign"
class SaveCampaignSequencesBlock(Block):
    """Save the email sequences of an existing SmartLead campaign."""

    class Input(BlockSchema):
        campaign_id: int = SchemaField(
            description="The ID of the campaign to save sequences for",
        )
        sequences: list[Sequence] = SchemaField(
            description="The sequences to save",
            default=[],
            advanced=False,
        )
        credentials: SmartLeadCredentialsInput = SchemaField(
            description="SmartLead credentials",
        )

    class Output(BlockSchema):
        data: dict | str | None = SchemaField(
            description="Data from the API",
            default=None,
        )
        message: str = SchemaField(
            description="Message from the API",
            default="",
        )
        error: str = SchemaField(
            description="Error message if the sequences were not saved",
            default="",
        )

    def __init__(self):
        super().__init__(
            id="e7d9f41c-dc10-4f39-98ba-a432abd128c0",
            description="Save sequences within a campaign",
            categories={BlockCategory.CRM},
            input_schema=SaveCampaignSequencesBlock.Input,
            output_schema=SaveCampaignSequencesBlock.Output,
            test_credentials=TEST_CREDENTIALS,
            test_input={
                "campaign_id": 1,
                "sequences": [],
                "credentials": TEST_CREDENTIALS_INPUT,
            },
            test_output=[
                ("message", "Sequences saved successfully"),
            ],
            test_mock={
                "save_campaign_sequences": lambda campaign_id, sequences, credentials: SaveSequencesResponse(
                    ok=True,
                    message="Sequences saved successfully",
                )
            },
        )

    @staticmethod
    def save_campaign_sequences(
        campaign_id: int, sequences: list[Sequence], credentials: SmartLeadCredentials
    ) -> SaveSequencesResponse:
        """Persist `sequences` for the campaign via the SmartLead API client."""
        api = SmartLeadClient(credentials.api_key.get_secret_value())
        return api.save_campaign_sequences(
            campaign_id=campaign_id, request=SaveSequencesRequest(sequences=sequences)
        )

    def run(
        self,
        input_data: Input,
        *,
        credentials: SmartLeadCredentials,
        **kwargs,
    ) -> BlockOutput:
        result = self.save_campaign_sequences(
            input_data.campaign_id, input_data.sequences, credentials
        )

        # Emit only the fields the API populated.
        if result.data:
            yield "data", result.data
        if result.message:
            yield "message", result.message
        if result.error:
            yield "error", result.error
        if not result.ok:
            yield "error", "Failed to save sequences"
from enum import Enum

from pydantic import BaseModel

from backend.data.model import SchemaField


class CreateCampaignResponse(BaseModel):
    # Response payload of POST /campaigns/create.
    ok: bool
    id: int
    name: str
    created_at: str


class CreateCampaignRequest(BaseModel):
    # Request payload for creating a campaign; client_id is optional.
    name: str
    client_id: str | None = None


class AddLeadsToCampaignResponse(BaseModel):
    # Response payload of POST /campaigns/{id}/leads, with per-category
    # counters for leads that were rejected, deduplicated, or skipped.
    ok: bool
    upload_count: int
    total_leads: int
    block_count: int
    duplicate_count: int
    invalid_email_count: int
    invalid_emails: list[str]
    already_added_to_campaign: int
    unsubscribed_leads: list[str]
    is_lead_limit_exhausted: bool
    lead_import_stopped_count: int
    bounce_count: int
    error: str | None = None


class LeadCustomFields(BaseModel):
    """Custom fields for a lead (max 20 fields)"""

    # NOTE(review): max_length on a dict may not be enforced by pydantic
    # (it constrains sequences/strings) — confirm the 20-field cap is
    # actually validated somewhere.
    fields: dict[str, str] = SchemaField(
        description="Custom fields for a lead (max 20 fields)",
        max_length=20,
        default={},
    )


class LeadInput(BaseModel):
    """Single lead input data"""

    first_name: str
    last_name: str
    email: str
    phone_number: str | None = None  # Changed from int to str for phone numbers
    company_name: str | None = None
    website: str | None = None
    location: str | None = None
    custom_fields: LeadCustomFields | None = None
    linkedin_profile: str | None = None
    company_url: str | None = None


class LeadUploadSettings(BaseModel):
    """Settings for lead upload"""

    # Each flag opts the upload out of one of SmartLead's filter lists;
    # all default to False (filters active).
    ignore_global_block_list: bool = SchemaField(
        description="Ignore the global block list",
        default=False,
    )
    ignore_unsubscribe_list: bool = SchemaField(
        description="Ignore the unsubscribe list",
        default=False,
    )
    ignore_community_bounce_list: bool = SchemaField(
        description="Ignore the community bounce list",
        default=False,
    )
    ignore_duplicate_leads_in_other_campaign: bool = SchemaField(
        description="Ignore duplicate leads in other campaigns",
        default=False,
    )


class AddLeadsRequest(BaseModel):
    """Request body for adding leads to a campaign"""

    # campaign_id is carried here for URL construction but excluded from
    # the serialized body by the client.
    lead_list: list[LeadInput] = SchemaField(
        description="List of leads to add to the campaign",
        max_length=100,
        default=[],
    )
    settings: LeadUploadSettings
    campaign_id: int


class VariantDistributionType(str, Enum):
    # How sequence variants are split across leads.
    MANUAL_EQUAL = "MANUAL_EQUAL"
    MANUAL_PERCENTAGE = "MANUAL_PERCENTAGE"
    AI_EQUAL = "AI_EQUAL"


class WinningMetricProperty(str, Enum):
    # Metric used by AI_EQUAL distribution to pick the winning variant.
    OPEN_RATE = "OPEN_RATE"
    CLICK_RATE = "CLICK_RATE"
    REPLY_RATE = "REPLY_RATE"
    POSITIVE_REPLY_RATE = "POSITIVE_REPLY_RATE"


class SequenceDelayDetails(BaseModel):
    # Days to wait before this sequence step fires.
    delay_in_days: int


class SequenceVariant(BaseModel):
    # One A/B variant of a sequence email.
    subject: str
    email_body: str
    variant_label: str
    id: int | None = None  # Optional for creation, required for updates
    variant_distribution_percentage: int | None = None


class Sequence(BaseModel):
    # One step of a campaign's email sequence.
    seq_number: int = SchemaField(
        description="The sequence number",
        default=1,
    )
    seq_delay_details: SequenceDelayDetails
    id: int | None = None
    variant_distribution_type: VariantDistributionType | None = None
    lead_distribution_percentage: int | None = SchemaField(
        None, ge=20, le=100
    )  # >= 20% for fair calculation
    winning_metric_property: WinningMetricProperty | None = None
    seq_variants: list[SequenceVariant] | None = None
    subject: str = ""  # blank makes the follow up in the same thread
    email_body: str | None = None


class SaveSequencesRequest(BaseModel):
    # Request body of POST /campaigns/{id}/sequences.
    sequences: list[Sequence]


class SaveSequencesResponse(BaseModel):
    # Response of the save-sequences endpoint.
    ok: bool
    message: str = SchemaField(
        description="Message from the API",
        default="",
    )
    data: dict | str | None = None
    error: str | None = None
class ZeroBounceClient:
    """Minimal wrapper around the ZeroBounce SDK's validation endpoint."""

    def __init__(self, api_key: str):
        # Keep the raw key alongside the configured SDK client.
        self.api_key = api_key
        self.client = ZeroBounce(api_key)

    def validate_email(self, email: str, ip_address: str) -> ZBValidateResponse:
        """Validate `email`; `ip_address` is forwarded to the SDK as-is."""
        return self.client.validate(email, ip_address)
+ """ + return CredentialsField( + description="The ZeroBounce integration can be used with an API Key.", + ) diff --git a/autogpt_platform/backend/backend/blocks/zerobounce/validate_emails.py b/autogpt_platform/backend/backend/blocks/zerobounce/validate_emails.py new file mode 100644 index 0000000000..ee87a8f285 --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/zerobounce/validate_emails.py @@ -0,0 +1,175 @@ +from typing import Optional + +from pydantic import BaseModel +from zerobouncesdk.zb_validate_response import ( + ZBValidateResponse, + ZBValidateStatus, + ZBValidateSubStatus, +) + +from backend.blocks.zerobounce._api import ZeroBounceClient +from backend.blocks.zerobounce._auth import ( + TEST_CREDENTIALS, + TEST_CREDENTIALS_INPUT, + ZeroBounceCredentials, + ZeroBounceCredentialsInput, +) +from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import SchemaField + + +class Response(BaseModel): + address: str = SchemaField( + description="The email address you are validating.", default="N/A" + ) + status: ZBValidateStatus = SchemaField( + description="The status of the email address.", default=ZBValidateStatus.unknown + ) + sub_status: ZBValidateSubStatus = SchemaField( + description="The sub-status of the email address.", + default=ZBValidateSubStatus.none, + ) + account: Optional[str] = SchemaField( + description="The portion of the email address before the '@' symbol.", + default="N/A", + ) + domain: Optional[str] = SchemaField( + description="The portion of the email address after the '@' symbol." 
+ ) + did_you_mean: Optional[str] = SchemaField( + description="Suggestive Fix for an email typo", + default=None, + ) + domain_age_days: Optional[str] = SchemaField( + description="Age of the email domain in days or [null].", + default=None, + ) + free_email: Optional[bool] = SchemaField( + description="Whether the email address is a free email provider.", default=False + ) + mx_found: Optional[bool] = SchemaField( + description="Whether the MX record was found.", default=False + ) + mx_record: Optional[str] = SchemaField( + description="The MX record of the email address.", default=None + ) + smtp_provider: Optional[str] = SchemaField( + description="The SMTP provider of the email address.", default=None + ) + firstname: Optional[str] = SchemaField( + description="The first name of the email address.", default=None + ) + lastname: Optional[str] = SchemaField( + description="The last name of the email address.", default=None + ) + gender: Optional[str] = SchemaField( + description="The gender of the email address.", default=None + ) + city: Optional[str] = SchemaField( + description="The city of the email address.", default=None + ) + region: Optional[str] = SchemaField( + description="The region of the email address.", default=None + ) + zipcode: Optional[str] = SchemaField( + description="The zipcode of the email address.", default=None + ) + country: Optional[str] = SchemaField( + description="The country of the email address.", default=None + ) + + +class ValidateEmailsBlock(Block): + """Search for people in Apollo""" + + class Input(BlockSchema): + email: str = SchemaField( + description="Email to validate", + ) + ip_address: str = SchemaField( + description="IP address to validate", + default="", + ) + credentials: ZeroBounceCredentialsInput = SchemaField( + description="ZeroBounce credentials", + ) + + class Output(BlockSchema): + response: Response = SchemaField( + description="Response from ZeroBounce", + ) + error: str = SchemaField( + description="Error 
message if the search failed", + default="", + ) + + def __init__(self): + super().__init__( + id="e3950439-fa0b-40e8-b19f-e0dca0bf5853", + description="Validate emails", + categories={BlockCategory.SEARCH}, + input_schema=ValidateEmailsBlock.Input, + output_schema=ValidateEmailsBlock.Output, + test_credentials=TEST_CREDENTIALS, + test_input={ + "credentials": TEST_CREDENTIALS_INPUT, + "email": "test@test.com", + }, + test_output=[ + ( + "response", + Response( + address="test@test.com", + status=ZBValidateStatus.valid, + sub_status=ZBValidateSubStatus.allowed, + account="test", + domain="test.com", + did_you_mean=None, + domain_age_days=None, + free_email=False, + mx_found=False, + mx_record=None, + smtp_provider=None, + ), + ) + ], + test_mock={ + "validate_email": lambda email, ip_address, credentials: ZBValidateResponse( + data={ + "address": email, + "status": ZBValidateStatus.valid, + "sub_status": ZBValidateSubStatus.allowed, + "account": "test", + "domain": "test.com", + "did_you_mean": None, + "domain_age_days": None, + "free_email": False, + "mx_found": False, + "mx_record": None, + "smtp_provider": None, + } + ) + }, + ) + + @staticmethod + def validate_email( + email: str, ip_address: str, credentials: ZeroBounceCredentials + ) -> ZBValidateResponse: + client = ZeroBounceClient(credentials.api_key.get_secret_value()) + return client.validate_email(email, ip_address) + + def run( + self, + input_data: Input, + *, + credentials: ZeroBounceCredentials, + **kwargs, + ) -> BlockOutput: + response: ZBValidateResponse = self.validate_email( + input_data.email, input_data.ip_address, credentials + ) + + response_model = Response(**response.__dict__) + + yield "response", response_model diff --git a/autogpt_platform/backend/backend/data/credit.py b/autogpt_platform/backend/backend/data/credit.py index f4ed045c47..a6b840590a 100644 --- a/autogpt_platform/backend/backend/data/credit.py +++ b/autogpt_platform/backend/backend/data/credit.py @@ -1,11 +1,17 @@ +import 
asyncio import logging from abc import ABC, abstractmethod from collections import defaultdict from datetime import datetime, timezone import stripe +from autogpt_libs.utils.cache import thread_cached from prisma import Json -from prisma.enums import CreditRefundRequestStatus, CreditTransactionType +from prisma.enums import ( + CreditRefundRequestStatus, + CreditTransactionType, + NotificationType, +) from prisma.errors import UniqueViolationError from prisma.models import CreditRefundRequest, CreditTransaction, User from prisma.types import CreditTransactionCreateInput, CreditTransactionWhereInput @@ -23,7 +29,10 @@ from backend.data.model import ( TransactionHistory, UserTransaction, ) +from backend.data.notifications import NotificationEventDTO, RefundRequestData from backend.data.user import get_user_by_id +from backend.notifications import NotificationManager +from backend.util.service import get_service_client from backend.util.settings import Settings settings = Settings() @@ -338,6 +347,26 @@ class UsageTransactionMetadata(BaseModel): class UserCredit(UserCreditBase): + @thread_cached + def notification_client(self) -> NotificationManager: + return get_service_client(NotificationManager) + + async def _send_refund_notification( + self, + notification_request: RefundRequestData, + notification_type: NotificationType, + ): + await asyncio.to_thread( + lambda: self.notification_client().queue_notification( + NotificationEventDTO( + recipient_email=settings.config.refund_notification_email, + user_id=notification_request.user_id, + type=notification_type, + data=notification_request.model_dump(), + ) + ) + ) + def _block_usage_cost( self, block: Block, @@ -457,10 +486,11 @@ class UserCredit(UserCreditBase): ) balance = await self.get_credits(user_id) amount = transaction.amount - refund_key = f"{transaction.createdAt.strftime('%Y-%W')}-{user_id}" + refund_key_format = settings.config.refund_request_time_key_format + refund_key = 
f"{transaction.createdAt.strftime(refund_key_format)}-{user_id}" try: - await CreditRefundRequest.prisma().create( + refund_request = await CreditRefundRequest.prisma().create( data={ "id": refund_key, "transactionKey": transaction_key, @@ -477,7 +507,20 @@ class UserCredit(UserCreditBase): ) if amount - balance > settings.config.refund_credit_tolerance_threshold: - # TODO: add a notification for the platform administrator. + user_data = await get_user_by_id(user_id) + await self._send_refund_notification( + RefundRequestData( + user_id=user_id, + user_name=user_data.name or "AutoGPT Platform User", + user_email=user_data.email, + transaction_id=transaction_key, + refund_request_id=refund_request.id, + reason=refund_request.reason, + amount=amount, + balance=balance, + ), + NotificationType.REFUND_REQUEST, + ) return 0 # Register the refund request for manual approval. # Auto refund the top-up. @@ -509,7 +552,7 @@ class UserCredit(UserCreditBase): f"Invalid amount to deduct ${request.amount/100} from ${transaction.amount/100} top-up" ) - await self._add_transaction( + balance, _ = await self._add_transaction( user_id=transaction.userId, amount=-request.amount, transaction_type=CreditTransactionType.REFUND, @@ -531,6 +574,21 @@ class UserCredit(UserCreditBase): }, ) + user_data = await get_user_by_id(transaction.userId) + await self._send_refund_notification( + RefundRequestData( + user_id=user_data.id, + user_name=user_data.name or "AutoGPT Platform User", + user_email=user_data.email, + transaction_id=transaction.transactionKey, + refund_request_id=request.id, + reason=str(request.reason or "-"), + amount=transaction.amount, + balance=balance, + ), + NotificationType.REFUND_PROCESSED, + ) + async def handle_dispute(self, dispute: stripe.Dispute): transaction = await CreditTransaction.prisma().find_first_or_raise( where={ diff --git a/autogpt_platform/backend/backend/data/graph.py b/autogpt_platform/backend/backend/data/graph.py index 5483c24bd3..52e37625cd 100644 
--- a/autogpt_platform/backend/backend/data/graph.py +++ b/autogpt_platform/backend/backend/data/graph.py @@ -23,12 +23,15 @@ from backend.util import type from .block import BlockInput, BlockType, get_block, get_blocks from .db import BaseDbModel, transaction -from .execution import ExecutionStatus +from .execution import ExecutionResult, ExecutionStatus from .includes import AGENT_GRAPH_INCLUDE, AGENT_NODE_INCLUDE from .integrations import Webhook logger = logging.getLogger(__name__) +_INPUT_BLOCK_ID = AgentInputBlock().id +_OUTPUT_BLOCK_ID = AgentOutputBlock().id + class Link(BaseDbModel): source_id: str @@ -105,7 +108,7 @@ class NodeModel(Node): Webhook.model_rebuild() -class GraphExecution(BaseDbModel): +class GraphExecutionMeta(BaseDbModel): execution_id: str started_at: datetime ended_at: datetime @@ -114,33 +117,83 @@ class GraphExecution(BaseDbModel): status: ExecutionStatus graph_id: str graph_version: int + preset_id: Optional[str] @staticmethod - def from_db(execution: AgentGraphExecution): + def from_db(_graph_exec: AgentGraphExecution): now = datetime.now(timezone.utc) - start_time = execution.startedAt or execution.createdAt - end_time = execution.updatedAt or now + start_time = _graph_exec.startedAt or _graph_exec.createdAt + end_time = _graph_exec.updatedAt or now duration = (end_time - start_time).total_seconds() total_run_time = duration try: - stats = type.convert(execution.stats or {}, dict[str, Any]) + stats = type.convert(_graph_exec.stats or {}, dict[str, Any]) except ValueError: stats = {} duration = stats.get("walltime", duration) total_run_time = stats.get("nodes_walltime", total_run_time) - return GraphExecution( - id=execution.id, - execution_id=execution.id, + return GraphExecutionMeta( + id=_graph_exec.id, + execution_id=_graph_exec.id, started_at=start_time, ended_at=end_time, duration=duration, total_run_time=total_run_time, - status=ExecutionStatus(execution.executionStatus), - graph_id=execution.agentGraphId, - 
graph_version=execution.agentGraphVersion, + status=ExecutionStatus(_graph_exec.executionStatus), + graph_id=_graph_exec.agentGraphId, + graph_version=_graph_exec.agentGraphVersion, + preset_id=_graph_exec.agentPresetId, + ) + + +class GraphExecution(GraphExecutionMeta): + inputs: dict[str, Any] + outputs: dict[str, list[Any]] + node_executions: list[ExecutionResult] + + @staticmethod + def from_db(_graph_exec: AgentGraphExecution): + if _graph_exec.AgentNodeExecutions is None: + raise ValueError("Node executions must be included in query") + + graph_exec = GraphExecutionMeta.from_db(_graph_exec) + + node_executions = [ + ExecutionResult.from_db(ne) for ne in _graph_exec.AgentNodeExecutions + ] + + inputs = { + **{ + # inputs from Agent Input Blocks + exec.input_data["name"]: exec.input_data["value"] + for exec in node_executions + if exec.block_id == _INPUT_BLOCK_ID + }, + **{ + # input from webhook-triggered block + "payload": exec.input_data["payload"] + for exec in node_executions + if (block := get_block(exec.block_id)) + and block.block_type in [BlockType.WEBHOOK, BlockType.WEBHOOK_MANUAL] + }, + } + + outputs: dict[str, list] = defaultdict(list) + for exec in node_executions: + if exec.block_id == _OUTPUT_BLOCK_ID: + outputs[exec.input_data["name"]].append(exec.input_data["value"]) + + return GraphExecution( + **{ + field_name: getattr(graph_exec, field_name) + for field_name in graph_exec.model_fields + }, + inputs=inputs, + outputs=outputs, + node_executions=node_executions, ) @@ -514,17 +567,45 @@ async def get_graphs( return graph_models -async def get_executions(user_id: str) -> list[GraphExecution]: +async def get_graphs_executions(user_id: str) -> list[GraphExecutionMeta]: executions = await AgentGraphExecution.prisma().find_many( where={"userId": user_id}, order={"createdAt": "desc"}, ) - return [GraphExecution.from_db(execution) for execution in executions] + return [GraphExecutionMeta.from_db(execution) for execution in executions] + + +async def 
get_graph_executions(graph_id: str, user_id: str) -> list[GraphExecutionMeta]: + executions = await AgentGraphExecution.prisma().find_many( + where={"agentGraphId": graph_id, "userId": user_id}, + order={"createdAt": "desc"}, + ) + return [GraphExecutionMeta.from_db(execution) for execution in executions] + + +async def get_execution_meta( + user_id: str, execution_id: str +) -> GraphExecutionMeta | None: + execution = await AgentGraphExecution.prisma().find_first( + where={"id": execution_id, "userId": user_id} + ) + return GraphExecutionMeta.from_db(execution) if execution else None async def get_execution(user_id: str, execution_id: str) -> GraphExecution | None: execution = await AgentGraphExecution.prisma().find_first( - where={"id": execution_id, "userId": user_id} + where={"id": execution_id, "userId": user_id}, + include={ + "AgentNodeExecutions": { + "include": {"AgentNode": True, "Input": True, "Output": True}, + "order_by": [ + {"queuedTime": "asc"}, + { # Fallback: Incomplete execs has no queuedTime. 
+ "addedTime": "asc" + }, + ], + }, + }, ) return GraphExecution.from_db(execution) if execution else None @@ -629,7 +710,7 @@ async def create_graph(graph: Graph, user_id: str) -> GraphModel: await __create_graph(tx, graph, user_id) if created_graph := await get_graph( - graph.id, graph.version, graph.is_template, user_id=user_id + graph.id, graph.version, template=graph.is_template, user_id=user_id ): return created_graph diff --git a/autogpt_platform/backend/backend/data/notifications.py b/autogpt_platform/backend/backend/data/notifications.py index a5bfbbc3ae..a0b6941640 100644 --- a/autogpt_platform/backend/backend/data/notifications.py +++ b/autogpt_platform/backend/backend/data/notifications.py @@ -100,6 +100,17 @@ class MonthlySummaryData(BaseSummaryData): year: int +class RefundRequestData(BaseNotificationData): + user_id: str + user_name: str + user_email: str + transaction_id: str + refund_request_id: str + reason: str + amount: float + balance: int + + NotificationData = Annotated[ Union[ AgentRunData, @@ -118,6 +129,8 @@ class NotificationEventDTO(BaseModel): type: NotificationType data: dict created_at: datetime = Field(default_factory=datetime.now) + recipient_email: Optional[str] = None + retry_count: int = 0 class NotificationEventModel(BaseModel, Generic[T_co]): @@ -153,6 +166,8 @@ def get_data_type( NotificationType.DAILY_SUMMARY: DailySummaryData, NotificationType.WEEKLY_SUMMARY: WeeklySummaryData, NotificationType.MONTHLY_SUMMARY: MonthlySummaryData, + NotificationType.REFUND_REQUEST: RefundRequestData, + NotificationType.REFUND_PROCESSED: RefundRequestData, }[notification_type] @@ -186,6 +201,8 @@ class NotificationTypeOverride: NotificationType.DAILY_SUMMARY: BatchingStrategy.IMMEDIATE, NotificationType.WEEKLY_SUMMARY: BatchingStrategy.IMMEDIATE, NotificationType.MONTHLY_SUMMARY: BatchingStrategy.IMMEDIATE, + NotificationType.REFUND_REQUEST: BatchingStrategy.IMMEDIATE, + NotificationType.REFUND_PROCESSED: BatchingStrategy.IMMEDIATE, } return 
BATCHING_RULES.get(self.notification_type, BatchingStrategy.HOURLY) @@ -201,6 +218,8 @@ class NotificationTypeOverride: NotificationType.DAILY_SUMMARY: "daily_summary.html", NotificationType.WEEKLY_SUMMARY: "weekly_summary.html", NotificationType.MONTHLY_SUMMARY: "monthly_summary.html", + NotificationType.REFUND_REQUEST: "refund_request.html", + NotificationType.REFUND_PROCESSED: "refund_processed.html", }[self.notification_type] @property @@ -214,6 +233,8 @@ class NotificationTypeOverride: NotificationType.DAILY_SUMMARY: "Here's your daily summary!", NotificationType.WEEKLY_SUMMARY: "Look at all the cool stuff you did last week!", NotificationType.MONTHLY_SUMMARY: "We did a lot this month!", + NotificationType.REFUND_REQUEST: "[ACTION REQUIRED] You got a ${{data.amount / 100}} refund request from {{data.user_name}}", + NotificationType.REFUND_PROCESSED: "Refund for ${{data.amount / 100}} to {{data.user_name}} has been processed", }[self.notification_type] diff --git a/autogpt_platform/backend/backend/data/user.py b/autogpt_platform/backend/backend/data/user.py index 162066722b..b4c107bac6 100644 --- a/autogpt_platform/backend/backend/data/user.py +++ b/autogpt_platform/backend/backend/data/user.py @@ -50,10 +50,10 @@ async def get_user_by_id(user_id: str) -> User: return User.model_validate(user) -async def get_user_email_by_id(user_id: str) -> str: +async def get_user_email_by_id(user_id: str) -> Optional[str]: try: - user = await prisma.user.find_unique_or_raise(where={"id": user_id}) - return user.email + user = await prisma.user.find_unique(where={"id": user_id}) + return user.email if user else None except Exception as e: raise DatabaseError(f"Failed to get user email for user {user_id}: {e}") from e diff --git a/autogpt_platform/backend/backend/executor/database.py b/autogpt_platform/backend/backend/executor/database.py index 02e1535f5e..1dee046ccc 100644 --- a/autogpt_platform/backend/backend/executor/database.py +++ 
b/autogpt_platform/backend/backend/executor/database.py @@ -8,7 +8,6 @@ from backend.data.execution import ( RedisExecutionEventBus, create_graph_execution, get_execution_results, - get_executions_in_timerange, get_incomplete_executions, get_latest_execution, update_execution_status, @@ -18,20 +17,9 @@ from backend.data.execution import ( upsert_execution_output, ) from backend.data.graph import get_graph, get_node -from backend.data.notifications import ( - create_or_add_to_user_notification_batch, - empty_user_notification_batch, - get_user_notification_batch, - get_user_notification_last_message_in_batch, -) from backend.data.user import ( - get_active_user_ids_in_timerange, - get_active_users_ids, - get_user_by_id, - get_user_email_by_id, get_user_integrations, get_user_metadata, - get_user_notification_preference, update_user_integrations, update_user_metadata, ) @@ -84,7 +72,6 @@ class DatabaseManager(AppService): update_node_execution_stats = exposed_run_and_wait(update_node_execution_stats) upsert_execution_input = exposed_run_and_wait(upsert_execution_input) upsert_execution_output = exposed_run_and_wait(upsert_execution_output) - get_executions_in_timerange = exposed_run_and_wait(get_executions_in_timerange) # Graphs get_node = exposed_run_and_wait(get_node) @@ -97,27 +84,8 @@ class DatabaseManager(AppService): exposed_run_and_wait(user_credit_model.spend_credits), ) - # User + User Metadata + User Integrations + User Notification Preferences + # User + User Metadata + User Integrations get_user_metadata = exposed_run_and_wait(get_user_metadata) update_user_metadata = exposed_run_and_wait(update_user_metadata) get_user_integrations = exposed_run_and_wait(get_user_integrations) update_user_integrations = exposed_run_and_wait(update_user_integrations) - get_active_user_ids_in_timerange = exposed_run_and_wait( - get_active_user_ids_in_timerange - ) - get_user_by_id = exposed_run_and_wait(get_user_by_id) - get_user_email_by_id = 
exposed_run_and_wait(get_user_email_by_id) - get_user_notification_preference = exposed_run_and_wait( - get_user_notification_preference - ) - get_active_users_ids = exposed_run_and_wait(get_active_users_ids) - - # Notifications - create_or_add_to_user_notification_batch = exposed_run_and_wait( - create_or_add_to_user_notification_batch - ) - get_user_notification_last_message_in_batch = exposed_run_and_wait( - get_user_notification_last_message_in_batch - ) - empty_user_notification_batch = exposed_run_and_wait(empty_user_notification_batch) - get_user_notification_batch = exposed_run_and_wait(get_user_notification_batch) diff --git a/autogpt_platform/backend/backend/executor/manager.py b/autogpt_platform/backend/backend/executor/manager.py index 65c84ee1bc..6e7d109d15 100644 --- a/autogpt_platform/backend/backend/executor/manager.py +++ b/autogpt_platform/backend/backend/executor/manager.py @@ -210,6 +210,7 @@ def execute_node( for output_name, output_data in node_block.execute( input_data, **extra_exec_kwargs ): + output_data = json.convert_pydantic_to_json(output_data) output_size += len(json.dumps(output_data)) log_metadata.info("Node produced output", **{output_name: output_data}) db_client.upsert_execution_output(node_exec_id, output_name, output_data) diff --git a/autogpt_platform/backend/backend/integrations/credentials_store.py b/autogpt_platform/backend/backend/integrations/credentials_store.py index 5968cbe32d..3284c4b8fe 100644 --- a/autogpt_platform/backend/backend/integrations/credentials_store.py +++ b/autogpt_platform/backend/backend/integrations/credentials_store.py @@ -145,6 +145,29 @@ mem0_credentials = APIKeyCredentials( expires_at=None, ) +apollo_credentials = APIKeyCredentials( + id="544c62b5-1d0f-4156-8fb4-9525f11656eb", + provider="apollo", + api_key=SecretStr(settings.secrets.apollo_api_key), + title="Use Credits for Apollo", + expires_at=None, +) + +smartlead_credentials = APIKeyCredentials( + id="3bcdbda3-84a3-46af-8fdb-bfd2472298b8", + 
provider="smartlead", + api_key=SecretStr(settings.secrets.smartlead_api_key), + title="Use Credits for SmartLead", + expires_at=None, +) + +zerobounce_credentials = APIKeyCredentials( + id="63a6e279-2dc2-448e-bf57-85776f7176dc", + provider="zerobounce", + api_key=SecretStr(settings.secrets.zerobounce_api_key), + title="Use Credits for ZeroBounce", + expires_at=None, +) DEFAULT_CREDENTIALS = [ ollama_credentials, @@ -164,6 +187,9 @@ DEFAULT_CREDENTIALS = [ mem0_credentials, nvidia_credentials, screenshotone_credentials, + apollo_credentials, + smartlead_credentials, + zerobounce_credentials, ] @@ -231,6 +257,12 @@ class IntegrationCredentialsStore: all_credentials.append(screenshotone_credentials) if settings.secrets.mem0_api_key: all_credentials.append(mem0_credentials) + if settings.secrets.apollo_api_key: + all_credentials.append(apollo_credentials) + if settings.secrets.smartlead_api_key: + all_credentials.append(smartlead_credentials) + if settings.secrets.zerobounce_api_key: + all_credentials.append(zerobounce_credentials) return all_credentials def get_creds_by_id(self, user_id: str, credentials_id: str) -> Credentials | None: diff --git a/autogpt_platform/backend/backend/integrations/providers.py b/autogpt_platform/backend/backend/integrations/providers.py index 6f31988d7f..eb1f513c2e 100644 --- a/autogpt_platform/backend/backend/integrations/providers.py +++ b/autogpt_platform/backend/backend/integrations/providers.py @@ -4,6 +4,7 @@ from enum import Enum # --8<-- [start:ProviderName] class ProviderName(str, Enum): ANTHROPIC = "anthropic" + APOLLO = "apollo" COMPASS = "compass" DISCORD = "discord" D_ID = "d_id" @@ -32,8 +33,10 @@ class ProviderName(str, Enum): REVID = "revid" SCREENSHOTONE = "screenshotone" SLANT3D = "slant3d" + SMARTLEAD = "smartlead" SMTP = "smtp" TWITTER = "twitter" TODOIST = "todoist" UNREAL_SPEECH = "unreal_speech" + ZEROBOUNCE = "zerobounce" # --8<-- [end:ProviderName] diff --git 
a/autogpt_platform/backend/backend/notifications/notifications.py b/autogpt_platform/backend/backend/notifications/notifications.py index f1f32f9301..1d99011edb 100644 --- a/autogpt_platform/backend/backend/notifications/notifications.py +++ b/autogpt_platform/backend/backend/notifications/notifications.py @@ -1,9 +1,11 @@ import logging import time -from typing import TYPE_CHECKING +from typing import Callable +import aio_pika from aio_pika.exceptions import QueueEmpty -from autogpt_libs.utils.cache import thread_cached +from prisma.enums import NotificationType +from pydantic import BaseModel from backend.data.notifications import ( BatchingStrategy, @@ -13,26 +15,25 @@ from backend.data.notifications import ( get_data_type, ) from backend.data.rabbitmq import Exchange, ExchangeType, Queue, RabbitMQConfig -from backend.executor.database import DatabaseManager +from backend.data.user import get_user_email_by_id, get_user_notification_preference from backend.notifications.email import EmailSender -from backend.util.service import AppService, expose, get_service_client +from backend.util.service import AppService, expose from backend.util.settings import Settings -if TYPE_CHECKING: - from backend.executor import DatabaseManager - logger = logging.getLogger(__name__) settings = Settings() +class NotificationEvent(BaseModel): + event: NotificationEventDTO + model: NotificationEventModel + + def create_notification_config() -> RabbitMQConfig: """Create RabbitMQ configuration for notifications""" notification_exchange = Exchange(name="notifications", type=ExchangeType.TOPIC) - summary_exchange = Exchange(name="summaries", type=ExchangeType.TOPIC) - - dead_letter_exchange = Exchange(name="dead_letter", type=ExchangeType.DIRECT) - delay_exchange = Exchange(name="delay", type=ExchangeType.DIRECT) + dead_letter_exchange = Exchange(name="dead_letter", type=ExchangeType.TOPIC) queues = [ # Main notification queues @@ -45,34 +46,6 @@ def create_notification_config() -> 
RabbitMQConfig: "x-dead-letter-routing-key": "failed.immediate", }, ), - Queue( - name="backoff_notifications", - exchange=notification_exchange, - routing_key="notification.backoff.#", - arguments={ - "x-dead-letter-exchange": dead_letter_exchange.name, - "x-dead-letter-routing-key": "failed.backoff", - }, - ), - # Summary queues - Queue( - name="daily_summary_trigger", - exchange=summary_exchange, - routing_key="summary.daily", - arguments={"x-message-ttl": 86400000}, # 24 hours - ), - Queue( - name="weekly_summary_trigger", - exchange=summary_exchange, - routing_key="summary.weekly", - arguments={"x-message-ttl": 604800000}, # 7 days - ), - Queue( - name="monthly_summary_trigger", - exchange=summary_exchange, - routing_key="summary.monthly", - arguments={"x-message-ttl": 2592000000}, # 30 days - ), # Failed notifications queue Queue( name="failed_notifications", @@ -84,10 +57,7 @@ def create_notification_config() -> RabbitMQConfig: return RabbitMQConfig( exchanges=[ notification_exchange, - # batch_exchange, - summary_exchange, dead_letter_exchange, - delay_exchange, ], queues=queues, ) @@ -120,15 +90,15 @@ class NotificationManager(AppService): def queue_notification(self, event: NotificationEventDTO) -> NotificationResult: """Queue a notification - exposed method for other services to call""" try: - logger.info(f"Recieved Request to queue {event=}") - # Workaround for not being able to seralize generics over the expose bus + logger.info(f"Received Request to queue {event=}") + # Workaround for not being able to serialize generics over the expose bus parsed_event = NotificationEventModel[ get_data_type(event.type) ].model_validate(event.model_dump()) routing_key = self.get_routing_key(parsed_event) message = parsed_event.model_dump_json() - logger.info(f"Recieved Request to queue {message=}") + logger.info(f"Received Request to queue {message=}") exchange = "notifications" @@ -145,41 +115,93 @@ class NotificationManager(AppService): return NotificationResult( 
success=True, - message=(f"Notification queued with routing key: {routing_key}"), + message=f"Notification queued with routing key: {routing_key}", ) except Exception as e: - logger.error(f"Error queueing notification: {e}") + logger.exception(f"Error queueing notification: {e}") return NotificationResult(success=False, message=str(e)) - async def _process_immediate(self, message: str) -> bool: - """Process a single notification immediately, returning whether to put into the failed queue""" + def _should_email_user_based_on_preference( + self, user_id: str, event_type: NotificationType + ) -> bool: + return self.run_and_wait( + get_user_notification_preference(user_id) + ).preferences.get(event_type, True) + + def _parse_message(self, message: str) -> NotificationEvent | None: try: event = NotificationEventDTO.model_validate_json(message) - parsed_event = NotificationEventModel[ + model = NotificationEventModel[ get_data_type(event.type) ].model_validate_json(message) - user_email = get_db_client().get_user_email_by_id(event.user_id) - should_send = ( - get_db_client() - .get_user_notification_preference(event.user_id) - .preferences[event.type] - ) - if not user_email: + return NotificationEvent(event=event, model=model) + except Exception as e: + logger.error(f"Error parsing message due to non matching schema {e}") + return None + + def _process_immediate(self, message: str) -> bool: + """Process a single notification immediately, returning whether to put into the failed queue""" + try: + parsed = self._parse_message(message) + if not parsed: + return False + event = parsed.event + model = parsed.model + + if event.recipient_email: + recipient_email = event.recipient_email + else: + recipient_email = self.run_and_wait(get_user_email_by_id(event.user_id)) + if not recipient_email: logger.error(f"User email not found for user {event.user_id}") return False + + should_send = self._should_email_user_based_on_preference( + event.user_id, event.type + ) if not 
should_send: logger.debug( f"User {event.user_id} does not want to receive {event.type} notifications" ) return True - self.email_sender.send_templated(event.type, user_email, parsed_event) - logger.info(f"Processing notification: {parsed_event}") + + self.email_sender.send_templated(event.type, recipient_email, model) + logger.info(f"Processing notification: {model}") return True except Exception as e: - logger.error(f"Error processing notification: {e}") + logger.exception(f"Error processing notification: {e}") return False + def _run_queue( + self, + queue: aio_pika.abc.AbstractQueue, + process_func: Callable[[str], bool], + error_queue_name: str, + ): + message: aio_pika.abc.AbstractMessage | None = None + try: + # This parameter "no_ack" is named like shit, think of it as "auto_ack" + message = self.run_and_wait(queue.get(timeout=1.0, no_ack=False)) + result = process_func(message.body.decode()) + if result: + self.run_and_wait(message.ack()) + else: + self.run_and_wait(message.reject(requeue=False)) + + except QueueEmpty: + logger.debug(f"Queue {error_queue_name} empty") + except Exception as e: + if message: + logger.error( + f"Error in notification service loop, message rejected {e}" + ) + self.run_and_wait(message.reject(requeue=False)) + else: + logger.error( + f"Error in notification service loop, message unable to be rejected, and will have to be manually removed to free space in the queue: {e}" + ) + def run_service(self): logger.info(f"[{self.service_name}] Started notification service") @@ -192,20 +214,11 @@ class NotificationManager(AppService): while self.running: try: - # Process immediate notifications - try: - message = self.run_and_wait(immediate_queue.get()) - - if message: - success = self.run_and_wait( - self._process_immediate(message.body.decode()) - ) - if success: - self.run_and_wait(message.ack()) - else: - self.run_and_wait(message.reject(requeue=True)) - except QueueEmpty: - logger.debug("Immediate queue empty") + self._run_queue( + 
queue=immediate_queue, + process_func=self._process_immediate, + error_queue_name="immediate_notifications", + ) time.sleep(0.1) @@ -218,12 +231,3 @@ class NotificationManager(AppService): """Cleanup service resources""" self.running = False super().cleanup() - - # ------- UTILITIES ------- # - - -@thread_cached -def get_db_client() -> "DatabaseManager": - from backend.executor import DatabaseManager - - return get_service_client(DatabaseManager) diff --git a/autogpt_platform/backend/backend/notifications/templates/refund_processed.html.jinja2 b/autogpt_platform/backend/backend/notifications/templates/refund_processed.html.jinja2 new file mode 100644 index 0000000000..244c062a70 --- /dev/null +++ b/autogpt_platform/backend/backend/notifications/templates/refund_processed.html.jinja2 @@ -0,0 +1,51 @@ +{# Refund Processed Notification Email Template #} +{# + Template variables: + data.user_id: the ID of the user + data.user_name: the user's name + data.user_email: the user's email address + data.transaction_id: the transaction ID for the refund request + data.refund_request_id: the refund request ID + data.reason: the reason for the refund request + data.amount: the refund amount in cents (divide by 100 for dollars) + data.balance: the user's latest balance in cents (after the refund deduction) + + Subject: Refund for ${{ data.amount / 100 }} to {{ data.user_name }} has been processed +#} + + + + + + Refund Processed Notification + + +

Hello Administrator,

+ +

+ This is to notify you that the refund for ${{ data.amount / 100 }} to {{ data.user_name }} has been processed successfully. +

+ +

Refund Details

+ + +

+ The user's balance has been updated accordingly after the deduction. +

+ +

+ Please contact the support team if you have any questions or need further assistance regarding this refund. +

+ +

Best regards,
Your Notification System

+ + diff --git a/autogpt_platform/backend/backend/notifications/templates/refund_request.html.jinja2 b/autogpt_platform/backend/backend/notifications/templates/refund_request.html.jinja2 new file mode 100644 index 0000000000..b9e2ac954a --- /dev/null +++ b/autogpt_platform/backend/backend/notifications/templates/refund_request.html.jinja2 @@ -0,0 +1,72 @@ +{# Refund Request Email Template #} +{# + Template variables: + data.user_id: the ID of the user + data.user_name: the user's name + data.user_email: the user's email address + data.transaction_id: the transaction ID for the refund request + data.refund_request_id: the refund request ID + data.reason: the reason for the refund request + data.amount: the refund amount in cents (divide by 100 for dollars) + data.balance: the user's balance in cents (divide by 100 for dollars) + + Subject: [ACTION REQUIRED] You got a ${{ data.amount / 100 }} refund request from {{ data.user_name }} +#} + + + + + + Refund Request Approval Needed + + +

Hello Administrator,

+

+ A refund request has been submitted by a user and requires your approval. +

+ +

Refund Request Details

+ + +

+ To approve this refund, please click on the following Stripe link: + https://dashboard.stripe.com/test/payments/{{data.transaction_id}} +
+ And then click on the "Refund" button. +

+ +

+ To reject this refund, please follow these steps: +

+
    +
  1. + Visit the Supabase Dashboard: + https://supabase.com/dashboard/project/bgwpwdsxblryihinutbx/editor +
  2. +
  3. + Navigate to the RefundRequest table. +
  4. +
  5. + Filter the transactionKey column with the Transaction ID: {{ data.transaction_id }}. +
  6. +
  7. + Update the status field to REJECTED and enter the rejection reason in the result column. +
  8. +
+ +

+ Please take the necessary action at your earliest convenience. +

+

Thank you for your prompt attention.

+

Best regards,
Your Notification System

+ + diff --git a/autogpt_platform/backend/backend/server/rest_api.py b/autogpt_platform/backend/backend/server/rest_api.py index ae756ee71f..d5435e3615 100644 --- a/autogpt_platform/backend/backend/server/rest_api.py +++ b/autogpt_platform/backend/backend/server/rest_api.py @@ -155,7 +155,7 @@ class AgentServer(backend.util.service.AppProcess): @staticmethod async def test_get_graph_run_status(graph_exec_id: str, user_id: str): - execution = await backend.data.graph.get_execution( + execution = await backend.data.graph.get_execution_meta( user_id=user_id, execution_id=graph_exec_id ) if not execution: @@ -163,10 +163,10 @@ class AgentServer(backend.util.service.AppProcess): return execution.status @staticmethod - async def test_get_graph_run_node_execution_results( + async def test_get_graph_run_results( graph_id: str, graph_exec_id: str, user_id: str ): - return await backend.server.routers.v1.get_graph_run_node_execution_results( + return await backend.server.routers.v1.get_graph_execution( graph_id, graph_exec_id, user_id ) diff --git a/autogpt_platform/backend/backend/server/routers/v1.py b/autogpt_platform/backend/backend/server/routers/v1.py index 99d86a8de8..e1ad2c127f 100644 --- a/autogpt_platform/backend/backend/server/routers/v1.py +++ b/autogpt_platform/backend/backend/server/routers/v1.py @@ -16,7 +16,6 @@ import backend.data.block import backend.server.integrations.router import backend.server.routers.analytics import backend.server.v2.library.db as library_db -from backend.data import execution as execution_db from backend.data import graph as graph_db from backend.data.api_key import ( APIKeyError, @@ -545,7 +544,7 @@ async def set_graph_active_version( ) def execute_graph( graph_id: str, - node_input: Annotated[dict[str, Any], Body(..., embed=True, default_factory=dict)], + node_input: Annotated[dict[str, Any], Body(..., default_factory=dict)], user_id: Annotated[str, Depends(get_user_id)], graph_version: Optional[int] = None, ) -> 
ExecuteGraphResponse: @@ -566,8 +565,10 @@ def execute_graph( ) async def stop_graph_run( graph_exec_id: str, user_id: Annotated[str, Depends(get_user_id)] -) -> Sequence[execution_db.ExecutionResult]: - if not await graph_db.get_execution(user_id=user_id, execution_id=graph_exec_id): +) -> graph_db.GraphExecution: + if not await graph_db.get_execution_meta( + user_id=user_id, execution_id=graph_exec_id + ): raise HTTPException(404, detail=f"Agent execution #{graph_exec_id} not found") await asyncio.to_thread( @@ -575,7 +576,13 @@ async def stop_graph_run( ) # Retrieve & return canceled graph execution in its final state - return await execution_db.get_execution_results(graph_exec_id) + result = await graph_db.get_execution(execution_id=graph_exec_id, user_id=user_id) + if not result: + raise HTTPException( + 500, + detail=f"Could not fetch graph execution #{graph_exec_id} after stopping", + ) + return result @v1_router.get( @@ -583,10 +590,22 @@ async def stop_graph_run( tags=["graphs"], dependencies=[Depends(auth_middleware)], ) -async def get_executions( +async def get_graphs_executions( user_id: Annotated[str, Depends(get_user_id)], -) -> list[graph_db.GraphExecution]: - return await graph_db.get_executions(user_id=user_id) +) -> list[graph_db.GraphExecutionMeta]: + return await graph_db.get_graphs_executions(user_id=user_id) + + +@v1_router.get( + path="/graphs/{graph_id}/executions", + tags=["graphs"], + dependencies=[Depends(auth_middleware)], +) +async def get_graph_executions( + graph_id: str, + user_id: Annotated[str, Depends(get_user_id)], +) -> list[graph_db.GraphExecutionMeta]: + return await graph_db.get_graph_executions(graph_id=graph_id, user_id=user_id) @v1_router.get( @@ -594,16 +613,20 @@ async def get_executions( tags=["graphs"], dependencies=[Depends(auth_middleware)], ) -async def get_graph_run_node_execution_results( +async def get_graph_execution( graph_id: str, graph_exec_id: str, user_id: Annotated[str, Depends(get_user_id)], -) -> 
Sequence[execution_db.ExecutionResult]: +) -> graph_db.GraphExecution: graph = await graph_db.get_graph(graph_id, user_id=user_id) if not graph: raise HTTPException(status_code=404, detail=f"Graph #{graph_id} not found.") - return await execution_db.get_execution_results(graph_exec_id) + result = await graph_db.get_execution(execution_id=graph_exec_id, user_id=user_id) + if not result: + raise HTTPException(status_code=404, detail=f"Graph #{graph_id} not found.") + + return result ######################################################## diff --git a/autogpt_platform/backend/backend/server/v2/store/routes.py b/autogpt_platform/backend/backend/server/v2/store/routes.py index 65d9d9954f..0921cb650e 100644 --- a/autogpt_platform/backend/backend/server/v2/store/routes.py +++ b/autogpt_platform/backend/backend/server/v2/store/routes.py @@ -669,7 +669,8 @@ async def review_submission( reviewer_id=user.user_id, ) return submission - except Exception: + except Exception as e: + logger.error(f"Could not create store submission review: {e}") raise fastapi.HTTPException( status_code=500, detail="An error occurred while creating the store submission review", diff --git a/autogpt_platform/backend/backend/util/json.py b/autogpt_platform/backend/backend/util/json.py index 7f88917414..fbec1312a9 100644 --- a/autogpt_platform/backend/backend/util/json.py +++ b/autogpt_platform/backend/backend/util/json.py @@ -1,8 +1,9 @@ import json -from typing import Any, Type, TypeVar, overload +from typing import Any, Type, TypeGuard, TypeVar, overload import jsonschema from fastapi.encoders import jsonable_encoder +from pydantic import BaseModel from .type import type_match @@ -45,3 +46,17 @@ def validate_with_jsonschema( return None except jsonschema.ValidationError as e: return str(e) + + +def is_list_of_basemodels(value: object) -> TypeGuard[list[BaseModel]]: + return isinstance(value, list) and all( + isinstance(item, BaseModel) for item in value + ) + + +def 
convert_pydantic_to_json(output_data: Any) -> Any: + if isinstance(output_data, BaseModel): + return output_data.model_dump() + if is_list_of_basemodels(output_data): + return [item.model_dump() for item in output_data] + return output_data diff --git a/autogpt_platform/backend/backend/util/settings.py b/autogpt_platform/backend/backend/util/settings.py index d04293a8a2..74595225f2 100644 --- a/autogpt_platform/backend/backend/util/settings.py +++ b/autogpt_platform/backend/backend/util/settings.py @@ -97,6 +97,14 @@ class Config(UpdateTrackingModel["Config"], BaseSettings): default=500, description="Maximum number of credits above the balance to be auto-approved.", ) + refund_notification_email: str = Field( + default="refund@agpt.co", + description="Email address to send refund notifications to.", + ) + refund_request_time_key_format: str = Field( + default="%Y-%W", # This will allow for weekly refunds per user. + description="Time key format for refund requests.", + ) model_config = SettingsConfigDict( env_file=".env", @@ -364,6 +372,10 @@ class Secrets(UpdateTrackingModel["Secrets"], BaseSettings): screenshotone_api_key: str = Field(default="", description="ScreenshotOne API Key") + apollo_api_key: str = Field(default="", description="Apollo API Key") + smartlead_api_key: str = Field(default="", description="SmartLead API Key") + zerobounce_api_key: str = Field(default="", description="ZeroBounce API Key") + # Add more secret fields as needed model_config = SettingsConfigDict( diff --git a/autogpt_platform/backend/backend/util/test.py b/autogpt_platform/backend/backend/util/test.py index 29b2cfdbc5..7691fec7d1 100644 --- a/autogpt_platform/backend/backend/util/test.py +++ b/autogpt_platform/backend/backend/util/test.py @@ -78,9 +78,10 @@ async def wait_execution( # Wait for the executions to complete for i in range(timeout): if await is_execution_completed(): - return await AgentServer().test_get_graph_run_node_execution_results( + graph_exec = await 
AgentServer().test_get_graph_run_results( graph_id, graph_exec_id, user_id ) + return graph_exec.node_executions time.sleep(1) assert False, "Execution did not complete in time." diff --git a/autogpt_platform/backend/migrations/20250220111649_add_refund_notifications/migration.sql b/autogpt_platform/backend/migrations/20250220111649_add_refund_notifications/migration.sql new file mode 100644 index 0000000000..10609ad802 --- /dev/null +++ b/autogpt_platform/backend/migrations/20250220111649_add_refund_notifications/migration.sql @@ -0,0 +1,9 @@ +-- AlterEnum +-- This migration adds more than one value to an enum. +-- With PostgreSQL versions 11 and earlier, this is not possible +-- in a single migration. This can be worked around by creating +-- multiple migrations, each migration adding only one value to +-- the enum. + +ALTER TYPE "NotificationType" ADD VALUE 'REFUND_REQUEST'; +ALTER TYPE "NotificationType" ADD VALUE 'REFUND_PROCESSED'; diff --git a/autogpt_platform/backend/poetry.lock b/autogpt_platform/backend/poetry.lock index 2e88aded96..463ad6ea3f 100644 --- a/autogpt_platform/backend/poetry.lock +++ b/autogpt_platform/backend/poetry.lock @@ -5936,6 +5936,21 @@ files = [ defusedxml = ">=0.7.1,<0.8.0" requests = "*" +[[package]] +name = "zerobouncesdk" +version = "1.1.1" +description = "ZeroBounce Python API - https://www.zerobounce.net." 
+optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "zerobouncesdk-1.1.1-py3-none-any.whl", hash = "sha256:9fb9dfa44fe4ce35d6f2e43d5144c31ca03544a3317d75643cb9f86b0c028675"}, + {file = "zerobouncesdk-1.1.1.tar.gz", hash = "sha256:00aa537263d5bc21534c0007dd9f94ce8e0986caa530c5a0bbe0bd917451f236"}, +] + +[package.dependencies] +requests = ">=2.22.0" + [[package]] name = "zipp" version = "3.21.0" @@ -6072,4 +6087,4 @@ cffi = ["cffi (>=1.11)"] [metadata] lock-version = "2.1" python-versions = ">=3.10,<3.13" -content-hash = "1a222b9695cc0506038d4f0b021f71ff9a443490cee021beca5365a47b0437a2" +content-hash = "ba36ce74308bd37e19ca790e63dae387e5ad6173b2945dd63b58b3e918e85b46" diff --git a/autogpt_platform/backend/pyproject.toml b/autogpt_platform/backend/pyproject.toml index d8999aa1ca..31be7656fb 100644 --- a/autogpt_platform/backend/pyproject.toml +++ b/autogpt_platform/backend/pyproject.toml @@ -63,6 +63,7 @@ tweepy = "^4.14.0" uvicorn = { extras = ["standard"], version = "^0.34.0" } websockets = "^13.1" youtube-transcript-api = "^0.6.2" +zerobouncesdk = "^1.1.1" # NOTE: please insert new dependencies in their alphabetical location [tool.poetry.group.dev.dependencies] diff --git a/autogpt_platform/backend/schema.prisma b/autogpt_platform/backend/schema.prisma index 699adc735f..c98e9113ae 100644 --- a/autogpt_platform/backend/schema.prisma +++ b/autogpt_platform/backend/schema.prisma @@ -133,6 +133,8 @@ enum NotificationType { DAILY_SUMMARY WEEKLY_SUMMARY MONTHLY_SUMMARY + REFUND_REQUEST + REFUND_PROCESSED } model NotificationEvent { diff --git a/autogpt_platform/backend/test/conftest.py b/autogpt_platform/backend/test/conftest.py index d107a0b322..ec8e661a01 100644 --- a/autogpt_platform/backend/test/conftest.py +++ b/autogpt_platform/backend/test/conftest.py @@ -1,23 +1,25 @@ import logging +import os import pytest -from backend.util.test import SpinTestServer +from backend.util.logging import configure_logging # NOTE: You can run tests 
like with the --log-cli-level=INFO to see the logs # Set up logging +configure_logging() logger = logging.getLogger(__name__) -# Create console handler with formatting -ch = logging.StreamHandler() -ch.setLevel(logging.INFO) -formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s") -ch.setFormatter(formatter) -logger.addHandler(ch) +# Reduce Prisma log spam unless PRISMA_DEBUG is set +if not os.getenv("PRISMA_DEBUG"): + prisma_logger = logging.getLogger("prisma") + prisma_logger.setLevel(logging.INFO) @pytest.fixture(scope="session") async def server(): + from backend.util.test import SpinTestServer + async with SpinTestServer() as server: yield server diff --git a/autogpt_platform/backend/test/executor/test_manager.py b/autogpt_platform/backend/test/executor/test_manager.py index 3e4a7cacd2..0412a779bc 100644 --- a/autogpt_platform/backend/test/executor/test_manager.py +++ b/autogpt_platform/backend/test/executor/test_manager.py @@ -58,7 +58,7 @@ async def assert_sample_graph_executions( graph_exec_id: str, ): logger.info(f"Checking execution results for graph {test_graph.id}") - executions = await agent_server.test_get_graph_run_node_execution_results( + graph_run = await agent_server.test_get_graph_run_results( test_graph.id, graph_exec_id, test_user.id, @@ -77,7 +77,7 @@ async def assert_sample_graph_executions( ] # Executing StoreValueBlock - exec = executions[0] + exec = graph_run.node_executions[0] logger.info(f"Checking first StoreValueBlock execution: {exec}") assert exec.status == execution.ExecutionStatus.COMPLETED assert exec.graph_exec_id == graph_exec_id @@ -90,7 +90,7 @@ async def assert_sample_graph_executions( assert exec.node_id in [test_graph.nodes[0].id, test_graph.nodes[1].id] # Executing StoreValueBlock - exec = executions[1] + exec = graph_run.node_executions[1] logger.info(f"Checking second StoreValueBlock execution: {exec}") assert exec.status == execution.ExecutionStatus.COMPLETED assert exec.graph_exec_id == 
graph_exec_id @@ -103,7 +103,7 @@ async def assert_sample_graph_executions( assert exec.node_id in [test_graph.nodes[0].id, test_graph.nodes[1].id] # Executing FillTextTemplateBlock - exec = executions[2] + exec = graph_run.node_executions[2] logger.info(f"Checking FillTextTemplateBlock execution: {exec}") assert exec.status == execution.ExecutionStatus.COMPLETED assert exec.graph_exec_id == graph_exec_id @@ -118,7 +118,7 @@ async def assert_sample_graph_executions( assert exec.node_id == test_graph.nodes[2].id # Executing PrintToConsoleBlock - exec = executions[3] + exec = graph_run.node_executions[3] logger.info(f"Checking PrintToConsoleBlock execution: {exec}") assert exec.status == execution.ExecutionStatus.COMPLETED assert exec.graph_exec_id == graph_exec_id @@ -201,14 +201,14 @@ async def test_input_pin_always_waited(server: SpinTestServer): ) logger.info("Checking execution results") - executions = await server.agent_server.test_get_graph_run_node_execution_results( + graph_exec = await server.agent_server.test_get_graph_run_results( test_graph.id, graph_exec_id, test_user.id ) - assert len(executions) == 3 + assert len(graph_exec.node_executions) == 3 # FindInDictionaryBlock should wait for the input pin to be provided, # Hence executing extraction of "key" from {"key1": "value1", "key2": "value2"} - assert executions[2].status == execution.ExecutionStatus.COMPLETED - assert executions[2].output_data == {"output": ["value2"]} + assert graph_exec.node_executions[2].status == execution.ExecutionStatus.COMPLETED + assert graph_exec.node_executions[2].output_data == {"output": ["value2"]} logger.info("Completed test_input_pin_always_waited") @@ -284,12 +284,12 @@ async def test_static_input_link_on_graph(server: SpinTestServer): server.agent_server, test_graph, test_user, {}, 8 ) logger.info("Checking execution results") - executions = await server.agent_server.test_get_graph_run_node_execution_results( + graph_exec = await 
server.agent_server.test_get_graph_run_results( test_graph.id, graph_exec_id, test_user.id ) - assert len(executions) == 8 + assert len(graph_exec.node_executions) == 8 # The last 3 executions will be a+b=4+5=9 - for i, exec_data in enumerate(executions[-3:]): + for i, exec_data in enumerate(graph_exec.node_executions[-3:]): logger.info(f"Checking execution {i+1} of last 3: {exec_data}") assert exec_data.status == execution.ExecutionStatus.COMPLETED assert exec_data.output_data == {"result": [9]} diff --git a/autogpt_platform/frontend/src/app/agents/[id]/page.tsx b/autogpt_platform/frontend/src/app/agents/[id]/page.tsx new file mode 100644 index 0000000000..7967deae8a --- /dev/null +++ b/autogpt_platform/frontend/src/app/agents/[id]/page.tsx @@ -0,0 +1,184 @@ +"use client"; +import React, { useCallback, useEffect, useMemo, useState } from "react"; +import { useParams, useRouter } from "next/navigation"; + +import { useBackendAPI } from "@/lib/autogpt-server-api/context"; +import { + GraphExecution, + GraphExecutionMeta, + GraphMeta, + Schedule, +} from "@/lib/autogpt-server-api"; + +import AgentRunDraftView from "@/components/agents/agent-run-draft-view"; +import AgentRunDetailsView from "@/components/agents/agent-run-details-view"; +import AgentRunsSelectorList from "@/components/agents/agent-runs-selector-list"; +import AgentScheduleDetailsView from "@/components/agents/agent-schedule-details-view"; + +export default function AgentRunsPage(): React.ReactElement { + const { id: agentID }: { id: string } = useParams(); + const router = useRouter(); + const api = useBackendAPI(); + + const [agent, setAgent] = useState(null); + const [agentRuns, setAgentRuns] = useState([]); + const [schedules, setSchedules] = useState([]); + const [selectedView, selectView] = useState<{ + type: "run" | "schedule"; + id?: string; + }>({ type: "run" }); + const [selectedRun, setSelectedRun] = useState< + GraphExecution | GraphExecutionMeta | null + >(null); + const [selectedSchedule, 
setSelectedSchedule] = useState( + null, + ); + const [isFirstLoad, setIsFirstLoad] = useState(true); + + const openRunDraftView = useCallback(() => { + selectView({ type: "run" }); + }, []); + + const selectRun = useCallback((id: string) => { + selectView({ type: "run", id }); + }, []); + + const selectSchedule = useCallback((schedule: Schedule) => { + selectView({ type: "schedule", id: schedule.id }); + setSelectedSchedule(schedule); + }, []); + + const fetchAgents = useCallback(() => { + api.getGraph(agentID).then(setAgent); + api.getGraphExecutions(agentID).then((agentRuns) => { + const sortedRuns = agentRuns.toSorted( + (a, b) => b.started_at - a.started_at, + ); + setAgentRuns(sortedRuns); + + if (!selectedView.id && isFirstLoad && sortedRuns.length > 0) { + // only for first load or first execution + setIsFirstLoad(false); + selectView({ type: "run", id: sortedRuns[0].execution_id }); + setSelectedRun(sortedRuns[0]); + } + }); + if (selectedView.type == "run" && selectedView.id) { + api.getGraphExecutionInfo(agentID, selectedView.id).then(setSelectedRun); + } + }, [api, agentID, selectedView, isFirstLoad]); + + useEffect(() => { + fetchAgents(); + }, []); + + // load selectedRun based on selectedView + useEffect(() => { + if (selectedView.type != "run" || !selectedView.id) return; + + // pull partial data from "cache" while waiting for the rest to load + if (selectedView.id !== selectedRun?.execution_id) { + setSelectedRun( + agentRuns.find((r) => r.execution_id == selectedView.id) ?? 
null, + ); + } + + api.getGraphExecutionInfo(agentID, selectedView.id).then(setSelectedRun); + }, [api, selectedView, agentRuns, agentID]); + + const fetchSchedules = useCallback(async () => { + // TODO: filter in backend - https://github.com/Significant-Gravitas/AutoGPT/issues/9183 + setSchedules( + (await api.listSchedules()).filter((s) => s.graph_id == agentID), + ); + }, [api, agentID]); + + useEffect(() => { + fetchSchedules(); + }, [fetchSchedules]); + + const removeSchedule = useCallback( + async (scheduleId: string) => { + const removedSchedule = await api.deleteSchedule(scheduleId); + setSchedules(schedules.filter((s) => s.id !== removedSchedule.id)); + }, + [schedules, api], + ); + + /* TODO: use websockets instead of polling - https://github.com/Significant-Gravitas/AutoGPT/issues/8782 */ + useEffect(() => { + const intervalId = setInterval(() => fetchAgents(), 5000); + return () => clearInterval(intervalId); + }, [fetchAgents, agent]); + + const agentActions: { label: string; callback: () => void }[] = useMemo( + () => [ + { + label: "Open in builder", + callback: () => agent && router.push(`/build?flowID=${agent.id}`), + }, + ], + [agent, router], + ); + + if (!agent) { + /* TODO: implement loading indicators / skeleton page */ + return Loading...; + } + + return ( +
+ {/* Sidebar w/ list of runs */} + {/* TODO: render this below header in sm and md layouts */} + + +
+ {/* Header */} +
+

+ { + agent.name /* TODO: use dynamic/custom run title - https://github.com/Significant-Gravitas/AutoGPT/issues/9184 */ + } +

+
+ + {/* Run / Schedule views */} + {(selectedView.type == "run" ? ( + selectedView.id ? ( + selectedRun && ( + + ) + ) : ( + selectRun(runID)} + agentActions={agentActions} + /> + ) + ) : selectedView.type == "schedule" ? ( + selectedSchedule && ( + selectRun(runID)} + agentActions={agentActions} + /> + ) + ) : null) ||

Loading...

} +
+
+ ); +} diff --git a/autogpt_platform/frontend/src/app/globals.css b/autogpt_platform/frontend/src/app/globals.css index c2998c08d0..46b185d69e 100644 --- a/autogpt_platform/frontend/src/app/globals.css +++ b/autogpt_platform/frontend/src/app/globals.css @@ -2,54 +2,9 @@ @tailwind components; @tailwind utilities; -@layer base { - .font-neue { - font-family: "PP Neue Montreal TT", sans-serif; - } -} - -@layer utilities { - .w-110 { - width: 27.5rem; - } - .h-7\.5 { - height: 1.1875rem; - } - .h-18 { - height: 4.5rem; - } - .h-238 { - height: 14.875rem; - } - .top-158 { - top: 9.875rem; - } - .top-254 { - top: 15.875rem; - } - .top-284 { - top: 17.75rem; - } - .top-360 { - top: 22.5rem; - } - .left-297 { - left: 18.5625rem; - } - .left-34 { - left: 2.125rem; - } -} - -@layer utilities { - .text-balance { - text-wrap: balance; - } -} - @layer base { :root { - --background: 0 0% 100%; + --background: 0 0% 99.6%; /* #FEFEFE */ --foreground: 240 10% 3.9%; --card: 0 0% 100%; --card-foreground: 240 10% 3.9%; @@ -61,8 +16,8 @@ --secondary-foreground: 240 5.9% 10%; --muted: 240 4.8% 95.9%; --muted-foreground: 240 3.8% 46.1%; - --accent: 240 4.8% 95.9%; - --accent-foreground: 240 5.9% 10%; + --accent: 262 83% 58%; + --accent-foreground: 0 0% 100%; --destructive: 0 84.2% 60.2%; --destructive-foreground: 0 0% 98%; --border: 240 5.9% 90%; @@ -102,9 +57,7 @@ --chart-4: 280 65% 60%; --chart-5: 340 75% 55%; } -} -@layer base { * { @apply border-border; } @@ -112,6 +65,14 @@ @apply bg-background text-foreground; } + .font-neue { + font-family: "PP Neue Montreal TT", sans-serif; + } +} + +/* *** AutoGPT Design Components *** */ + +@layer components { .agpt-border-input { @apply border border-input focus-visible:border-gray-400 focus-visible:outline-none; } @@ -119,4 +80,67 @@ .agpt-shadow-input { @apply shadow-sm focus-visible:shadow-md; } + + .agpt-rounded-card { + @apply rounded-2xl; + } + + .agpt-rounded-box { + @apply rounded-3xl; + } + + .agpt-card { + @apply agpt-rounded-card 
border border-zinc-300 bg-white p-[1px]; + } + + .agpt-box { + @apply agpt-card agpt-rounded-box; + } + + .agpt-div { + @apply border-zinc-200 p-5; + } +} + +@layer utilities { + .agpt-card-selected { + @apply border-2 border-accent bg-violet-50/50 p-0; + } +} + +@layer utilities { + /* TODO: 1. remove unused utility classes */ + /* TODO: 2. fix naming of numbered dimensions so that the number is 4*dimension */ + /* TODO: 3. move to tailwind.config.ts spacing config */ + .h-7\.5 { + height: 1.1875rem; + } + .h-18 { + height: 4.5rem; + } + .h-238 { + height: 14.875rem; + } + .top-158 { + top: 9.875rem; + } + .top-254 { + top: 15.875rem; + } + .top-284 { + top: 17.75rem; + } + .top-360 { + top: 22.5rem; + } + .left-297 { + left: 18.5625rem; + } + .left-34 { + left: 2.125rem; + } + + .text-balance { + text-wrap: balance; + } } diff --git a/autogpt_platform/frontend/src/app/layout.tsx b/autogpt_platform/frontend/src/app/layout.tsx index e6888f6fb4..7bac676a94 100644 --- a/autogpt_platform/frontend/src/app/layout.tsx +++ b/autogpt_platform/frontend/src/app/layout.tsx @@ -34,7 +34,7 @@ export default async function RootLayout({ return ( { const [flows, setFlows] = useState([]); - const [executions, setExecutions] = useState([]); + const [executions, setExecutions] = useState([]); const [schedules, setSchedules] = useState([]); const [selectedFlow, setSelectedFlow] = useState(null); - const [selectedRun, setSelectedRun] = useState(null); + const [selectedRun, setSelectedRun] = useState( + null, + ); const [sortColumn, setSortColumn] = useState("id"); const [sortDirection, setSortDirection] = useState<"asc" | "desc">("asc"); const api = useBackendAPI(); diff --git a/autogpt_platform/frontend/src/app/profile/(user)/credits/page.tsx b/autogpt_platform/frontend/src/app/profile/(user)/credits/page.tsx index 5a8991acb8..3ece782495 100644 --- a/autogpt_platform/frontend/src/app/profile/(user)/credits/page.tsx +++ 
b/autogpt_platform/frontend/src/app/profile/(user)/credits/page.tsx @@ -260,7 +260,6 @@ export default function CreditsPage() {

+ ))} + + +
+

Agent actions

+ {agentActions.map((action, i) => ( + + ))} +
+ + + + ); +} diff --git a/autogpt_platform/frontend/src/components/agents/agent-run-draft-view.tsx b/autogpt_platform/frontend/src/components/agents/agent-run-draft-view.tsx new file mode 100644 index 0000000000..8727d23056 --- /dev/null +++ b/autogpt_platform/frontend/src/components/agents/agent-run-draft-view.tsx @@ -0,0 +1,99 @@ +"use client"; +import React, { useCallback, useMemo, useState } from "react"; + +import { useBackendAPI } from "@/lib/autogpt-server-api/context"; +import { GraphMeta } from "@/lib/autogpt-server-api"; + +import { Card, CardContent, CardHeader, CardTitle } from "@/components/ui/card"; +import { Button, ButtonProps } from "@/components/agptui/Button"; +import { Input } from "@/components/ui/input"; + +export default function AgentRunDraftView({ + agent, + onRun, + agentActions, +}: { + agent: GraphMeta; + onRun: (runID: string) => void; + agentActions: { label: string; callback: () => void }[]; +}): React.ReactNode { + const api = useBackendAPI(); + + const agentInputs = agent.input_schema.properties; + const [inputValues, setInputValues] = useState>({}); + + const doRun = useCallback( + () => + api + .executeGraph(agent.id, agent.version, inputValues) + .then((newRun) => onRun(newRun.graph_exec_id)), + [api, agent, inputValues, onRun], + ); + + const runActions: { + label: string; + variant?: ButtonProps["variant"]; + callback: () => void; + }[] = useMemo( + () => [{ label: "Run", variant: "accent", callback: () => doRun() }], + [doRun], + ); + + return ( +
+
+ + + Input + + + {Object.entries(agentInputs).map(([key, inputSubSchema]) => ( +
+ + + setInputValues((obj) => ({ ...obj, [key]: e.target.value })) + } + /> +
+ ))} +
+
+
+ + {/* Actions */} + +
+ ); +} diff --git a/autogpt_platform/frontend/src/components/agents/agent-run-status-chip.tsx b/autogpt_platform/frontend/src/components/agents/agent-run-status-chip.tsx new file mode 100644 index 0000000000..fed82611f8 --- /dev/null +++ b/autogpt_platform/frontend/src/components/agents/agent-run-status-chip.tsx @@ -0,0 +1,65 @@ +import React from "react"; + +import { Badge } from "@/components/ui/badge"; + +import { GraphExecutionMeta } from "@/lib/autogpt-server-api/types"; + +export type AgentRunStatus = + | "success" + | "failed" + | "queued" + | "running" + | "stopped" + | "scheduled" + | "draft"; + +export const agentRunStatusMap: Record< + GraphExecutionMeta["status"], + AgentRunStatus +> = { + COMPLETED: "success", + FAILED: "failed", + QUEUED: "queued", + RUNNING: "running", + TERMINATED: "stopped", + // TODO: implement "draft" - https://github.com/Significant-Gravitas/AutoGPT/issues/9168 +}; + +const statusData: Record< + AgentRunStatus, + { label: string; variant: keyof typeof statusStyles } +> = { + success: { label: "Success", variant: "success" }, + running: { label: "Running", variant: "info" }, + failed: { label: "Failed", variant: "destructive" }, + queued: { label: "Queued", variant: "warning" }, + draft: { label: "Draft", variant: "secondary" }, + stopped: { label: "Stopped", variant: "secondary" }, + scheduled: { label: "Scheduled", variant: "secondary" }, +}; + +const statusStyles = { + success: + "bg-green-100 text-green-800 hover:bg-green-100 hover:text-green-800", + destructive: "bg-red-100 text-red-800 hover:bg-red-100 hover:text-red-800", + warning: + "bg-yellow-100 text-yellow-800 hover:bg-yellow-100 hover:text-yellow-800", + info: "bg-blue-100 text-blue-800 hover:bg-blue-100 hover:text-blue-800", + secondary: + "bg-slate-100 text-slate-800 hover:bg-slate-100 hover:text-slate-800", +}; + +export default function AgentRunStatusChip({ + status, +}: { + status: AgentRunStatus; +}): React.ReactElement { + return ( + + 
{statusData[status].label} + + ); +} diff --git a/autogpt_platform/frontend/src/components/agents/agent-run-summary-card.tsx b/autogpt_platform/frontend/src/components/agents/agent-run-summary-card.tsx new file mode 100644 index 0000000000..7336835438 --- /dev/null +++ b/autogpt_platform/frontend/src/components/agents/agent-run-summary-card.tsx @@ -0,0 +1,95 @@ +import React from "react"; +import moment from "moment"; +import { MoreVertical } from "lucide-react"; + +import { cn } from "@/lib/utils"; + +import { Button } from "@/components/ui/button"; +import { Card, CardContent } from "@/components/ui/card"; +import { + DropdownMenu, + DropdownMenuContent, + DropdownMenuItem, + DropdownMenuTrigger, +} from "@/components/ui/dropdown-menu"; + +import AgentRunStatusChip, { + AgentRunStatus, +} from "@/components/agents/agent-run-status-chip"; + +export type AgentRunSummaryProps = { + agentID: string; + agentRunID: string; + status: AgentRunStatus; + title: string; + timestamp: number | Date; + selected?: boolean; + onClick?: () => void; + className?: string; +}; + +export default function AgentRunSummaryCard({ + agentID, + agentRunID, + status, + title, + timestamp, + selected = false, + onClick, + className, +}: AgentRunSummaryProps): React.ReactElement { + return ( + + + + +
+

+ {title} +

+ + {/* + + + + + + Pin into a template + + + + Rename + + + + Delete + + + */} +
+ +

+ Ran {moment(timestamp).fromNow()} +

+
+
+ ); +} diff --git a/autogpt_platform/frontend/src/components/agents/agent-runs-selector-list.tsx b/autogpt_platform/frontend/src/components/agents/agent-runs-selector-list.tsx new file mode 100644 index 0000000000..b452c26114 --- /dev/null +++ b/autogpt_platform/frontend/src/components/agents/agent-runs-selector-list.tsx @@ -0,0 +1,133 @@ +"use client"; +import React, { useState } from "react"; +import { Plus } from "lucide-react"; + +import { cn } from "@/lib/utils"; +import { + GraphExecutionMeta, + GraphMeta, + Schedule, +} from "@/lib/autogpt-server-api"; + +import { ScrollArea } from "@/components/ui/scroll-area"; +import { Button } from "@/components/agptui/Button"; +import { Badge } from "@/components/ui/badge"; + +import { agentRunStatusMap } from "@/components/agents/agent-run-status-chip"; +import AgentRunSummaryCard from "@/components/agents/agent-run-summary-card"; + +interface AgentRunsSelectorListProps { + agent: GraphMeta; + agentRuns: GraphExecutionMeta[]; + schedules: Schedule[]; + selectedView: { type: "run" | "schedule"; id?: string }; + onSelectRun: (id: string) => void; + onSelectSchedule: (schedule: Schedule) => void; + onDraftNewRun: () => void; + className?: string; +} + +export default function AgentRunsSelectorList({ + agent, + agentRuns, + schedules, + selectedView, + onSelectRun, + onSelectSchedule, + onDraftNewRun, + className, +}: AgentRunsSelectorListProps): React.ReactElement { + const [activeListTab, setActiveListTab] = useState<"runs" | "scheduled">( + "runs", + ); + + return ( + + ); +} diff --git a/autogpt_platform/frontend/src/components/agents/agent-schedule-details-view.tsx b/autogpt_platform/frontend/src/components/agents/agent-schedule-details-view.tsx new file mode 100644 index 0000000000..6a071ada9a --- /dev/null +++ b/autogpt_platform/frontend/src/components/agents/agent-schedule-details-view.tsx @@ -0,0 +1,141 @@ +"use client"; +import React, { useCallback, useMemo } from "react"; + +import { BlockIOSubType, GraphMeta, 
Schedule } from "@/lib/autogpt-server-api"; +import { useBackendAPI } from "@/lib/autogpt-server-api/context"; + +import { Card, CardContent, CardHeader, CardTitle } from "@/components/ui/card"; +import { AgentRunStatus } from "@/components/agents/agent-run-status-chip"; +import { Button } from "@/components/agptui/Button"; +import { Input } from "@/components/ui/input"; + +export default function AgentScheduleDetailsView({ + agent, + schedule, + onForcedRun, + agentActions, +}: { + agent: GraphMeta; + schedule: Schedule; + onForcedRun: (runID: string) => void; + agentActions: { label: string; callback: () => void }[]; +}): React.ReactNode { + const api = useBackendAPI(); + + const selectedRunStatus: AgentRunStatus = "scheduled"; + + const infoStats: { label: string; value: React.ReactNode }[] = useMemo(() => { + return [ + { + label: "Status", + value: + selectedRunStatus.charAt(0).toUpperCase() + + selectedRunStatus.slice(1), + }, + { + label: "Scheduled for", + value: schedule.next_run_time.toLocaleString(), + }, + ]; + }, [schedule, selectedRunStatus]); + + const agentRunInputs: Record< + string, + { title?: string; /* type: BlockIOSubType; */ value: any } + > = useMemo(() => { + // TODO: show (link to) preset - https://github.com/Significant-Gravitas/AutoGPT/issues/9168 + + // Add type info from agent input schema + return Object.fromEntries( + Object.entries(schedule.input_data).map(([k, v]) => [ + k, + { + title: agent.input_schema.properties[k].title, + /* TODO: type: agent.input_schema.properties[k].type */ + value: v, + }, + ]), + ); + }, [agent, schedule]); + + const runNow = useCallback( + () => + api + .executeGraph(agent.id, agent.version, schedule.input_data) + .then((run) => onForcedRun(run.graph_exec_id)), + [api, agent, schedule, onForcedRun], + ); + + const runActions: { label: string; callback: () => void }[] = useMemo( + () => [{ label: "Run now", callback: () => runNow() }], + [runNow], + ); + + return ( +
+
+ + + Info + + + +
+ {infoStats.map(({ label, value }) => ( +
+

{label}

+

{value}

+
+ ))} +
+
+
+ + + + Input + + + {agentRunInputs !== undefined ? ( + Object.entries(agentRunInputs).map(([key, { title, value }]) => ( +
+ + +
+ )) + ) : ( +

Loading...

+ )} +
+
+
+ + {/* Run / Agent Actions */} + +
+ ); +} diff --git a/autogpt_platform/frontend/src/components/agptui/AgentImageItem.tsx b/autogpt_platform/frontend/src/components/agptui/AgentImageItem.tsx index 935b341443..ff028e313a 100644 --- a/autogpt_platform/frontend/src/components/agptui/AgentImageItem.tsx +++ b/autogpt_platform/frontend/src/components/agptui/AgentImageItem.tsx @@ -92,7 +92,6 @@ export const AgentImageItem: React.FC = React.memo( {isVideoFile && playingVideoIndex !== index && (
diff --git a/autogpt_platform/frontend/src/components/agptui/BreadCrumbs.tsx b/autogpt_platform/frontend/src/components/agptui/BreadCrumbs.tsx index fff57dd7a6..21ed54deb4 100644 --- a/autogpt_platform/frontend/src/components/agptui/BreadCrumbs.tsx +++ b/autogpt_platform/frontend/src/components/agptui/BreadCrumbs.tsx @@ -31,7 +31,7 @@ export const BreadCrumbs: React.FC = ({ items }) => { {index < items.length - 1 && ( - + / )} diff --git a/autogpt_platform/frontend/src/components/agptui/Button.stories.tsx b/autogpt_platform/frontend/src/components/agptui/Button.stories.tsx index 2f97449f3a..42d3c55e4e 100644 --- a/autogpt_platform/frontend/src/components/agptui/Button.stories.tsx +++ b/autogpt_platform/frontend/src/components/agptui/Button.stories.tsx @@ -65,15 +65,12 @@ export const Interactive: Story = { export const Variants: Story = { render: (args) => (
- - diff --git a/autogpt_platform/frontend/src/components/agptui/Button.tsx b/autogpt_platform/frontend/src/components/agptui/Button.tsx index e2ffc1bd05..39f0536da3 100644 --- a/autogpt_platform/frontend/src/components/agptui/Button.tsx +++ b/autogpt_platform/frontend/src/components/agptui/Button.tsx @@ -5,16 +5,15 @@ import { cva, type VariantProps } from "class-variance-authority"; import { cn } from "@/lib/utils"; const buttonVariants = cva( - "inline-flex items-center justify-center whitespace-nowrap rounded-[80px] text-xl font-medium transition-colors focus-visible:outline-none focus-visible:ring-1 focus-visible:ring-neutral-950 disabled:pointer-events-none disabled:opacity-50 dark:focus-visible:ring-neutral-300 font-neue leading-9 tracking-tight", + "inline-flex items-center whitespace-nowrap font-medium transition-colors focus-visible:outline-none focus-visible:ring-1 focus-visible:ring-neutral-950 disabled:pointer-events-none disabled:opacity-50 dark:focus-visible:ring-neutral-300 font-neue leading-9 tracking-tight", { variants: { variant: { - default: - "bg-white border border-black/50 text-[#272727] hover:bg-neutral-100 dark:bg-neutral-800 dark:text-neutral-100 dark:hover:bg-neutral-700", destructive: "bg-red-600 text-neutral-50 border border-red-500/50 hover:bg-red-500/90 dark:bg-red-700 dark:text-neutral-50 dark:hover:bg-red-600", + accent: "bg-accent text-accent-foreground hover:bg-violet-500", outline: - "bg-white border border-black/50 text-[#272727] hover:bg-neutral-100 dark:bg-neutral-800 dark:text-neutral-100 dark:hover:bg-neutral-700", + "border border-black/50 text-[#272727] hover:bg-neutral-100 dark:bg-neutral-800 dark:text-neutral-100 dark:hover:bg-neutral-700", secondary: "bg-neutral-100 text-[#272727] border border-neutral-200 hover:bg-neutral-100/80 dark:bg-neutral-700 dark:text-neutral-100 dark:border-neutral-600 dark:hover:bg-neutral-600", ghost: @@ -22,17 +21,17 @@ const buttonVariants = cva( link: "text-[#272727] underline-offset-4 
hover:underline dark:text-neutral-100", }, size: { - default: - "h-10 px-4 py-2 text-sm sm:h-12 sm:px-5 sm:py-2.5 sm:text-base md:h-14 md:px-6 md:py-3 md:text-lg lg:h-[4.375rem] lg:px-[1.625rem] lg:py-[0.4375rem] lg:text-xl", - sm: "h-8 px-3 py-1.5 text-xs sm:h-9 sm:px-3.5 sm:py-2 sm:text-sm md:h-10 md:px-4 md:py-2 md:text-base lg:h-[3.125rem] lg:px-[1.25rem] lg:py-[0.3125rem] lg:text-sm", - lg: "h-12 px-5 py-2.5 text-lg sm:h-14 sm:px-6 sm:py-3 sm:text-xl md:h-16 md:px-7 md:py-3.5 md:text-2xl lg:h-[5.625rem] lg:px-[2rem] lg:py-[0.5625rem] lg:text-2xl", + default: "h-10 px-4 py-2 rounded-full text-sm", + sm: "h-8 px-3 py-1.5 rounded-full text-xs", + lg: "h-12 px-5 py-2.5 rounded-full text-lg", primary: - "h-10 w-28 sm:h-12 sm:w-32 md:h-[4.375rem] md:w-[11rem] lg:h-[3.125rem] lg:w-[7rem]", - icon: "h-10 w-10 sm:h-12 sm:w-12 md:h-14 md:w-14 lg:h-[4.375rem] lg:w-[4.375rem]", + "h-10 w-28 rounded-full sm:h-12 sm:w-32 md:h-[4.375rem] md:w-[11rem] lg:h-[3.125rem] lg:w-[7rem]", + icon: "h-10 w-10", + card: "h-12 p-5 agpt-rounded-card justify-center text-lg", }, }, defaultVariants: { - variant: "default", + variant: "outline", size: "default", }, }, @@ -43,13 +42,13 @@ export interface ButtonProps VariantProps { asChild?: boolean; variant?: - | "default" | "destructive" + | "accent" | "outline" | "secondary" | "ghost" | "link"; - size?: "default" | "sm" | "lg" | "primary" | "icon"; + size?: "default" | "sm" | "lg" | "primary" | "icon" | "card"; } const Button = React.forwardRef( diff --git a/autogpt_platform/frontend/src/components/agptui/CreatorInfoCard.tsx b/autogpt_platform/frontend/src/components/agptui/CreatorInfoCard.tsx index c53470faa3..ad8e614158 100644 --- a/autogpt_platform/frontend/src/components/agptui/CreatorInfoCard.tsx +++ b/autogpt_platform/frontend/src/components/agptui/CreatorInfoCard.tsx @@ -33,7 +33,7 @@ export const CreatorInfoCard: React.FC = ({
-
+
{username}
diff --git a/autogpt_platform/frontend/src/components/agptui/MobileNavBar.tsx b/autogpt_platform/frontend/src/components/agptui/MobileNavBar.tsx index ed3aa43dd1..4f8f589f1b 100644 --- a/autogpt_platform/frontend/src/components/agptui/MobileNavBar.tsx +++ b/autogpt_platform/frontend/src/components/agptui/MobileNavBar.tsx @@ -87,7 +87,7 @@ const PopoutMenuItem: React.FC<{ {getIcon(icon)}
{text}
@@ -150,7 +150,7 @@ export const MobileNavBar: React.FC = ({ exit={{ opacity: 0, y: -32, transition: { duration: 0.2 } }} className="w-screen rounded-b-2xl bg-white dark:bg-neutral-900" > -
+
= ({ {userName?.charAt(0) || "U"} -
-
+
+
{userName || "Unknown User"}
-
+
{userEmail || "No Email Set"}
diff --git a/autogpt_platform/frontend/src/components/agptui/Navbar.tsx b/autogpt_platform/frontend/src/components/agptui/Navbar.tsx index c9e1eaecf3..ced3ec6123 100644 --- a/autogpt_platform/frontend/src/components/agptui/Navbar.tsx +++ b/autogpt_platform/frontend/src/components/agptui/Navbar.tsx @@ -48,7 +48,7 @@ export const Navbar = async ({ links, menuItemGroups }: NavbarProps) => { return ( <> -
@@ -426,16 +424,14 @@ export const PublishAgentInfo: React.FC = ({
} + {trigger || }
diff --git a/autogpt_platform/frontend/src/components/integrations/credentials-input.tsx b/autogpt_platform/frontend/src/components/integrations/credentials-input.tsx index 9663c8cc5f..16d9f8d0a6 100644 --- a/autogpt_platform/frontend/src/components/integrations/credentials-input.tsx +++ b/autogpt_platform/frontend/src/components/integrations/credentials-input.tsx @@ -56,6 +56,7 @@ export const providerIcons: Record< React.FC<{ className?: string }> > = { anthropic: fallbackIcon, + apollo: fallbackIcon, e2b: fallbackIcon, github: FaGithub, google: FaGoogle, @@ -86,7 +87,9 @@ export const providerIcons: Record< unreal_speech: fallbackIcon, exa: fallbackIcon, hubspot: FaHubspot, + smartlead: fallbackIcon, todoist: fallbackIcon, + zerobounce: fallbackIcon, }; // --8<-- [end:ProviderIconsEmbed] diff --git a/autogpt_platform/frontend/src/components/integrations/credentials-provider.tsx b/autogpt_platform/frontend/src/components/integrations/credentials-provider.tsx index 990cc04f99..db98f646c7 100644 --- a/autogpt_platform/frontend/src/components/integrations/credentials-provider.tsx +++ b/autogpt_platform/frontend/src/components/integrations/credentials-provider.tsx @@ -18,6 +18,7 @@ const CREDENTIALS_PROVIDER_NAMES = Object.values( // --8<-- [start:CredentialsProviderNames] const providerDisplayNames: Record = { anthropic: "Anthropic", + apollo: "Apollo", discord: "Discord", d_id: "D-ID", e2b: "E2B", @@ -40,8 +41,9 @@ const providerDisplayNames: Record = { openweathermap: "OpenWeatherMap", open_router: "Open Router", pinecone: "Pinecone", - slant3d: "Slant3D", screenshotone: "ScreenshotOne", + slant3d: "Slant3D", + smartlead: "SmartLead", smtp: "SMTP", reddit: "Reddit", replicate: "Replicate", @@ -49,6 +51,7 @@ const providerDisplayNames: Record = { twitter: "Twitter", todoist: "Todoist", unreal_speech: "Unreal Speech", + zerobounce: "ZeroBounce", } as const; // --8<-- [end:CredentialsProviderNames] diff --git 
a/autogpt_platform/frontend/src/components/monitor/AgentFlowList.tsx b/autogpt_platform/frontend/src/components/monitor/AgentFlowList.tsx index 7c210c5ff6..df108def49 100644 --- a/autogpt_platform/frontend/src/components/monitor/AgentFlowList.tsx +++ b/autogpt_platform/frontend/src/components/monitor/AgentFlowList.tsx @@ -1,4 +1,4 @@ -import { GraphExecution, LibraryAgent } from "@/lib/autogpt-server-api"; +import { GraphExecutionMeta, LibraryAgent } from "@/lib/autogpt-server-api"; import React from "react"; import { Card, CardContent, CardHeader, CardTitle } from "@/components/ui/card"; import { Button } from "@/components/ui/button"; @@ -37,7 +37,7 @@ export const AgentFlowList = ({ className, }: { flows: LibraryAgent[]; - executions?: GraphExecution[]; + executions?: GraphExecutionMeta[]; selectedFlow: LibraryAgent | null; onSelectFlow: (f: LibraryAgent) => void; className?: string; @@ -106,7 +106,7 @@ export const AgentFlowList = ({ {flows .map((flow) => { let runCount = 0, - lastRun: GraphExecution | null = null; + lastRun: GraphExecutionMeta | null = null; if (executions) { const _flowRuns = executions.filter( (r) => r.graph_id == flow.agent_id, diff --git a/autogpt_platform/frontend/src/components/monitor/FlowInfo.tsx b/autogpt_platform/frontend/src/components/monitor/FlowInfo.tsx index 0a3c18c392..91387b1cd5 100644 --- a/autogpt_platform/frontend/src/components/monitor/FlowInfo.tsx +++ b/autogpt_platform/frontend/src/components/monitor/FlowInfo.tsx @@ -1,6 +1,6 @@ import React, { useEffect, useState, useCallback } from "react"; import { - GraphExecution, + GraphExecutionMeta, Graph, safeCopyGraph, BlockUIType, @@ -32,7 +32,6 @@ import { DialogFooter, } from "@/components/ui/dialog"; import { useToast } from "@/components/ui/use-toast"; -import { CronScheduler } from "@/components/cronScheduler"; import RunnerInputUI from "@/components/runner-ui/RunnerInputUI"; import useAgentGraph from "@/hooks/useAgentGraph"; import { useBackendAPI } from 
"@/lib/autogpt-server-api/context"; @@ -40,7 +39,7 @@ import { useBackendAPI } from "@/lib/autogpt-server-api/context"; export const FlowInfo: React.FC< React.HTMLAttributes & { flow: LibraryAgent; - executions: GraphExecution[]; + executions: GraphExecutionMeta[]; flowVersion?: number | "all"; refresh: () => void; } diff --git a/autogpt_platform/frontend/src/components/monitor/FlowRunInfo.tsx b/autogpt_platform/frontend/src/components/monitor/FlowRunInfo.tsx index 4fcff1782a..4968c5cc27 100644 --- a/autogpt_platform/frontend/src/components/monitor/FlowRunInfo.tsx +++ b/autogpt_platform/frontend/src/components/monitor/FlowRunInfo.tsx @@ -1,6 +1,6 @@ import React, { useCallback, useEffect, useState } from "react"; import { - GraphExecution, + GraphExecutionMeta, LibraryAgent, NodeExecutionResult, SpecialBlockID, @@ -18,7 +18,7 @@ import { useBackendAPI } from "@/lib/autogpt-server-api/context"; export const FlowRunInfo: React.FC< React.HTMLAttributes & { flow: LibraryAgent; - execution: GraphExecution; + execution: GraphExecutionMeta; } > = ({ flow, execution, ...props }) => { const [isOutputOpen, setIsOutputOpen] = useState(false); @@ -26,10 +26,9 @@ export const FlowRunInfo: React.FC< const api = useBackendAPI(); const fetchBlockResults = useCallback(async () => { - const executionResults = await api.getGraphExecutionInfo( - flow.agent_id, - execution.execution_id, - ); + const executionResults = ( + await api.getGraphExecutionInfo(flow.agent_id, execution.execution_id) + ).node_executions; // Create a map of the latest COMPLETED execution results of output nodes by node_id const latestCompletedResults = executionResults diff --git a/autogpt_platform/frontend/src/components/monitor/FlowRunStatusBadge.tsx b/autogpt_platform/frontend/src/components/monitor/FlowRunStatusBadge.tsx index 47381b0b83..d74b7cedf1 100644 --- a/autogpt_platform/frontend/src/components/monitor/FlowRunStatusBadge.tsx +++ 
b/autogpt_platform/frontend/src/components/monitor/FlowRunStatusBadge.tsx @@ -1,10 +1,10 @@ import React from "react"; import { Badge } from "@/components/ui/badge"; import { cn } from "@/lib/utils"; -import { GraphExecution } from "@/lib/autogpt-server-api"; +import { GraphExecutionMeta } from "@/lib/autogpt-server-api"; export const FlowRunStatusBadge: React.FC<{ - status: GraphExecution["status"]; + status: GraphExecutionMeta["status"]; className?: string; }> = ({ status, className }) => ( void; + selectedRun?: GraphExecutionMeta | null; + onSelectRun: (r: GraphExecutionMeta) => void; }> = ({ flows, executions, selectedRun, onSelectRun, className }) => ( diff --git a/autogpt_platform/frontend/src/components/monitor/FlowRunsStatus.tsx b/autogpt_platform/frontend/src/components/monitor/FlowRunsStatus.tsx index a01c7709ad..ef004bebf7 100644 --- a/autogpt_platform/frontend/src/components/monitor/FlowRunsStatus.tsx +++ b/autogpt_platform/frontend/src/components/monitor/FlowRunsStatus.tsx @@ -1,5 +1,5 @@ import React, { useState } from "react"; -import { GraphExecution, LibraryAgent } from "@/lib/autogpt-server-api"; +import { GraphExecutionMeta, LibraryAgent } from "@/lib/autogpt-server-api"; import { CardTitle } from "@/components/ui/card"; import { Button } from "@/components/ui/button"; import { @@ -12,7 +12,7 @@ import { FlowRunsTimeline } from "@/components/monitor/FlowRunsTimeline"; export const FlowRunsStatus: React.FC<{ flows: LibraryAgent[]; - executions: GraphExecution[]; + executions: GraphExecutionMeta[]; title?: string; className?: string; }> = ({ flows, executions: executions, title, className }) => { diff --git a/autogpt_platform/frontend/src/components/monitor/FlowRunsTimeline.tsx b/autogpt_platform/frontend/src/components/monitor/FlowRunsTimeline.tsx index 082039e7ba..91f7bab093 100644 --- a/autogpt_platform/frontend/src/components/monitor/FlowRunsTimeline.tsx +++ b/autogpt_platform/frontend/src/components/monitor/FlowRunsTimeline.tsx @@ -1,4 +1,4 @@ 
-import { GraphExecution, LibraryAgent } from "@/lib/autogpt-server-api"; +import { GraphExecutionMeta, LibraryAgent } from "@/lib/autogpt-server-api"; import { ComposedChart, DefaultLegendContentProps, @@ -23,7 +23,7 @@ export const FlowRunsTimeline = ({ className, }: { flows: LibraryAgent[]; - executions: GraphExecution[]; + executions: GraphExecutionMeta[]; dataMin: "dataMin" | number; className?: string; }) => ( @@ -60,8 +60,10 @@ export const FlowRunsTimeline = ({ { if (payload && payload.length) { - const data: GraphExecution & { time: number; _duration: number } = - payload[0].payload; + const data: GraphExecutionMeta & { + time: number; + _duration: number; + } = payload[0].payload; const flow = flows.find((f) => f.agent_id === data.graph_id); return ( diff --git a/autogpt_platform/frontend/src/components/monitor/scheduleTable.tsx b/autogpt_platform/frontend/src/components/monitor/scheduleTable.tsx index a16acaf865..adae51cdff 100644 --- a/autogpt_platform/frontend/src/components/monitor/scheduleTable.tsx +++ b/autogpt_platform/frontend/src/components/monitor/scheduleTable.tsx @@ -242,7 +242,7 @@ export const SchedulesTable = ({ {schedule.graph_version} - {new Date(schedule.next_run_time).toLocaleString()} + {schedule.next_run_time.toLocaleString()} diff --git a/autogpt_platform/frontend/src/components/ui/card.tsx b/autogpt_platform/frontend/src/components/ui/card.tsx index e0a3ba1b3d..96dcfa01da 100644 --- a/autogpt_platform/frontend/src/components/ui/card.tsx +++ b/autogpt_platform/frontend/src/components/ui/card.tsx @@ -8,10 +8,7 @@ const Card = React.forwardRef< >(({ className, ...props }, ref) => (
)); diff --git a/autogpt_platform/frontend/src/hooks/useAgentGraph.tsx b/autogpt_platform/frontend/src/hooks/useAgentGraph.tsx index f969da1c83..0a97caac60 100644 --- a/autogpt_platform/frontend/src/hooks/useAgentGraph.tsx +++ b/autogpt_platform/frontend/src/hooks/useAgentGraph.tsx @@ -606,8 +606,11 @@ export default function useAgentGraph( } const fetchExecutions = async () => { - const results = await api.getGraphExecutionInfo(flowID, flowExecutionID); - setUpdateQueue((prev) => [...prev, ...results]); + const execution = await api.getGraphExecutionInfo( + flowID, + flowExecutionID, + ); + setUpdateQueue((prev) => [...prev, ...execution.node_executions]); // Track execution until completed const pendingNodeExecutions: Set = new Set(); diff --git a/autogpt_platform/frontend/src/lib/autogpt-server-api/client.ts b/autogpt_platform/frontend/src/lib/autogpt-server-api/client.ts index 832148844b..e638b41a15 100644 --- a/autogpt_platform/frontend/src/lib/autogpt-server-api/client.ts +++ b/autogpt_platform/frontend/src/lib/autogpt-server-api/client.ts @@ -14,6 +14,7 @@ import { CredentialsDeleteResponse, CredentialsMetaResponse, GraphExecution, + GraphExecutionMeta, Graph, GraphCreatable, GraphMeta, @@ -161,10 +162,6 @@ export default class BackendAPI { return this._get(`/graphs`); } - getExecutions(): Promise { - return this._get(`/executions`); - } - getGraph( id: string, version?: number, @@ -212,22 +209,37 @@ export default class BackendAPI { return this._request("POST", `/graphs/${id}/execute/${version}`, inputData); } + getExecutions(): Promise { + return this._get(`/executions`); + } + + getGraphExecutions(graphID: string): Promise { + return this._get(`/graphs/${graphID}/executions`); + } + async getGraphExecutionInfo( graphID: string, runID: string, - ): Promise { - return (await this._get(`/graphs/${graphID}/executions/${runID}`)).map( + ): Promise { + const result = await this._get(`/graphs/${graphID}/executions/${runID}`); + result.node_executions = 
result.node_executions.map( parseNodeExecutionResultTimestamps, ); + return result; } async stopGraphExecution( graphID: string, runID: string, - ): Promise { - return ( - await this._request("POST", `/graphs/${graphID}/executions/${runID}/stop`) - ).map(parseNodeExecutionResultTimestamps); + ): Promise { + const result = await this._request( + "POST", + `/graphs/${graphID}/executions/${runID}/stop`, + ); + result.node_executions = result.node_executions.map( + parseNodeExecutionResultTimestamps, + ); + return result; } oAuthLogin( @@ -484,15 +496,19 @@ export default class BackendAPI { } async createSchedule(schedule: ScheduleCreatable): Promise { - return this._request("POST", `/schedules`, schedule); + return this._request("POST", `/schedules`, schedule).then( + parseScheduleTimestamp, + ); } - async deleteSchedule(scheduleId: string): Promise { + async deleteSchedule(scheduleId: string): Promise<{ id: string }> { return this._request("DELETE", `/schedules/${scheduleId}`); } async listSchedules(): Promise { - return this._get(`/schedules`); + return this._get(`/schedules`).then((schedules) => + schedules.map(parseScheduleTimestamp), + ); } private async _uploadFile(path: string, file: File): Promise { @@ -824,3 +840,10 @@ function parseNodeExecutionResultTimestamps(result: any): NodeExecutionResult { end_time: result.end_time ? 
new Date(result.end_time) : undefined, }; } + +function parseScheduleTimestamp(result: any): Schedule { + return { + ...result, + next_run_time: new Date(result.next_run_time), + }; +} diff --git a/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts b/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts index 9734ad2bea..56d9b06f5f 100644 --- a/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts +++ b/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts @@ -41,6 +41,8 @@ export type BlockIOSubSchema = | BlockIOSimpleTypeSubSchema | BlockIOCombinedTypeSubSchema; +export type BlockIOSubType = BlockIOSimpleTypeSubSchema["type"]; + export type BlockIOSimpleTypeSubSchema = | BlockIOObjectSubSchema | BlockIOCredentialsSubSchema @@ -112,6 +114,7 @@ export type Credentials = // --8<-- [start:BlockIOCredentialsSubSchema] export const PROVIDER_NAMES = { ANTHROPIC: "anthropic", + APOLLO: "apollo", D_ID: "d_id", DISCORD: "discord", E2B: "e2b", @@ -134,8 +137,9 @@ export const PROVIDER_NAMES = { OPENWEATHERMAP: "openweathermap", OPEN_ROUTER: "open_router", PINECONE: "pinecone", - SLANT3D: "slant3d", SCREENSHOTONE: "screenshotone", + SLANT3D: "slant3d", + SMARTLEAD: "smartlead", SMTP: "smtp", TWITTER: "twitter", REPLICATE: "replicate", @@ -143,14 +147,14 @@ export const PROVIDER_NAMES = { REVID: "revid", UNREAL_SPEECH: "unreal_speech", TODOIST: "todoist", + ZEROBOUNCE: "zerobounce", } as const; // --8<-- [end:BlockIOCredentialsSubSchema] export type CredentialsProviderName = (typeof PROVIDER_NAMES)[keyof typeof PROVIDER_NAMES]; -export type BlockIOCredentialsSubSchema = BlockIOSubSchemaMeta & { - type: "object"; +export type BlockIOCredentialsSubSchema = BlockIOObjectSubSchema & { /* Mirror of backend/data/model.py:CredentialsFieldSchemaExtra */ credentials_provider: CredentialsProviderName[]; credentials_scopes?: string[]; @@ -167,21 +171,17 @@ export type BlockIONullSubSchema = BlockIOSubSchemaMeta & { // At the time of writing, combined 
schemas only occur on the first nested level in a // block schema. It is typed this way to make the use of these objects less tedious. -type BlockIOCombinedTypeSubSchema = BlockIOSubSchemaMeta & - ( +type BlockIOCombinedTypeSubSchema = BlockIOSubSchemaMeta & { type: never } & ( | { - type: "allOf"; allOf: [BlockIOSimpleTypeSubSchema]; secret?: boolean; } | { - type: "anyOf"; anyOf: BlockIOSimpleTypeSubSchema[]; default?: string | number | boolean | null; secret?: boolean; } | { - type: "oneOf"; oneOf: BlockIOSimpleTypeSubSchema[]; default?: string | number | boolean | null; secret?: boolean; @@ -216,8 +216,8 @@ export type LinkCreatable = Omit & { id?: string; }; -/* Mirror of backend/data/graph.py:GraphExecution */ -export type GraphExecution = { +/* Mirror of backend/data/graph.py:GraphExecutionMeta */ +export type GraphExecutionMeta = { execution_id: string; started_at: number; ended_at: number; @@ -226,6 +226,14 @@ export type GraphExecution = { status: "QUEUED" | "RUNNING" | "COMPLETED" | "TERMINATED" | "FAILED"; graph_id: GraphID; graph_version: number; + preset_id?: string; +}; + +/* Mirror of backend/data/graph.py:GraphExecution */ +export type GraphExecution = GraphExecutionMeta & { + inputs: Record; + outputs: Record>; + node_executions: NodeExecutionResult[]; }; export type GraphMeta = { @@ -234,12 +242,27 @@ export type GraphMeta = { is_active: boolean; name: string; description: string; - input_schema: BlockIOObjectSubSchema; - output_schema: BlockIOObjectSubSchema; + input_schema: GraphIOSchema; + output_schema: GraphIOSchema; }; export type GraphID = Brand; +/* Derived from backend/data/graph.py:Graph._generate_schema() */ +export type GraphIOSchema = { + type: "object"; + properties: { [key: string]: GraphIOSubSchema }; + required: (keyof BlockIORootSchema["properties"])[]; +}; +export type GraphIOSubSchema = Omit< + BlockIOSubSchemaMeta, + "placeholder" | "depends_on" | "hidden" +> & { + type: never; // bodge to avoid type checking hell; doesn't 
exist at runtime + default?: string; + secret: boolean; +}; + /* Mirror of backend/data/graph.py:Graph */ export type Graph = GraphMeta & { nodes: Array; @@ -253,8 +276,8 @@ export type GraphUpdateable = Omit< version?: number; is_active?: boolean; links: Array; - input_schema?: BlockIOObjectSubSchema; - output_schema?: BlockIOObjectSubSchema; + input_schema?: GraphIOSchema; + output_schema?: GraphIOSchema; }; export type GraphCreatable = Omit & { id?: string }; @@ -550,7 +573,7 @@ export type Schedule = { graph_id: GraphID; graph_version: number; input_data: { [key: string]: any }; - next_run_time: string; + next_run_time: Date; }; export type ScheduleCreatable = { diff --git a/autogpt_platform/frontend/tailwind.config.ts b/autogpt_platform/frontend/tailwind.config.ts index d2616f98a4..51f2257dd8 100644 --- a/autogpt_platform/frontend/tailwind.config.ts +++ b/autogpt_platform/frontend/tailwind.config.ts @@ -18,7 +18,7 @@ const config = { mono: ["var(--font-geist-mono)"], // Include the custom font family neue: ['"PP Neue Montreal TT"', "sans-serif"], - poppin: ["var(--font-poppins)"], + poppins: ["var(--font-poppins)"], inter: ["var(--font-inter)"], }, colors: { @@ -95,26 +95,20 @@ const config = { 28: "7rem", 32: "8rem", 36: "9rem", - 39: "9.875rem", 40: "10rem", 44: "11rem", 48: "12rem", 52: "13rem", 56: "14rem", - 69: "14.875rem", 60: "15rem", - 63: "15.875rem", 64: "16rem", - 68: "17.75rem", + 68: "17rem", + 70: "17.5rem", + 71: "17.75rem", 72: "18rem", - 77: "18.5625rem", + 76: "19rem", 80: "20rem", - 89: "22.5rem", 96: "24rem", - 110: "27.5rem", - 139: "37.1875rem", - 167: "41.6875rem", - 225: "56.25rem", }, borderRadius: { lg: "var(--radius)",