diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 293a9e2d43..f262441f0e 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -170,6 +170,16 @@ repos: files: ^classic/benchmark/(agbenchmark|tests)/((?!reports).)*[/.] args: [--config=classic/benchmark/.flake8] + - repo: local + hooks: + - id: prettier + name: Format (Prettier) - AutoGPT Platform - Frontend + alias: format-platform-frontend + entry: bash -c 'cd autogpt_platform/frontend && npx prettier --write $(echo "$@" | sed "s|autogpt_platform/frontend/||g")' -- + files: ^autogpt_platform/frontend/ + types: [file] + language: system + - repo: local # To have watertight type checking, we check *all* the files in an affected # project. To trigger on poetry.lock we also reset the file `types` filter. @@ -221,6 +231,16 @@ repos: language: system pass_filenames: false + - repo: local + hooks: + - id: tsc + name: Typecheck - AutoGPT Platform - Frontend + entry: bash -c 'cd autogpt_platform/frontend && npm run type-check' + files: ^autogpt_platform/frontend/ + types: [file] + language: system + pass_filenames: false + - repo: local hooks: - id: pytest diff --git a/autogpt_platform/autogpt_libs/autogpt_libs/feature_flag/client.py b/autogpt_platform/autogpt_libs/autogpt_libs/feature_flag/client.py index dde516c1d8..9aed891706 100644 --- a/autogpt_platform/autogpt_libs/autogpt_libs/feature_flag/client.py +++ b/autogpt_platform/autogpt_libs/autogpt_libs/feature_flag/client.py @@ -13,7 +13,6 @@ from typing_extensions import ParamSpec from .config import SETTINGS logger = logging.getLogger(__name__) -logging.basicConfig(level=logging.DEBUG) P = ParamSpec("P") T = TypeVar("T") diff --git a/autogpt_platform/backend/.env.example b/autogpt_platform/backend/.env.example index 11383b9589..8b28244f42 100644 --- a/autogpt_platform/backend/.env.example +++ b/autogpt_platform/backend/.env.example @@ -165,6 +165,15 @@ MEM0_API_KEY= # Nvidia NVIDIA_API_KEY= +# Apollo +APOLLO_API_KEY= + +# SmartLead +SMARTLEAD_API_KEY= + +# ZeroBounce +ZEROBOUNCE_API_KEY= + # Logging Configuration LOG_LEVEL=INFO ENABLE_CLOUD_LOGGING=false diff --git a/autogpt_platform/backend/backend/blocks/apollo/_api.py b/autogpt_platform/backend/backend/blocks/apollo/_api.py new file mode 100644 index 0000000000..157235ff0f --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/apollo/_api.py @@ -0,0 +1,108 @@ +import logging +from typing import List + +from backend.blocks.apollo._auth import ApolloCredentials +from backend.blocks.apollo.models import ( + Contact, + Organization, + SearchOrganizationsRequest, + SearchOrganizationsResponse, + SearchPeopleRequest, + SearchPeopleResponse, +) +from backend.util.request import Requests + +logger = logging.getLogger(name=__name__) + + +class ApolloClient: + """Client for the Apollo API""" + + API_URL = "https://api.apollo.io/api/v1" + + def __init__(self, credentials: ApolloCredentials): + self.credentials = credentials + self.requests = Requests() + + def _get_headers(self) -> dict[str, str]: + return {"x-api-key": self.credentials.api_key.get_secret_value()} + + def search_people(self, query: SearchPeopleRequest) -> List[Contact]: + """Search for people in Apollo""" + response = self.requests.get( + f"{self.API_URL}/mixed_people/search", + headers=self._get_headers(), + params=query.model_dump(exclude={"credentials", "max_results"}), + ) + parsed_response = SearchPeopleResponse(**response.json()) + if parsed_response.pagination.total_entries == 0: + return [] + + people = parsed_response.people + + # handle 
pagination: fetch more pages until we reach max_results or run out of results
+        if (
+            query.max_results is not None
+            and query.max_results < parsed_response.pagination.total_entries
+            and len(people) < query.max_results
+        ):
+            while (
+                len(people) < query.max_results
+                and query.page < parsed_response.pagination.total_pages
+                and len(parsed_response.people) > 0
+            ):
+                query.page += 1
+                response = self.requests.get(
+                    f"{self.API_URL}/mixed_people/search",
+                    headers=self._get_headers(),
+                    params=query.model_dump(exclude={"credentials", "max_results"}),
+                )
+                parsed_response = SearchPeopleResponse(**response.json())
+                people.extend(parsed_response.people[: query.max_results - len(people)])
+
+        logger.info(f"Found {len(people)} people")
+        return people[: query.max_results] if query.max_results else people
+
+    def search_organizations(
+        self, query: SearchOrganizationsRequest
+    ) -> List[Organization]:
+        """Search for organizations in Apollo"""
+        response = self.requests.get(
+            f"{self.API_URL}/mixed_companies/search",
+            headers=self._get_headers(),
+            params=query.model_dump(exclude={"credentials", "max_results"}),
+        )
+        parsed_response = SearchOrganizationsResponse(**response.json())
+        if parsed_response.pagination.total_entries == 0:
+            return []
+
+        organizations = parsed_response.organizations
+
+        # handle pagination: same approach as search_people
+        if (
+            query.max_results is not None
+            and query.max_results < parsed_response.pagination.total_entries
+            and len(organizations) < query.max_results
+        ):
+            while (
+                len(organizations) < query.max_results
+                and query.page < parsed_response.pagination.total_pages
+                and len(parsed_response.organizations) > 0
+            ):
+                query.page += 1
+                response = self.requests.get(
+                    f"{self.API_URL}/mixed_companies/search",
+                    headers=self._get_headers(),
+                    params=query.model_dump(exclude={"credentials", "max_results"}),
+                )
+                parsed_response = SearchOrganizationsResponse(**response.json())
+                organizations.extend(
+                    parsed_response.organizations[
+                        : query.max_results - len(organizations)
+                    ]
+                )
+
+        logger.info(f"Found {len(organizations)} organizations")
+        return (
+            organizations[: query.max_results] if query.max_results else organizations
+        )
diff --git a/autogpt_platform/backend/backend/blocks/apollo/_auth.py b/autogpt_platform/backend/backend/blocks/apollo/_auth.py
new file mode 100644
index 0000000000..c813c72d99
--- /dev/null
+++ b/autogpt_platform/backend/backend/blocks/apollo/_auth.py
@@ -0,0 +1,35 @@
+from typing import Literal
+
+from pydantic import SecretStr
+
+from backend.data.model import APIKeyCredentials, CredentialsField, CredentialsMetaInput
+from backend.integrations.providers import ProviderName
+
+ApolloCredentials = APIKeyCredentials
+ApolloCredentialsInput = CredentialsMetaInput[
+    Literal[ProviderName.APOLLO],
+    Literal["api_key"],
+]
+
+TEST_CREDENTIALS = APIKeyCredentials(
+    id="01234567-89ab-cdef-0123-456789abcdef",
+    provider="apollo",
+    api_key=SecretStr("mock-apollo-api-key"),
+    title="Mock Apollo API key",
+    expires_at=None,
+)
+TEST_CREDENTIALS_INPUT = {
+    "provider": TEST_CREDENTIALS.provider,
+    "id": TEST_CREDENTIALS.id,
+    "type": TEST_CREDENTIALS.type,
+    "title": TEST_CREDENTIALS.title,
+}
+
+
+def ApolloCredentialsField() -> ApolloCredentialsInput:
+    """
+    Creates an Apollo credentials input on a block.
+ """ + return CredentialsField( + description="The Apollo integration can be used with an API Key.", + ) diff --git a/autogpt_platform/backend/backend/blocks/apollo/models.py b/autogpt_platform/backend/backend/blocks/apollo/models.py new file mode 100644 index 0000000000..b9fa621ddc --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/apollo/models.py @@ -0,0 +1,543 @@ +from enum import Enum +from typing import Any, Optional + +from pydantic import BaseModel + +from backend.data.model import SchemaField + + +class PrimaryPhone(BaseModel): + """A primary phone in Apollo""" + + number: str + source: str + sanitized_number: str + + +class SenorityLevels(str, Enum): + """Seniority levels in Apollo""" + + OWNER = "owner" + FOUNDER = "founder" + C_SUITE = "c_suite" + PARTNER = "partner" + VP = "vp" + HEAD = "head" + DIRECTOR = "director" + MANAGER = "manager" + SENIOR = "senior" + ENTRY = "entry" + INTERN = "intern" + + +class ContactEmailStatuses(str, Enum): + """Contact email statuses in Apollo""" + + VERIFIED = "verified" + UNVERIFIED = "unverified" + LIKELY_TO_ENGAGE = "likely_to_engage" + UNAVAILABLE = "unavailable" + + +class RuleConfigStatus(BaseModel): + """A rule config status in Apollo""" + + _id: str + created_at: str + rule_action_config_id: str + rule_config_id: str + status_cd: str + updated_at: str + id: str + key: str + + +class ContactCampaignStatus(BaseModel): + """A contact campaign status in Apollo""" + + id: str + emailer_campaign_id: str + send_email_from_user_id: str + inactive_reason: str + status: str + added_at: str + added_by_user_id: str + finished_at: str + paused_at: str + auto_unpause_at: str + send_email_from_email_address: str + send_email_from_email_account_id: str + manually_set_unpause: str + failure_reason: str + current_step_id: str + in_response_to_emailer_message_id: str + cc_emails: str + bcc_emails: str + to_emails: str + + +class Account(BaseModel): + """An account in Apollo""" + + id: str + name: str + website_url: str + blog_url: str + angellist_url: str + linkedin_url: str + twitter_url: str + facebook_url: str + primary_phone: PrimaryPhone + languages: list[str] + alexa_ranking: int + phone: str + linkedin_uid: str + founded_year: int + publicly_traded_symbol: str + publicly_traded_exchange: str + logo_url: str + chrunchbase_url: str + primary_domain: str + domain: str + team_id: str + organization_id: str + account_stage_id: str + source: str + original_source: str + creator_id: str + owner_id: str + created_at: str + phone_status: str + hubspot_id: str + salesforce_id: str + crm_owner_id: str + parent_account_id: str + sanitized_phone: str + # no listed type on the API docs + account_playbook_statues: list[Any] + account_rule_config_statuses: list[RuleConfigStatus] + existence_level: str + label_ids: list[str] + typed_custom_fields: Any + custom_field_errors: Any + modality: str + source_display_name: str + salesforce_record_id: str + crm_record_url: str + + +class ContactEmail(BaseModel): + """A contact email in Apollo""" + + email: str = "" + email_md5: str = "" + email_sha256: str = "" + email_status: str = "" + email_source: str = "" + extrapolated_email_confidence: str = "" + position: int = 0 + email_from_customer: str = "" + free_domain: bool = True + + +class EmploymentHistory(BaseModel): + """An employment history in Apollo""" + + class Config: + extra = "allow" + arbitrary_types_allowed = True + from_attributes = True + populate_by_name = True + + _id: Optional[str] = None + created_at: Optional[str] = None + current: 
Optional[bool] = None + degree: Optional[str] = None + description: Optional[str] = None + emails: Optional[str] = None + end_date: Optional[str] = None + grade_level: Optional[str] = None + kind: Optional[str] = None + major: Optional[str] = None + organization_id: Optional[str] = None + organization_name: Optional[str] = None + raw_address: Optional[str] = None + start_date: Optional[str] = None + title: Optional[str] = None + updated_at: Optional[str] = None + id: Optional[str] = None + key: Optional[str] = None + + +class Breadcrumb(BaseModel): + """A breadcrumb in Apollo""" + + label: Optional[str] = "N/A" + signal_field_name: Optional[str] = "N/A" + value: str | list | None = "N/A" + display_name: Optional[str] = "N/A" + + +class TypedCustomField(BaseModel): + """A typed custom field in Apollo""" + + id: Optional[str] = "N/A" + value: Optional[str] = "N/A" + + +class Pagination(BaseModel): + """Pagination in Apollo""" + + class Config: + extra = "allow" # Allow extra fields + arbitrary_types_allowed = True # Allow any type + from_attributes = True # Allow from_orm + populate_by_name = True # Allow field aliases to work both ways + + page: int = 0 + per_page: int = 0 + total_entries: int = 0 + total_pages: int = 0 + + +class DialerFlags(BaseModel): + """A dialer flags in Apollo""" + + country_name: str + country_enabled: bool + high_risk_calling_enabled: bool + potential_high_risk_number: bool + + +class PhoneNumber(BaseModel): + """A phone number in Apollo""" + + raw_number: str = "" + sanitized_number: str = "" + type: str = "" + position: int = 0 + status: str = "" + dnc_status: str = "" + dnc_other_info: str = "" + dailer_flags: DialerFlags = DialerFlags( + country_name="", + country_enabled=True, + high_risk_calling_enabled=True, + potential_high_risk_number=True, + ) + + +class Organization(BaseModel): + """An organization in Apollo""" + + class Config: + extra = "allow" + arbitrary_types_allowed = True + from_attributes = True + populate_by_name = True + + id: Optional[str] = "N/A" + name: Optional[str] = "N/A" + website_url: Optional[str] = "N/A" + blog_url: Optional[str] = "N/A" + angellist_url: Optional[str] = "N/A" + linkedin_url: Optional[str] = "N/A" + twitter_url: Optional[str] = "N/A" + facebook_url: Optional[str] = "N/A" + primary_phone: Optional[PrimaryPhone] = PrimaryPhone( + number="N/A", source="N/A", sanitized_number="N/A" + ) + languages: list[str] = [] + alexa_ranking: Optional[int] = 0 + phone: Optional[str] = "N/A" + linkedin_uid: Optional[str] = "N/A" + founded_year: Optional[int] = 0 + publicly_traded_symbol: Optional[str] = "N/A" + publicly_traded_exchange: Optional[str] = "N/A" + logo_url: Optional[str] = "N/A" + chrunchbase_url: Optional[str] = "N/A" + primary_domain: Optional[str] = "N/A" + sanitized_phone: Optional[str] = "N/A" + owned_by_organization_id: Optional[str] = "N/A" + intent_strength: Optional[str] = "N/A" + show_intent: bool = True + has_intent_signal_account: Optional[bool] = True + intent_signal_account: Optional[str] = "N/A" + + +class Contact(BaseModel): + """A contact in Apollo""" + + class Config: + extra = "allow" + arbitrary_types_allowed = True + from_attributes = True + populate_by_name = True + + contact_roles: list[Any] = [] + id: Optional[str] = None + first_name: Optional[str] = None + last_name: Optional[str] = None + name: Optional[str] = None + linkedin_url: Optional[str] = None + title: Optional[str] = None + contact_stage_id: Optional[str] = None + owner_id: Optional[str] = None + creator_id: Optional[str] = None + 
person_id: Optional[str] = None + email_needs_tickling: bool = True + organization_name: Optional[str] = None + source: Optional[str] = None + original_source: Optional[str] = None + organization_id: Optional[str] = None + headline: Optional[str] = None + photo_url: Optional[str] = None + present_raw_address: Optional[str] = None + linkededin_uid: Optional[str] = None + extrapolated_email_confidence: Optional[float] = None + salesforce_id: Optional[str] = None + salesforce_lead_id: Optional[str] = None + salesforce_contact_id: Optional[str] = None + saleforce_account_id: Optional[str] = None + crm_owner_id: Optional[str] = None + created_at: Optional[str] = None + emailer_campaign_ids: list[str] = [] + direct_dial_status: Optional[str] = None + direct_dial_enrichment_failed_at: Optional[str] = None + email_status: Optional[str] = None + email_source: Optional[str] = None + account_id: Optional[str] = None + last_activity_date: Optional[str] = None + hubspot_vid: Optional[str] = None + hubspot_company_id: Optional[str] = None + crm_id: Optional[str] = None + sanitized_phone: Optional[str] = None + merged_crm_ids: Optional[str] = None + updated_at: Optional[str] = None + queued_for_crm_push: bool = True + suggested_from_rule_engine_config_id: Optional[str] = None + email_unsubscribed: Optional[str] = None + label_ids: list[Any] = [] + has_pending_email_arcgate_request: bool = True + has_email_arcgate_request: bool = True + existence_level: Optional[str] = None + email: Optional[str] = None + email_from_customer: Optional[str] = None + typed_custom_fields: list[TypedCustomField] = [] + custom_field_errors: Any = None + salesforce_record_id: Optional[str] = None + crm_record_url: Optional[str] = None + email_status_unavailable_reason: Optional[str] = None + email_true_status: Optional[str] = None + updated_email_true_status: bool = True + contact_rule_config_statuses: list[RuleConfigStatus] = [] + source_display_name: Optional[str] = None + twitter_url: Optional[str] = None + contact_campaign_statuses: list[ContactCampaignStatus] = [] + state: Optional[str] = None + city: Optional[str] = None + country: Optional[str] = None + account: Optional[Account] = None + contact_emails: list[ContactEmail] = [] + organization: Optional[Organization] = None + employment_history: list[EmploymentHistory] = [] + time_zone: Optional[str] = None + intent_strength: Optional[str] = None + show_intent: bool = True + phone_numbers: list[PhoneNumber] = [] + account_phone_note: Optional[str] = None + free_domain: bool = True + is_likely_to_engage: bool = True + email_domain_catchall: bool = True + contact_job_change_event: Optional[str] = None + + +class SearchOrganizationsRequest(BaseModel): + """Request for Apollo's search organizations API""" + + organization_num_empoloyees_range: list[int] = SchemaField( + description="""The number range of employees working for the company. This enables you to find companies based on headcount. You can add multiple ranges to expand your search results. + +Each range you add needs to be a string, with the upper and lower numbers of the range separated only by a comma.""", + default=[0, 1000000], + ) + + organization_locations: list[str] = SchemaField( + description="""The location of the company headquarters. You can search across cities, US states, and countries. + +If a company has several office locations, results are still based on the headquarters location. 
For example, if you search chicago but a company's HQ location is in boston, any Boston-based companies will not appear in your search results, even if they match other parameters.
+
+To exclude companies based on location, use the organization_not_locations parameter.
+""",
+        default=[],
+    )
+    organizations_not_locations: list[str] = SchemaField(
+        description="""Exclude companies from search results based on the location of the company headquarters. You can use cities, US states, and countries as locations to exclude.
+
+This parameter is useful for ensuring you do not prospect in an undesirable territory. For example, if you use ireland as a value, no Ireland-based companies will appear in your search results.
+""",
+        default=[],
+    )
+    q_organization_keyword_tags: list[str] = SchemaField(
+        description="""Filter search results based on keywords associated with companies. For example, you can enter mining as a value to return only companies that have an association with the mining industry."""
+    )
+    q_organization_name: str = SchemaField(
+        description="""Filter search results to include a specific company name.
+
+If the value you enter for this parameter does not match a company's name, the company will not appear in search results, even if it matches other parameters. Partial matches are accepted. For example, if you filter by the value marketing, a company called NY Marketing Unlimited would still be eligible as a search result, but NY Market Analysis would not be eligible."""
+    )
+    organization_ids: list[str] = SchemaField(
+        description="""The Apollo IDs for the companies you want to include in your search results. Each company in the Apollo database is assigned a unique ID.
+
+To find IDs, identify the values for organization_id when you call this endpoint.""",
+        default=[],
+    )
+    max_results: int = SchemaField(
+        description="""The maximum number of results to return. If you don't specify this parameter, the default is 100.""",
+        default=100,
+        ge=1,
+        le=50000,
+        advanced=True,
+    )
+
+    page: int = SchemaField(
+        description="""The page number of the Apollo data that you want to retrieve.
+
+Use this parameter in combination with the per_page parameter to make search results more navigable and improve the performance of the endpoint.""",
+        default=1,
+    )
+    per_page: int = SchemaField(
+        description="""The number of search results that should be returned for each page. Limiting the number of results per page improves the endpoint's performance.
+
+Use the page parameter to search the different pages of data.""",
+        default=100,
+    )
+
+
+class SearchOrganizationsResponse(BaseModel):
+    """Response from Apollo's search organizations API"""
+
+    breadcrumbs: list[Breadcrumb] = []
+    partial_results_only: bool = True
+    has_join: bool = True
+    disable_eu_prospecting: bool = True
+    partial_results_limit: int = 0
+    pagination: Pagination = Pagination(
+        page=0, per_page=0, total_entries=0, total_pages=0
+    )
+    # no type listed in the API docs
+    accounts: list[Any] = []
+    organizations: list[Organization] = []
+    models_ids: list[str] = []
+    num_fetch_result: Optional[str] = "N/A"
+    derived_params: Optional[str] = "N/A"
+
+
+class SearchPeopleRequest(BaseModel):
+    """Request for Apollo's search people API"""
+
+    person_titles: list[str] = SchemaField(
+        description="""Job titles held by the people you want to find. For a person to be included in search results, they only need to match 1 of the job titles you add. Adding more job titles expands your search results.
+ +Results also include job titles with the same terms, even if they are not exact matches. For example, searching for marketing manager might return people with the job title content marketing manager. + +Use this parameter in combination with the person_seniorities[] parameter to find people based on specific job functions and seniority levels. +""", + default=[], + placeholder="marketing manager", + ) + person_locations: list[str] = SchemaField( + description="""The location where people live. You can search across cities, US states, and countries. + +To find people based on the headquarters locations of their current employer, use the organization_locations parameter.""", + default=[], + ) + person_seniorities: list[SenorityLevels] = SchemaField( + description="""The job seniority that people hold within their current employer. This enables you to find people that currently hold positions at certain reporting levels, such as Director level or senior IC level. + +For a person to be included in search results, they only need to match 1 of the seniorities you add. Adding more seniorities expands your search results. + +Searches only return results based on their current job title, so searching for Director-level employees only returns people that currently hold a Director-level title. If someone was previously a Director, but is currently a VP, they would not be included in your search results. + +Use this parameter in combination with the person_titles[] parameter to find people based on specific job functions and seniority levels.""", + default=[], + ) + organization_locations: list[str] = SchemaField( + description="""The location of the company headquarters for a person's current employer. You can search across cities, US states, and countries. + +If a company has several office locations, results are still based on the headquarters location. For example, if you search chicago but a company's HQ location is in boston, people that work for the Boston-based company will not appear in your results, even if they match other parameters. + +To find people based on their personal location, use the person_locations parameter.""", + default=[], + ) + q_organization_domains: list[str] = SchemaField( + description="""The domain name for the person's employer. This can be the current employer or a previous employer. Do not include www., the @ symbol, or similar. + +You can add multiple domains to search across companies. + + Examples: apollo.io and microsoft.com""", + default=[], + ) + contact_email_statuses: list[ContactEmailStatuses] = SchemaField( + description="""The email statuses for the people you want to find. You can add multiple statuses to expand your search.""", + default=[], + ) + organization_ids: list[str] = SchemaField( + description="""The Apollo IDs for the companies (employers) you want to include in your search results. Each company in the Apollo database is assigned a unique ID. + +To find IDs, call the Organization Search endpoint and identify the values for organization_id.""", + default=[], + ) + organization_num_empoloyees_range: list[int] = SchemaField( + description="""The number range of employees working for the company. This enables you to find companies based on headcount. You can add multiple ranges to expand your search results. 
+
+Each range you add needs to be a string, with the upper and lower numbers of the range separated only by a comma.""",
+        default=[],
+    )
+    q_keywords: str = SchemaField(
+        description="""A string of words used to filter the results""",
+        default="",
+    )
+    page: int = SchemaField(
+        description="""The page number of the Apollo data that you want to retrieve.
+
+Use this parameter in combination with the per_page parameter to make search results more navigable and improve the performance of the endpoint.""",
+        default=1,
+    )
+    per_page: int = SchemaField(
+        description="""The number of search results that should be returned for each page. Limiting the number of results per page improves the endpoint's performance.
+
+Use the page parameter to search the different pages of data.""",
+        default=100,
+    )
+    max_results: int = SchemaField(
+        description="""The maximum number of results to return. If you don't specify this parameter, the default is 100.""",
+        default=100,
+        ge=1,
+        le=50000,
+        advanced=True,
+    )
+
+
+class SearchPeopleResponse(BaseModel):
+    """Response from Apollo's search people API"""
+
+    class Config:
+        extra = "allow"  # Allow extra fields
+        arbitrary_types_allowed = True  # Allow any type
+        from_attributes = True  # Allow from_orm
+        populate_by_name = True  # Allow field aliases to work both ways
+
+    breadcrumbs: list[Breadcrumb] = []
+    partial_results_only: bool = True
+    has_join: bool = True
+    disable_eu_prospecting: bool = True
+    partial_results_limit: int = 0
+    pagination: Pagination = Pagination(
+        page=0, per_page=0, total_entries=0, total_pages=0
+    )
+    contacts: list[Contact] = []
+    people: list[Contact] = []
+    model_ids: list[str] = []
+    num_fetch_result: Optional[str] = "N/A"
+    derived_params: Optional[str] = "N/A"
diff --git a/autogpt_platform/backend/backend/blocks/apollo/organization.py b/autogpt_platform/backend/backend/blocks/apollo/organization.py
new file mode 100644
index 0000000000..45ce769fcf
--- /dev/null
+++ b/autogpt_platform/backend/backend/blocks/apollo/organization.py
@@ -0,0 +1,219 @@
+from backend.blocks.apollo._api import ApolloClient
+from backend.blocks.apollo._auth import (
+    TEST_CREDENTIALS,
+    TEST_CREDENTIALS_INPUT,
+    ApolloCredentials,
+    ApolloCredentialsInput,
+)
+from backend.blocks.apollo.models import (
+    Organization,
+    PrimaryPhone,
+    SearchOrganizationsRequest,
+)
+from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
+from backend.data.model import SchemaField
+
+
+class SearchOrganizationsBlock(Block):
+    """Search for organizations in Apollo"""
+
+    class Input(BlockSchema):
+        organization_num_empoloyees_range: list[int] = SchemaField(
+            description="""The number range of employees working for the company. This enables you to find companies based on headcount. You can add multiple ranges to expand your search results.
+
+Each range you add needs to be a string, with the upper and lower numbers of the range separated only by a comma.""",
+            default=[0, 1000000],
+        )
+
+        organization_locations: list[str] = SchemaField(
+            description="""The location of the company headquarters. You can search across cities, US states, and countries.
+
+If a company has several office locations, results are still based on the headquarters location. For example, if you search chicago but a company's HQ location is in boston, any Boston-based companies will not appear in your search results, even if they match other parameters.
+
+To exclude companies based on location, use the organization_not_locations parameter.
+""", + default=[], + ) + organizations_not_locations: list[str] = SchemaField( + description="""Exclude companies from search results based on the location of the company headquarters. You can use cities, US states, and countries as locations to exclude. + +This parameter is useful for ensuring you do not prospect in an undesirable territory. For example, if you use ireland as a value, no Ireland-based companies will appear in your search results. +""", + default=[], + ) + q_organization_keyword_tags: list[str] = SchemaField( + description="""Filter search results based on keywords associated with companies. For example, you can enter mining as a value to return only companies that have an association with the mining industry.""", + default=[], + ) + q_organization_name: str = SchemaField( + description="""Filter search results to include a specific company name. + +If the value you enter for this parameter does not match with a company's name, the company will not appear in search results, even if it matches other parameters. Partial matches are accepted. For example, if you filter by the value marketing, a company called NY Marketing Unlimited would still be eligible as a search result, but NY Market Analysis would not be eligible.""", + default="", + advanced=False, + ) + organization_ids: list[str] = SchemaField( + description="""The Apollo IDs for the companies you want to include in your search results. Each company in the Apollo database is assigned a unique ID. + +To find IDs, identify the values for organization_id when you call this endpoint.""", + default=[], + ) + max_results: int = SchemaField( + description="""The maximum number of results to return. If you don't specify this parameter, the default is 100.""", + default=100, + ge=1, + le=50000, + advanced=True, + ) + credentials: ApolloCredentialsInput = SchemaField( + description="Apollo credentials", + ) + + class Output(BlockSchema): + organizations: list[Organization] = SchemaField( + description="List of organizations found", + default=[], + ) + organization: Organization = SchemaField( + description="Each found organization, one at a time", + ) + error: str = SchemaField( + description="Error message if the search failed", + default="", + ) + + def __init__(self): + super().__init__( + id="3d71270d-599e-4148-9b95-71b35d2f44f0", + description="Search for organizations in Apollo", + categories={BlockCategory.SEARCH}, + input_schema=SearchOrganizationsBlock.Input, + output_schema=SearchOrganizationsBlock.Output, + test_credentials=TEST_CREDENTIALS, + test_input={"query": "Google", "credentials": TEST_CREDENTIALS_INPUT}, + test_output=[ + ( + "organization", + Organization( + id="1", + name="Google", + website_url="https://google.com", + blog_url="https://google.com/blog", + angellist_url="https://angel.co/google", + linkedin_url="https://linkedin.com/company/google", + twitter_url="https://twitter.com/google", + facebook_url="https://facebook.com/google", + primary_phone=PrimaryPhone( + source="google", + number="1234567890", + sanitized_number="1234567890", + ), + languages=["en"], + alexa_ranking=1000, + phone="1234567890", + linkedin_uid="1234567890", + founded_year=2000, + publicly_traded_symbol="GOOGL", + publicly_traded_exchange="NASDAQ", + logo_url="https://google.com/logo.png", + chrunchbase_url="https://chrunchbase.com/google", + primary_domain="google.com", + sanitized_phone="1234567890", + owned_by_organization_id="1", + intent_strength="strong", + show_intent=True, + has_intent_signal_account=True, + 
intent_signal_account="1", + ), + ), + ( + "organizations", + [ + Organization( + id="1", + name="Google", + website_url="https://google.com", + blog_url="https://google.com/blog", + angellist_url="https://angel.co/google", + linkedin_url="https://linkedin.com/company/google", + twitter_url="https://twitter.com/google", + facebook_url="https://facebook.com/google", + primary_phone=PrimaryPhone( + source="google", + number="1234567890", + sanitized_number="1234567890", + ), + languages=["en"], + alexa_ranking=1000, + phone="1234567890", + linkedin_uid="1234567890", + founded_year=2000, + publicly_traded_symbol="GOOGL", + publicly_traded_exchange="NASDAQ", + logo_url="https://google.com/logo.png", + chrunchbase_url="https://chrunchbase.com/google", + primary_domain="google.com", + sanitized_phone="1234567890", + owned_by_organization_id="1", + intent_strength="strong", + show_intent=True, + has_intent_signal_account=True, + intent_signal_account="1", + ), + ], + ), + ], + test_mock={ + "search_organizations": lambda *args, **kwargs: [ + Organization( + id="1", + name="Google", + website_url="https://google.com", + blog_url="https://google.com/blog", + angellist_url="https://angel.co/google", + linkedin_url="https://linkedin.com/company/google", + twitter_url="https://twitter.com/google", + facebook_url="https://facebook.com/google", + primary_phone=PrimaryPhone( + source="google", + number="1234567890", + sanitized_number="1234567890", + ), + languages=["en"], + alexa_ranking=1000, + phone="1234567890", + linkedin_uid="1234567890", + founded_year=2000, + publicly_traded_symbol="GOOGL", + publicly_traded_exchange="NASDAQ", + logo_url="https://google.com/logo.png", + chrunchbase_url="https://chrunchbase.com/google", + primary_domain="google.com", + sanitized_phone="1234567890", + owned_by_organization_id="1", + intent_strength="strong", + show_intent=True, + has_intent_signal_account=True, + intent_signal_account="1", + ) + ] + }, + ) + + @staticmethod + def search_organizations( + query: SearchOrganizationsRequest, credentials: ApolloCredentials + ) -> list[Organization]: + client = ApolloClient(credentials) + return client.search_organizations(query) + + def run( + self, input_data: Input, *, credentials: ApolloCredentials, **kwargs + ) -> BlockOutput: + query = SearchOrganizationsRequest( + **input_data.model_dump(exclude={"credentials"}) + ) + organizations = self.search_organizations(query, credentials) + for organization in organizations: + yield "organization", organization + yield "organizations", organizations diff --git a/autogpt_platform/backend/backend/blocks/apollo/people.py b/autogpt_platform/backend/backend/blocks/apollo/people.py new file mode 100644 index 0000000000..ac75e03d1d --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/apollo/people.py @@ -0,0 +1,394 @@ +from backend.blocks.apollo._api import ApolloClient +from backend.blocks.apollo._auth import ( + TEST_CREDENTIALS, + TEST_CREDENTIALS_INPUT, + ApolloCredentials, + ApolloCredentialsInput, +) +from backend.blocks.apollo.models import ( + Contact, + ContactEmailStatuses, + SearchPeopleRequest, + SenorityLevels, +) +from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import SchemaField + + +class SearchPeopleBlock(Block): + """Search for people in Apollo""" + + class Input(BlockSchema): + person_titles: list[str] = SchemaField( + description="""Job titles held by the people you want to find. 
For a person to be included in search results, they only need to match 1 of the job titles you add. Adding more job titles expands your search results. + + Results also include job titles with the same terms, even if they are not exact matches. For example, searching for marketing manager might return people with the job title content marketing manager. + + Use this parameter in combination with the person_seniorities[] parameter to find people based on specific job functions and seniority levels. + """, + default=[], + advanced=False, + ) + person_locations: list[str] = SchemaField( + description="""The location where people live. You can search across cities, US states, and countries. + + To find people based on the headquarters locations of their current employer, use the organization_locations parameter.""", + default=[], + advanced=False, + ) + person_seniorities: list[SenorityLevels] = SchemaField( + description="""The job seniority that people hold within their current employer. This enables you to find people that currently hold positions at certain reporting levels, such as Director level or senior IC level. + + For a person to be included in search results, they only need to match 1 of the seniorities you add. Adding more seniorities expands your search results. + + Searches only return results based on their current job title, so searching for Director-level employees only returns people that currently hold a Director-level title. If someone was previously a Director, but is currently a VP, they would not be included in your search results. + + Use this parameter in combination with the person_titles[] parameter to find people based on specific job functions and seniority levels.""", + default=[], + advanced=False, + ) + organization_locations: list[str] = SchemaField( + description="""The location of the company headquarters for a person's current employer. You can search across cities, US states, and countries. + + If a company has several office locations, results are still based on the headquarters location. For example, if you search chicago but a company's HQ location is in boston, people that work for the Boston-based company will not appear in your results, even if they match other parameters. + + To find people based on their personal location, use the person_locations parameter.""", + default=[], + advanced=False, + ) + q_organization_domains: list[str] = SchemaField( + description="""The domain name for the person's employer. This can be the current employer or a previous employer. Do not include www., the @ symbol, or similar. + + You can add multiple domains to search across companies. + + Examples: apollo.io and microsoft.com""", + default=[], + advanced=False, + ) + contact_email_statuses: list[ContactEmailStatuses] = SchemaField( + description="""The email statuses for the people you want to find. You can add multiple statuses to expand your search.""", + default=[], + advanced=False, + ) + organization_ids: list[str] = SchemaField( + description="""The Apollo IDs for the companies (employers) you want to include in your search results. Each company in the Apollo database is assigned a unique ID. + + To find IDs, call the Organization Search endpoint and identify the values for organization_id.""", + default=[], + advanced=False, + ) + organization_num_empoloyees_range: list[int] = SchemaField( + description="""The number range of employees working for the company. This enables you to find companies based on headcount. 
You can add multiple ranges to expand your search results. + + Each range you add needs to be a string, with the upper and lower numbers of the range separated only by a comma.""", + default=[], + advanced=False, + ) + q_keywords: str = SchemaField( + description="""A string of words over which we want to filter the results""", + default="", + advanced=False, + ) + max_results: int = SchemaField( + description="""The maximum number of results to return. If you don't specify this parameter, the default is 100.""", + default=100, + ge=1, + le=50000, + advanced=True, + ) + + credentials: ApolloCredentialsInput = SchemaField( + description="Apollo credentials", + ) + + class Output(BlockSchema): + people: list[Contact] = SchemaField( + description="List of people found", + default=[], + ) + person: Contact = SchemaField( + description="Each found person, one at a time", + ) + error: str = SchemaField( + description="Error message if the search failed", + default="", + ) + + def __init__(self): + super().__init__( + id="c2adb3aa-5aae-488d-8a6e-4eb8c23e2ed6", + description="Search for people in Apollo", + categories={BlockCategory.SEARCH}, + input_schema=SearchPeopleBlock.Input, + output_schema=SearchPeopleBlock.Output, + test_credentials=TEST_CREDENTIALS, + test_input={"credentials": TEST_CREDENTIALS_INPUT}, + test_output=[ + ( + "person", + Contact( + contact_roles=[], + id="1", + name="John Doe", + first_name="John", + last_name="Doe", + linkedin_url="https://www.linkedin.com/in/johndoe", + title="Software Engineer", + organization_name="Google", + organization_id="123456", + contact_stage_id="1", + owner_id="1", + creator_id="1", + person_id="1", + email_needs_tickling=True, + source="apollo", + original_source="apollo", + headline="Software Engineer", + photo_url="https://www.linkedin.com/in/johndoe", + present_raw_address="123 Main St, Anytown, USA", + linkededin_uid="123456", + extrapolated_email_confidence=0.8, + salesforce_id="123456", + salesforce_lead_id="123456", + salesforce_contact_id="123456", + saleforce_account_id="123456", + crm_owner_id="123456", + created_at="2021-01-01", + emailer_campaign_ids=[], + direct_dial_status="active", + direct_dial_enrichment_failed_at="2021-01-01", + email_status="active", + email_source="apollo", + account_id="123456", + last_activity_date="2021-01-01", + hubspot_vid="123456", + hubspot_company_id="123456", + crm_id="123456", + sanitized_phone="123456", + merged_crm_ids="123456", + updated_at="2021-01-01", + queued_for_crm_push=True, + suggested_from_rule_engine_config_id="123456", + email_unsubscribed=None, + label_ids=[], + has_pending_email_arcgate_request=True, + has_email_arcgate_request=True, + existence_level=None, + email=None, + email_from_customer=None, + typed_custom_fields=[], + custom_field_errors=None, + salesforce_record_id=None, + crm_record_url=None, + email_status_unavailable_reason=None, + email_true_status=None, + updated_email_true_status=True, + contact_rule_config_statuses=[], + source_display_name=None, + twitter_url=None, + contact_campaign_statuses=[], + state=None, + city=None, + country=None, + account=None, + contact_emails=[], + organization=None, + employment_history=[], + time_zone=None, + intent_strength=None, + show_intent=True, + phone_numbers=[], + account_phone_note=None, + free_domain=True, + is_likely_to_engage=True, + email_domain_catchall=True, + contact_job_change_event=None, + ), + ), + ( + "people", + [ + Contact( + contact_roles=[], + id="1", + name="John Doe", + first_name="John", + last_name="Doe", + 
linkedin_url="https://www.linkedin.com/in/johndoe", + title="Software Engineer", + organization_name="Google", + organization_id="123456", + contact_stage_id="1", + owner_id="1", + creator_id="1", + person_id="1", + email_needs_tickling=True, + source="apollo", + original_source="apollo", + headline="Software Engineer", + photo_url="https://www.linkedin.com/in/johndoe", + present_raw_address="123 Main St, Anytown, USA", + linkededin_uid="123456", + extrapolated_email_confidence=0.8, + salesforce_id="123456", + salesforce_lead_id="123456", + salesforce_contact_id="123456", + saleforce_account_id="123456", + crm_owner_id="123456", + created_at="2021-01-01", + emailer_campaign_ids=[], + direct_dial_status="active", + direct_dial_enrichment_failed_at="2021-01-01", + email_status="active", + email_source="apollo", + account_id="123456", + last_activity_date="2021-01-01", + hubspot_vid="123456", + hubspot_company_id="123456", + crm_id="123456", + sanitized_phone="123456", + merged_crm_ids="123456", + updated_at="2021-01-01", + queued_for_crm_push=True, + suggested_from_rule_engine_config_id="123456", + email_unsubscribed=None, + label_ids=[], + has_pending_email_arcgate_request=True, + has_email_arcgate_request=True, + existence_level=None, + email=None, + email_from_customer=None, + typed_custom_fields=[], + custom_field_errors=None, + salesforce_record_id=None, + crm_record_url=None, + email_status_unavailable_reason=None, + email_true_status=None, + updated_email_true_status=True, + contact_rule_config_statuses=[], + source_display_name=None, + twitter_url=None, + contact_campaign_statuses=[], + state=None, + city=None, + country=None, + account=None, + contact_emails=[], + organization=None, + employment_history=[], + time_zone=None, + intent_strength=None, + show_intent=True, + phone_numbers=[], + account_phone_note=None, + free_domain=True, + is_likely_to_engage=True, + email_domain_catchall=True, + contact_job_change_event=None, + ), + ], + ), + ], + test_mock={ + "search_people": lambda query, credentials: [ + Contact( + id="1", + name="John Doe", + first_name="John", + last_name="Doe", + linkedin_url="https://www.linkedin.com/in/johndoe", + title="Software Engineer", + organization_name="Google", + organization_id="123456", + contact_stage_id="1", + owner_id="1", + creator_id="1", + person_id="1", + email_needs_tickling=True, + source="apollo", + original_source="apollo", + headline="Software Engineer", + photo_url="https://www.linkedin.com/in/johndoe", + present_raw_address="123 Main St, Anytown, USA", + linkededin_uid="123456", + extrapolated_email_confidence=0.8, + salesforce_id="123456", + salesforce_lead_id="123456", + salesforce_contact_id="123456", + saleforce_account_id="123456", + crm_owner_id="123456", + created_at="2021-01-01", + emailer_campaign_ids=[], + direct_dial_status="active", + direct_dial_enrichment_failed_at="2021-01-01", + email_status="active", + email_source="apollo", + account_id="123456", + last_activity_date="2021-01-01", + hubspot_vid="123456", + hubspot_company_id="123456", + crm_id="123456", + sanitized_phone="123456", + merged_crm_ids="123456", + updated_at="2021-01-01", + queued_for_crm_push=True, + suggested_from_rule_engine_config_id="123456", + email_unsubscribed=None, + label_ids=[], + has_pending_email_arcgate_request=True, + has_email_arcgate_request=True, + existence_level=None, + email=None, + email_from_customer=None, + typed_custom_fields=[], + custom_field_errors=None, + salesforce_record_id=None, + crm_record_url=None, + 
email_status_unavailable_reason=None,
+                        email_true_status=None,
+                        updated_email_true_status=True,
+                        contact_rule_config_statuses=[],
+                        source_display_name=None,
+                        twitter_url=None,
+                        contact_campaign_statuses=[],
+                        state=None,
+                        city=None,
+                        country=None,
+                        account=None,
+                        contact_emails=[],
+                        organization=None,
+                        employment_history=[],
+                        time_zone=None,
+                        intent_strength=None,
+                        show_intent=True,
+                        phone_numbers=[],
+                        account_phone_note=None,
+                        free_domain=True,
+                        is_likely_to_engage=True,
+                        email_domain_catchall=True,
+                        contact_job_change_event=None,
+                    ),
+                ]
+            },
+        )
+
+    @staticmethod
+    def search_people(
+        query: SearchPeopleRequest, credentials: ApolloCredentials
+    ) -> list[Contact]:
+        client = ApolloClient(credentials)
+        return client.search_people(query)
+
+    def run(
+        self,
+        input_data: Input,
+        *,
+        credentials: ApolloCredentials,
+        **kwargs,
+    ) -> BlockOutput:
+
+        query = SearchPeopleRequest(**input_data.model_dump(exclude={"credentials"}))
+        people = self.search_people(query, credentials)
+        for person in people:
+            yield "person", person
+        yield "people", people
diff --git a/autogpt_platform/backend/backend/blocks/smartlead/_api.py b/autogpt_platform/backend/backend/blocks/smartlead/_api.py
new file mode 100644
index 0000000000..8caa266c2c
--- /dev/null
+++ b/autogpt_platform/backend/backend/blocks/smartlead/_api.py
@@ -0,0 +1,97 @@
+from backend.blocks.smartlead.models import (
+    AddLeadsRequest,
+    AddLeadsToCampaignResponse,
+    CreateCampaignRequest,
+    CreateCampaignResponse,
+    SaveSequencesRequest,
+    SaveSequencesResponse,
+)
+from backend.util.request import Requests
+
+
+class SmartLeadClient:
+    """Client for the SmartLead API"""
+
+    # The SmartLead API requires the API key as a query parameter in the URL,
+    # so raw request errors can leak it. Do not raise errors for bad requests
+    # without first filtering the API key out of the error message.
+
+    API_URL = "https://server.smartlead.ai/api/v1"
+
+    def __init__(self, api_key: str):
+        self.api_key = api_key
+        self.requests = Requests()
+
+    def _add_auth_to_url(self, url: str) -> str:
+        return f"{url}?api_key={self.api_key}"
+
+    def _handle_error(self, e: Exception) -> str:
+        return e.__str__().replace(self.api_key, "API KEY")
+
+    def create_campaign(self, request: CreateCampaignRequest) -> CreateCampaignResponse:
+        try:
+            response = self.requests.post(
+                self._add_auth_to_url(f"{self.API_URL}/campaigns/create"),
+                json=request.model_dump(),
+            )
+            response_data = response.json()
+            return CreateCampaignResponse(**response_data)
+        except ValueError as e:
+            raise ValueError(f"Invalid response format: {str(e)}")
+        except Exception as e:
+            raise ValueError(f"Failed to create campaign: {self._handle_error(e)}")
+
+    def add_leads_to_campaign(
+        self, request: AddLeadsRequest
+    ) -> AddLeadsToCampaignResponse:
+        try:
+            response = self.requests.post(
+                self._add_auth_to_url(
+                    f"{self.API_URL}/campaigns/{request.campaign_id}/leads"
+                ),
+                json=request.model_dump(exclude={"campaign_id"}),
+            )
+            response_data = response.json()
+            response_parsed = AddLeadsToCampaignResponse(**response_data)
+            if not response_parsed.ok:
+                raise ValueError(
+                    f"Failed to add leads to campaign: {response_parsed.error}"
+                )
+            return response_parsed
+        except ValueError as e:
+            raise ValueError(f"Invalid response format: {str(e)}")
+        except Exception as e:
+            raise ValueError(
+                f"Failed to add leads to campaign: {self._handle_error(e)}"
+            )
+
+    def save_campaign_sequences(
+        self, campaign_id: int, request: SaveSequencesRequest
+    ) -> SaveSequencesResponse:
+        """
+        Save sequences within a campaign.
+
+        Args:
+            campaign_id: ID of the campaign to save sequences for
+            request: SaveSequencesRequest containing the sequences configuration
+
+        Returns:
+            SaveSequencesResponse with the result of the operation
+
+        Note:
+            For variant_distribution_type:
+            - MANUAL_EQUAL: Equally distributes variants across leads
+            - AI_EQUAL: Requires winning_metric_property and lead_distribution_percentage
+            - MANUAL_PERCENTAGE: Requires variant_distribution_percentage in seq_variants
+        """
+        try:
+            response = self.requests.post(
+                self._add_auth_to_url(
+                    f"{self.API_URL}/campaigns/{campaign_id}/sequences"
+                ),
+                json=request.model_dump(exclude_none=True),
+            )
+            return SaveSequencesResponse(**response.json())
+        except Exception as e:
+            raise ValueError(
+                f"Failed to save campaign sequences: {self._handle_error(e)}"
+            )
diff --git a/autogpt_platform/backend/backend/blocks/smartlead/_auth.py b/autogpt_platform/backend/backend/blocks/smartlead/_auth.py
new file mode 100644
index 0000000000..219524126c
--- /dev/null
+++ b/autogpt_platform/backend/backend/blocks/smartlead/_auth.py
@@ -0,0 +1,35 @@
+from typing import Literal
+
+from pydantic import SecretStr
+
+from backend.data.model import APIKeyCredentials, CredentialsField, CredentialsMetaInput
+from backend.integrations.providers import ProviderName
+
+SmartLeadCredentials = APIKeyCredentials
+SmartLeadCredentialsInput = CredentialsMetaInput[
+    Literal[ProviderName.SMARTLEAD],
+    Literal["api_key"],
+]
+
+TEST_CREDENTIALS = APIKeyCredentials(
+    id="01234567-89ab-cdef-0123-456789abcdef",
+    provider="smartlead",
+    api_key=SecretStr("mock-smartlead-api-key"),
+    title="Mock SmartLead API key",
+    expires_at=None,
+)
+TEST_CREDENTIALS_INPUT = {
+    "provider": TEST_CREDENTIALS.provider,
+    "id": TEST_CREDENTIALS.id,
+    "type": TEST_CREDENTIALS.type,
+    "title": TEST_CREDENTIALS.title,
+}
+
+
+def SmartLeadCredentialsField() -> SmartLeadCredentialsInput:
+    """
+    Creates a SmartLead credentials input on a block.
+ """ + return CredentialsField( + description="The SmartLead integration can be used with an API Key.", + ) diff --git a/autogpt_platform/backend/backend/blocks/smartlead/campaign.py b/autogpt_platform/backend/backend/blocks/smartlead/campaign.py new file mode 100644 index 0000000000..942ebcd471 --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/smartlead/campaign.py @@ -0,0 +1,326 @@ +from backend.blocks.smartlead._api import SmartLeadClient +from backend.blocks.smartlead._auth import ( + TEST_CREDENTIALS, + TEST_CREDENTIALS_INPUT, + SmartLeadCredentials, + SmartLeadCredentialsInput, +) +from backend.blocks.smartlead.models import ( + AddLeadsRequest, + AddLeadsToCampaignResponse, + CreateCampaignRequest, + CreateCampaignResponse, + LeadInput, + LeadUploadSettings, + SaveSequencesRequest, + SaveSequencesResponse, + Sequence, +) +from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import SchemaField + + +class CreateCampaignBlock(Block): + """Create a campaign in SmartLead""" + + class Input(BlockSchema): + name: str = SchemaField( + description="The name of the campaign", + ) + credentials: SmartLeadCredentialsInput = SchemaField( + description="SmartLead credentials", + ) + + class Output(BlockSchema): + id: int = SchemaField( + description="The ID of the created campaign", + ) + name: str = SchemaField( + description="The name of the created campaign", + ) + created_at: str = SchemaField( + description="The date and time the campaign was created", + ) + error: str = SchemaField( + description="Error message if the search failed", + default="", + ) + + def __init__(self): + super().__init__( + id="8865699f-9188-43c4-89b0-79c84cfaa03e", + description="Create a campaign in SmartLead", + categories={BlockCategory.CRM}, + input_schema=CreateCampaignBlock.Input, + output_schema=CreateCampaignBlock.Output, + test_credentials=TEST_CREDENTIALS, + test_input={"name": "Test Campaign", "credentials": TEST_CREDENTIALS_INPUT}, + test_output=[ + ( + "id", + 1, + ), + ( + "name", + "Test Campaign", + ), + ( + "created_at", + "2024-01-01T00:00:00Z", + ), + ], + test_mock={ + "create_campaign": lambda name, credentials: CreateCampaignResponse( + ok=True, + id=1, + name=name, + created_at="2024-01-01T00:00:00Z", + ) + }, + ) + + @staticmethod + def create_campaign( + name: str, credentials: SmartLeadCredentials + ) -> CreateCampaignResponse: + client = SmartLeadClient(credentials.api_key.get_secret_value()) + return client.create_campaign(CreateCampaignRequest(name=name)) + + def run( + self, + input_data: Input, + *, + credentials: SmartLeadCredentials, + **kwargs, + ) -> BlockOutput: + response = self.create_campaign(input_data.name, credentials) + + yield "id", response.id + yield "name", response.name + yield "created_at", response.created_at + if not response.ok: + yield "error", "Failed to create campaign" + + +class AddLeadToCampaignBlock(Block): + """Add a lead to a campaign in SmartLead""" + + class Input(BlockSchema): + campaign_id: int = SchemaField( + description="The ID of the campaign to add the lead to", + ) + lead_list: list[LeadInput] = SchemaField( + description="An array of JSON objects, each representing a lead's details. 
Can hold max 100 leads.", + max_length=100, + default=[], + advanced=False, + ) + settings: LeadUploadSettings = SchemaField( + description="Settings for lead upload", + default=LeadUploadSettings(), + ) + credentials: SmartLeadCredentialsInput = SchemaField( + description="SmartLead credentials", + ) + + class Output(BlockSchema): + campaign_id: int = SchemaField( + description="The ID of the campaign the lead was added to (passed through)", + ) + upload_count: int = SchemaField( + description="The number of leads added to the campaign", + ) + already_added_to_campaign: int = SchemaField( + description="The number of leads that were already added to the campaign", + ) + duplicate_count: int = SchemaField( + description="The number of emails that were duplicates", + ) + invalid_email_count: int = SchemaField( + description="The number of emails that were invalidly formatted", + ) + is_lead_limit_exhausted: bool = SchemaField( + description="Whether the lead limit was exhausted", + ) + lead_import_stopped_count: int = SchemaField( + description="The number of leads that were not added to the campaign because the lead import was stopped", + ) + error: str = SchemaField( + description="Error message if the lead was not added to the campaign", + default="", + ) + + def __init__(self): + super().__init__( + id="fb8106a4-1a8f-42f9-a502-f6d07e6fe0ec", + description="Add a lead to a campaign in SmartLead", + categories={BlockCategory.CRM}, + input_schema=AddLeadToCampaignBlock.Input, + output_schema=AddLeadToCampaignBlock.Output, + test_credentials=TEST_CREDENTIALS, + test_input={ + "campaign_id": 1, + "lead_list": [], + "credentials": TEST_CREDENTIALS_INPUT, + }, + test_output=[ + ( + "campaign_id", + 1, + ), + ( + "upload_count", + 1, + ), + ], + test_mock={ + "add_leads_to_campaign": lambda campaign_id, lead_list, credentials: AddLeadsToCampaignResponse( + ok=True, + upload_count=1, + already_added_to_campaign=0, + duplicate_count=0, + invalid_email_count=0, + is_lead_limit_exhausted=False, + lead_import_stopped_count=0, + error="", + total_leads=1, + block_count=0, + invalid_emails=[], + unsubscribed_leads=[], + bounce_count=0, + ) + }, + ) + + @staticmethod + def add_leads_to_campaign( + campaign_id: int, lead_list: list[LeadInput], credentials: SmartLeadCredentials + ) -> AddLeadsToCampaignResponse: + client = SmartLeadClient(credentials.api_key.get_secret_value()) + return client.add_leads_to_campaign( + AddLeadsRequest( + campaign_id=campaign_id, + lead_list=lead_list, + settings=LeadUploadSettings( + ignore_global_block_list=False, + ignore_unsubscribe_list=False, + ignore_community_bounce_list=False, + ignore_duplicate_leads_in_other_campaign=False, + ), + ), + ) + + def run( + self, + input_data: Input, + *, + credentials: SmartLeadCredentials, + **kwargs, + ) -> BlockOutput: + response = self.add_leads_to_campaign( + input_data.campaign_id, input_data.lead_list, credentials + ) + + yield "campaign_id", input_data.campaign_id + yield "upload_count", response.upload_count + if response.already_added_to_campaign: + yield "already_added_to_campaign", response.already_added_to_campaign + if response.duplicate_count: + yield "duplicate_count", response.duplicate_count + if response.invalid_email_count: + yield "invalid_email_count", response.invalid_email_count + if response.is_lead_limit_exhausted: + yield "is_lead_limit_exhausted", response.is_lead_limit_exhausted + if response.lead_import_stopped_count: + yield "lead_import_stopped_count", response.lead_import_stopped_count + if 
response.error: + yield "error", response.error + if not response.ok: + yield "error", "Failed to add leads to campaign" + + +class SaveCampaignSequencesBlock(Block): + """Save sequences within a campaign""" + + class Input(BlockSchema): + campaign_id: int = SchemaField( + description="The ID of the campaign to save sequences for", + ) + sequences: list[Sequence] = SchemaField( + description="The sequences to save", + default=[], + advanced=False, + ) + credentials: SmartLeadCredentialsInput = SchemaField( + description="SmartLead credentials", + ) + + class Output(BlockSchema): + data: dict | str | None = SchemaField( + description="Data from the API", + default=None, + ) + message: str = SchemaField( + description="Message from the API", + default="", + ) + error: str = SchemaField( + description="Error message if the sequences were not saved", + default="", + ) + + def __init__(self): + super().__init__( + id="e7d9f41c-dc10-4f39-98ba-a432abd128c0", + description="Save sequences within a campaign", + categories={BlockCategory.CRM}, + input_schema=SaveCampaignSequencesBlock.Input, + output_schema=SaveCampaignSequencesBlock.Output, + test_credentials=TEST_CREDENTIALS, + test_input={ + "campaign_id": 1, + "sequences": [], + "credentials": TEST_CREDENTIALS_INPUT, + }, + test_output=[ + ( + "message", + "Sequences saved successfully", + ), + ], + test_mock={ + "save_campaign_sequences": lambda campaign_id, sequences, credentials: SaveSequencesResponse( + ok=True, + message="Sequences saved successfully", + ) + }, + ) + + @staticmethod + def save_campaign_sequences( + campaign_id: int, sequences: list[Sequence], credentials: SmartLeadCredentials + ) -> SaveSequencesResponse: + client = SmartLeadClient(credentials.api_key.get_secret_value()) + return client.save_campaign_sequences( + campaign_id=campaign_id, request=SaveSequencesRequest(sequences=sequences) + ) + + def run( + self, + input_data: Input, + *, + credentials: SmartLeadCredentials, + **kwargs, + ) -> BlockOutput: + response = self.save_campaign_sequences( + input_data.campaign_id, input_data.sequences, credentials + ) + + if response.data: + yield "data", response.data + if response.message: + yield "message", response.message + if response.error: + yield "error", response.error + if not response.ok: + yield "error", "Failed to save sequences" diff --git a/autogpt_platform/backend/backend/blocks/smartlead/models.py b/autogpt_platform/backend/backend/blocks/smartlead/models.py new file mode 100644 index 0000000000..a41d0e6ee8 --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/smartlead/models.py @@ -0,0 +1,147 @@ +from enum import Enum + +from pydantic import BaseModel + +from backend.data.model import SchemaField + + +class CreateCampaignResponse(BaseModel): + ok: bool + id: int + name: str + created_at: str + + +class CreateCampaignRequest(BaseModel): + name: str + client_id: str | None = None + + +class AddLeadsToCampaignResponse(BaseModel): + ok: bool + upload_count: int + total_leads: int + block_count: int + duplicate_count: int + invalid_email_count: int + invalid_emails: list[str] + already_added_to_campaign: int + unsubscribed_leads: list[str] + is_lead_limit_exhausted: bool + lead_import_stopped_count: int + bounce_count: int + error: str | None = None + + +class LeadCustomFields(BaseModel): + """Custom fields for a lead (max 20 fields)""" + + fields: dict[str, str] = SchemaField( + description="Custom fields for a lead (max 20 fields)", + max_length=20, + default={}, + ) + + +class LeadInput(BaseModel): + 
"""Single lead input data""" + + first_name: str + last_name: str + email: str + phone_number: str | None = None # Changed from int to str for phone numbers + company_name: str | None = None + website: str | None = None + location: str | None = None + custom_fields: LeadCustomFields | None = None + linkedin_profile: str | None = None + company_url: str | None = None + + +class LeadUploadSettings(BaseModel): + """Settings for lead upload""" + + ignore_global_block_list: bool = SchemaField( + description="Ignore the global block list", + default=False, + ) + ignore_unsubscribe_list: bool = SchemaField( + description="Ignore the unsubscribe list", + default=False, + ) + ignore_community_bounce_list: bool = SchemaField( + description="Ignore the community bounce list", + default=False, + ) + ignore_duplicate_leads_in_other_campaign: bool = SchemaField( + description="Ignore duplicate leads in other campaigns", + default=False, + ) + + +class AddLeadsRequest(BaseModel): + """Request body for adding leads to a campaign""" + + lead_list: list[LeadInput] = SchemaField( + description="List of leads to add to the campaign", + max_length=100, + default=[], + ) + settings: LeadUploadSettings + campaign_id: int + + +class VariantDistributionType(str, Enum): + MANUAL_EQUAL = "MANUAL_EQUAL" + MANUAL_PERCENTAGE = "MANUAL_PERCENTAGE" + AI_EQUAL = "AI_EQUAL" + + +class WinningMetricProperty(str, Enum): + OPEN_RATE = "OPEN_RATE" + CLICK_RATE = "CLICK_RATE" + REPLY_RATE = "REPLY_RATE" + POSITIVE_REPLY_RATE = "POSITIVE_REPLY_RATE" + + +class SequenceDelayDetails(BaseModel): + delay_in_days: int + + +class SequenceVariant(BaseModel): + subject: str + email_body: str + variant_label: str + id: int | None = None # Optional for creation, required for updates + variant_distribution_percentage: int | None = None + + +class Sequence(BaseModel): + seq_number: int = SchemaField( + description="The sequence number", + default=1, + ) + seq_delay_details: SequenceDelayDetails + id: int | None = None + variant_distribution_type: VariantDistributionType | None = None + lead_distribution_percentage: int | None = SchemaField( + None, ge=20, le=100 + ) # >= 20% for fair calculation + winning_metric_property: WinningMetricProperty | None = None + seq_variants: list[SequenceVariant] | None = None + subject: str = "" # blank makes the follow up in the same thread + email_body: str | None = None + + +class SaveSequencesRequest(BaseModel): + sequences: list[Sequence] + + +class SaveSequencesResponse(BaseModel): + ok: bool + message: str = SchemaField( + description="Message from the API", + default="", + ) + data: dict | str | None = None + error: str | None = None diff --git a/autogpt_platform/backend/backend/blocks/zerobounce/_api.py b/autogpt_platform/backend/backend/blocks/zerobounce/_api.py new file mode 100644 index 0000000000..c78fe38c09 --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/zerobounce/_api.py @@ -0,0 +1,10 @@ +from zerobouncesdk import ZBValidateResponse, ZeroBounce + + +class ZeroBounceClient: + def __init__(self, api_key: str): + self.api_key = api_key + self.client = ZeroBounce(api_key) + + def validate_email(self, email: str, ip_address: str) -> ZBValidateResponse: + return self.client.validate(email, ip_address) diff --git a/autogpt_platform/backend/backend/blocks/zerobounce/_auth.py b/autogpt_platform/backend/backend/blocks/zerobounce/_auth.py new file mode 100644 index 0000000000..e7125fc3c9 --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/zerobounce/_auth.py @@ -0,0 +1,35 @@ +from 
typing import Literal + +from pydantic import SecretStr + +from backend.data.model import APIKeyCredentials, CredentialsField, CredentialsMetaInput +from backend.integrations.providers import ProviderName + +ZeroBounceCredentials = APIKeyCredentials +ZeroBounceCredentialsInput = CredentialsMetaInput[ + Literal[ProviderName.ZEROBOUNCE], + Literal["api_key"], +] + +TEST_CREDENTIALS = APIKeyCredentials( + id="01234567-89ab-cdef-0123-456789abcdef", + provider="zerobounce", + api_key=SecretStr("mock-zerobounce-api-key"), + title="Mock ZeroBounce API key", + expires_at=None, +) +TEST_CREDENTIALS_INPUT = { + "provider": TEST_CREDENTIALS.provider, + "id": TEST_CREDENTIALS.id, + "type": TEST_CREDENTIALS.type, + "title": TEST_CREDENTIALS.title, +} + + +def ZeroBounceCredentialsField() -> ZeroBounceCredentialsInput: + """ + Creates a ZeroBounce credentials input on a block. + """ + return CredentialsField( + description="The ZeroBounce integration can be used with an API Key.", + ) diff --git a/autogpt_platform/backend/backend/blocks/zerobounce/validate_emails.py b/autogpt_platform/backend/backend/blocks/zerobounce/validate_emails.py new file mode 100644 index 0000000000..ee87a8f285 --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/zerobounce/validate_emails.py @@ -0,0 +1,175 @@ +from typing import Optional + +from pydantic import BaseModel +from zerobouncesdk.zb_validate_response import ( + ZBValidateResponse, + ZBValidateStatus, + ZBValidateSubStatus, +) + +from backend.blocks.zerobounce._api import ZeroBounceClient +from backend.blocks.zerobounce._auth import ( + TEST_CREDENTIALS, + TEST_CREDENTIALS_INPUT, + ZeroBounceCredentials, + ZeroBounceCredentialsInput, +) +from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import SchemaField + + +class Response(BaseModel): + address: str = SchemaField( + description="The email address you are validating.", default="N/A" + ) + status: ZBValidateStatus = SchemaField( + description="The status of the email address.", default=ZBValidateStatus.unknown + ) + sub_status: ZBValidateSubStatus = SchemaField( + description="The sub-status of the email address.", + default=ZBValidateSubStatus.none, + ) + account: Optional[str] = SchemaField( + description="The portion of the email address before the '@' symbol.", + default="N/A", + ) + domain: Optional[str] = SchemaField( + description="The portion of the email address after the '@' symbol." 
+    )
+    did_you_mean: Optional[str] = SchemaField(
+        description="Suggested fix for an email typo",
+        default=None,
+    )
+    domain_age_days: Optional[str] = SchemaField(
+        description="Age of the email domain in days or [null].",
+        default=None,
+    )
+    free_email: Optional[bool] = SchemaField(
+        description="Whether the email address is from a free email provider.", default=False
+    )
+    mx_found: Optional[bool] = SchemaField(
+        description="Whether the MX record was found.", default=False
+    )
+    mx_record: Optional[str] = SchemaField(
+        description="The MX record of the email address.", default=None
+    )
+    smtp_provider: Optional[str] = SchemaField(
+        description="The SMTP provider of the email address.", default=None
+    )
+    firstname: Optional[str] = SchemaField(
+        description="The first name associated with the email address.", default=None
+    )
+    lastname: Optional[str] = SchemaField(
+        description="The last name associated with the email address.", default=None
+    )
+    gender: Optional[str] = SchemaField(
+        description="The gender associated with the email address.", default=None
+    )
+    city: Optional[str] = SchemaField(
+        description="The city associated with the email address.", default=None
+    )
+    region: Optional[str] = SchemaField(
+        description="The region associated with the email address.", default=None
+    )
+    zipcode: Optional[str] = SchemaField(
+        description="The zipcode associated with the email address.", default=None
+    )
+    country: Optional[str] = SchemaField(
+        description="The country associated with the email address.", default=None
+    )
+
+
+class ValidateEmailsBlock(Block):
+    """Validate an email address with ZeroBounce"""
+
+    class Input(BlockSchema):
+        email: str = SchemaField(
+            description="Email to validate",
+        )
+        ip_address: str = SchemaField(
+            description="IP address to validate",
+            default="",
+        )
+        credentials: ZeroBounceCredentialsInput = SchemaField(
+            description="ZeroBounce credentials",
+        )
+
+    class Output(BlockSchema):
+        response: Response = SchemaField(
+            description="Response from ZeroBounce",
+        )
+        error: str = SchemaField(
+            description="Error message if the validation failed",
+            default="",
+        )
+
+    def __init__(self):
+        super().__init__(
+            id="e3950439-fa0b-40e8-b19f-e0dca0bf5853",
+            description="Validate emails",
+            categories={BlockCategory.SEARCH},
+            input_schema=ValidateEmailsBlock.Input,
+            output_schema=ValidateEmailsBlock.Output,
+            test_credentials=TEST_CREDENTIALS,
+            test_input={
+                "credentials": TEST_CREDENTIALS_INPUT,
+                "email": "test@test.com",
+            },
+            test_output=[
+                (
+                    "response",
+                    Response(
+                        address="test@test.com",
+                        status=ZBValidateStatus.valid,
+                        sub_status=ZBValidateSubStatus.allowed,
+                        account="test",
+                        domain="test.com",
+                        did_you_mean=None,
+                        domain_age_days=None,
+                        free_email=False,
+                        mx_found=False,
+                        mx_record=None,
+                        smtp_provider=None,
+                    ),
+                )
+            ],
+            test_mock={
+                "validate_email": lambda email, ip_address, credentials: ZBValidateResponse(
+                    data={
+                        "address": email,
+                        "status": ZBValidateStatus.valid,
+                        "sub_status": ZBValidateSubStatus.allowed,
+                        "account": "test",
+                        "domain": "test.com",
+                        "did_you_mean": None,
+                        "domain_age_days": None,
+                        "free_email": False,
+                        "mx_found": False,
+                        "mx_record": None,
+                        "smtp_provider": None,
+                    }
+                )
+            },
+        )
+
+    @staticmethod
+    def validate_email(
+        email: str, ip_address: str, credentials: ZeroBounceCredentials
+    ) -> ZBValidateResponse:
+        client = ZeroBounceClient(credentials.api_key.get_secret_value())
+        return client.validate_email(email, ip_address)
+
+    def run(
+        self,
+        input_data: Input,
+        *,
+        credentials: ZeroBounceCredentials,
+        **kwargs,
+    ) -> BlockOutput:
+        response: ZBValidateResponse =
self.validate_email( + input_data.email, input_data.ip_address, credentials + ) + + response_model = Response(**response.__dict__) + + yield "response", response_model diff --git a/autogpt_platform/backend/backend/data/credit.py b/autogpt_platform/backend/backend/data/credit.py index f4ed045c47..a6b840590a 100644 --- a/autogpt_platform/backend/backend/data/credit.py +++ b/autogpt_platform/backend/backend/data/credit.py @@ -1,11 +1,17 @@ +import asyncio import logging from abc import ABC, abstractmethod from collections import defaultdict from datetime import datetime, timezone import stripe +from autogpt_libs.utils.cache import thread_cached from prisma import Json -from prisma.enums import CreditRefundRequestStatus, CreditTransactionType +from prisma.enums import ( + CreditRefundRequestStatus, + CreditTransactionType, + NotificationType, +) from prisma.errors import UniqueViolationError from prisma.models import CreditRefundRequest, CreditTransaction, User from prisma.types import CreditTransactionCreateInput, CreditTransactionWhereInput @@ -23,7 +29,10 @@ from backend.data.model import ( TransactionHistory, UserTransaction, ) +from backend.data.notifications import NotificationEventDTO, RefundRequestData from backend.data.user import get_user_by_id +from backend.notifications import NotificationManager +from backend.util.service import get_service_client from backend.util.settings import Settings settings = Settings() @@ -338,6 +347,26 @@ class UsageTransactionMetadata(BaseModel): class UserCredit(UserCreditBase): + @thread_cached + def notification_client(self) -> NotificationManager: + return get_service_client(NotificationManager) + + async def _send_refund_notification( + self, + notification_request: RefundRequestData, + notification_type: NotificationType, + ): + await asyncio.to_thread( + lambda: self.notification_client().queue_notification( + NotificationEventDTO( + recipient_email=settings.config.refund_notification_email, + user_id=notification_request.user_id, + type=notification_type, + data=notification_request.model_dump(), + ) + ) + ) + def _block_usage_cost( self, block: Block, @@ -457,10 +486,11 @@ class UserCredit(UserCreditBase): ) balance = await self.get_credits(user_id) amount = transaction.amount - refund_key = f"{transaction.createdAt.strftime('%Y-%W')}-{user_id}" + refund_key_format = settings.config.refund_request_time_key_format + refund_key = f"{transaction.createdAt.strftime(refund_key_format)}-{user_id}" try: - await CreditRefundRequest.prisma().create( + refund_request = await CreditRefundRequest.prisma().create( data={ "id": refund_key, "transactionKey": transaction_key, @@ -477,7 +507,20 @@ class UserCredit(UserCreditBase): ) if amount - balance > settings.config.refund_credit_tolerance_threshold: - # TODO: add a notification for the platform administrator. + user_data = await get_user_by_id(user_id) + await self._send_refund_notification( + RefundRequestData( + user_id=user_id, + user_name=user_data.name or "AutoGPT Platform User", + user_email=user_data.email, + transaction_id=transaction_key, + refund_request_id=refund_request.id, + reason=refund_request.reason, + amount=amount, + balance=balance, + ), + NotificationType.REFUND_REQUEST, + ) return 0 # Register the refund request for manual approval. # Auto refund the top-up. 
@@ -509,7 +552,7 @@ class UserCredit(UserCreditBase): f"Invalid amount to deduct ${request.amount/100} from ${transaction.amount/100} top-up" ) - await self._add_transaction( + balance, _ = await self._add_transaction( user_id=transaction.userId, amount=-request.amount, transaction_type=CreditTransactionType.REFUND, @@ -531,6 +574,21 @@ class UserCredit(UserCreditBase): }, ) + user_data = await get_user_by_id(transaction.userId) + await self._send_refund_notification( + RefundRequestData( + user_id=user_data.id, + user_name=user_data.name or "AutoGPT Platform User", + user_email=user_data.email, + transaction_id=transaction.transactionKey, + refund_request_id=request.id, + reason=str(request.reason or "-"), + amount=transaction.amount, + balance=balance, + ), + NotificationType.REFUND_PROCESSED, + ) + async def handle_dispute(self, dispute: stripe.Dispute): transaction = await CreditTransaction.prisma().find_first_or_raise( where={ diff --git a/autogpt_platform/backend/backend/data/graph.py b/autogpt_platform/backend/backend/data/graph.py index 5483c24bd3..52e37625cd 100644 --- a/autogpt_platform/backend/backend/data/graph.py +++ b/autogpt_platform/backend/backend/data/graph.py @@ -23,12 +23,15 @@ from backend.util import type from .block import BlockInput, BlockType, get_block, get_blocks from .db import BaseDbModel, transaction -from .execution import ExecutionStatus +from .execution import ExecutionResult, ExecutionStatus from .includes import AGENT_GRAPH_INCLUDE, AGENT_NODE_INCLUDE from .integrations import Webhook logger = logging.getLogger(__name__) +_INPUT_BLOCK_ID = AgentInputBlock().id +_OUTPUT_BLOCK_ID = AgentOutputBlock().id + class Link(BaseDbModel): source_id: str @@ -105,7 +108,7 @@ class NodeModel(Node): Webhook.model_rebuild() -class GraphExecution(BaseDbModel): +class GraphExecutionMeta(BaseDbModel): execution_id: str started_at: datetime ended_at: datetime @@ -114,33 +117,83 @@ class GraphExecution(BaseDbModel): status: ExecutionStatus graph_id: str graph_version: int + preset_id: Optional[str] @staticmethod - def from_db(execution: AgentGraphExecution): + def from_db(_graph_exec: AgentGraphExecution): now = datetime.now(timezone.utc) - start_time = execution.startedAt or execution.createdAt - end_time = execution.updatedAt or now + start_time = _graph_exec.startedAt or _graph_exec.createdAt + end_time = _graph_exec.updatedAt or now duration = (end_time - start_time).total_seconds() total_run_time = duration try: - stats = type.convert(execution.stats or {}, dict[str, Any]) + stats = type.convert(_graph_exec.stats or {}, dict[str, Any]) except ValueError: stats = {} duration = stats.get("walltime", duration) total_run_time = stats.get("nodes_walltime", total_run_time) - return GraphExecution( - id=execution.id, - execution_id=execution.id, + return GraphExecutionMeta( + id=_graph_exec.id, + execution_id=_graph_exec.id, started_at=start_time, ended_at=end_time, duration=duration, total_run_time=total_run_time, - status=ExecutionStatus(execution.executionStatus), - graph_id=execution.agentGraphId, - graph_version=execution.agentGraphVersion, + status=ExecutionStatus(_graph_exec.executionStatus), + graph_id=_graph_exec.agentGraphId, + graph_version=_graph_exec.agentGraphVersion, + preset_id=_graph_exec.agentPresetId, + ) + + +class GraphExecution(GraphExecutionMeta): + inputs: dict[str, Any] + outputs: dict[str, list[Any]] + node_executions: list[ExecutionResult] + + @staticmethod + def from_db(_graph_exec: AgentGraphExecution): + if _graph_exec.AgentNodeExecutions is None: 
+ raise ValueError("Node executions must be included in query") + + graph_exec = GraphExecutionMeta.from_db(_graph_exec) + + node_executions = [ + ExecutionResult.from_db(ne) for ne in _graph_exec.AgentNodeExecutions + ] + + inputs = { + **{ + # inputs from Agent Input Blocks + exec.input_data["name"]: exec.input_data["value"] + for exec in node_executions + if exec.block_id == _INPUT_BLOCK_ID + }, + **{ + # input from webhook-triggered block + "payload": exec.input_data["payload"] + for exec in node_executions + if (block := get_block(exec.block_id)) + and block.block_type in [BlockType.WEBHOOK, BlockType.WEBHOOK_MANUAL] + }, + } + + outputs: dict[str, list] = defaultdict(list) + for exec in node_executions: + if exec.block_id == _OUTPUT_BLOCK_ID: + outputs[exec.input_data["name"]].append(exec.input_data["value"]) + + return GraphExecution( + **{ + field_name: getattr(graph_exec, field_name) + for field_name in graph_exec.model_fields + }, + inputs=inputs, + outputs=outputs, + node_executions=node_executions, ) @@ -514,17 +567,45 @@ async def get_graphs( return graph_models -async def get_executions(user_id: str) -> list[GraphExecution]: +async def get_graphs_executions(user_id: str) -> list[GraphExecutionMeta]: executions = await AgentGraphExecution.prisma().find_many( where={"userId": user_id}, order={"createdAt": "desc"}, ) - return [GraphExecution.from_db(execution) for execution in executions] + return [GraphExecutionMeta.from_db(execution) for execution in executions] + + +async def get_graph_executions(graph_id: str, user_id: str) -> list[GraphExecutionMeta]: + executions = await AgentGraphExecution.prisma().find_many( + where={"agentGraphId": graph_id, "userId": user_id}, + order={"createdAt": "desc"}, + ) + return [GraphExecutionMeta.from_db(execution) for execution in executions] + + +async def get_execution_meta( + user_id: str, execution_id: str +) -> GraphExecutionMeta | None: + execution = await AgentGraphExecution.prisma().find_first( + where={"id": execution_id, "userId": user_id} + ) + return GraphExecutionMeta.from_db(execution) if execution else None async def get_execution(user_id: str, execution_id: str) -> GraphExecution | None: execution = await AgentGraphExecution.prisma().find_first( - where={"id": execution_id, "userId": user_id} + where={"id": execution_id, "userId": user_id}, + include={ + "AgentNodeExecutions": { + "include": {"AgentNode": True, "Input": True, "Output": True}, + "order_by": [ + {"queuedTime": "asc"}, + { # Fallback: Incomplete execs has no queuedTime. 
+ "addedTime": "asc" + }, + ], + }, + }, ) return GraphExecution.from_db(execution) if execution else None @@ -629,7 +710,7 @@ async def create_graph(graph: Graph, user_id: str) -> GraphModel: await __create_graph(tx, graph, user_id) if created_graph := await get_graph( - graph.id, graph.version, graph.is_template, user_id=user_id + graph.id, graph.version, template=graph.is_template, user_id=user_id ): return created_graph diff --git a/autogpt_platform/backend/backend/data/notifications.py b/autogpt_platform/backend/backend/data/notifications.py index a5bfbbc3ae..a0b6941640 100644 --- a/autogpt_platform/backend/backend/data/notifications.py +++ b/autogpt_platform/backend/backend/data/notifications.py @@ -100,6 +100,17 @@ class MonthlySummaryData(BaseSummaryData): year: int +class RefundRequestData(BaseNotificationData): + user_id: str + user_name: str + user_email: str + transaction_id: str + refund_request_id: str + reason: str + amount: float + balance: int + + NotificationData = Annotated[ Union[ AgentRunData, @@ -118,6 +129,8 @@ class NotificationEventDTO(BaseModel): type: NotificationType data: dict created_at: datetime = Field(default_factory=datetime.now) + recipient_email: Optional[str] = None + retry_count: int = 0 class NotificationEventModel(BaseModel, Generic[T_co]): @@ -153,6 +166,8 @@ def get_data_type( NotificationType.DAILY_SUMMARY: DailySummaryData, NotificationType.WEEKLY_SUMMARY: WeeklySummaryData, NotificationType.MONTHLY_SUMMARY: MonthlySummaryData, + NotificationType.REFUND_REQUEST: RefundRequestData, + NotificationType.REFUND_PROCESSED: RefundRequestData, }[notification_type] @@ -186,6 +201,8 @@ class NotificationTypeOverride: NotificationType.DAILY_SUMMARY: BatchingStrategy.IMMEDIATE, NotificationType.WEEKLY_SUMMARY: BatchingStrategy.IMMEDIATE, NotificationType.MONTHLY_SUMMARY: BatchingStrategy.IMMEDIATE, + NotificationType.REFUND_REQUEST: BatchingStrategy.IMMEDIATE, + NotificationType.REFUND_PROCESSED: BatchingStrategy.IMMEDIATE, } return BATCHING_RULES.get(self.notification_type, BatchingStrategy.HOURLY) @@ -201,6 +218,8 @@ class NotificationTypeOverride: NotificationType.DAILY_SUMMARY: "daily_summary.html", NotificationType.WEEKLY_SUMMARY: "weekly_summary.html", NotificationType.MONTHLY_SUMMARY: "monthly_summary.html", + NotificationType.REFUND_REQUEST: "refund_request.html", + NotificationType.REFUND_PROCESSED: "refund_processed.html", }[self.notification_type] @property @@ -214,6 +233,8 @@ class NotificationTypeOverride: NotificationType.DAILY_SUMMARY: "Here's your daily summary!", NotificationType.WEEKLY_SUMMARY: "Look at all the cool stuff you did last week!", NotificationType.MONTHLY_SUMMARY: "We did a lot this month!", + NotificationType.REFUND_REQUEST: "[ACTION REQUIRED] You got a ${{data.amount / 100}} refund request from {{data.user_name}}", + NotificationType.REFUND_PROCESSED: "Refund for ${{data.amount / 100}} to {{data.user_name}} has been processed", }[self.notification_type] diff --git a/autogpt_platform/backend/backend/data/user.py b/autogpt_platform/backend/backend/data/user.py index 162066722b..b4c107bac6 100644 --- a/autogpt_platform/backend/backend/data/user.py +++ b/autogpt_platform/backend/backend/data/user.py @@ -50,10 +50,10 @@ async def get_user_by_id(user_id: str) -> User: return User.model_validate(user) -async def get_user_email_by_id(user_id: str) -> str: +async def get_user_email_by_id(user_id: str) -> Optional[str]: try: - user = await prisma.user.find_unique_or_raise(where={"id": user_id}) - return user.email + user = await 
prisma.user.find_unique(where={"id": user_id}) + return user.email if user else None except Exception as e: raise DatabaseError(f"Failed to get user email for user {user_id}: {e}") from e diff --git a/autogpt_platform/backend/backend/executor/database.py b/autogpt_platform/backend/backend/executor/database.py index 02e1535f5e..1dee046ccc 100644 --- a/autogpt_platform/backend/backend/executor/database.py +++ b/autogpt_platform/backend/backend/executor/database.py @@ -8,7 +8,6 @@ from backend.data.execution import ( RedisExecutionEventBus, create_graph_execution, get_execution_results, - get_executions_in_timerange, get_incomplete_executions, get_latest_execution, update_execution_status, @@ -18,20 +17,9 @@ from backend.data.execution import ( upsert_execution_output, ) from backend.data.graph import get_graph, get_node -from backend.data.notifications import ( - create_or_add_to_user_notification_batch, - empty_user_notification_batch, - get_user_notification_batch, - get_user_notification_last_message_in_batch, -) from backend.data.user import ( - get_active_user_ids_in_timerange, - get_active_users_ids, - get_user_by_id, - get_user_email_by_id, get_user_integrations, get_user_metadata, - get_user_notification_preference, update_user_integrations, update_user_metadata, ) @@ -84,7 +72,6 @@ class DatabaseManager(AppService): update_node_execution_stats = exposed_run_and_wait(update_node_execution_stats) upsert_execution_input = exposed_run_and_wait(upsert_execution_input) upsert_execution_output = exposed_run_and_wait(upsert_execution_output) - get_executions_in_timerange = exposed_run_and_wait(get_executions_in_timerange) # Graphs get_node = exposed_run_and_wait(get_node) @@ -97,27 +84,8 @@ class DatabaseManager(AppService): exposed_run_and_wait(user_credit_model.spend_credits), ) - # User + User Metadata + User Integrations + User Notification Preferences + # User + User Metadata + User Integrations get_user_metadata = exposed_run_and_wait(get_user_metadata) update_user_metadata = exposed_run_and_wait(update_user_metadata) get_user_integrations = exposed_run_and_wait(get_user_integrations) update_user_integrations = exposed_run_and_wait(update_user_integrations) - get_active_user_ids_in_timerange = exposed_run_and_wait( - get_active_user_ids_in_timerange - ) - get_user_by_id = exposed_run_and_wait(get_user_by_id) - get_user_email_by_id = exposed_run_and_wait(get_user_email_by_id) - get_user_notification_preference = exposed_run_and_wait( - get_user_notification_preference - ) - get_active_users_ids = exposed_run_and_wait(get_active_users_ids) - - # Notifications - create_or_add_to_user_notification_batch = exposed_run_and_wait( - create_or_add_to_user_notification_batch - ) - get_user_notification_last_message_in_batch = exposed_run_and_wait( - get_user_notification_last_message_in_batch - ) - empty_user_notification_batch = exposed_run_and_wait(empty_user_notification_batch) - get_user_notification_batch = exposed_run_and_wait(get_user_notification_batch) diff --git a/autogpt_platform/backend/backend/executor/manager.py b/autogpt_platform/backend/backend/executor/manager.py index 65c84ee1bc..6e7d109d15 100644 --- a/autogpt_platform/backend/backend/executor/manager.py +++ b/autogpt_platform/backend/backend/executor/manager.py @@ -210,6 +210,7 @@ def execute_node( for output_name, output_data in node_block.execute( input_data, **extra_exec_kwargs ): + output_data = json.convert_pydantic_to_json(output_data) output_size += len(json.dumps(output_data)) log_metadata.info("Node produced 
output", **{output_name: output_data}) db_client.upsert_execution_output(node_exec_id, output_name, output_data) diff --git a/autogpt_platform/backend/backend/integrations/credentials_store.py b/autogpt_platform/backend/backend/integrations/credentials_store.py index 5968cbe32d..3284c4b8fe 100644 --- a/autogpt_platform/backend/backend/integrations/credentials_store.py +++ b/autogpt_platform/backend/backend/integrations/credentials_store.py @@ -145,6 +145,29 @@ mem0_credentials = APIKeyCredentials( expires_at=None, ) +apollo_credentials = APIKeyCredentials( + id="544c62b5-1d0f-4156-8fb4-9525f11656eb", + provider="apollo", + api_key=SecretStr(settings.secrets.apollo_api_key), + title="Use Credits for Apollo", + expires_at=None, +) + +smartlead_credentials = APIKeyCredentials( + id="3bcdbda3-84a3-46af-8fdb-bfd2472298b8", + provider="smartlead", + api_key=SecretStr(settings.secrets.smartlead_api_key), + title="Use Credits for SmartLead", + expires_at=None, +) + +zerobounce_credentials = APIKeyCredentials( + id="63a6e279-2dc2-448e-bf57-85776f7176dc", + provider="zerobounce", + api_key=SecretStr(settings.secrets.zerobounce_api_key), + title="Use Credits for ZeroBounce", + expires_at=None, +) DEFAULT_CREDENTIALS = [ ollama_credentials, @@ -164,6 +187,9 @@ DEFAULT_CREDENTIALS = [ mem0_credentials, nvidia_credentials, screenshotone_credentials, + apollo_credentials, + smartlead_credentials, + zerobounce_credentials, ] @@ -231,6 +257,12 @@ class IntegrationCredentialsStore: all_credentials.append(screenshotone_credentials) if settings.secrets.mem0_api_key: all_credentials.append(mem0_credentials) + if settings.secrets.apollo_api_key: + all_credentials.append(apollo_credentials) + if settings.secrets.smartlead_api_key: + all_credentials.append(smartlead_credentials) + if settings.secrets.zerobounce_api_key: + all_credentials.append(zerobounce_credentials) return all_credentials def get_creds_by_id(self, user_id: str, credentials_id: str) -> Credentials | None: diff --git a/autogpt_platform/backend/backend/integrations/providers.py b/autogpt_platform/backend/backend/integrations/providers.py index 6f31988d7f..eb1f513c2e 100644 --- a/autogpt_platform/backend/backend/integrations/providers.py +++ b/autogpt_platform/backend/backend/integrations/providers.py @@ -4,6 +4,7 @@ from enum import Enum # --8<-- [start:ProviderName] class ProviderName(str, Enum): ANTHROPIC = "anthropic" + APOLLO = "apollo" COMPASS = "compass" DISCORD = "discord" D_ID = "d_id" @@ -32,8 +33,10 @@ class ProviderName(str, Enum): REVID = "revid" SCREENSHOTONE = "screenshotone" SLANT3D = "slant3d" + SMARTLEAD = "smartlead" SMTP = "smtp" TWITTER = "twitter" TODOIST = "todoist" UNREAL_SPEECH = "unreal_speech" + ZEROBOUNCE = "zerobounce" # --8<-- [end:ProviderName] diff --git a/autogpt_platform/backend/backend/notifications/notifications.py b/autogpt_platform/backend/backend/notifications/notifications.py index f1f32f9301..1d99011edb 100644 --- a/autogpt_platform/backend/backend/notifications/notifications.py +++ b/autogpt_platform/backend/backend/notifications/notifications.py @@ -1,9 +1,11 @@ import logging import time -from typing import TYPE_CHECKING +from typing import Callable +import aio_pika from aio_pika.exceptions import QueueEmpty -from autogpt_libs.utils.cache import thread_cached +from prisma.enums import NotificationType +from pydantic import BaseModel from backend.data.notifications import ( BatchingStrategy, @@ -13,26 +15,25 @@ from backend.data.notifications import ( get_data_type, ) from backend.data.rabbitmq import 
Exchange, ExchangeType, Queue, RabbitMQConfig -from backend.executor.database import DatabaseManager +from backend.data.user import get_user_email_by_id, get_user_notification_preference from backend.notifications.email import EmailSender -from backend.util.service import AppService, expose, get_service_client +from backend.util.service import AppService, expose from backend.util.settings import Settings -if TYPE_CHECKING: - from backend.executor import DatabaseManager - logger = logging.getLogger(__name__) settings = Settings() +class NotificationEvent(BaseModel): + event: NotificationEventDTO + model: NotificationEventModel + + def create_notification_config() -> RabbitMQConfig: """Create RabbitMQ configuration for notifications""" notification_exchange = Exchange(name="notifications", type=ExchangeType.TOPIC) - summary_exchange = Exchange(name="summaries", type=ExchangeType.TOPIC) - - dead_letter_exchange = Exchange(name="dead_letter", type=ExchangeType.DIRECT) - delay_exchange = Exchange(name="delay", type=ExchangeType.DIRECT) + dead_letter_exchange = Exchange(name="dead_letter", type=ExchangeType.TOPIC) queues = [ # Main notification queues @@ -45,34 +46,6 @@ def create_notification_config() -> RabbitMQConfig: "x-dead-letter-routing-key": "failed.immediate", }, ), - Queue( - name="backoff_notifications", - exchange=notification_exchange, - routing_key="notification.backoff.#", - arguments={ - "x-dead-letter-exchange": dead_letter_exchange.name, - "x-dead-letter-routing-key": "failed.backoff", - }, - ), - # Summary queues - Queue( - name="daily_summary_trigger", - exchange=summary_exchange, - routing_key="summary.daily", - arguments={"x-message-ttl": 86400000}, # 24 hours - ), - Queue( - name="weekly_summary_trigger", - exchange=summary_exchange, - routing_key="summary.weekly", - arguments={"x-message-ttl": 604800000}, # 7 days - ), - Queue( - name="monthly_summary_trigger", - exchange=summary_exchange, - routing_key="summary.monthly", - arguments={"x-message-ttl": 2592000000}, # 30 days - ), # Failed notifications queue Queue( name="failed_notifications", @@ -84,10 +57,7 @@ def create_notification_config() -> RabbitMQConfig: return RabbitMQConfig( exchanges=[ notification_exchange, - # batch_exchange, - summary_exchange, dead_letter_exchange, - delay_exchange, ], queues=queues, ) @@ -120,15 +90,15 @@ class NotificationManager(AppService): def queue_notification(self, event: NotificationEventDTO) -> NotificationResult: """Queue a notification - exposed method for other services to call""" try: - logger.info(f"Recieved Request to queue {event=}") - # Workaround for not being able to seralize generics over the expose bus + logger.info(f"Received Request to queue {event=}") + # Workaround for not being able to serialize generics over the expose bus parsed_event = NotificationEventModel[ get_data_type(event.type) ].model_validate(event.model_dump()) routing_key = self.get_routing_key(parsed_event) message = parsed_event.model_dump_json() - logger.info(f"Recieved Request to queue {message=}") + logger.info(f"Received Request to queue {message=}") exchange = "notifications" @@ -145,41 +115,93 @@ class NotificationManager(AppService): return NotificationResult( success=True, - message=(f"Notification queued with routing key: {routing_key}"), + message=f"Notification queued with routing key: {routing_key}", ) except Exception as e: - logger.error(f"Error queueing notification: {e}") + logger.exception(f"Error queueing notification: {e}") return NotificationResult(success=False, message=str(e)) 
-    async def _process_immediate(self, message: str) -> bool:
-        """Process a single notification immediately, returning whether to put into the failed queue"""
+    def _should_email_user_based_on_preference(
+        self, user_id: str, event_type: NotificationType
+    ) -> bool:
+        return self.run_and_wait(
+            get_user_notification_preference(user_id)
+        ).preferences.get(event_type, True)
+
+    def _parse_message(self, message: str) -> NotificationEvent | None:
         try:
             event = NotificationEventDTO.model_validate_json(message)
-            parsed_event = NotificationEventModel[
+            model = NotificationEventModel[
                 get_data_type(event.type)
             ].model_validate_json(message)
-            user_email = get_db_client().get_user_email_by_id(event.user_id)
-            should_send = (
-                get_db_client()
-                .get_user_notification_preference(event.user_id)
-                .preferences[event.type]
-            )
-            if not user_email:
+            return NotificationEvent(event=event, model=model)
+        except Exception as e:
+            logger.error(f"Error parsing message; it does not match the expected schema: {e}")
+            return None
+
+    def _process_immediate(self, message: str) -> bool:
+        """Process a single notification immediately, returning whether to put into the failed queue"""
+        try:
+            parsed = self._parse_message(message)
+            if not parsed:
+                return False
+            event = parsed.event
+            model = parsed.model
+
+            if event.recipient_email:
+                recipient_email = event.recipient_email
+            else:
+                recipient_email = self.run_and_wait(get_user_email_by_id(event.user_id))
+            if not recipient_email:
                 logger.error(f"User email not found for user {event.user_id}")
                 return False
+
+            should_send = self._should_email_user_based_on_preference(
+                event.user_id, event.type
+            )
             if not should_send:
                 logger.debug(
                     f"User {event.user_id} does not want to receive {event.type} notifications"
                 )
                 return True
-            self.email_sender.send_templated(event.type, user_email, parsed_event)
-            logger.info(f"Processing notification: {parsed_event}")
+
+            self.email_sender.send_templated(event.type, recipient_email, model)
+            logger.info(f"Processing notification: {model}")
             return True
         except Exception as e:
-            logger.error(f"Error processing notification: {e}")
+            logger.exception(f"Error processing notification: {e}")
             return False

+    def _run_queue(
+        self,
+        queue: aio_pika.abc.AbstractQueue,
+        process_func: Callable[[str], bool],
+        error_queue_name: str,
+    ):
+        message: aio_pika.abc.AbstractMessage | None = None
+        try:
+            # The "no_ack" parameter is misleadingly named; think of it as "auto_ack".
+            message = self.run_and_wait(queue.get(timeout=1.0, no_ack=False))
+            result = process_func(message.body.decode())
+            if result:
+                self.run_and_wait(message.ack())
+            else:
+                self.run_and_wait(message.reject(requeue=False))
+
+        except QueueEmpty:
+            logger.debug(f"Queue {error_queue_name} empty")
+        except Exception as e:
+            if message:
+                logger.error(
+                    f"Error in notification service loop, message rejected: {e}"
+                )
+                self.run_and_wait(message.reject(requeue=False))
+            else:
+                logger.error(
+                    f"Error in notification service loop; the message could not be rejected and will have to be removed manually to free space in the queue: {e}"
+                )
+
     def run_service(self):
         logger.info(f"[{self.service_name}] Started notification service")

@@ -192,20 +214,11 @@
         while self.running:
             try:
-                # Process immediate notifications
-                try:
-                    message = self.run_and_wait(immediate_queue.get())
-
-                    if message:
-                        success = self.run_and_wait(
-                            self._process_immediate(message.body.decode())
-                        )
-                        if success:
-                            self.run_and_wait(message.ack())
-                        else:
-
self.run_and_wait(message.reject(requeue=True)) - except QueueEmpty: - logger.debug("Immediate queue empty") + self._run_queue( + queue=immediate_queue, + process_func=self._process_immediate, + error_queue_name="immediate_notifications", + ) time.sleep(0.1) @@ -218,12 +231,3 @@ class NotificationManager(AppService): """Cleanup service resources""" self.running = False super().cleanup() - - # ------- UTILITIES ------- # - - -@thread_cached -def get_db_client() -> "DatabaseManager": - from backend.executor import DatabaseManager - - return get_service_client(DatabaseManager) diff --git a/autogpt_platform/backend/backend/notifications/templates/refund_processed.html.jinja2 b/autogpt_platform/backend/backend/notifications/templates/refund_processed.html.jinja2 new file mode 100644 index 0000000000..244c062a70 --- /dev/null +++ b/autogpt_platform/backend/backend/notifications/templates/refund_processed.html.jinja2 @@ -0,0 +1,51 @@ +{# Refund Processed Notification Email Template #} +{# + Template variables: + data.user_id: the ID of the user + data.user_name: the user's name + data.user_email: the user's email address + data.transaction_id: the transaction ID for the refund request + data.refund_request_id: the refund request ID + data.reason: the reason for the refund request + data.amount: the refund amount in cents (divide by 100 for dollars) + data.balance: the user's latest balance in cents (after the refund deduction) + + Subject: Refund for ${{ data.amount / 100 }} to {{ data.user_name }} has been processed +#} + + + +
+<body>
+<p>Hello Administrator,</p>
+
+<p>
+  This is to notify you that the refund for ${{ data.amount / 100 }} to {{ data.user_name }} has been processed successfully.
+</p>
+
+<p>
+  The user's balance has been updated accordingly after the deduction.
+</p>
+
+<p>
+  Please contact the support team if you have any questions or need further assistance regarding this refund.
+</p>
+
+<p>Best regards,<br>
+Your Notification System</p>
+
+</body>
+</html>
diff --git a/autogpt_platform/backend/backend/notifications/templates/refund_request.html.jinja2 b/autogpt_platform/backend/backend/notifications/templates/refund_request.html.jinja2
new file mode 100644
--- /dev/null
+++ b/autogpt_platform/backend/backend/notifications/templates/refund_request.html.jinja2
+{# Refund Request Notification Email Template #}
+{#
+  Template variables:
+  data.user_id: the ID of the user
+  data.user_name: the user's name
+  data.user_email: the user's email address
+  data.transaction_id: the transaction ID for the refund request
+  data.refund_request_id: the refund request ID
+  data.reason: the reason for the refund request
+  data.amount: the refund amount in cents (divide by 100 for dollars)
+  data.balance: the user's latest balance in cents
+
+  Subject: [ACTION REQUIRED] You got a ${{ data.amount / 100 }} refund request from {{ data.user_name }}
+#}
+
+<body>
+<p>Hello Administrator,</p>
+
+<p>
+  A refund request has been submitted by a user and requires your approval.
+</p>
+
+<p>
+  To approve this refund, please click on the following Stripe link:
+  https://dashboard.stripe.com/test/payments/{{data.transaction_id}}
+</p>
+
+<p>
+  And then click on the "Refund" button.
+</p>
+
+<p>
+  To reject this refund, please follow these steps:
+</p>
+<ol>
+  <li>In the <code>CreditRefundRequest</code> table, find the entry whose
+  <code>transactionKey</code> column matches the Transaction ID: {{ data.transaction_id }}.</li>
+  <li>Set the <code>status</code> field to REJECTED and enter the rejection reason
+  in the <code>result</code> column.</li>
+</ol>
+
+<p>
+  Please take the necessary action at your earliest convenience.
+</p>
+
+<p>Thank you for your prompt attention.</p>
+
+<p>Best regards,<br>
+Your Notification System</p>
+
+</body>
+</html>
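For reviewers, here is a minimal end-to-end sketch of how the SmartLead and ZeroBounce clients introduced in this diff compose. It is not part of the patch: the API keys, campaign name, and lead data are placeholder assumptions, and it simply chains the client methods added in the new `_api.py` and `models.py` files.

```python
# Hypothetical usage sketch (not part of this diff). Assumes real SmartLead and
# ZeroBounce API keys; all names and lead data below are placeholders.
from zerobouncesdk.zb_validate_response import ZBValidateStatus

from backend.blocks.smartlead._api import SmartLeadClient
from backend.blocks.smartlead.models import (
    AddLeadsRequest,
    CreateCampaignRequest,
    LeadInput,
    LeadUploadSettings,
    SaveSequencesRequest,
    Sequence,
    SequenceDelayDetails,
)
from backend.blocks.zerobounce._api import ZeroBounceClient

zb = ZeroBounceClient("ZEROBOUNCE_API_KEY")  # placeholder key
sl = SmartLeadClient("SMARTLEAD_API_KEY")  # placeholder key

lead = LeadInput(first_name="Ada", last_name="Lovelace", email="ada@example.com")

# Only enroll the lead if ZeroBounce considers the address valid.
if zb.validate_email(lead.email, ip_address="").status == ZBValidateStatus.valid:
    campaign = sl.create_campaign(CreateCampaignRequest(name="Launch outreach"))

    # Upload the lead with default settings (all ignore-* flags are False).
    sl.add_leads_to_campaign(
        AddLeadsRequest(
            campaign_id=campaign.id,
            lead_list=[lead],
            settings=LeadUploadSettings(),
        )
    )

    # Save a one-step sequence. Per the Sequence model, leaving the subject
    # blank would instead keep a follow-up in the same email thread.
    sl.save_campaign_sequences(
        campaign_id=campaign.id,
        request=SaveSequencesRequest(
            sequences=[
                Sequence(
                    seq_number=1,
                    seq_delay_details=SequenceDelayDetails(delay_in_days=1),
                    subject="Hello from AutoGPT",
                    email_body="<p>Hi {{first_name}}</p>",
                )
            ]
        ),
    )
```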
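Similarly, a hedged sketch of the refund-notification path wired up in credit.py and notifications.py above: build a RefundRequestData payload, wrap it in a NotificationEventDTO using the new recipient_email override, and queue it on the NotificationManager. All IDs and addresses below are hypothetical; the production code derives the recipient from settings.config.refund_notification_email.

```python
# Hypothetical sketch of the refund-notification path (not part of this diff).
# All IDs and email addresses are placeholders.
from prisma.enums import NotificationType

from backend.data.notifications import NotificationEventDTO, RefundRequestData
from backend.notifications import NotificationManager
from backend.util.service import get_service_client

data = RefundRequestData(
    user_id="user-123",
    user_name="Ada Lovelace",
    user_email="ada@example.com",
    transaction_id="txn_abc123",
    refund_request_id="2025-10-user-123",
    reason="accidental top-up",
    amount=500,  # cents
    balance=1200,  # cents
)

# REFUND_REQUEST uses the IMMEDIATE batching strategy, so queue_notification()
# routes the event to the immediate queue, where _process_immediate() renders
# the refund-request template and emails it to the given recipient.
get_service_client(NotificationManager).queue_notification(
    NotificationEventDTO(
        user_id=data.user_id,
        type=NotificationType.REFUND_REQUEST,
        data=data.model_dump(),
        recipient_email="admin@example.com",  # hypothetical admin address
    )
)
```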