Mirror of https://github.com/Significant-Gravitas/AutoGPT.git (synced 2026-01-13 09:08:02 -05:00)
Compare commits: 197 commits, abhi/fix-g...update-blo
Commits (SHA1):

7cfc0a0c4f c869fd0119 097348e2ba 2f97724ba5 805a988e21 6166e31942 fa56b68071
4207dc2bcf 73d032a937 2011322511 09e1a4081f 3831aa99e7 f15633833c 2352c50433
00add5738f 00b163b0a5 3c23fd5b1a 7c719d6835 0b0c810861 1c1dda57e8 20e93f32ff
8fc00dfe02 185d103371 dfc024acc3 791ab5d671 6dc88e3f21 d3f5718cac aef7b5db7d
df071ca5a5 2b192a0d20 9572415b74 ad1bf2f27f 67991f7c6d c2aad7d2d9 504a0a1250
69ae276bb8 66f2e2a77b c463d1022b 88dc0bbe0b 5ea176e457 134163aa88 ff494bee93
f02e2fd8bb 2012213af5 94f4702f6b abf05d6407 58a8b0ddeb 6a4a8f5a46 240ad756aa
6c62a6a558 d36ac1471c a2b34739c9 7d4775e3b7 d133b474a8 14ef675784 2c9baa6966
43ca817a85 5cf97dd296 c577758b4a ed9d0b85b4 cfb92dd4f9 b892c2b272 d4679bbae8
34954d8df3 780c893c91 a113fdb134 b73cb9fc98 223e3de073 abfbdd0934 d8e38b505c
4ebc34da62 056c539bde 7e2e8843c0 a8cde7c3c5 64e59c5324 0da1461a79 192ff65bbf
388003ff2d 1081b15c91 0e17f5757b 80de45c610 bd237c5b52 6f8f7ac716 cca71f99b9
e54a999ff4 36360b3ade a1607a3b21 8804417c72 6aeec36a3c 6d09a46652 be4b2b1ba1
d2d6346f59 d83984bf38 146bf8e692 015e7d2b10 dac7e4aa57 1764cf9837 fbd1e26524
fb10bacfda f6a12828f0 fba186e5e1 91b88b840e f6b00f07ce a4bd7c9b58 bc643492e8
ba64e05803 01c0284fd2 a08a7bd1e1 25bab0eaa5 bb646e865e dc777ce89a a3955385ec
88b03563f5 f57f6fc1c0 2f7ad767c2 e3e4bc8c96 d3cb3c73d1 cc75eea402 40961cb9f1
13be1a03b7 757381c889 154f0a2b83 bf91cc507f 940fef027a d1b51ad09b 4cb0bbe67e
ebbb7b07cf db3d86bce6 f1f6f87b25 0d5ab20f14 4017c5be70 b7cc83854a 0599083e0e
9e36144b40 ce3c86cb1d e33263c9fc 04f0f64fbb 755a7b620d d54c9d4e7e 852af17294
8889d029d4 e93c7dc89e 7791281b90 cc9ff9e2bb 6b82b9e73b 61bfbeeb01 9518ff4f02
49490e899e d19866bdf5 b6c946ac4f d6f5dcd717 99779f52b2 d4e5a48163 a40bb6f6d6
a393f8bf9f d56931f4cb 03691329be 9813012c12 8f74e58ecc b1f5413dab cc41b2f4ab
f45b4cc243 ed2e5e813d ab33d079e2 a24c869a44 d6b1cf64ed e9982ba9bd 58c1e050f2
f125bb658c ba9c91d0b7 e6254f0e83 3c8cf8bd1e 79f3888d61 ace5d34cc6 100ab90f44
a0ad796432 0cc625ca15 a43b329132 beeadd16f1 b2d5b9efb4 c77f32b23f b92223cf7b
0d47b0ce38 c0409ba0b1 41c8504bdc c4d38e4ff3 b10a275676 8baabb0379 8bf977958a
c73da1f79c 3c3c1ce90a e9a198f5da 706bf0578e 610b613367 5f4a411b15 7b004f07e7
c16d2f94a6
@@ -1,18 +0,0 @@
|
||||
version = 1
|
||||
|
||||
test_patterns = ["**/*.spec.ts","**/*_test.py","**/*_tests.py","**/test_*.py"]
|
||||
|
||||
exclude_patterns = ["classic/**"]
|
||||
|
||||
[[analyzers]]
|
||||
name = "javascript"
|
||||
|
||||
[analyzers.meta]
|
||||
plugins = ["react"]
|
||||
environment = ["nodejs"]
|
||||
|
||||
[[analyzers]]
|
||||
name = "python"
|
||||
|
||||
[analyzers.meta]
|
||||
runtime_version = "3.x.x"
|
||||
.github/workflows/classic-autogpt-ci.yml (vendored, 9 changes)

@@ -115,7 +115,6 @@ jobs:
          poetry run pytest -vv \
            --cov=autogpt --cov-branch --cov-report term-missing --cov-report xml \
            --numprocesses=logical --durations=10 \
            --junitxml=junit.xml -o junit_family=legacy \
            tests/unit tests/integration
        env:
          CI: true
@@ -125,14 +124,8 @@
          AWS_ACCESS_KEY_ID: minioadmin
          AWS_SECRET_ACCESS_KEY: minioadmin

      - name: Upload test results to Codecov
        if: ${{ !cancelled() }} # Run even if tests fail
        uses: codecov/test-results-action@v1
        with:
          token: ${{ secrets.CODECOV_TOKEN }}

      - name: Upload coverage reports to Codecov
        uses: codecov/codecov-action@v5
        uses: codecov/codecov-action@v4
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
          flags: autogpt-agent,${{ runner.os }}
.github/workflows/classic-benchmark-ci.yml (vendored, 9 changes)

@@ -87,20 +87,13 @@ jobs:
          poetry run pytest -vv \
            --cov=agbenchmark --cov-branch --cov-report term-missing --cov-report xml \
            --durations=10 \
            --junitxml=junit.xml -o junit_family=legacy \
            tests
        env:
          CI: true
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}

      - name: Upload test results to Codecov
        if: ${{ !cancelled() }} # Run even if tests fail
        uses: codecov/test-results-action@v1
        with:
          token: ${{ secrets.CODECOV_TOKEN }}

      - name: Upload coverage reports to Codecov
        uses: codecov/codecov-action@v5
        uses: codecov/codecov-action@v4
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
          flags: agbenchmark,${{ runner.os }}
.github/workflows/classic-forge-ci.yml (vendored, 9 changes)

@@ -139,7 +139,6 @@ jobs:
          poetry run pytest -vv \
            --cov=forge --cov-branch --cov-report term-missing --cov-report xml \
            --durations=10 \
            --junitxml=junit.xml -o junit_family=legacy \
            forge
        env:
          CI: true
@@ -149,14 +148,8 @@
          AWS_ACCESS_KEY_ID: minioadmin
          AWS_SECRET_ACCESS_KEY: minioadmin

      - name: Upload test results to Codecov
        if: ${{ !cancelled() }} # Run even if tests fail
        uses: codecov/test-results-action@v1
        with:
          token: ${{ secrets.CODECOV_TOKEN }}

      - name: Upload coverage reports to Codecov
        uses: codecov/codecov-action@v5
        uses: codecov/codecov-action@v4
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
          flags: forge,${{ runner.os }}
.github/workflows/platform-backend-ci.yml (vendored, 15 changes)

@@ -42,14 +42,6 @@ jobs:
          REDIS_PASSWORD: testpassword
        ports:
          - 6379:6379
      rabbitmq:
        image: rabbitmq:3.12-management
        ports:
          - 5672:5672
          - 15672:15672
        env:
          RABBITMQ_DEFAULT_USER: ${{ env.RABBITMQ_DEFAULT_USER }}
          RABBITMQ_DEFAULT_PASS: ${{ env.RABBITMQ_DEFAULT_PASS }}

    steps:
      - name: Checkout repository
@@ -147,13 +139,6 @@
          RUN_ENV: local
          PORT: 8080
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
          # We know these are here, don't report this as a security vulnerability
          # This is used as the default credential for the entire system's RabbitMQ instance
          # If you want to replace this, you can do so by making our entire system generate
          # new credentials for each local user and update the environment variables in
          # the backend service, docker composes, and examples
          RABBITMQ_DEFAULT_USER: 'rabbitmq_user_default'
          RABBITMQ_DEFAULT_PASS: 'k0VMxyIJF9S35f3x2uaw5IWAl6Y536O7'

      # - name: Upload coverage reports to Codecov
      #   uses: codecov/codecov-action@v4
.github/workflows/platform-frontend-ci.yml (vendored, 28 changes)

@@ -37,25 +37,6 @@ jobs:
        run: |
          yarn lint

  type-check:
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v4

      - name: Set up Node.js
        uses: actions/setup-node@v4
        with:
          node-version: "21"

      - name: Install dependencies
        run: |
          yarn install --frozen-lockfile

      - name: Run tsc check
        run: |
          yarn type-check

  test:
    runs-on: ubuntu-latest
    strategy:
@@ -77,8 +58,8 @@
      - name: Free Disk Space (Ubuntu)
        uses: jlumbroso/free-disk-space@main
        with:
          large-packages: false # slow
          docker-images: false # limited benefit
          large-packages: false # slow
          docker-images: false # limited benefit

      - name: Copy default supabase .env
        run: |
@@ -104,12 +85,11 @@
        run: yarn playwright install --with-deps ${{ matrix.browser }}

      - name: Run tests
        timeout-minutes: 20
        run: |
          yarn test --project=${{ matrix.browser }}

      - name: Print Final Docker Compose logs
        if: always()
      - name: Print Docker Compose logs in debug mode
        if: runner.debug
        run: |
          docker compose -f ../docker-compose.yml logs
@@ -25,7 +25,7 @@ jobs:
          close-issue-message: >
            This issue was closed automatically because it has been stale for 10 days
            with no activity.
          days-before-stale: 100
          days-before-stale: 50
          days-before-close: 10
          # Do not touch meta issues:
          exempt-issue-labels: meta,fridge,project management
@@ -170,16 +170,6 @@ repos:
        files: ^classic/benchmark/(agbenchmark|tests)/((?!reports).)*[/.]
        args: [--config=classic/benchmark/.flake8]

  - repo: local
    hooks:
      - id: prettier
        name: Format (Prettier) - AutoGPT Platform - Frontend
        alias: format-platform-frontend
        entry: bash -c 'cd autogpt_platform/frontend && npx prettier --write $(echo "$@" | sed "s|autogpt_platform/frontend/||g")' --
        files: ^autogpt_platform/frontend/
        types: [file]
        language: system

  - repo: local
    # To have watertight type checking, we check *all* the files in an affected
    # project. To trigger on poetry.lock we also reset the file `types` filter.
@@ -231,16 +221,6 @@ repos:
        language: system
        pass_filenames: false

  - repo: local
    hooks:
      - id: tsc
        name: Typecheck - AutoGPT Platform - Frontend
        entry: bash -c 'cd autogpt_platform/frontend && npm run type-check'
        files: ^autogpt_platform/frontend/
        types: [file]
        language: system
        pass_filenames: false

  - repo: local
    hooks:
      - id: pytest
@@ -20,7 +20,6 @@ Instead, please report them via:
- Please provide detailed reports with reproducible steps
- Include the version/commit hash where you discovered the vulnerability
- Allow us a 90-day security fix window before any public disclosure
- After patch is released, allow 30 days for users to update before public disclosure (for a total of 120 days max between update time and fix time)
- Share any potential mitigations or workarounds if known

## Supported Versions
@@ -22,7 +22,7 @@ To run the AutoGPT Platform, follow these steps:

2. Run the following command:
   ```
   git submodule update --init --recursive --progress
   git submodule update --init --recursive
   ```
   This command will initialize and update the submodules in the repository. The `supabase` folder will be cloned to the root directory.
@@ -1,7 +1,7 @@
from .config import Settings
from .depends import requires_admin_user, requires_user
from .jwt_utils import parse_jwt_token
from .middleware import APIKeyValidator, auth_middleware
from .middleware import auth_middleware
from .models import User

__all__ = [
@@ -9,7 +9,6 @@ __all__ = [
    "parse_jwt_token",
    "requires_user",
    "requires_admin_user",
    "APIKeyValidator",
    "auth_middleware",
    "User",
]
@@ -1,10 +1,7 @@
import inspect
import logging
from typing import Any, Callable, Optional

from fastapi import HTTPException, Request, Security
from fastapi.security import APIKeyHeader, HTTPBearer
from starlette.status import HTTP_401_UNAUTHORIZED
from fastapi import HTTPException, Request
from fastapi.security import HTTPBearer

from .config import settings
from .jwt_utils import parse_jwt_token
@@ -32,104 +29,3 @@ async def auth_middleware(request: Request):
    except ValueError as e:
        raise HTTPException(status_code=401, detail=str(e))
    return payload


class APIKeyValidator:
    """
    Configurable API key validator that supports custom validation functions
    for FastAPI applications.

    This class provides a flexible way to implement API key authentication with optional
    custom validation logic. It can be used for simple token matching
    or more complex validation scenarios like database lookups.

    Examples:
        Simple token validation:
        ```python
        validator = APIKeyValidator(
            header_name="X-API-Key",
            expected_token="your-secret-token"
        )

        @app.get("/protected", dependencies=[Depends(validator.get_dependency())])
        def protected_endpoint():
            return {"message": "Access granted"}
        ```

        Custom validation with database lookup:
        ```python
        async def validate_with_db(api_key: str):
            api_key_obj = await db.get_api_key(api_key)
            return api_key_obj if api_key_obj and api_key_obj.is_active else None

        validator = APIKeyValidator(
            header_name="X-API-Key",
            validate_fn=validate_with_db
        )
        ```

    Args:
        header_name (str): The name of the header containing the API key
        expected_token (Optional[str]): The expected API key value for simple token matching
        validate_fn (Optional[Callable]): Custom validation function that takes an API key
            string and returns a boolean or object. Can be async.
        error_status (int): HTTP status code to use for validation errors
        error_message (str): Error message to return when validation fails
    """

    def __init__(
        self,
        header_name: str,
        expected_token: Optional[str] = None,
        validate_fn: Optional[Callable[[str], bool]] = None,
        error_status: int = HTTP_401_UNAUTHORIZED,
        error_message: str = "Invalid API key",
    ):
        # Create the APIKeyHeader as a class property
        self.security_scheme = APIKeyHeader(name=header_name)
        self.expected_token = expected_token
        self.custom_validate_fn = validate_fn
        self.error_status = error_status
        self.error_message = error_message

    async def default_validator(self, api_key: str) -> bool:
        return api_key == self.expected_token

    async def __call__(
        self, request: Request, api_key: str = Security(APIKeyHeader)
    ) -> Any:
        if api_key is None:
            raise HTTPException(status_code=self.error_status, detail="Missing API key")

        # Use custom validation if provided, otherwise use default equality check
        validator = self.custom_validate_fn or self.default_validator
        result = (
            await validator(api_key)
            if inspect.iscoroutinefunction(validator)
            else validator(api_key)
        )

        if not result:
            raise HTTPException(
                status_code=self.error_status, detail=self.error_message
            )

        # Store validation result in request state if it's not just a boolean
        if result is not True:
            request.state.api_key = result

        return result

    def get_dependency(self):
        """
        Returns a callable dependency that FastAPI will recognize as a security scheme
        """

        async def validate_api_key(
            request: Request, api_key: str = Security(self.security_scheme)
        ) -> Any:
            return await self(request, api_key)

        # This helps FastAPI recognize it as a security dependency
        validate_api_key.__name__ = f"validate_{self.security_scheme.model.name}"
        return validate_api_key
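One detail worth calling out in the removed class: when a custom validate_fn returns a non-boolean object (for example a database record), APIKeyValidator stores it on request.state.api_key, so route handlers can read it back. A minimal sketch of that flow, assuming the APIKeyValidator shown above is importable and using a hypothetical lookup_key function in place of a real database query:

```python
from fastapi import Depends, FastAPI, Request

# Hypothetical stand-in for a database lookup; a real validate_fn
# would query storage and return a record object or None.
async def lookup_key(api_key: str):
    return {"owner_id": "user-123"} if api_key == "secret" else None

validator = APIKeyValidator(header_name="X-API-Key", validate_fn=lookup_key)
app = FastAPI()

@app.get("/whoami", dependencies=[Depends(validator.get_dependency())])
async def whoami(request: Request):
    # The validator stored the non-boolean result on request.state.
    return {"owner": request.state.api_key["owner_id"]}
```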
@@ -13,6 +13,7 @@ from typing_extensions import ParamSpec
from .config import SETTINGS

logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG)

P = ParamSpec("P")
T = TypeVar("T")
@@ -18,7 +18,7 @@ ERROR_LOG_FILE = "error.log"
SIMPLE_LOG_FORMAT = "%(asctime)s %(levelname)s %(title)s%(message)s"

DEBUG_LOG_FORMAT = (
    "%(asctime)s %(levelname)s %(filename)s:%(lineno)d %(title)s%(message)s"
    "%(asctime)s %(levelname)s %(filename)s:%(lineno)d" " %(title)s%(message)s"
)
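Both formats reference %(title)s, which is not a standard LogRecord attribute; it has to be injected by the logging setup (for example via extra= or a Filter), otherwise formatting fails. A minimal sketch of one way to make the format safe, assuming injection through a Filter (illustrative, not the project's actual setup):

```python
import logging

SIMPLE_LOG_FORMAT = "%(asctime)s %(levelname)s %(title)s%(message)s"

class EnsureTitle(logging.Filter):
    # Gives every record a 'title' attribute so %(title)s never fails.
    def filter(self, record: logging.LogRecord) -> bool:
        if not hasattr(record, "title"):
            record.title = ""
        return True

handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter(SIMPLE_LOG_FORMAT))
handler.addFilter(EnsureTitle())
logger = logging.getLogger("demo")
logger.addHandler(handler)

logger.warning("custom title", extra={"title": "[boot] "})
logger.warning("no title supplied")  # still formats cleanly
```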
autogpt_platform/autogpt_libs/poetry.lock (generated, 439 changes): file diff suppressed because it is too large.
@@ -9,19 +9,19 @@ packages = [{ include = "autogpt_libs" }]
[tool.poetry.dependencies]
colorama = "^0.4.6"
expiringdict = "^1.2.2"
google-cloud-logging = "^3.11.4"
pydantic = "^2.10.6"
pydantic-settings = "^2.7.1"
google-cloud-logging = "^3.11.3"
pydantic = "^2.10.3"
pydantic-settings = "^2.7.0"
pyjwt = "^2.10.1"
pytest-asyncio = "^0.25.3"
pytest-asyncio = "^0.25.0"
pytest-mock = "^3.14.0"
python = ">=3.10,<4.0"
python-dotenv = "^1.0.1"
supabase = "^2.13.0"
supabase = "^2.10.0"

[tool.poetry.group.dev.dependencies]
redis = "^5.2.1"
ruff = "^0.9.6"
ruff = "^0.8.6"

[build-system]
requires = ["poetry-core"]
@@ -15,9 +15,6 @@ REDIS_PORT=6379
REDIS_PASSWORD=password

ENABLE_CREDIT=false
STRIPE_API_KEY=
STRIPE_WEBHOOK_SECRET=

# What environment things should be logged under: local dev or prod
APP_ENV=local
# What environment to behave as: "local" or "cloud"
@@ -25,26 +22,12 @@ BEHAVE_AS=local
PYRO_HOST=localhost
SENTRY_DSN=

# Email For Postmark so we can send emails
POSTMARK_SERVER_API_TOKEN=
POSTMARK_SENDER_EMAIL=invalid@invalid.com
POSTMARK_WEBHOOK_TOKEN=

## User auth with Supabase is required for any of the 3rd party integrations with auth to work.
ENABLE_AUTH=true
SUPABASE_URL=http://localhost:8000
SUPABASE_SERVICE_ROLE_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJzZXJ2aWNlX3JvbGUiLAogICAgImlzcyI6ICJzdXBhYmFzZS1kZW1vIiwKICAgICJpYXQiOiAxNjQxNzY5MjAwLAogICAgImV4cCI6IDE3OTk1MzU2MDAKfQ.DaYlNEoUrrEn2Ig7tqibS-PHK5vgusbcbo7X36XVt4Q
SUPABASE_JWT_SECRET=your-super-secret-jwt-token-with-at-least-32-characters-long

# RabbitMQ credentials -- Used for communication between services
RABBITMQ_HOST=localhost
RABBITMQ_PORT=5672
RABBITMQ_DEFAULT_USER=rabbitmq_user_default
RABBITMQ_DEFAULT_PASS=k0VMxyIJF9S35f3x2uaw5IWAl6Y536O7

## GCS bucket is required for marketplace and library functionality
MEDIA_GCS_BUCKET_NAME=

## For local development, you may need to set FRONTEND_BASE_URL for the OAuth flow
## for integrations to work. Defaults to the value of PLATFORM_BASE_URL if not set.
# FRONTEND_BASE_URL=http://localhost:3000
@@ -53,7 +36,7 @@ MEDIA_GCS_BUCKET_NAME=
## to use the platform's webhook-related functionality.
## If you are developing locally, you can use something like ngrok to get a publc URL
## and tunnel it to your locally running backend.
PLATFORM_BASE_URL=http://localhost:3000
PLATFORM_BASE_URL=https://your-public-url-here

## == INTEGRATION CREDENTIALS == ##
# Each set of server side credentials is required for the corresponding 3rd party
@@ -89,20 +72,6 @@ GOOGLE_CLIENT_SECRET=
TWITTER_CLIENT_ID=
TWITTER_CLIENT_SECRET=

# Linear App
# Make a new workspace for your OAuth APP -- trust me
# https://linear.app/settings/api/applications/new
# Callback URL: http://localhost:3000/auth/integrations/oauth_callback
LINEAR_CLIENT_ID=
LINEAR_CLIENT_SECRET=

# To obtain Todoist API credentials:
# 1. Create a Todoist account at todoist.com
# 2. Visit the Developer Console: https://developer.todoist.com/appconsole.html
# 3. Click "Create new app"
# 4. Once created, copy your Client ID and Client Secret below
TODOIST_CLIENT_ID=
TODOIST_CLIENT_SECRET=

## ===== OPTIONAL API KEYS ===== ##

@@ -113,12 +82,10 @@ GROQ_API_KEY=
OPEN_ROUTER_API_KEY=

# Reddit
# Go to https://www.reddit.com/prefs/apps and create a new app
# Choose "script" for the type
# Fill in the redirect uri as <your_frontend_url>/auth/integrations/oauth_callback, e.g. http://localhost:3000/auth/integrations/oauth_callback
REDDIT_CLIENT_ID=
REDDIT_CLIENT_SECRET=
REDDIT_USER_AGENT="AutoGPT:1.0 (by /u/autogpt)"
REDDIT_USERNAME=
REDDIT_PASSWORD=

# Discord
DISCORD_BOT_TOKEN=
@@ -163,21 +130,9 @@ EXA_API_KEY=
# E2B
E2B_API_KEY=

# Mem0
MEM0_API_KEY=

# Nvidia
NVIDIA_API_KEY=

# Apollo
APOLLO_API_KEY=

# SmartLead
SMARTLEAD_API_KEY=

# ZeroBounce
ZEROBOUNCE_API_KEY=

# Logging Configuration
LOG_LEVEL=INFO
ENABLE_CLOUD_LOGGING=false
@@ -66,17 +66,10 @@ We use the Poetry to manage the dependencies. To set up the project, follow thes

### Starting the server without Docker

To run the server locally, start in the autogpt_platform folder:

```sh
cd ..
```

Run the following command to run database in docker but the application locally:

```sh
docker compose --profile local up deps --build --detach
cd backend
poetry run app
```
@@ -1,30 +1,22 @@
import logging
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from backend.util.process import AppProcess

logger = logging.getLogger(__name__)


def run_processes(*processes: "AppProcess", **kwargs):
    """
    Execute all processes in the app. The last process is run in the foreground.
    Includes enhanced error handling and process lifecycle management.
    """
    try:
        # Run all processes except the last one in the background.
        for process in processes[:-1]:
            process.start(background=True, **kwargs)

        # Run the last process in the foreground.
        # Run the last process in the foreground
        processes[-1].start(background=False, **kwargs)
    finally:
        for process in processes:
            try:
                process.stop()
            except Exception as e:
                logger.exception(f"[{process.service_name}] unable to stop: {e}")
            process.stop()


def main(**kwargs):
@@ -33,7 +25,6 @@ def main(**kwargs):
    """

    from backend.executor import DatabaseManager, ExecutionManager, ExecutionScheduler
    from backend.notifications import NotificationManager
    from backend.server.rest_api import AgentServer
    from backend.server.ws_api import WebsocketServer

@@ -41,7 +32,6 @@ def main(**kwargs):
        DatabaseManager(),
        ExecutionManager(),
        ExecutionScheduler(),
        NotificationManager(),
        WebsocketServer(),
        AgentServer(),
        **kwargs,
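The contract of run_processes is easiest to see with a stub: every process except the last starts with background=True, the last one blocks in the foreground, and the finally block stops everything. A sketch using a hypothetical stand-in for backend.util.process.AppProcess (duck-typed; not the real class):

```python
class StubProcess:
    # Hypothetical stand-in exposing only the attributes run_processes
    # uses: service_name, start(background=...), and stop().
    def __init__(self, service_name: str):
        self.service_name = service_name

    def start(self, background: bool, **kwargs):
        mode = "background" if background else "foreground"
        print(f"[{self.service_name}] started ({mode})")

    def stop(self):
        print(f"[{self.service_name}] stopped")

# run_processes(StubProcess("db"), StubProcess("api")) starts "db" in
# the background, runs "api" in the foreground, then stops both.
```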
@@ -1,5 +1,4 @@
import logging
from typing import Any

from autogpt_libs.utils.cache import thread_cached

@@ -14,7 +13,6 @@ from backend.data.block import (
)
from backend.data.execution import ExecutionStatus
from backend.data.model import SchemaField
from backend.util import json

logger = logging.getLogger(__name__)

@@ -44,23 +42,6 @@ class AgentExecutorBlock(Block):
    input_schema: dict = SchemaField(description="Input schema for the graph")
    output_schema: dict = SchemaField(description="Output schema for the graph")

    @classmethod
    def get_input_schema(cls, data: BlockInput) -> dict[str, Any]:
        return data.get("input_schema", {})

    @classmethod
    def get_input_defaults(cls, data: BlockInput) -> BlockInput:
        return data.get("data", {})

    @classmethod
    def get_missing_input(cls, data: BlockInput) -> set[str]:
        required_fields = cls.get_input_schema(data).get("required", [])
        return set(required_fields) - set(data)

    @classmethod
    def get_mismatch_error(cls, data: BlockInput) -> str | None:
        return json.validate_with_jsonschema(cls.get_input_schema(data), data)

    class Output(BlockSchema):
        pass
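The removed helpers are plain schema plumbing; get_missing_input, for instance, is just the set difference between the schema's required fields and the keys present in the payload. An illustrative call (hypothetical field names):

```python
data = {
    "input_schema": {"required": ["name", "prompt"]},
    "name": "my-agent",
}
# required {"name", "prompt"} minus provided keys -> {"prompt"}
assert AgentExecutorBlock.get_missing_input(data) == {"prompt"}
```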
@@ -1,108 +0,0 @@
import logging
from typing import List

from backend.blocks.apollo._auth import ApolloCredentials
from backend.blocks.apollo.models import (
    Contact,
    Organization,
    SearchOrganizationsRequest,
    SearchOrganizationsResponse,
    SearchPeopleRequest,
    SearchPeopleResponse,
)
from backend.util.request import Requests

logger = logging.getLogger(name=__name__)


class ApolloClient:
    """Client for the Apollo API"""

    API_URL = "https://api.apollo.io/api/v1"

    def __init__(self, credentials: ApolloCredentials):
        self.credentials = credentials
        self.requests = Requests()

    def _get_headers(self) -> dict[str, str]:
        return {"x-api-key": self.credentials.api_key.get_secret_value()}

    def search_people(self, query: SearchPeopleRequest) -> List[Contact]:
        """Search for people in Apollo"""
        response = self.requests.get(
            f"{self.API_URL}/mixed_people/search",
            headers=self._get_headers(),
            params=query.model_dump(exclude={"credentials", "max_results"}),
        )
        parsed_response = SearchPeopleResponse(**response.json())
        if parsed_response.pagination.total_entries == 0:
            return []

        people = parsed_response.people

        # handle pagination
        if (
            query.max_results is not None
            and query.max_results < parsed_response.pagination.total_entries
            and len(people) < query.max_results
        ):
            while (
                len(people) < query.max_results
                and query.page < parsed_response.pagination.total_pages
                and len(parsed_response.people) > 0
            ):
                query.page += 1
                response = self.requests.get(
                    f"{self.API_URL}/mixed_people/search",
                    headers=self._get_headers(),
                    params=query.model_dump(exclude={"credentials", "max_results"}),
                )
                parsed_response = SearchPeopleResponse(**response.json())
                people.extend(parsed_response.people[: query.max_results - len(people)])

        logger.info(f"Found {len(people)} people")
        return people[: query.max_results] if query.max_results else people

    def search_organizations(
        self, query: SearchOrganizationsRequest
    ) -> List[Organization]:
        """Search for organizations in Apollo"""
        response = self.requests.get(
            f"{self.API_URL}/mixed_companies/search",
            headers=self._get_headers(),
            params=query.model_dump(exclude={"credentials", "max_results"}),
        )
        parsed_response = SearchOrganizationsResponse(**response.json())
        if parsed_response.pagination.total_entries == 0:
            return []

        organizations = parsed_response.organizations

        # handle pagination
        if (
            query.max_results is not None
            and query.max_results < parsed_response.pagination.total_entries
            and len(organizations) < query.max_results
        ):
            while (
                len(organizations) < query.max_results
                and query.page < parsed_response.pagination.total_pages
                and len(parsed_response.organizations) > 0
            ):
                query.page += 1
                response = self.requests.get(
                    f"{self.API_URL}/mixed_companies/search",
                    headers=self._get_headers(),
                    params=query.model_dump(exclude={"credentials", "max_results"}),
                )
                parsed_response = SearchOrganizationsResponse(**response.json())
                organizations.extend(
                    parsed_response.organizations[
                        : query.max_results - len(organizations)
                    ]
                )

        logger.info(f"Found {len(organizations)} organizations")
        return (
            organizations[: query.max_results] if query.max_results else organizations
        )
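search_people and search_organizations repeat the same page-fetching loop almost verbatim. A sketch of how that shared logic could be factored into one helper (illustrative refactor, not part of the diff):

```python
from typing import Callable

def collect_pages(
    fetch_page: Callable[[int], list],
    first_batch: list,
    total_pages: int,
    max_results: int | None,
) -> list:
    """Request successive pages until max_results items are collected,
    pages run out, or a page comes back empty."""
    items = list(first_batch)
    batch = first_batch
    page = 1
    while (
        max_results is not None
        and len(items) < max_results
        and page < total_pages
        and batch
    ):
        page += 1
        batch = fetch_page(page)
        items.extend(batch[: max_results - len(items)])
    return items[:max_results] if max_results else items
```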
@@ -1,35 +0,0 @@
from typing import Literal

from pydantic import SecretStr

from backend.data.model import APIKeyCredentials, CredentialsField, CredentialsMetaInput
from backend.integrations.providers import ProviderName

ApolloCredentials = APIKeyCredentials
ApolloCredentialsInput = CredentialsMetaInput[
    Literal[ProviderName.APOLLO],
    Literal["api_key"],
]

TEST_CREDENTIALS = APIKeyCredentials(
    id="01234567-89ab-cdef-0123-456789abcdef",
    provider="apollo",
    api_key=SecretStr("mock-apollo-api-key"),
    title="Mock Apollo API key",
    expires_at=None,
)
TEST_CREDENTIALS_INPUT = {
    "provider": TEST_CREDENTIALS.provider,
    "id": TEST_CREDENTIALS.id,
    "type": TEST_CREDENTIALS.type,
    "title": TEST_CREDENTIALS.title,
}


def ApolloCredentialsField() -> ApolloCredentialsInput:
    """
    Creates a Apollo credentials input on a block.
    """
    return CredentialsField(
        description="The Apollo integration can be used with an API Key.",
    )
@@ -1,543 +0,0 @@
from enum import Enum
from typing import Any, Optional

from pydantic import BaseModel

from backend.data.model import SchemaField


class PrimaryPhone(BaseModel):
    """A primary phone in Apollo"""

    number: str
    source: str
    sanitized_number: str


class SenorityLevels(str, Enum):
    """Seniority levels in Apollo"""

    OWNER = "owner"
    FOUNDER = "founder"
    C_SUITE = "c_suite"
    PARTNER = "partner"
    VP = "vp"
    HEAD = "head"
    DIRECTOR = "director"
    MANAGER = "manager"
    SENIOR = "senior"
    ENTRY = "entry"
    INTERN = "intern"


class ContactEmailStatuses(str, Enum):
    """Contact email statuses in Apollo"""

    VERIFIED = "verified"
    UNVERIFIED = "unverified"
    LIKELY_TO_ENGAGE = "likely_to_engage"
    UNAVAILABLE = "unavailable"


class RuleConfigStatus(BaseModel):
    """A rule config status in Apollo"""

    _id: str
    created_at: str
    rule_action_config_id: str
    rule_config_id: str
    status_cd: str
    updated_at: str
    id: str
    key: str


class ContactCampaignStatus(BaseModel):
    """A contact campaign status in Apollo"""

    id: str
    emailer_campaign_id: str
    send_email_from_user_id: str
    inactive_reason: str
    status: str
    added_at: str
    added_by_user_id: str
    finished_at: str
    paused_at: str
    auto_unpause_at: str
    send_email_from_email_address: str
    send_email_from_email_account_id: str
    manually_set_unpause: str
    failure_reason: str
    current_step_id: str
    in_response_to_emailer_message_id: str
    cc_emails: str
    bcc_emails: str
    to_emails: str


class Account(BaseModel):
    """An account in Apollo"""

    id: str
    name: str
    website_url: str
    blog_url: str
    angellist_url: str
    linkedin_url: str
    twitter_url: str
    facebook_url: str
    primary_phone: PrimaryPhone
    languages: list[str]
    alexa_ranking: int
    phone: str
    linkedin_uid: str
    founded_year: int
    publicly_traded_symbol: str
    publicly_traded_exchange: str
    logo_url: str
    chrunchbase_url: str
    primary_domain: str
    domain: str
    team_id: str
    organization_id: str
    account_stage_id: str
    source: str
    original_source: str
    creator_id: str
    owner_id: str
    created_at: str
    phone_status: str
    hubspot_id: str
    salesforce_id: str
    crm_owner_id: str
    parent_account_id: str
    sanitized_phone: str
    # no listed type on the API docs
    account_playbook_statues: list[Any]
    account_rule_config_statuses: list[RuleConfigStatus]
    existence_level: str
    label_ids: list[str]
    typed_custom_fields: Any
    custom_field_errors: Any
    modality: str
    source_display_name: str
    salesforce_record_id: str
    crm_record_url: str


class ContactEmail(BaseModel):
    """A contact email in Apollo"""

    email: str = ""
    email_md5: str = ""
    email_sha256: str = ""
    email_status: str = ""
    email_source: str = ""
    extrapolated_email_confidence: str = ""
    position: int = 0
    email_from_customer: str = ""
    free_domain: bool = True


class EmploymentHistory(BaseModel):
    """An employment history in Apollo"""

    class Config:
        extra = "allow"
        arbitrary_types_allowed = True
        from_attributes = True
        populate_by_name = True

    _id: Optional[str] = None
    created_at: Optional[str] = None
    current: Optional[bool] = None
    degree: Optional[str] = None
    description: Optional[str] = None
    emails: Optional[str] = None
    end_date: Optional[str] = None
    grade_level: Optional[str] = None
    kind: Optional[str] = None
    major: Optional[str] = None
    organization_id: Optional[str] = None
    organization_name: Optional[str] = None
    raw_address: Optional[str] = None
    start_date: Optional[str] = None
    title: Optional[str] = None
    updated_at: Optional[str] = None
    id: Optional[str] = None
    key: Optional[str] = None


class Breadcrumb(BaseModel):
    """A breadcrumb in Apollo"""

    label: Optional[str] = "N/A"
    signal_field_name: Optional[str] = "N/A"
    value: str | list | None = "N/A"
    display_name: Optional[str] = "N/A"


class TypedCustomField(BaseModel):
    """A typed custom field in Apollo"""

    id: Optional[str] = "N/A"
    value: Optional[str] = "N/A"


class Pagination(BaseModel):
    """Pagination in Apollo"""

    class Config:
        extra = "allow"  # Allow extra fields
        arbitrary_types_allowed = True  # Allow any type
        from_attributes = True  # Allow from_orm
        populate_by_name = True  # Allow field aliases to work both ways

    page: int = 0
    per_page: int = 0
    total_entries: int = 0
    total_pages: int = 0


class DialerFlags(BaseModel):
    """A dialer flags in Apollo"""

    country_name: str
    country_enabled: bool
    high_risk_calling_enabled: bool
    potential_high_risk_number: bool


class PhoneNumber(BaseModel):
    """A phone number in Apollo"""

    raw_number: str = ""
    sanitized_number: str = ""
    type: str = ""
    position: int = 0
    status: str = ""
    dnc_status: str = ""
    dnc_other_info: str = ""
    dailer_flags: DialerFlags = DialerFlags(
        country_name="",
        country_enabled=True,
        high_risk_calling_enabled=True,
        potential_high_risk_number=True,
    )


class Organization(BaseModel):
    """An organization in Apollo"""

    class Config:
        extra = "allow"
        arbitrary_types_allowed = True
        from_attributes = True
        populate_by_name = True

    id: Optional[str] = "N/A"
    name: Optional[str] = "N/A"
    website_url: Optional[str] = "N/A"
    blog_url: Optional[str] = "N/A"
    angellist_url: Optional[str] = "N/A"
    linkedin_url: Optional[str] = "N/A"
    twitter_url: Optional[str] = "N/A"
    facebook_url: Optional[str] = "N/A"
    primary_phone: Optional[PrimaryPhone] = PrimaryPhone(
        number="N/A", source="N/A", sanitized_number="N/A"
    )
    languages: list[str] = []
    alexa_ranking: Optional[int] = 0
    phone: Optional[str] = "N/A"
    linkedin_uid: Optional[str] = "N/A"
    founded_year: Optional[int] = 0
    publicly_traded_symbol: Optional[str] = "N/A"
    publicly_traded_exchange: Optional[str] = "N/A"
    logo_url: Optional[str] = "N/A"
    chrunchbase_url: Optional[str] = "N/A"
    primary_domain: Optional[str] = "N/A"
    sanitized_phone: Optional[str] = "N/A"
    owned_by_organization_id: Optional[str] = "N/A"
    intent_strength: Optional[str] = "N/A"
    show_intent: bool = True
    has_intent_signal_account: Optional[bool] = True
    intent_signal_account: Optional[str] = "N/A"


class Contact(BaseModel):
    """A contact in Apollo"""

    class Config:
        extra = "allow"
        arbitrary_types_allowed = True
        from_attributes = True
        populate_by_name = True

    contact_roles: list[Any] = []
    id: Optional[str] = None
    first_name: Optional[str] = None
    last_name: Optional[str] = None
    name: Optional[str] = None
    linkedin_url: Optional[str] = None
    title: Optional[str] = None
    contact_stage_id: Optional[str] = None
    owner_id: Optional[str] = None
    creator_id: Optional[str] = None
    person_id: Optional[str] = None
    email_needs_tickling: bool = True
    organization_name: Optional[str] = None
    source: Optional[str] = None
    original_source: Optional[str] = None
    organization_id: Optional[str] = None
    headline: Optional[str] = None
    photo_url: Optional[str] = None
    present_raw_address: Optional[str] = None
    linkededin_uid: Optional[str] = None
    extrapolated_email_confidence: Optional[float] = None
    salesforce_id: Optional[str] = None
    salesforce_lead_id: Optional[str] = None
    salesforce_contact_id: Optional[str] = None
    saleforce_account_id: Optional[str] = None
    crm_owner_id: Optional[str] = None
    created_at: Optional[str] = None
    emailer_campaign_ids: list[str] = []
    direct_dial_status: Optional[str] = None
    direct_dial_enrichment_failed_at: Optional[str] = None
    email_status: Optional[str] = None
    email_source: Optional[str] = None
    account_id: Optional[str] = None
    last_activity_date: Optional[str] = None
    hubspot_vid: Optional[str] = None
    hubspot_company_id: Optional[str] = None
    crm_id: Optional[str] = None
    sanitized_phone: Optional[str] = None
    merged_crm_ids: Optional[str] = None
    updated_at: Optional[str] = None
    queued_for_crm_push: bool = True
    suggested_from_rule_engine_config_id: Optional[str] = None
    email_unsubscribed: Optional[str] = None
    label_ids: list[Any] = []
    has_pending_email_arcgate_request: bool = True
    has_email_arcgate_request: bool = True
    existence_level: Optional[str] = None
    email: Optional[str] = None
    email_from_customer: Optional[str] = None
    typed_custom_fields: list[TypedCustomField] = []
    custom_field_errors: Any = None
    salesforce_record_id: Optional[str] = None
    crm_record_url: Optional[str] = None
    email_status_unavailable_reason: Optional[str] = None
    email_true_status: Optional[str] = None
    updated_email_true_status: bool = True
    contact_rule_config_statuses: list[RuleConfigStatus] = []
    source_display_name: Optional[str] = None
    twitter_url: Optional[str] = None
    contact_campaign_statuses: list[ContactCampaignStatus] = []
    state: Optional[str] = None
    city: Optional[str] = None
    country: Optional[str] = None
    account: Optional[Account] = None
    contact_emails: list[ContactEmail] = []
    organization: Optional[Organization] = None
    employment_history: list[EmploymentHistory] = []
    time_zone: Optional[str] = None
    intent_strength: Optional[str] = None
    show_intent: bool = True
    phone_numbers: list[PhoneNumber] = []
    account_phone_note: Optional[str] = None
    free_domain: bool = True
    is_likely_to_engage: bool = True
    email_domain_catchall: bool = True
    contact_job_change_event: Optional[str] = None


class SearchOrganizationsRequest(BaseModel):
    """Request for Apollo's search organizations API"""

    organization_num_empoloyees_range: list[int] = SchemaField(
        description="""The number range of employees working for the company. This enables you to find companies based on headcount. You can add multiple ranges to expand your search results.

Each range you add needs to be a string, with the upper and lower numbers of the range separated only by a comma.""",
        default=[0, 1000000],
    )

    organization_locations: list[str] = SchemaField(
        description="""The location of the company headquarters. You can search across cities, US states, and countries.

If a company has several office locations, results are still based on the headquarters location. For example, if you search chicago but a company's HQ location is in boston, any Boston-based companies will not appearch in your search results, even if they match other parameters.

To exclude companies based on location, use the organization_not_locations parameter.
""",
        default=[],
    )
    organizations_not_locations: list[str] = SchemaField(
        description="""Exclude companies from search results based on the location of the company headquarters. You can use cities, US states, and countries as locations to exclude.

This parameter is useful for ensuring you do not prospect in an undesirable territory. For example, if you use ireland as a value, no Ireland-based companies will appear in your search results.
""",
        default=[],
    )
    q_organization_keyword_tags: list[str] = SchemaField(
        description="""Filter search results based on keywords associated with companies. For example, you can enter mining as a value to return only companies that have an association with the mining industry."""
    )
    q_organization_name: str = SchemaField(
        description="""Filter search results to include a specific company name.

If the value you enter for this parameter does not match with a company's name, the company will not appear in search results, even if it matches other parameters. Partial matches are accepted. For example, if you filter by the value marketing, a company called NY Marketing Unlimited would still be eligible as a search result, but NY Market Analysis would not be eligible."""
    )
    organization_ids: list[str] = SchemaField(
        description="""The Apollo IDs for the companies you want to include in your search results. Each company in the Apollo database is assigned a unique ID.

To find IDs, identify the values for organization_id when you call this endpoint.""",
        default=[],
    )
    max_results: int = SchemaField(
        description="""The maximum number of results to return. If you don't specify this parameter, the default is 100.""",
        default=100,
        ge=1,
        le=50000,
        advanced=True,
    )

    page: int = SchemaField(
        description="""The page number of the Apollo data that you want to retrieve.

Use this parameter in combination with the per_page parameter to make search results for navigable and improve the performance of the endpoint.""",
        default=1,
    )
    per_page: int = SchemaField(
        description="""The number of search results that should be returned for each page. Limited the number of results per page improves the endpoint's performance.

Use the page parameter to search the different pages of data.""",
        default=100,
    )


class SearchOrganizationsResponse(BaseModel):
    """Response from Apollo's search organizations API"""

    breadcrumbs: list[Breadcrumb] = []
    partial_results_only: bool = True
    has_join: bool = True
    disable_eu_prospecting: bool = True
    partial_results_limit: int = 0
    pagination: Pagination = Pagination(
        page=0, per_page=0, total_entries=0, total_pages=0
    )
    # no listed type on the API docs
    accounts: list[Any] = []
    organizations: list[Organization] = []
    models_ids: list[str] = []
    num_fetch_result: Optional[str] = "N/A"
    derived_params: Optional[str] = "N/A"


class SearchPeopleRequest(BaseModel):
    """Request for Apollo's search people API"""

    person_titles: list[str] = SchemaField(
        description="""Job titles held by the people you want to find. For a person to be included in search results, they only need to match 1 of the job titles you add. Adding more job titles expands your search results.

Results also include job titles with the same terms, even if they are not exact matches. For example, searching for marketing manager might return people with the job title content marketing manager.

Use this parameter in combination with the person_seniorities[] parameter to find people based on specific job functions and seniority levels.
""",
        default=[],
        placeholder="marketing manager",
    )
    person_locations: list[str] = SchemaField(
        description="""The location where people live. You can search across cities, US states, and countries.

To find people based on the headquarters locations of their current employer, use the organization_locations parameter.""",
        default=[],
    )
    person_seniorities: list[SenorityLevels] = SchemaField(
        description="""The job seniority that people hold within their current employer. This enables you to find people that currently hold positions at certain reporting levels, such as Director level or senior IC level.

For a person to be included in search results, they only need to match 1 of the seniorities you add. Adding more seniorities expands your search results.

Searches only return results based on their current job title, so searching for Director-level employees only returns people that currently hold a Director-level title. If someone was previously a Director, but is currently a VP, they would not be included in your search results.

Use this parameter in combination with the person_titles[] parameter to find people based on specific job functions and seniority levels.""",
        default=[],
    )
    organization_locations: list[str] = SchemaField(
        description="""The location of the company headquarters for a person's current employer. You can search across cities, US states, and countries.

If a company has several office locations, results are still based on the headquarters location. For example, if you search chicago but a company's HQ location is in boston, people that work for the Boston-based company will not appear in your results, even if they match other parameters.

To find people based on their personal location, use the person_locations parameter.""",
        default=[],
    )
    q_organization_domains: list[str] = SchemaField(
        description="""The domain name for the person's employer. This can be the current employer or a previous employer. Do not include www., the @ symbol, or similar.

You can add multiple domains to search across companies.

Examples: apollo.io and microsoft.com""",
        default=[],
    )
    contact_email_statuses: list[ContactEmailStatuses] = SchemaField(
        description="""The email statuses for the people you want to find. You can add multiple statuses to expand your search.""",
        default=[],
    )
    organization_ids: list[str] = SchemaField(
        description="""The Apollo IDs for the companies (employers) you want to include in your search results. Each company in the Apollo database is assigned a unique ID.

To find IDs, call the Organization Search endpoint and identify the values for organization_id.""",
        default=[],
    )
    organization_num_empoloyees_range: list[int] = SchemaField(
        description="""The number range of employees working for the company. This enables you to find companies based on headcount. You can add multiple ranges to expand your search results.

Each range you add needs to be a string, with the upper and lower numbers of the range separated only by a comma.""",
        default=[],
    )
    q_keywords: str = SchemaField(
        description="""A string of words over which we want to filter the results""",
        default="",
    )
    page: int = SchemaField(
        description="""The page number of the Apollo data that you want to retrieve.

Use this parameter in combination with the per_page parameter to make search results for navigable and improve the performance of the endpoint.""",
        default=1,
    )
    per_page: int = SchemaField(
        description="""The number of search results that should be returned for each page. Limited the number of results per page improves the endpoint's performance.

Use the page parameter to search the different pages of data.""",
        default=100,
    )
    max_results: int = SchemaField(
        description="""The maximum number of results to return. If you don't specify this parameter, the default is 100.""",
        default=100,
        ge=1,
        le=50000,
        advanced=True,
    )


class SearchPeopleResponse(BaseModel):
    """Response from Apollo's search people API"""

    class Config:
        extra = "allow"  # Allow extra fields
        arbitrary_types_allowed = True  # Allow any type
        from_attributes = True  # Allow from_orm
        populate_by_name = True  # Allow field aliases to work both ways

    breadcrumbs: list[Breadcrumb] = []
    partial_results_only: bool = True
    has_join: bool = True
    disable_eu_prospecting: bool = True
    partial_results_limit: int = 0
    pagination: Pagination = Pagination(
        page=0, per_page=0, total_entries=0, total_pages=0
    )
    contacts: list[Contact] = []
    people: list[Contact] = []
    model_ids: list[str] = []
    num_fetch_result: Optional[str] = "N/A"
    derived_params: Optional[str] = "N/A"
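A side note on the repeated inner Config in these models: extra = "allow" makes them tolerant of fields Apollo adds or renames instead of failing validation. A quick illustration with a trimmed copy of Pagination (standalone; not importing the deleted module):

```python
from pydantic import BaseModel

class Pagination(BaseModel):
    class Config:
        extra = "allow"  # unknown keys are kept, not rejected

    page: int = 0
    per_page: int = 0

p = Pagination(page=2, brand_new_field="kept")
assert p.page == 2
assert p.brand_new_field == "kept"  # extra field retained
```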
@@ -1,219 +0,0 @@
from backend.blocks.apollo._api import ApolloClient
from backend.blocks.apollo._auth import (
    TEST_CREDENTIALS,
    TEST_CREDENTIALS_INPUT,
    ApolloCredentials,
    ApolloCredentialsInput,
)
from backend.blocks.apollo.models import (
    Organization,
    PrimaryPhone,
    SearchOrganizationsRequest,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField


class SearchOrganizationsBlock(Block):
    """Search for organizations in Apollo"""

    class Input(BlockSchema):
        organization_num_empoloyees_range: list[int] = SchemaField(
            description="""The number range of employees working for the company. This enables you to find companies based on headcount. You can add multiple ranges to expand your search results.

Each range you add needs to be a string, with the upper and lower numbers of the range separated only by a comma.""",
            default=[0, 1000000],
        )

        organization_locations: list[str] = SchemaField(
            description="""The location of the company headquarters. You can search across cities, US states, and countries.

If a company has several office locations, results are still based on the headquarters location. For example, if you search chicago but a company's HQ location is in boston, any Boston-based companies will not appearch in your search results, even if they match other parameters.

To exclude companies based on location, use the organization_not_locations parameter.
""",
            default=[],
        )
        organizations_not_locations: list[str] = SchemaField(
            description="""Exclude companies from search results based on the location of the company headquarters. You can use cities, US states, and countries as locations to exclude.

This parameter is useful for ensuring you do not prospect in an undesirable territory. For example, if you use ireland as a value, no Ireland-based companies will appear in your search results.
""",
            default=[],
        )
        q_organization_keyword_tags: list[str] = SchemaField(
            description="""Filter search results based on keywords associated with companies. For example, you can enter mining as a value to return only companies that have an association with the mining industry.""",
            default=[],
        )
        q_organization_name: str = SchemaField(
            description="""Filter search results to include a specific company name.

If the value you enter for this parameter does not match with a company's name, the company will not appear in search results, even if it matches other parameters. Partial matches are accepted. For example, if you filter by the value marketing, a company called NY Marketing Unlimited would still be eligible as a search result, but NY Market Analysis would not be eligible.""",
            default="",
            advanced=False,
        )
        organization_ids: list[str] = SchemaField(
            description="""The Apollo IDs for the companies you want to include in your search results. Each company in the Apollo database is assigned a unique ID.

To find IDs, identify the values for organization_id when you call this endpoint.""",
            default=[],
        )
        max_results: int = SchemaField(
            description="""The maximum number of results to return. If you don't specify this parameter, the default is 100.""",
            default=100,
            ge=1,
            le=50000,
            advanced=True,
        )
        credentials: ApolloCredentialsInput = SchemaField(
            description="Apollo credentials",
        )

    class Output(BlockSchema):
        organizations: list[Organization] = SchemaField(
            description="List of organizations found",
            default=[],
        )
        organization: Organization = SchemaField(
            description="Each found organization, one at a time",
        )
        error: str = SchemaField(
            description="Error message if the search failed",
            default="",
        )

    def __init__(self):
        super().__init__(
            id="3d71270d-599e-4148-9b95-71b35d2f44f0",
            description="Search for organizations in Apollo",
            categories={BlockCategory.SEARCH},
            input_schema=SearchOrganizationsBlock.Input,
            output_schema=SearchOrganizationsBlock.Output,
            test_credentials=TEST_CREDENTIALS,
            test_input={"query": "Google", "credentials": TEST_CREDENTIALS_INPUT},
            test_output=[
                (
                    "organization",
                    Organization(
                        id="1",
                        name="Google",
                        website_url="https://google.com",
                        blog_url="https://google.com/blog",
                        angellist_url="https://angel.co/google",
                        linkedin_url="https://linkedin.com/company/google",
                        twitter_url="https://twitter.com/google",
                        facebook_url="https://facebook.com/google",
                        primary_phone=PrimaryPhone(
                            source="google",
                            number="1234567890",
                            sanitized_number="1234567890",
                        ),
                        languages=["en"],
                        alexa_ranking=1000,
                        phone="1234567890",
                        linkedin_uid="1234567890",
                        founded_year=2000,
                        publicly_traded_symbol="GOOGL",
                        publicly_traded_exchange="NASDAQ",
                        logo_url="https://google.com/logo.png",
                        chrunchbase_url="https://chrunchbase.com/google",
                        primary_domain="google.com",
                        sanitized_phone="1234567890",
                        owned_by_organization_id="1",
                        intent_strength="strong",
                        show_intent=True,
                        has_intent_signal_account=True,
                        intent_signal_account="1",
                    ),
                ),
                (
                    "organizations",
                    [
                        Organization(
                            id="1",
                            name="Google",
                            website_url="https://google.com",
                            blog_url="https://google.com/blog",
                            angellist_url="https://angel.co/google",
                            linkedin_url="https://linkedin.com/company/google",
                            twitter_url="https://twitter.com/google",
                            facebook_url="https://facebook.com/google",
                            primary_phone=PrimaryPhone(
                                source="google",
                                number="1234567890",
                                sanitized_number="1234567890",
                            ),
                            languages=["en"],
                            alexa_ranking=1000,
                            phone="1234567890",
                            linkedin_uid="1234567890",
                            founded_year=2000,
                            publicly_traded_symbol="GOOGL",
                            publicly_traded_exchange="NASDAQ",
                            logo_url="https://google.com/logo.png",
                            chrunchbase_url="https://chrunchbase.com/google",
                            primary_domain="google.com",
                            sanitized_phone="1234567890",
                            owned_by_organization_id="1",
                            intent_strength="strong",
                            show_intent=True,
                            has_intent_signal_account=True,
                            intent_signal_account="1",
                        ),
                    ],
                ),
            ],
            test_mock={
                "search_organizations": lambda *args, **kwargs: [
                    Organization(
                        id="1",
                        name="Google",
                        website_url="https://google.com",
                        blog_url="https://google.com/blog",
                        angellist_url="https://angel.co/google",
                        linkedin_url="https://linkedin.com/company/google",
                        twitter_url="https://twitter.com/google",
                        facebook_url="https://facebook.com/google",
                        primary_phone=PrimaryPhone(
                            source="google",
                            number="1234567890",
                            sanitized_number="1234567890",
),
|
||||
languages=["en"],
|
||||
alexa_ranking=1000,
|
||||
phone="1234567890",
|
||||
linkedin_uid="1234567890",
|
||||
founded_year=2000,
|
||||
publicly_traded_symbol="GOOGL",
|
||||
publicly_traded_exchange="NASDAQ",
|
||||
logo_url="https://google.com/logo.png",
|
||||
chrunchbase_url="https://chrunchbase.com/google",
|
||||
primary_domain="google.com",
|
||||
sanitized_phone="1234567890",
|
||||
owned_by_organization_id="1",
|
||||
intent_strength="strong",
|
||||
show_intent=True,
|
||||
has_intent_signal_account=True,
|
||||
intent_signal_account="1",
|
||||
)
|
||||
]
|
||||
},
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def search_organizations(
|
||||
query: SearchOrganizationsRequest, credentials: ApolloCredentials
|
||||
) -> list[Organization]:
|
||||
client = ApolloClient(credentials)
|
||||
return client.search_organizations(query)
|
||||
|
||||
def run(
|
||||
self, input_data: Input, *, credentials: ApolloCredentials, **kwargs
|
||||
) -> BlockOutput:
|
||||
query = SearchOrganizationsRequest(
|
||||
**input_data.model_dump(exclude={"credentials"})
|
||||
)
|
||||
organizations = self.search_organizations(query, credentials)
|
||||
for organization in organizations:
|
||||
yield "organization", organization
|
||||
yield "organizations", organizations
|
||||
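# Editor's sketch (not part of the original diff): how a caller might consume
# this block's streaming outputs. The wiring is illustrative only; the
# `apollo_credentials` object and the constructed Input are assumptions, while
# the output names ("organization", "organizations") come from the Output
# schema above.
def collect_organization_names(block, input_data, apollo_credentials) -> list[str]:
    names = []
    for output_name, value in block.run(input_data, credentials=apollo_credentials):
        if output_name == "organization":
            # One Organization model per yield, streamed as results arrive.
            names.append(value.name)
        # "organizations" is also yielded once at the end with the full list.
    return names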
@@ -1,394 +0,0 @@
from backend.blocks.apollo._api import ApolloClient
from backend.blocks.apollo._auth import (
    TEST_CREDENTIALS,
    TEST_CREDENTIALS_INPUT,
    ApolloCredentials,
    ApolloCredentialsInput,
)
from backend.blocks.apollo.models import (
    Contact,
    ContactEmailStatuses,
    SearchPeopleRequest,
    SenorityLevels,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField


class SearchPeopleBlock(Block):
    """Search for people in Apollo"""

    class Input(BlockSchema):
        person_titles: list[str] = SchemaField(
            description="""Job titles held by the people you want to find. For a person to be included in search results, they only need to match 1 of the job titles you add. Adding more job titles expands your search results.

Results also include job titles with the same terms, even if they are not exact matches. For example, searching for marketing manager might return people with the job title content marketing manager.

Use this parameter in combination with the person_seniorities[] parameter to find people based on specific job functions and seniority levels.
""",
            default=[],
            advanced=False,
        )
        person_locations: list[str] = SchemaField(
            description="""The location where people live. You can search across cities, US states, and countries.

To find people based on the headquarters locations of their current employer, use the organization_locations parameter.""",
            default=[],
            advanced=False,
        )
        person_seniorities: list[SenorityLevels] = SchemaField(
            description="""The job seniority that people hold within their current employer. This enables you to find people that currently hold positions at certain reporting levels, such as Director level or senior IC level.

For a person to be included in search results, they only need to match 1 of the seniorities you add. Adding more seniorities expands your search results.

Searches only return results based on their current job title, so searching for Director-level employees only returns people that currently hold a Director-level title. If someone was previously a Director, but is currently a VP, they would not be included in your search results.

Use this parameter in combination with the person_titles[] parameter to find people based on specific job functions and seniority levels.""",
            default=[],
            advanced=False,
        )
        organization_locations: list[str] = SchemaField(
            description="""The location of the company headquarters for a person's current employer. You can search across cities, US states, and countries.

If a company has several office locations, results are still based on the headquarters location. For example, if you search chicago but a company's HQ location is in boston, people that work for the Boston-based company will not appear in your results, even if they match other parameters.

To find people based on their personal location, use the person_locations parameter.""",
            default=[],
            advanced=False,
        )
        q_organization_domains: list[str] = SchemaField(
            description="""The domain name for the person's employer. This can be the current employer or a previous employer. Do not include www., the @ symbol, or similar.

You can add multiple domains to search across companies.

Examples: apollo.io and microsoft.com""",
            default=[],
            advanced=False,
        )
        contact_email_statuses: list[ContactEmailStatuses] = SchemaField(
            description="""The email statuses for the people you want to find. You can add multiple statuses to expand your search.""",
            default=[],
            advanced=False,
        )
        organization_ids: list[str] = SchemaField(
            description="""The Apollo IDs for the companies (employers) you want to include in your search results. Each company in the Apollo database is assigned a unique ID.

To find IDs, call the Organization Search endpoint and identify the values for organization_id.""",
            default=[],
            advanced=False,
        )
        organization_num_empoloyees_range: list[int] = SchemaField(
            description="""The number range of employees working for the company. This enables you to find companies based on headcount. You can add multiple ranges to expand your search results.

Each range you add needs to be a string, with the upper and lower numbers of the range separated only by a comma.""",
            default=[],
            advanced=False,
        )
        q_keywords: str = SchemaField(
            description="""A string of words to filter the results by""",
            default="",
            advanced=False,
        )
        max_results: int = SchemaField(
            description="""The maximum number of results to return. If you don't specify this parameter, the default is 100.""",
            default=100,
            ge=1,
            le=50000,
            advanced=True,
        )

        credentials: ApolloCredentialsInput = SchemaField(
            description="Apollo credentials",
        )

    class Output(BlockSchema):
        people: list[Contact] = SchemaField(
            description="List of people found",
            default=[],
        )
        person: Contact = SchemaField(
            description="Each found person, one at a time",
        )
        error: str = SchemaField(
            description="Error message if the search failed",
            default="",
        )

    def __init__(self):
        super().__init__(
            id="c2adb3aa-5aae-488d-8a6e-4eb8c23e2ed6",
            description="Search for people in Apollo",
            categories={BlockCategory.SEARCH},
            input_schema=SearchPeopleBlock.Input,
            output_schema=SearchPeopleBlock.Output,
            test_credentials=TEST_CREDENTIALS,
            test_input={"credentials": TEST_CREDENTIALS_INPUT},
            test_output=[
                (
                    "person",
                    Contact(
                        contact_roles=[],
                        id="1",
                        name="John Doe",
                        first_name="John",
                        last_name="Doe",
                        linkedin_url="https://www.linkedin.com/in/johndoe",
                        title="Software Engineer",
                        organization_name="Google",
                        organization_id="123456",
                        contact_stage_id="1",
                        owner_id="1",
                        creator_id="1",
                        person_id="1",
                        email_needs_tickling=True,
                        source="apollo",
                        original_source="apollo",
                        headline="Software Engineer",
                        photo_url="https://www.linkedin.com/in/johndoe",
                        present_raw_address="123 Main St, Anytown, USA",
                        linkededin_uid="123456",
                        extrapolated_email_confidence=0.8,
                        salesforce_id="123456",
                        salesforce_lead_id="123456",
                        salesforce_contact_id="123456",
                        saleforce_account_id="123456",
                        crm_owner_id="123456",
                        created_at="2021-01-01",
                        emailer_campaign_ids=[],
                        direct_dial_status="active",
                        direct_dial_enrichment_failed_at="2021-01-01",
                        email_status="active",
                        email_source="apollo",
                        account_id="123456",
                        last_activity_date="2021-01-01",
                        hubspot_vid="123456",
                        hubspot_company_id="123456",
                        crm_id="123456",
                        sanitized_phone="123456",
                        merged_crm_ids="123456",
                        updated_at="2021-01-01",
                        queued_for_crm_push=True,
                        suggested_from_rule_engine_config_id="123456",
                        email_unsubscribed=None,
                        label_ids=[],
                        has_pending_email_arcgate_request=True,
                        has_email_arcgate_request=True,
                        existence_level=None,
                        email=None,
                        email_from_customer=None,
                        typed_custom_fields=[],
                        custom_field_errors=None,
                        salesforce_record_id=None,
                        crm_record_url=None,
                        email_status_unavailable_reason=None,
                        email_true_status=None,
                        updated_email_true_status=True,
                        contact_rule_config_statuses=[],
                        source_display_name=None,
                        twitter_url=None,
                        contact_campaign_statuses=[],
                        state=None,
                        city=None,
                        country=None,
                        account=None,
                        contact_emails=[],
                        organization=None,
                        employment_history=[],
                        time_zone=None,
                        intent_strength=None,
                        show_intent=True,
                        phone_numbers=[],
                        account_phone_note=None,
                        free_domain=True,
                        is_likely_to_engage=True,
                        email_domain_catchall=True,
                        contact_job_change_event=None,
                    ),
                ),
                (
                    "people",
                    [
                        Contact(
                            contact_roles=[],
                            id="1",
                            name="John Doe",
                            first_name="John",
                            last_name="Doe",
                            linkedin_url="https://www.linkedin.com/in/johndoe",
                            title="Software Engineer",
                            organization_name="Google",
                            organization_id="123456",
                            contact_stage_id="1",
                            owner_id="1",
                            creator_id="1",
                            person_id="1",
                            email_needs_tickling=True,
                            source="apollo",
                            original_source="apollo",
                            headline="Software Engineer",
                            photo_url="https://www.linkedin.com/in/johndoe",
                            present_raw_address="123 Main St, Anytown, USA",
                            linkededin_uid="123456",
                            extrapolated_email_confidence=0.8,
                            salesforce_id="123456",
                            salesforce_lead_id="123456",
                            salesforce_contact_id="123456",
                            saleforce_account_id="123456",
                            crm_owner_id="123456",
                            created_at="2021-01-01",
                            emailer_campaign_ids=[],
                            direct_dial_status="active",
                            direct_dial_enrichment_failed_at="2021-01-01",
                            email_status="active",
                            email_source="apollo",
                            account_id="123456",
                            last_activity_date="2021-01-01",
                            hubspot_vid="123456",
                            hubspot_company_id="123456",
                            crm_id="123456",
                            sanitized_phone="123456",
                            merged_crm_ids="123456",
                            updated_at="2021-01-01",
                            queued_for_crm_push=True,
                            suggested_from_rule_engine_config_id="123456",
                            email_unsubscribed=None,
                            label_ids=[],
                            has_pending_email_arcgate_request=True,
                            has_email_arcgate_request=True,
                            existence_level=None,
                            email=None,
                            email_from_customer=None,
                            typed_custom_fields=[],
                            custom_field_errors=None,
                            salesforce_record_id=None,
                            crm_record_url=None,
                            email_status_unavailable_reason=None,
                            email_true_status=None,
                            updated_email_true_status=True,
                            contact_rule_config_statuses=[],
                            source_display_name=None,
                            twitter_url=None,
                            contact_campaign_statuses=[],
                            state=None,
                            city=None,
                            country=None,
                            account=None,
                            contact_emails=[],
                            organization=None,
                            employment_history=[],
                            time_zone=None,
                            intent_strength=None,
                            show_intent=True,
                            phone_numbers=[],
                            account_phone_note=None,
                            free_domain=True,
                            is_likely_to_engage=True,
                            email_domain_catchall=True,
                            contact_job_change_event=None,
                        ),
                    ],
                ),
            ],
            test_mock={
                "search_people": lambda query, credentials: [
                    Contact(
                        id="1",
                        name="John Doe",
                        first_name="John",
                        last_name="Doe",
                        linkedin_url="https://www.linkedin.com/in/johndoe",
                        title="Software Engineer",
                        organization_name="Google",
                        organization_id="123456",
                        contact_stage_id="1",
                        owner_id="1",
                        creator_id="1",
                        person_id="1",
                        email_needs_tickling=True,
                        source="apollo",
                        original_source="apollo",
                        headline="Software Engineer",
                        photo_url="https://www.linkedin.com/in/johndoe",
                        present_raw_address="123 Main St, Anytown, USA",
                        linkededin_uid="123456",
                        extrapolated_email_confidence=0.8,
                        salesforce_id="123456",
                        salesforce_lead_id="123456",
                        salesforce_contact_id="123456",
                        saleforce_account_id="123456",
                        crm_owner_id="123456",
                        created_at="2021-01-01",
                        emailer_campaign_ids=[],
                        direct_dial_status="active",
                        direct_dial_enrichment_failed_at="2021-01-01",
                        email_status="active",
                        email_source="apollo",
                        account_id="123456",
                        last_activity_date="2021-01-01",
                        hubspot_vid="123456",
                        hubspot_company_id="123456",
                        crm_id="123456",
                        sanitized_phone="123456",
                        merged_crm_ids="123456",
                        updated_at="2021-01-01",
                        queued_for_crm_push=True,
                        suggested_from_rule_engine_config_id="123456",
                        email_unsubscribed=None,
                        label_ids=[],
                        has_pending_email_arcgate_request=True,
                        has_email_arcgate_request=True,
                        existence_level=None,
                        email=None,
                        email_from_customer=None,
                        typed_custom_fields=[],
                        custom_field_errors=None,
                        salesforce_record_id=None,
                        crm_record_url=None,
                        email_status_unavailable_reason=None,
                        email_true_status=None,
                        updated_email_true_status=True,
                        contact_rule_config_statuses=[],
                        source_display_name=None,
                        twitter_url=None,
                        contact_campaign_statuses=[],
                        state=None,
                        city=None,
                        country=None,
                        account=None,
                        contact_emails=[],
                        organization=None,
                        employment_history=[],
                        time_zone=None,
                        intent_strength=None,
                        show_intent=True,
                        phone_numbers=[],
                        account_phone_note=None,
                        free_domain=True,
                        is_likely_to_engage=True,
                        email_domain_catchall=True,
                        contact_job_change_event=None,
                    ),
                ]
            },
        )

    @staticmethod
    def search_people(
        query: SearchPeopleRequest, credentials: ApolloCredentials
    ) -> list[Contact]:
        client = ApolloClient(credentials)
        return client.search_people(query)

    def run(
        self,
        input_data: Input,
        *,
        credentials: ApolloCredentials,
        **kwargs,
    ) -> BlockOutput:
        query = SearchPeopleRequest(**input_data.model_dump(exclude={"credentials"}))
        people = self.search_people(query, credentials)
        for person in people:
            yield "person", person
        yield "people", people
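# Editor's sketch (not part of the original diff): the request construction
# that run() performs, shown standalone. model_dump(exclude={"credentials"})
# mirrors the code above: every schema field except the credentials meta-input
# is forwarded 1:1 onto SearchPeopleRequest.
def build_people_query(input_data: "SearchPeopleBlock.Input") -> "SearchPeopleRequest":
    # Drop the credentials meta-input; everything else maps onto the request model.
    return SearchPeopleRequest(**input_data.model_dump(exclude={"credentials"}))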
@@ -1,52 +1,13 @@
import enum
from typing import Any, List

from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema, BlockType
from backend.data.model import SchemaField
from backend.util.file import MediaFile, store_media_file
from backend.util.mock import MockObject
from backend.util.text import TextFormatter
from backend.util.type import convert

formatter = TextFormatter()


class FileStoreBlock(Block):
    class Input(BlockSchema):
        file_in: MediaFile = SchemaField(
            description="The file to store in the temporary directory; it can be a URL, data URI, or local path."
        )

    class Output(BlockSchema):
        file_out: MediaFile = SchemaField(
            description="The relative path to the stored file in the temporary directory."
        )

    def __init__(self):
        super().__init__(
            id="cbb50872-625b-42f0-8203-a2ae78242d8a",
            description="Stores the input file in the temporary directory.",
            categories={BlockCategory.BASIC, BlockCategory.MULTIMEDIA},
            input_schema=FileStoreBlock.Input,
            output_schema=FileStoreBlock.Output,
            static_output=True,
        )

    def run(
        self,
        input_data: Input,
        *,
        graph_exec_id: str,
        **kwargs,
    ) -> BlockOutput:
        file_path = store_media_file(
            graph_exec_id=graph_exec_id,
            file=input_data.file_in,
            return_content=False,
        )
        yield "file_out", file_path
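# Editor's sketch (not part of the original diff): exercising FileStoreBlock.
# BlockOutput is a generator of (name, value) pairs, so the stored path is
# read by iterating run(). The graph_exec_id value is a made-up example.
def store_one_file(block: FileStoreBlock, media: MediaFile) -> MediaFile:
    outputs = dict(
        block.run(FileStoreBlock.Input(file_in=media), graph_exec_id="exec-123")
    )
    return outputs["file_out"]  # relative path inside the execution's temp directory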
class StoreValueBlock(Block):
    """
    This block allows you to provide a constant value as a block, in a stateless manner.
@@ -297,7 +258,6 @@ class AgentOutputBlock(Block):
    class Output(BlockSchema):
        output: Any = SchemaField(description="The value recorded as output.")
        name: Any = SchemaField(description="The name of the value recorded as output.")

    def __init__(self):
        super().__init__(
@@ -349,7 +309,6 @@ class AgentOutputBlock(Block):
            yield "output", f"Error: {e}, {input_data.value}"
        else:
            yield "output", input_data.value
        yield "name", input_data.name


class AddToDictionaryBlock(Block):
@@ -510,48 +469,6 @@ class AddToListBlock(Block):
        yield "updated_list", updated_list


class FindInListBlock(Block):
    class Input(BlockSchema):
        list: List[Any] = SchemaField(description="The list to search in.")
        value: Any = SchemaField(description="The value to search for.")

    class Output(BlockSchema):
        index: int = SchemaField(description="The index of the value in the list.")
        found: bool = SchemaField(
            description="Whether the value was found in the list."
        )
        not_found_value: Any = SchemaField(
            description="The value that was not found in the list."
        )

    def __init__(self):
        super().__init__(
            id="5e2c6d0a-1e37-489f-b1d0-8e1812b23333",
            description="Finds the index of the value in the list.",
            categories={BlockCategory.BASIC},
            input_schema=FindInListBlock.Input,
            output_schema=FindInListBlock.Output,
            test_input=[
                {"list": [1, 2, 3, 4, 5], "value": 3},
                {"list": [1, 2, 3, 4, 5], "value": 6},
            ],
            test_output=[
                ("index", 2),
                ("found", True),
                ("found", False),
                ("not_found_value", 6),
            ],
        )

    def run(self, input_data: Input, **kwargs) -> BlockOutput:
        try:
            yield "index", input_data.list.index(input_data.value)
            yield "found", True
        except ValueError:
            yield "found", False
            yield "not_found_value", input_data.value
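# Editor's note (not part of the original diff): list.index() returns the index
# of the FIRST match and raises ValueError otherwise, which is exactly the
# branch structure above. A plain-Python restatement of the contract:
def find_in_list(items: list, value) -> tuple[bool, int | None]:
    try:
        return True, items.index(value)  # e.g. ([1, 2, 3, 4, 5], 3) -> (True, 2)
    except ValueError:
        return False, None               # e.g. ([1, 2, 3, 4, 5], 6) -> (False, None)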
class NoteBlock(Block):
    class Input(BlockSchema):
        text: str = SchemaField(description="The text to display in the sticky note.")
@@ -673,47 +590,3 @@ class CreateListBlock(Block):
            yield "list", input_data.values
        except Exception as e:
            yield "error", f"Failed to create list: {str(e)}"


class TypeOptions(enum.Enum):
    STRING = "string"
    NUMBER = "number"
    BOOLEAN = "boolean"
    LIST = "list"
    DICTIONARY = "dictionary"


class UniversalTypeConverterBlock(Block):
    class Input(BlockSchema):
        value: Any = SchemaField(
            description="The value to convert to a universal type."
        )
        type: TypeOptions = SchemaField(description="The type to convert the value to.")

    class Output(BlockSchema):
        value: Any = SchemaField(description="The converted value.")

    def __init__(self):
        super().__init__(
            id="95d1b990-ce13-4d88-9737-ba5c2070c97b",
            description="This block converts a value to one of the basic types: string, number, boolean, list, or dictionary.",
            categories={BlockCategory.BASIC},
            input_schema=UniversalTypeConverterBlock.Input,
            output_schema=UniversalTypeConverterBlock.Output,
        )

    def run(self, input_data: Input, **kwargs) -> BlockOutput:
        try:
            converted_value = convert(
                input_data.value,
                {
                    TypeOptions.STRING: str,
                    TypeOptions.NUMBER: float,
                    TypeOptions.BOOLEAN: bool,
                    TypeOptions.LIST: list,
                    TypeOptions.DICTIONARY: dict,
                }[input_data.type],
            )
            yield "value", converted_value
        except Exception as e:
            yield "error", f"Failed to convert value: {str(e)}"
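# Editor's note (not part of the original diff): the block dispatches on a
# TypeOptions -> target-type table. A standalone restatement, assuming
# `convert(value, target)` from backend.util.type behaves like a checked cast:
_TARGETS = {
    TypeOptions.STRING: str,
    TypeOptions.NUMBER: float,
    TypeOptions.BOOLEAN: bool,
    TypeOptions.LIST: list,
    TypeOptions.DICTIONARY: dict,
}
# Example: convert("3.14", _TARGETS[TypeOptions.NUMBER]) would produce 3.14.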
@@ -107,83 +107,3 @@ class ConditionBlock(Block):
            yield "yes_output", yes_value
        else:
            yield "no_output", no_value


class IfInputMatchesBlock(Block):
    class Input(BlockSchema):
        input: Any = SchemaField(
            description="The input to match against",
            placeholder="For example: 10 or 'hello' or True",
        )
        value: Any = SchemaField(
            description="The value to match the input against",
            placeholder="For example: 'Greater' or 20 or False",
        )
        yes_value: Any = SchemaField(
            description="The value to output if the input matches",
            placeholder="For example: 'Greater' or 20 or False",
            default=None,
        )
        no_value: Any = SchemaField(
            description="The value to output if the input does not match",
            placeholder="For example: 'Greater' or 20 or False",
            default=None,
        )

    class Output(BlockSchema):
        result: bool = SchemaField(
            description="The result of the condition evaluation (True or False)"
        )
        yes_output: Any = SchemaField(
            description="The output value if the condition is true"
        )
        no_output: Any = SchemaField(
            description="The output value if the condition is false"
        )

    def __init__(self):
        super().__init__(
            id="6dbbc4b3-ca6c-42b6-b508-da52d23e13f2",
            input_schema=IfInputMatchesBlock.Input,
            output_schema=IfInputMatchesBlock.Output,
            description="Matches the input against a given value and routes to the yes/no outputs accordingly",
            categories={BlockCategory.LOGIC},
            test_input=[
                {
                    "input": 10,
                    "value": 10,
                    "yes_value": "Greater",
                    "no_value": "Not greater",
                },
                {
                    "input": 10,
                    "value": 20,
                    "yes_value": "Greater",
                    "no_value": "Not greater",
                },
                {
                    "input": 10,
                    "value": None,
                    "yes_value": "Yes",
                    "no_value": "No",
                },
            ],
            test_output=[
                ("result", True),
                ("yes_output", "Greater"),
                ("result", False),
                ("no_output", "Not greater"),
                ("result", False),
                ("no_output", "No"),
                # ("result", True),
                # ("yes_output", "Yes"),
            ],
        )

    def run(self, input_data: Input, **kwargs) -> BlockOutput:
        if input_data.input == input_data.value or input_data.input is input_data.value:
            yield "result", True
            yield "yes_output", input_data.yes_value
        else:
            yield "result", False
            yield "no_output", input_data.no_value
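# Editor's note (not part of the original diff): the predicate in run() above
# combines `==` and `is`, so identity matches (e.g. None against None) succeed
# even when a value's __eq__ is unusual. Standalone restatement:
def matches(input_value, match_value) -> bool:
    # Same predicate as IfInputMatchesBlock.run().
    return input_value == match_value or input_value is match_value

assert matches(10, 10) is True
assert matches(10, 20) is False
assert matches(None, None) is True  # the identity arm covers None-to-None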
@@ -188,270 +188,3 @@ class CodeExecutionBlock(Block):
            yield "stderr_logs", stderr_logs
        except Exception as e:
            yield "error", str(e)


class InstantiationBlock(Block):
    class Input(BlockSchema):
        credentials: CredentialsMetaInput[
            Literal[ProviderName.E2B], Literal["api_key"]
        ] = CredentialsField(
            description="Enter your api key for the E2B Sandbox. You can get it here - https://e2b.dev/docs",
        )

        # TODO: Option to run command in background
        setup_commands: list[str] = SchemaField(
            description=(
                "Shell commands to set up the sandbox before running the code. "
                "You can use `curl` or `git` to install your desired Debian based "
                "package manager. `pip` and `npm` are pre-installed.\n\n"
                "These commands are executed with `sh`, in the foreground."
            ),
            placeholder="pip install cowsay",
            default=[],
            advanced=False,
        )

        setup_code: str = SchemaField(
            description="Code to execute in the sandbox",
            placeholder="print('Hello, World!')",
            default="",
            advanced=False,
        )

        language: ProgrammingLanguage = SchemaField(
            description="Programming language to execute",
            default=ProgrammingLanguage.PYTHON,
            advanced=False,
        )

        timeout: int = SchemaField(
            description="Execution timeout in seconds", default=300
        )

        template_id: str = SchemaField(
            description=(
                "You can use an E2B sandbox template by entering its ID here. "
                "Check out the E2B docs for more details: "
                "[E2B - Sandbox template](https://e2b.dev/docs/sandbox-template)"
            ),
            default="",
            advanced=True,
        )

    class Output(BlockSchema):
        sandbox_id: str = SchemaField(description="ID of the sandbox instance")
        response: str = SchemaField(description="Response from code execution")
        stdout_logs: str = SchemaField(
            description="Standard output logs from execution"
        )
        stderr_logs: str = SchemaField(description="Standard error logs from execution")
        error: str = SchemaField(description="Error message if execution failed")

    def __init__(self):
        super().__init__(
            id="ff0861c9-1726-4aec-9e5b-bf53f3622112",
            description="Instantiate an isolated sandbox environment with internet access in which to execute code.",
            categories={BlockCategory.DEVELOPER_TOOLS},
            input_schema=InstantiationBlock.Input,
            output_schema=InstantiationBlock.Output,
            test_credentials=TEST_CREDENTIALS,
            test_input={
                "credentials": TEST_CREDENTIALS_INPUT,
                "setup_code": "print('Hello World')",
                "language": ProgrammingLanguage.PYTHON.value,
                "setup_commands": [],
                "timeout": 300,
                "template_id": "",
            },
            test_output=[
                ("sandbox_id", str),
                ("response", "Hello World"),
                ("stdout_logs", "Hello World\n"),
            ],
            test_mock={
                "execute_code": lambda setup_code, language, setup_commands, timeout, api_key, template_id: (
                    "sandbox_id",
                    "Hello World",
                    "Hello World\n",
                    "",
                ),
            },
        )

    def run(
        self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
    ) -> BlockOutput:
        try:
            sandbox_id, response, stdout_logs, stderr_logs = self.execute_code(
                input_data.setup_code,
                input_data.language,
                input_data.setup_commands,
                input_data.timeout,
                credentials.api_key.get_secret_value(),
                input_data.template_id,
            )
            if sandbox_id:
                yield "sandbox_id", sandbox_id
            else:
                yield "error", "Sandbox ID not found"
            if response:
                yield "response", response
            if stdout_logs:
                yield "stdout_logs", stdout_logs
            if stderr_logs:
                yield "stderr_logs", stderr_logs
        except Exception as e:
            yield "error", str(e)

    def execute_code(
        self,
        code: str,
        language: ProgrammingLanguage,
        setup_commands: list[str],
        timeout: int,
        api_key: str,
        template_id: str,
    ):
        try:
            sandbox = None
            if template_id:
                sandbox = Sandbox(
                    template=template_id, api_key=api_key, timeout=timeout
                )
            else:
                sandbox = Sandbox(api_key=api_key, timeout=timeout)

            if not sandbox:
                raise Exception("Sandbox not created")

            # Running setup commands
            for cmd in setup_commands:
                sandbox.commands.run(cmd)

            # Executing the code
            execution = sandbox.run_code(
                code,
                language=language.value,
                on_error=lambda e: sandbox.kill(),  # Kill the sandbox if there is an error
            )

            if execution.error:
                raise Exception(execution.error)

            response = execution.text
            stdout_logs = "".join(execution.logs.stdout)
            stderr_logs = "".join(execution.logs.stderr)

            return sandbox.sandbox_id, response, stdout_logs, stderr_logs

        except Exception as e:
            raise e
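# Editor's sketch (not part of the original diff): the lifecycle execute_code
# implements, condensed. Names follow the e2b-code-interpreter usage seen
# above; treat exact signatures as assumptions taken from the surrounding
# code, not as independently verified library API.
def run_once(api_key: str, code: str) -> str:
    sandbox = Sandbox(api_key=api_key, timeout=300)  # create an isolated VM
    execution = sandbox.run_code(code, language="python")
    if execution.error:
        raise Exception(execution.error)
    return "".join(execution.logs.stdout)            # e.g. "Hello World\n"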
class StepExecutionBlock(Block):
    class Input(BlockSchema):
        credentials: CredentialsMetaInput[
            Literal[ProviderName.E2B], Literal["api_key"]
        ] = CredentialsField(
            description="Enter your api key for the E2B Sandbox. You can get it here - https://e2b.dev/docs",
        )

        sandbox_id: str = SchemaField(
            description="ID of the sandbox instance to execute the code in",
            advanced=False,
        )

        step_code: str = SchemaField(
            description="Code to execute in the sandbox",
            placeholder="print('Hello, World!')",
            default="",
            advanced=False,
        )

        language: ProgrammingLanguage = SchemaField(
            description="Programming language to execute",
            default=ProgrammingLanguage.PYTHON,
            advanced=False,
        )

    class Output(BlockSchema):
        response: str = SchemaField(description="Response from code execution")
        stdout_logs: str = SchemaField(
            description="Standard output logs from execution"
        )
        stderr_logs: str = SchemaField(description="Standard error logs from execution")
        error: str = SchemaField(description="Error message if execution failed")

    def __init__(self):
        super().__init__(
            id="82b59b8e-ea10-4d57-9161-8b169b0adba6",
            description="Execute code in a previously instantiated sandbox environment.",
            categories={BlockCategory.DEVELOPER_TOOLS},
            input_schema=StepExecutionBlock.Input,
            output_schema=StepExecutionBlock.Output,
            test_credentials=TEST_CREDENTIALS,
            test_input={
                "credentials": TEST_CREDENTIALS_INPUT,
                "sandbox_id": "sandbox_id",
                "step_code": "print('Hello World')",
                "language": ProgrammingLanguage.PYTHON.value,
            },
            test_output=[
                ("response", "Hello World"),
                ("stdout_logs", "Hello World\n"),
            ],
            test_mock={
                "execute_step_code": lambda sandbox_id, step_code, language, api_key: (
                    "Hello World",
                    "Hello World\n",
                    "",
                ),
            },
        )

    def execute_step_code(
        self,
        sandbox_id: str,
        code: str,
        language: ProgrammingLanguage,
        api_key: str,
    ):
        try:
            sandbox = Sandbox.connect(sandbox_id=sandbox_id, api_key=api_key)
            if not sandbox:
                raise Exception("Sandbox not found")

            # Executing the code
            execution = sandbox.run_code(code, language=language.value)

            if execution.error:
                raise Exception(execution.error)

            response = execution.text
            stdout_logs = "".join(execution.logs.stdout)
            stderr_logs = "".join(execution.logs.stderr)

            return response, stdout_logs, stderr_logs

        except Exception as e:
            raise e

    def run(
        self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
    ) -> BlockOutput:
        try:
            response, stdout_logs, stderr_logs = self.execute_step_code(
                input_data.sandbox_id,
                input_data.step_code,
                input_data.language,
                credentials.api_key.get_secret_value(),
            )

            if response:
                yield "response", response
            if stdout_logs:
                yield "stdout_logs", stdout_logs
            if stderr_logs:
                yield "stderr_logs", stderr_logs
        except Exception as e:
            yield "error", str(e)
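# Editor's sketch (not part of the original diff): the intended two-block
# pattern. InstantiationBlock yields a sandbox_id once; StepExecutionBlock can
# then be run repeatedly against that ID, reusing installed packages and
# state. The wiring below is illustrative pseudo-usage of the generators
# defined above, not a verified harness.
def first_output(gen, name):
    for out_name, value in gen:
        if out_name == name:
            return value
    return None

# sandbox_id = first_output(instantiation_block.run(init_input, credentials=creds), "sandbox_id")
# result = first_output(step_block.run(step_input, credentials=creds), "response")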
@@ -1,53 +1,22 @@
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from typing import Literal

from pydantic import BaseModel, ConfigDict, SecretStr
from pydantic import BaseModel, ConfigDict

from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import (
    CredentialsField,
    CredentialsMetaInput,
    SchemaField,
    UserPasswordCredentials,
)
from backend.integrations.providers import ProviderName

TEST_CREDENTIALS = UserPasswordCredentials(
    id="01234567-89ab-cdef-0123-456789abcdef",
    provider="smtp",
    username=SecretStr("mock-smtp-username"),
    password=SecretStr("mock-smtp-password"),
    title="Mock SMTP credentials",
)

TEST_CREDENTIALS_INPUT = {
    "provider": TEST_CREDENTIALS.provider,
    "id": TEST_CREDENTIALS.id,
    "type": TEST_CREDENTIALS.type,
    "title": TEST_CREDENTIALS.title,
}
SMTPCredentials = UserPasswordCredentials
SMTPCredentialsInput = CredentialsMetaInput[
    Literal[ProviderName.SMTP],
    Literal["user_password"],
]
from backend.data.model import BlockSecret, SchemaField, SecretField


def SMTPCredentialsField() -> SMTPCredentialsInput:
    return CredentialsField(
        description="The SMTP integration requires a username and password.",
    )


class SMTPConfig(BaseModel):
class EmailCredentials(BaseModel):
    smtp_server: str = SchemaField(
        default="smtp.example.com", description="SMTP server address"
        default="smtp.gmail.com", description="SMTP server address"
    )
    smtp_port: int = SchemaField(default=25, description="SMTP port number")
    smtp_username: BlockSecret = SecretField(key="smtp_username")
    smtp_password: BlockSecret = SecretField(key="smtp_password")

    model_config = ConfigDict(title="SMTP Config")
    model_config = ConfigDict(title="Email Credentials")


class SendEmailBlock(Block):
@@ -61,11 +30,10 @@ class SendEmailBlock(Block):
        body: str = SchemaField(
            description="Body of the email", placeholder="Enter the email body"
        )
        config: SMTPConfig = SchemaField(
            description="SMTP Config",
            default=SMTPConfig(),
        creds: EmailCredentials = SchemaField(
            description="SMTP credentials",
            default=EmailCredentials(),
        )
        credentials: SMTPCredentialsInput = SMTPCredentialsField()

    class Output(BlockSchema):
        status: str = SchemaField(description="Status of the email sending operation")
@@ -75,6 +43,7 @@ class SendEmailBlock(Block):

    def __init__(self):
        super().__init__(
            disabled=True,
            id="4335878a-394e-4e67-adf2-919877ff49ae",
            description="This block sends an email using the provided SMTP credentials.",
            categories={BlockCategory.OUTPUT},
@@ -84,29 +53,25 @@ class SendEmailBlock(Block):
                "to_email": "recipient@example.com",
                "subject": "Test Email",
                "body": "This is a test email.",
                "config": {
                "creds": {
                    "smtp_server": "smtp.gmail.com",
                    "smtp_port": 25,
                    "smtp_username": "your-email@gmail.com",
                    "smtp_password": "your-gmail-password",
                },
                "credentials": TEST_CREDENTIALS_INPUT,
            },
            test_credentials=TEST_CREDENTIALS,
            test_output=[("status", "Email sent successfully")],
            test_mock={"send_email": lambda *args, **kwargs: "Email sent successfully"},
        )

    @staticmethod
    def send_email(
        config: SMTPConfig,
        to_email: str,
        subject: str,
        body: str,
        credentials: SMTPCredentials,
        creds: EmailCredentials, to_email: str, subject: str, body: str
    ) -> str:
        smtp_server = config.smtp_server
        smtp_port = config.smtp_port
        smtp_username = credentials.username.get_secret_value()
        smtp_password = credentials.password.get_secret_value()
        smtp_server = creds.smtp_server
        smtp_port = creds.smtp_port
        smtp_username = creds.smtp_username.get_secret_value()
        smtp_password = creds.smtp_password.get_secret_value()

        msg = MIMEMultipart()
        msg["From"] = smtp_username
@@ -121,13 +86,10 @@ class SendEmailBlock(Block):

        return "Email sent successfully"

    def run(
        self, input_data: Input, *, credentials: SMTPCredentials, **kwargs
    ) -> BlockOutput:
    def run(self, input_data: Input, **kwargs) -> BlockOutput:
        yield "status", self.send_email(
            config=input_data.config,
            to_email=input_data.to_email,
            subject=input_data.subject,
            body=input_data.body,
            credentials=credentials,
            input_data.creds,
            input_data.to_email,
            input_data.subject,
            input_data.body,
        )
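# Editor's sketch (not part of the original diff): the send path elided by the
# hunk above, between building `msg` and returning the status string. This is
# a conventional smtplib STARTTLS flow and an assumption about the omitted
# lines, not a verbatim reconstruction of them.
def send_via_smtp(
    server: str, port: int, username: str, password: str,
    msg: MIMEMultipart, to_email: str,
) -> None:
    with smtplib.SMTP(server, port) as smtp:
        smtp.starttls()                  # upgrade to TLS before authenticating
        smtp.login(username, password)
        smtp.sendmail(username, to_email, msg.as_string())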
@@ -1,4 +1,4 @@
from typing import List
from typing import List, Optional

from pydantic import BaseModel

@@ -13,12 +13,12 @@ from backend.util.request import requests


class ContentRetrievalSettings(BaseModel):
    text: dict = SchemaField(
    text: Optional[dict] = SchemaField(
        description="Text content settings",
        default={"maxCharacters": 1000, "includeHtmlTags": False},
        advanced=True,
    )
    highlights: dict = SchemaField(
    highlights: Optional[dict] = SchemaField(
        description="Highlight settings",
        default={
            "numSentences": 3,
@@ -27,7 +27,7 @@ class ContentRetrievalSettings(BaseModel):
        },
        advanced=True,
    )
    summary: dict = SchemaField(
    summary: Optional[dict] = SchemaField(
        description="Summary settings",
        default={"query": ""},
        advanced=True,
@@ -1,9 +1,6 @@
from urllib.parse import urlparse

from backend.blocks.github._auth import (
    GithubCredentials,
    GithubFineGrainedAPICredentials,
)
from backend.blocks.github._auth import GithubCredentials
from backend.util.request import Requests


@@ -33,68 +30,12 @@ def _convert_to_api_url(url: str) -> str:

def _get_headers(credentials: GithubCredentials) -> dict[str, str]:
    return {
        "Authorization": credentials.auth_header(),
        "Authorization": credentials.bearer(),
        "Accept": "application/vnd.github.v3+json",
    }


def convert_comment_url_to_api_endpoint(comment_url: str) -> str:
    """
    Converts a GitHub comment URL (web interface) to the appropriate API endpoint URL.

    Handles:
    1. Issue/PR comments: #issuecomment-{id}
    2. PR review comments: #discussion_r{id}

    Returns the appropriate API endpoint path for the comment.
    """
    # First, check if this is already an API URL
    parsed_url = urlparse(comment_url)
    if parsed_url.hostname == "api.github.com":
        return comment_url

    # Replace pull with issues for comment endpoints
    if "/pull/" in comment_url:
        comment_url = comment_url.replace("/pull/", "/issues/")

    # Handle issue/PR comments (#issuecomment-xxx)
    if "#issuecomment-" in comment_url:
        base_url, comment_part = comment_url.split("#issuecomment-")
        comment_id = comment_part

        # Extract repo information from base URL
        parsed_url = urlparse(base_url)
        path_parts = parsed_url.path.strip("/").split("/")
        owner, repo = path_parts[0], path_parts[1]

        # Construct API URL for issue comments
        return (
            f"https://api.github.com/repos/{owner}/{repo}/issues/comments/{comment_id}"
        )

    # Handle PR review comments (#discussion_r)
    elif "#discussion_r" in comment_url:
        base_url, comment_part = comment_url.split("#discussion_r")
        comment_id = comment_part

        # Extract repo information from base URL
        parsed_url = urlparse(base_url)
        path_parts = parsed_url.path.strip("/").split("/")
        owner, repo = path_parts[0], path_parts[1]

        # Construct API URL for PR review comments
        return (
            f"https://api.github.com/repos/{owner}/{repo}/pulls/comments/{comment_id}"
        )

    # If no specific comment identifiers are found, use the general URL conversion
    return _convert_to_api_url(comment_url)
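# Editor's note (not part of the original diff): concrete input/output pairs
# for convert_comment_url_to_api_endpoint, traced from the branches above.
# The owner/repo/ID values are illustrative.
#   https://github.com/o/r/issues/1#issuecomment-42
#     -> https://api.github.com/repos/o/r/issues/comments/42
#   https://github.com/o/r/pull/7#discussion_r99
#     -> https://api.github.com/repos/o/r/pulls/comments/99
#        (the /pull/ -> /issues/ rewrite only affects the base path, not the repo parts)
#   Any https://api.github.com/... URL is returned unchanged.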
def get_api(
    credentials: GithubCredentials | GithubFineGrainedAPICredentials,
    convert_urls: bool = True,
) -> Requests:
def get_api(credentials: GithubCredentials, convert_urls: bool = True) -> Requests:
    return Requests(
        trusted_origins=["https://api.github.com", "https://github.com"],
        extra_url_validator=_convert_to_api_url if convert_urls else None,
@@ -22,11 +22,6 @@ GithubCredentialsInput = CredentialsMetaInput[
    Literal["api_key", "oauth2"] if GITHUB_OAUTH_IS_CONFIGURED else Literal["api_key"],
]

GithubFineGrainedAPICredentials = APIKeyCredentials
GithubFineGrainedAPICredentialsInput = CredentialsMetaInput[
    Literal[ProviderName.GITHUB], Literal["api_key"]
]


def GithubCredentialsField(scope: str) -> GithubCredentialsInput:
    """
@@ -42,16 +37,6 @@ def GithubCredentialsField(scope: str) -> GithubCredentialsInput:
    )


def GithubFineGrainedAPICredentialsField(
    scope: str,
) -> GithubFineGrainedAPICredentialsInput:
    return CredentialsField(
        required_scopes={scope},
        description="The GitHub integration can be used with OAuth, "
        "or any API key with sufficient permissions for the blocks it is used on.",
    )


TEST_CREDENTIALS = APIKeyCredentials(
    id="01234567-89ab-cdef-0123-456789abcdef",
    provider="github",
@@ -65,18 +50,3 @@ TEST_CREDENTIALS_INPUT = {
    "type": TEST_CREDENTIALS.type,
    "title": TEST_CREDENTIALS.type,
}

TEST_FINE_GRAINED_CREDENTIALS = APIKeyCredentials(
    id="01234567-89ab-cdef-0123-456789abcdef",
    provider="github",
    api_key=SecretStr("mock-github-api-key"),
    title="Mock GitHub API key",
    expires_at=None,
)

TEST_FINE_GRAINED_CREDENTIALS_INPUT = {
    "provider": TEST_FINE_GRAINED_CREDENTIALS.provider,
    "id": TEST_FINE_GRAINED_CREDENTIALS.id,
    "type": TEST_FINE_GRAINED_CREDENTIALS.type,
    "title": TEST_FINE_GRAINED_CREDENTIALS.type,
}
@@ -1,360 +0,0 @@
from enum import Enum
from typing import Optional

from pydantic import BaseModel

from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField

from ._api import get_api
from ._auth import (
    TEST_CREDENTIALS,
    TEST_CREDENTIALS_INPUT,
    GithubCredentials,
    GithubCredentialsField,
    GithubCredentialsInput,
)


# queued, in_progress, completed, waiting, requested, pending
class ChecksStatus(Enum):
    QUEUED = "queued"
    IN_PROGRESS = "in_progress"
    COMPLETED = "completed"
    WAITING = "waiting"
    REQUESTED = "requested"
    PENDING = "pending"


class ChecksConclusion(Enum):
    SUCCESS = "success"
    FAILURE = "failure"
    NEUTRAL = "neutral"
    CANCELLED = "cancelled"
    TIMED_OUT = "timed_out"
    ACTION_REQUIRED = "action_required"
    SKIPPED = "skipped"


class GithubCreateCheckRunBlock(Block):
    """Block for creating a new check run on a GitHub repository."""

    class Input(BlockSchema):
        credentials: GithubCredentialsInput = GithubCredentialsField("repo:status")
        repo_url: str = SchemaField(
            description="URL of the GitHub repository",
            placeholder="https://github.com/owner/repo",
        )
        name: str = SchemaField(
            description="The name of the check run (e.g., 'code-coverage')",
        )
        head_sha: str = SchemaField(
            description="The SHA of the commit to check",
        )
        status: ChecksStatus = SchemaField(
            description="Current status of the check run",
            default=ChecksStatus.QUEUED,
        )
        conclusion: Optional[ChecksConclusion] = SchemaField(
            description="The final conclusion of the check (required if status is completed)",
            default=None,
        )
        details_url: str = SchemaField(
            description="The URL for the full details of the check",
            default="",
        )
        output_title: str = SchemaField(
            description="Title of the check run output",
            default="",
        )
        output_summary: str = SchemaField(
            description="Summary of the check run output",
            default="",
        )
        output_text: str = SchemaField(
            description="Detailed text of the check run output",
            default="",
        )

    class Output(BlockSchema):
        class CheckRunResult(BaseModel):
            id: int
            html_url: str
            status: str

        check_run: CheckRunResult = SchemaField(
            description="Details of the created check run"
        )
        error: str = SchemaField(
            description="Error message if check run creation failed"
        )

    def __init__(self):
        super().__init__(
            id="2f45e89a-3b7d-4f22-b89e-6c4f5c7e1234",
            description="Creates a new check run for a specific commit in a GitHub repository",
            categories={BlockCategory.DEVELOPER_TOOLS},
            input_schema=GithubCreateCheckRunBlock.Input,
            output_schema=GithubCreateCheckRunBlock.Output,
            test_input={
                "repo_url": "https://github.com/owner/repo",
                "name": "test-check",
                "head_sha": "ce587453ced02b1526dfb4cb910479d431683101",
                "status": ChecksStatus.COMPLETED.value,
                "conclusion": ChecksConclusion.SUCCESS.value,
                "output_title": "Test Results",
                "output_summary": "All tests passed",
                "credentials": TEST_CREDENTIALS_INPUT,
            },
            # requires a github app not available to oauth in our current system
            disabled=True,
            test_credentials=TEST_CREDENTIALS,
            test_output=[
                (
                    "check_run",
                    {
                        "id": 4,
                        "html_url": "https://github.com/owner/repo/runs/4",
                        "status": "completed",
                    },
                ),
            ],
            test_mock={
                "create_check_run": lambda *args, **kwargs: {
                    "id": 4,
                    "html_url": "https://github.com/owner/repo/runs/4",
                    "status": "completed",
                }
            },
        )

    @staticmethod
    def create_check_run(
        credentials: GithubCredentials,
        repo_url: str,
        name: str,
        head_sha: str,
        status: ChecksStatus,
        conclusion: Optional[ChecksConclusion] = None,
        details_url: Optional[str] = None,
        output_title: Optional[str] = None,
        output_summary: Optional[str] = None,
        output_text: Optional[str] = None,
    ) -> dict:
        api = get_api(credentials)

        class CheckRunData(BaseModel):
            name: str
            head_sha: str
            status: str
            conclusion: Optional[str] = None
            details_url: Optional[str] = None
            output: Optional[dict[str, str]] = None

        data = CheckRunData(
            name=name,
            head_sha=head_sha,
            status=status.value,
        )

        if conclusion:
            data.conclusion = conclusion.value

        if details_url:
            data.details_url = details_url

        if output_title or output_summary or output_text:
            output_data = {
                "title": output_title or "",
                "summary": output_summary or "",
                "text": output_text or "",
            }
            data.output = output_data

        check_runs_url = f"{repo_url}/check-runs"
        response = api.post(
            check_runs_url, data=data.model_dump_json(exclude_none=True)
        )
        result = response.json()

        return {
            "id": result["id"],
            "html_url": result["html_url"],
            "status": result["status"],
        }

    def run(
        self,
        input_data: Input,
        *,
        credentials: GithubCredentials,
        **kwargs,
    ) -> BlockOutput:
        try:
            result = self.create_check_run(
                credentials=credentials,
                repo_url=input_data.repo_url,
                name=input_data.name,
                head_sha=input_data.head_sha,
                status=input_data.status,
                conclusion=input_data.conclusion,
                details_url=input_data.details_url,
                output_title=input_data.output_title,
                output_summary=input_data.output_summary,
                output_text=input_data.output_text,
            )
            yield "check_run", result
        except Exception as e:
            yield "error", str(e)
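# Editor's note (not part of the original diff): what CheckRunData serializes
# to for the test_input above. With exclude_none=True, unset optional fields
# (here details_url, which is empty and therefore never assigned) are dropped
# before the POST to {repo_url}/check-runs; the extra_url_validator installed
# by get_api rewrites the web URL into the API endpoint.
example_payload = {
    "name": "test-check",
    "head_sha": "ce587453ced02b1526dfb4cb910479d431683101",
    "status": "completed",
    "conclusion": "success",
    "output": {"title": "Test Results", "summary": "All tests passed", "text": ""},
}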
class GithubUpdateCheckRunBlock(Block):
|
||||
"""Block for updating an existing check run on a GitHub repository."""
|
||||
|
||||
class Input(BlockSchema):
|
||||
credentials: GithubCredentialsInput = GithubCredentialsField("repo:status")
|
||||
repo_url: str = SchemaField(
|
||||
description="URL of the GitHub repository",
|
||||
placeholder="https://github.com/owner/repo",
|
||||
)
|
||||
check_run_id: int = SchemaField(
|
||||
description="The ID of the check run to update",
|
||||
)
|
||||
status: ChecksStatus = SchemaField(
|
||||
description="New status of the check run",
|
||||
)
|
||||
conclusion: ChecksConclusion = SchemaField(
|
||||
description="The final conclusion of the check (required if status is completed)",
|
||||
)
|
||||
output_title: Optional[str] = SchemaField(
|
||||
description="New title of the check run output",
|
||||
default=None,
|
||||
)
|
||||
output_summary: Optional[str] = SchemaField(
|
||||
description="New summary of the check run output",
|
||||
default=None,
|
||||
)
|
||||
output_text: Optional[str] = SchemaField(
|
||||
description="New detailed text of the check run output",
|
||||
default=None,
|
||||
)
|
||||
|
||||
class Output(BlockSchema):
|
||||
class CheckRunResult(BaseModel):
|
||||
id: int
|
||||
html_url: str
|
||||
status: str
|
||||
conclusion: Optional[str]
|
||||
|
||||
check_run: CheckRunResult = SchemaField(
|
||||
description="Details of the updated check run"
|
||||
)
|
||||
error: str = SchemaField(description="Error message if check run update failed")
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="8a23c567-9d01-4e56-b789-0c12d3e45678", # Generated UUID
|
||||
description="Updates an existing check run in a GitHub repository",
|
||||
categories={BlockCategory.DEVELOPER_TOOLS},
|
||||
input_schema=GithubUpdateCheckRunBlock.Input,
|
||||
output_schema=GithubUpdateCheckRunBlock.Output,
|
||||
# requires a github app not available to oauth in our current system
|
||||
disabled=True,
|
||||
test_input={
|
||||
"repo_url": "https://github.com/owner/repo",
|
||||
"check_run_id": 4,
|
||||
"status": ChecksStatus.COMPLETED.value,
|
||||
"conclusion": ChecksConclusion.SUCCESS.value,
|
||||
"output_title": "Updated Results",
|
||||
"output_summary": "All tests passed after retry",
|
||||
"credentials": TEST_CREDENTIALS_INPUT,
|
||||
},
|
||||
test_credentials=TEST_CREDENTIALS,
|
||||
test_output=[
|
||||
(
|
||||
"check_run",
|
||||
{
|
||||
"id": 4,
|
||||
"html_url": "https://github.com/owner/repo/runs/4",
|
||||
"status": "completed",
|
||||
"conclusion": "success",
|
||||
},
|
||||
),
|
||||
],
|
||||
test_mock={
|
||||
"update_check_run": lambda *args, **kwargs: {
|
||||
"id": 4,
|
||||
"html_url": "https://github.com/owner/repo/runs/4",
|
||||
"status": "completed",
|
||||
"conclusion": "success",
|
||||
}
|
||||
},
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def update_check_run(
|
||||
credentials: GithubCredentials,
|
||||
repo_url: str,
|
||||
check_run_id: int,
|
||||
status: ChecksStatus,
|
||||
conclusion: Optional[ChecksConclusion] = None,
|
||||
output_title: Optional[str] = None,
|
||||
output_summary: Optional[str] = None,
|
||||
output_text: Optional[str] = None,
|
||||
) -> dict:
|
||||
api = get_api(credentials)
|
||||
|
||||
class UpdateCheckRunData(BaseModel):
|
||||
status: str
|
||||
conclusion: Optional[str] = None
|
||||
output: Optional[dict[str, str]] = None
|
||||
|
||||
data = UpdateCheckRunData(
|
||||
status=status.value,
|
||||
)
|
||||
|
||||
if conclusion:
|
||||
data.conclusion = conclusion.value
|
||||
|
||||
if output_title or output_summary or output_text:
|
||||
output_data = {
|
||||
"title": output_title or "",
|
||||
"summary": output_summary or "",
|
||||
"text": output_text or "",
|
||||
}
|
||||
data.output = output_data
|
||||
|
||||
check_run_url = f"{repo_url}/check-runs/{check_run_id}"
|
||||
response = api.patch(
|
||||
check_run_url, data=data.model_dump_json(exclude_none=True)
|
||||
)
|
||||
result = response.json()
|
||||
|
||||
return {
|
||||
"id": result["id"],
|
||||
"html_url": result["html_url"],
|
||||
"status": result["status"],
|
||||
"conclusion": result.get("conclusion"),
|
||||
}
|
||||
|
||||
def run(
|
||||
self,
|
||||
input_data: Input,
|
||||
*,
|
||||
credentials: GithubCredentials,
|
||||
**kwargs,
|
||||
) -> BlockOutput:
|
||||
try:
|
||||
result = self.update_check_run(
|
||||
credentials=credentials,
|
||||
repo_url=input_data.repo_url,
|
||||
check_run_id=input_data.check_run_id,
|
||||
status=input_data.status,
|
||||
conclusion=input_data.conclusion,
|
||||
output_title=input_data.output_title,
|
||||
output_summary=input_data.output_summary,
|
||||
output_text=input_data.output_text,
|
||||
)
|
||||
yield "check_run", result
|
||||
except Exception as e:
|
||||
yield "error", str(e)
@@ -1,4 +1,3 @@
import logging
from urllib.parse import urlparse

from typing_extensions import TypedDict
@@ -6,7 +5,7 @@ from typing_extensions import TypedDict
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField

from ._api import convert_comment_url_to_api_endpoint, get_api
from ._api import get_api
from ._auth import (
    TEST_CREDENTIALS,
    TEST_CREDENTIALS_INPUT,
@@ -15,8 +14,6 @@ from ._auth import (
    GithubCredentialsInput,
)

logger = logging.getLogger(__name__)


def is_github_url(url: str) -> bool:
    return urlparse(url).netloc == "github.com"
@@ -111,228 +108,6 @@ class GithubCommentBlock(Block):
    # --8<-- [end:GithubCommentBlockExample]


class GithubUpdateCommentBlock(Block):
    class Input(BlockSchema):
        credentials: GithubCredentialsInput = GithubCredentialsField("repo")
        comment_url: str = SchemaField(
            description="URL of the GitHub comment",
            placeholder="https://github.com/owner/repo/issues/1#issuecomment-123456789",
            default="",
            advanced=False,
        )
        issue_url: str = SchemaField(
            description="URL of the GitHub issue or pull request",
            placeholder="https://github.com/owner/repo/issues/1",
            default="",
        )
        comment_id: str = SchemaField(
            description="ID of the GitHub comment",
            placeholder="123456789",
            default="",
        )
        comment: str = SchemaField(
            description="Comment to update",
            placeholder="Enter your comment",
        )

    class Output(BlockSchema):
        id: int = SchemaField(description="ID of the updated comment")
        url: str = SchemaField(description="URL to the comment on GitHub")
        error: str = SchemaField(
            description="Error message if the comment update failed"
        )

    def __init__(self):
        super().__init__(
            id="b3f4d747-10e3-4e69-8c51-f2be1d99c9a7",
            description="This block updates a comment on a specified GitHub issue or pull request.",
            categories={BlockCategory.DEVELOPER_TOOLS},
            input_schema=GithubUpdateCommentBlock.Input,
            output_schema=GithubUpdateCommentBlock.Output,
            test_input={
                "comment_url": "https://github.com/owner/repo/issues/1#issuecomment-123456789",
                "comment": "This is an updated comment.",
                "credentials": TEST_CREDENTIALS_INPUT,
            },
            test_credentials=TEST_CREDENTIALS,
            test_output=[
                ("id", 123456789),
                (
                    "url",
                    "https://github.com/owner/repo/issues/1#issuecomment-123456789",
                ),
            ],
            test_mock={
                "update_comment": lambda *args, **kwargs: (
                    123456789,
                    "https://github.com/owner/repo/issues/1#issuecomment-123456789",
                )
            },
        )

    @staticmethod
    def update_comment(
        credentials: GithubCredentials, comment_url: str, body_text: str
    ) -> tuple[int, str]:
        api = get_api(credentials, convert_urls=False)
        data = {"body": body_text}
        url = convert_comment_url_to_api_endpoint(comment_url)

        logger.info(url)
        response = api.patch(url, json=data)
        comment = response.json()
        return comment["id"], comment["html_url"]

    def run(
        self,
        input_data: Input,
        *,
        credentials: GithubCredentials,
        **kwargs,
    ) -> BlockOutput:
        if (
            not input_data.comment_url
            and input_data.comment_id
            and input_data.issue_url
        ):
            parsed_url = urlparse(input_data.issue_url)
            path_parts = parsed_url.path.strip("/").split("/")
            owner, repo = path_parts[0], path_parts[1]

            input_data.comment_url = f"https://api.github.com/repos/{owner}/{repo}/issues/comments/{input_data.comment_id}"

        elif (
            not input_data.comment_url
            and not input_data.comment_id
            and input_data.issue_url
        ):
            raise ValueError(
                "Must provide either comment_url or comment_id and issue_url"
            )
        id, url = self.update_comment(
            credentials,
            input_data.comment_url,
            input_data.comment,
        )
        yield "id", id
        yield "url", url


class GithubListCommentsBlock(Block):
    class Input(BlockSchema):
        credentials: GithubCredentialsInput = GithubCredentialsField("repo")
        issue_url: str = SchemaField(
            description="URL of the GitHub issue or pull request",
            placeholder="https://github.com/owner/repo/issues/1",
        )

    class Output(BlockSchema):
        class CommentItem(TypedDict):
            id: int
            body: str
            user: str
            url: str

        comment: CommentItem = SchemaField(
            title="Comment", description="Comments with their ID, body, user, and URL"
        )
        comments: list[CommentItem] = SchemaField(
            description="List of comments with their ID, body, user, and URL"
        )
        error: str = SchemaField(description="Error message if listing comments failed")

    def __init__(self):
        super().__init__(
            id="c4b5fb63-0005-4a11-b35a-0c2467bd6b59",
            description="This block lists all comments for a specified GitHub issue or pull request.",
            categories={BlockCategory.DEVELOPER_TOOLS},
            input_schema=GithubListCommentsBlock.Input,
            output_schema=GithubListCommentsBlock.Output,
            test_input={
                "issue_url": "https://github.com/owner/repo/issues/1",
                "credentials": TEST_CREDENTIALS_INPUT,
            },
            test_credentials=TEST_CREDENTIALS,
            test_output=[
                (
                    "comment",
                    {
                        "id": 123456789,
                        "body": "This is a test comment.",
                        "user": "test_user",
                        "url": "https://github.com/owner/repo/issues/1#issuecomment-123456789",
                    },
                ),
                (
                    "comments",
                    [
                        {
                            "id": 123456789,
                            "body": "This is a test comment.",
                            "user": "test_user",
                            "url": "https://github.com/owner/repo/issues/1#issuecomment-123456789",
                        }
                    ],
                ),
            ],
            test_mock={
                "list_comments": lambda *args, **kwargs: [
                    {
                        "id": 123456789,
                        "body": "This is a test comment.",
                        "user": "test_user",
                        "url": "https://github.com/owner/repo/issues/1#issuecomment-123456789",
                    }
                ]
            },
        )

    @staticmethod
    def list_comments(
        credentials: GithubCredentials, issue_url: str
    ) -> list[Output.CommentItem]:
        parsed_url = urlparse(issue_url)
        path_parts = parsed_url.path.strip("/").split("/")

        owner = path_parts[0]
        repo = path_parts[1]

        # GitHub API uses 'issues' for both issues and pull requests when it comes to comments
        issue_number = path_parts[3]  # Whether 'issues/123' or 'pull/123'

        # Construct the proper API URL directly
        api_url = f"https://api.github.com/repos/{owner}/{repo}/issues/{issue_number}/comments"

        # Set convert_urls=False since we're already providing an API URL
        api = get_api(credentials, convert_urls=False)
        response = api.get(api_url)
        comments = response.json()
        parsed_comments: list[GithubListCommentsBlock.Output.CommentItem] = [
            {
                "id": comment["id"],
                "body": comment["body"],
                "user": comment["user"]["login"],
                "url": comment["html_url"],
            }
            for comment in comments
        ]
        return parsed_comments

    def run(
        self,
        input_data: Input,
        *,
        credentials: GithubCredentials,
        **kwargs,
    ) -> BlockOutput:
        comments = self.list_comments(
            credentials,
            input_data.issue_url,
        )
        yield from (("comment", comment) for comment in comments)
        yield "comments", comments


class GithubMakeIssueBlock(Block):
    class Input(BlockSchema):
        credentials: GithubCredentialsInput = GithubCredentialsField("repo")

@@ -200,7 +200,6 @@ class GithubReadPullRequestBlock(Block):
        include_pr_changes: bool = SchemaField(
            description="Whether to include the changes made in the pull request",
            default=False,
            advanced=False,
        )

    class Output(BlockSchema):
@@ -1,180 +0,0 @@
from enum import Enum
from typing import Optional

from pydantic import BaseModel

from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField

from ._api import get_api
from ._auth import (
    TEST_CREDENTIALS,
    TEST_CREDENTIALS_INPUT,
    GithubFineGrainedAPICredentials,
    GithubFineGrainedAPICredentialsField,
    GithubFineGrainedAPICredentialsInput,
)


class StatusState(Enum):
    ERROR = "error"
    FAILURE = "failure"
    PENDING = "pending"
    SUCCESS = "success"


class GithubCreateStatusBlock(Block):
    """Block for creating a commit status on a GitHub repository."""

    class Input(BlockSchema):
        credentials: GithubFineGrainedAPICredentialsInput = (
            GithubFineGrainedAPICredentialsField("repo:status")
        )
        repo_url: str = SchemaField(
            description="URL of the GitHub repository",
            placeholder="https://github.com/owner/repo",
        )
        sha: str = SchemaField(
            description="The SHA of the commit to set status for",
        )
        state: StatusState = SchemaField(
            description="The state of the status (error, failure, pending, success)",
        )
        target_url: Optional[str] = SchemaField(
            description="URL with additional details about this status",
            default=None,
        )
        description: Optional[str] = SchemaField(
            description="Short description of the status",
            default=None,
        )
        check_name: Optional[str] = SchemaField(
            description="Label to differentiate this status from others",
            default="AutoGPT Platform Checks",
            advanced=False,
        )

    class Output(BlockSchema):
        class StatusResult(BaseModel):
            id: int
            url: str
            state: str
            context: str
            description: Optional[str]
            target_url: Optional[str]
            created_at: str
            updated_at: str

        status: StatusResult = SchemaField(description="Details of the created status")
        error: str = SchemaField(description="Error message if status creation failed")

    def __init__(self):
        super().__init__(
            id="3d67f123-a4b5-4c89-9d01-2e34f5c67890",  # Generated UUID
            description="Creates a new commit status in a GitHub repository",
            categories={BlockCategory.DEVELOPER_TOOLS},
            input_schema=GithubCreateStatusBlock.Input,
            output_schema=GithubCreateStatusBlock.Output,
            test_input={
                "repo_url": "https://github.com/owner/repo",
                "sha": "ce587453ced02b1526dfb4cb910479d431683101",
                "state": StatusState.SUCCESS.value,
                "target_url": "https://example.com/build/status",
                "description": "The build succeeded!",
                "check_name": "continuous-integration/jenkins",
                "credentials": TEST_CREDENTIALS_INPUT,
            },
            test_credentials=TEST_CREDENTIALS,
            test_output=[
                (
                    "status",
                    {
                        "id": 1234567890,
                        "url": "https://api.github.com/repos/owner/repo/statuses/ce587453ced02b1526dfb4cb910479d431683101",
                        "state": "success",
                        "context": "continuous-integration/jenkins",
                        "description": "The build succeeded!",
                        "target_url": "https://example.com/build/status",
                        "created_at": "2024-01-21T10:00:00Z",
                        "updated_at": "2024-01-21T10:00:00Z",
                    },
                ),
            ],
            test_mock={
                "create_status": lambda *args, **kwargs: {
                    "id": 1234567890,
                    "url": "https://api.github.com/repos/owner/repo/statuses/ce587453ced02b1526dfb4cb910479d431683101",
                    "state": "success",
                    "context": "continuous-integration/jenkins",
                    "description": "The build succeeded!",
                    "target_url": "https://example.com/build/status",
                    "created_at": "2024-01-21T10:00:00Z",
                    "updated_at": "2024-01-21T10:00:00Z",
                }
            },
        )

    @staticmethod
    def create_status(
        credentials: GithubFineGrainedAPICredentials,
        repo_url: str,
        sha: str,
        state: StatusState,
        target_url: Optional[str] = None,
        description: Optional[str] = None,
        context: str = "default",
    ) -> dict:
        api = get_api(credentials)

        class StatusData(BaseModel):
            state: str
            target_url: Optional[str] = None
            description: Optional[str] = None
            context: str

        data = StatusData(
            state=state.value,
            context=context,
        )

        if target_url:
            data.target_url = target_url

        if description:
            data.description = description

        status_url = f"{repo_url}/statuses/{sha}"
        response = api.post(status_url, data=data.model_dump_json(exclude_none=True))
        result = response.json()

        return {
            "id": result["id"],
            "url": result["url"],
            "state": result["state"],
            "context": result["context"],
            "description": result.get("description"),
            "target_url": result.get("target_url"),
            "created_at": result["created_at"],
            "updated_at": result["updated_at"],
        }

    def run(
        self,
        input_data: Input,
        *,
        credentials: GithubFineGrainedAPICredentials,
        **kwargs,
    ) -> BlockOutput:
        try:
            result = self.create_status(
                credentials=credentials,
                repo_url=input_data.repo_url,
                sha=input_data.sha,
                state=input_data.state,
                target_url=input_data.target_url,
                description=input_data.description,
                context=input_data.check_name or "AutoGPT Platform Checks",
            )
            yield "status", result
        except Exception as e:
            yield "error", str(e)
@@ -142,16 +142,6 @@ class IdeogramModelBlock(Block):
            title="Color Palette Preset",
            advanced=True,
        )
        custom_color_palette: Optional[list[str]] = SchemaField(
            description=(
                "Only available for model version V_2 or V_2_TURBO. Provide one or more color hex codes "
                "(e.g., ['#000030', '#1C0C47', '#9900FF', '#4285F4', '#FFFFFF']) to define a custom color "
                "palette. Only used if 'color_palette_name' is 'NONE'."
            ),
            default=None,
            title="Custom Color Palette",
            advanced=True,
        )

    class Output(BlockSchema):
        result: str = SchemaField(description="Generated image URL")
@@ -161,7 +151,7 @@ class IdeogramModelBlock(Block):
        super().__init__(
            id="6ab085e2-20b3-4055-bc3e-08036e01eca6",
            description="This block runs Ideogram models with both simple and advanced settings.",
            categories={BlockCategory.AI, BlockCategory.MULTIMEDIA},
            categories={BlockCategory.AI},
            input_schema=IdeogramModelBlock.Input,
            output_schema=IdeogramModelBlock.Output,
            test_input={
@@ -174,13 +164,6 @@ class IdeogramModelBlock(Block):
                "style_type": StyleType.AUTO,
                "negative_prompt": None,
                "color_palette_name": ColorPalettePreset.NONE,
                "custom_color_palette": [
                    "#000030",
                    "#1C0C47",
                    "#9900FF",
                    "#4285F4",
                    "#FFFFFF",
                ],
                "credentials": TEST_CREDENTIALS_INPUT,
            },
            test_output=[
@@ -190,7 +173,7 @@ class IdeogramModelBlock(Block):
                ),
            ],
            test_mock={
                "run_model": lambda api_key, model_name, prompt, seed, aspect_ratio, magic_prompt_option, style_type, negative_prompt, color_palette_name, custom_colors: "https://ideogram.ai/api/images/test-generated-image-url.png",
                "run_model": lambda api_key, model_name, prompt, seed, aspect_ratio, magic_prompt_option, style_type, negative_prompt, color_palette_name: "https://ideogram.ai/api/images/test-generated-image-url.png",
                "upscale_image": lambda api_key, image_url: "https://ideogram.ai/api/images/test-upscaled-image-url.png",
            },
            test_credentials=TEST_CREDENTIALS,
@@ -212,7 +195,6 @@ class IdeogramModelBlock(Block):
                style_type=input_data.style_type.value,
                negative_prompt=input_data.negative_prompt,
                color_palette_name=input_data.color_palette_name.value,
                custom_colors=input_data.custom_color_palette,
            )

        # Step 2: Upscale the image if requested
@@ -235,7 +217,6 @@ class IdeogramModelBlock(Block):
        style_type: str,
        negative_prompt: Optional[str],
        color_palette_name: str,
        custom_colors: Optional[list[str]],
    ):
        url = "https://api.ideogram.ai/generate"
        headers = {
@@ -260,11 +241,7 @@ class IdeogramModelBlock(Block):
            data["image_request"]["negative_prompt"] = negative_prompt

        if color_palette_name != "NONE":
            data["color_palette"] = {"name": color_palette_name}
        elif custom_colors:
            data["color_palette"] = {
                "members": [{"color_hex": color} for color in custom_colors]
            }
            data["image_request"]["color_palette"] = {"name": color_palette_name}

        try:
            response = requests.post(url, json=data, headers=headers)
@@ -290,7 +267,9 @@ class IdeogramModelBlock(Block):
        response = requests.post(
            url,
            headers=headers,
            data={"image_request": "{}"},
            data={
                "image_request": "{}",  # Empty JSON object
            },
            files=files,
        )
@@ -1,272 +0,0 @@
from __future__ import annotations

import json
from typing import Any, Dict, Optional

from backend.blocks.linear._auth import LinearCredentials
from backend.blocks.linear.models import (
    CreateCommentResponse,
    CreateIssueResponse,
    Issue,
    Project,
)
from backend.util.request import Requests


class LinearAPIException(Exception):
    def __init__(self, message: str, status_code: int):
        super().__init__(message)
        self.status_code = status_code


class LinearClient:
    """Client for the Linear API

    If you're looking for the schema: https://studio.apollographql.com/public/Linear-API/variant/current/schema
    """

    API_URL = "https://api.linear.app/graphql"

    def __init__(
        self,
        credentials: LinearCredentials | None = None,
        custom_requests: Optional[Requests] = None,
    ):
        if custom_requests:
            self._requests = custom_requests
        else:
            headers: Dict[str, str] = {
                "Content-Type": "application/json",
            }
            if credentials:
                headers["Authorization"] = credentials.auth_header()

            self._requests = Requests(
                extra_headers=headers,
                trusted_origins=["https://api.linear.app"],
                raise_for_status=False,
            )

    def _execute_graphql_request(
        self, query: str, variables: dict | None = None
    ) -> Any:
        """
        Executes a GraphQL request against the Linear API and returns the response data.

        Args:
            query: The GraphQL query string.
            variables (optional): Any GraphQL query variables

        Returns:
            The parsed JSON response data, or raises a LinearAPIException on error.
        """
        payload: Dict[str, Any] = {"query": query}
        if variables:
            payload["variables"] = variables

        response = self._requests.post(self.API_URL, json=payload)

        if not response.ok:
            try:
                error_data = response.json()
                error_message = error_data.get("errors", [{}])[0].get("message", "")
            except json.JSONDecodeError:
                error_message = response.text

            raise LinearAPIException(
                f"Linear API request failed ({response.status_code}): {error_message}",
                response.status_code,
            )

        response_data = response.json()
        if "errors" in response_data:
            error_messages = [
                error.get("message", "") for error in response_data["errors"]
            ]
            raise LinearAPIException(
                f"Linear API returned errors: {', '.join(error_messages)}",
                response.status_code,
            )

        return response_data["data"]

    def query(self, query: str, variables: Optional[dict] = None) -> dict:
        """Executes a GraphQL query.

        Args:
            query: The GraphQL query string.
            variables: Query variables, if any.

        Returns:
            The response data.
        """
        return self._execute_graphql_request(query, variables)

    def mutate(self, mutation: str, variables: Optional[dict] = None) -> dict:
        """Executes a GraphQL mutation.

        Args:
            mutation: The GraphQL mutation string.
            variables: Query variables, if any.

        Returns:
            The response data.
        """
        return self._execute_graphql_request(mutation, variables)

    def try_create_comment(self, issue_id: str, comment: str) -> CreateCommentResponse:
        try:
            mutation = """
                mutation CommentCreate($input: CommentCreateInput!) {
                    commentCreate(input: $input) {
                        success
                        comment {
                            id
                            body
                        }
                    }
                }
            """

            variables = {
                "input": {
                    "body": comment,
                    "issueId": issue_id,
                }
            }

            added_comment = self.mutate(mutation, variables)
            # Select the commentCreate field from the mutation response
            return CreateCommentResponse(**added_comment["commentCreate"])
        except LinearAPIException as e:
            raise e

    def try_get_team_by_name(self, team_name: str) -> str:
        try:
            query = """
                query GetTeamId($searchTerm: String!) {
                    teams(filter: {
                        or: [
                            { name: { eqIgnoreCase: $searchTerm } },
                            { key: { eqIgnoreCase: $searchTerm } }
                        ]
                    }) {
                        nodes {
                            id
                            name
                            key
                        }
                    }
                }
            """

            variables: dict[str, Any] = {
                "searchTerm": team_name,
            }

            team_id = self.query(query, variables)
            return team_id["teams"]["nodes"][0]["id"]
        except LinearAPIException as e:
            raise e

    def try_create_issue(
        self,
        team_id: str,
        title: str,
        description: str | None = None,
        priority: int | None = None,
        project_id: str | None = None,
    ) -> CreateIssueResponse:
        try:
            mutation = """
                mutation IssueCreate($input: IssueCreateInput!) {
                    issueCreate(input: $input) {
                        issue {
                            title
                            description
                            id
                            identifier
                            priority
                        }
                    }
                }
            """

            variables: dict[str, Any] = {
                "input": {
                    "teamId": team_id,
                    "title": title,
                }
            }

            if project_id:
                variables["input"]["projectId"] = project_id

            if description:
                variables["input"]["description"] = description

            if priority:
                variables["input"]["priority"] = priority

            added_issue = self.mutate(mutation, variables)
            return CreateIssueResponse(**added_issue["issueCreate"])
        except LinearAPIException as e:
            raise e

    def try_search_projects(self, term: str) -> list[Project]:
        try:
            query = """
                query SearchProjects($term: String!, $includeComments: Boolean!) {
                    searchProjects(term: $term, includeComments: $includeComments) {
                        nodes {
                            id
                            name
                            description
                            priority
                            progress
                            content
                        }
                    }
                }
            """

            variables: dict[str, Any] = {
                "term": term,
                "includeComments": True,
            }

            projects = self.query(query, variables)
            return [
                Project(**project) for project in projects["searchProjects"]["nodes"]
            ]
        except LinearAPIException as e:
            raise e

    def try_search_issues(self, term: str) -> list[Issue]:
        try:
            query = """
                query SearchIssues($term: String!, $includeComments: Boolean!) {
                    searchIssues(term: $term, includeComments: $includeComments) {
                        nodes {
                            id
                            identifier
                            title
                            description
                            priority
                        }
                    }
                }
            """

            variables: dict[str, Any] = {
                "term": term,
                "includeComments": True,
            }

            issues = self.query(query, variables)
            return [Issue(**issue) for issue in issues["searchIssues"]["nodes"]]
        except LinearAPIException as e:
            raise e
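A minimal usage sketch of the client above, reusing the SearchIssues document it defines; the search term is illustrative, and API errors surface as LinearAPIException:

from backend.blocks.linear._api import LinearClient
from backend.blocks.linear._auth import TEST_CREDENTIALS_API_KEY  # mock credentials from _auth.py

client = LinearClient(credentials=TEST_CREDENTIALS_API_KEY)
# query() and mutate() both funnel into _execute_graphql_request(),
# so any GraphQL document works here.
data = client.query(
    "query SearchIssues($term: String!, $includeComments: Boolean!) {"
    "  searchIssues(term: $term, includeComments: $includeComments) { nodes { id title } }"
    "}",
    {"term": "bug", "includeComments": True},
)
for node in data["searchIssues"]["nodes"]:
    print(node["id"], node["title"])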
@@ -1,101 +0,0 @@
from enum import Enum
from typing import Literal

from pydantic import SecretStr

from backend.data.model import (
    APIKeyCredentials,
    CredentialsField,
    CredentialsMetaInput,
    OAuth2Credentials,
)
from backend.integrations.providers import ProviderName
from backend.util.settings import Secrets

secrets = Secrets()
LINEAR_OAUTH_IS_CONFIGURED = bool(
    secrets.linear_client_id and secrets.linear_client_secret
)

LinearCredentials = OAuth2Credentials | APIKeyCredentials
# LinearCredentialsInput = CredentialsMetaInput[
#     Literal[ProviderName.LINEAR],
#     Literal["oauth2", "api_key"] if LINEAR_OAUTH_IS_CONFIGURED else Literal["oauth2"],
# ]
LinearCredentialsInput = CredentialsMetaInput[
    Literal[ProviderName.LINEAR], Literal["oauth2"]
]


# (required) Comma separated list of scopes:

# read - (Default) Read access for the user's account. This scope will always be present.

# write - Write access for the user's account. If your application only needs to create comments, use a more targeted scope

# issues:create - Allows creating new issues and their attachments

# comments:create - Allows creating new issue comments

# timeSchedule:write - Allows creating and modifying time schedules


# admin - Full access to admin level endpoints. You should never ask for this permission unless it's absolutely needed
class LinearScope(str, Enum):
    READ = "read"
    WRITE = "write"
    ISSUES_CREATE = "issues:create"
    COMMENTS_CREATE = "comments:create"
    TIME_SCHEDULE_WRITE = "timeSchedule:write"
    ADMIN = "admin"


def LinearCredentialsField(scopes: list[LinearScope]) -> LinearCredentialsInput:
    """
    Creates a Linear credentials input on a block.

    Params:
        scope: The authorization scope needed for the block to work. ([list of available scopes](https://docs.github.com/en/apps/oauth-apps/building-oauth-apps/scopes-for-oauth-apps#available-scopes))
    """  # noqa
    return CredentialsField(
        required_scopes=set([LinearScope.READ.value]).union(
            set([scope.value for scope in scopes])
        ),
        description="The Linear integration can be used with OAuth, "
        "or any API key with sufficient permissions for the blocks it is used on.",
    )
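A sketch of how a block declares its credentials with this helper (mirroring the Linear blocks later in this diff; the class name is illustrative):

class ExampleLinearInput(BlockSchema):
    credentials: LinearCredentialsInput = LinearCredentialsField(
        scopes=[LinearScope.ISSUES_CREATE, LinearScope.COMMENTS_CREATE],
    )

# LinearCredentialsField always unions in the "read" scope, so the resulting
# required_scopes set here is {"read", "issues:create", "comments:create"}.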
TEST_CREDENTIALS_OAUTH = OAuth2Credentials(
    id="01234567-89ab-cdef-0123-456789abcdef",
    provider="linear",
    title="Mock Linear API key",
    username="mock-linear-username",
    access_token=SecretStr("mock-linear-access-token"),
    access_token_expires_at=None,
    refresh_token=SecretStr("mock-linear-refresh-token"),
    refresh_token_expires_at=None,
    scopes=["mock-linear-scopes"],
)

TEST_CREDENTIALS_API_KEY = APIKeyCredentials(
    id="01234567-89ab-cdef-0123-456789abcdef",
    provider="linear",
    title="Mock Linear API key",
    api_key=SecretStr("mock-linear-api-key"),
    expires_at=None,
)

TEST_CREDENTIALS_INPUT_OAUTH = {
    "provider": TEST_CREDENTIALS_OAUTH.provider,
    "id": TEST_CREDENTIALS_OAUTH.id,
    "type": TEST_CREDENTIALS_OAUTH.type,
    "title": TEST_CREDENTIALS_OAUTH.type,
}

TEST_CREDENTIALS_INPUT_API_KEY = {
    "provider": TEST_CREDENTIALS_API_KEY.provider,
    "id": TEST_CREDENTIALS_API_KEY.id,
    "type": TEST_CREDENTIALS_API_KEY.type,
    "title": TEST_CREDENTIALS_API_KEY.type,
}
@@ -1,83 +0,0 @@
from backend.blocks.linear._api import LinearAPIException, LinearClient
from backend.blocks.linear._auth import (
    LINEAR_OAUTH_IS_CONFIGURED,
    TEST_CREDENTIALS_INPUT_OAUTH,
    TEST_CREDENTIALS_OAUTH,
    LinearCredentials,
    LinearCredentialsField,
    LinearCredentialsInput,
    LinearScope,
)
from backend.blocks.linear.models import CreateCommentResponse
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField


class LinearCreateCommentBlock(Block):
    """Block for creating comments on Linear issues"""

    class Input(BlockSchema):
        credentials: LinearCredentialsInput = LinearCredentialsField(
            scopes=[LinearScope.COMMENTS_CREATE],
        )
        issue_id: str = SchemaField(description="ID of the issue to comment on")
        comment: str = SchemaField(description="Comment text to add to the issue")

    class Output(BlockSchema):
        comment_id: str = SchemaField(description="ID of the created comment")
        comment_body: str = SchemaField(
            description="Text content of the created comment"
        )
        error: str = SchemaField(description="Error message if comment creation failed")

    def __init__(self):
        super().__init__(
            id="8f7d3a2e-9b5c-4c6a-8f1d-7c8b3e4a5d6c",
            description="Creates a new comment on a Linear issue",
            input_schema=self.Input,
            output_schema=self.Output,
            categories={BlockCategory.PRODUCTIVITY, BlockCategory.ISSUE_TRACKING},
            test_input={
                "issue_id": "TEST-123",
                "comment": "Test comment",
                "credentials": TEST_CREDENTIALS_INPUT_OAUTH,
            },
            disabled=not LINEAR_OAUTH_IS_CONFIGURED,
            test_credentials=TEST_CREDENTIALS_OAUTH,
            test_output=[("comment_id", "abc123"), ("comment_body", "Test comment")],
            test_mock={
                "create_comment": lambda *args, **kwargs: (
                    "abc123",
                    "Test comment",
                )
            },
        )

    @staticmethod
    def create_comment(
        credentials: LinearCredentials, issue_id: str, comment: str
    ) -> tuple[str, str]:
        client = LinearClient(credentials=credentials)
        response: CreateCommentResponse = client.try_create_comment(
            issue_id=issue_id, comment=comment
        )
        return response.comment.id, response.comment.body

    def run(
        self, input_data: Input, *, credentials: LinearCredentials, **kwargs
    ) -> BlockOutput:
        """Execute the comment creation"""
        try:
            comment_id, comment_body = self.create_comment(
                credentials=credentials,
                issue_id=input_data.issue_id,
                comment=input_data.comment,
            )

            yield "comment_id", comment_id
            yield "comment_body", comment_body

        except LinearAPIException as e:
            yield "error", str(e)
        except Exception as e:
            yield "error", f"Unexpected error: {str(e)}"
@@ -1,189 +0,0 @@
from backend.blocks.linear._api import LinearAPIException, LinearClient
from backend.blocks.linear._auth import (
    LINEAR_OAUTH_IS_CONFIGURED,
    TEST_CREDENTIALS_INPUT_OAUTH,
    TEST_CREDENTIALS_OAUTH,
    LinearCredentials,
    LinearCredentialsField,
    LinearCredentialsInput,
    LinearScope,
)
from backend.blocks.linear.models import CreateIssueResponse, Issue
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField


class LinearCreateIssueBlock(Block):
    """Block for creating issues on Linear"""

    class Input(BlockSchema):
        credentials: LinearCredentialsInput = LinearCredentialsField(
            scopes=[LinearScope.ISSUES_CREATE],
        )
        title: str = SchemaField(description="Title of the issue")
        description: str | None = SchemaField(description="Description of the issue")
        team_name: str = SchemaField(
            description="Name of the team to create the issue on"
        )
        priority: int | None = SchemaField(
            description="Priority of the issue",
            default=None,
            minimum=0,
            maximum=4,
        )
        project_name: str | None = SchemaField(
            description="Name of the project to create the issue on",
            default=None,
        )

    class Output(BlockSchema):
        issue_id: str = SchemaField(description="ID of the created issue")
        issue_title: str = SchemaField(description="Title of the created issue")
        error: str = SchemaField(description="Error message if issue creation failed")

    def __init__(self):
        super().__init__(
            id="f9c68f55-dcca-40a8-8771-abf9601680aa",
            description="Creates a new issue on Linear",
            disabled=not LINEAR_OAUTH_IS_CONFIGURED,
            input_schema=self.Input,
            output_schema=self.Output,
            categories={BlockCategory.PRODUCTIVITY, BlockCategory.ISSUE_TRACKING},
            test_input={
                "title": "Test issue",
                "description": "Test description",
                "team_name": "Test team",
                "project_name": "Test project",
                "credentials": TEST_CREDENTIALS_INPUT_OAUTH,
            },
            test_credentials=TEST_CREDENTIALS_OAUTH,
            test_output=[("issue_id", "abc123"), ("issue_title", "Test issue")],
            test_mock={
                "create_issue": lambda *args, **kwargs: (
                    "abc123",
                    "Test issue",
                )
            },
        )

    @staticmethod
    def create_issue(
        credentials: LinearCredentials,
        team_name: str,
        title: str,
        description: str | None = None,
        priority: int | None = None,
        project_name: str | None = None,
    ) -> tuple[str, str]:
        client = LinearClient(credentials=credentials)
        team_id = client.try_get_team_by_name(team_name=team_name)
        project_id: str | None = None
        if project_name:
            projects = client.try_search_projects(term=project_name)
            if projects:
                project_id = projects[0].id
            else:
                raise LinearAPIException("Project not found", status_code=404)
        response: CreateIssueResponse = client.try_create_issue(
            team_id=team_id,
            title=title,
            description=description,
            priority=priority,
            project_id=project_id,
        )
        return response.issue.identifier, response.issue.title

    def run(
        self, input_data: Input, *, credentials: LinearCredentials, **kwargs
    ) -> BlockOutput:
        """Execute the issue creation"""
        try:
            issue_id, issue_title = self.create_issue(
                credentials=credentials,
                team_name=input_data.team_name,
                title=input_data.title,
                description=input_data.description,
                priority=input_data.priority,
                project_name=input_data.project_name,
            )

            yield "issue_id", issue_id
            yield "issue_title", issue_title

        except LinearAPIException as e:
            yield "error", str(e)
        except Exception as e:
            yield "error", f"Unexpected error: {str(e)}"


class LinearSearchIssuesBlock(Block):
    """Block for searching issues on Linear"""

    class Input(BlockSchema):
        term: str = SchemaField(description="Term to search for issues")
        credentials: LinearCredentialsInput = LinearCredentialsField(
            scopes=[LinearScope.READ],
        )

    class Output(BlockSchema):
        issues: list[Issue] = SchemaField(description="List of issues")

    def __init__(self):
        super().__init__(
            id="b5a2a0e6-26b4-4c5b-8a42-bc79e9cb65c2",
            description="Searches for issues on Linear",
            input_schema=self.Input,
            output_schema=self.Output,
            disabled=not LINEAR_OAUTH_IS_CONFIGURED,
            test_input={
                "term": "Test issue",
                "credentials": TEST_CREDENTIALS_INPUT_OAUTH,
            },
            test_credentials=TEST_CREDENTIALS_OAUTH,
            test_output=[
                (
                    "issues",
                    [
                        Issue(
                            id="abc123",
                            identifier="abc123",
                            title="Test issue",
                            description="Test description",
                            priority=1,
                        )
                    ],
                )
            ],
            test_mock={
                "search_issues": lambda *args, **kwargs: [
                    Issue(
                        id="abc123",
                        identifier="abc123",
                        title="Test issue",
                        description="Test description",
                        priority=1,
                    )
                ]
            },
        )

    @staticmethod
    def search_issues(
        credentials: LinearCredentials,
        term: str,
    ) -> list[Issue]:
        client = LinearClient(credentials=credentials)
        response: list[Issue] = client.try_search_issues(term=term)
        return response

    def run(
        self, input_data: Input, *, credentials: LinearCredentials, **kwargs
    ) -> BlockOutput:
        """Execute the issue search"""
        try:
            issues = self.search_issues(credentials=credentials, term=input_data.term)
            yield "issues", issues
        except LinearAPIException as e:
            yield "error", str(e)
        except Exception as e:
            yield "error", f"Unexpected error: {str(e)}"
@@ -1,41 +0,0 @@
from pydantic import BaseModel


class Comment(BaseModel):
    id: str
    body: str


class CreateCommentInput(BaseModel):
    body: str
    issueId: str


class CreateCommentResponse(BaseModel):
    success: bool
    comment: Comment


class CreateCommentResponseWrapper(BaseModel):
    commentCreate: CreateCommentResponse


class Issue(BaseModel):
    id: str
    identifier: str
    title: str
    description: str | None
    priority: int


class CreateIssueResponse(BaseModel):
    issue: Issue


class Project(BaseModel):
    id: str
    name: str
    description: str
    priority: int
    progress: int
    content: str
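These models mirror the GraphQL payloads returned by the client in _api.py; a minimal parsing sketch with an illustrative response dict:

raw = {
    "commentCreate": {
        "success": True,
        "comment": {"id": "abc123", "body": "Looks good!"},
    }
}
parsed = CreateCommentResponseWrapper(**raw)
assert parsed.commentCreate.success
assert parsed.commentCreate.comment.body == "Looks good!"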
@@ -1,95 +0,0 @@
from backend.blocks.linear._api import LinearAPIException, LinearClient
from backend.blocks.linear._auth import (
    LINEAR_OAUTH_IS_CONFIGURED,
    TEST_CREDENTIALS_INPUT_OAUTH,
    TEST_CREDENTIALS_OAUTH,
    LinearCredentials,
    LinearCredentialsField,
    LinearCredentialsInput,
    LinearScope,
)
from backend.blocks.linear.models import Project
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField


class LinearSearchProjectsBlock(Block):
    """Block for searching projects on Linear"""

    class Input(BlockSchema):
        credentials: LinearCredentialsInput = LinearCredentialsField(
            scopes=[LinearScope.READ],
        )
        term: str = SchemaField(description="Term to search for projects")

    class Output(BlockSchema):
        projects: list[Project] = SchemaField(description="List of projects")
        error: str = SchemaField(description="Error message if project search failed")

    def __init__(self):
        super().__init__(
            id="446a1d35-9d8f-4ac5-83ea-7684ec50e6af",
            description="Searches for projects on Linear",
            input_schema=self.Input,
            output_schema=self.Output,
            categories={BlockCategory.PRODUCTIVITY, BlockCategory.ISSUE_TRACKING},
            test_input={
                "term": "Test project",
                "credentials": TEST_CREDENTIALS_INPUT_OAUTH,
            },
            disabled=not LINEAR_OAUTH_IS_CONFIGURED,
            test_credentials=TEST_CREDENTIALS_OAUTH,
            test_output=[
                (
                    "projects",
                    [
                        Project(
                            id="abc123",
                            name="Test project",
                            description="Test description",
                            priority=1,
                            progress=1,
                            content="Test content",
                        )
                    ],
                )
            ],
            test_mock={
                "search_projects": lambda *args, **kwargs: [
                    Project(
                        id="abc123",
                        name="Test project",
                        description="Test description",
                        priority=1,
                        progress=1,
                        content="Test content",
                    )
                ]
            },
        )

    @staticmethod
    def search_projects(
        credentials: LinearCredentials,
        term: str,
    ) -> list[Project]:
        client = LinearClient(credentials=credentials)
        response: list[Project] = client.try_search_projects(term=term)
        return response

    def run(
        self, input_data: Input, *, credentials: LinearCredentials, **kwargs
    ) -> BlockOutput:
        """Execute the project search"""
        try:
            projects = self.search_projects(
                credentials=credentials,
                term=input_data.term,
            )

            yield "projects", projects

        except LinearAPIException as e:
            yield "error", str(e)
        except Exception as e:
            yield "error", f"Unexpected error: {str(e)}"
File diff suppressed because it is too large
@@ -1,245 +0,0 @@
import os
import tempfile
from typing import Literal, Optional

from moviepy.audio.io.AudioFileClip import AudioFileClip
from moviepy.video.fx.Loop import Loop
from moviepy.video.io.VideoFileClip import VideoFileClip

from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.file import MediaFile, get_exec_file_path, store_media_file


class MediaDurationBlock(Block):

    class Input(BlockSchema):
        media_in: MediaFile = SchemaField(
            description="Media input (URL, data URI, or local path)."
        )
        is_video: bool = SchemaField(
            description="Whether the media is a video (True) or audio (False).",
            default=True,
        )

    class Output(BlockSchema):
        duration: float = SchemaField(
            description="Duration of the media file (in seconds)."
        )
        error: str = SchemaField(
            description="Error message if something fails.", default=""
        )

    def __init__(self):
        super().__init__(
            id="d8b91fd4-da26-42d4-8ecb-8b196c6d84b6",
            description="Block to get the duration of a media file.",
            categories={BlockCategory.MULTIMEDIA},
            input_schema=MediaDurationBlock.Input,
            output_schema=MediaDurationBlock.Output,
        )

    def run(
        self,
        input_data: Input,
        *,
        graph_exec_id: str,
        **kwargs,
    ) -> BlockOutput:
        # 1) Store the input media locally
        local_media_path = store_media_file(
            graph_exec_id=graph_exec_id,
            file=input_data.media_in,
            return_content=False,
        )
        media_abspath = get_exec_file_path(graph_exec_id, local_media_path)

        # 2) Load the clip
        if input_data.is_video:
            clip = VideoFileClip(media_abspath)
        else:
            clip = AudioFileClip(media_abspath)

        yield "duration", clip.duration


class LoopVideoBlock(Block):
    """
    Block for looping (repeating) a video clip until a given duration or number of loops.
    """

    class Input(BlockSchema):
        video_in: MediaFile = SchemaField(
            description="The input video (can be a URL, data URI, or local path)."
        )
        # Provide EITHER a `duration` or `n_loops` or both. We'll demonstrate `duration`.
        duration: Optional[float] = SchemaField(
            description="Target duration (in seconds) to loop the video to. If omitted, defaults to no looping.",
            default=None,
            ge=0.0,
        )
        n_loops: Optional[int] = SchemaField(
            description="Number of times to repeat the video. If omitted, defaults to 1 (no repeat).",
            default=None,
            ge=1,
        )
        output_return_type: Literal["file_path", "data_uri"] = SchemaField(
            description="How to return the output video. Either a relative path or base64 data URI.",
            default="file_path",
        )

    class Output(BlockSchema):
        video_out: str = SchemaField(
            description="Looped video returned either as a relative path or a data URI."
        )
        error: str = SchemaField(
            description="Error message if something fails.", default=""
        )

    def __init__(self):
        super().__init__(
            id="8bf9eef6-5451-4213-b265-25306446e94b",
            description="Block to loop a video to a given duration or number of repeats.",
            categories={BlockCategory.MULTIMEDIA},
            input_schema=LoopVideoBlock.Input,
            output_schema=LoopVideoBlock.Output,
        )

    def run(
        self,
        input_data: Input,
        *,
        node_exec_id: str,
        graph_exec_id: str,
        **kwargs,
    ) -> BlockOutput:
        # 1) Store the input video locally
        local_video_path = store_media_file(
            graph_exec_id=graph_exec_id,
            file=input_data.video_in,
            return_content=False,
        )
        input_abspath = get_exec_file_path(graph_exec_id, local_video_path)

        # 2) Load the clip
        clip = VideoFileClip(input_abspath)

        # 3) Apply the loop effect
        looped_clip = clip
        if input_data.duration:
            # Loop until we reach the specified duration
            looped_clip = looped_clip.with_effects([Loop(duration=input_data.duration)])
        elif input_data.n_loops:
            looped_clip = looped_clip.with_effects([Loop(n=input_data.n_loops)])
        else:
            raise ValueError("Either 'duration' or 'n_loops' must be provided.")

        assert isinstance(looped_clip, VideoFileClip)

        # 4) Save the looped output
        output_filename = MediaFile(
            f"{node_exec_id}_looped_{os.path.basename(local_video_path)}"
        )
        output_abspath = get_exec_file_path(graph_exec_id, output_filename)

        looped_clip = looped_clip.with_audio(clip.audio)
        looped_clip.write_videofile(output_abspath, codec="libx264", audio_codec="aac")

        # Return as data URI
        video_out = store_media_file(
            graph_exec_id=graph_exec_id,
            file=output_filename,
            return_content=input_data.output_return_type == "data_uri",
        )

        yield "video_out", video_out


class AddAudioToVideoBlock(Block):
    """
    Block that adds (attaches) an audio track to an existing video.
    Optionally scale the volume of the new track.
    """

    class Input(BlockSchema):
        video_in: MediaFile = SchemaField(
            description="Video input (URL, data URI, or local path)."
        )
        audio_in: MediaFile = SchemaField(
            description="Audio input (URL, data URI, or local path)."
        )
        volume: float = SchemaField(
            description="Volume scale for the newly attached audio track (1.0 = original).",
            default=1.0,
        )
        output_return_type: Literal["file_path", "data_uri"] = SchemaField(
            description="Return the final output as a relative path or base64 data URI.",
            default="file_path",
        )

    class Output(BlockSchema):
        video_out: MediaFile = SchemaField(
            description="Final video (with attached audio), as a path or data URI."
        )
        error: str = SchemaField(
            description="Error message if something fails.", default=""
        )

    def __init__(self):
        super().__init__(
            id="3503748d-62b6-4425-91d6-725b064af509",
            description="Block to attach an audio file to a video file using moviepy.",
            categories={BlockCategory.MULTIMEDIA},
            input_schema=AddAudioToVideoBlock.Input,
            output_schema=AddAudioToVideoBlock.Output,
        )

    def run(
        self,
        input_data: Input,
        *,
        node_exec_id: str,
        graph_exec_id: str,
        **kwargs,
    ) -> BlockOutput:
        # 1) Store the inputs locally
        local_video_path = store_media_file(
            graph_exec_id=graph_exec_id,
            file=input_data.video_in,
            return_content=False,
        )
        local_audio_path = store_media_file(
            graph_exec_id=graph_exec_id,
            file=input_data.audio_in,
            return_content=False,
        )

        abs_temp_dir = os.path.join(tempfile.gettempdir(), "exec_file", graph_exec_id)
        video_abspath = os.path.join(abs_temp_dir, local_video_path)
        audio_abspath = os.path.join(abs_temp_dir, local_audio_path)

        # 2) Load video + audio with moviepy
        video_clip = VideoFileClip(video_abspath)
        audio_clip = AudioFileClip(audio_abspath)
        # Optionally scale volume
        if input_data.volume != 1.0:
            audio_clip = audio_clip.with_volume_scaled(input_data.volume)

        # 3) Attach the new audio track
        final_clip = video_clip.with_audio(audio_clip)

        # 4) Write to output file
        output_filename = MediaFile(
            f"{node_exec_id}_audio_attached_{os.path.basename(local_video_path)}"
        )
        output_abspath = os.path.join(abs_temp_dir, output_filename)
        final_clip.write_videofile(output_abspath, codec="libx264", audio_codec="aac")

        # 5) Return either path or data URI
        video_out = store_media_file(
            graph_exec_id=graph_exec_id,
            file=output_filename,
            return_content=input_data.output_return_type == "data_uri",
        )

        yield "video_out", video_out
@@ -1,338 +0,0 @@
from typing import Any, Literal, Optional, Union

from mem0 import MemoryClient
from pydantic import BaseModel, SecretStr

from backend.data.block import Block, BlockOutput, BlockSchema
from backend.data.model import (
    APIKeyCredentials,
    CredentialsField,
    CredentialsMetaInput,
    SchemaField,
)
from backend.integrations.providers import ProviderName

TEST_CREDENTIALS = APIKeyCredentials(
    id="ed55ac19-356e-4243-a6cb-bc599e9b716f",
    provider="mem0",
    api_key=SecretStr("mock-mem0-api-key"),
    title="Mock Mem0 API key",
    expires_at=None,
)

TEST_CREDENTIALS_INPUT = {
    "provider": TEST_CREDENTIALS.provider,
    "id": TEST_CREDENTIALS.id,
    "type": TEST_CREDENTIALS.type,
    "title": TEST_CREDENTIALS.title,
}


class Mem0Base:
    """Base class with shared utilities for Mem0 blocks"""

    @staticmethod
    def _get_client(credentials: APIKeyCredentials) -> MemoryClient:
        """Get initialized Mem0 client"""
        return MemoryClient(api_key=credentials.api_key.get_secret_value())


Filter = dict[str, list[dict[str, str | dict[str, list[str]]]]]


class Conversation(BaseModel):
    discriminator: Literal["conversation"]
    messages: list[dict[str, str]]


class Content(BaseModel):
    discriminator: Literal["content"]
    content: str


class AddMemoryBlock(Block, Mem0Base):
    """Block for adding memories to Mem0

    Always limited by user_id and optional graph_id and graph_exec_id"""

    class Input(BlockSchema):
        credentials: CredentialsMetaInput[
            Literal[ProviderName.MEM0], Literal["api_key"]
        ] = CredentialsField(description="Mem0 API key credentials")
        content: Union[Content, Conversation] = SchemaField(
            discriminator="discriminator",
            description="Content to add - either a string or list of message objects as output from an AI block",
            default=Content(discriminator="content", content="I'm a vegetarian"),
        )
        metadata: dict[str, Any] = SchemaField(
            description="Optional metadata for the memory", default={}
        )

        limit_memory_to_run: bool = SchemaField(
            description="Limit the memory to the run", default=False
        )
        limit_memory_to_agent: bool = SchemaField(
            description="Limit the memory to the agent", default=False
        )

    class Output(BlockSchema):
        action: str = SchemaField(description="Action of the operation")
        memory: str = SchemaField(description="Memory created")
        error: str = SchemaField(description="Error message if operation fails")

    def __init__(self):
        super().__init__(
            id="dce97578-86be-45a4-ae50-f6de33fc935a",
            description="Add new memories to Mem0 with user segmentation",
            input_schema=AddMemoryBlock.Input,
            output_schema=AddMemoryBlock.Output,
            test_input=[
                {
                    "content": {
                        "discriminator": "conversation",
                        "messages": [{"role": "user", "content": "I'm a vegetarian"}],
                    },
                    "metadata": {"food": "vegetarian"},
                    "credentials": TEST_CREDENTIALS_INPUT,
                },
                {
                    "content": {
                        "discriminator": "content",
                        "content": "I am a vegetarian",
                    },
                    "metadata": {"food": "vegetarian"},
                    "credentials": TEST_CREDENTIALS_INPUT,
                },
            ],
            test_output=[("action", "NO_CHANGE"), ("action", "NO_CHANGE")],
            test_credentials=TEST_CREDENTIALS,
            test_mock={"_get_client": lambda credentials: MockMemoryClient()},
        )

    def run(
        self,
        input_data: Input,
        *,
        credentials: APIKeyCredentials,
        user_id: str,
        graph_id: str,
        graph_exec_id: str,
        **kwargs
    ) -> BlockOutput:
        try:
            client = self._get_client(credentials)

            if isinstance(input_data.content, Conversation):
                messages = input_data.content.messages
            else:
                # Plain Content: unwrap the text payload rather than passing the model itself
                messages = [{"role": "user", "content": input_data.content.content}]

            params = {
                "user_id": user_id,
                "output_format": "v1.1",
                "metadata": input_data.metadata,
            }

            if input_data.limit_memory_to_run:
                params["run_id"] = graph_exec_id
            if input_data.limit_memory_to_agent:
                params["agent_id"] = graph_id

            # Use the client to add memory
            result = client.add(
                messages,
                **params,
            )

            if len(result.get("results", [])) > 0:
                for result in result.get("results", []):
                    yield "action", result["event"]
                    yield "memory", result["memory"]
            else:
                yield "action", "NO_CHANGE"

        except Exception as e:
            yield "error", str(e)
|
||||
|
||||
|
||||
class SearchMemoryBlock(Block, Mem0Base):
    """Block for searching memories in Mem0"""

    class Input(BlockSchema):
        credentials: CredentialsMetaInput[
            Literal[ProviderName.MEM0], Literal["api_key"]
        ] = CredentialsField(description="Mem0 API key credentials")
        query: str = SchemaField(
            description="Search query",
            advanced=False,
        )
        trigger: bool = SchemaField(
            description="An unused field that is used to (re-)trigger the block when you have no other inputs",
            default=False,
            advanced=False,
        )
        categories_filter: list[str] = SchemaField(
            description="Categories to filter by",
            default=[],
            advanced=True,
        )
        limit_memory_to_run: bool = SchemaField(
            description="Limit the memory to the run", default=False
        )
        limit_memory_to_agent: bool = SchemaField(
            description="Limit the memory to the agent", default=True
        )

    class Output(BlockSchema):
        memories: Any = SchemaField(description="List of matching memories")
        error: str = SchemaField(description="Error message if operation fails")

    def __init__(self):
        super().__init__(
            id="bd7c84e3-e073-4b75-810c-600886ec8a5b",
            description="Search memories in Mem0 by user",
            input_schema=SearchMemoryBlock.Input,
            output_schema=SearchMemoryBlock.Output,
            test_input={
                "query": "vegetarian preferences",
                "credentials": TEST_CREDENTIALS_INPUT,
                "top_k": 10,
                "rerank": True,
            },
            test_output=[
                ("memories", [{"id": "test-memory", "content": "test content"}])
            ],
            test_credentials=TEST_CREDENTIALS,
            test_mock={"_get_client": lambda credentials: MockMemoryClient()},
        )

    def run(
        self,
        input_data: Input,
        *,
        credentials: APIKeyCredentials,
        user_id: str,
        graph_id: str,
        graph_exec_id: str,
        **kwargs,
    ) -> BlockOutput:
        try:
            client = self._get_client(credentials)

            filters: Filter = {
                # This works with only one filter, so we can allow others to add on later
                "AND": [
                    {"user_id": user_id},
                ]
            }
            if input_data.categories_filter:
                filters["AND"].append(
                    {"categories": {"contains": input_data.categories_filter}}
                )
            if input_data.limit_memory_to_run:
                filters["AND"].append({"run_id": graph_exec_id})
            if input_data.limit_memory_to_agent:
                filters["AND"].append({"agent_id": graph_id})

            result: list[dict[str, Any]] = client.search(
                input_data.query, version="v2", filters=filters
            )
            yield "memories", result

        except Exception as e:
            yield "error", str(e)
class GetAllMemoriesBlock(Block, Mem0Base):
    """Block for retrieving all memories from Mem0"""

    class Input(BlockSchema):
        credentials: CredentialsMetaInput[
            Literal[ProviderName.MEM0], Literal["api_key"]
        ] = CredentialsField(description="Mem0 API key credentials")
        trigger: bool = SchemaField(
            description="An unused field that is used to trigger the block when you have no other inputs",
            default=False,
            advanced=False,
        )
        categories: Optional[list[str]] = SchemaField(
            description="Filter by categories", default=None
        )
        limit_memory_to_run: bool = SchemaField(
            description="Limit the memory to the run", default=False
        )
        limit_memory_to_agent: bool = SchemaField(
            description="Limit the memory to the agent", default=False
        )

    class Output(BlockSchema):
        memories: Any = SchemaField(description="List of memories")
        error: str = SchemaField(description="Error message if operation fails")

    def __init__(self):
        super().__init__(
            id="45aee5bf-4767-45d1-a28b-e01c5aae9fc1",
            description="Retrieve all memories from Mem0 with pagination",
            input_schema=GetAllMemoriesBlock.Input,
            output_schema=GetAllMemoriesBlock.Output,
            test_input={
                "user_id": "test_user",
                "credentials": TEST_CREDENTIALS_INPUT,
            },
            test_output=[
                ("memories", [{"id": "test-memory", "content": "test content"}]),
            ],
            test_credentials=TEST_CREDENTIALS,
            test_mock={"_get_client": lambda credentials: MockMemoryClient()},
        )

    def run(
        self,
        input_data: Input,
        *,
        credentials: APIKeyCredentials,
        user_id: str,
        graph_id: str,
        graph_exec_id: str,
        **kwargs,
    ) -> BlockOutput:
        try:
            client = self._get_client(credentials)

            filters: Filter = {
                "AND": [
                    {"user_id": user_id},
                ]
            }
            if input_data.limit_memory_to_run:
                filters["AND"].append({"run_id": graph_exec_id})
            if input_data.limit_memory_to_agent:
                filters["AND"].append({"agent_id": graph_id})
            if input_data.categories:
                filters["AND"].append(
                    {"categories": {"contains": input_data.categories}}
                )

            memories: list[dict[str, Any]] = client.get_all(
                filters=filters,
                version="v2",
            )

            yield "memories", memories

        except Exception as e:
            yield "error", str(e)
# Mock client for testing
class MockMemoryClient:
    """Mock Mem0 client for testing"""

    def add(self, *args, **kwargs):
        # No "results" key, so AddMemoryBlock falls through to its "NO_CHANGE" branch.
        return {"memory_id": "test-memory-id", "status": "success"}

    def search(self, *args, **kwargs) -> list[dict[str, str]]:
        return [{"id": "test-memory", "content": "test content"}]

    def get_all(self, *args, **kwargs) -> list[dict[str, str]]:
        return [{"id": "test-memory", "content": "test content"}]
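
# A minimal usage sketch (hypothetical wiring; in production the executor
# injects `credentials` and the user/graph/run identifiers shown here):
if __name__ == "__main__":
    block = AddMemoryBlock()
    for name, value in block.run(
        AddMemoryBlock.Input(
            credentials=TEST_CREDENTIALS_INPUT,
            content=Content(discriminator="content", content="I'm a vegetarian"),
        ),
        credentials=TEST_CREDENTIALS,
        user_id="user-123",
        graph_id="graph-abc",
        graph_exec_id="exec-xyz",
    ):
        print(name, value)  # e.g. ("action", "ADD") then ("memory", "Is a vegetarian")
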
@@ -1,48 +1,22 @@
 from datetime import datetime, timezone
-from typing import Iterator, Literal
+from typing import Iterator

 import praw
-from pydantic import BaseModel, SecretStr
+from pydantic import BaseModel, ConfigDict

 from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
-from backend.data.model import (
-    CredentialsField,
-    CredentialsMetaInput,
-    SchemaField,
-    UserPasswordCredentials,
-)
-from backend.integrations.providers import ProviderName
+from backend.data.model import BlockSecret, SchemaField, SecretField
 from backend.util.mock import MockObject
 from backend.util.settings import Settings

-RedditCredentials = UserPasswordCredentials
-RedditCredentialsInput = CredentialsMetaInput[
-    Literal[ProviderName.REDDIT],
-    Literal["user_password"],
-]
-
-
-def RedditCredentialsField() -> RedditCredentialsInput:
-    """Creates a Reddit credentials input on a block."""
-    return CredentialsField(
-        description="The Reddit integration requires a username and password.",
-    )
-
-
-TEST_CREDENTIALS = UserPasswordCredentials(
-    id="01234567-89ab-cdef-0123-456789abcdef",
-    provider="reddit",
-    username=SecretStr("mock-reddit-username"),
-    password=SecretStr("mock-reddit-password"),
-    title="Mock Reddit credentials",
-)
-
-TEST_CREDENTIALS_INPUT = {
-    "provider": TEST_CREDENTIALS.provider,
-    "id": TEST_CREDENTIALS.id,
-    "type": TEST_CREDENTIALS.type,
-    "title": TEST_CREDENTIALS.title,
-}
+class RedditCredentials(BaseModel):
+    client_id: BlockSecret = SecretField(key="reddit_client_id")
+    client_secret: BlockSecret = SecretField(key="reddit_client_secret")
+    username: BlockSecret = SecretField(key="reddit_username")
+    password: BlockSecret = SecretField(key="reddit_password")
+    user_agent: str = "AutoGPT:1.0 (by /u/autogpt)"
+
+    model_config = ConfigDict(title="Reddit Credentials")


 class RedditPost(BaseModel):
@@ -57,16 +31,13 @@ class RedditComment(BaseModel):
     comment: str


 settings = Settings()


 def get_praw(creds: RedditCredentials) -> praw.Reddit:
     client = praw.Reddit(
-        client_id=settings.secrets.reddit_client_id,
-        client_secret=settings.secrets.reddit_client_secret,
+        client_id=creds.client_id.get_secret_value(),
+        client_secret=creds.client_secret.get_secret_value(),
         username=creds.username.get_secret_value(),
         password=creds.password.get_secret_value(),
-        user_agent=settings.config.reddit_user_agent,
+        user_agent=creds.user_agent,
     )
     me = client.user.me()
     if not me:
@@ -77,11 +48,11 @@ def get_praw(creds: RedditCredentials) -> praw.Reddit:

 class GetRedditPostsBlock(Block):
     class Input(BlockSchema):
-        subreddit: str = SchemaField(
-            description="Subreddit name, excluding the /r/ prefix",
-            default="writingprompts",
-        )
-        credentials: RedditCredentialsInput = RedditCredentialsField()
+        subreddit: str = SchemaField(description="Subreddit name")
+        creds: RedditCredentials = SchemaField(
+            description="Reddit credentials",
+            default=RedditCredentials(),
+        )
         last_minutes: int | None = SchemaField(
             description="Post time to stop minutes ago while fetching posts",
             default=None,
@@ -99,18 +70,20 @@ class GetRedditPostsBlock(Block):

     def __init__(self):
         super().__init__(
+            disabled=True,
             id="c6731acb-4285-4ee1-bc9b-03d0766c370f",
             description="This block fetches Reddit posts from a defined subreddit name.",
             categories={BlockCategory.SOCIAL},
-            disabled=(
-                not settings.secrets.reddit_client_id
-                or not settings.secrets.reddit_client_secret
-            ),
             input_schema=GetRedditPostsBlock.Input,
             output_schema=GetRedditPostsBlock.Output,
-            test_credentials=TEST_CREDENTIALS,
             test_input={
-                "credentials": TEST_CREDENTIALS_INPUT,
+                "creds": {
+                    "client_id": "client_id",
+                    "client_secret": "client_secret",
+                    "username": "username",
+                    "password": "password",
+                    "user_agent": "user_agent",
+                },
                 "subreddit": "subreddit",
                 "last_post": "id3",
                 "post_limit": 2,
@@ -130,7 +103,7 @@ class GetRedditPostsBlock(Block):
                 ),
             ],
             test_mock={
-                "get_posts": lambda input_data, credentials: [
+                "get_posts": lambda _: [
                     MockObject(id="id1", title="title1", selftext="body1"),
                     MockObject(id="id2", title="title2", selftext="body2"),
                     MockObject(id="id3", title="title2", selftext="body2"),
@@ -139,18 +112,14 @@ class GetRedditPostsBlock(Block):
         )

     @staticmethod
-    def get_posts(
-        input_data: Input, *, credentials: RedditCredentials
-    ) -> Iterator[praw.reddit.Submission]:
-        client = get_praw(credentials)
+    def get_posts(input_data: Input) -> Iterator[praw.reddit.Submission]:
+        client = get_praw(input_data.creds)
         subreddit = client.subreddit(input_data.subreddit)
         return subreddit.new(limit=input_data.post_limit or 10)

-    def run(
-        self, input_data: Input, *, credentials: RedditCredentials, **kwargs
-    ) -> BlockOutput:
+    def run(self, input_data: Input, **kwargs) -> BlockOutput:
         current_time = datetime.now(tz=timezone.utc)
-        for post in self.get_posts(input_data=input_data, credentials=credentials):
+        for post in self.get_posts(input_data):
             if input_data.last_minutes:
                 post_datetime = datetime.fromtimestamp(
                     post.created_utc, tz=timezone.utc
@@ -172,7 +141,9 @@ class GetRedditPostsBlock(Block):

 class PostRedditCommentBlock(Block):
     class Input(BlockSchema):
-        credentials: RedditCredentialsInput = RedditCredentialsField()
+        creds: RedditCredentials = SchemaField(
+            description="Reddit credentials", default=RedditCredentials()
+        )
         data: RedditComment = SchemaField(description="Reddit comment")

     class Output(BlockSchema):
@@ -185,15 +156,7 @@ class PostRedditCommentBlock(Block):
             categories={BlockCategory.SOCIAL},
             input_schema=PostRedditCommentBlock.Input,
             output_schema=PostRedditCommentBlock.Output,
-            disabled=(
-                not settings.secrets.reddit_client_id
-                or not settings.secrets.reddit_client_secret
-            ),
-            test_credentials=TEST_CREDENTIALS,
-            test_input={
-                "credentials": TEST_CREDENTIALS_INPUT,
-                "data": {"post_id": "id", "comment": "comment"},
-            },
+            test_input={"data": {"post_id": "id", "comment": "comment"}},
             test_output=[("comment_id", "dummy_comment_id")],
             test_mock={"reply_post": lambda creds, comment: "dummy_comment_id"},
         )
@@ -207,7 +170,5 @@ class PostRedditCommentBlock(Block):
             raise ValueError("Failed to post comment.")
         return new_comment.id

-    def run(
-        self, input_data: Input, *, credentials: RedditCredentials, **kwargs
-    ) -> BlockOutput:
-        yield "comment_id", self.reply_post(credentials, input_data.data)
+    def run(self, input_data: Input, **kwargs) -> BlockOutput:
+        yield "comment_id", self.reply_post(input_data.creds, input_data.data)
@@ -131,7 +131,7 @@ class ReplicateFluxAdvancedModelBlock(Block):
         super().__init__(
             id="90f8c45e-e983-4644-aa0b-b4ebe2f531bc",
             description="This block runs Flux models on Replicate with advanced settings.",
-            categories={BlockCategory.AI, BlockCategory.MULTIMEDIA},
+            categories={BlockCategory.AI},
             input_schema=ReplicateFluxAdvancedModelBlock.Input,
             output_schema=ReplicateFluxAdvancedModelBlock.Output,
             test_input={
@@ -1,174 +0,0 @@
from base64 import b64encode
from enum import Enum
from typing import Literal

from pydantic import SecretStr

from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import (
    APIKeyCredentials,
    CredentialsField,
    CredentialsMetaInput,
    SchemaField,
)
from backend.integrations.providers import ProviderName
from backend.util.file import MediaFile, store_media_file
from backend.util.request import Requests


class Format(str, Enum):
    PNG = "png"
    JPEG = "jpeg"
    WEBP = "webp"


class ScreenshotWebPageBlock(Block):
    """Block for taking screenshots using the ScreenshotOne API"""

    class Input(BlockSchema):
        credentials: CredentialsMetaInput[
            Literal[ProviderName.SCREENSHOTONE], Literal["api_key"]
        ] = CredentialsField(description="The ScreenshotOne API key")
        url: str = SchemaField(
            description="URL of the website to screenshot",
            placeholder="https://example.com",
        )
        viewport_width: int = SchemaField(
            description="Width of the viewport in pixels", default=1920
        )
        viewport_height: int = SchemaField(
            description="Height of the viewport in pixels", default=1080
        )
        full_page: bool = SchemaField(
            description="Whether to capture the full page length", default=False
        )
        format: Format = SchemaField(
            description="Output format (png, jpeg, webp)", default=Format.PNG
        )
        block_ads: bool = SchemaField(description="Whether to block ads", default=True)
        block_cookie_banners: bool = SchemaField(
            description="Whether to block cookie banners", default=True
        )
        block_chats: bool = SchemaField(
            description="Whether to block chat widgets", default=True
        )
        cache: bool = SchemaField(
            description="Whether to enable caching", default=False
        )

    class Output(BlockSchema):
        image: MediaFile = SchemaField(description="The screenshot image data")
        error: str = SchemaField(description="Error message if the screenshot failed")

    def __init__(self):
        super().__init__(
            id="3a7c4b8d-6e2f-4a5d-b9c1-f8d23c5a9b0e",  # Generated UUID
            description="Takes a screenshot of a specified website using the ScreenshotOne API",
            categories={BlockCategory.DATA},
            input_schema=ScreenshotWebPageBlock.Input,
            output_schema=ScreenshotWebPageBlock.Output,
            test_input={
                "url": "https://example.com",
                "viewport_width": 1920,
                "viewport_height": 1080,
                "full_page": False,
                "format": "png",
                "block_ads": True,
                "block_cookie_banners": True,
                "block_chats": True,
                "cache": False,
                "credentials": {
                    "provider": "screenshotone",
                    "type": "api_key",
                    "id": "test-id",
                    "title": "Test API Key",
                },
            },
            test_credentials=APIKeyCredentials(
                id="test-id",
                provider="screenshotone",
                api_key=SecretStr("test-key"),
                title="Test API Key",
                expires_at=None,
            ),
            test_output=[
                (
                    "image",
                    "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAB5JREFUOE9jZPjP8J+BAsA4agDDaBgwjIYBw7AIAwCV5B/xAsMbygAAAABJRU5ErkJggg==",
                ),
            ],
            test_mock={
                "take_screenshot": lambda *args, **kwargs: {
                    "image": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAB5JREFUOE9jZPjP8J+BAsA4agDDaBgwjIYBw7AIAwCV5B/xAsMbygAAAABJRU5ErkJggg==",
                }
            },
        )

    @staticmethod
    def take_screenshot(
        credentials: APIKeyCredentials,
        graph_exec_id: str,
        url: str,
        viewport_width: int,
        viewport_height: int,
        full_page: bool,
        format: Format,
        block_ads: bool,
        block_cookie_banners: bool,
        block_chats: bool,
        cache: bool,
    ) -> dict:
        """
        Takes a screenshot using the ScreenshotOne API.
        """
        api = Requests(trusted_origins=["https://api.screenshotone.com"])

        # Build the API request parameters
        params = {
            "access_key": credentials.api_key.get_secret_value(),
            "url": url,
            "viewport_width": viewport_width,
            "viewport_height": viewport_height,
            "full_page": str(full_page).lower(),
            "format": format.value,
            "block_ads": str(block_ads).lower(),
            "block_cookie_banners": str(block_cookie_banners).lower(),
            "block_chats": str(block_chats).lower(),
            "cache": str(cache).lower(),
        }

        response = api.get("https://api.screenshotone.com/take", params=params)

        return {
            "image": store_media_file(
                graph_exec_id=graph_exec_id,
                file=f"data:image/{format.value};base64,{b64encode(response.content).decode('utf-8')}",
                return_content=True,
            )
        }

    def run(
        self,
        input_data: Input,
        *,
        credentials: APIKeyCredentials,
        graph_exec_id: str,
        **kwargs,
    ) -> BlockOutput:
        try:
            screenshot_data = self.take_screenshot(
                credentials=credentials,
                graph_exec_id=graph_exec_id,
                url=input_data.url,
                viewport_width=input_data.viewport_width,
                viewport_height=input_data.viewport_height,
                full_page=input_data.full_page,
                format=input_data.format,
                block_ads=input_data.block_ads,
                block_cookie_banners=input_data.block_cookie_banners,
                block_chats=input_data.block_chats,
                cache=input_data.cache,
            )
            yield "image", screenshot_data["image"]
        except Exception as e:
            yield "error", str(e)
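
# For context, a minimal sketch of the data URI the block assembles from the
# raw response bytes (the byte string here is an illustrative stand-in):
from base64 import b64encode as _b64encode

raw_bytes = b"\x89PNG\r\n\x1a\n..."  # hypothetical PNG payload
data_uri = f"data:image/png;base64,{_b64encode(raw_bytes).decode('utf-8')}"
# -> "data:image/png;base64,iVBORw0KGgo..."
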
@@ -1,498 +0,0 @@
import logging
import re
from collections import Counter
from typing import TYPE_CHECKING, Any

from autogpt_libs.utils.cache import thread_cached

import backend.blocks.llm as llm
from backend.blocks.agent import AgentExecutorBlock
from backend.data.block import (
    Block,
    BlockCategory,
    BlockInput,
    BlockOutput,
    BlockSchema,
    BlockType,
    get_block,
)
from backend.data.model import SchemaField
from backend.util import json

if TYPE_CHECKING:
    from backend.data.graph import Link, Node

logger = logging.getLogger(__name__)


@thread_cached
def get_database_manager_client():
    from backend.executor import DatabaseManager
    from backend.util.service import get_service_client

    return get_service_client(DatabaseManager)
def _get_tool_requests(entry: dict[str, Any]) -> list[str]:
    """
    Return a list of tool_call_ids if the entry is a tool request.
    Supports both OpenAI and Anthropic formats.
    """
    tool_call_ids = []
    if entry.get("role") != "assistant":
        return tool_call_ids

    # OpenAI: check for tool_calls in the entry.
    calls = entry.get("tool_calls")
    if isinstance(calls, list):
        for call in calls:
            if tool_id := call.get("id"):
                tool_call_ids.append(tool_id)

    # Anthropic: check content items for the tool_use type.
    content = entry.get("content")
    if isinstance(content, list):
        for item in content:
            if item.get("type") != "tool_use":
                continue
            if tool_id := item.get("id"):
                tool_call_ids.append(tool_id)

    return tool_call_ids


def _get_tool_responses(entry: dict[str, Any]) -> list[str]:
    """
    Return a list of tool_call_ids if the entry is a tool response.
    Supports both OpenAI and Anthropic formats.
    """
    tool_call_ids: list[str] = []

    # OpenAI: a tool response message with role "tool" and key "tool_call_id".
    if entry.get("role") == "tool":
        if tool_call_id := entry.get("tool_call_id"):
            tool_call_ids.append(str(tool_call_id))

    # Anthropic: check content items for the tool_result type.
    if entry.get("role") == "user":
        content = entry.get("content")
        if isinstance(content, list):
            for item in content:
                if item.get("type") != "tool_result":
                    continue
                if tool_call_id := item.get("tool_use_id"):
                    tool_call_ids.append(tool_call_id)

    return tool_call_ids


def _create_tool_response(call_id: str, output: dict[str, Any]) -> dict[str, Any]:
    """
    Create a tool response message for either OpenAI or Anthropic,
    based on the tool ID format.
    """
    content = output if isinstance(output, str) else json.dumps(output)

    # Anthropic format: tool IDs typically start with "toolu_"
    if call_id.startswith("toolu_"):
        return {
            "role": "user",
            "type": "message",
            "content": [
                {"tool_use_id": call_id, "type": "tool_result", "content": content}
            ],
        }

    # OpenAI format: tool IDs typically start with "call_".
    # Also the default fallback if the tool ID doesn't match any known prefix.
    return {"role": "tool", "tool_call_id": call_id, "content": content}


def get_pending_tool_calls(conversation_history: list[Any]) -> dict[str, int]:
    """
    Every tool-call entry in the conversation history requires a response.
    This function returns the pending tool calls that have not produced an output yet.

    Returns:
        dict[str, int]: A dictionary of pending tool call IDs with their counts.
    """
    pending_calls = Counter()
    for history in conversation_history:
        for call_id in _get_tool_requests(history):
            pending_calls[call_id] += 1

        for call_id in _get_tool_responses(history):
            pending_calls[call_id] -= 1

    return {call_id: count for call_id, count in pending_calls.items() if count > 0}
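
# A small worked example of the netting logic above (hypothetical IDs):
# one request entry opens call_1 and call_2, one response closes call_1,
# so only call_2 remains pending.
_example_history = [
    {"role": "assistant", "tool_calls": [{"id": "call_1"}, {"id": "call_2"}]},
    {"role": "tool", "tool_call_id": "call_1", "content": "42"},
]
assert get_pending_tool_calls(_example_history) == {"call_2": 1}
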
class SmartDecisionMakerBlock(Block):
    """
    A block that uses a language model to make smart decisions based on a given prompt.
    """

    class Input(BlockSchema):
        prompt: str = SchemaField(
            description="The prompt to send to the language model.",
            placeholder="Enter your prompt here...",
        )
        model: llm.LlmModel = SchemaField(
            title="LLM Model",
            default=llm.LlmModel.GPT4O,
            description="The language model to use for answering the prompt.",
            advanced=False,
        )
        credentials: llm.AICredentials = llm.AICredentialsField()
        sys_prompt: str = SchemaField(
            title="System Prompt",
            default="Thinking carefully step by step decide which function to call. "
            "Always choose a function call from the list of function signatures, "
            "and always provide the complete argument provided with the type "
            "matching the required jsonschema signature, no missing argument is allowed. "
            "If you have already completed the task objective, you can end the task "
            "by providing the end result of your work as a finish message. "
            "Only provide EXACTLY one function call, multiple tool calls is strictly prohibited.",
            description="The system prompt to provide additional context to the model.",
        )
        conversation_history: list[dict] = SchemaField(
            default=[],
            description="The conversation history to provide context for the prompt.",
        )
        last_tool_output: Any = SchemaField(
            default=None,
            description="The output of the last tool that was called.",
        )
        retry: int = SchemaField(
            title="Retry Count",
            default=3,
            description="Number of times to retry the LLM call if the response does not match the expected format.",
        )
        prompt_values: dict[str, str] = SchemaField(
            advanced=False,
            default={},
            description="Values used to fill in the prompt. The values can be used in the prompt by putting them in double curly braces, e.g. {{variable_name}}.",
        )
        max_tokens: int | None = SchemaField(
            advanced=True,
            default=None,
            description="The maximum number of tokens to generate in the chat completion.",
        )
        ollama_host: str = SchemaField(
            advanced=True,
            default="localhost:11434",
            description="Ollama host for local models",
        )

    @classmethod
    def get_missing_links(cls, data: BlockInput, links: list["Link"]) -> set[str]:
        # conversation_history & last_tool_output validation is handled differently
        return super().get_missing_links(
            data,
            [
                link
                for link in links
                if link.sink_name not in ["conversation_history", "last_tool_output"]
            ],
        )

    @classmethod
    def get_missing_input(cls, data: BlockInput) -> set[str]:
        if missing_input := super().get_missing_input(data):
            return missing_input

        conversation_history = data.get("conversation_history", [])
        pending_tool_calls = get_pending_tool_calls(conversation_history)
        last_tool_output = data.get("last_tool_output")
        if not last_tool_output and pending_tool_calls:
            return {"last_tool_output"}
        return set()

    class Output(BlockSchema):
        error: str = SchemaField(description="Error message if the API call failed.")
        tools: Any = SchemaField(description="The tools that are available to use.")
        finished: str = SchemaField(
            description="The finished message to display to the user."
        )
        conversations: list[Any] = SchemaField(
            description="The conversation history to provide context for the prompt."
        )

    def __init__(self):
        super().__init__(
            id="3b191d9f-356f-482d-8238-ba04b6d18381",
            description="Uses AI to intelligently decide what tool to use.",
            categories={BlockCategory.AI},
            block_type=BlockType.AI,
            input_schema=SmartDecisionMakerBlock.Input,
            output_schema=SmartDecisionMakerBlock.Output,
            test_input={
                "prompt": "Hello, World!",
                "credentials": llm.TEST_CREDENTIALS_INPUT,
            },
            test_output=[],
            test_credentials=llm.TEST_CREDENTIALS,
        )
    @staticmethod
    def _create_block_function_signature(
        sink_node: "Node", links: list["Link"]
    ) -> dict[str, Any]:
        """
        Creates a function signature for a block node.

        Args:
            sink_node: The node for which to create a function signature.
            links: The list of links connected to the sink node.

        Returns:
            A dictionary representing the function signature in the format expected by LLM tools.

        Raises:
            ValueError: If the block specified by sink_node.block_id is not found.
        """
        block = get_block(sink_node.block_id)
        if not block:
            raise ValueError(f"Block not found: {sink_node.block_id}")

        tool_function: dict[str, Any] = {
            "name": re.sub(r"[^a-zA-Z0-9_-]", "_", block.name).lower(),
            "description": block.description,
        }

        properties = {}
        required = []

        for link in links:
            sink_block_input_schema = block.input_schema
            description = (
                sink_block_input_schema.model_fields[link.sink_name].description
                if link.sink_name in sink_block_input_schema.model_fields
                and sink_block_input_schema.model_fields[link.sink_name].description
                else f"The {link.sink_name} of the tool"
            )
            properties[link.sink_name.lower()] = {
                "type": "string",
                "description": description,
            }

        tool_function["parameters"] = {
            "type": "object",
            "properties": properties,
            "required": required,
            "additionalProperties": False,
            "strict": True,
        }

        return {"type": "function", "function": tool_function}
    @staticmethod
    def _create_agent_function_signature(
        sink_node: "Node", links: list["Link"]
    ) -> dict[str, Any]:
        """
        Creates a function signature for an agent node.

        Args:
            sink_node: The agent node for which to create a function signature.
            links: The list of links connected to the sink node.

        Returns:
            A dictionary representing the function signature in the format expected by LLM tools.

        Raises:
            ValueError: If the graph metadata for the specified graph_id and graph_version is not found.
        """
        graph_id = sink_node.input_default.get("graph_id")
        graph_version = sink_node.input_default.get("graph_version")
        if not graph_id or not graph_version:
            raise ValueError("Graph ID or Graph Version not found in sink node.")

        db_client = get_database_manager_client()
        sink_graph_meta = db_client.get_graph_metadata(graph_id, graph_version)
        if not sink_graph_meta:
            raise ValueError(
                f"Sink graph metadata not found: {graph_id} {graph_version}"
            )

        tool_function: dict[str, Any] = {
            "name": re.sub(r"[^a-zA-Z0-9_-]", "_", sink_graph_meta.name).lower(),
            "description": sink_graph_meta.description,
        }

        properties = {}
        required = []

        for link in links:
            sink_block_input_schema = sink_node.input_default["input_schema"]
            description = (
                sink_block_input_schema["properties"][link.sink_name]["description"]
                if "description"
                in sink_block_input_schema["properties"][link.sink_name]
                else f"The {link.sink_name} of the tool"
            )
            properties[link.sink_name.lower()] = {
                "type": "string",
                "description": description,
            }

        tool_function["parameters"] = {
            "type": "object",
            "properties": properties,
            "required": required,
            "additionalProperties": False,
            "strict": True,
        }

        return {"type": "function", "function": tool_function}
    @staticmethod
    def _create_function_signature(node_id: str) -> list[dict[str, Any]]:
        """
        Creates function signatures for tools linked to a specified node within a graph.

        This method filters the graph links to identify those that are tools and are
        connected to the given node_id. It then constructs function signatures for each
        tool based on the metadata and input schema of the linked nodes.

        Args:
            node_id: The node_id for which to create function signatures.

        Returns:
            list[dict[str, Any]]: A list of dictionaries, each representing a function
                signature for a tool, including its name, description, and parameters.

        Raises:
            ValueError: If no tool links are found for the specified node_id, or if a
                sink node or its metadata cannot be found.
        """
        db_client = get_database_manager_client()
        tools = [
            (link, node)
            for link, node in db_client.get_connected_output_nodes(node_id)
            if link.source_name.startswith("tools_^_") and link.source_id == node_id
        ]
        if not tools:
            raise ValueError("There is no next node to execute.")

        return_tool_functions = []

        grouped_tool_links: dict[str, tuple["Node", list["Link"]]] = {}
        for link, node in tools:
            if link.sink_id not in grouped_tool_links:
                grouped_tool_links[link.sink_id] = (node, [link])
            else:
                grouped_tool_links[link.sink_id][1].append(link)

        for sink_node, links in grouped_tool_links.values():
            if not sink_node:
                raise ValueError(f"Sink node not found: {links[0].sink_id}")

            if sink_node.block_id == AgentExecutorBlock().id:
                return_tool_functions.append(
                    SmartDecisionMakerBlock._create_agent_function_signature(
                        sink_node, links
                    )
                )
            else:
                return_tool_functions.append(
                    SmartDecisionMakerBlock._create_block_function_signature(
                        sink_node, links
                    )
                )

        return return_tool_functions
    def run(
        self,
        input_data: Input,
        *,
        credentials: llm.APIKeyCredentials,
        graph_id: str,
        node_id: str,
        graph_exec_id: str,
        node_exec_id: str,
        user_id: str,
        **kwargs,
    ) -> BlockOutput:
        tool_functions = self._create_function_signature(node_id)

        input_data.conversation_history = input_data.conversation_history or []
        prompt = [json.to_dict(p) for p in input_data.conversation_history if p]

        pending_tool_calls = get_pending_tool_calls(input_data.conversation_history)
        if pending_tool_calls and not input_data.last_tool_output:
            raise ValueError(f"Tool call requires an output for {pending_tool_calls}")

        # Prefill all missing tool calls with the last tool output.
        # TODO: we need a better way to handle this.
        tool_output = [
            _create_tool_response(pending_call_id, input_data.last_tool_output)
            for pending_call_id, count in pending_tool_calls.items()
            for _ in range(count)
        ]

        # If the SDM block only calls one tool at a time, this should not happen.
        if len(tool_output) > 1:
            logger.warning(
                f"[SmartDecisionMakerBlock-node_exec_id={node_exec_id}] "
                f"Multiple pending tool calls are prefilled using a single output. "
                f"Execution may not be accurate."
            )

        # Fall back to adding the tool output to the conversation history as a user prompt.
        if len(tool_output) == 0:
            logger.warning(
                f"[SmartDecisionMakerBlock-node_exec_id={node_exec_id}] "
                f"No pending tool calls found. This may indicate an issue with the "
                f"conversation history, or an LLM calling two tools at the same time."
            )
            tool_output.append(
                {
                    "role": "user",
                    "content": f"Last tool output: {json.dumps(input_data.last_tool_output)}",
                }
            )

        prompt.extend(tool_output)

        values = input_data.prompt_values
        if values:
            input_data.prompt = llm.fmt.format_string(input_data.prompt, values)
            input_data.sys_prompt = llm.fmt.format_string(input_data.sys_prompt, values)

        prefix = "[Main Objective Prompt]: "

        if input_data.sys_prompt and not any(
            p["role"] == "system" and p["content"].startswith(prefix) for p in prompt
        ):
            prompt.append({"role": "system", "content": prefix + input_data.sys_prompt})

        if input_data.prompt and not any(
            p["role"] == "user" and p["content"].startswith(prefix) for p in prompt
        ):
            prompt.append({"role": "user", "content": prefix + input_data.prompt})

        response = llm.llm_call(
            credentials=credentials,
            llm_model=input_data.model,
            prompt=prompt,
            json_format=False,
            max_tokens=input_data.max_tokens,
            tools=tool_functions,
            ollama_host=input_data.ollama_host,
        )

        if not response.tool_calls:
            yield "finished", response.response
            return

        for tool_call in response.tool_calls:
            tool_name = tool_call.function.name
            tool_args = json.loads(tool_call.function.arguments)

            for arg_name, arg_value in tool_args.items():
                yield f"tools_^_{tool_name}_{arg_name}".lower(), arg_value

        response.prompt.append(response.raw_response)
        yield "conversations", response.prompt
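
# For illustration, a hypothetical tool call with
#   function.name = "add_memory" and function.arguments = '{"content": "I am a vegetarian"}'
# is emitted as:
#   yield "tools_^_add_memory_content", "I am a vegetarian"
# which, presumably via the "tools_^_"-prefixed links consumed by
# _create_function_signature above, reaches the `content` input of the linked tool node.
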
@@ -1,97 +0,0 @@
from backend.blocks.smartlead.models import (
    AddLeadsRequest,
    AddLeadsToCampaignResponse,
    CreateCampaignRequest,
    CreateCampaignResponse,
    SaveSequencesRequest,
    SaveSequencesResponse,
)
from backend.util.request import Requests


class SmartLeadClient:
    """Client for the SmartLead API"""

    # This API requires the API key in the URL. DO NOT RAISE ERRORS FOR BAD REQUESTS;
    # FILTER OUT THE API KEY FROM THE ERROR MESSAGE.

    API_URL = "https://server.smartlead.ai/api/v1"

    def __init__(self, api_key: str):
        self.api_key = api_key
        self.requests = Requests()

    def _add_auth_to_url(self, url: str) -> str:
        return f"{url}?api_key={self.api_key}"

    def _handle_error(self, e: Exception) -> str:
        return e.__str__().replace(self.api_key, "API KEY")

    def create_campaign(self, request: CreateCampaignRequest) -> CreateCampaignResponse:
        try:
            response = self.requests.post(
                self._add_auth_to_url(f"{self.API_URL}/campaigns/create"),
                json=request.model_dump(),
            )
            response_data = response.json()
            return CreateCampaignResponse(**response_data)
        except ValueError as e:
            raise ValueError(f"Invalid response format: {str(e)}")
        except Exception as e:
            raise ValueError(f"Failed to create campaign: {self._handle_error(e)}")

    def add_leads_to_campaign(
        self, request: AddLeadsRequest
    ) -> AddLeadsToCampaignResponse:
        try:
            response = self.requests.post(
                self._add_auth_to_url(
                    f"{self.API_URL}/campaigns/{request.campaign_id}/leads"
                ),
                json=request.model_dump(exclude={"campaign_id"}),
            )
            response_data = response.json()
            response_parsed = AddLeadsToCampaignResponse(**response_data)
            if not response_parsed.ok:
                raise ValueError(
                    f"Failed to add leads to campaign: {response_parsed.error}"
                )
            return response_parsed
        except ValueError as e:
            raise ValueError(f"Invalid response format: {str(e)}")
        except Exception as e:
            raise ValueError(
                f"Failed to add leads to campaign: {self._handle_error(e)}"
            )

    def save_campaign_sequences(
        self, campaign_id: int, request: SaveSequencesRequest
    ) -> SaveSequencesResponse:
        """
        Save sequences within a campaign.

        Args:
            campaign_id: ID of the campaign to save sequences for
            request: SaveSequencesRequest containing the sequences configuration

        Returns:
            SaveSequencesResponse with the result of the operation

        Note:
            For variant_distribution_type:
            - MANUAL_EQUAL: Equally distributes variants across leads
            - AI_EQUAL: Requires winning_metric_property and lead_distribution_percentage
            - MANUAL_PERCENTAGE: Requires variant_distribution_percentage in seq_variants
        """
        try:
            response = self.requests.post(
                self._add_auth_to_url(
                    f"{self.API_URL}/campaigns/{campaign_id}/sequences"
                ),
                json=request.model_dump(exclude_none=True),
            )
            return SaveSequencesResponse(**response.json())
        except Exception as e:
            raise ValueError(
                f"Failed to save campaign sequences: {self._handle_error(e)}"
            )
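
# A quick sketch of the redaction `_handle_error` provides (hypothetical values):
_client = SmartLeadClient(api_key="sk-secret")
_err = ValueError(
    "POST https://server.smartlead.ai/api/v1/campaigns/create?api_key=sk-secret failed"
)
print(_client._handle_error(_err))
# -> "POST https://server.smartlead.ai/api/v1/campaigns/create?api_key=API KEY failed"
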
@@ -1,35 +0,0 @@
from typing import Literal

from pydantic import SecretStr

from backend.data.model import APIKeyCredentials, CredentialsField, CredentialsMetaInput
from backend.integrations.providers import ProviderName

SmartLeadCredentials = APIKeyCredentials
SmartLeadCredentialsInput = CredentialsMetaInput[
    Literal[ProviderName.SMARTLEAD],
    Literal["api_key"],
]

TEST_CREDENTIALS = APIKeyCredentials(
    id="01234567-89ab-cdef-0123-456789abcdef",
    provider="smartlead",
    api_key=SecretStr("mock-smartlead-api-key"),
    title="Mock SmartLead API key",
    expires_at=None,
)
TEST_CREDENTIALS_INPUT = {
    "provider": TEST_CREDENTIALS.provider,
    "id": TEST_CREDENTIALS.id,
    "type": TEST_CREDENTIALS.type,
    "title": TEST_CREDENTIALS.title,
}


def SmartLeadCredentialsField() -> SmartLeadCredentialsInput:
    """
    Creates a SmartLead credentials input on a block.
    """
    return CredentialsField(
        description="The SmartLead integration can be used with an API Key.",
    )
@@ -1,326 +0,0 @@
from backend.blocks.smartlead._api import SmartLeadClient
from backend.blocks.smartlead._auth import (
    TEST_CREDENTIALS,
    TEST_CREDENTIALS_INPUT,
    SmartLeadCredentials,
    SmartLeadCredentialsInput,
)
from backend.blocks.smartlead.models import (
    AddLeadsRequest,
    AddLeadsToCampaignResponse,
    CreateCampaignRequest,
    CreateCampaignResponse,
    LeadInput,
    LeadUploadSettings,
    SaveSequencesRequest,
    SaveSequencesResponse,
    Sequence,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
class CreateCampaignBlock(Block):
    """Create a campaign in SmartLead"""

    class Input(BlockSchema):
        name: str = SchemaField(
            description="The name of the campaign",
        )
        credentials: SmartLeadCredentialsInput = SchemaField(
            description="SmartLead credentials",
        )

    class Output(BlockSchema):
        id: int = SchemaField(
            description="The ID of the created campaign",
        )
        name: str = SchemaField(
            description="The name of the created campaign",
        )
        created_at: str = SchemaField(
            description="The date and time the campaign was created",
        )
        error: str = SchemaField(
            description="Error message if the campaign creation failed",
            default="",
        )

    def __init__(self):
        super().__init__(
            id="8865699f-9188-43c4-89b0-79c84cfaa03e",
            description="Create a campaign in SmartLead",
            categories={BlockCategory.CRM},
            input_schema=CreateCampaignBlock.Input,
            output_schema=CreateCampaignBlock.Output,
            test_credentials=TEST_CREDENTIALS,
            test_input={"name": "Test Campaign", "credentials": TEST_CREDENTIALS_INPUT},
            test_output=[
                ("id", 1),
                ("name", "Test Campaign"),
                ("created_at", "2024-01-01T00:00:00Z"),
            ],
            test_mock={
                "create_campaign": lambda name, credentials: CreateCampaignResponse(
                    ok=True,
                    id=1,
                    name=name,
                    created_at="2024-01-01T00:00:00Z",
                )
            },
        )

    @staticmethod
    def create_campaign(
        name: str, credentials: SmartLeadCredentials
    ) -> CreateCampaignResponse:
        client = SmartLeadClient(credentials.api_key.get_secret_value())
        return client.create_campaign(CreateCampaignRequest(name=name))

    def run(
        self,
        input_data: Input,
        *,
        credentials: SmartLeadCredentials,
        **kwargs,
    ) -> BlockOutput:
        response = self.create_campaign(input_data.name, credentials)

        yield "id", response.id
        yield "name", response.name
        yield "created_at", response.created_at
        if not response.ok:
            yield "error", "Failed to create campaign"
class AddLeadToCampaignBlock(Block):
    """Add a lead to a campaign in SmartLead"""

    class Input(BlockSchema):
        campaign_id: int = SchemaField(
            description="The ID of the campaign to add the lead to",
        )
        lead_list: list[LeadInput] = SchemaField(
            description="An array of JSON objects, each representing a lead's details. Can hold at most 100 leads.",
            max_length=100,
            default=[],
            advanced=False,
        )
        settings: LeadUploadSettings = SchemaField(
            description="Settings for lead upload",
            default=LeadUploadSettings(),
        )
        credentials: SmartLeadCredentialsInput = SchemaField(
            description="SmartLead credentials",
        )

    class Output(BlockSchema):
        campaign_id: int = SchemaField(
            description="The ID of the campaign the lead was added to (passed through)",
        )
        upload_count: int = SchemaField(
            description="The number of leads added to the campaign",
        )
        already_added_to_campaign: int = SchemaField(
            description="The number of leads that were already added to the campaign",
        )
        duplicate_count: int = SchemaField(
            description="The number of emails that were duplicates",
        )
        invalid_email_count: int = SchemaField(
            description="The number of emails that were invalidly formatted",
        )
        is_lead_limit_exhausted: bool = SchemaField(
            description="Whether the lead limit was exhausted",
        )
        lead_import_stopped_count: int = SchemaField(
            description="The number of leads that were not added to the campaign because the lead import was stopped",
        )
        error: str = SchemaField(
            description="Error message if the lead was not added to the campaign",
            default="",
        )

    def __init__(self):
        super().__init__(
            id="fb8106a4-1a8f-42f9-a502-f6d07e6fe0ec",
            description="Add a lead to a campaign in SmartLead",
            categories={BlockCategory.CRM},
            input_schema=AddLeadToCampaignBlock.Input,
            output_schema=AddLeadToCampaignBlock.Output,
            test_credentials=TEST_CREDENTIALS,
            test_input={
                "campaign_id": 1,
                "lead_list": [],
                "credentials": TEST_CREDENTIALS_INPUT,
            },
            test_output=[
                ("campaign_id", 1),
                ("upload_count", 1),
            ],
            test_mock={
                "add_leads_to_campaign": lambda campaign_id, lead_list, credentials: AddLeadsToCampaignResponse(
                    ok=True,
                    upload_count=1,
                    already_added_to_campaign=0,
                    duplicate_count=0,
                    invalid_email_count=0,
                    is_lead_limit_exhausted=False,
                    lead_import_stopped_count=0,
                    error="",
                    total_leads=1,
                    block_count=0,
                    invalid_emails=[],
                    unsubscribed_leads=[],
                    bounce_count=0,
                )
            },
        )

    @staticmethod
    def add_leads_to_campaign(
        campaign_id: int, lead_list: list[LeadInput], credentials: SmartLeadCredentials
    ) -> AddLeadsToCampaignResponse:
        client = SmartLeadClient(credentials.api_key.get_secret_value())
        # NOTE: the block's `settings` input is not forwarded here; default
        # upload settings are always sent.
        return client.add_leads_to_campaign(
            AddLeadsRequest(
                campaign_id=campaign_id,
                lead_list=lead_list,
                settings=LeadUploadSettings(
                    ignore_global_block_list=False,
                    ignore_unsubscribe_list=False,
                    ignore_community_bounce_list=False,
                    ignore_duplicate_leads_in_other_campaign=False,
                ),
            ),
        )

    def run(
        self,
        input_data: Input,
        *,
        credentials: SmartLeadCredentials,
        **kwargs,
    ) -> BlockOutput:
        response = self.add_leads_to_campaign(
            input_data.campaign_id, input_data.lead_list, credentials
        )

        yield "campaign_id", input_data.campaign_id
        yield "upload_count", response.upload_count
        if response.already_added_to_campaign:
            yield "already_added_to_campaign", response.already_added_to_campaign
        if response.duplicate_count:
            yield "duplicate_count", response.duplicate_count
        if response.invalid_email_count:
            yield "invalid_email_count", response.invalid_email_count
        if response.is_lead_limit_exhausted:
            yield "is_lead_limit_exhausted", response.is_lead_limit_exhausted
        if response.lead_import_stopped_count:
            yield "lead_import_stopped_count", response.lead_import_stopped_count
        if response.error:
            yield "error", response.error
        if not response.ok:
            yield "error", "Failed to add leads to campaign"
class SaveCampaignSequencesBlock(Block):
    """Save sequences within a campaign"""

    class Input(BlockSchema):
        campaign_id: int = SchemaField(
            description="The ID of the campaign to save sequences for",
        )
        sequences: list[Sequence] = SchemaField(
            description="The sequences to save",
            default=[],
            advanced=False,
        )
        credentials: SmartLeadCredentialsInput = SchemaField(
            description="SmartLead credentials",
        )

    class Output(BlockSchema):
        data: dict | str | None = SchemaField(
            description="Data from the API",
            default=None,
        )
        message: str = SchemaField(
            description="Message from the API",
            default="",
        )
        error: str = SchemaField(
            description="Error message if the sequences were not saved",
            default="",
        )

    def __init__(self):
        super().__init__(
            id="e7d9f41c-dc10-4f39-98ba-a432abd128c0",
            description="Save sequences within a campaign",
            categories={BlockCategory.CRM},
            input_schema=SaveCampaignSequencesBlock.Input,
            output_schema=SaveCampaignSequencesBlock.Output,
            test_credentials=TEST_CREDENTIALS,
            test_input={
                "campaign_id": 1,
                "sequences": [],
                "credentials": TEST_CREDENTIALS_INPUT,
            },
            test_output=[
                ("message", "Sequences saved successfully"),
            ],
            test_mock={
                "save_campaign_sequences": lambda campaign_id, sequences, credentials: SaveSequencesResponse(
                    ok=True,
                    message="Sequences saved successfully",
                )
            },
        )

    @staticmethod
    def save_campaign_sequences(
        campaign_id: int, sequences: list[Sequence], credentials: SmartLeadCredentials
    ) -> SaveSequencesResponse:
        client = SmartLeadClient(credentials.api_key.get_secret_value())
        return client.save_campaign_sequences(
            campaign_id=campaign_id, request=SaveSequencesRequest(sequences=sequences)
        )

    def run(
        self,
        input_data: Input,
        *,
        credentials: SmartLeadCredentials,
        **kwargs,
    ) -> BlockOutput:
        response = self.save_campaign_sequences(
            input_data.campaign_id, input_data.sequences, credentials
        )

        if response.data:
            yield "data", response.data
        if response.message:
            yield "message", response.message
        if response.error:
            yield "error", response.error
        if not response.ok:
            yield "error", "Failed to save sequences"
@@ -1,147 +0,0 @@
|
||||
from enum import Enum
|
||||
|
||||
from pydantic import BaseModel
|
||||
|
||||
from backend.data.model import SchemaField
|
||||
|
||||
|
||||
class CreateCampaignResponse(BaseModel):
|
||||
ok: bool
|
||||
id: int
|
||||
name: str
|
||||
created_at: str
|
||||
|
||||
|
||||
class CreateCampaignRequest(BaseModel):
|
||||
name: str
|
||||
client_id: str | None = None
|
||||
|
||||
|
||||
class AddLeadsToCampaignResponse(BaseModel):
|
||||
ok: bool
|
||||
upload_count: int
|
||||
total_leads: int
|
||||
block_count: int
|
||||
duplicate_count: int
|
||||
invalid_email_count: int
|
||||
invalid_emails: list[str]
|
||||
already_added_to_campaign: int
|
||||
unsubscribed_leads: list[str]
|
||||
is_lead_limit_exhausted: bool
|
||||
lead_import_stopped_count: int
|
||||
bounce_count: int
|
||||
error: str | None = None
|
||||
|
||||
|
||||
class LeadCustomFields(BaseModel):
|
||||
"""Custom fields for a lead (max 20 fields)"""
|
||||
|
||||
fields: dict[str, str] = SchemaField(
|
||||
description="Custom fields for a lead (max 20 fields)",
|
||||
max_length=20,
|
||||
default={},
|
||||
)
|
||||
|
||||
|
||||
class LeadInput(BaseModel):
|
||||
"""Single lead input data"""
|
||||
|
||||
first_name: str
|
||||
last_name: str
|
||||
email: str
|
||||
phone_number: str | None = None # Changed from int to str for phone numbers
|
||||
company_name: str | None = None
|
||||
website: str | None = None
|
||||
location: str | None = None
|
||||
custom_fields: LeadCustomFields | None = None
|
||||
linkedin_profile: str | None = None
|
||||
company_url: str | None = None
|
||||
|
||||
|
||||
class LeadUploadSettings(BaseModel):
|
||||
"""Settings for lead upload"""
|
||||
|
||||
ignore_global_block_list: bool = SchemaField(
|
||||
description="Ignore the global block list",
|
||||
default=False,
|
||||
)
|
||||
ignore_unsubscribe_list: bool = SchemaField(
|
||||
description="Ignore the unsubscribe list",
|
||||
default=False,
|
||||
)
|
||||
ignore_community_bounce_list: bool = SchemaField(
|
||||
description="Ignore the community bounce list",
|
||||
default=False,
|
||||
)
|
||||
ignore_duplicate_leads_in_other_campaign: bool = SchemaField(
|
||||
description="Ignore duplicate leads in other campaigns",
|
||||
default=False,
|
||||
)
|
||||
|
||||
|
||||
class AddLeadsRequest(BaseModel):
|
||||
"""Request body for adding leads to a campaign"""
|
||||
|
||||
lead_list: list[LeadInput] = SchemaField(
|
||||
description="List of leads to add to the campaign",
|
||||
max_length=100,
|
||||
default=[],
|
||||
)
|
||||
settings: LeadUploadSettings
|
||||
campaign_id: int
|
||||
|
||||
|
||||
class VariantDistributionType(str, Enum):
|
||||
MANUAL_EQUAL = "MANUAL_EQUAL"
|
||||
MANUAL_PERCENTAGE = "MANUAL_PERCENTAGE"
|
||||
AI_EQUAL = "AI_EQUAL"
|
||||
|
||||
|
||||
class WinningMetricProperty(str, Enum):
|
||||
OPEN_RATE = "OPEN_RATE"
|
||||
CLICK_RATE = "CLICK_RATE"
|
||||
REPLY_RATE = "REPLY_RATE"
|
||||
POSITIVE_REPLY_RATE = "POSITIVE_REPLY_RATE"
|
||||
|
||||
|
||||
class SequenceDelayDetails(BaseModel):
|
||||
delay_in_days: int
|
||||
|
||||
|
||||
class SequenceVariant(BaseModel):
|
||||
subject: str
|
||||
email_body: str
|
||||
variant_label: str
|
||||
id: int | None = None # Optional for creation, required for updates
|
||||
variant_distribution_percentage: int | None = None
|
||||
|
||||
|
||||
class Sequence(BaseModel):
|
||||
seq_number: int = SchemaField(
|
||||
description="The sequence number",
|
||||
default=1,
|
||||
)
|
||||
seq_delay_details: SequenceDelayDetails
|
||||
id: int | None = None
|
||||
variant_distribution_type: VariantDistributionType | None = None
|
||||
lead_distribution_percentage: int | None = SchemaField(
|
||||
None, ge=20, le=100
|
||||
) # >= 20% for fair calculation
|
||||
winning_metric_property: WinningMetricProperty | None = None
|
||||
seq_variants: list[SequenceVariant] | None = None
|
||||
subject: str = "" # blank makes the follow up in the same thread
|
||||
email_body: str | None = None
|
||||
|
||||
|
||||
class SaveSequencesRequest(BaseModel):
|
||||
sequences: list[Sequence]
|
||||
|
||||
|
||||
class SaveSequencesResponse(BaseModel):
|
||||
ok: bool
|
||||
message: str = SchemaField(
|
||||
description="Message from the API",
|
||||
default="",
|
||||
)
|
||||
data: dict | str | None = None
|
||||
error: str | None = None
|
@@ -78,7 +78,7 @@ class CreateTalkingAvatarVideoBlock(Block):
        super().__init__(
            id="98c6f503-8c47-4b1c-a96d-351fc7c87dab",
            description="This block integrates with D-ID to create video clips and retrieve their URLs.",
            categories={BlockCategory.AI, BlockCategory.MULTIMEDIA},
            categories={BlockCategory.AI},
            input_schema=CreateTalkingAvatarVideoBlock.Input,
            output_schema=CreateTalkingAvatarVideoBlock.Output,
            test_input={

@@ -76,8 +76,6 @@ class ExtractTextInformationBlock(Block):
    class Output(BlockSchema):
        positive: str = SchemaField(description="Extracted text")
        negative: str = SchemaField(description="Original text")
        matched_results: list[str] = SchemaField(description="List of matched results")
        matched_count: int = SchemaField(description="Number of matched results")

    def __init__(self):
        super().__init__(
@@ -105,31 +103,13 @@ class ExtractTextInformationBlock(Block):
                },
            ],
            test_output=[
                # Test case 1
                ("positive", "World!"),
                ("matched_results", ["World!"]),
                ("matched_count", 1),
                # Test case 2
                ("positive", "Hello, World!"),
                ("matched_results", ["Hello, World!"]),
                ("matched_count", 1),
                # Test case 3
                ("negative", "Hello, World!"),
                ("matched_results", []),
                ("matched_count", 0),
                # Test case 4
                ("positive", "Hello,"),
                ("matched_results", ["Hello,"]),
                ("matched_count", 1),
                # Test case 5
                ("positive", "World!!"),
                ("matched_results", ["World!!"]),
                ("matched_count", 1),
                # Test case 6
                ("positive", "World!!"),
                ("positive", "Earth!!"),
                ("matched_results", ["World!!", "Earth!!"]),
                ("matched_count", 2),
            ],
        )

@@ -150,16 +130,13 @@ class ExtractTextInformationBlock(Block):
            for match in re.finditer(input_data.pattern, txt, flags)
            if input_data.group <= len(match.groups())
        ]
        if not input_data.find_all:
            matches = matches[:1]
        for match in matches:
            yield "positive", match
            if not input_data.find_all:
                return
        if not matches:
            yield "negative", input_data.text

        yield "matched_results", matches
        yield "matched_count", len(matches)


class FillTextTemplateBlock(Block):
    class Input(BlockSchema):
@@ -235,71 +212,3 @@ class CombineTextsBlock(Block):
    def run(self, input_data: Input, **kwargs) -> BlockOutput:
        combined_text = input_data.delimiter.join(input_data.input)
        yield "output", combined_text


class TextSplitBlock(Block):
    class Input(BlockSchema):
        text: str = SchemaField(description="The text to split.")
        delimiter: str = SchemaField(description="The delimiter to split the text by.")
        strip: bool = SchemaField(
            description="Whether to strip the text.", default=True
        )

    class Output(BlockSchema):
        texts: list[str] = SchemaField(
            description="The text split into a list of strings."
        )

    def __init__(self):
        super().__init__(
            id="d5ea33c8-a575-477a-b42f-2fe3be5055ec",
            description="This block is used to split a text into a list of strings.",
            categories={BlockCategory.TEXT},
            input_schema=TextSplitBlock.Input,
            output_schema=TextSplitBlock.Output,
            test_input=[
                {"text": "Hello, World!", "delimiter": ","},
                {"text": "Hello, World!", "delimiter": ",", "strip": False},
            ],
            test_output=[
                ("texts", ["Hello", "World!"]),
                ("texts", ["Hello", " World!"]),
            ],
        )

    def run(self, input_data: Input, **kwargs) -> BlockOutput:
        if len(input_data.text) == 0:
            yield "texts", []
        else:
            texts = input_data.text.split(input_data.delimiter)
            if input_data.strip:
                texts = [text.strip() for text in texts]
            yield "texts", texts


class TextReplaceBlock(Block):
    class Input(BlockSchema):
        text: str = SchemaField(description="The text to replace.")
        old: str = SchemaField(description="The old text to replace.")
        new: str = SchemaField(description="The new text to replace with.")

    class Output(BlockSchema):
        output: str = SchemaField(description="The text with the replaced text.")

    def __init__(self):
        super().__init__(
            id="7e7c87ab-3469-4bcc-9abe-67705091b713",
            description="This block is used to replace a text with a new text.",
            categories={BlockCategory.TEXT},
            input_schema=TextReplaceBlock.Input,
            output_schema=TextReplaceBlock.Output,
            test_input=[
                {"text": "Hello, World!", "old": "Hello", "new": "Hi"},
            ],
            test_output=[
                ("output", "Hi, World!"),
            ],
        )

    def run(self, input_data: Input, **kwargs) -> BlockOutput:
        yield "output", input_data.text.replace(input_data.old, input_data.new)

@@ -53,7 +53,7 @@ class UnrealTextToSpeechBlock(Block):
        super().__init__(
            id="4ff1ff6d-cc40-4caa-ae69-011daa20c378",
            description="Converts text to speech using the Unreal Speech API",
            categories={BlockCategory.AI, BlockCategory.TEXT, BlockCategory.MULTIMEDIA},
            categories={BlockCategory.AI, BlockCategory.TEXT},
            input_schema=UnrealTextToSpeechBlock.Input,
            output_schema=UnrealTextToSpeechBlock.Output,
            test_input={

@@ -1,61 +0,0 @@
from typing import Literal

from pydantic import SecretStr

from backend.data.model import (
    CredentialsField,
    CredentialsMetaInput,
    OAuth2Credentials,
    ProviderName,
)
from backend.integrations.oauth.todoist import TodoistOAuthHandler
from backend.util.settings import Secrets

secrets = Secrets()
TODOIST_OAUTH_IS_CONFIGURED = bool(
    secrets.todoist_client_id and secrets.todoist_client_secret
)

TodoistCredentials = OAuth2Credentials
TodoistCredentialsInput = CredentialsMetaInput[
    Literal[ProviderName.TODOIST], Literal["oauth2"]
]


def TodoistCredentialsField(scopes: list[str]) -> TodoistCredentialsInput:
    """
    Creates a Todoist credentials input on a block.

    Params:
        scopes: The authorization scopes needed for the block to work.
    """
    return CredentialsField(
        required_scopes=set(TodoistOAuthHandler.DEFAULT_SCOPES + scopes),
        description="The Todoist integration requires OAuth2 authentication.",
    )


TEST_CREDENTIALS = OAuth2Credentials(
    id="01234567-89ab-cdef-0123-456789abcdef",
    provider="todoist",
    access_token=SecretStr("mock-todoist-access-token"),
    refresh_token=None,
    access_token_expires_at=None,
    scopes=[
        "task:add",
        "data:read",
        "data:read_write",
        "data:delete",
        "project:delete",
    ],
    title="Mock Todoist OAuth2 Credentials",
    username="mock-todoist-username",
    refresh_token_expires_at=None,
)

TEST_CREDENTIALS_INPUT = {
    "provider": TEST_CREDENTIALS.provider,
    "id": TEST_CREDENTIALS.id,
    "type": TEST_CREDENTIALS.type,
    "title": TEST_CREDENTIALS.title,
}
@@ -1,24 +0,0 @@
from enum import Enum


class Colors(Enum):
    berry_red = "berry_red"
    red = "red"
    orange = "orange"
    yellow = "yellow"
    olive_green = "olive_green"
    lime_green = "lime_green"
    green = "green"
    mint_green = "mint_green"
    teal = "teal"
    sky_blue = "sky_blue"
    light_blue = "light_blue"
    blue = "blue"
    grape = "grape"
    violet = "violet"
    lavender = "lavender"
    magenta = "magenta"
    salmon = "salmon"
    charcoal = "charcoal"
    grey = "grey"
    taupe = "taupe"
@@ -1,439 +0,0 @@
from typing import Literal, Union

from pydantic import BaseModel
from todoist_api_python.api import TodoistAPI
from typing_extensions import Optional

from backend.blocks.todoist._auth import (
    TEST_CREDENTIALS,
    TEST_CREDENTIALS_INPUT,
    TodoistCredentials,
    TodoistCredentialsField,
    TodoistCredentialsInput,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField


class TaskId(BaseModel):
    discriminator: Literal["task"]
    task_id: str


class ProjectId(BaseModel):
    discriminator: Literal["project"]
    project_id: str


class TodoistCreateCommentBlock(Block):
    """Creates a new comment on a Todoist task or project"""

    class Input(BlockSchema):
        credentials: TodoistCredentialsInput = TodoistCredentialsField([])
        content: str = SchemaField(description="Comment content")
        id_type: Union[TaskId, ProjectId] = SchemaField(
            discriminator="discriminator",
            description="Specify either task_id or project_id to comment on",
            default=TaskId(discriminator="task", task_id=""),
            advanced=False,
        )
        attachment: Optional[dict] = SchemaField(
            description="Optional file attachment", default=None
        )

    class Output(BlockSchema):
        id: str = SchemaField(description="ID of created comment")
        content: str = SchemaField(description="Comment content")
        posted_at: str = SchemaField(description="Comment timestamp")
        task_id: Optional[str] = SchemaField(
            description="Associated task ID", default=None
        )
        project_id: Optional[str] = SchemaField(
            description="Associated project ID", default=None
        )

        error: str = SchemaField(description="Error message if the request failed")

    def __init__(self):
        super().__init__(
            id="1bba7e54-2310-4a31-8e6f-54d5f9ab7459",
            description="Creates a new comment on a Todoist task or project",
            categories={BlockCategory.PRODUCTIVITY},
            input_schema=TodoistCreateCommentBlock.Input,
            output_schema=TodoistCreateCommentBlock.Output,
            test_input={
                "credentials": TEST_CREDENTIALS_INPUT,
                "content": "Test comment",
                "id_type": {"discriminator": "task", "task_id": "2995104339"},
            },
            test_credentials=TEST_CREDENTIALS,
            test_output=[
                ("id", "2992679862"),
                ("content", "Test comment"),
                ("posted_at", "2016-09-22T07:00:00.000000Z"),
                ("task_id", "2995104339"),
                ("project_id", None),
            ],
            test_mock={
                "create_comment": lambda content, credentials, task_id=None, project_id=None, attachment=None: {
                    "id": "2992679862",
                    "content": "Test comment",
                    "posted_at": "2016-09-22T07:00:00.000000Z",
                    "task_id": "2995104339",
                    "project_id": None,
                }
            },
        )

    @staticmethod
    def create_comment(
        credentials: TodoistCredentials,
        content: str,
        task_id: Optional[str] = None,
        project_id: Optional[str] = None,
        attachment: Optional[dict] = None,
    ):
        try:
            api = TodoistAPI(credentials.access_token.get_secret_value())
            comment = api.add_comment(
                content=content,
                task_id=task_id,
                project_id=project_id,
                attachment=attachment,
            )
            return comment.__dict__

        except Exception as e:
            raise e

    def run(
        self,
        input_data: Input,
        *,
        credentials: TodoistCredentials,
        **kwargs,
    ) -> BlockOutput:
        try:
            task_id = None
            project_id = None

            if isinstance(input_data.id_type, TaskId):
                task_id = input_data.id_type.task_id
            else:
                project_id = input_data.id_type.project_id

            comment_data = self.create_comment(
                credentials,
                input_data.content,
                task_id=task_id,
                project_id=project_id,
                attachment=input_data.attachment,
            )

            if comment_data:
                yield "id", comment_data["id"]
                yield "content", comment_data["content"]
                yield "posted_at", comment_data["posted_at"]
                yield "task_id", comment_data["task_id"]
                yield "project_id", comment_data["project_id"]

        except Exception as e:
            yield "error", str(e)


class TodoistGetCommentsBlock(Block):
    """Get all comments for a Todoist task or project"""

    class Input(BlockSchema):
        credentials: TodoistCredentialsInput = TodoistCredentialsField([])
        id_type: Union[TaskId, ProjectId] = SchemaField(
            discriminator="discriminator",
            description="Specify either task_id or project_id to get comments for",
            default=TaskId(discriminator="task", task_id=""),
            advanced=False,
        )

    class Output(BlockSchema):
        comments: list = SchemaField(description="List of comments")
        error: str = SchemaField(description="Error message if the request failed")

    def __init__(self):
        super().__init__(
            id="9972d8ae-ddf2-11ef-a9b8-32d3674e8b7e",
            description="Get all comments for a Todoist task or project",
            categories={BlockCategory.PRODUCTIVITY},
            input_schema=TodoistGetCommentsBlock.Input,
            output_schema=TodoistGetCommentsBlock.Output,
            test_input={
                "credentials": TEST_CREDENTIALS_INPUT,
                "id_type": {"discriminator": "task", "task_id": "2995104339"},
            },
            test_credentials=TEST_CREDENTIALS,
            test_output=[
                (
                    "comments",
                    [
                        {
                            "id": "2992679862",
                            "content": "Test comment",
                            "posted_at": "2016-09-22T07:00:00.000000Z",
                            "task_id": "2995104339",
                            "project_id": None,
                            "attachment": None,
                        }
                    ],
                )
            ],
            test_mock={
                "get_comments": lambda credentials, task_id=None, project_id=None: [
                    {
                        "id": "2992679862",
                        "content": "Test comment",
                        "posted_at": "2016-09-22T07:00:00.000000Z",
                        "task_id": "2995104339",
                        "project_id": None,
                        "attachment": None,
                    }
                ]
            },
        )

    @staticmethod
    def get_comments(
        credentials: TodoistCredentials,
        task_id: Optional[str] = None,
        project_id: Optional[str] = None,
    ):
        try:
            api = TodoistAPI(credentials.access_token.get_secret_value())
            comments = api.get_comments(task_id=task_id, project_id=project_id)
            return [comment.__dict__ for comment in comments]

        except Exception as e:
            raise e

    def run(
        self,
        input_data: Input,
        *,
        credentials: TodoistCredentials,
        **kwargs,
    ) -> BlockOutput:
        try:
            task_id = None
            project_id = None

            if isinstance(input_data.id_type, TaskId):
                task_id = input_data.id_type.task_id
            else:
                project_id = input_data.id_type.project_id

            comments = self.get_comments(
                credentials, task_id=task_id, project_id=project_id
            )

            yield "comments", comments

        except Exception as e:
            yield "error", str(e)


class TodoistGetCommentBlock(Block):
    """Get a single comment from Todoist using comment ID"""

    class Input(BlockSchema):
        credentials: TodoistCredentialsInput = TodoistCredentialsField([])
        comment_id: str = SchemaField(description="Comment ID to retrieve")

    class Output(BlockSchema):
        content: str = SchemaField(description="Comment content")
        id: str = SchemaField(description="Comment ID")
        posted_at: str = SchemaField(description="Comment timestamp")
        project_id: Optional[str] = SchemaField(
            description="Associated project ID", default=None
        )
        task_id: Optional[str] = SchemaField(
            description="Associated task ID", default=None
        )
        attachment: Optional[dict] = SchemaField(
            description="Optional file attachment", default=None
        )

        error: str = SchemaField(description="Error message if the request failed")

    def __init__(self):
        super().__init__(
            id="a809d264-ddf2-11ef-9764-32d3674e8b7e",
            description="Get a single comment from Todoist",
            categories={BlockCategory.PRODUCTIVITY},
            input_schema=TodoistGetCommentBlock.Input,
            output_schema=TodoistGetCommentBlock.Output,
            test_input={
                "credentials": TEST_CREDENTIALS_INPUT,
                "comment_id": "2992679862",
            },
            test_credentials=TEST_CREDENTIALS,
            test_output=[
                ("content", "Test comment"),
                ("id", "2992679862"),
                ("posted_at", "2016-09-22T07:00:00.000000Z"),
                ("project_id", None),
                ("task_id", "2995104339"),
                ("attachment", None),
            ],
            test_mock={
                "get_comment": lambda credentials, comment_id: {
                    "content": "Test comment",
                    "id": "2992679862",
                    "posted_at": "2016-09-22T07:00:00.000000Z",
                    "project_id": None,
                    "task_id": "2995104339",
                    "attachment": None,
                }
            },
        )

    @staticmethod
    def get_comment(credentials: TodoistCredentials, comment_id: str):
        try:
            api = TodoistAPI(credentials.access_token.get_secret_value())
            comment = api.get_comment(comment_id=comment_id)
            return comment.__dict__

        except Exception as e:
            raise e

    def run(
        self,
        input_data: Input,
        *,
        credentials: TodoistCredentials,
        **kwargs,
    ) -> BlockOutput:
        try:
            comment_data = self.get_comment(
                credentials, comment_id=input_data.comment_id
            )

            if comment_data:
                yield "content", comment_data["content"]
                yield "id", comment_data["id"]
                yield "posted_at", comment_data["posted_at"]
                yield "project_id", comment_data["project_id"]
                yield "task_id", comment_data["task_id"]
                yield "attachment", comment_data["attachment"]

        except Exception as e:
            yield "error", str(e)


class TodoistUpdateCommentBlock(Block):
    """Updates a Todoist comment"""

    class Input(BlockSchema):
        credentials: TodoistCredentialsInput = TodoistCredentialsField([])
        comment_id: str = SchemaField(description="Comment ID to update")
        content: str = SchemaField(description="New content for the comment")

    class Output(BlockSchema):
        success: bool = SchemaField(description="Whether the update was successful")
        error: str = SchemaField(description="Error message if the request failed")

    def __init__(self):
        super().__init__(
            id="b773c520-ddf2-11ef-9f34-32d3674e8b7e",
            description="Updates a Todoist comment",
            categories={BlockCategory.PRODUCTIVITY},
            input_schema=TodoistUpdateCommentBlock.Input,
            output_schema=TodoistUpdateCommentBlock.Output,
            test_input={
                "credentials": TEST_CREDENTIALS_INPUT,
                "comment_id": "2992679862",
                "content": "Need one bottle of milk",
            },
            test_credentials=TEST_CREDENTIALS,
            test_output=[("success", True)],
            test_mock={"update_comment": lambda credentials, comment_id, content: True},
        )

    @staticmethod
    def update_comment(credentials: TodoistCredentials, comment_id: str, content: str):
        try:
            api = TodoistAPI(credentials.access_token.get_secret_value())
            api.update_comment(comment_id=comment_id, content=content)
            return True

        except Exception as e:
            raise e

    def run(
        self,
        input_data: Input,
        *,
        credentials: TodoistCredentials,
        **kwargs,
    ) -> BlockOutput:
        try:
            success = self.update_comment(
                credentials,
                comment_id=input_data.comment_id,
                content=input_data.content,
            )

            yield "success", success

        except Exception as e:
            yield "error", str(e)


class TodoistDeleteCommentBlock(Block):
    """Deletes a Todoist comment"""

    class Input(BlockSchema):
        credentials: TodoistCredentialsInput = TodoistCredentialsField([])
        comment_id: str = SchemaField(description="Comment ID to delete")

    class Output(BlockSchema):
        success: bool = SchemaField(description="Whether the deletion was successful")
        error: str = SchemaField(description="Error message if the request failed")

    def __init__(self):
        super().__init__(
            id="bda4c020-ddf2-11ef-b114-32d3674e8b7e",
            description="Deletes a Todoist comment",
            categories={BlockCategory.PRODUCTIVITY},
            input_schema=TodoistDeleteCommentBlock.Input,
            output_schema=TodoistDeleteCommentBlock.Output,
            test_input={
                "credentials": TEST_CREDENTIALS_INPUT,
                "comment_id": "2992679862",
            },
            test_credentials=TEST_CREDENTIALS,
            test_output=[("success", True)],
            test_mock={"delete_comment": lambda credentials, comment_id: True},
        )

    @staticmethod
    def delete_comment(credentials: TodoistCredentials, comment_id: str):
        try:
            api = TodoistAPI(credentials.access_token.get_secret_value())
            success = api.delete_comment(comment_id=comment_id)
            return success

        except Exception as e:
            raise e

    def run(
        self,
        input_data: Input,
        *,
        credentials: TodoistCredentials,
        **kwargs,
    ) -> BlockOutput:
        try:
            success = self.delete_comment(credentials, comment_id=input_data.comment_id)

            yield "success", success

        except Exception as e:
            yield "error", str(e)
@@ -1,557 +0,0 @@
from todoist_api_python.api import TodoistAPI
from typing_extensions import Optional

from backend.blocks.todoist._auth import (
    TEST_CREDENTIALS,
    TEST_CREDENTIALS_INPUT,
    TodoistCredentials,
    TodoistCredentialsField,
    TodoistCredentialsInput,
)
from backend.blocks.todoist._types import Colors
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField


class TodoistCreateLabelBlock(Block):
    """Creates a new label in Todoist"""

    class Input(BlockSchema):
        credentials: TodoistCredentialsInput = TodoistCredentialsField([])
        name: str = SchemaField(description="Name of the label")
        order: Optional[int] = SchemaField(description="Label order", default=None)
        color: Optional[Colors] = SchemaField(
            description="The color of the label icon", default=Colors.charcoal
        )
        is_favorite: bool = SchemaField(
            description="Whether the label is a favorite", default=False
        )

    class Output(BlockSchema):
        id: str = SchemaField(description="ID of the created label")
        name: str = SchemaField(description="Name of the label")
        color: str = SchemaField(description="Color of the label")
        order: int = SchemaField(description="Label order")
        is_favorite: bool = SchemaField(description="Favorite status")
        error: str = SchemaField(description="Error message if the request failed")

    def __init__(self):
        super().__init__(
            id="7288a968-de14-11ef-8997-32d3674e8b7e",
            description="Creates a new label in Todoist, It will not work if same name already exists",
            categories={BlockCategory.PRODUCTIVITY},
            input_schema=TodoistCreateLabelBlock.Input,
            output_schema=TodoistCreateLabelBlock.Output,
            test_input={
                "credentials": TEST_CREDENTIALS_INPUT,
                "name": "Test Label",
                "color": Colors.charcoal.value,
                "order": 1,
                "is_favorite": False,
            },
            test_credentials=TEST_CREDENTIALS,
            test_output=[
                ("id", "2156154810"),
                ("name", "Test Label"),
                ("color", "charcoal"),
                ("order", 1),
                ("is_favorite", False),
            ],
            test_mock={
                "create_label": lambda *args, **kwargs: {
                    "id": "2156154810",
                    "name": "Test Label",
                    "color": "charcoal",
                    "order": 1,
                    "is_favorite": False,
                }
            },
        )

    @staticmethod
    def create_label(credentials: TodoistCredentials, name: str, **kwargs):
        try:
            api = TodoistAPI(credentials.access_token.get_secret_value())
            label = api.add_label(name=name, **kwargs)
            return label.__dict__

        except Exception as e:
            raise e

    def run(
        self,
        input_data: Input,
        *,
        credentials: TodoistCredentials,
        **kwargs,
    ) -> BlockOutput:
        try:
            label_args = {
                "order": input_data.order,
                "color": (
                    input_data.color.value if input_data.color is not None else None
                ),
                "is_favorite": input_data.is_favorite,
            }

            label_data = self.create_label(
                credentials,
                input_data.name,
                **{k: v for k, v in label_args.items() if v is not None},
            )

            if label_data:
                yield "id", label_data["id"]
                yield "name", label_data["name"]
                yield "color", label_data["color"]
                yield "order", label_data["order"]
                yield "is_favorite", label_data["is_favorite"]

        except Exception as e:
            yield "error", str(e)


class TodoistListLabelsBlock(Block):
    """Gets all personal labels from Todoist"""

    class Input(BlockSchema):
        credentials: TodoistCredentialsInput = TodoistCredentialsField([])

    class Output(BlockSchema):
        labels: list = SchemaField(description="List of complete label data")
        label_ids: list = SchemaField(description="List of label IDs")
        label_names: list = SchemaField(description="List of label names")
        error: str = SchemaField(description="Error message if the request failed")

    def __init__(self):
        super().__init__(
            id="776dd750-de14-11ef-b927-32d3674e8b7e",
            description="Gets all personal labels from Todoist",
            categories={BlockCategory.PRODUCTIVITY},
            input_schema=TodoistListLabelsBlock.Input,
            output_schema=TodoistListLabelsBlock.Output,
            test_input={"credentials": TEST_CREDENTIALS_INPUT},
            test_credentials=TEST_CREDENTIALS,
            test_output=[
                (
                    "labels",
                    [
                        {
                            "id": "2156154810",
                            "name": "Test Label",
                            "color": "charcoal",
                            "order": 1,
                            "is_favorite": False,
                        }
                    ],
                ),
                ("label_ids", ["2156154810"]),
                ("label_names", ["Test Label"]),
            ],
            test_mock={
                "get_labels": lambda *args, **kwargs: [
                    {
                        "id": "2156154810",
                        "name": "Test Label",
                        "color": "charcoal",
                        "order": 1,
                        "is_favorite": False,
                    }
                ]
            },
        )

    @staticmethod
    def get_labels(credentials: TodoistCredentials):
        try:
            api = TodoistAPI(credentials.access_token.get_secret_value())
            labels = api.get_labels()
            return [label.__dict__ for label in labels]

        except Exception as e:
            raise e

    def run(
        self,
        input_data: Input,
        *,
        credentials: TodoistCredentials,
        **kwargs,
    ) -> BlockOutput:
        try:
            labels = self.get_labels(credentials)
            yield "labels", labels
            yield "label_ids", [label["id"] for label in labels]
            yield "label_names", [label["name"] for label in labels]

        except Exception as e:
            yield "error", str(e)


class TodoistGetLabelBlock(Block):
    """Gets a personal label from Todoist by ID"""

    class Input(BlockSchema):
        credentials: TodoistCredentialsInput = TodoistCredentialsField([])
        label_id: str = SchemaField(description="ID of the label to retrieve")

    class Output(BlockSchema):
        id: str = SchemaField(description="ID of the label")
        name: str = SchemaField(description="Name of the label")
        color: str = SchemaField(description="Color of the label")
        order: int = SchemaField(description="Label order")
        is_favorite: bool = SchemaField(description="Favorite status")

        error: str = SchemaField(description="Error message if the request failed")

    def __init__(self):
        super().__init__(
            id="7f236514-de14-11ef-bd7a-32d3674e8b7e",
            description="Gets a personal label from Todoist by ID",
            categories={BlockCategory.PRODUCTIVITY},
            input_schema=TodoistGetLabelBlock.Input,
            output_schema=TodoistGetLabelBlock.Output,
            test_input={
                "credentials": TEST_CREDENTIALS_INPUT,
                "label_id": "2156154810",
            },
            test_credentials=TEST_CREDENTIALS,
            test_output=[
                ("id", "2156154810"),
                ("name", "Test Label"),
                ("color", "charcoal"),
                ("order", 1),
                ("is_favorite", False),
            ],
            test_mock={
                "get_label": lambda *args, **kwargs: {
                    "id": "2156154810",
                    "name": "Test Label",
                    "color": "charcoal",
                    "order": 1,
                    "is_favorite": False,
                }
            },
        )

    @staticmethod
    def get_label(credentials: TodoistCredentials, label_id: str):
        try:
            api = TodoistAPI(credentials.access_token.get_secret_value())
            label = api.get_label(label_id=label_id)
            return label.__dict__

        except Exception as e:
            raise e

    def run(
        self,
        input_data: Input,
        *,
        credentials: TodoistCredentials,
        **kwargs,
    ) -> BlockOutput:
        try:
            label_data = self.get_label(credentials, input_data.label_id)

            if label_data:
                yield "id", label_data["id"]
                yield "name", label_data["name"]
                yield "color", label_data["color"]
                yield "order", label_data["order"]
                yield "is_favorite", label_data["is_favorite"]

        except Exception as e:
            yield "error", str(e)


class TodoistUpdateLabelBlock(Block):
    """Updates a personal label in Todoist using ID"""

    class Input(BlockSchema):
        credentials: TodoistCredentialsInput = TodoistCredentialsField([])
        label_id: str = SchemaField(description="ID of the label to update")
        name: Optional[str] = SchemaField(
            description="New name of the label", default=None
        )
        order: Optional[int] = SchemaField(description="Label order", default=None)
        color: Optional[Colors] = SchemaField(
            description="The color of the label icon", default=None
        )
        is_favorite: bool = SchemaField(
            description="Whether the label is a favorite (true/false)", default=False
        )

    class Output(BlockSchema):
        success: bool = SchemaField(description="Whether the update was successful")
        error: str = SchemaField(description="Error message if the request failed")

    def __init__(self):
        super().__init__(
            id="8755614c-de14-11ef-9b56-32d3674e8b7e",
            description="Updates a personal label in Todoist",
            categories={BlockCategory.PRODUCTIVITY},
            input_schema=TodoistUpdateLabelBlock.Input,
            output_schema=TodoistUpdateLabelBlock.Output,
            test_input={
                "credentials": TEST_CREDENTIALS_INPUT,
                "label_id": "2156154810",
                "name": "Updated Label",
                "color": Colors.charcoal.value,
                "order": 2,
                "is_favorite": True,
            },
            test_credentials=TEST_CREDENTIALS,
            test_output=[("success", True)],
            test_mock={"update_label": lambda *args, **kwargs: True},
        )

    @staticmethod
    def update_label(credentials: TodoistCredentials, label_id: str, **kwargs):
        try:
            api = TodoistAPI(credentials.access_token.get_secret_value())
            api.update_label(label_id=label_id, **kwargs)
            return True

        except Exception as e:
            raise e

    def run(
        self,
        input_data: Input,
        *,
        credentials: TodoistCredentials,
        **kwargs,
    ) -> BlockOutput:
        try:
            label_args = {}
            if input_data.name is not None:
                label_args["name"] = input_data.name
            if input_data.order is not None:
                label_args["order"] = input_data.order
            if input_data.color is not None:
                label_args["color"] = input_data.color.value
            if input_data.is_favorite is not None:
                label_args["is_favorite"] = input_data.is_favorite

            success = self.update_label(
                credentials,
                input_data.label_id,
                **{k: v for k, v in label_args.items() if v is not None},
            )

            yield "success", success

        except Exception as e:
            yield "error", str(e)


class TodoistDeleteLabelBlock(Block):
    """Deletes a personal label in Todoist"""

    class Input(BlockSchema):
        credentials: TodoistCredentialsInput = TodoistCredentialsField([])
        label_id: str = SchemaField(description="ID of the label to delete")

    class Output(BlockSchema):
        success: bool = SchemaField(description="Whether the deletion was successful")
        error: str = SchemaField(description="Error message if the request failed")

    def __init__(self):
        super().__init__(
            id="901b8f86-de14-11ef-98b8-32d3674e8b7e",
            description="Deletes a personal label in Todoist",
            categories={BlockCategory.PRODUCTIVITY},
            input_schema=TodoistDeleteLabelBlock.Input,
            output_schema=TodoistDeleteLabelBlock.Output,
            test_input={
                "credentials": TEST_CREDENTIALS_INPUT,
                "label_id": "2156154810",
            },
            test_credentials=TEST_CREDENTIALS,
            test_output=[("success", True)],
            test_mock={"delete_label": lambda *args, **kwargs: True},
        )

    @staticmethod
    def delete_label(credentials: TodoistCredentials, label_id: str):
        try:
            api = TodoistAPI(credentials.access_token.get_secret_value())
            success = api.delete_label(label_id=label_id)
            return success

        except Exception as e:
            raise e

    def run(
        self,
        input_data: Input,
        *,
        credentials: TodoistCredentials,
        **kwargs,
    ) -> BlockOutput:
        try:
            success = self.delete_label(credentials, input_data.label_id)
            yield "success", success

        except Exception as e:
            yield "error", str(e)


class TodoistGetSharedLabelsBlock(Block):
    """Gets all shared labels from Todoist"""

    class Input(BlockSchema):
        credentials: TodoistCredentialsInput = TodoistCredentialsField([])

    class Output(BlockSchema):
        labels: list = SchemaField(description="List of shared label names")
        error: str = SchemaField(description="Error message if the request failed")

    def __init__(self):
        super().__init__(
            id="55fba510-de15-11ef-aed2-32d3674e8b7e",
            description="Gets all shared labels from Todoist",
            categories={BlockCategory.PRODUCTIVITY},
            input_schema=TodoistGetSharedLabelsBlock.Input,
            output_schema=TodoistGetSharedLabelsBlock.Output,
            test_input={"credentials": TEST_CREDENTIALS_INPUT},
            test_credentials=TEST_CREDENTIALS,
            test_output=[("labels", ["Label1", "Label2", "Label3"])],
            test_mock={
                "get_shared_labels": lambda *args, **kwargs: [
                    "Label1",
                    "Label2",
                    "Label3",
                ]
            },
        )

    @staticmethod
    def get_shared_labels(credentials: TodoistCredentials):
        try:
            api = TodoistAPI(credentials.access_token.get_secret_value())
            labels = api.get_shared_labels()
            return labels

        except Exception as e:
            raise e

    def run(
        self,
        input_data: Input,
        *,
        credentials: TodoistCredentials,
        **kwargs,
    ) -> BlockOutput:
        try:
            labels = self.get_shared_labels(credentials)
            yield "labels", labels

        except Exception as e:
            yield "error", str(e)


class TodoistRenameSharedLabelsBlock(Block):
    """Renames all instances of a shared label"""

    class Input(BlockSchema):
        credentials: TodoistCredentialsInput = TodoistCredentialsField([])
        name: str = SchemaField(description="The name of the existing label to rename")
        new_name: str = SchemaField(description="The new name for the label")

    class Output(BlockSchema):
        success: bool = SchemaField(description="Whether the rename was successful")
        error: str = SchemaField(description="Error message if the request failed")

    def __init__(self):
        super().__init__(
            id="9d63ad9a-de14-11ef-ab3f-32d3674e8b7e",
            description="Renames all instances of a shared label",
            categories={BlockCategory.PRODUCTIVITY},
            input_schema=TodoistRenameSharedLabelsBlock.Input,
            output_schema=TodoistRenameSharedLabelsBlock.Output,
            test_input={
                "credentials": TEST_CREDENTIALS_INPUT,
                "name": "OldLabel",
                "new_name": "NewLabel",
            },
            test_credentials=TEST_CREDENTIALS,
            test_output=[("success", True)],
            test_mock={"rename_shared_labels": lambda *args, **kwargs: True},
        )

    @staticmethod
    def rename_shared_labels(credentials: TodoistCredentials, name: str, new_name: str):
        try:
            api = TodoistAPI(credentials.access_token.get_secret_value())
            success = api.rename_shared_label(name=name, new_name=new_name)
            return success

        except Exception as e:
            raise e

    def run(
        self,
        input_data: Input,
        *,
        credentials: TodoistCredentials,
        **kwargs,
    ) -> BlockOutput:
        try:
            success = self.rename_shared_labels(
                credentials, input_data.name, input_data.new_name
            )
            yield "success", success

        except Exception as e:
            yield "error", str(e)


class TodoistRemoveSharedLabelsBlock(Block):
    """Removes all instances of a shared label"""

    class Input(BlockSchema):
        credentials: TodoistCredentialsInput = TodoistCredentialsField([])
        name: str = SchemaField(description="The name of the label to remove")

    class Output(BlockSchema):
        success: bool = SchemaField(description="Whether the removal was successful")
        error: str = SchemaField(description="Error message if the request failed")

    def __init__(self):
        super().__init__(
            id="a6c5cbde-de14-11ef-8863-32d3674e8b7e",
            description="Removes all instances of a shared label",
            categories={BlockCategory.PRODUCTIVITY},
            input_schema=TodoistRemoveSharedLabelsBlock.Input,
            output_schema=TodoistRemoveSharedLabelsBlock.Output,
            test_input={"credentials": TEST_CREDENTIALS_INPUT, "name": "LabelToRemove"},
            test_credentials=TEST_CREDENTIALS,
            test_output=[("success", True)],
            test_mock={"remove_shared_label": lambda *args, **kwargs: True},
        )

    @staticmethod
    def remove_shared_label(credentials: TodoistCredentials, name: str):
        try:
            api = TodoistAPI(credentials.access_token.get_secret_value())
            success = api.remove_shared_label(name=name)
            return success

        except Exception as e:
            raise e

    def run(
        self,
        input_data: Input,
        *,
        credentials: TodoistCredentials,
        **kwargs,
    ) -> BlockOutput:
        try:
            success = self.remove_shared_label(credentials, input_data.name)
            yield "success", success

        except Exception as e:
            yield "error", str(e)
||||
@@ -1,566 +0,0 @@
|
||||
from todoist_api_python.api import TodoistAPI
|
||||
from typing_extensions import Optional
|
||||
|
||||
from backend.blocks.todoist._auth import (
|
||||
TEST_CREDENTIALS,
|
||||
TEST_CREDENTIALS_INPUT,
|
||||
TodoistCredentials,
|
||||
TodoistCredentialsField,
|
||||
TodoistCredentialsInput,
|
||||
)
|
||||
from backend.blocks.todoist._types import Colors
|
||||
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
|
||||
from backend.data.model import SchemaField
|
||||
|
||||
|
||||
class TodoistListProjectsBlock(Block):
|
||||
"""Gets all projects for a Todoist user"""
|
||||
|
||||
class Input(BlockSchema):
|
||||
credentials: TodoistCredentialsInput = TodoistCredentialsField([])
|
||||
|
||||
class Output(BlockSchema):
|
||||
names_list: list[str] = SchemaField(description="List of project names")
|
||||
ids_list: list[str] = SchemaField(description="List of project IDs")
|
||||
url_list: list[str] = SchemaField(description="List of project URLs")
|
||||
complete_data: list[dict] = SchemaField(
|
||||
description="Complete project data including all fields"
|
||||
)
|
||||
error: str = SchemaField(description="Error message if request failed")
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="5f3e1d5b-6bc5-40e3-97ee-1318b3f38813",
|
||||
description="Gets all projects and their details from Todoist",
|
||||
categories={BlockCategory.PRODUCTIVITY},
|
||||
input_schema=TodoistListProjectsBlock.Input,
|
||||
output_schema=TodoistListProjectsBlock.Output,
|
||||
test_input={
|
||||
"credentials": TEST_CREDENTIALS_INPUT,
|
||||
},
|
||||
test_credentials=TEST_CREDENTIALS,
|
||||
test_output=[
|
||||
("names_list", ["Inbox"]),
|
||||
("ids_list", ["220474322"]),
|
||||
("url_list", ["https://todoist.com/showProject?id=220474322"]),
|
||||
(
|
||||
"complete_data",
|
||||
[
|
||||
{
|
||||
"id": "220474322",
|
||||
"name": "Inbox",
|
||||
"url": "https://todoist.com/showProject?id=220474322",
|
||||
}
|
||||
],
|
||||
),
|
||||
],
|
||||
test_mock={
|
||||
"get_project_lists": lambda *args, **kwargs: (
|
||||
["Inbox"],
|
||||
["220474322"],
|
||||
["https://todoist.com/showProject?id=220474322"],
|
||||
[
|
||||
{
|
||||
"id": "220474322",
|
||||
"name": "Inbox",
|
||||
"url": "https://todoist.com/showProject?id=220474322",
|
||||
}
|
||||
],
|
||||
None,
|
||||
)
|
||||
},
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def get_project_lists(credentials: TodoistCredentials):
|
||||
try:
|
||||
api = TodoistAPI(credentials.access_token.get_secret_value())
|
||||
projects = api.get_projects()
|
||||
|
||||
names = []
|
||||
ids = []
|
||||
urls = []
|
||||
complete_data = []
|
||||
|
||||
for project in projects:
|
||||
names.append(project.name)
|
||||
ids.append(project.id)
|
||||
urls.append(project.url)
|
||||
complete_data.append(project.__dict__)
|
||||
|
||||
return names, ids, urls, complete_data, None
|
||||
|
||||
except Exception as e:
|
||||
raise e
|
||||
|
||||
def run(
|
||||
self,
|
||||
input_data: Input,
|
||||
*,
|
||||
credentials: TodoistCredentials,
|
||||
**kwargs,
|
||||
) -> BlockOutput:
|
||||
try:
|
||||
names, ids, urls, data, error = self.get_project_lists(credentials)
|
||||
|
||||
if names:
|
||||
yield "names_list", names
|
||||
if ids:
|
||||
yield "ids_list", ids
|
||||
if urls:
|
||||
yield "url_list", urls
|
||||
if data:
|
||||
yield "complete_data", data
|
||||
|
||||
except Exception as e:
|
||||
yield "error", str(e)
|
||||
|
||||
|
||||
class TodoistCreateProjectBlock(Block):
|
||||
"""Creates a new project in Todoist"""
|
||||
|
||||
class Input(BlockSchema):
|
||||
credentials: TodoistCredentialsInput = TodoistCredentialsField([])
|
||||
name: str = SchemaField(description="Name of the project", advanced=False)
|
||||
parent_id: Optional[str] = SchemaField(
|
||||
description="Parent project ID", default=None, advanced=True
|
||||
)
|
||||
color: Optional[Colors] = SchemaField(
|
||||
description="Color of the project icon",
|
||||
default=Colors.charcoal,
|
||||
advanced=True,
|
||||
)
|
||||
is_favorite: bool = SchemaField(
|
||||
description="Whether the project is a favorite",
|
||||
default=False,
|
||||
advanced=True,
|
||||
)
|
||||
view_style: Optional[str] = SchemaField(
|
||||
description="Display style (list or board)", default=None, advanced=True
|
||||
)
|
||||
|
||||
class Output(BlockSchema):
|
||||
success: bool = SchemaField(description="Whether the creation was successful")
|
||||
error: str = SchemaField(description="Error message if the request failed")
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="ade60136-de14-11ef-b5e5-32d3674e8b7e",
|
||||
description="Creates a new project in Todoist",
|
||||
categories={BlockCategory.PRODUCTIVITY},
|
||||
input_schema=TodoistCreateProjectBlock.Input,
|
||||
output_schema=TodoistCreateProjectBlock.Output,
|
||||
test_input={"credentials": TEST_CREDENTIALS_INPUT, "name": "Test Project"},
|
||||
test_credentials=TEST_CREDENTIALS,
|
||||
test_output=[("success", True)],
|
||||
test_mock={"create_project": lambda *args, **kwargs: (True)},
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def create_project(
|
||||
credentials: TodoistCredentials,
|
||||
name: str,
|
||||
parent_id: Optional[str],
|
||||
color: Optional[Colors],
|
||||
is_favorite: bool,
|
||||
view_style: Optional[str],
|
||||
):
|
||||
try:
|
||||
api = TodoistAPI(credentials.access_token.get_secret_value())
|
||||
params = {"name": name, "is_favorite": is_favorite}
|
||||
|
||||
if parent_id is not None:
|
||||
params["parent_id"] = parent_id
|
||||
if color is not None:
|
||||
params["color"] = color.value
|
||||
if view_style is not None:
|
||||
params["view_style"] = view_style
|
||||
|
||||
api.add_project(**params)
|
||||
return True
|
||||
|
||||
except Exception as e:
|
||||
raise e
|
||||
|
||||
def run(
|
||||
self,
|
||||
input_data: Input,
|
||||
*,
|
||||
credentials: TodoistCredentials,
|
||||
**kwargs,
|
||||
) -> BlockOutput:
|
||||
try:
|
||||
success = self.create_project(
|
||||
credentials=credentials,
|
||||
name=input_data.name,
|
||||
parent_id=input_data.parent_id,
|
||||
color=input_data.color,
|
||||
is_favorite=input_data.is_favorite,
|
||||
view_style=input_data.view_style,
|
||||
)
|
||||
|
||||
yield "success", success
|
||||
|
||||
except Exception as e:
|
||||
yield "error", str(e)
|
||||
|
||||
|
||||
class TodoistGetProjectBlock(Block):
|
||||
"""Gets details for a specific Todoist project"""
|
||||
|
||||
class Input(BlockSchema):
|
||||
credentials: TodoistCredentialsInput = TodoistCredentialsField([])
|
||||
project_id: str = SchemaField(
|
||||
description="ID of the project to get details for", advanced=False
|
||||
)
|
||||
|
||||
class Output(BlockSchema):
|
||||
project_id: str = SchemaField(description="ID of project")
|
||||
project_name: str = SchemaField(description="Name of project")
|
||||
project_url: str = SchemaField(description="URL of project")
|
||||
complete_data: dict = SchemaField(
|
||||
description="Complete project data including all fields"
|
||||
)
|
||||
error: str = SchemaField(description="Error message if the request failed")
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="b435b5ea-de14-11ef-8b51-32d3674e8b7e",
|
||||
description="Gets details for a specific Todoist project",
|
||||
categories={BlockCategory.PRODUCTIVITY},
|
||||
input_schema=TodoistGetProjectBlock.Input,
|
||||
output_schema=TodoistGetProjectBlock.Output,
|
||||
test_input={
|
||||
"credentials": TEST_CREDENTIALS_INPUT,
|
||||
"project_id": "2203306141",
|
||||
},
|
||||
test_credentials=TEST_CREDENTIALS,
|
||||
test_output=[
|
||||
("project_id", "2203306141"),
|
||||
("project_name", "Shopping List"),
|
||||
("project_url", "https://todoist.com/showProject?id=2203306141"),
|
||||
(
|
||||
"complete_data",
|
||||
{
|
||||
"id": "2203306141",
|
||||
"name": "Shopping List",
|
||||
"url": "https://todoist.com/showProject?id=2203306141",
|
||||
},
|
||||
),
|
||||
],
|
||||
test_mock={
|
||||
"get_project": lambda *args, **kwargs: (
|
||||
"2203306141",
|
||||
"Shopping List",
|
||||
"https://todoist.com/showProject?id=2203306141",
|
||||
{
|
||||
"id": "2203306141",
|
||||
"name": "Shopping List",
|
||||
"url": "https://todoist.com/showProject?id=2203306141",
|
||||
},
|
||||
)
|
||||
},
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def get_project(credentials: TodoistCredentials, project_id: str):
|
||||
try:
|
||||
api = TodoistAPI(credentials.access_token.get_secret_value())
|
||||
project = api.get_project(project_id=project_id)
|
||||
|
||||
return project.id, project.name, project.url, project.__dict__
|
||||
|
||||
except Exception as e:
|
||||
raise e
|
||||
|
||||
def run(
|
||||
self,
|
||||
input_data: Input,
|
||||
*,
|
||||
credentials: TodoistCredentials,
|
||||
**kwargs,
|
||||
) -> BlockOutput:
|
||||
try:
|
||||
project_id, project_name, project_url, data = self.get_project(
|
||||
credentials=credentials, project_id=input_data.project_id
|
||||
)
|
||||
|
||||
if project_id:
|
||||
yield "project_id", project_id
|
||||
if project_name:
|
||||
yield "project_name", project_name
|
||||
if project_url:
|
||||
yield "project_url", project_url
|
||||
if data:
|
||||
yield "complete_data", data
|
||||
|
||||
except Exception as e:
|
||||
yield "error", str(e)
|
||||
|
||||
|
||||
class TodoistUpdateProjectBlock(Block):
|
||||
"""Updates an existing project in Todoist"""
|
||||
|
||||
class Input(BlockSchema):
|
||||
credentials: TodoistCredentialsInput = TodoistCredentialsField([])
|
||||
project_id: str = SchemaField(
|
||||
description="ID of project to update", advanced=False
|
||||
)
|
||||
name: Optional[str] = SchemaField(
|
||||
description="New name for the project", default=None, advanced=False
|
||||
)
|
||||
color: Optional[Colors] = SchemaField(
|
||||
description="New color for the project icon", default=None, advanced=True
|
||||
)
|
||||
is_favorite: Optional[bool] = SchemaField(
|
||||
description="Whether the project should be a favorite",
|
||||
default=None,
|
||||
advanced=True,
|
||||
)
|
||||
view_style: Optional[str] = SchemaField(
|
||||
description="Display style (list or board)", default=None, advanced=True
|
||||
)
|
||||
|
||||
class Output(BlockSchema):
|
||||
success: bool = SchemaField(description="Whether the update was successful")
|
||||
error: str = SchemaField(description="Error message if the request failed")
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="ba41a20a-de14-11ef-91d7-32d3674e8b7e",
|
||||
description="Updates an existing project in Todoist",
|
||||
categories={BlockCategory.PRODUCTIVITY},
|
||||
input_schema=TodoistUpdateProjectBlock.Input,
|
||||
output_schema=TodoistUpdateProjectBlock.Output,
|
||||
test_input={
|
||||
"credentials": TEST_CREDENTIALS_INPUT,
|
||||
"project_id": "2203306141",
|
||||
"name": "Things To Buy",
|
||||
},
|
||||
test_credentials=TEST_CREDENTIALS,
|
||||
test_output=[("success", True)],
|
||||
test_mock={"update_project": lambda *args, **kwargs: (True)},
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def update_project(
|
||||
credentials: TodoistCredentials,
|
||||
project_id: str,
|
||||
name: Optional[str],
|
||||
color: Optional[Colors],
|
||||
is_favorite: Optional[bool],
|
||||
view_style: Optional[str],
|
||||
):
|
||||
try:
|
||||
api = TodoistAPI(credentials.access_token.get_secret_value())
|
||||
params = {}
|
||||
|
||||
if name is not None:
|
||||
params["name"] = name
|
||||
if color is not None:
|
||||
params["color"] = color.value
|
||||
if is_favorite is not None:
|
||||
params["is_favorite"] = is_favorite
|
||||
if view_style is not None:
|
||||
            params["view_style"] = view_style

            api.update_project(project_id=project_id, **params)
            return True

        except Exception as e:
            raise e

    def run(
        self,
        input_data: Input,
        *,
        credentials: TodoistCredentials,
        **kwargs,
    ) -> BlockOutput:
        try:
            success = self.update_project(
                credentials=credentials,
                project_id=input_data.project_id,
                name=input_data.name,
                color=input_data.color,
                is_favorite=input_data.is_favorite,
                view_style=input_data.view_style,
            )

            yield "success", success

        except Exception as e:
            yield "error", str(e)


class TodoistDeleteProjectBlock(Block):
    """Deletes a project and all of its sections and tasks"""

    class Input(BlockSchema):
        credentials: TodoistCredentialsInput = TodoistCredentialsField([])
        project_id: str = SchemaField(
            description="ID of project to delete", advanced=False
        )

    class Output(BlockSchema):
        success: bool = SchemaField(description="Whether the deletion was successful")
        error: str = SchemaField(description="Error message if the request failed")

    def __init__(self):
        super().__init__(
            id="c2893acc-de14-11ef-a113-32d3674e8b7e",
            description="Deletes a Todoist project and all its contents",
            categories={BlockCategory.PRODUCTIVITY},
            input_schema=TodoistDeleteProjectBlock.Input,
            output_schema=TodoistDeleteProjectBlock.Output,
            test_input={
                "credentials": TEST_CREDENTIALS_INPUT,
                "project_id": "2203306141",
            },
            test_credentials=TEST_CREDENTIALS,
            test_output=[("success", True)],
            test_mock={"delete_project": lambda *args, **kwargs: True},
        )

    @staticmethod
    def delete_project(credentials: TodoistCredentials, project_id: str):
        try:
            api = TodoistAPI(credentials.access_token.get_secret_value())
            success = api.delete_project(project_id=project_id)
            return success

        except Exception as e:
            raise e

    def run(
        self,
        input_data: Input,
        *,
        credentials: TodoistCredentials,
        **kwargs,
    ) -> BlockOutput:
        try:
            success = self.delete_project(
                credentials=credentials, project_id=input_data.project_id
            )

            yield "success", success

        except Exception as e:
            yield "error", str(e)


class TodoistListCollaboratorsBlock(Block):
    """Gets all collaborators for a Todoist project"""

    class Input(BlockSchema):
        credentials: TodoistCredentialsInput = TodoistCredentialsField([])
        project_id: str = SchemaField(
            description="ID of the project to get collaborators for", advanced=False
        )

    class Output(BlockSchema):
        collaborator_ids: list[str] = SchemaField(
            description="List of collaborator IDs"
        )
        collaborator_names: list[str] = SchemaField(
            description="List of collaborator names"
        )
        collaborator_emails: list[str] = SchemaField(
            description="List of collaborator email addresses"
        )
        complete_data: list[dict] = SchemaField(
            description="Complete collaborator data including all fields"
        )
        error: str = SchemaField(description="Error message if the request failed")

    def __init__(self):
        super().__init__(
            id="c99c804e-de14-11ef-9f47-32d3674e8b7e",
            description="Gets all collaborators for a specific Todoist project",
            categories={BlockCategory.PRODUCTIVITY},
            input_schema=TodoistListCollaboratorsBlock.Input,
            output_schema=TodoistListCollaboratorsBlock.Output,
            test_input={
                "credentials": TEST_CREDENTIALS_INPUT,
                "project_id": "2203306141",
            },
            test_credentials=TEST_CREDENTIALS,
            test_output=[
                ("collaborator_ids", ["2671362", "2671366"]),
                ("collaborator_names", ["Alice", "Bob"]),
                ("collaborator_emails", ["alice@example.com", "bob@example.com"]),
                (
                    "complete_data",
                    [
                        {
                            "id": "2671362",
                            "name": "Alice",
                            "email": "alice@example.com",
                        },
                        {"id": "2671366", "name": "Bob", "email": "bob@example.com"},
                    ],
                ),
            ],
            test_mock={
                "get_collaborators": lambda *args, **kwargs: (
                    ["2671362", "2671366"],
                    ["Alice", "Bob"],
                    ["alice@example.com", "bob@example.com"],
                    [
                        {
                            "id": "2671362",
                            "name": "Alice",
                            "email": "alice@example.com",
                        },
                        {"id": "2671366", "name": "Bob", "email": "bob@example.com"},
                    ],
                )
            },
        )

    @staticmethod
    def get_collaborators(credentials: TodoistCredentials, project_id: str):
        try:
            api = TodoistAPI(credentials.access_token.get_secret_value())
            collaborators = api.get_collaborators(project_id=project_id)

            ids = []
            names = []
            emails = []
            complete_data = []

            for collaborator in collaborators:
                ids.append(collaborator.id)
                names.append(collaborator.name)
                emails.append(collaborator.email)
                complete_data.append(collaborator.__dict__)

            return ids, names, emails, complete_data

        except Exception as e:
            raise e

    def run(
        self,
        input_data: Input,
        *,
        credentials: TodoistCredentials,
        **kwargs,
    ) -> BlockOutput:
        try:
            ids, names, emails, data = self.get_collaborators(
                credentials=credentials, project_id=input_data.project_id
            )

            if ids:
                yield "collaborator_ids", ids
            if names:
                yield "collaborator_names", names
            if emails:
                yield "collaborator_emails", emails
            if data:
                yield "complete_data", data

        except Exception as e:
            yield "error", str(e)
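
For orientation (not part of the diff itself): a minimal sketch of exercising this block by hand, using only the test fixtures defined above. Real executions go through the AutoGPT graph executor rather than calling run() directly.

# Sketch only: drive TodoistListCollaboratorsBlock with its own test fixtures.
block = TodoistListCollaboratorsBlock()
for name, value in block.run(
    TodoistListCollaboratorsBlock.Input(
        credentials=TEST_CREDENTIALS_INPUT, project_id="2203306141"
    ),
    credentials=TEST_CREDENTIALS,
):
    print(name, value)  # e.g. ("collaborator_ids", ["2671362", "2671366"])
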
@@ -1,306 +0,0 @@
from todoist_api_python.api import TodoistAPI
from typing_extensions import Optional

from backend.blocks.todoist._auth import (
    TEST_CREDENTIALS,
    TEST_CREDENTIALS_INPUT,
    TodoistCredentials,
    TodoistCredentialsField,
    TodoistCredentialsInput,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField


class TodoistListSectionsBlock(Block):
    """Gets all sections for a Todoist project"""

    class Input(BlockSchema):
        credentials: TodoistCredentialsInput = TodoistCredentialsField([])
        project_id: Optional[str] = SchemaField(
            description="Optional project ID to filter sections"
        )

    class Output(BlockSchema):
        names_list: list[str] = SchemaField(description="List of section names")
        ids_list: list[str] = SchemaField(description="List of section IDs")
        complete_data: list[dict] = SchemaField(
            description="Complete section data including all fields"
        )
        error: str = SchemaField(description="Error message if the request failed")

    def __init__(self):
        super().__init__(
            id="d6a116d8-de14-11ef-a94c-32d3674e8b7e",
            description="Gets all sections and their details from Todoist",
            categories={BlockCategory.PRODUCTIVITY},
            input_schema=TodoistListSectionsBlock.Input,
            output_schema=TodoistListSectionsBlock.Output,
            test_input={
                "credentials": TEST_CREDENTIALS_INPUT,
                "project_id": "2203306141",
            },
            test_credentials=TEST_CREDENTIALS,
            test_output=[
                ("names_list", ["Groceries"]),
                ("ids_list", ["7025"]),
                (
                    "complete_data",
                    [
                        {
                            "id": "7025",
                            "project_id": "2203306141",
                            "order": 1,
                            "name": "Groceries",
                        }
                    ],
                ),
            ],
            test_mock={
                "get_section_lists": lambda *args, **kwargs: (
                    ["Groceries"],
                    ["7025"],
                    [
                        {
                            "id": "7025",
                            "project_id": "2203306141",
                            "order": 1,
                            "name": "Groceries",
                        }
                    ],
                )
            },
        )

    @staticmethod
    def get_section_lists(
        credentials: TodoistCredentials, project_id: Optional[str] = None
    ):
        try:
            api = TodoistAPI(credentials.access_token.get_secret_value())
            sections = api.get_sections(project_id=project_id)

            names = []
            ids = []
            complete_data = []

            for section in sections:
                names.append(section.name)
                ids.append(section.id)
                complete_data.append(section.__dict__)

            return names, ids, complete_data

        except Exception as e:
            raise e

    def run(
        self,
        input_data: Input,
        *,
        credentials: TodoistCredentials,
        **kwargs,
    ) -> BlockOutput:
        try:
            names, ids, data = self.get_section_lists(
                credentials, input_data.project_id
            )

            if names:
                yield "names_list", names
            if ids:
                yield "ids_list", ids
            if data:
                yield "complete_data", data

        except Exception as e:
            yield "error", str(e)


# Error in the official Todoist SDK. Will add this block using sync_api.
# class TodoistCreateSectionBlock(Block):
#     """Creates a new section in a Todoist project"""

#     class Input(BlockSchema):
#         credentials: TodoistCredentialsInput = TodoistCredentialsField([])
#         name: str = SchemaField(description="Section name")
#         project_id: str = SchemaField(description="Project ID this section should belong to")
#         order: Optional[int] = SchemaField(description="Optional order among other sections", default=None)

#     class Output(BlockSchema):
#         success: bool = SchemaField(description="Whether section was successfully created")
#         error: str = SchemaField(description="Error message if the request failed")

#     def __init__(self):
#         super().__init__(
#             id="e3025cfc-de14-11ef-b9f2-32d3674e8b7e",
#             description="Creates a new section in a Todoist project",
#             categories={BlockCategory.PRODUCTIVITY},
#             input_schema=TodoistCreateSectionBlock.Input,
#             output_schema=TodoistCreateSectionBlock.Output,
#             test_input={
#                 "credentials": TEST_CREDENTIALS_INPUT,
#                 "name": "Groceries",
#                 "project_id": "2203306141"
#             },
#             test_credentials=TEST_CREDENTIALS,
#             test_output=[
#                 ("success", True)
#             ],
#             test_mock={
#                 "create_section": lambda *args, **kwargs: (
#                     {"id": "7025", "project_id": "2203306141", "order": 1, "name": "Groceries"},
#                 )
#             },
#         )

#     @staticmethod
#     def create_section(credentials: TodoistCredentials, name: str, project_id: str, order: Optional[int] = None):
#         try:
#             api = TodoistAPI(credentials.access_token.get_secret_value())
#             section = api.add_section(name=name, project_id=project_id, order=order)
#             return section.__dict__

#         except Exception as e:
#             raise e

#     def run(
#         self,
#         input_data: Input,
#         *,
#         credentials: TodoistCredentials,
#         **kwargs,
#     ) -> BlockOutput:
#         try:
#             section_data = self.create_section(
#                 credentials,
#                 input_data.name,
#                 input_data.project_id,
#                 input_data.order
#             )

#             if section_data:
#                 yield "success", True

#         except Exception as e:
#             yield "error", str(e)
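
Until the SDK bug noted above is resolved, section creation can plausibly be done against Todoist's public REST API directly. A hedged sketch only; the endpoint and payload follow Todoist's REST v2 documentation, not anything defined in this repo:

# Hedged sketch: create a section via the Todoist REST v2 endpoint directly,
# sidestepping the broken SDK helper. Endpoint/fields per the public Todoist
# docs; not defined anywhere in this repo.
import requests

def create_section_raw(access_token: str, name: str, project_id: str) -> dict:
    response = requests.post(
        "https://api.todoist.com/rest/v2/sections",
        headers={"Authorization": f"Bearer {access_token}"},
        json={"name": name, "project_id": project_id},
    )
    response.raise_for_status()
    return response.json()  # e.g. {"id": "7025", "name": "Groceries", ...}
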

class TodoistGetSectionBlock(Block):
    """Gets a single section from Todoist by ID"""

    class Input(BlockSchema):
        credentials: TodoistCredentialsInput = TodoistCredentialsField([])
        section_id: str = SchemaField(description="ID of section to fetch")

    class Output(BlockSchema):
        id: str = SchemaField(description="ID of section")
        project_id: str = SchemaField(description="Project ID the section belongs to")
        order: int = SchemaField(description="Order of the section")
        name: str = SchemaField(description="Name of the section")
        error: str = SchemaField(description="Error message if the request failed")

    def __init__(self):
        super().__init__(
            id="ea5580e2-de14-11ef-a5d3-32d3674e8b7e",
            description="Gets a single section by ID from Todoist",
            categories={BlockCategory.PRODUCTIVITY},
            input_schema=TodoistGetSectionBlock.Input,
            output_schema=TodoistGetSectionBlock.Output,
            test_input={"credentials": TEST_CREDENTIALS_INPUT, "section_id": "7025"},
            test_credentials=TEST_CREDENTIALS,
            test_output=[
                ("id", "7025"),
                ("project_id", "2203306141"),
                ("order", 1),
                ("name", "Groceries"),
            ],
            test_mock={
                "get_section": lambda *args, **kwargs: {
                    "id": "7025",
                    "project_id": "2203306141",
                    "order": 1,
                    "name": "Groceries",
                }
            },
        )

    @staticmethod
    def get_section(credentials: TodoistCredentials, section_id: str):
        try:
            api = TodoistAPI(credentials.access_token.get_secret_value())
            section = api.get_section(section_id=section_id)
            return section.__dict__

        except Exception as e:
            raise e

    def run(
        self,
        input_data: Input,
        *,
        credentials: TodoistCredentials,
        **kwargs,
    ) -> BlockOutput:
        try:
            section_data = self.get_section(credentials, input_data.section_id)

            if section_data:
                yield "id", section_data["id"]
                yield "project_id", section_data["project_id"]
                yield "order", section_data["order"]
                yield "name", section_data["name"]

        except Exception as e:
            yield "error", str(e)


class TodoistDeleteSectionBlock(Block):
    """Deletes a section and all its tasks from Todoist"""

    class Input(BlockSchema):
        credentials: TodoistCredentialsInput = TodoistCredentialsField([])
        section_id: str = SchemaField(description="ID of section to delete")

    class Output(BlockSchema):
        success: bool = SchemaField(
            description="Whether section was successfully deleted"
        )
        error: str = SchemaField(description="Error message if the request failed")

    def __init__(self):
        super().__init__(
            id="f0e52eee-de14-11ef-9b12-32d3674e8b7e",
            description="Deletes a section and all its tasks from Todoist",
            categories={BlockCategory.PRODUCTIVITY},
            input_schema=TodoistDeleteSectionBlock.Input,
            output_schema=TodoistDeleteSectionBlock.Output,
            test_input={"credentials": TEST_CREDENTIALS_INPUT, "section_id": "7025"},
            test_credentials=TEST_CREDENTIALS,
            test_output=[("success", True)],
            test_mock={"delete_section": lambda *args, **kwargs: True},
        )

    @staticmethod
    def delete_section(credentials: TodoistCredentials, section_id: str):
        try:
            api = TodoistAPI(credentials.access_token.get_secret_value())
            success = api.delete_section(section_id=section_id)
            return success

        except Exception as e:
            raise e

    def run(
        self,
        input_data: Input,
        *,
        credentials: TodoistCredentials,
        **kwargs,
    ) -> BlockOutput:
        try:
            success = self.delete_section(credentials, input_data.section_id)
            yield "success", success

        except Exception as e:
            yield "error", str(e)
@@ -1,660 +0,0 @@
from datetime import datetime

from todoist_api_python.api import TodoistAPI
from todoist_api_python.models import Task
from typing_extensions import Optional

from backend.blocks.todoist._auth import (
    TEST_CREDENTIALS,
    TEST_CREDENTIALS_INPUT,
    TodoistCredentials,
    TodoistCredentialsField,
    TodoistCredentialsInput,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField


class TodoistCreateTaskBlock(Block):
    """Creates a new task in a Todoist project"""

    class Input(BlockSchema):
        credentials: TodoistCredentialsInput = TodoistCredentialsField([])
        content: str = SchemaField(description="Task content", advanced=False)
        description: Optional[str] = SchemaField(
            description="Task description", default=None, advanced=False
        )
        project_id: Optional[str] = SchemaField(
            description="Project ID this task should belong to",
            default=None,
            advanced=False,
        )
        section_id: Optional[str] = SchemaField(
            description="Section ID this task should belong to",
            default=None,
            advanced=False,
        )
        parent_id: Optional[str] = SchemaField(
            description="Parent task ID", default=None, advanced=True
        )
        order: Optional[int] = SchemaField(
            description="Optional order among other tasks (non-zero integer used by clients to sort tasks under the same parent)",
            default=None,
            advanced=True,
        )
        labels: Optional[list[str]] = SchemaField(
            description="Task labels", default=None, advanced=True
        )
        priority: Optional[int] = SchemaField(
            description="Task priority from 1 (normal) to 4 (urgent)",
            default=None,
            advanced=True,
        )
        due_date: Optional[datetime] = SchemaField(
            description="Due date in YYYY-MM-DD format", advanced=True, default=None
        )
        deadline_date: Optional[datetime] = SchemaField(
            description="Specific date in YYYY-MM-DD format relative to user's timezone",
            default=None,
            advanced=True,
        )
        assignee_id: Optional[str] = SchemaField(
            description="Responsible user ID", default=None, advanced=True
        )
        duration_unit: Optional[str] = SchemaField(
            description="Task duration unit (minute/day)", default=None, advanced=True
        )
        duration: Optional[int] = SchemaField(
            description="Task duration amount. You need to select the duration unit first.",
            depends_on=["duration_unit"],
            default=None,
            advanced=True,
        )

    class Output(BlockSchema):
        id: str = SchemaField(description="Task ID")
        url: str = SchemaField(description="Task URL")
        complete_data: dict = SchemaField(
            description="Complete task data as dictionary"
        )
        error: str = SchemaField(description="Error message if request failed")

    def __init__(self):
        super().__init__(
            id="fde4f458-de14-11ef-bf0c-32d3674e8b7e",
            description="Creates a new task in a Todoist project",
            categories={BlockCategory.PRODUCTIVITY},
            input_schema=TodoistCreateTaskBlock.Input,
            output_schema=TodoistCreateTaskBlock.Output,
            test_input={
                "credentials": TEST_CREDENTIALS_INPUT,
                "content": "Buy groceries",
                "project_id": "2203306141",
                "priority": 4,
            },
            test_credentials=TEST_CREDENTIALS,
            test_output=[
                ("id", "2995104339"),
                ("url", "https://todoist.com/showTask?id=2995104339"),
                (
                    "complete_data",
                    {
                        "id": "2995104339",
                        "project_id": "2203306141",
                        "url": "https://todoist.com/showTask?id=2995104339",
                    },
                ),
            ],
            test_mock={
                "create_task": lambda *args, **kwargs: (
                    "2995104339",
                    "https://todoist.com/showTask?id=2995104339",
                    {
                        "id": "2995104339",
                        "project_id": "2203306141",
                        "url": "https://todoist.com/showTask?id=2995104339",
                    },
                )
            },
        )

    @staticmethod
    def create_task(credentials: TodoistCredentials, content: str, **kwargs):
        try:
            api = TodoistAPI(credentials.access_token.get_secret_value())
            task = api.add_task(content=content, **kwargs)
            task_dict = Task.to_dict(task)
            return task.id, task.url, task_dict
        except Exception as e:
            raise e

    def run(
        self,
        input_data: Input,
        *,
        credentials: TodoistCredentials,
        **kwargs,
    ) -> BlockOutput:
        try:
            due_date = (
                input_data.due_date.strftime("%Y-%m-%d")
                if input_data.due_date
                else None
            )
            deadline_date = (
                input_data.deadline_date.strftime("%Y-%m-%d")
                if input_data.deadline_date
                else None
            )

            task_args = {
                "description": input_data.description,
                "project_id": input_data.project_id,
                "section_id": input_data.section_id,
                "parent_id": input_data.parent_id,
                "order": input_data.order,
                "labels": input_data.labels,
                "priority": input_data.priority,
                "due_date": due_date,
                "deadline_date": deadline_date,
                "assignee_id": input_data.assignee_id,
                "duration": input_data.duration,
                "duration_unit": input_data.duration_unit,
            }

            id, url, complete_data = self.create_task(
                credentials,
                input_data.content,
                **{k: v for k, v in task_args.items() if v is not None},
            )

            yield "id", id
            yield "url", url
            yield "complete_data", complete_data

        except Exception as e:
            yield "error", str(e)
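
The `**{k: v for ... if v is not None}` idiom in run() keeps unset optional inputs from being sent at all, so the API's own defaults apply; the same pattern recurs in the update and list blocks below. A toy illustration:

# Toy illustration of the None-filtering idiom used in run() above.
task_args = {"priority": 4, "labels": None, "description": None}
sent = {k: v for k, v in task_args.items() if v is not None}
assert sent == {"priority": 4}  # only explicitly-set fields reach the API
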

class TodoistGetTasksBlock(Block):
    """Get active tasks from Todoist"""

    class Input(BlockSchema):
        credentials: TodoistCredentialsInput = TodoistCredentialsField([])
        project_id: Optional[str] = SchemaField(
            description="Filter tasks by project ID", default=None, advanced=False
        )
        section_id: Optional[str] = SchemaField(
            description="Filter tasks by section ID", default=None, advanced=True
        )
        label: Optional[str] = SchemaField(
            description="Filter tasks by label name", default=None, advanced=True
        )
        filter: Optional[str] = SchemaField(
            description="Filter by any supported filter. See how to use filters or create your own here: https://todoist.com/help/articles/introduction-to-filters-V98wIH",
            default=None,
            advanced=True,
        )
        lang: Optional[str] = SchemaField(
            description="IETF language tag for filter language", default=None
        )
        ids: Optional[list[str]] = SchemaField(
            description="List of task IDs to retrieve", default=None, advanced=False
        )

    class Output(BlockSchema):
        ids: list[str] = SchemaField(description="Task IDs")
        urls: list[str] = SchemaField(description="Task URLs")
        complete_data: list[dict] = SchemaField(
            description="Complete task data as dictionary"
        )
        error: str = SchemaField(description="Error message if request failed")

    def __init__(self):
        super().__init__(
            id="0b706e86-de15-11ef-a113-32d3674e8b7e",
            description="Get active tasks from Todoist",
            categories={BlockCategory.PRODUCTIVITY},
            input_schema=TodoistGetTasksBlock.Input,
            output_schema=TodoistGetTasksBlock.Output,
            test_input={
                "credentials": TEST_CREDENTIALS_INPUT,
                "project_id": "2203306141",
            },
            test_credentials=TEST_CREDENTIALS,
            test_output=[
                ("ids", ["2995104339"]),
                ("urls", ["https://todoist.com/showTask?id=2995104339"]),
                (
                    "complete_data",
                    [
                        {
                            "id": "2995104339",
                            "project_id": "2203306141",
                            "url": "https://todoist.com/showTask?id=2995104339",
                            "is_completed": False,
                        }
                    ],
                ),
            ],
            test_mock={
                "get_tasks": lambda *args, **kwargs: [
                    {
                        "id": "2995104339",
                        "project_id": "2203306141",
                        "url": "https://todoist.com/showTask?id=2995104339",
                        "is_completed": False,
                    }
                ]
            },
        )

    @staticmethod
    def get_tasks(credentials: TodoistCredentials, **kwargs):
        try:
            api = TodoistAPI(credentials.access_token.get_secret_value())
            tasks = api.get_tasks(**kwargs)
            return [Task.to_dict(task) for task in tasks]
        except Exception as e:
            raise e

    def run(
        self,
        input_data: Input,
        *,
        credentials: TodoistCredentials,
        **kwargs,
    ) -> BlockOutput:
        try:
            task_filters = {
                "project_id": input_data.project_id,
                "section_id": input_data.section_id,
                "label": input_data.label,
                "filter": input_data.filter,
                "lang": input_data.lang,
                "ids": input_data.ids,
            }

            tasks = self.get_tasks(
                credentials, **{k: v for k, v in task_filters.items() if v is not None}
            )

            yield "ids", [task["id"] for task in tasks]
            yield "urls", [task["url"] for task in tasks]
            yield "complete_data", tasks

        except Exception as e:
            yield "error", str(e)
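
The `filter` input takes Todoist's own query syntax (see the help link in the field description). Hypothetical example values, not taken from this repo's tests:

# Hypothetical filter inputs for TodoistGetTasksBlock; syntax per Todoist's
# filter documentation linked above, values are illustrative only.
task_filters = {
    "filter": "today & @work",  # tasks due today that carry the @work label
    "lang": "en",               # language the filter string is written in
}
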

class TodoistGetTaskBlock(Block):
    """Get an active task from Todoist"""

    class Input(BlockSchema):
        credentials: TodoistCredentialsInput = TodoistCredentialsField([])
        task_id: str = SchemaField(description="Task ID to retrieve")

    class Output(BlockSchema):
        project_id: str = SchemaField(description="Project ID containing the task")
        url: str = SchemaField(description="Task URL")
        complete_data: dict = SchemaField(
            description="Complete task data as dictionary"
        )
        error: str = SchemaField(description="Error message if request failed")

    def __init__(self):
        super().__init__(
            id="16d7dc8c-de15-11ef-8ace-32d3674e8b7e",
            description="Get an active task from Todoist",
            categories={BlockCategory.PRODUCTIVITY},
            input_schema=TodoistGetTaskBlock.Input,
            output_schema=TodoistGetTaskBlock.Output,
            test_input={"credentials": TEST_CREDENTIALS_INPUT, "task_id": "2995104339"},
            test_credentials=TEST_CREDENTIALS,
            test_output=[
                ("project_id", "2203306141"),
                ("url", "https://todoist.com/showTask?id=2995104339"),
                (
                    "complete_data",
                    {
                        "id": "2995104339",
                        "project_id": "2203306141",
                        "url": "https://todoist.com/showTask?id=2995104339",
                    },
                ),
            ],
            test_mock={
                "get_task": lambda *args, **kwargs: {
                    "project_id": "2203306141",
                    "id": "2995104339",
                    "url": "https://todoist.com/showTask?id=2995104339",
                }
            },
        )

    @staticmethod
    def get_task(credentials: TodoistCredentials, task_id: str):
        try:
            api = TodoistAPI(credentials.access_token.get_secret_value())
            task = api.get_task(task_id=task_id)
            return Task.to_dict(task)
        except Exception as e:
            raise e

    def run(
        self,
        input_data: Input,
        *,
        credentials: TodoistCredentials,
        **kwargs,
    ) -> BlockOutput:
        try:
            task_data = self.get_task(credentials, input_data.task_id)

            if task_data:
                yield "project_id", task_data["project_id"]
                yield "url", task_data["url"]
                yield "complete_data", task_data

        except Exception as e:
            yield "error", str(e)


class TodoistUpdateTaskBlock(Block):
    """Updates an existing task in Todoist"""

    class Input(BlockSchema):
        credentials: TodoistCredentialsInput = TodoistCredentialsField([])
        task_id: str = SchemaField(description="Task ID to update")
        content: str = SchemaField(description="Task content", advanced=False)
        description: Optional[str] = SchemaField(
            description="Task description", default=None, advanced=False
        )
        project_id: Optional[str] = SchemaField(
            description="Project ID this task should belong to",
            default=None,
            advanced=False,
        )
        section_id: Optional[str] = SchemaField(
            description="Section ID this task should belong to",
            default=None,
            advanced=False,
        )
        parent_id: Optional[str] = SchemaField(
            description="Parent task ID", default=None, advanced=True
        )
        order: Optional[int] = SchemaField(
            description="Optional order among other tasks (non-zero integer used by clients to sort tasks under the same parent)",
            default=None,
            advanced=True,
        )
        labels: Optional[list[str]] = SchemaField(
            description="Task labels", default=None, advanced=True
        )
        priority: Optional[int] = SchemaField(
            description="Task priority from 1 (normal) to 4 (urgent)",
            default=None,
            advanced=True,
        )
        due_date: Optional[datetime] = SchemaField(
            description="Due date in YYYY-MM-DD format", advanced=True, default=None
        )
        deadline_date: Optional[datetime] = SchemaField(
            description="Specific date in YYYY-MM-DD format relative to user's timezone",
            default=None,
            advanced=True,
        )
        assignee_id: Optional[str] = SchemaField(
            description="Responsible user ID", default=None, advanced=True
        )
        duration_unit: Optional[str] = SchemaField(
            description="Task duration unit (minute/day)", default=None, advanced=True
        )
        duration: Optional[int] = SchemaField(
            description="Task duration amount. You need to select the duration unit first.",
            depends_on=["duration_unit"],
            default=None,
            advanced=True,
        )

    class Output(BlockSchema):
        success: bool = SchemaField(description="Whether the update was successful")
        error: str = SchemaField(description="Error message if request failed")

    def __init__(self):
        super().__init__(
            id="1eee6d32-de15-11ef-a2ff-32d3674e8b7e",
            description="Updates an existing task in Todoist",
            categories={BlockCategory.PRODUCTIVITY},
            input_schema=TodoistUpdateTaskBlock.Input,
            output_schema=TodoistUpdateTaskBlock.Output,
            test_input={
                "credentials": TEST_CREDENTIALS_INPUT,
                "task_id": "2995104339",
                "content": "Buy Coffee",
            },
            test_credentials=TEST_CREDENTIALS,
            test_output=[("success", True)],
            test_mock={"update_task": lambda *args, **kwargs: True},
        )

    @staticmethod
    def update_task(credentials: TodoistCredentials, task_id: str, **kwargs):
        try:
            api = TodoistAPI(credentials.access_token.get_secret_value())
            is_success = api.update_task(task_id=task_id, **kwargs)
            return is_success
        except Exception as e:
            raise e

    def run(
        self,
        input_data: Input,
        *,
        credentials: TodoistCredentials,
        **kwargs,
    ) -> BlockOutput:
        try:
            due_date = (
                input_data.due_date.strftime("%Y-%m-%d")
                if input_data.due_date
                else None
            )
            deadline_date = (
                input_data.deadline_date.strftime("%Y-%m-%d")
                if input_data.deadline_date
                else None
            )

            task_updates = {}
            if input_data.content is not None:
                task_updates["content"] = input_data.content
            if input_data.description is not None:
                task_updates["description"] = input_data.description
            if input_data.project_id is not None:
                task_updates["project_id"] = input_data.project_id
            if input_data.section_id is not None:
                task_updates["section_id"] = input_data.section_id
            if input_data.parent_id is not None:
                task_updates["parent_id"] = input_data.parent_id
            if input_data.order is not None:
                task_updates["order"] = input_data.order
            if input_data.labels is not None:
                task_updates["labels"] = input_data.labels
            if input_data.priority is not None:
                task_updates["priority"] = input_data.priority
            if due_date is not None:
                task_updates["due_date"] = due_date
            if deadline_date is not None:
                task_updates["deadline_date"] = deadline_date
            if input_data.assignee_id is not None:
                task_updates["assignee_id"] = input_data.assignee_id
            if input_data.duration is not None:
                task_updates["duration"] = input_data.duration
            if input_data.duration_unit is not None:
                task_updates["duration_unit"] = input_data.duration_unit

            self.update_task(
                credentials,
                input_data.task_id,
                **{k: v for k, v in task_updates.items() if v is not None},
            )

            yield "success", True

        except Exception as e:
            yield "error", str(e)

class TodoistCloseTaskBlock(Block):
    """Closes a task in Todoist"""

    class Input(BlockSchema):
        credentials: TodoistCredentialsInput = TodoistCredentialsField([])
        task_id: str = SchemaField(description="Task ID to close")

    class Output(BlockSchema):
        success: bool = SchemaField(
            description="Whether the task was successfully closed"
        )
        error: str = SchemaField(description="Error message if request failed")

    def __init__(self):
        super().__init__(
            id="29fac798-de15-11ef-b839-32d3674e8b7e",
            description="Closes a task in Todoist",
            categories={BlockCategory.PRODUCTIVITY},
            input_schema=TodoistCloseTaskBlock.Input,
            output_schema=TodoistCloseTaskBlock.Output,
            test_input={"credentials": TEST_CREDENTIALS_INPUT, "task_id": "2995104339"},
            test_credentials=TEST_CREDENTIALS,
            test_output=[("success", True)],
            test_mock={"close_task": lambda *args, **kwargs: True},
        )

    @staticmethod
    def close_task(credentials: TodoistCredentials, task_id: str):
        try:
            api = TodoistAPI(credentials.access_token.get_secret_value())
            is_success = api.close_task(task_id=task_id)
            return is_success
        except Exception as e:
            raise e

    def run(
        self,
        input_data: Input,
        *,
        credentials: TodoistCredentials,
        **kwargs,
    ) -> BlockOutput:
        try:
            is_success = self.close_task(credentials, input_data.task_id)
            yield "success", is_success

        except Exception as e:
            yield "error", str(e)


class TodoistReopenTaskBlock(Block):
    """Reopens a task in Todoist"""

    class Input(BlockSchema):
        credentials: TodoistCredentialsInput = TodoistCredentialsField([])
        task_id: str = SchemaField(description="Task ID to reopen")

    class Output(BlockSchema):
        success: bool = SchemaField(
            description="Whether the task was successfully reopened"
        )
        error: str = SchemaField(description="Error message if request failed")

    def __init__(self):
        super().__init__(
            id="2e6bf6f8-de15-11ef-ae7c-32d3674e8b7e",
            description="Reopens a task in Todoist",
            categories={BlockCategory.PRODUCTIVITY},
            input_schema=TodoistReopenTaskBlock.Input,
            output_schema=TodoistReopenTaskBlock.Output,
            test_input={"credentials": TEST_CREDENTIALS_INPUT, "task_id": "2995104339"},
            test_credentials=TEST_CREDENTIALS,
            test_output=[
                ("success", True),
            ],
            test_mock={"reopen_task": lambda *args, **kwargs: True},
        )

    @staticmethod
    def reopen_task(credentials: TodoistCredentials, task_id: str):
        try:
            api = TodoistAPI(credentials.access_token.get_secret_value())
            is_success = api.reopen_task(task_id=task_id)
            return is_success
        except Exception as e:
            raise e

    def run(
        self,
        input_data: Input,
        *,
        credentials: TodoistCredentials,
        **kwargs,
    ) -> BlockOutput:
        try:
            is_success = self.reopen_task(credentials, input_data.task_id)
            yield "success", is_success

        except Exception as e:
            yield "error", str(e)


class TodoistDeleteTaskBlock(Block):
    """Deletes a task in Todoist"""

    class Input(BlockSchema):
        credentials: TodoistCredentialsInput = TodoistCredentialsField([])
        task_id: str = SchemaField(description="Task ID to delete")

    class Output(BlockSchema):
        success: bool = SchemaField(
            description="Whether the task was successfully deleted"
        )
        error: str = SchemaField(description="Error message if request failed")

    def __init__(self):
        super().__init__(
            id="33c29ada-de15-11ef-bcbb-32d3674e8b7e",
            description="Deletes a task in Todoist",
            categories={BlockCategory.PRODUCTIVITY},
            input_schema=TodoistDeleteTaskBlock.Input,
            output_schema=TodoistDeleteTaskBlock.Output,
            test_input={"credentials": TEST_CREDENTIALS_INPUT, "task_id": "2995104339"},
            test_credentials=TEST_CREDENTIALS,
            test_output=[
                ("success", True),
            ],
            test_mock={"delete_task": lambda *args, **kwargs: True},
        )

    @staticmethod
    def delete_task(credentials: TodoistCredentials, task_id: str):
        try:
            api = TodoistAPI(credentials.access_token.get_secret_value())
            is_success = api.delete_task(task_id=task_id)
            return is_success
        except Exception as e:
            raise e

    def run(
        self,
        input_data: Input,
        *,
        credentials: TodoistCredentials,
        **kwargs,
    ) -> BlockOutput:
        try:
            is_success = self.delete_task(credentials, input_data.task_id)
            yield "success", is_success

        except Exception as e:
            yield "error", str(e)
@@ -92,8 +92,7 @@ class TwitterPostTweetBlock(Block):
        attachment: Union[Media, DeepLink, Poll, Place, Quote] | None = SchemaField(
            discriminator="discriminator",
            description="Additional tweet data (media, deep link, poll, place or quote)",
            advanced=False,
            default=Media(discriminator="media"),
            advanced=True,
        )

        exclude_reply_user_ids: Optional[List[str]] = SchemaField(
@@ -23,6 +23,71 @@ from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField


class TwitterUnblockUserBlock(Block):
    """
    Unblock a specific user on Twitter. The request succeeds with no action when the user sends a request to a user they're not blocking or have already unblocked.
    """

    class Input(BlockSchema):
        credentials: TwitterCredentialsInput = TwitterCredentialsField(
            ["block.write", "users.read", "offline.access"]
        )

        target_user_id: str = SchemaField(
            description="The user ID of the user that you would like to unblock",
            placeholder="Enter target user ID",
        )

    class Output(BlockSchema):
        success: bool = SchemaField(description="Whether the unblock was successful")
        error: str = SchemaField(description="Error message if the request failed")

    def __init__(self):
        super().__init__(
            id="0f1b6570-a631-11ef-a3ea-230cbe9650dd",
            description="This block unblocks a specific user on Twitter.",
            categories={BlockCategory.SOCIAL},
            input_schema=TwitterUnblockUserBlock.Input,
            output_schema=TwitterUnblockUserBlock.Output,
            test_input={
                "target_user_id": "12345",
                "credentials": TEST_CREDENTIALS_INPUT,
            },
            test_credentials=TEST_CREDENTIALS,
            test_output=[
                ("success", True),
            ],
            test_mock={"unblock_user": lambda *args, **kwargs: True},
        )

    @staticmethod
    def unblock_user(credentials: TwitterCredentials, target_user_id: str):
        try:
            client = tweepy.Client(
                bearer_token=credentials.access_token.get_secret_value()
            )

            client.unblock(target_user_id=target_user_id, user_auth=False)

            return True

        except tweepy.TweepyException:
            raise

    def run(
        self,
        input_data: Input,
        *,
        credentials: TwitterCredentials,
        **kwargs,
    ) -> BlockOutput:
        try:
            success = self.unblock_user(credentials, input_data.target_user_id)
            yield "success", success
        except Exception as e:
            yield "error", handle_tweepy_exception(e)


class TwitterGetBlockedUsersBlock(Block):
    """
    Get a list of users who are blocked by the authenticating user
@@ -173,3 +238,68 @@ class TwitterGetBlockedUsersBlock(Block):
            yield "next_token", next_token
        except Exception as e:
            yield "error", handle_tweepy_exception(e)


class TwitterBlockUserBlock(Block):
    """
    Block a specific user on Twitter
    """

    class Input(BlockSchema):
        credentials: TwitterCredentialsInput = TwitterCredentialsField(
            ["block.write", "users.read", "offline.access"]
        )

        target_user_id: str = SchemaField(
            description="The user ID of the user that you would like to block",
            placeholder="Enter target user ID",
        )

    class Output(BlockSchema):
        success: bool = SchemaField(description="Whether the block was successful")
        error: str = SchemaField(description="Error message if the request failed")

    def __init__(self):
        super().__init__(
            id="fc258b94-a630-11ef-abc3-df050b75b816",
            description="This block blocks a specific user on Twitter.",
            categories={BlockCategory.SOCIAL},
            input_schema=TwitterBlockUserBlock.Input,
            output_schema=TwitterBlockUserBlock.Output,
            test_input={
                "target_user_id": "12345",
                "credentials": TEST_CREDENTIALS_INPUT,
            },
            test_credentials=TEST_CREDENTIALS,
            test_output=[
                ("success", True),
            ],
            test_mock={"block_user": lambda *args, **kwargs: True},
        )

    @staticmethod
    def block_user(credentials: TwitterCredentials, target_user_id: str):
        try:
            client = tweepy.Client(
                bearer_token=credentials.access_token.get_secret_value()
            )

            client.block(target_user_id=target_user_id, user_auth=False)

            return True

        except tweepy.TweepyException:
            raise

    def run(
        self,
        input_data: Input,
        *,
        credentials: TwitterCredentials,
        **kwargs,
    ) -> BlockOutput:
        try:
            success = self.block_user(credentials, input_data.target_user_id)
            yield "success", success
        except Exception as e:
            yield "error", handle_tweepy_exception(e)
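
Both helpers pass the user's OAuth2 access token as `bearer_token` with `user_auth=False`, which is how tweepy.Client makes OAuth2 user-context calls. A standalone equivalent, with the token value assumed:

# Standalone equivalent of the block_user/unblock_user helpers above.
# "USER_ACCESS_TOKEN" is an assumed OAuth2 user-context token carrying the
# block.write scope.
import tweepy

client = tweepy.Client(bearer_token="USER_ACCESS_TOKEN")
client.block(target_user_id="12345", user_auth=False)
client.unblock(target_user_id="12345", user_auth=False)
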
@@ -1,37 +0,0 @@
from gravitasml.parser import Parser
from gravitasml.token import tokenize

from backend.data.block import Block, BlockOutput, BlockSchema
from backend.data.model import SchemaField


class XMLParserBlock(Block):
    class Input(BlockSchema):
        input_xml: str = SchemaField(description="input xml to be parsed")

    class Output(BlockSchema):
        parsed_xml: dict = SchemaField(description="output parsed xml to dict")
        error: str = SchemaField(description="Error in parsing")

    def __init__(self):
        super().__init__(
            id="286380af-9529-4b55-8be0-1d7c854abdb5",
            description="Parses XML using gravitasml to tokenize and converts it to dict",
            input_schema=XMLParserBlock.Input,
            output_schema=XMLParserBlock.Output,
            test_input={"input_xml": "<tag1><tag2>content</tag2></tag1>"},
            test_output=[
                ("parsed_xml", {"tag1": {"tag2": "content"}}),
            ],
        )

    def run(self, input_data: Input, **kwargs) -> BlockOutput:
        try:
            tokens = tokenize(input_data.input_xml)
            parser = Parser(tokens)
            parsed_result = parser.parse()
            yield "parsed_xml", parsed_result
        except ValueError as val_e:
            raise ValueError(f"Validation error for dict: {val_e}") from val_e
        except SyntaxError as syn_e:
            raise SyntaxError(f"Error in input xml syntax: {syn_e}") from syn_e
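
What the gravitasml pipeline produces, using the block's own test fixture (nothing here is invented):

# The same tokenize -> Parser.parse pipeline as run(), fed the block's
# test_input; the expected dict comes from its test_output.
tokens = tokenize("<tag1><tag2>content</tag2></tag1>")
assert Parser(tokens).parse() == {"tag1": {"tag2": "content"}}
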
@@ -1,10 +0,0 @@
from zerobouncesdk import ZBValidateResponse, ZeroBounce


class ZeroBounceClient:
    def __init__(self, api_key: str):
        self.api_key = api_key
        self.client = ZeroBounce(api_key)

    def validate_email(self, email: str, ip_address: str) -> ZBValidateResponse:
        return self.client.validate(email, ip_address)
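
A minimal usage sketch of the wrapper above; the key and inputs are placeholders, not values from this repo:

# Minimal sketch: the wrapper simply forwards to zerobouncesdk's validate().
client = ZeroBounceClient(api_key="zb-api-key-placeholder")
result = client.validate_email("test@test.com", ip_address="")
print(result.status)  # e.g. ZBValidateStatus.valid
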
@@ -1,35 +0,0 @@
from typing import Literal

from pydantic import SecretStr

from backend.data.model import APIKeyCredentials, CredentialsField, CredentialsMetaInput
from backend.integrations.providers import ProviderName

ZeroBounceCredentials = APIKeyCredentials
ZeroBounceCredentialsInput = CredentialsMetaInput[
    Literal[ProviderName.ZEROBOUNCE],
    Literal["api_key"],
]

TEST_CREDENTIALS = APIKeyCredentials(
    id="01234567-89ab-cdef-0123-456789abcdef",
    provider="zerobounce",
    api_key=SecretStr("mock-zerobounce-api-key"),
    title="Mock ZeroBounce API key",
    expires_at=None,
)
TEST_CREDENTIALS_INPUT = {
    "provider": TEST_CREDENTIALS.provider,
    "id": TEST_CREDENTIALS.id,
    "type": TEST_CREDENTIALS.type,
    "title": TEST_CREDENTIALS.title,
}


def ZeroBounceCredentialsField() -> ZeroBounceCredentialsInput:
    """
    Creates a ZeroBounce credentials input on a block.
    """
    return CredentialsField(
        description="The ZeroBounce integration can be used with an API Key.",
    )
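
This helper is the intended way to declare the credentials input on a ZeroBounce block; note that ValidateEmailsBlock below declares it with a bare SchemaField instead. A hedged sketch of the intended usage (the block name is hypothetical):

# Hedged sketch: how ZeroBounceCredentialsField is meant to be used on a
# block schema. "SomeZeroBounceBlock" is hypothetical.
class SomeZeroBounceBlock(Block):
    class Input(BlockSchema):
        credentials: ZeroBounceCredentialsInput = ZeroBounceCredentialsField()
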
@@ -1,175 +0,0 @@
from typing import Optional

from pydantic import BaseModel
from zerobouncesdk.zb_validate_response import (
    ZBValidateResponse,
    ZBValidateStatus,
    ZBValidateSubStatus,
)

from backend.blocks.zerobounce._api import ZeroBounceClient
from backend.blocks.zerobounce._auth import (
    TEST_CREDENTIALS,
    TEST_CREDENTIALS_INPUT,
    ZeroBounceCredentials,
    ZeroBounceCredentialsInput,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField


class Response(BaseModel):
    address: str = SchemaField(
        description="The email address you are validating.", default="N/A"
    )
    status: ZBValidateStatus = SchemaField(
        description="The status of the email address.", default=ZBValidateStatus.unknown
    )
    sub_status: ZBValidateSubStatus = SchemaField(
        description="The sub-status of the email address.",
        default=ZBValidateSubStatus.none,
    )
    account: Optional[str] = SchemaField(
        description="The portion of the email address before the '@' symbol.",
        default="N/A",
    )
    domain: Optional[str] = SchemaField(
        description="The portion of the email address after the '@' symbol."
    )
    did_you_mean: Optional[str] = SchemaField(
        description="Suggested fix for an email typo.",
        default=None,
    )
    domain_age_days: Optional[str] = SchemaField(
        description="Age of the email domain in days or [null].",
        default=None,
    )
    free_email: Optional[bool] = SchemaField(
        description="Whether the email address is from a free email provider.",
        default=False,
    )
    mx_found: Optional[bool] = SchemaField(
        description="Whether the MX record was found.", default=False
    )
    mx_record: Optional[str] = SchemaField(
        description="The MX record of the email address.", default=None
    )
    smtp_provider: Optional[str] = SchemaField(
        description="The SMTP provider of the email address.", default=None
    )
    firstname: Optional[str] = SchemaField(
        description="The first name of the email owner.", default=None
    )
    lastname: Optional[str] = SchemaField(
        description="The last name of the email owner.", default=None
    )
    gender: Optional[str] = SchemaField(
        description="The gender of the email owner.", default=None
    )
    city: Optional[str] = SchemaField(
        description="The city associated with the email address.", default=None
    )
    region: Optional[str] = SchemaField(
        description="The region associated with the email address.", default=None
    )
    zipcode: Optional[str] = SchemaField(
        description="The zipcode associated with the email address.", default=None
    )
    country: Optional[str] = SchemaField(
        description="The country associated with the email address.", default=None
    )


class ValidateEmailsBlock(Block):
    """Validates an email address with ZeroBounce"""

    class Input(BlockSchema):
        email: str = SchemaField(
            description="Email to validate",
        )
        ip_address: str = SchemaField(
            description="IP address to validate",
            default="",
        )
        credentials: ZeroBounceCredentialsInput = SchemaField(
            description="ZeroBounce credentials",
        )

    class Output(BlockSchema):
        response: Response = SchemaField(
            description="Response from ZeroBounce",
        )
        error: str = SchemaField(
            description="Error message if the validation failed",
            default="",
        )

    def __init__(self):
        super().__init__(
            id="e3950439-fa0b-40e8-b19f-e0dca0bf5853",
            description="Validate emails",
            categories={BlockCategory.SEARCH},
            input_schema=ValidateEmailsBlock.Input,
            output_schema=ValidateEmailsBlock.Output,
            test_credentials=TEST_CREDENTIALS,
            test_input={
                "credentials": TEST_CREDENTIALS_INPUT,
                "email": "test@test.com",
            },
            test_output=[
                (
                    "response",
                    Response(
                        address="test@test.com",
                        status=ZBValidateStatus.valid,
                        sub_status=ZBValidateSubStatus.allowed,
                        account="test",
                        domain="test.com",
                        did_you_mean=None,
                        domain_age_days=None,
                        free_email=False,
                        mx_found=False,
                        mx_record=None,
                        smtp_provider=None,
                    ),
                )
            ],
            test_mock={
                "validate_email": lambda email, ip_address, credentials: ZBValidateResponse(
                    data={
                        "address": email,
                        "status": ZBValidateStatus.valid,
                        "sub_status": ZBValidateSubStatus.allowed,
                        "account": "test",
                        "domain": "test.com",
                        "did_you_mean": None,
                        "domain_age_days": None,
                        "free_email": False,
                        "mx_found": False,
                        "mx_record": None,
                        "smtp_provider": None,
                    }
                )
            },
        )

    @staticmethod
    def validate_email(
        email: str, ip_address: str, credentials: ZeroBounceCredentials
    ) -> ZBValidateResponse:
        client = ZeroBounceClient(credentials.api_key.get_secret_value())
        return client.validate_email(email, ip_address)

    def run(
        self,
        input_data: Input,
        *,
        credentials: ZeroBounceCredentials,
        **kwargs,
    ) -> BlockOutput:
        response: ZBValidateResponse = self.validate_email(
            input_data.email, input_data.ip_address, credentials
        )

        response_model = Response(**response.__dict__)

        yield "response", response_model
@@ -221,8 +221,7 @@ def event():
@test.command()
@click.argument("server_address")
@click.argument("graph_id")
@click.argument("graph_version")
def websocket(server_address: str, graph_id: str, graph_version: int):
def websocket(server_address: str, graph_id: str):
    """
    Tests the websocket connection.
    """
@@ -238,9 +237,7 @@ def websocket(server_address: str, graph_id: str, graph_version: int):
    try:
        msg = WsMessage(
            method=Methods.SUBSCRIBE,
            data=ExecutionSubscription(
                graph_id=graph_id, graph_version=graph_version
            ).model_dump(),
            data=ExecutionSubscription(graph_id=graph_id).model_dump(),
        ).model_dump_json()
        await websocket.send(msg)
        print(f"Sending: {msg}")
@@ -2,7 +2,6 @@ import inspect
from abc import ABC, abstractmethod
from enum import Enum
from typing import (
    TYPE_CHECKING,
    Any,
    ClassVar,
    Generator,
@@ -19,7 +18,6 @@ import jsonschema
from prisma.models import AgentBlock
from pydantic import BaseModel

from backend.data.model import NodeExecutionStats
from backend.util import json
from backend.util.settings import Config

@@ -30,9 +28,6 @@ from .model import (
    is_credentials_field_name,
)

if TYPE_CHECKING:
    from .graph import Link

app_config = Config()

BlockData = tuple[str, Any]  # Input & Output data should be a tuple of (name, data).
@@ -49,7 +44,6 @@ class BlockType(Enum):
    WEBHOOK = "Webhook"
    WEBHOOK_MANUAL = "Webhook (manual)"
    AGENT = "Agent"
    AI = "AI"


class BlockCategory(Enum):
@@ -70,9 +64,6 @@ class BlockCategory(Enum):
    SAFETY = (
        "Block that provides AI safety mechanisms such as detecting harmful content"
    )
    PRODUCTIVITY = "Block that helps with productivity"
    ISSUE_TRACKING = "Block that helps with issue tracking"
    MULTIMEDIA = "Block that interacts with multimedia content"

    def dict(self) -> dict[str, str]:
        return {"category": self.name, "description": self.value}
@@ -115,10 +106,6 @@ class BlockSchema(BaseModel):
    def validate_data(cls, data: BlockInput) -> str | None:
        return json.validate_with_jsonschema(schema=cls.jsonschema(), data=data)

    @classmethod
    def get_mismatch_error(cls, data: BlockInput) -> str | None:
        return cls.validate_data(data)

    @classmethod
    def validate_field(cls, field_name: str, data: BlockInput) -> str | None:
        """
@@ -196,19 +183,6 @@ class BlockSchema(BaseModel):
        )
    }

    @classmethod
    def get_input_defaults(cls, data: BlockInput) -> BlockInput:
        return data  # Return as is, by default.

    @classmethod
    def get_missing_links(cls, data: BlockInput, links: list["Link"]) -> set[str]:
        input_fields_from_nodes = {link.sink_name for link in links}
        return input_fields_from_nodes - set(data)

    @classmethod
    def get_missing_input(cls, data: BlockInput) -> set[str]:
        return cls.get_required_fields() - set(data)


BlockSchemaInputType = TypeVar("BlockSchemaInputType", bound=BlockSchema)
BlockSchemaOutputType = TypeVar("BlockSchemaOutputType", bound=BlockSchema)
@@ -317,7 +291,7 @@ class Block(ABC, Generic[BlockSchemaInputType, BlockSchemaOutputType]):
        self.static_output = static_output
        self.block_type = block_type
        self.webhook_config = webhook_config
        self.execution_stats: NodeExecutionStats = NodeExecutionStats()
        self.execution_stats = {}

        if self.webhook_config:
            if isinstance(self.webhook_config, BlockWebhookConfig):
@@ -374,14 +348,6 @@ class Block(ABC, Generic[BlockSchemaInputType, BlockSchemaOutputType]):
        Run the block with the given input data.
        Args:
            input_data: The input data with the structure of input_schema.

        Kwargs: Currently 14/02/2025 these include
            graph_id: The ID of the graph.
            node_id: The ID of the node.
            graph_exec_id: The ID of the graph execution.
            node_exec_id: The ID of the node execution.
            user_id: The ID of the user.

        Returns:
            A Generator that yields (output_name, output_data).
            output_name: One of the output name defined in Block's output_schema.
@@ -395,29 +361,18 @@ class Block(ABC, Generic[BlockSchemaInputType, BlockSchemaOutputType]):
        return data
        raise ValueError(f"{self.name} did not produce any output for {output}")

    def merge_stats(self, stats: NodeExecutionStats) -> NodeExecutionStats:
        stats_dict = stats.model_dump()
        current_stats = self.execution_stats.model_dump()

        for key, value in stats_dict.items():
            if key not in current_stats:
                # Field doesn't exist yet, so just set it. This probably
                # won't happen, but just in case; invalid fields are caught
                # when converting back into the model.
                current_stats[key] = value
            elif isinstance(value, dict) and isinstance(current_stats[key], dict):
                current_stats[key].update(value)
            elif isinstance(value, (int, float)) and isinstance(
                current_stats[key], (int, float)
            ):
                current_stats[key] += value
            elif isinstance(value, list) and isinstance(current_stats[key], list):
                current_stats[key].extend(value)
    def merge_stats(self, stats: dict[str, Any]) -> dict[str, Any]:
        for key, value in stats.items():
            if isinstance(value, dict):
                self.execution_stats.setdefault(key, {}).update(value)
            elif isinstance(value, (int, float)):
                self.execution_stats.setdefault(key, 0)
                self.execution_stats[key] += value
            elif isinstance(value, list):
                self.execution_stats.setdefault(key, [])
                self.execution_stats[key].extend(value)
            else:
                current_stats[key] = value

        self.execution_stats = NodeExecutionStats(**current_stats)

                self.execution_stats[key] = value
        return self.execution_stats
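
Semantics of the dict-based merge_stats that replaces the NodeExecutionStats version: numbers add, dicts update, lists extend, anything else overwrites. A hypothetical worked example (the stat keys are illustrative; `block` is an instantiated Block subclass):

# Hypothetical values illustrating merge_stats after this change.
block.execution_stats = {"cost": 2, "calls": {"gpt4o": 1}}
block.merge_stats({"cost": 3, "calls": {"o1": 2}, "errors": ["timeout"]})
assert block.execution_stats == {
    "cost": 5,                       # numbers are summed
    "calls": {"gpt4o": 1, "o1": 2},  # dicts are merged key by key
    "errors": ["timeout"],           # lists are extended
}
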
|
||||
|
||||
@property
|
||||
|
||||
@@ -15,7 +15,6 @@ from backend.blocks.llm import (
    LlmModel,
)
from backend.blocks.replicate_flux_advanced import ReplicateFluxAdvancedModelBlock
from backend.blocks.smart_decision_maker import SmartDecisionMakerBlock
from backend.blocks.talking_head import CreateTalkingAvatarVideoBlock
from backend.blocks.text_to_speech_block import UnrealTextToSpeechBlock
from backend.data.block import Block
@@ -36,8 +35,6 @@ from backend.integrations.credentials_store import (
# =============== Configure the cost for each LLM Model call =============== #

MODEL_COST: dict[LlmModel, int] = {
    LlmModel.O3_MINI: 2,  # $1.10 / $4.40
    LlmModel.O1: 16,  # $15 / $60
    LlmModel.O1_PREVIEW: 16,
    LlmModel.O1_MINI: 4,
    LlmModel.GPT4O_MINI: 1,
@@ -45,21 +42,20 @@ MODEL_COST: dict[LlmModel, int] = {
    LlmModel.GPT4_TURBO: 10,
    LlmModel.GPT3_5_TURBO: 1,
    LlmModel.CLAUDE_3_5_SONNET: 4,
    LlmModel.CLAUDE_3_5_HAIKU: 1,  # $0.80 / $4.00
    LlmModel.CLAUDE_3_HAIKU: 1,
    LlmModel.LLAMA3_8B: 1,
    LlmModel.LLAMA3_70B: 1,
    LlmModel.MIXTRAL_8X7B: 1,
    LlmModel.GEMMA_7B: 1,
    LlmModel.GEMMA2_9B: 1,
    LlmModel.LLAMA3_3_70B: 1,  # $0.59 / $0.79
    LlmModel.LLAMA3_1_405B: 1,
    LlmModel.LLAMA3_1_70B: 1,
    LlmModel.LLAMA3_1_8B: 1,
    LlmModel.OLLAMA_LLAMA3_3: 1,
    LlmModel.OLLAMA_LLAMA3_2: 1,
    LlmModel.OLLAMA_LLAMA3_8B: 1,
    LlmModel.OLLAMA_LLAMA3_405B: 1,
    LlmModel.DEEPSEEK_LLAMA_70B: 1,  # ? / ?
    LlmModel.OLLAMA_DOLPHIN: 1,
    LlmModel.GEMINI_FLASH_1_5: 1,
    LlmModel.GEMINI_FLASH_1_5_8B: 1,
    LlmModel.GROK_BETA: 5,
    LlmModel.MISTRAL_NEMO: 1,
    LlmModel.COHERE_COMMAND_R_08_2024: 1,
@@ -266,5 +262,4 @@ BLOCK_COSTS: dict[Type[Block], list[BlockCost]] = {
        },
    )
    ],
    SmartDecisionMakerBlock: LLM_COST,
}

@@ -10,6 +10,7 @@ class BlockCostType(str, Enum):
    RUN = "run"  # cost X credits per run
    BYTE = "byte"  # cost X credits per byte
    SECOND = "second"  # cost X credits per second
    DOLLAR = "dollar"  # cost X dollars per run

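A hedged sketch of how MODEL_COST typically feeds BLOCK_COSTS entries like LLM_COST above; the exact BlockCost fields are an assumption, since the class body is truncated in this diff.

    # Assumed fields: cost_amount and cost_type (not confirmed by this hunk).
    LLM_COST = [
        BlockCost(cost_amount=cost, cost_type=BlockCostType.RUN)
        for cost in MODEL_COST.values()
    ]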
class BlockCost(BaseModel):

File diff suppressed because it is too large
@@ -1,6 +1,5 @@
import logging
import os
import zlib
from contextlib import asynccontextmanager
from uuid import uuid4

@@ -55,14 +54,6 @@ async def transaction():
        yield tx


@asynccontextmanager
async def locked_transaction(key: str):
    lock_key = zlib.crc32(key.encode("utf-8"))
    async with transaction() as tx:
        await tx.execute_raw(f"SELECT pg_advisory_xact_lock({lock_key})")
        yield tx

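A minimal usage sketch for locked_transaction above. The key format, table, and column are illustrative assumptions, not taken from this diff; only the locking behavior is.

    async def add_credits(user_id: str, amount: int) -> None:
        # All writers sharing the same key serialize on one Postgres advisory
        # lock (the CRC32 of the key), released when the transaction ends.
        async with locked_transaction(f"usr_trx_{user_id}") as tx:
            await tx.execute_raw(
                'UPDATE platform."User" SET balance = balance + $1 WHERE id = $2',
                amount,
                user_id,
            )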
class BaseDbModel(BaseModel):
    id: str = Field(default_factory=lambda: str(uuid4()))


@@ -1,9 +1,8 @@
from collections import defaultdict
from datetime import datetime, timezone
from multiprocessing import Manager
from typing import Any, AsyncGenerator, Generator, Generic, Type, TypeVar
from typing import Any, AsyncGenerator, Generator, Generic, TypeVar

from prisma import Json
from prisma.enums import AgentExecutionStatus
from prisma.models import (
    AgentGraphExecution,
@@ -14,10 +13,8 @@ from pydantic import BaseModel

from backend.data.block import BlockData, BlockInput, CompletedBlockOutput
from backend.data.includes import EXECUTION_RESULT_INCLUDE, GRAPH_EXECUTION_INCLUDE
from backend.data.model import GraphExecutionStats, NodeExecutionStats
from backend.data.queue import AsyncRedisEventBus, RedisEventBus
from backend.server.v2.store.exceptions import DatabaseError
from backend.util import mock, type
from backend.util import json, mock
from backend.util.settings import Config


@@ -25,7 +22,6 @@ class GraphExecutionEntry(BaseModel):
    user_id: str
    graph_exec_id: str
    graph_id: str
    graph_version: int
    start_node_execs: list["NodeExecutionEntry"]


@@ -35,7 +31,6 @@ class NodeExecutionEntry(BaseModel):
    graph_id: str
    node_exec_id: str
    node_id: str
    block_id: str
    data: BlockInput


@@ -103,16 +98,16 @@ class ExecutionResult(BaseModel):
    def from_db(execution: AgentNodeExecution):
        if execution.executionData:
            # An execution that has been queued will have its input data persisted.
            input_data = type.convert(execution.executionData, dict[str, Any])
            input_data = json.loads(execution.executionData, target_type=dict[str, Any])
        else:
            # For an incomplete execution, executionData will not yet be available.
            input_data: BlockInput = defaultdict()
            for data in execution.Input or []:
                input_data[data.name] = type.convert(data.data, Type[Any])
                input_data[data.name] = json.loads(data.data)

        output_data: CompletedBlockOutput = defaultdict(list)
        for data in execution.Output or []:
            output_data[data.name].append(type.convert(data.data, Type[Any]))
            output_data[data.name].append(json.loads(data.data))

        graph_execution: AgentGraphExecution | None = execution.AgentGraphExecution

@@ -141,7 +136,6 @@ async def create_graph_execution(
    graph_version: int,
    nodes_input: list[tuple[str, BlockInput]],
    user_id: str,
    preset_id: str | None = None,
) -> tuple[str, list[ExecutionResult]]:
    """
    Create a new AgentGraphExecution record.
@@ -160,7 +154,7 @@
                    "executionStatus": ExecutionStatus.INCOMPLETE,
                    "Input": {
                        "create": [
                            {"name": name, "data": Json(data)}
                            {"name": name, "data": json.dumps(data)}
                            for name, data in node_input.items()
                        ]
                    },
@@ -169,7 +163,6 @@
            ]
        },
        "userId": user_id,
        "agentPresetId": preset_id,
    },
    include=GRAPH_EXECUTION_INCLUDE,
)
@@ -213,7 +206,7 @@ async def upsert_execution_input(
        order={"addedTime": "asc"},
        include={"Input": True},
    )
    json_input_data = Json(input_data)
    json_input_data = json.dumps(input_data)

    if existing_execution:
        await AgentNodeExecutionInputOutput.prisma().create(
@@ -225,7 +218,7 @@
        )
        return existing_execution.id, {
            **{
                input_data.name: type.convert(input_data.data, Type[Any])
                input_data.name: json.loads(input_data.data)
                for input_data in existing_execution.Input or []
            },
            input_name: input_data,
@@ -259,39 +252,32 @@ async def upsert_execution_output(
    await AgentNodeExecutionInputOutput.prisma().create(
        data={
            "name": output_name,
            "data": Json(output_data),
            "data": json.dumps(output_data),
            "referencedByOutputExecId": node_exec_id,
        }
    )

async def update_graph_execution_start_time(graph_exec_id: str) -> ExecutionResult:
    res = await AgentGraphExecution.prisma().update(
async def update_graph_execution_start_time(graph_exec_id: str):
    await AgentGraphExecution.prisma().update(
        where={"id": graph_exec_id},
        data={
            "executionStatus": ExecutionStatus.RUNNING,
            "startedAt": datetime.now(tz=timezone.utc),
        },
    )
    if not res:
        raise ValueError(f"Execution {graph_exec_id} not found.")

    return ExecutionResult.from_graph(res)


async def update_graph_execution_stats(
    graph_exec_id: str,
    status: ExecutionStatus,
    stats: GraphExecutionStats,
    stats: dict[str, Any],
) -> ExecutionResult:
    data = stats.model_dump()
    if isinstance(data["error"], Exception):
        data["error"] = str(data["error"])
    res = await AgentGraphExecution.prisma().update(
        where={"id": graph_exec_id},
        data={
            "executionStatus": status,
            "stats": Json(data),
            "stats": json.dumps(stats),
        },
    )
    if not res:
@@ -300,13 +286,10 @@ async def update_graph_execution_stats(
    return ExecutionResult.from_graph(res)


async def update_node_execution_stats(node_exec_id: str, stats: NodeExecutionStats):
    data = stats.model_dump()
    if isinstance(data["error"], Exception):
        data["error"] = str(data["error"])
async def update_node_execution_stats(node_exec_id: str, stats: dict[str, Any]):
    await AgentNodeExecution.prisma().update(
        where={"id": node_exec_id},
        data={"stats": Json(data)},
        data={"stats": json.dumps(stats)},
    )


@@ -326,8 +309,8 @@ async def update_execution_status(
        **({"startedTime": now} if status == ExecutionStatus.RUNNING else {}),
        **({"endedTime": now} if status == ExecutionStatus.FAILED else {}),
        **({"endedTime": now} if status == ExecutionStatus.COMPLETED else {}),
        **({"executionData": Json(execution_data)} if execution_data else {}),
        **({"stats": Json(stats)} if stats else {}),
        **({"executionData": json.dumps(execution_data)} if execution_data else {}),
        **({"stats": json.dumps(stats)} if stats else {}),
    }

    res = await AgentNodeExecution.prisma().update(
@@ -341,23 +324,6 @@
    return ExecutionResult.from_db(res)


async def delete_execution(
    graph_exec_id: str, user_id: str, soft_delete: bool = True
) -> None:
    if soft_delete:
        deleted_count = await AgentGraphExecution.prisma().update_many(
            where={"id": graph_exec_id, "userId": user_id}, data={"isDeleted": True}
        )
    else:
        deleted_count = await AgentGraphExecution.prisma().delete_many(
            where={"id": graph_exec_id, "userId": user_id}
        )
    if deleted_count < 1:
        raise DatabaseError(
            f"Could not delete graph execution #{graph_exec_id}: not found"
        )


async def get_execution_results(graph_exec_id: str) -> list[ExecutionResult]:
    executions = await AgentNodeExecution.prisma().find_many(
        where={"agentGraphExecutionId": graph_exec_id},
@@ -371,66 +337,13 @@ async def get_execution_results(graph_exec_id: str) -> list[ExecutionResult]:
    return res


async def get_executions_in_timerange(
    user_id: str, start_time: str, end_time: str
) -> list[ExecutionResult]:
    try:
        executions = await AgentGraphExecution.prisma().find_many(
            where={
                "startedAt": {
                    "gte": datetime.fromisoformat(start_time),
                    "lte": datetime.fromisoformat(end_time),
                },
                "userId": user_id,
                "isDeleted": False,
            },
            include=GRAPH_EXECUTION_INCLUDE,
        )
        return [ExecutionResult.from_graph(execution) for execution in executions]
    except Exception as e:
        raise DatabaseError(
            f"Failed to get executions in timerange {start_time} to {end_time} for user {user_id}: {e}"
        ) from e


LIST_SPLIT = "_$_"
DICT_SPLIT = "_#_"
OBJC_SPLIT = "_@_"

def parse_execution_output(output: BlockData, name: str) -> Any | None:
    """
    Extracts partial output data by name from a given BlockData.

    The function supports extracting data from lists, dictionaries, and objects
    using specific naming conventions:
    - For lists: <output_name>_$_<index>
    - For dictionaries: <output_name>_#_<key>
    - For objects: <output_name>_@_<attribute>

    Args:
        output (BlockData): A tuple containing the output name and data.
        name (str): The name used to extract specific data from the output.

    Returns:
        Any | None: The extracted data if found, otherwise None.

    Examples:
        >>> output = ("result", [10, 20, 30])
        >>> parse_execution_output(output, "result_$_1")
        20

        >>> output = ("config", {"key1": "value1", "key2": "value2"})
        >>> parse_execution_output(output, "config_#_key1")
        'value1'

        >>> class Sample:
        ...     attr1 = "value1"
        ...     attr2 = "value2"
        >>> output = ("object", Sample())
        >>> parse_execution_output(output, "object_@_attr1")
        'value1'
    """
    # Allow extracting partial output data by name.
    output_name, output_data = output

    if name == output_name:
@@ -459,37 +372,11 @@ def parse_execution_output(output: BlockData, name: str) -> Any | None:

def merge_execution_input(data: BlockInput) -> BlockInput:
    """
    Merges dynamic input pins into a single list, dictionary, or object based on naming patterns.

    This function processes input keys that follow specific patterns to merge them into a unified structure:
    - `<input_name>_$_<index>` for list inputs.
    - `<input_name>_#_<index>` for dictionary inputs.
    - `<input_name>_@_<index>` for object inputs.

    Args:
        data (BlockInput): A dictionary containing input keys and their corresponding values.

    Returns:
        BlockInput: A dictionary with merged inputs.

    Raises:
        ValueError: If a list index is not an integer.

    Examples:
        >>> data = {
        ...     "list_$_0": "a",
        ...     "list_$_1": "b",
        ...     "dict_#_key1": "value1",
        ...     "dict_#_key2": "value2",
        ...     "object_@_attr1": "value1",
        ...     "object_@_attr2": "value2"
        ... }
        >>> merge_execution_input(data)
        {
            "list": ["a", "b"],
            "dict": {"key1": "value1", "key2": "value2"},
            "object": <MockObject attr1="value1" attr2="value2">
        }
    Merge all dynamic input pins which are described by the following pattern:
    - <input_name>_$_<index> for list input.
    - <input_name>_#_<index> for dict input.
    - <input_name>_@_<index> for object input.
    This function will construct pins with the same name into a single list/dict/object.
    """

    # Merge all input with <input_name>_$_<index> into a single list.
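The hunk cuts off here; a hedged reconstruction of what that list-merge step typically does. This is a sketch, not the code from this commit; it assumes only the LIST_SPLIT constant and the error condition named in the docstring.

    merged_list: dict[str, dict[int, Any]] = defaultdict(dict)
    for key in list(data.keys()):
        if LIST_SPLIT not in key:
            continue
        name, index = key.split(LIST_SPLIT)
        if not index.isdigit():
            raise ValueError(f"List index in {key} must be an integer.")
        merged_list[name][int(index)] = data.pop(key)
    for name, indexed in merged_list.items():
        # Rebuild the list in index order.
        data[name] = [indexed[i] for i in sorted(indexed)]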
@@ -533,7 +420,8 @@ async def get_latest_execution(node_id: str, graph_eid: str) -> ExecutionResult
        where={
            "agentNodeId": node_id,
            "agentGraphExecutionId": graph_eid,
            "executionStatus": {"not": ExecutionStatus.INCOMPLETE},  # type: ignore
            "executionStatus": {"not": ExecutionStatus.INCOMPLETE},
            "executionData": {"not": None},  # type: ignore
        },
        order={"queuedTime": "desc"},
        include=EXECUTION_RESULT_INCLUDE,

@@ -6,32 +6,22 @@ from datetime import datetime, timezone
from typing import Any, Literal, Optional, Type

import prisma
from prisma import Json
from prisma.models import (
    AgentGraph,
    AgentGraphExecution,
    AgentNode,
    AgentNodeLink,
    StoreListingVersion,
)
from prisma.models import AgentGraph, AgentGraphExecution, AgentNode, AgentNodeLink
from prisma.types import AgentGraphWhereInput
from pydantic.fields import Field, computed_field
from pydantic.fields import computed_field

from backend.blocks.agent import AgentExecutorBlock
from backend.blocks.basic import AgentInputBlock, AgentOutputBlock
from backend.util import type
from backend.util import json

from .block import BlockInput, BlockType, get_block, get_blocks
from .db import BaseDbModel, transaction
from .execution import ExecutionResult, ExecutionStatus
from .execution import ExecutionStatus
from .includes import AGENT_GRAPH_INCLUDE, AGENT_NODE_INCLUDE
from .integrations import Webhook

logger = logging.getLogger(__name__)

_INPUT_BLOCK_ID = AgentInputBlock().id
_OUTPUT_BLOCK_ID = AgentOutputBlock().id


class Link(BaseDbModel):
    source_id: str
@@ -72,12 +62,14 @@ class NodeModel(Node):
    webhook: Optional[Webhook] = None

    @staticmethod
    def from_db(node: AgentNode) -> "NodeModel":
    def from_db(node: AgentNode):
        if not node.AgentBlock:
            raise ValueError(f"Invalid node {node.id}, invalid AgentBlock.")
        obj = NodeModel(
            id=node.id,
            block_id=node.agentBlockId,
            input_default=type.convert(node.constantInput, dict[str, Any]),
            metadata=type.convert(node.metadata, dict[str, Any]),
            block_id=node.AgentBlock.id,
            input_default=json.loads(node.constantInput, target_type=dict[str, Any]),
            metadata=json.loads(node.metadata, target_type=dict[str, Any]),
            graph_id=node.agentGraphId,
            graph_version=node.agentGraphVersion,
            webhook_id=node.webhookId,
@@ -108,96 +100,42 @@ class NodeModel(Node):
Webhook.model_rebuild()


class GraphExecutionMeta(BaseDbModel):
class GraphExecution(BaseDbModel):
    execution_id: str
    started_at: datetime
    ended_at: datetime
    cost: Optional[int] = Field(..., description="Execution cost in credits")
    duration: float
    total_run_time: float
    status: ExecutionStatus
    graph_id: str
    graph_version: int
    preset_id: Optional[str]

    @staticmethod
    def from_db(_graph_exec: AgentGraphExecution):
    def from_db(execution: AgentGraphExecution):
        now = datetime.now(timezone.utc)
        start_time = _graph_exec.startedAt or _graph_exec.createdAt
        end_time = _graph_exec.updatedAt or now
        start_time = execution.startedAt or execution.createdAt
        end_time = execution.updatedAt or now
        duration = (end_time - start_time).total_seconds()
        total_run_time = duration

        try:
            stats = type.convert(_graph_exec.stats or {}, dict[str, Any])
            stats = json.loads(execution.stats or "{}", target_type=dict[str, Any])
        except ValueError:
            stats = {}

        duration = stats.get("walltime", duration)
        total_run_time = stats.get("nodes_walltime", total_run_time)

        return GraphExecutionMeta(
            id=_graph_exec.id,
            execution_id=_graph_exec.id,
        return GraphExecution(
            id=execution.id,
            execution_id=execution.id,
            started_at=start_time,
            ended_at=end_time,
            cost=stats.get("cost", None),
            duration=duration,
            total_run_time=total_run_time,
            status=ExecutionStatus(_graph_exec.executionStatus),
            graph_id=_graph_exec.agentGraphId,
            graph_version=_graph_exec.agentGraphVersion,
            preset_id=_graph_exec.agentPresetId,
        )


class GraphExecution(GraphExecutionMeta):
    inputs: dict[str, Any]
    outputs: dict[str, list[Any]]
    node_executions: list[ExecutionResult]

    @staticmethod
    def from_db(_graph_exec: AgentGraphExecution):
        if _graph_exec.AgentNodeExecutions is None:
            raise ValueError("Node executions must be included in query")

        graph_exec = GraphExecutionMeta.from_db(_graph_exec)

        node_executions = [
            ExecutionResult.from_db(ne) for ne in _graph_exec.AgentNodeExecutions
        ]

        inputs = {
            **{
                # inputs from Agent Input Blocks
                exec.input_data["name"]: exec.input_data["value"]
                for exec in node_executions
                if exec.block_id == _INPUT_BLOCK_ID
            },
            **{
                # input from webhook-triggered block
                "payload": exec.input_data["payload"]
                for exec in node_executions
                if (block := get_block(exec.block_id))
                and block.block_type in [BlockType.WEBHOOK, BlockType.WEBHOOK_MANUAL]
            },
        }

        outputs: dict[str, list] = defaultdict(list)
        for exec in node_executions:
            if exec.block_id == _OUTPUT_BLOCK_ID:
                outputs[exec.input_data["name"]].append(
                    exec.input_data.get("value", None)
                )

        return GraphExecution(
            **{
                field_name: getattr(graph_exec, field_name)
                for field_name in graph_exec.model_fields
            },
            inputs=inputs,
            outputs=outputs,
            node_executions=node_executions,
            status=ExecutionStatus(execution.executionStatus),
            graph_id=execution.agentGraphId,
            graph_version=execution.agentGraphVersion,
        )

@@ -316,39 +254,16 @@ class GraphModel(Graph):

    def validate_graph(self, for_run: bool = False):
        def sanitize(name):
            sanitized_name = name.split("_#_")[0].split("_@_")[0].split("_$_")[0]
            if sanitized_name.startswith("tools_^_"):
                return sanitized_name.split("_^_")[0]
            return sanitized_name

        # Validate smart decision maker nodes
        smart_decision_maker_nodes = set()
        agent_nodes = set()
        nodes_block = {
            node.id: block
            for node in self.nodes
            if (block := get_block(node.block_id)) is not None
        }

        for node in self.nodes:
            if (block := nodes_block.get(node.id)) is None:
                raise ValueError(f"Invalid block {node.block_id} for node #{node.id}")

            # Smart decision maker nodes
            if block.block_type == BlockType.AI:
                smart_decision_maker_nodes.add(node.id)
            # Agent nodes
            elif block.block_type == BlockType.AGENT:
                agent_nodes.add(node.id)
            return name.split("_#_")[0].split("_@_")[0].split("_$_")[0]

        input_links = defaultdict(list)

        for link in self.links:
            input_links[link.sink_id].append(link)

        # Nodes: required fields are filled or connected and dependencies are satisfied
        for node in self.nodes:
            if (block := nodes_block.get(node.id)) is None:
            block = get_block(node.block_id)
            if block is None:
                raise ValueError(f"Invalid block {node.block_id} for node #{node.id}")

            provided_inputs = set(
@@ -365,12 +280,9 @@
                )
                and (
                    for_run  # Skip input completion validation, unless when executing.
                    or block.block_type
                    in [
                        BlockType.INPUT,
                        BlockType.OUTPUT,
                        BlockType.AGENT,
                    ]
                    or block.block_type == BlockType.INPUT
                    or block.block_type == BlockType.OUTPUT
                    or block.block_type == BlockType.AGENT
                )
            ):
                raise ValueError(
@@ -419,20 +331,20 @@
        for link in self.links:
            source = (link.source_id, link.source_name)
            sink = (link.sink_id, link.sink_name)
            prefix = f"Link {source} <-> {sink}"
            suffix = f"Link {source} <-> {sink}"

            for i, (node_id, name) in enumerate([source, sink]):
                node = node_map.get(node_id)
                if not node:
                    raise ValueError(
                        f"{prefix}, {node_id} is invalid node id, available nodes: {node_map.keys()}"
                        f"{suffix}, {node_id} is invalid node id, available nodes: {node_map.keys()}"
                    )

                block = get_block(node.block_id)
                if not block:
                    blocks = {v().id: v().name for v in get_blocks().values()}
                    raise ValueError(
                        f"{prefix}, {node.block_id} is invalid block id, available blocks: {blocks}"
                        f"{suffix}, {node.block_id} is invalid block id, available blocks: {blocks}"
                    )

                sanitized_name = sanitize(name)
@@ -440,18 +352,18 @@
                if i == 0:
                    fields = (
                        block.output_schema.get_fields()
                        if block.block_type not in [BlockType.AGENT]
                        if block.block_type != BlockType.AGENT
                        else vals.get("output_schema", {}).get("properties", {}).keys()
                    )
                else:
                    fields = (
                        block.input_schema.get_fields()
                        if block.block_type not in [BlockType.AGENT]
                        if block.block_type != BlockType.AGENT
                        else vals.get("input_schema", {}).get("properties", {}).keys()
                    )
                if sanitized_name not in fields and not name.startswith("tools_^_"):
                if sanitized_name not in fields:
                    fields_msg = f"Allowed fields: {fields}"
                    raise ValueError(f"{prefix}, `{name}` invalid, {fields_msg}")
                    raise ValueError(f"{suffix}, `{name}` invalid, {fields_msg}")

        if is_static_output_block(link.source_id):
            link.is_static = True  # Each value block output should be static.
@@ -484,9 +396,11 @@
        if for_export:
            # Remove credentials from node input
            if node.constantInput:
                constant_input = type.convert(node.constantInput, dict[str, Any])
                constant_input = json.loads(
                    node.constantInput, target_type=dict[str, Any]
                )
                constant_input = GraphModel._hide_node_input_credentials(constant_input)
                node.constantInput = Json(constant_input)
                node.constantInput = json.dumps(constant_input)

            # Remove webhook info
            node.webhookId = None
@@ -597,81 +511,25 @@ async def get_graphs(
    return graph_models


# TODO: move execution stuff to .execution
async def get_graphs_executions(user_id: str) -> list[GraphExecutionMeta]:
async def get_executions(user_id: str) -> list[GraphExecution]:
    executions = await AgentGraphExecution.prisma().find_many(
        where={"isDeleted": False, "userId": user_id},
        where={"userId": user_id},
        order={"createdAt": "desc"},
    )
    return [GraphExecutionMeta.from_db(execution) for execution in executions]


async def get_graph_executions(graph_id: str, user_id: str) -> list[GraphExecutionMeta]:
    executions = await AgentGraphExecution.prisma().find_many(
        where={"agentGraphId": graph_id, "isDeleted": False, "userId": user_id},
        order={"createdAt": "desc"},
    )
    return [GraphExecutionMeta.from_db(execution) for execution in executions]


async def get_execution_meta(
    user_id: str, execution_id: str
) -> GraphExecutionMeta | None:
    execution = await AgentGraphExecution.prisma().find_first(
        where={"id": execution_id, "isDeleted": False, "userId": user_id}
    )
    return GraphExecutionMeta.from_db(execution) if execution else None
    return [GraphExecution.from_db(execution) for execution in executions]


async def get_execution(user_id: str, execution_id: str) -> GraphExecution | None:
    execution = await AgentGraphExecution.prisma().find_first(
        where={"id": execution_id, "isDeleted": False, "userId": user_id},
        include={
            "AgentNodeExecutions": {
                "include": {"AgentNode": True, "Input": True, "Output": True},
                "order_by": [
                    {"queuedTime": "asc"},
                    {  # Fallback: Incomplete execs have no queuedTime.
                        "addedTime": "asc"
                    },
                ],
            },
        },
        where={"id": execution_id, "userId": user_id}
    )
    return GraphExecution.from_db(execution) if execution else None


async def get_graph_metadata(graph_id: str, version: int | None = None) -> Graph | None:
    where_clause: AgentGraphWhereInput = {
        "id": graph_id,
    }

    if version is not None:
        where_clause["version"] = version

    graph = await AgentGraph.prisma().find_first(
        where=where_clause,
        include=AGENT_GRAPH_INCLUDE,
        order={"version": "desc"},
    )

    if not graph:
        return None

    return Graph(
        id=graph.id,
        name=graph.name or "",
        description=graph.description or "",
        version=graph.version,
        is_active=graph.isActive,
        is_template=graph.isTemplate,
    )


async def get_graph(
    graph_id: str,
    version: int | None = None,
    template: bool = False,  # note: currently not in use; TODO: remove from DB entirely
    template: bool = False,
    user_id: str | None = None,
    for_export: bool = False,
) -> GraphModel | None:
@@ -685,47 +543,21 @@ async def get_graph(
    where_clause: AgentGraphWhereInput = {
        "id": graph_id,
    }

    if version is not None:
        where_clause["version"] = version
    elif not template:
        where_clause["isActive"] = True

    # TODO: Fix hack workaround to get adding store agents to work
    if user_id is not None and not template:
        where_clause["userId"] = user_id

    graph = await AgentGraph.prisma().find_first(
        where=where_clause,
        include=AGENT_GRAPH_INCLUDE,
        order={"version": "desc"},
    )

    # For access, the graph must be owned by the user or listed in the store
    if graph is None or (
        graph.userId != user_id
        and not (
            await StoreListingVersion.prisma().find_first(
                where={
                    "agentId": graph_id,
                    "agentVersion": version or graph.version,
                    "isDeleted": False,
                    "StoreListing": {"is": {"isApproved": True}},
                }
            )
        )
    ):
        return None

    return GraphModel.from_db(graph, for_export)


async def get_connected_output_nodes(node_id: str) -> list[tuple[Link, Node]]:
    links = await AgentNodeLink.prisma().find_many(
        where={"agentNodeSourceId": node_id},
        include={"AgentNodeSink": {"include": AGENT_NODE_INCLUDE}},  # type: ignore
    )
    return [
        (Link.from_db(link), NodeModel.from_db(link.AgentNodeSink))
        for link in links
        if link.AgentNodeSink
    ]
    return GraphModel.from_db(graph, for_export) if graph else None


async def set_graph_active_version(graph_id: str, version: int, user_id: str) -> None:
@@ -780,7 +612,7 @@ async def create_graph(graph: Graph, user_id: str) -> GraphModel:
        await __create_graph(tx, graph, user_id)

    if created_graph := await get_graph(
        graph.id, graph.version, template=graph.is_template, user_id=user_id
        graph.id, graph.version, graph.is_template, user_id=user_id
    ):
        return created_graph

@@ -802,8 +634,8 @@ async def __create_graph(tx, graph: Graph, user_id: str):
            {
                "id": node.id,
                "agentBlockId": node.block_id,
                "constantInput": Json(node.input_default),
                "metadata": Json(node.metadata),
                "constantInput": json.dumps(node.input_default),
                "metadata": json.dumps(node.metadata),
            }
            for node in graph.nodes
        ]
@@ -865,11 +697,9 @@ async def fix_llm_provider_credentials():

    store = IntegrationCredentialsStore()

    broken_nodes = []
    try:
        broken_nodes = await prisma.get_client().query_raw(
            """
            SELECT graph."userId" user_id,
    broken_nodes = await prisma.get_client().query_raw(
        """
        SELECT graph."userId" user_id,
               node.id node_id,
               node."constantInput" node_preset_input
        FROM platform."AgentNode" node
@@ -878,10 +708,8 @@ async def fix_llm_provider_credentials():
        WHERE node."constantInput"::jsonb->'credentials'->>'provider' = 'llm'
        ORDER BY graph."userId";
        """
        )
        logger.info(f"Fixing LLM credential inputs on {len(broken_nodes)} nodes")
    except Exception as e:
        logger.error(f"Error fixing LLM credential inputs: {e}")
    )
    logger.info(f"Fixing LLM credential inputs on {len(broken_nodes)} nodes")

    user_id: str = ""
    user_integrations = None
@@ -894,7 +722,7 @@ async def fix_llm_provider_credentials():
            raise RuntimeError(f"Impossible state while processing node {node}")

        node_id: str = node["node_id"]
        node_preset_input: dict = node["node_preset_input"]
        node_preset_input: dict = json.loads(node["node_preset_input"])
        credentials_meta: dict = node_preset_input["credentials"]

        credentials = next(
@@ -930,5 +758,5 @@ async def fix_llm_provider_credentials():
        store.update_creds(user_id, credentials)
        await AgentNode.prisma().update(
            where={"id": node_id},
            data={"constantInput": Json(node_preset_input)},
            data={"constantInput": json.dumps(node_preset_input)},
        )

@@ -32,15 +32,3 @@ GRAPH_EXECUTION_INCLUDE: prisma.types.AgentGraphExecutionInclude = {
INTEGRATION_WEBHOOK_INCLUDE: prisma.types.IntegrationWebhookInclude = {
    "AgentNodes": {"include": AGENT_NODE_INCLUDE}  # type: ignore
}


def library_agent_include(user_id: str) -> prisma.types.LibraryAgentInclude:
    return {
        "Agent": {
            "include": {
                **AGENT_GRAPH_INCLUDE,
                "AgentGraphExecution": {"where": {"userId": user_id}},
            }
        },
        "Creator": True,
    }

@@ -1,8 +1,6 @@
from __future__ import annotations

import base64
import logging
from datetime import datetime, timezone
from typing import (
    TYPE_CHECKING,
    Annotated,
@@ -18,7 +16,6 @@ from typing import (
)
from uuid import uuid4

from prisma.enums import CreditTransactionType
from pydantic import (
    BaseModel,
    ConfigDict,
@@ -202,42 +199,27 @@ class OAuth2Credentials(_BaseCredentials):
    scopes: list[str]
    metadata: dict[str, Any] = Field(default_factory=dict)

    def auth_header(self) -> str:
    def bearer(self) -> str:
        return f"Bearer {self.access_token.get_secret_value()}"


class APIKeyCredentials(_BaseCredentials):
    type: Literal["api_key"] = "api_key"
    api_key: SecretStr
    expires_at: Optional[int] = Field(
        default=None,
        description="Unix timestamp (seconds) indicating when the API key expires (if at all)",
    )
    expires_at: Optional[int]
    """Unix timestamp (seconds) indicating when the API key expires (if at all)"""

    def auth_header(self) -> str:
    def bearer(self) -> str:
        return f"Bearer {self.api_key.get_secret_value()}"


class UserPasswordCredentials(_BaseCredentials):
    type: Literal["user_password"] = "user_password"
    username: SecretStr
    password: SecretStr

    def auth_header(self) -> str:
        # Converting the string to bytes using encode()
        # Base64 encoding it with base64.b64encode()
        # Converting the resulting bytes back to a string with decode()
        return f"Basic {base64.b64encode(f'{self.username.get_secret_value()}:{self.password.get_secret_value()}'.encode()).decode()}"

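A quick worked example of the Basic scheme above, with illustrative credentials:

    import base64
    assert base64.b64encode(b"user:pass").decode() == "dXNlcjpwYXNz"
    # auth_header() would therefore return "Basic dXNlcjpwYXNz"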
Credentials = Annotated[
    OAuth2Credentials | APIKeyCredentials | UserPasswordCredentials,
    OAuth2Credentials | APIKeyCredentials,
    Field(discriminator="type"),
]


CredentialsType = Literal["api_key", "oauth2", "user_password"]
CredentialsType = Literal["api_key", "oauth2"]

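Because the union discriminates on the type field, a raw dict routes to the right model automatically. A minimal sketch using pydantic's TypeAdapter; the _BaseCredentials fields (id, provider, title) are assumptions, since that class is not shown in this diff:

    from pydantic import TypeAdapter

    raw = {"id": "c1", "provider": "openai", "title": "demo",
           "type": "api_key", "api_key": "sk-demo", "expires_at": None}
    creds = TypeAdapter(Credentials).validate_python(raw)
    assert isinstance(creds, APIKeyCredentials)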
class OAuthState(BaseModel):
@@ -365,74 +347,3 @@ def CredentialsField(

class ContributorDetails(BaseModel):
    name: str = Field(title="Name", description="The name of the contributor.")


class AutoTopUpConfig(BaseModel):
    amount: int
    """Amount of credits to top up."""
    threshold: int
    """Threshold to trigger auto top up."""


class UserTransaction(BaseModel):
    transaction_key: str = ""
    transaction_time: datetime = datetime.min.replace(tzinfo=timezone.utc)
    transaction_type: CreditTransactionType = CreditTransactionType.USAGE
    amount: int = 0
    balance: int = 0
    description: str | None = None
    usage_graph_id: str | None = None
    usage_execution_id: str | None = None
    usage_node_count: int = 0
    usage_start_time: datetime = datetime.max.replace(tzinfo=timezone.utc)


class TransactionHistory(BaseModel):
    transactions: list[UserTransaction]
    next_transaction_time: datetime | None


class RefundRequest(BaseModel):
    id: str
    user_id: str
    transaction_key: str
    amount: int
    reason: str
    result: str | None = None
    status: str
    created_at: datetime
    updated_at: datetime


class NodeExecutionStats(BaseModel):
    """Execution statistics for a node execution."""

    class Config:
        arbitrary_types_allowed = True

    error: Optional[Exception | str] = None
    walltime: float = 0
    cputime: float = 0
    cost: float = 0
    input_size: int = 0
    output_size: int = 0
    llm_call_count: int = 0
    llm_retry_count: int = 0
    input_token_count: int = 0
    output_token_count: int = 0


class GraphExecutionStats(BaseModel):
    """Execution statistics for a graph execution."""

    class Config:
        arbitrary_types_allowed = True

    error: Optional[Exception | str] = None
    walltime: float = 0
    cputime: float = 0
    nodes_walltime: float = 0
    nodes_cputime: float = 0
    node_count: int = 0
    node_error_count: int = 0
    cost: float = 0

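For intuition, error is the one field here that is not JSON-native (hence arbitrary_types_allowed), which is why the update_* functions earlier in this diff stringify it before persisting:

    stats = NodeExecutionStats(error=ValueError("boom"), walltime=1.2)
    data = stats.model_dump()
    if isinstance(data["error"], Exception):
        data["error"] = str(data["error"])  # now JSON-serializable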
@@ -1,406 +0,0 @@
import logging
from datetime import datetime, timedelta
from enum import Enum
from typing import Annotated, Any, Generic, Optional, TypeVar, Union

from prisma import Json
from prisma.enums import NotificationType
from prisma.models import NotificationEvent, UserNotificationBatch
from prisma.types import UserNotificationBatchWhereInput

# from backend.notifications.models import NotificationEvent
from pydantic import BaseModel, EmailStr, Field, field_validator

from backend.server.v2.store.exceptions import DatabaseError

from .db import transaction

logger = logging.getLogger(__name__)


T_co = TypeVar("T_co", bound="BaseNotificationData", covariant=True)


class QueueType(Enum):
    IMMEDIATE = "immediate"  # Send right away (errors, critical notifications)
    HOURLY = "hourly"  # Batch for up to an hour (usage reports)
    DAILY = "daily"  # Daily digest (summary notifications)
    BACKOFF = "backoff"  # Backoff strategy (exponential backoff)
    ADMIN = "admin"  # Admin notifications (errors, critical notifications)


class BaseNotificationData(BaseModel):
    pass


class AgentRunData(BaseNotificationData):
    agent_name: str
    credits_used: float
    execution_time: float
    node_count: int = Field(..., description="Number of nodes executed")
    graph_id: str
    outputs: list[dict[str, Any]] = Field(..., description="Outputs of the agent")


class ZeroBalanceData(BaseNotificationData):
    last_transaction: float
    last_transaction_time: datetime
    top_up_link: str


class LowBalanceData(BaseNotificationData):
    agent_name: str = Field(..., description="Name of the agent")
    current_balance: float = Field(
        ..., description="Current balance in credits (100 = $1)"
    )
    billing_page_link: str = Field(..., description="Link to billing page")
    shortfall: float = Field(..., description="Amount of credits needed to continue")


class BlockExecutionFailedData(BaseNotificationData):
    block_name: str
    block_id: str
    error_message: str
    graph_id: str
    node_id: str
    execution_id: str


class ContinuousAgentErrorData(BaseNotificationData):
    agent_name: str
    error_message: str
    graph_id: str
    execution_id: str
    start_time: datetime
    error_time: datetime
    attempts: int = Field(..., description="Number of retry attempts made")


class BaseSummaryData(BaseNotificationData):
    total_credits_used: float
    total_executions: int
    most_used_agent: str
    total_execution_time: float
    successful_runs: int
    failed_runs: int
    average_execution_time: float
    cost_breakdown: dict[str, float]


class DailySummaryData(BaseSummaryData):
    date: datetime


class WeeklySummaryData(BaseSummaryData):
    start_date: datetime
    end_date: datetime
    week_number: int
    year: int


class MonthlySummaryData(BaseSummaryData):
    month: int
    year: int


class RefundRequestData(BaseNotificationData):
    user_id: str
    user_name: str
    user_email: str
    transaction_id: str
    refund_request_id: str
    reason: str
    amount: float
    balance: int


NotificationData = Annotated[
    Union[
        AgentRunData,
        ZeroBalanceData,
        LowBalanceData,
        BlockExecutionFailedData,
        ContinuousAgentErrorData,
        MonthlySummaryData,
        WeeklySummaryData,
        DailySummaryData,
        RefundRequestData,
    ],
    Field(discriminator="type"),
]


class NotificationEventDTO(BaseModel):
    user_id: str
    type: NotificationType
    data: dict
    created_at: datetime = Field(default_factory=datetime.now)
    retry_count: int = 0


class NotificationEventModel(BaseModel, Generic[T_co]):
    user_id: str
    type: NotificationType
    data: T_co
    created_at: datetime = Field(default_factory=datetime.now)

    @property
    def strategy(self) -> QueueType:
        return NotificationTypeOverride(self.type).strategy

    @field_validator("type", mode="before")
    def uppercase_type(cls, v):
        if isinstance(v, str):
            return v.upper()
        return v

    @property
    def template(self) -> str:
        return NotificationTypeOverride(self.type).template


def get_data_type(
    notification_type: NotificationType,
) -> type[BaseNotificationData]:
    return {
        NotificationType.AGENT_RUN: AgentRunData,
        NotificationType.ZERO_BALANCE: ZeroBalanceData,
        NotificationType.LOW_BALANCE: LowBalanceData,
        NotificationType.BLOCK_EXECUTION_FAILED: BlockExecutionFailedData,
        NotificationType.CONTINUOUS_AGENT_ERROR: ContinuousAgentErrorData,
        NotificationType.DAILY_SUMMARY: DailySummaryData,
        NotificationType.WEEKLY_SUMMARY: WeeklySummaryData,
        NotificationType.MONTHLY_SUMMARY: MonthlySummaryData,
        NotificationType.REFUND_REQUEST: RefundRequestData,
        NotificationType.REFUND_PROCESSED: RefundRequestData,
    }[notification_type]


class NotificationBatch(BaseModel):
    user_id: str
    events: list[NotificationEvent]
    strategy: QueueType
    last_update: datetime = datetime.now()


class NotificationResult(BaseModel):
    success: bool
    message: Optional[str] = None


class NotificationTypeOverride:
    def __init__(self, notification_type: NotificationType):
        self.notification_type = notification_type

    @property
    def strategy(self) -> QueueType:
        BATCHING_RULES = {
            # These are batched by the notification service
            NotificationType.AGENT_RUN: QueueType.IMMEDIATE,
            # These are batched by the notification service, but with a backoff strategy
            NotificationType.ZERO_BALANCE: QueueType.BACKOFF,
            NotificationType.LOW_BALANCE: QueueType.IMMEDIATE,
            NotificationType.BLOCK_EXECUTION_FAILED: QueueType.BACKOFF,
            NotificationType.CONTINUOUS_AGENT_ERROR: QueueType.BACKOFF,
            NotificationType.DAILY_SUMMARY: QueueType.DAILY,
            NotificationType.WEEKLY_SUMMARY: QueueType.DAILY,
            NotificationType.MONTHLY_SUMMARY: QueueType.DAILY,
            NotificationType.REFUND_REQUEST: QueueType.ADMIN,
            NotificationType.REFUND_PROCESSED: QueueType.ADMIN,
        }
        return BATCHING_RULES.get(self.notification_type, QueueType.IMMEDIATE)

    @property
    def template(self) -> str:
        """Returns template name for this notification type"""
        return {
            NotificationType.AGENT_RUN: "agent_run.html",
            NotificationType.ZERO_BALANCE: "zero_balance.html",
            NotificationType.LOW_BALANCE: "low_balance.html",
            NotificationType.BLOCK_EXECUTION_FAILED: "block_failed.html",
            NotificationType.CONTINUOUS_AGENT_ERROR: "agent_error.html",
            NotificationType.DAILY_SUMMARY: "daily_summary.html",
            NotificationType.WEEKLY_SUMMARY: "weekly_summary.html",
            NotificationType.MONTHLY_SUMMARY: "monthly_summary.html",
            NotificationType.REFUND_REQUEST: "refund_request.html",
            NotificationType.REFUND_PROCESSED: "refund_processed.html",
        }[self.notification_type]

    @property
    def subject(self) -> str:
        return {
            NotificationType.AGENT_RUN: "Agent Run Report",
            NotificationType.ZERO_BALANCE: "You're out of credits!",
            NotificationType.LOW_BALANCE: "Low Balance Warning!",
            NotificationType.BLOCK_EXECUTION_FAILED: "Uh oh! Block Execution Failed",
            NotificationType.CONTINUOUS_AGENT_ERROR: "Shoot! Continuous Agent Error",
            NotificationType.DAILY_SUMMARY: "Here's your daily summary!",
            NotificationType.WEEKLY_SUMMARY: "Look at all the cool stuff you did last week!",
            NotificationType.MONTHLY_SUMMARY: "We did a lot this month!",
            NotificationType.REFUND_REQUEST: "[ACTION REQUIRED] You got a ${{data.amount / 100}} refund request from {{data.user_name}}",
            NotificationType.REFUND_PROCESSED: "Refund for ${{data.amount / 100}} to {{data.user_name}} has been processed",
        }[self.notification_type]


class NotificationPreferenceDTO(BaseModel):
    email: EmailStr = Field(..., description="User's email address")
    preferences: dict[NotificationType, bool] = Field(
        ..., description="Which notifications the user wants"
    )
    daily_limit: int = Field(..., description="Max emails per day")


class NotificationPreference(BaseModel):
    user_id: str
    email: EmailStr
    preferences: dict[NotificationType, bool] = Field(
        default_factory=dict, description="Which notifications the user wants"
    )
    daily_limit: int = 10  # Max emails per day
    emails_sent_today: int = 0
    last_reset_date: datetime = Field(default_factory=datetime.now)


def get_batch_delay(notification_type: NotificationType) -> timedelta:
    return {
        NotificationType.AGENT_RUN: timedelta(seconds=1),
        NotificationType.ZERO_BALANCE: timedelta(minutes=60),
        NotificationType.LOW_BALANCE: timedelta(minutes=60),
        NotificationType.BLOCK_EXECUTION_FAILED: timedelta(minutes=60),
        NotificationType.CONTINUOUS_AGENT_ERROR: timedelta(minutes=60),
    }[notification_type]

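A small usage sketch of the routing helpers above, using values straight from the mappings:

    override = NotificationTypeOverride(NotificationType.ZERO_BALANCE)
    assert override.strategy == QueueType.BACKOFF
    assert override.template == "zero_balance.html"
    assert get_batch_delay(NotificationType.ZERO_BALANCE) == timedelta(minutes=60)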
async def create_or_add_to_user_notification_batch(
    user_id: str,
    notification_type: NotificationType,
    data: str,  # type: 'NotificationEventModel'
) -> dict:
    try:
        logger.info(
            f"Creating or adding to notification batch for {user_id} with type {notification_type} and data {data}"
        )

        notification_data = NotificationEventModel[
            get_data_type(notification_type)
        ].model_validate_json(data)

        # Serialize the data
        json_data: Json = Json(notification_data.data.model_dump_json())

        # First try to find existing batch
        existing_batch = await UserNotificationBatch.prisma().find_unique(
            where={
                "userId_type": {
                    "userId": user_id,
                    "type": notification_type,
                }
            },
            include={"notifications": True},
        )

        if not existing_batch:
            async with transaction() as tx:
                notification_event = await tx.notificationevent.create(
                    data={
                        "type": notification_type,
                        "data": json_data,
                    }
                )

                # Create new batch
                resp = await tx.usernotificationbatch.create(
                    data={
                        "userId": user_id,
                        "type": notification_type,
                        "notifications": {"connect": [{"id": notification_event.id}]},
                    },
                    include={"notifications": True},
                )
                return resp.model_dump()
        else:
            async with transaction() as tx:
                notification_event = await tx.notificationevent.create(
                    data={
                        "type": notification_type,
                        "data": json_data,
                        "UserNotificationBatch": {"connect": {"id": existing_batch.id}},
                    }
                )
                # Add to existing batch
                resp = await tx.usernotificationbatch.update(
                    where={"id": existing_batch.id},
                    data={
                        "notifications": {"connect": [{"id": notification_event.id}]}
                    },
                    include={"notifications": True},
                )
                if not resp:
                    raise DatabaseError(
                        f"Failed to add notification event {notification_event.id} to existing batch {existing_batch.id}"
                    )
                return resp.model_dump()
    except Exception as e:
        raise DatabaseError(
            f"Failed to create or add to notification batch for user {user_id} and type {notification_type}: {e}"
        ) from e


async def get_user_notification_last_message_in_batch(
    user_id: str,
    notification_type: NotificationType,
) -> NotificationEvent | None:
    try:
        batch = await UserNotificationBatch.prisma().find_first(
            where={"userId": user_id, "type": notification_type},
            order={"createdAt": "desc"},
        )
        if not batch:
            return None
        if not batch.notifications:
            return None
        return batch.notifications[-1]
    except Exception as e:
        raise DatabaseError(
            f"Failed to get user notification last message in batch for user {user_id} and type {notification_type}: {e}"
        ) from e


async def empty_user_notification_batch(
    user_id: str, notification_type: NotificationType
) -> None:
    try:
        async with transaction() as tx:
            await tx.notificationevent.delete_many(
                where={
                    "UserNotificationBatch": {
                        "is": {"userId": user_id, "type": notification_type}
                    }
                }
            )

            await tx.usernotificationbatch.delete_many(
                where=UserNotificationBatchWhereInput(
                    userId=user_id,
                    type=notification_type,
                )
            )
    except Exception as e:
        raise DatabaseError(
            f"Failed to empty user notification batch for user {user_id} and type {notification_type}: {e}"
        ) from e


async def get_user_notification_batch(
    user_id: str,
    notification_type: NotificationType,
) -> UserNotificationBatch | None:
    try:
        return await UserNotificationBatch.prisma().find_first(
            where={"userId": user_id, "type": notification_type},
            include={"notifications": True},
        )
    except Exception as e:
        raise DatabaseError(
            f"Failed to get user notification batch for user {user_id} and type {notification_type}: {e}"
        ) from e
@@ -1,247 +0,0 @@
import re
from typing import Any, Optional

import prisma
import pydantic
from prisma import Json
from prisma.models import (
    AgentGraph,
    AgentGraphExecution,
    StoreListingVersion,
    UserOnboarding,
)
from prisma.types import UserOnboardingUpdateInput

from backend.server.v2.library.db import set_is_deleted_for_library_agent
from backend.server.v2.store.db import get_store_agent_details
from backend.server.v2.store.model import StoreAgentDetails

# Mapping from user reason id to the categories to search when choosing agents to show
REASON_MAPPING: dict[str, list[str]] = {
    "content_marketing": ["writing", "marketing", "creative"],
    "business_workflow_automation": ["business", "productivity"],
    "data_research": ["data", "research"],
    "ai_innovation": ["development", "research"],
    "personal_productivity": ["personal", "productivity"],
}


class UserOnboardingUpdate(pydantic.BaseModel):
    step: int
    usageReason: Optional[str] = None
    integrations: list[str] = pydantic.Field(default_factory=list)
    otherIntegrations: Optional[str] = None
    selectedAgentCreator: Optional[str] = None
    selectedAgentSlug: Optional[str] = None
    agentInput: Optional[dict[str, Any]] = None
    isCompleted: bool = False


async def get_user_onboarding(user_id: str):
    return await UserOnboarding.prisma().upsert(
        where={"userId": user_id},
        data={
            "create": {"userId": user_id},  # type: ignore
            "update": {},
        },
    )


async def update_user_onboarding(user_id: str, data: UserOnboardingUpdate):
    # Get the user onboarding data
    user_onboarding = await get_user_onboarding(user_id)
    update: UserOnboardingUpdateInput = {
        "step": data.step,
        "isCompleted": data.isCompleted,
    }
    if data.usageReason:
        update["usageReason"] = data.usageReason
    if data.integrations:
        update["integrations"] = data.integrations
    if data.otherIntegrations:
        update["otherIntegrations"] = data.otherIntegrations
    if data.selectedAgentSlug and data.selectedAgentCreator:
        update["selectedAgentSlug"] = data.selectedAgentSlug
        update["selectedAgentCreator"] = data.selectedAgentCreator
        # Check if slug changes
        if (
            user_onboarding.selectedAgentCreator
            and user_onboarding.selectedAgentSlug
            and user_onboarding.selectedAgentSlug != data.selectedAgentSlug
        ):
            store_agent = await get_store_agent_details(
                user_onboarding.selectedAgentCreator, user_onboarding.selectedAgentSlug
            )
            store_listing = await StoreListingVersion.prisma().find_unique_or_raise(
                where={"id": store_agent.store_listing_version_id}
            )
            agent_graph = await AgentGraph.prisma().find_first(
                where={"id": store_listing.agentId, "version": store_listing.version}
            )
            execution_count = await AgentGraphExecution.prisma().count(
                where={
                    "userId": user_id,
                    "agentGraphId": store_listing.agentId,
                    "agentGraphVersion": store_listing.version,
                }
            )
            # If there was no execution and the graph doesn't belong to the user,
            # mark the agent as deleted
            if execution_count == 0 and agent_graph and agent_graph.userId != user_id:
                await set_is_deleted_for_library_agent(
                    user_id, store_listing.agentId, store_listing.agentVersion, True
                )
    if data.agentInput:
        update["agentInput"] = Json(data.agentInput)

    return await UserOnboarding.prisma().upsert(
        where={"userId": user_id},
        data={
            "create": {"userId": user_id, **update},  # type: ignore
            "update": update,
        },
    )


def clean_and_split(text: str) -> list[str]:
    """
    Removes all special characters from a string, truncates it to 100 characters,
    and splits it by whitespace and commas.

    Args:
        text (str): The input string.

    Returns:
        list[str]: A list of cleaned words.
    """
    # Remove all special characters (keep only alphanumerics, whitespace, and commas)
    cleaned_text = re.sub(r"[^a-zA-Z0-9\s,]", "", text.strip()[:100])

    # Split by whitespace and commas
    words = re.split(r"[\s,]+", cleaned_text)

    # Remove empty strings from the list
    words = [word.lower() for word in words if word]

    return words
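A worked example of the helper above:

    assert clean_and_split("Automate emails, reports & DMs!") == [
        "automate", "emails", "reports", "dms"
    ]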
def calculate_points(
    agent, categories: list[str], custom: list[str], integrations: list[str]
) -> int:
    """
    Calculates the total points for an agent based on the specified criteria.

    Args:
        agent: The agent object.
        categories (list[str]): List of categories to match.
        custom (list[str]): Custom words to match in the description.
        integrations (list[str]): Integration names to match in the description.

    Returns:
        int: Total points for the agent.
    """
    points = 0

    # 1. Category Matches
    matched_categories = sum(
        1 for category in categories if category in agent.categories
    )
    points += matched_categories * 100

    # 2. Description Word Matches
    description_words = agent.description.split()  # Split description into words
    matched_words = sum(1 for word in custom if word in description_words)
    points += matched_words * 100

    matched_words = sum(1 for word in integrations if word in description_words)
    points += matched_words * 50

    # 3. Featured Bonus
    if agent.featured:
        points += 50

    # 4. Rating Bonus
    points += agent.rating * 10

    # 5. Runs Bonus
    runs_points = min(agent.runs / 1000 * 100, 100)  # Cap at 100 points
    points += runs_points

    return int(points)

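For a concrete feel of the weights (numbers illustrative): an agent matching two categories (200), with one custom word in its description (100), featured (50), rated 4.5 (45), and with 500 runs (50) scores int(200 + 100 + 50 + 45 + 50) = 445.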
async def get_recommended_agents(user_id: str) -> list[StoreAgentDetails]:
|
||||
user_onboarding = await get_user_onboarding(user_id)
|
||||
categories = REASON_MAPPING.get(user_onboarding.usageReason or "", [])
|
||||
|
||||
where_clause: dict[str, Any] = {}
|
||||
|
||||
custom = clean_and_split((user_onboarding.usageReason or "").lower())
|
||||
|
||||
if categories:
|
||||
where_clause["OR"] = [
|
||||
{"categories": {"has": category}} for category in categories
|
||||
]
|
||||
else:
|
||||
where_clause["OR"] = [
|
||||
{"description": {"contains": word, "mode": "insensitive"}}
|
||||
for word in custom
|
||||
]
|
||||
|
||||
where_clause["OR"] += [
|
||||
{"description": {"contains": word, "mode": "insensitive"}}
|
||||
for word in user_onboarding.integrations
|
||||
]
|
||||
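# Rough shape of the resulting Prisma filter (values are illustrative only):
#
#   {"OR": [
#       {"categories": {"has": "productivity"}},
#       {"description": {"contains": "notion", "mode": "insensitive"}},
#   ]}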
|
||||
agents = await prisma.models.StoreAgent.prisma().find_many(
|
||||
where=prisma.types.StoreAgentWhereInput(**where_clause),
|
||||
order=[
|
||||
{"featured": "desc"},
|
||||
{"runs": "desc"},
|
||||
{"rating": "desc"},
|
||||
],
|
||||
)
|
||||
|
||||
if len(agents) < 2:
|
||||
agents += await prisma.models.StoreAgent.prisma().find_many(
|
||||
where={
|
||||
"listing_id": {"not_in": [agent.listing_id for agent in agents]},
|
||||
},
|
||||
order=[
|
||||
{"featured": "desc"},
|
||||
{"runs": "desc"},
|
||||
{"rating": "desc"},
|
||||
],
|
||||
take=2 - len(agents),
|
||||
)
|
||||
|
||||
# Calculate points for the first 50 agents and choose the top 2
|
||||
agent_points = []
|
||||
for agent in agents[:50]:
|
||||
points = calculate_points(
|
||||
agent, categories, custom, user_onboarding.integrations
|
||||
)
|
||||
agent_points.append((agent, points))
|
||||
|
||||
agent_points.sort(key=lambda x: x[1], reverse=True)
|
||||
recommended_agents = [agent for agent, _ in agent_points[:2]]
|
||||
|
||||
return [
|
||||
StoreAgentDetails(
|
||||
store_listing_version_id=agent.storeListingVersionId,
|
||||
slug=agent.slug,
|
||||
agent_name=agent.agent_name,
|
||||
agent_video=agent.agent_video or "",
|
||||
agent_image=agent.agent_image,
|
||||
creator=agent.creator_username,
|
||||
creator_avatar=agent.creator_avatar,
|
||||
sub_heading=agent.sub_heading,
|
||||
description=agent.description,
|
||||
categories=agent.categories,
|
||||
runs=agent.runs,
|
||||
rating=agent.rating,
|
||||
versions=agent.versions,
|
||||
last_updated=agent.updated_at,
|
||||
)
|
||||
for agent in recommended_agents
|
||||
]
|
||||
@@ -1,296 +0,0 @@
|
||||
import logging
|
||||
from abc import ABC, abstractmethod
|
||||
from enum import Enum
|
||||
from typing import Awaitable, Optional
|
||||
|
||||
import aio_pika
|
||||
import pika
|
||||
import pika.adapters.blocking_connection
|
||||
from pika.spec import BasicProperties
|
||||
from pydantic import BaseModel
|
||||
|
||||
from backend.util.retry import conn_retry
|
||||
from backend.util.settings import Settings
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ExchangeType(str, Enum):
|
||||
DIRECT = "direct"
|
||||
FANOUT = "fanout"
|
||||
TOPIC = "topic"
|
||||
HEADERS = "headers"
|
||||
|
||||
|
||||
class Exchange(BaseModel):
|
||||
name: str
|
||||
type: ExchangeType
|
||||
durable: bool = True
|
||||
auto_delete: bool = False
|
||||
|
||||
|
||||
class Queue(BaseModel):
|
||||
name: str
|
||||
durable: bool = True
|
||||
auto_delete: bool = False
|
||||
# Optional exchange binding configuration
|
||||
exchange: Optional[Exchange] = None
|
||||
routing_key: Optional[str] = None
|
||||
arguments: Optional[dict] = None
|
||||
|
||||
|
||||
class RabbitMQConfig(BaseModel):
|
||||
"""Configuration for a RabbitMQ service instance"""
|
||||
|
||||
vhost: str = "/"
|
||||
exchanges: list[Exchange]
|
||||
queues: list[Queue]
|
||||
|
||||
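# A minimal configuration sketch (exchange/queue names are assumptions, not
# taken from this codebase):
#
#   events_exchange = Exchange(name="events", type=ExchangeType.TOPIC)
#   config = RabbitMQConfig(
#       vhost="/",
#       exchanges=[events_exchange],
#       queues=[
#           Queue(name="events.audit", exchange=events_exchange,
#                 routing_key="events.#"),
#       ],
#   )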
|
||||
class RabbitMQBase(ABC):
|
||||
"""Base class for RabbitMQ connections with shared configuration"""
|
||||
|
||||
def __init__(self, config: RabbitMQConfig):
|
||||
settings = Settings()
|
||||
self.host = settings.config.rabbitmq_host
|
||||
self.port = settings.config.rabbitmq_port
|
||||
self.username = settings.secrets.rabbitmq_default_user
|
||||
self.password = settings.secrets.rabbitmq_default_pass
|
||||
self.config = config
|
||||
|
||||
self._connection = None
|
||||
self._channel = None
|
||||
|
||||
@property
|
||||
def is_connected(self) -> bool:
|
||||
"""Check if we have a valid connection"""
|
||||
return bool(self._connection)
|
||||
|
||||
@property
|
||||
def is_ready(self) -> bool:
|
||||
"""Check if we have a valid channel"""
|
||||
return bool(self.is_connected and self._channel)
|
||||
|
||||
@abstractmethod
|
||||
def connect(self) -> None | Awaitable[None]:
|
||||
"""Establish connection to RabbitMQ"""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def disconnect(self) -> None | Awaitable[None]:
|
||||
"""Close connection to RabbitMQ"""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def declare_infrastructure(self) -> None | Awaitable[None]:
|
||||
"""Declare exchanges and queues for this service"""
|
||||
pass
|
||||
|
||||
|
||||
class SyncRabbitMQ(RabbitMQBase):
|
||||
"""Synchronous RabbitMQ client"""
|
||||
|
||||
@property
|
||||
def is_connected(self) -> bool:
|
||||
return bool(self._connection and self._connection.is_open)
|
||||
|
||||
@property
|
||||
def is_ready(self) -> bool:
|
||||
return bool(self.is_connected and self._channel and self._channel.is_open)
|
||||
|
||||
@conn_retry("RabbitMQ", "Acquiring connection")
|
||||
def connect(self) -> None:
|
||||
if self.is_connected:
|
||||
return
|
||||
|
||||
credentials = pika.PlainCredentials(self.username, self.password)
|
||||
parameters = pika.ConnectionParameters(
|
||||
host=self.host,
|
||||
port=self.port,
|
||||
virtual_host=self.config.vhost,
|
||||
credentials=credentials,
|
||||
heartbeat=600,
|
||||
blocked_connection_timeout=300,
|
||||
)
|
||||
|
||||
self._connection = pika.BlockingConnection(parameters)
|
||||
self._channel = self._connection.channel()
|
||||
self._channel.basic_qos(prefetch_count=1)
|
||||
|
||||
self.declare_infrastructure()
|
||||
|
||||
def disconnect(self) -> None:
|
||||
if self._channel:
|
||||
if self._channel.is_open:
|
||||
self._channel.close()
|
||||
self._channel = None
|
||||
if self._connection:
|
||||
if self._connection.is_open:
|
||||
self._connection.close()
|
||||
self._connection = None
|
||||
|
||||
def declare_infrastructure(self) -> None:
|
||||
"""Declare exchanges and queues for this service"""
|
||||
if not self.is_ready:
|
||||
self.connect()
|
||||
|
||||
if self._channel is None:
|
||||
raise RuntimeError("Channel should be established after connect")
|
||||
|
||||
# Declare exchanges
|
||||
for exchange in self.config.exchanges:
|
||||
self._channel.exchange_declare(
|
||||
exchange=exchange.name,
|
||||
exchange_type=exchange.type.value,
|
||||
durable=exchange.durable,
|
||||
auto_delete=exchange.auto_delete,
|
||||
)
|
||||
|
||||
# Declare queues and bind them to exchanges
|
||||
for queue in self.config.queues:
|
||||
self._channel.queue_declare(
|
||||
queue=queue.name,
|
||||
durable=queue.durable,
|
||||
auto_delete=queue.auto_delete,
|
||||
arguments=queue.arguments,
|
||||
)
|
||||
if queue.exchange:
|
||||
self._channel.queue_bind(
|
||||
queue=queue.name,
|
||||
exchange=queue.exchange.name,
|
||||
routing_key=queue.routing_key or queue.name,
|
||||
)
|
||||
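# Note: RabbitMQ declarations are idempotent, so re-declaring on every connect
# is safe; re-declaring an existing exchange or queue with *different*
# settings, however, fails with a 406 PRECONDITION_FAILED channel error.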
|
||||
def publish_message(
|
||||
self,
|
||||
routing_key: str,
|
||||
message: str,
|
||||
exchange: Optional[Exchange] = None,
|
||||
properties: Optional[BasicProperties] = None,
|
||||
mandatory: bool = True,
|
||||
) -> None:
|
||||
if not self.is_ready:
|
||||
self.connect()
|
||||
|
||||
if self._channel is None:
|
||||
raise RuntimeError("Channel should be established after connect")
|
||||
|
||||
self._channel.basic_publish(
|
||||
exchange=exchange.name if exchange else "",
|
||||
routing_key=routing_key,
|
||||
body=message.encode(),
|
||||
properties=properties or BasicProperties(delivery_mode=2),
|
||||
mandatory=mandatory,
|
||||
)
|
||||
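# Usage sketch (routing key and payload are made up for illustration):
#
#   client = SyncRabbitMQ(config)
#   client.connect()
#   client.publish_message(routing_key="events.audit", message='{"ok": true}')
#
# With no `exchange` argument the message goes through the default ("")
# exchange, which routes the routing key directly to the queue of that name.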
|
||||
def get_channel(self) -> pika.adapters.blocking_connection.BlockingChannel:
|
||||
if not self.is_ready:
|
||||
self.connect()
|
||||
if self._channel is None:
|
||||
raise RuntimeError("Channel should be established after connect")
|
||||
return self._channel
|
||||
|
||||
|
||||
class AsyncRabbitMQ(RabbitMQBase):
|
||||
"""Asynchronous RabbitMQ client"""
|
||||
|
||||
@property
|
||||
def is_connected(self) -> bool:
|
||||
return bool(self._connection and not self._connection.is_closed)
|
||||
|
||||
@property
|
||||
def is_ready(self) -> bool:
|
||||
return bool(self.is_connected and self._channel and not self._channel.is_closed)
|
||||
|
||||
@conn_retry("AsyncRabbitMQ", "Acquiring async connection")
|
||||
async def connect(self):
|
||||
if self.is_connected:
|
||||
return
|
||||
|
||||
self._connection = await aio_pika.connect_robust(
|
||||
host=self.host,
|
||||
port=self.port,
|
||||
login=self.username,
|
||||
password=self.password,
|
||||
virtualhost=self.config.vhost.lstrip("/"),
|
||||
)
|
||||
self._channel = await self._connection.channel()
|
||||
await self._channel.set_qos(prefetch_count=1)
|
||||
|
||||
await self.declare_infrastructure()
|
||||
|
||||
async def disconnect(self):
|
||||
if self._channel:
|
||||
await self._channel.close()
|
||||
self._channel = None
|
||||
if self._connection:
|
||||
await self._connection.close()
|
||||
self._connection = None
|
||||
|
||||
async def declare_infrastructure(self):
|
||||
"""Declare exchanges and queues for this service"""
|
||||
if not self.is_ready:
|
||||
await self.connect()
|
||||
|
||||
if self._channel is None:
|
||||
raise RuntimeError("Channel should be established after connect")
|
||||
|
||||
# Declare exchanges
|
||||
for exchange in self.config.exchanges:
|
||||
await self._channel.declare_exchange(
|
||||
name=exchange.name,
|
||||
type=exchange.type.value,
|
||||
durable=exchange.durable,
|
||||
auto_delete=exchange.auto_delete,
|
||||
)
|
||||
|
||||
# Declare queues and bind them to exchanges
|
||||
for queue in self.config.queues:
|
||||
queue_obj = await self._channel.declare_queue(
|
||||
name=queue.name,
|
||||
durable=queue.durable,
|
||||
auto_delete=queue.auto_delete,
|
||||
arguments=queue.arguments,
|
||||
)
|
||||
if queue.exchange:
|
||||
exchange = await self._channel.get_exchange(queue.exchange.name)
|
||||
await queue_obj.bind(
|
||||
exchange, routing_key=queue.routing_key or queue.name
|
||||
)
|
||||
|
||||
async def publish_message(
|
||||
self,
|
||||
routing_key: str,
|
||||
message: str,
|
||||
exchange: Optional[Exchange] = None,
|
||||
persistent: bool = True,
|
||||
) -> None:
|
||||
if not self.is_ready:
|
||||
await self.connect()
|
||||
|
||||
if self._channel is None:
|
||||
raise RuntimeError("Channel should be established after connect")
|
||||
|
||||
if exchange:
|
||||
exchange_obj = await self._channel.get_exchange(exchange.name)
|
||||
else:
|
||||
exchange_obj = self._channel.default_exchange
|
||||
|
||||
await exchange_obj.publish(
|
||||
aio_pika.Message(
|
||||
body=message.encode(),
|
||||
delivery_mode=(
|
||||
aio_pika.DeliveryMode.PERSISTENT
|
||||
if persistent
|
||||
else aio_pika.DeliveryMode.NOT_PERSISTENT
|
||||
),
|
||||
),
|
||||
routing_key=routing_key,
|
||||
)
|
||||
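# Async counterpart (illustrative; must run inside an event loop):
#
#   client = AsyncRabbitMQ(config)
#   await client.connect()
#   await client.publish_message("events.audit", '{"status": "done"}')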
|
||||
async def get_channel(self) -> aio_pika.abc.AbstractChannel:
|
||||
if not self.is_ready:
|
||||
await self.connect()
|
||||
if self._channel is None:
|
||||
raise RuntimeError("Channel should be established after connect")
|
||||
return self._channel
|
||||
@@ -1,78 +1,42 @@
|
||||
import logging
|
||||
from datetime import datetime, timedelta
|
||||
from typing import Optional, cast
|
||||
|
||||
from autogpt_libs.auth.models import DEFAULT_USER_ID
|
||||
from fastapi import HTTPException
|
||||
from prisma import Json
|
||||
from prisma.enums import NotificationType
|
||||
from prisma.models import User
|
||||
from prisma.types import UserUpdateInput
|
||||
|
||||
from backend.data.db import prisma
|
||||
from backend.data.model import UserIntegrations, UserMetadata, UserMetadataRaw
|
||||
from backend.data.notifications import NotificationPreference, NotificationPreferenceDTO
|
||||
from backend.server.v2.store.exceptions import DatabaseError
|
||||
from backend.util.encryption import JSONCryptor
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
async def get_or_create_user(user_data: dict) -> User:
|
||||
try:
|
||||
user_id = user_data.get("sub")
|
||||
if not user_id:
|
||||
raise HTTPException(status_code=401, detail="User ID not found in token")
|
||||
|
||||
user_email = user_data.get("email")
|
||||
if not user_email:
|
||||
raise HTTPException(status_code=401, detail="Email not found in token")
|
||||
|
||||
user = await prisma.user.find_unique(where={"id": user_id})
|
||||
if not user:
|
||||
user = await prisma.user.create(
|
||||
data={
|
||||
"id": user_id,
|
||||
"email": user_email,
|
||||
"name": user_data.get("user_metadata", {}).get("name"),
|
||||
}
|
||||
)
|
||||
|
||||
return User.model_validate(user)
|
||||
except Exception as e:
|
||||
raise DatabaseError(f"Failed to get or create user {user_data}: {e}") from e
|
||||
|
||||
|
||||
async def get_user_by_id(user_id: str) -> User:
|
||||
user = await prisma.user.find_unique(where={"id": user_id})
|
||||
if not user:
|
||||
raise ValueError(f"User not found with ID: {user_id}")
|
||||
return User.model_validate(user)
|
||||
|
||||
|
||||
async def get_user_email_by_id(user_id: str) -> Optional[str]:
|
||||
try:
|
||||
user = await prisma.user.find_unique(where={"id": user_id})
|
||||
return user.email if user else None
|
||||
except Exception as e:
|
||||
raise DatabaseError(f"Failed to get user email for user {user_id}: {e}") from e
|
||||
|
||||
|
||||
async def get_user_by_email(email: str) -> Optional[User]:
|
||||
try:
|
||||
user = await prisma.user.find_unique(where={"email": email})
|
||||
return User.model_validate(user) if user else None
|
||||
except Exception as e:
|
||||
raise DatabaseError(f"Failed to get user by email {email}: {e}") from e
|
||||
|
||||
|
||||
async def update_user_email(user_id: str, email: str):
|
||||
try:
|
||||
await prisma.user.update(where={"id": user_id}, data={"email": email})
|
||||
except Exception as e:
|
||||
raise DatabaseError(
|
||||
f"Failed to update user email for user {user_id}: {e}"
|
||||
) from e
|
||||
async def get_user_by_id(user_id: str) -> Optional[User]:
|
||||
user = await prisma.user.find_unique(where={"id": user_id})
|
||||
return User.model_validate(user) if user else None
|
||||
|
||||
|
||||
async def create_default_user() -> Optional[User]:
|
||||
@@ -164,173 +128,3 @@ async def migrate_and_encrypt_user_integrations():
|
||||
where={"id": user.id},
|
||||
data={"metadata": Json(raw_metadata)},
|
||||
)
|
||||
|
||||
|
||||
async def get_active_user_ids_in_timerange(start_time: str, end_time: str) -> list[str]:
|
||||
try:
|
||||
users = await User.prisma().find_many(
|
||||
where={
|
||||
"AgentGraphExecutions": {
|
||||
"some": {
|
||||
"createdAt": {
|
||||
"gte": datetime.fromisoformat(start_time),
|
||||
"lte": datetime.fromisoformat(end_time),
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
)
|
||||
return [user.id for user in users]
|
||||
|
||||
except Exception as e:
|
||||
raise DatabaseError(
|
||||
f"Failed to get active user ids in timerange {start_time} to {end_time}: {e}"
|
||||
) from e
|
||||
|
||||
|
||||
async def get_active_users_ids() -> list[str]:
|
||||
user_ids = await get_active_user_ids_in_timerange(
|
||||
(datetime.now() - timedelta(days=30)).isoformat(),
|
||||
datetime.now().isoformat(),
|
||||
)
|
||||
return user_ids
|
||||
|
||||
|
||||
async def get_user_notification_preference(user_id: str) -> NotificationPreference:
|
||||
try:
|
||||
user = await User.prisma().find_unique_or_raise(
|
||||
where={"id": user_id},
|
||||
)
|
||||
|
||||
# fall back to disabled if the user has no stored notification preference (shouldn't ever happen though)
|
||||
preferences: dict[NotificationType, bool] = {
|
||||
NotificationType.AGENT_RUN: user.notifyOnAgentRun or False,
|
||||
NotificationType.ZERO_BALANCE: user.notifyOnZeroBalance or False,
|
||||
NotificationType.LOW_BALANCE: user.notifyOnLowBalance or False,
|
||||
NotificationType.BLOCK_EXECUTION_FAILED: user.notifyOnBlockExecutionFailed
|
||||
or False,
|
||||
NotificationType.CONTINUOUS_AGENT_ERROR: user.notifyOnContinuousAgentError
|
||||
or False,
|
||||
NotificationType.DAILY_SUMMARY: user.notifyOnDailySummary or False,
|
||||
NotificationType.WEEKLY_SUMMARY: user.notifyOnWeeklySummary or False,
|
||||
NotificationType.MONTHLY_SUMMARY: user.notifyOnMonthlySummary or False,
|
||||
}
|
||||
daily_limit = user.maxEmailsPerDay or 3
|
||||
notification_preference = NotificationPreference(
|
||||
user_id=user.id,
|
||||
email=user.email,
|
||||
preferences=preferences,
|
||||
daily_limit=daily_limit,
|
||||
# TODO with other changes later; for now we will just email them
|
||||
emails_sent_today=0,
|
||||
last_reset_date=datetime.now(),
|
||||
)
|
||||
return NotificationPreference.model_validate(notification_preference)
|
||||
|
||||
except Exception as e:
|
||||
raise DatabaseError(
|
||||
f"Failed to upsert user notification preference for user {user_id}: {e}"
|
||||
) from e
|
||||
|
||||
|
||||
async def update_user_notification_preference(
|
||||
user_id: str, data: NotificationPreferenceDTO
|
||||
) -> NotificationPreference:
|
||||
try:
|
||||
update_data: UserUpdateInput = {}
|
||||
if data.email:
|
||||
update_data["email"] = data.email
|
||||
if NotificationType.AGENT_RUN in data.preferences:
|
||||
update_data["notifyOnAgentRun"] = data.preferences[
|
||||
NotificationType.AGENT_RUN
|
||||
]
|
||||
if NotificationType.ZERO_BALANCE in data.preferences:
|
||||
update_data["notifyOnZeroBalance"] = data.preferences[
|
||||
NotificationType.ZERO_BALANCE
|
||||
]
|
||||
if NotificationType.LOW_BALANCE in data.preferences:
|
||||
update_data["notifyOnLowBalance"] = data.preferences[
|
||||
NotificationType.LOW_BALANCE
|
||||
]
|
||||
if NotificationType.BLOCK_EXECUTION_FAILED in data.preferences:
|
||||
update_data["notifyOnBlockExecutionFailed"] = data.preferences[
|
||||
NotificationType.BLOCK_EXECUTION_FAILED
|
||||
]
|
||||
if NotificationType.CONTINUOUS_AGENT_ERROR in data.preferences:
|
||||
update_data["notifyOnContinuousAgentError"] = data.preferences[
|
||||
NotificationType.CONTINUOUS_AGENT_ERROR
|
||||
]
|
||||
if NotificationType.DAILY_SUMMARY in data.preferences:
|
||||
update_data["notifyOnDailySummary"] = data.preferences[
|
||||
NotificationType.DAILY_SUMMARY
|
||||
]
|
||||
if NotificationType.WEEKLY_SUMMARY in data.preferences:
|
||||
update_data["notifyOnWeeklySummary"] = data.preferences[
|
||||
NotificationType.WEEKLY_SUMMARY
|
||||
]
|
||||
if NotificationType.MONTHLY_SUMMARY in data.preferences:
|
||||
update_data["notifyOnMonthlySummary"] = data.preferences[
|
||||
NotificationType.MONTHLY_SUMMARY
|
||||
]
|
||||
if data.daily_limit:
|
||||
update_data["maxEmailsPerDay"] = data.daily_limit
|
||||
|
||||
user = await User.prisma().update(
|
||||
where={"id": user_id},
|
||||
data=update_data,
|
||||
)
|
||||
if not user:
|
||||
raise ValueError(f"User not found with ID: {user_id}")
|
||||
# Note: `x or True` always evaluates to True, which would ignore a stored
# False; treat only an explicit False as disabled, so unset (None) values
# default to enabled.
preferences: dict[NotificationType, bool] = {
NotificationType.AGENT_RUN: user.notifyOnAgentRun is not False,
NotificationType.ZERO_BALANCE: user.notifyOnZeroBalance is not False,
NotificationType.LOW_BALANCE: user.notifyOnLowBalance is not False,
NotificationType.BLOCK_EXECUTION_FAILED: user.notifyOnBlockExecutionFailed is not False,
NotificationType.CONTINUOUS_AGENT_ERROR: user.notifyOnContinuousAgentError is not False,
NotificationType.DAILY_SUMMARY: user.notifyOnDailySummary is not False,
NotificationType.WEEKLY_SUMMARY: user.notifyOnWeeklySummary is not False,
NotificationType.MONTHLY_SUMMARY: user.notifyOnMonthlySummary is not False,
}
|
||||
notification_preference = NotificationPreference(
|
||||
user_id=user.id,
|
||||
email=user.email,
|
||||
preferences=preferences,
|
||||
daily_limit=user.maxEmailsPerDay or 3,
|
||||
# TODO with other changes later; for now we will just email them
|
||||
emails_sent_today=0,
|
||||
last_reset_date=datetime.now(),
|
||||
)
|
||||
return NotificationPreference.model_validate(notification_preference)
|
||||
|
||||
except Exception as e:
|
||||
raise DatabaseError(
|
||||
f"Failed to update user notification preference for user {user_id}: {e}"
|
||||
) from e
|
||||
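# Hedged usage sketch (ids and values are placeholders, not real data):
#
#   await update_user_notification_preference(
#       "user-123",
#       NotificationPreferenceDTO(
#           email="user@example.com",
#           preferences={NotificationType.DAILY_SUMMARY: False},
#           daily_limit=5,
#       ),
#   )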
|
||||
|
||||
async def set_user_email_verification(user_id: str, verified: bool) -> None:
|
||||
"""Set the email verification status for a user."""
|
||||
try:
|
||||
await User.prisma().update(
|
||||
where={"id": user_id},
|
||||
data={"emailVerified": verified},
|
||||
)
|
||||
except Exception as e:
|
||||
raise DatabaseError(
|
||||
f"Failed to set email verification status for user {user_id}: {e}"
|
||||
) from e
|
||||
|
||||
|
||||
async def get_user_email_verification(user_id: str) -> bool:
|
||||
"""Get the email verification status for a user."""
|
||||
try:
|
||||
user = await User.prisma().find_unique_or_raise(
|
||||
where={"id": user_id},
|
||||
)
|
||||
return user.emailVerified
|
||||
except Exception as e:
|
||||
raise DatabaseError(
|
||||
f"Failed to get email verification status for user {user_id}: {e}"
|
||||
) from e
|
||||
|
||||
@@ -1,40 +1,33 @@
|
||||
from functools import wraps
|
||||
from typing import Any, Callable, Concatenate, Coroutine, ParamSpec, TypeVar, cast
|
||||
|
||||
from backend.data.credit import get_user_credit_model
|
||||
from backend.data.execution import (
|
||||
ExecutionResult,
|
||||
NodeExecutionEntry,
|
||||
RedisExecutionEventBus,
|
||||
create_graph_execution,
|
||||
get_execution_results,
|
||||
get_incomplete_executions,
|
||||
get_latest_execution,
|
||||
update_execution_status,
|
||||
update_graph_execution_start_time,
|
||||
update_graph_execution_stats,
|
||||
update_node_execution_stats,
|
||||
upsert_execution_input,
|
||||
upsert_execution_output,
|
||||
)
|
||||
from backend.data.graph import (
|
||||
get_connected_output_nodes,
|
||||
get_graph,
|
||||
get_graph_metadata,
|
||||
get_node,
|
||||
)
|
||||
from backend.data.graph import get_graph, get_node
|
||||
from backend.data.user import (
|
||||
get_user_integrations,
|
||||
get_user_metadata,
|
||||
update_user_integrations,
|
||||
update_user_metadata,
|
||||
)
|
||||
from backend.util.service import AppService, expose, exposed_run_and_wait
|
||||
from backend.util.service import AppService, expose, register_pydantic_serializers
|
||||
from backend.util.settings import Config
|
||||
|
||||
P = ParamSpec("P")
|
||||
R = TypeVar("R")
|
||||
config = Config()
|
||||
_user_credit_model = get_user_credit_model()
|
||||
|
||||
|
||||
async def _spend_credits(entry: NodeExecutionEntry) -> int:
|
||||
return await _user_credit_model.spend_credits(entry, 0, 0)
|
||||
|
||||
|
||||
class DatabaseManager(AppService):
|
||||
@@ -52,15 +45,28 @@ class DatabaseManager(AppService):
|
||||
def send_execution_update(self, execution_result: ExecutionResult):
|
||||
self.event_queue.publish(execution_result)
|
||||
|
||||
@staticmethod
|
||||
def exposed_run_and_wait(
|
||||
f: Callable[P, Coroutine[None, None, R]]
|
||||
) -> Callable[Concatenate[object, P], R]:
|
||||
@expose
|
||||
@wraps(f)
|
||||
def wrapper(self, *args: P.args, **kwargs: P.kwargs) -> R:
|
||||
coroutine = f(*args, **kwargs)
|
||||
res = self.run_and_wait(coroutine)
|
||||
return res
|
||||
|
||||
# Register serializers for annotations on bare function
|
||||
register_pydantic_serializers(f)
|
||||
|
||||
return wrapper
|
||||
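# Illustration of what the wrapper above enables (ids are hypothetical): each
# wrapped coroutine becomes a plain synchronous method on the service client:
#
#   db = get_service_client(DatabaseManager)
#   graph = db.get_graph(graph_id="graph-1", user_id="user-1")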
|
||||
# Executions
|
||||
create_graph_execution = exposed_run_and_wait(create_graph_execution)
|
||||
get_execution_results = exposed_run_and_wait(get_execution_results)
|
||||
get_incomplete_executions = exposed_run_and_wait(get_incomplete_executions)
|
||||
get_latest_execution = exposed_run_and_wait(get_latest_execution)
|
||||
update_execution_status = exposed_run_and_wait(update_execution_status)
|
||||
update_graph_execution_start_time = exposed_run_and_wait(
|
||||
update_graph_execution_start_time
|
||||
)
|
||||
update_graph_execution_stats = exposed_run_and_wait(update_graph_execution_stats)
|
||||
update_node_execution_stats = exposed_run_and_wait(update_node_execution_stats)
|
||||
upsert_execution_input = exposed_run_and_wait(upsert_execution_input)
|
||||
@@ -69,11 +75,17 @@ class DatabaseManager(AppService):
|
||||
# Graphs
|
||||
get_node = exposed_run_and_wait(get_node)
|
||||
get_graph = exposed_run_and_wait(get_graph)
|
||||
get_connected_output_nodes = exposed_run_and_wait(get_connected_output_nodes)
|
||||
get_graph_metadata = exposed_run_and_wait(get_graph_metadata)
|
||||
|
||||
# Credits
|
||||
spend_credits = exposed_run_and_wait(_spend_credits)
|
||||
user_credit_model = get_user_credit_model()
|
||||
get_or_refill_credit = cast(
|
||||
Callable[[Any, str], int],
|
||||
exposed_run_and_wait(user_credit_model.get_or_refill_credit),
|
||||
)
|
||||
spend_credits = cast(
|
||||
Callable[[Any, str, int, str, dict[str, str], float, float], int],
|
||||
exposed_run_and_wait(user_credit_model.spend_credits),
|
||||
)
|
||||
|
||||
# User + User Metadata + User Integrations
|
||||
get_user_metadata = exposed_run_and_wait(get_user_metadata)
|
||||
|
||||
@@ -8,23 +8,12 @@ import threading
|
||||
from concurrent.futures import Future, ProcessPoolExecutor
|
||||
from contextlib import contextmanager
|
||||
from multiprocessing.pool import AsyncResult, Pool
|
||||
from typing import TYPE_CHECKING, Any, Generator, Optional, TypeVar, cast
|
||||
from typing import TYPE_CHECKING, Any, Generator, TypeVar, cast
|
||||
|
||||
from redis.lock import Lock as RedisLock
|
||||
|
||||
from backend.blocks.basic import AgentOutputBlock
|
||||
from backend.data.model import GraphExecutionStats, NodeExecutionStats
|
||||
from backend.data.notifications import (
|
||||
AgentRunData,
|
||||
LowBalanceData,
|
||||
NotificationEventDTO,
|
||||
NotificationType,
|
||||
)
|
||||
from backend.util.exceptions import InsufficientBalanceError
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from backend.executor import DatabaseManager
|
||||
from backend.notifications.notifications import NotificationManager
|
||||
|
||||
from autogpt_libs.utils.cache import thread_cached
|
||||
|
||||
@@ -51,7 +40,6 @@ from backend.data.graph import GraphModel, Link, Node
|
||||
from backend.integrations.creds_manager import IntegrationCredentialsManager
|
||||
from backend.util import json
|
||||
from backend.util.decorator import error_logged, time_measured
|
||||
from backend.util.file import clean_exec_files
|
||||
from backend.util.logging import configure_logging
|
||||
from backend.util.process import set_service_name
|
||||
from backend.util.service import (
|
||||
@@ -120,7 +108,7 @@ def execute_node(
|
||||
db_client: "DatabaseManager",
|
||||
creds_manager: IntegrationCredentialsManager,
|
||||
data: NodeExecutionEntry,
|
||||
execution_stats: NodeExecutionStats | None = None,
|
||||
execution_stats: dict[str, Any] | None = None,
|
||||
) -> ExecutionStream:
|
||||
"""
|
||||
Execute a node in the graph. This will trigger a block execution on a node,
|
||||
@@ -174,7 +162,6 @@ def execute_node(
|
||||
# AgentExecutorBlock specially separate the node input_data & its input_default.
|
||||
if isinstance(node_block, AgentExecutorBlock):
|
||||
input_data = {**node.input_default, "data": input_data}
|
||||
data.data = input_data
|
||||
|
||||
# Execute the node
|
||||
input_data_str = json.dumps(input_data)
|
||||
@@ -182,15 +169,7 @@ def execute_node(
|
||||
log_metadata.info("Executed node with input", input=input_data_str)
|
||||
update_execution(ExecutionStatus.RUNNING)
|
||||
|
||||
# Inject extra execution arguments for the blocks via kwargs
|
||||
extra_exec_kwargs: dict = {
|
||||
"graph_id": graph_id,
|
||||
"node_id": node_id,
|
||||
"graph_exec_id": graph_exec_id,
|
||||
"node_exec_id": node_exec_id,
|
||||
"user_id": user_id,
|
||||
}
|
||||
|
||||
extra_exec_kwargs = {}
|
||||
# Last-minute fetch credentials + acquire a system-wide read-write lock to prevent
|
||||
# changes during execution. ⚠️ This means a set of credentials can only be used by
|
||||
# one (running) block at a time; simultaneous execution of blocks using same
|
||||
@@ -203,20 +182,19 @@ def execute_node(
|
||||
extra_exec_kwargs[field_name] = credentials
|
||||
|
||||
output_size = 0
|
||||
cost = 0
|
||||
try:
|
||||
# Charge the user for the execution before running the block.
|
||||
cost = db_client.spend_credits(data)
|
||||
end_status = ExecutionStatus.COMPLETED
|
||||
credit = db_client.get_or_refill_credit(user_id)
|
||||
if credit < 0:
|
||||
raise ValueError(f"Insufficient credit: {credit}")
|
||||
|
||||
outputs: dict[str, Any] = {}
|
||||
try:
|
||||
for output_name, output_data in node_block.execute(
|
||||
input_data, **extra_exec_kwargs
|
||||
):
|
||||
output_data = json.convert_pydantic_to_json(output_data)
|
||||
output_size += len(json.dumps(output_data))
|
||||
log_metadata.info("Node produced output", **{output_name: output_data})
|
||||
db_client.upsert_execution_output(node_exec_id, output_name, output_data)
|
||||
outputs[output_name] = output_data
|
||||
|
||||
for execution in _enqueue_next_nodes(
|
||||
db_client=db_client,
|
||||
node=node,
|
||||
@@ -228,13 +206,11 @@ def execute_node(
|
||||
):
|
||||
yield execution
|
||||
|
||||
# Update execution status and spend credits
|
||||
update_execution(ExecutionStatus.COMPLETED)
|
||||
|
||||
except Exception as e:
|
||||
end_status = ExecutionStatus.FAILED
|
||||
error_msg = str(e)
|
||||
log_metadata.exception(f"Node execution failed with error {error_msg}")
|
||||
db_client.upsert_execution_output(node_exec_id, "error", error_msg)
|
||||
update_execution(ExecutionStatus.FAILED)
|
||||
|
||||
for execution in _enqueue_next_nodes(
|
||||
db_client=db_client,
|
||||
@@ -250,20 +226,28 @@ def execute_node(
|
||||
raise e
|
||||
finally:
|
||||
# Ensure credentials are released even if execution fails
|
||||
if creds_lock and creds_lock.locked():
|
||||
if creds_lock:
|
||||
try:
|
||||
creds_lock.release()
|
||||
except Exception as e:
|
||||
log_metadata.error(f"Failed to release credentials lock: {e}")
|
||||
|
||||
# Update execution status and spend credits
|
||||
res = update_execution(end_status)
|
||||
if end_status == ExecutionStatus.COMPLETED:
|
||||
s = input_size + output_size
|
||||
t = (
|
||||
(res.end_time - res.start_time).total_seconds()
|
||||
if res.end_time and res.start_time
|
||||
else 0
|
||||
)
|
||||
db_client.spend_credits(user_id, credit, node_block.id, input_data, s, t)
|
||||
|
||||
# Update execution stats
|
||||
if execution_stats is not None:
|
||||
execution_stats = execution_stats.model_copy(
|
||||
update=node_block.execution_stats.model_dump()
|
||||
)
|
||||
execution_stats.input_size = input_size
|
||||
execution_stats.output_size = output_size
|
||||
execution_stats.cost = cost
|
||||
execution_stats.update(node_block.execution_stats)
|
||||
execution_stats["input_size"] = input_size
|
||||
execution_stats["output_size"] = output_size
|
||||
|
||||
|
||||
def _enqueue_next_nodes(
|
||||
@@ -276,7 +260,7 @@ def _enqueue_next_nodes(
|
||||
log_metadata: LogMetadata,
|
||||
) -> list[NodeExecutionEntry]:
|
||||
def add_enqueued_execution(
|
||||
node_exec_id: str, node_id: str, block_id: str, data: BlockInput
|
||||
node_exec_id: str, node_id: str, data: BlockInput
|
||||
) -> NodeExecutionEntry:
|
||||
exec_update = db_client.update_execution_status(
|
||||
node_exec_id, ExecutionStatus.QUEUED, data
|
||||
@@ -288,7 +272,6 @@ def _enqueue_next_nodes(
|
||||
graph_id=graph_id,
|
||||
node_exec_id=node_exec_id,
|
||||
node_id=node_id,
|
||||
block_id=block_id,
|
||||
data=data,
|
||||
)
|
||||
|
||||
@@ -342,12 +325,7 @@ def _enqueue_next_nodes(
|
||||
# Input is complete, enqueue the execution.
|
||||
log_metadata.info(f"Enqueued {suffix}")
|
||||
enqueued_executions.append(
|
||||
add_enqueued_execution(
|
||||
node_exec_id=next_node_exec_id,
|
||||
node_id=next_node_id,
|
||||
block_id=next_node.block_id,
|
||||
data=next_node_input,
|
||||
)
|
||||
add_enqueued_execution(next_node_exec_id, next_node_id, next_node_input)
|
||||
)
|
||||
|
||||
# Next execution stops here if the link is not static.
|
||||
@@ -377,12 +355,7 @@ def _enqueue_next_nodes(
|
||||
continue
|
||||
log_metadata.info(f"Enqueueing static-link execution {suffix}")
|
||||
enqueued_executions.append(
|
||||
add_enqueued_execution(
|
||||
node_exec_id=iexec.node_exec_id,
|
||||
node_id=next_node_id,
|
||||
block_id=next_node.block_id,
|
||||
data=idata,
|
||||
)
|
||||
add_enqueued_execution(iexec.node_exec_id, next_node_id, idata)
|
||||
)
|
||||
return enqueued_executions
|
||||
|
||||
@@ -416,30 +389,46 @@ def validate_exec(
|
||||
node_block: Block | None = get_block(node.block_id)
|
||||
if not node_block:
|
||||
return None, f"Block for {node.block_id} not found."
|
||||
schema = node_block.input_schema
|
||||
|
||||
# Convert non-matching data types to the expected input schema.
|
||||
for name, data_type in schema.__annotations__.items():
|
||||
if (value := data.get(name)) and (type(value) is not data_type):
|
||||
data[name] = convert(value, data_type)
|
||||
if isinstance(node_block, AgentExecutorBlock):
|
||||
# Validate the execution metadata for the agent executor block.
|
||||
try:
|
||||
exec_data = AgentExecutorBlock.Input(**node.input_default)
|
||||
except Exception as e:
|
||||
return None, f"Input data doesn't match {node_block.name}: {str(e)}"
|
||||
|
||||
# Validation input
|
||||
input_schema = exec_data.input_schema
|
||||
required_fields = set(input_schema["required"])
|
||||
input_default = exec_data.data
|
||||
else:
|
||||
# Convert non-matching data types to the expected input schema.
|
||||
for name, data_type in node_block.input_schema.__annotations__.items():
|
||||
if (value := data.get(name)) and (type(value) is not data_type):
|
||||
data[name] = convert(value, data_type)
|
||||
|
||||
# Validation input
|
||||
input_schema = node_block.input_schema.jsonschema()
|
||||
required_fields = node_block.input_schema.get_required_fields()
|
||||
input_default = node.input_default
|
||||
|
||||
# Input data (without default values) should contain all required fields.
|
||||
error_prefix = f"Input data missing or mismatch for `{node_block.name}`:"
|
||||
if missing_links := schema.get_missing_links(data, node.input_links):
|
||||
return None, f"{error_prefix} unpopulated links {missing_links}"
|
||||
input_fields_from_nodes = {link.sink_name for link in node.input_links}
|
||||
if not input_fields_from_nodes.issubset(data):
|
||||
return None, f"{error_prefix} {input_fields_from_nodes - set(data)}"
|
||||
|
||||
# Merge input data with default values and resolve dynamic dict/list/object pins.
|
||||
input_default = schema.get_input_defaults(node.input_default)
|
||||
data = {**input_default, **data}
|
||||
if resolve_input:
|
||||
data = merge_execution_input(data)
|
||||
|
||||
# Input data post-merge should contain all required fields from the schema.
|
||||
if missing_input := schema.get_missing_input(data):
|
||||
return None, f"{error_prefix} missing input {missing_input}"
|
||||
if not required_fields.issubset(data):
|
||||
return None, f"{error_prefix} {required_fields - set(data)}"
|
||||
|
||||
# Last validation: Validate the input values against the schema.
|
||||
if error := schema.get_mismatch_error(data):
|
||||
if error := json.validate_with_jsonschema(schema=input_schema, data=data):
|
||||
error_message = f"{error_prefix} {error}"
|
||||
logger.error(error_message)
|
||||
return None, error_message
|
||||
@@ -520,7 +509,7 @@ class Executor:
|
||||
cls,
|
||||
q: ExecutionQueue[NodeExecutionEntry],
|
||||
node_exec: NodeExecutionEntry,
|
||||
) -> NodeExecutionStats:
|
||||
) -> dict[str, Any]:
|
||||
log_metadata = LogMetadata(
|
||||
user_id=node_exec.user_id,
|
||||
graph_eid=node_exec.graph_exec_id,
|
||||
@@ -530,15 +519,13 @@ class Executor:
|
||||
block_name="-",
|
||||
)
|
||||
|
||||
execution_stats = NodeExecutionStats()
|
||||
execution_stats = {}
|
||||
timing_info, _ = cls._on_node_execution(
|
||||
q, node_exec, log_metadata, execution_stats
|
||||
)
|
||||
execution_stats.walltime = timing_info.wall_time
|
||||
execution_stats.cputime = timing_info.cpu_time
|
||||
execution_stats["walltime"] = timing_info.wall_time
|
||||
execution_stats["cputime"] = timing_info.cpu_time
|
||||
|
||||
if isinstance(execution_stats.error, Exception):
|
||||
execution_stats.error = str(execution_stats.error)
|
||||
cls.db_client.update_node_execution_stats(
|
||||
node_exec.node_exec_id, execution_stats
|
||||
)
|
||||
@@ -551,31 +538,19 @@ class Executor:
|
||||
q: ExecutionQueue[NodeExecutionEntry],
|
||||
node_exec: NodeExecutionEntry,
|
||||
log_metadata: LogMetadata,
|
||||
stats: NodeExecutionStats | None = None,
|
||||
stats: dict[str, Any] | None = None,
|
||||
):
|
||||
try:
|
||||
log_metadata.info(f"Start node execution {node_exec.node_exec_id}")
|
||||
for execution in execute_node(
|
||||
db_client=cls.db_client,
|
||||
creds_manager=cls.creds_manager,
|
||||
data=node_exec,
|
||||
execution_stats=stats,
|
||||
cls.db_client, cls.creds_manager, node_exec, stats
|
||||
):
|
||||
q.add(execution)
|
||||
log_metadata.info(f"Finished node execution {node_exec.node_exec_id}")
|
||||
except Exception as e:
|
||||
# Avoid user error being marked as an actual error.
|
||||
if isinstance(e, ValueError):
|
||||
log_metadata.info(
|
||||
f"Failed node execution {node_exec.node_exec_id}: {e}"
|
||||
)
|
||||
else:
|
||||
log_metadata.exception(
|
||||
f"Failed node execution {node_exec.node_exec_id}: {e}"
|
||||
)
|
||||
|
||||
if stats is not None:
|
||||
stats.error = e
|
||||
log_metadata.exception(
|
||||
f"Failed node execution {node_exec.node_exec_id}: {e}"
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def on_graph_executor_start(cls):
|
||||
@@ -585,7 +560,6 @@ class Executor:
|
||||
cls.db_client = get_db_client()
|
||||
cls.pool_size = settings.config.num_node_workers
|
||||
cls.pid = os.getpid()
|
||||
cls.notification_service = get_notification_service()
|
||||
cls._init_node_executor_pool()
|
||||
logger.info(
|
||||
f"Graph executor {cls.pid} started with {cls.pool_size} node workers"
|
||||
@@ -623,16 +597,12 @@ class Executor:
|
||||
node_eid="*",
|
||||
block_name="-",
|
||||
)
|
||||
cls.db_client.update_graph_execution_start_time(graph_exec.graph_exec_id)
|
||||
timing_info, (exec_stats, status, error) = cls._on_graph_execution(
|
||||
graph_exec, cancel, log_metadata
|
||||
)
|
||||
exec_stats.walltime = timing_info.wall_time
|
||||
exec_stats.cputime = timing_info.cpu_time
|
||||
exec_stats.error = error
|
||||
|
||||
if isinstance(exec_stats.error, Exception):
|
||||
exec_stats.error = str(exec_stats.error)
|
||||
exec_stats["walltime"] = timing_info.wall_time
|
||||
exec_stats["cputime"] = timing_info.cpu_time
|
||||
exec_stats["error"] = str(error) if error else None
|
||||
result = cls.db_client.update_graph_execution_stats(
|
||||
graph_exec_id=graph_exec.graph_exec_id,
|
||||
status=status,
|
||||
@@ -640,8 +610,6 @@ class Executor:
|
||||
)
|
||||
cls.db_client.send_execution_update(result)
|
||||
|
||||
cls._handle_agent_run_notif(graph_exec, exec_stats)
|
||||
|
||||
@classmethod
|
||||
@time_measured
|
||||
def _on_graph_execution(
|
||||
@@ -649,7 +617,7 @@ class Executor:
|
||||
graph_exec: GraphExecutionEntry,
|
||||
cancel: threading.Event,
|
||||
log_metadata: LogMetadata,
|
||||
) -> tuple[GraphExecutionStats, ExecutionStatus, Exception | None]:
|
||||
) -> tuple[dict[str, Any], ExecutionStatus, Exception | None]:
|
||||
"""
|
||||
Returns:
|
||||
dict: The execution statistics of the graph execution.
|
||||
@@ -657,7 +625,11 @@ class Executor:
|
||||
Exception | None: The error that occurred during the execution, if any.
|
||||
"""
|
||||
log_metadata.info(f"Start graph execution {graph_exec.graph_exec_id}")
|
||||
exec_stats = GraphExecutionStats()
|
||||
exec_stats = {
|
||||
"nodes_walltime": 0,
|
||||
"nodes_cputime": 0,
|
||||
"node_count": 0,
|
||||
}
|
||||
error = None
|
||||
finished = False
|
||||
|
||||
@@ -676,33 +648,20 @@ class Executor:
|
||||
try:
|
||||
queue = ExecutionQueue[NodeExecutionEntry]()
|
||||
for node_exec in graph_exec.start_node_execs:
|
||||
exec_update = cls.db_client.update_execution_status(
|
||||
node_exec.node_exec_id, ExecutionStatus.QUEUED, node_exec.data
|
||||
)
|
||||
cls.db_client.send_execution_update(exec_update)
|
||||
queue.add(node_exec)
|
||||
|
||||
running_executions: dict[str, AsyncResult] = {}
|
||||
low_balance_error: Optional[InsufficientBalanceError] = None
|
||||
|
||||
def make_exec_callback(exec_data: NodeExecutionEntry):
|
||||
node_id = exec_data.node_id
|
||||
|
||||
def callback(result: object):
|
||||
running_executions.pop(exec_data.node_id)
|
||||
|
||||
if not isinstance(result, NodeExecutionStats):
|
||||
return
|
||||
|
||||
nonlocal exec_stats, low_balance_error
|
||||
exec_stats.node_count += 1
|
||||
exec_stats.nodes_cputime += result.cputime
|
||||
exec_stats.nodes_walltime += result.walltime
|
||||
exec_stats.cost += result.cost
|
||||
if (err := result.error) and isinstance(err, Exception):
|
||||
exec_stats.node_error_count += 1
|
||||
|
||||
if isinstance(err, InsufficientBalanceError):
|
||||
low_balance_error = err
|
||||
running_executions.pop(node_id)
|
||||
nonlocal exec_stats
|
||||
if isinstance(result, dict):
|
||||
exec_stats["node_count"] += 1
|
||||
exec_stats["nodes_cputime"] += result.get("cputime", 0)
|
||||
exec_stats["nodes_walltime"] += result.get("walltime", 0)
|
||||
|
||||
return callback
|
||||
|
||||
@@ -747,16 +706,6 @@ class Executor:
|
||||
execution.wait(3)
|
||||
|
||||
log_metadata.info(f"Finished graph execution {graph_exec.graph_exec_id}")
|
||||
|
||||
if isinstance(low_balance_error, InsufficientBalanceError):
|
||||
cls._handle_low_balance_notif(
|
||||
graph_exec.user_id,
|
||||
graph_exec.graph_id,
|
||||
exec_stats,
|
||||
low_balance_error,
|
||||
)
|
||||
raise low_balance_error
|
||||
|
||||
except Exception as e:
|
||||
log_metadata.exception(
|
||||
f"Failed graph execution {graph_exec.graph_exec_id}: {e}"
|
||||
@@ -767,7 +716,6 @@ class Executor:
|
||||
finished = True
|
||||
cancel.set()
|
||||
cancel_thread.join()
|
||||
clean_exec_files(graph_exec.graph_exec_id)
|
||||
|
||||
return (
|
||||
exec_stats,
|
||||
@@ -775,67 +723,6 @@ class Executor:
|
||||
error,
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def _handle_agent_run_notif(
|
||||
cls,
|
||||
graph_exec: GraphExecutionEntry,
|
||||
exec_stats: GraphExecutionStats,
|
||||
):
|
||||
metadata = cls.db_client.get_graph_metadata(
|
||||
graph_exec.graph_id, graph_exec.graph_version
|
||||
)
|
||||
outputs = cls.db_client.get_execution_results(graph_exec.graph_exec_id)
|
||||
|
||||
named_outputs = [
|
||||
{
|
||||
key: value[0] if key == "name" else value
|
||||
for key, value in output.output_data.items()
|
||||
}
|
||||
for output in outputs
|
||||
if output.block_id == AgentOutputBlock().id
|
||||
]
|
||||
|
||||
event = NotificationEventDTO(
|
||||
user_id=graph_exec.user_id,
|
||||
type=NotificationType.AGENT_RUN,
|
||||
data=AgentRunData(
|
||||
outputs=named_outputs,
|
||||
agent_name=metadata.name if metadata else "Unknown Agent",
|
||||
credits_used=exec_stats.cost,
|
||||
execution_time=exec_stats.walltime,
|
||||
graph_id=graph_exec.graph_id,
|
||||
node_count=exec_stats.node_count,
|
||||
).model_dump(),
|
||||
)
|
||||
|
||||
cls.notification_service.queue_notification(event)
|
||||
|
||||
@classmethod
|
||||
def _handle_low_balance_notif(
|
||||
cls,
|
||||
user_id: str,
|
||||
graph_id: str,
|
||||
exec_stats: GraphExecutionStats,
|
||||
e: InsufficientBalanceError,
|
||||
):
|
||||
shortfall = e.balance - e.amount
|
||||
metadata = cls.db_client.get_graph_metadata(graph_id)
|
||||
base_url = (
|
||||
settings.config.frontend_base_url or settings.config.platform_base_url
|
||||
)
|
||||
cls.notification_service.queue_notification(
|
||||
NotificationEventDTO(
|
||||
user_id=user_id,
|
||||
type=NotificationType.LOW_BALANCE,
|
||||
data=LowBalanceData(
|
||||
current_balance=exec_stats.cost,
|
||||
billing_page_link=f"{base_url}/profile/credits",
|
||||
shortfall=shortfall,
|
||||
agent_name=metadata.name if metadata else "Unknown Agent",
|
||||
).model_dump(),
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
class ExecutionManager(AppService):
|
||||
def __init__(self):
|
||||
@@ -893,8 +780,7 @@ class ExecutionManager(AppService):
|
||||
graph_id: str,
|
||||
data: BlockInput,
|
||||
user_id: str,
|
||||
graph_version: Optional[int] = None,
|
||||
preset_id: str | None = None,
|
||||
graph_version: int | None = None,
|
||||
) -> GraphExecutionEntry:
|
||||
graph: GraphModel | None = self.db_client.get_graph(
|
||||
graph_id=graph_id, user_id=user_id, version=graph_version
|
||||
@@ -916,9 +802,9 @@ class ExecutionManager(AppService):
|
||||
|
||||
# Extract request input data, and assign it to the input pin.
|
||||
if block.block_type == BlockType.INPUT:
|
||||
input_name = node.input_default.get("name")
|
||||
if input_name and input_name in data:
|
||||
input_data = {"value": data[input_name]}
|
||||
name = node.input_default.get("name")
|
||||
if name and name in data:
|
||||
input_data = {"value": data[name]}
|
||||
|
||||
# Extract webhook payload, and assign it to the input pin
|
||||
webhook_payload_key = f"webhook_{node.webhook_id}_payload"
|
||||
@@ -938,17 +824,11 @@ class ExecutionManager(AppService):
|
||||
else:
|
||||
nodes_input.append((node.id, input_data))
|
||||
|
||||
if not nodes_input:
|
||||
raise ValueError(
|
||||
"No starting nodes found for the graph, make sure an AgentInput or blocks with no inbound links are present as starting nodes."
|
||||
)
|
||||
|
||||
graph_exec_id, node_execs = self.db_client.create_graph_execution(
|
||||
graph_id=graph_id,
|
||||
graph_version=graph.version,
|
||||
nodes_input=nodes_input,
|
||||
user_id=user_id,
|
||||
preset_id=preset_id,
|
||||
)
|
||||
|
||||
starting_node_execs = []
|
||||
@@ -960,15 +840,17 @@ class ExecutionManager(AppService):
|
||||
graph_id=node_exec.graph_id,
|
||||
node_exec_id=node_exec.node_exec_id,
|
||||
node_id=node_exec.node_id,
|
||||
block_id=node_exec.block_id,
|
||||
data=node_exec.input_data,
|
||||
)
|
||||
)
|
||||
exec_update = self.db_client.update_execution_status(
|
||||
node_exec.node_exec_id, ExecutionStatus.QUEUED, node_exec.input_data
|
||||
)
|
||||
self.db_client.send_execution_update(exec_update)
|
||||
|
||||
graph_exec = GraphExecutionEntry(
|
||||
user_id=user_id,
|
||||
graph_id=graph_id,
|
||||
graph_version=graph_version or 0,
|
||||
graph_exec_id=graph_exec_id,
|
||||
start_node_execs=starting_node_execs,
|
||||
)
|
||||
@@ -1064,13 +946,6 @@ def get_db_client() -> "DatabaseManager":
|
||||
return get_service_client(DatabaseManager)
|
||||
|
||||
|
||||
@thread_cached
|
||||
def get_notification_service() -> "NotificationManager":
|
||||
from backend.notifications import NotificationManager
|
||||
|
||||
return get_service_client(NotificationManager)
|
||||
|
||||
|
||||
@contextmanager
|
||||
def synchronized(key: str, timeout: int = 60):
|
||||
lock: RedisLock = redis.get_redis().lock(f"lock:{key}", timeout=timeout)
|
||||
|
||||
@@ -63,10 +63,7 @@ def execute_graph(**kwargs):
|
||||
try:
|
||||
log(f"Executing recurring job for graph #{args.graph_id}")
|
||||
get_execution_client().add_execution(
|
||||
graph_id=args.graph_id,
|
||||
data=args.input_data,
|
||||
user_id=args.user_id,
|
||||
graph_version=args.graph_version,
|
||||
args.graph_id, args.input_data, args.user_id
|
||||
)
|
||||
except Exception as e:
|
||||
logger.exception(f"Error executing graph {args.graph_id}: {e}")
|
||||
|
||||
@@ -23,15 +23,6 @@ from backend.util.settings import Settings
|
||||
|
||||
settings = Settings()
|
||||
|
||||
# This is an override since Ollama doesn't actually require an API key, but the credential system enforces that one be attached
|
||||
ollama_credentials = APIKeyCredentials(
|
||||
id="744fdc56-071a-4761-b5a5-0af0ce10a2b5",
|
||||
provider="ollama",
|
||||
api_key=SecretStr("FAKE_API_KEY"),
|
||||
title="Use Credits for Ollama",
|
||||
expires_at=None,
|
||||
)
|
||||
|
||||
revid_credentials = APIKeyCredentials(
|
||||
id="fdb7f412-f519-48d1-9b5f-d2f73d0e01fe",
|
||||
provider="revid",
|
||||
@@ -130,47 +121,9 @@ nvidia_credentials = APIKeyCredentials(
|
||||
title="Use Credits for Nvidia",
|
||||
expires_at=None,
|
||||
)
|
||||
screenshotone_credentials = APIKeyCredentials(
|
||||
id="3b1bdd16-8818-4bc2-8cbb-b23f9a3439ed",
|
||||
provider="screenshotone",
|
||||
api_key=SecretStr(settings.secrets.screenshotone_api_key),
|
||||
title="Use Credits for ScreenshotOne",
|
||||
expires_at=None,
|
||||
)
|
||||
mem0_credentials = APIKeyCredentials(
|
||||
id="ed55ac19-356e-4243-a6cb-bc599e9b716f",
|
||||
provider="mem0",
|
||||
api_key=SecretStr(settings.secrets.mem0_api_key),
|
||||
title="Use Credits for Mem0",
|
||||
expires_at=None,
|
||||
)
|
||||
|
||||
apollo_credentials = APIKeyCredentials(
|
||||
id="544c62b5-1d0f-4156-8fb4-9525f11656eb",
|
||||
provider="apollo",
|
||||
api_key=SecretStr(settings.secrets.apollo_api_key),
|
||||
title="Use Credits for Apollo",
|
||||
expires_at=None,
|
||||
)
|
||||
|
||||
smartlead_credentials = APIKeyCredentials(
|
||||
id="3bcdbda3-84a3-46af-8fdb-bfd2472298b8",
|
||||
provider="smartlead",
|
||||
api_key=SecretStr(settings.secrets.smartlead_api_key),
|
||||
title="Use Credits for SmartLead",
|
||||
expires_at=None,
|
||||
)
|
||||
|
||||
zerobounce_credentials = APIKeyCredentials(
|
||||
id="63a6e279-2dc2-448e-bf57-85776f7176dc",
|
||||
provider="zerobounce",
|
||||
api_key=SecretStr(settings.secrets.zerobounce_api_key),
|
||||
title="Use Credits for ZeroBounce",
|
||||
expires_at=None,
|
||||
)
|
||||
|
||||
DEFAULT_CREDENTIALS = [
|
||||
ollama_credentials,
|
||||
revid_credentials,
|
||||
ideogram_credentials,
|
||||
replicate_credentials,
|
||||
@@ -184,12 +137,7 @@ DEFAULT_CREDENTIALS = [
|
||||
fal_credentials,
|
||||
exa_credentials,
|
||||
e2b_credentials,
|
||||
mem0_credentials,
|
||||
nvidia_credentials,
|
||||
screenshotone_credentials,
|
||||
apollo_credentials,
|
||||
smartlead_credentials,
|
||||
zerobounce_credentials,
|
||||
]
|
||||
|
||||
|
||||
@@ -221,10 +169,6 @@ class IntegrationCredentialsStore:
|
||||
def get_all_creds(self, user_id: str) -> list[Credentials]:
|
||||
users_credentials = self._get_user_integrations(user_id).credentials
|
||||
all_credentials = users_credentials
|
||||
# These will always be added
|
||||
all_credentials.append(ollama_credentials)
|
||||
|
||||
# These will only be added if the API key is set
|
||||
if settings.secrets.revid_api_key:
|
||||
all_credentials.append(revid_credentials)
|
||||
if settings.secrets.ideogram_api_key:
|
||||
@@ -253,16 +197,6 @@ class IntegrationCredentialsStore:
|
||||
all_credentials.append(e2b_credentials)
|
||||
if settings.secrets.nvidia_api_key:
|
||||
all_credentials.append(nvidia_credentials)
|
||||
if settings.secrets.screenshotone_api_key:
|
||||
all_credentials.append(screenshotone_credentials)
|
||||
if settings.secrets.mem0_api_key:
|
||||
all_credentials.append(mem0_credentials)
|
||||
if settings.secrets.apollo_api_key:
|
||||
all_credentials.append(apollo_credentials)
|
||||
if settings.secrets.smartlead_api_key:
|
||||
all_credentials.append(smartlead_credentials)
|
||||
if settings.secrets.zerobounce_api_key:
|
||||
all_credentials.append(zerobounce_credentials)
|
||||
return all_credentials
|
||||
|
||||
def get_creds_by_id(self, user_id: str, credentials_id: str) -> Credentials | None:
|
||||
|
||||
@@ -1,10 +1,7 @@
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
from backend.integrations.oauth.todoist import TodoistOAuthHandler
|
||||
|
||||
from .github import GitHubOAuthHandler
|
||||
from .google import GoogleOAuthHandler
|
||||
from .linear import LinearOAuthHandler
|
||||
from .notion import NotionOAuthHandler
|
||||
from .twitter import TwitterOAuthHandler
|
||||
|
||||
@@ -20,8 +17,6 @@ HANDLERS_BY_NAME: dict["ProviderName", type["BaseOAuthHandler"]] = {
|
||||
GoogleOAuthHandler,
|
||||
NotionOAuthHandler,
|
||||
TwitterOAuthHandler,
|
||||
LinearOAuthHandler,
|
||||
TodoistOAuthHandler,
|
||||
]
|
||||
}
|
||||
# --8<-- [end:HANDLERS_BY_NAMEExample]
|
||||
|
||||
@@ -1,165 +0,0 @@
|
||||
import json
|
||||
from typing import Optional
|
||||
from urllib.parse import urlencode
|
||||
|
||||
from pydantic import SecretStr
|
||||
|
||||
from backend.blocks.linear._api import LinearAPIException
|
||||
from backend.data.model import APIKeyCredentials, OAuth2Credentials
|
||||
from backend.integrations.providers import ProviderName
|
||||
from backend.util.request import requests
|
||||
|
||||
from .base import BaseOAuthHandler
|
||||
|
||||
|
||||
class LinearOAuthHandler(BaseOAuthHandler):
|
||||
"""
|
||||
OAuth2 handler for Linear.
|
||||
"""
|
||||
|
||||
PROVIDER_NAME = ProviderName.LINEAR
|
||||
|
||||
def __init__(self, client_id: str, client_secret: str, redirect_uri: str):
|
||||
self.client_id = client_id
|
||||
self.client_secret = client_secret
|
||||
self.redirect_uri = redirect_uri
|
||||
self.auth_base_url = "https://linear.app/oauth/authorize"
|
||||
self.token_url = "https://api.linear.app/oauth/token" # Correct token URL
|
||||
self.revoke_url = "https://api.linear.app/oauth/revoke"
|
||||
|
||||
def get_login_url(
|
||||
self, scopes: list[str], state: str, code_challenge: Optional[str]
|
||||
) -> str:
|
||||
|
||||
params = {
|
||||
"client_id": self.client_id,
|
||||
"redirect_uri": self.redirect_uri,
|
||||
"response_type": "code", # Important: include "response_type"
|
||||
"scope": ",".join(scopes), # Comma-separated, not space-separated
|
||||
"state": state,
|
||||
}
|
||||
return f"{self.auth_base_url}?{urlencode(params)}"
|
||||
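# Illustrative output (identifiers are placeholders; urlencode
# percent-encodes the comma-separated scopes):
#
#   https://linear.app/oauth/authorize?client_id=abc&redirect_uri=...
#       &response_type=code&scope=read%2Cwrite&state=xyz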
|
||||
def exchange_code_for_tokens(
|
||||
self, code: str, scopes: list[str], code_verifier: Optional[str]
|
||||
) -> OAuth2Credentials:
|
||||
return self._request_tokens({"code": code, "redirect_uri": self.redirect_uri})
|
||||
|
||||
def revoke_tokens(self, credentials: OAuth2Credentials) -> bool:
|
||||
if not credentials.access_token:
|
||||
raise ValueError("No access token to revoke")
|
||||
|
||||
headers = {
|
||||
"Authorization": f"Bearer {credentials.access_token.get_secret_value()}"
|
||||
}
|
||||
|
||||
response = requests.post(self.revoke_url, headers=headers)
|
||||
if not response.ok:
|
||||
try:
|
||||
error_data = response.json()
|
||||
error_message = error_data.get("error", "Unknown error")
|
||||
except json.JSONDecodeError:
|
||||
error_message = response.text
|
||||
raise LinearAPIException(
|
||||
f"Failed to revoke Linear tokens ({response.status_code}): {error_message}",
|
||||
response.status_code,
|
||||
)
|
||||
|
||||
return True # Linear doesn't return JSON on successful revoke
|
||||
|
||||
def _refresh_tokens(self, credentials: OAuth2Credentials) -> OAuth2Credentials:
|
||||
if not credentials.refresh_token:
|
||||
raise ValueError(
|
||||
"No refresh token available."
|
||||
) # Linear uses non-expiring tokens
|
||||
|
||||
return self._request_tokens(
|
||||
{
|
||||
"refresh_token": credentials.refresh_token.get_secret_value(),
|
||||
"grant_type": "refresh_token",
|
||||
}
|
||||
)
|
||||
|
||||
def _request_tokens(
|
||||
self,
|
||||
params: dict[str, str],
|
||||
current_credentials: Optional[OAuth2Credentials] = None,
|
||||
) -> OAuth2Credentials:
|
||||
request_body = {
|
||||
"client_id": self.client_id,
|
||||
"client_secret": self.client_secret,
|
||||
"grant_type": "authorization_code", # Ensure grant_type is correct
|
||||
**params,
|
||||
}
|
||||
|
||||
headers = {
|
||||
"Content-Type": "application/x-www-form-urlencoded"
|
||||
} # Correct header for token request
|
||||
response = requests.post(self.token_url, data=request_body, headers=headers)
|
||||
|
||||
if not response.ok:
|
||||
try:
|
||||
error_data = response.json()
|
||||
error_message = error_data.get("error", "Unknown error")
|
||||
|
||||
except json.JSONDecodeError:
|
||||
error_message = response.text
|
||||
raise LinearAPIException(
|
||||
f"Failed to fetch Linear tokens ({response.status_code}): {error_message}",
|
||||
response.status_code,
|
||||
)
|
||||
|
||||
token_data = response.json()
|
||||
|
||||
# Note: Linear access tokens do not expire, so we set expires_at to None
|
||||
new_credentials = OAuth2Credentials(
|
||||
provider=self.PROVIDER_NAME,
|
||||
title=current_credentials.title if current_credentials else None,
|
||||
username=token_data.get("user", {}).get(
|
||||
"name", "Unknown User"
|
||||
), # extract the username, falling back to a default
|
||||
access_token=token_data["access_token"],
|
||||
scopes=token_data["scope"].split(
|
||||
","
|
||||
), # Linear returns comma-separated scopes
|
||||
refresh_token=token_data.get(
|
||||
"refresh_token"
|
||||
), # Linear uses non-expiring tokens so this might be null
|
||||
access_token_expires_at=None,
|
||||
refresh_token_expires_at=None,
|
||||
)
|
||||
if current_credentials:
|
||||
new_credentials.id = current_credentials.id
|
||||
return new_credentials
|
||||
|
||||
def _request_username(self, access_token: str) -> Optional[str]:
|
||||
|
||||
# Use the LinearClient to fetch user details using GraphQL
|
||||
from backend.blocks.linear._api import LinearClient
|
||||
|
||||
try:
|
||||
|
||||
linear_client = LinearClient(
|
||||
APIKeyCredentials(
|
||||
api_key=SecretStr(access_token),
|
||||
title="temp",
|
||||
provider=self.PROVIDER_NAME,
|
||||
expires_at=None,
|
||||
)
|
||||
) # Temporary credentials for this request
|
||||
|
||||
query = """
|
||||
query Viewer {
|
||||
viewer {
|
||||
name
|
||||
}
|
||||
}
|
||||
"""
|
||||
|
||||
response = linear_client.query(query)
|
||||
return response["viewer"]["name"]
|
||||
|
||||
except Exception as e: # Handle any errors
|
||||
|
||||
print(f"Error fetching username: {e}")
|
||||
return None
|
||||
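Taken together, the methods above implement the standard authorization-code flow for Linear. A minimal driver sketch follows; it is not part of the diff, and the handler class name, constructor arguments, and the "read" scope are assumptions from context:

handler = LinearOAuthHandler(  # class defined earlier in this file
    client_id="...",
    client_secret="...",
    redirect_uri="https://example.com/oauth/callback",
)
login_url = handler.get_login_url(scopes=["read"], state="opaque-state", code_challenge=None)
# ...the user authorizes at login_url; Linear redirects back with ?code=...&state=...
creds = handler.exchange_code_for_tokens(code="<code>", scopes=["read"], code_verifier=None)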
@@ -1,81 +0,0 @@
import urllib.parse
from typing import ClassVar, Optional

import requests

from backend.data.model import OAuth2Credentials, ProviderName
from backend.integrations.oauth.base import BaseOAuthHandler


class TodoistOAuthHandler(BaseOAuthHandler):
    PROVIDER_NAME = ProviderName.TODOIST
    DEFAULT_SCOPES: ClassVar[list[str]] = [
        "task:add",
        "data:read",
        "data:read_write",
        "data:delete",
        "project:delete",
    ]

    AUTHORIZE_URL = "https://todoist.com/oauth/authorize"
    TOKEN_URL = "https://todoist.com/oauth/access_token"

    def __init__(self, client_id: str, client_secret: str, redirect_uri: str):
        self.client_id = client_id
        self.client_secret = client_secret
        self.redirect_uri = redirect_uri

    def get_login_url(
        self, scopes: list[str], state: str, code_challenge: Optional[str]
    ) -> str:
        # Note: always requests DEFAULT_SCOPES; the "scopes" argument is ignored
        params = {
            "client_id": self.client_id,
            "scope": ",".join(self.DEFAULT_SCOPES),
            "state": state,
        }

        return f"{self.AUTHORIZE_URL}?{urllib.parse.urlencode(params)}"

    def exchange_code_for_tokens(
        self, code: str, scopes: list[str], code_verifier: Optional[str]
    ) -> OAuth2Credentials:
        """Exchange authorization code for access tokens"""

        data = {
            "client_id": self.client_id,
            "client_secret": self.client_secret,
            "code": code,
            "redirect_uri": self.redirect_uri,
        }

        response = requests.post(self.TOKEN_URL, data=data)
        response.raise_for_status()

        tokens = response.json()

        response = requests.post(
            "https://api.todoist.com/sync/v9/sync",
            headers={"Authorization": f"Bearer {tokens['access_token']}"},
            data={"sync_token": "*", "resource_types": '["user"]'},
        )
        response.raise_for_status()
        user_info = response.json()
        user_email = user_info["user"].get("email")

        return OAuth2Credentials(
            provider=self.PROVIDER_NAME,
            title=None,
            username=user_email,
            access_token=tokens["access_token"],
            refresh_token=None,
            access_token_expires_at=None,
            refresh_token_expires_at=None,
            scopes=scopes,
        )

    def _refresh_tokens(self, credentials: OAuth2Credentials) -> OAuth2Credentials:
        # Todoist does not support token refresh
        return credentials

    def revoke_tokens(self, credentials: OAuth2Credentials) -> bool:
        return False
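Note that exchange_code_for_tokens makes two HTTP round trips: one to the token endpoint and one to the Sync v9 API to look up the user's email for the username field. A sketch of the shapes involved; the literal values are illustrative, not taken from the diff:

# 1) POST https://todoist.com/oauth/access_token
#    -> {"access_token": "0123abcd...", "token_type": "Bearer"}   (long-lived; no refresh token)
# 2) POST https://api.todoist.com/sync/v9/sync with resource_types='["user"]'
#    -> {"user": {"email": "user@example.com", ...}, ...}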
@@ -4,7 +4,6 @@ from enum import Enum
# --8<-- [start:ProviderName]
class ProviderName(str, Enum):
    ANTHROPIC = "anthropic"
    APOLLO = "apollo"
    COMPASS = "compass"
    DISCORD = "discord"
    D_ID = "d_id"
@@ -18,9 +17,7 @@ class ProviderName(str, Enum):
    HUBSPOT = "hubspot"
    IDEOGRAM = "ideogram"
    JINA = "jina"
    LINEAR = "linear"
    MEDIUM = "medium"
    MEM0 = "mem0"
    NOTION = "notion"
    NVIDIA = "nvidia"
    OLLAMA = "ollama"
@@ -28,15 +25,9 @@ class ProviderName(str, Enum):
    OPENWEATHERMAP = "openweathermap"
    OPEN_ROUTER = "open_router"
    PINECONE = "pinecone"
    REDDIT = "reddit"
    REPLICATE = "replicate"
    REVID = "revid"
    SCREENSHOTONE = "screenshotone"
    SLANT3D = "slant3d"
    SMARTLEAD = "smartlead"
    SMTP = "smtp"
    TWITTER = "twitter"
    TODOIST = "todoist"
    UNREAL_SPEECH = "unreal_speech"
    ZEROBOUNCE = "zerobounce"
# --8<-- [end:ProviderName]
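Because ProviderName subclasses both str and Enum, its members behave as plain strings wherever the codebase expects a provider identifier. A quick illustration, not part of the diff:

assert ProviderName.LINEAR == "linear"                   # compares equal to its value
assert ProviderName("todoist") is ProviderName.TODOIST   # round-trips from a plain string
assert ProviderName.SMTP.value == "smtp"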
@@ -168,7 +168,7 @@ class BaseWebhooksManager(ABC, Generic[WT]):

        id = str(uuid4())
        secret = secrets.token_hex(32)
        provider_name: ProviderName = self.PROVIDER_NAME
        provider_name = self.PROVIDER_NAME
        ingress_url = webhook_ingress_url(provider_name=provider_name, webhook_id=id)
        if register:
            if not credentials:
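For reference, the identifier and secret generated above have predictable shapes; a standalone check, illustrative rather than taken from the diff:

import secrets
from uuid import uuid4

webhook_id = str(uuid4())               # 36-char string, e.g. "1b4e28ba-2fa1-11d2-883f-0016d3cca427"
webhook_secret = secrets.token_hex(32)  # 32 random bytes rendered as 64 hex characters
assert len(webhook_id) == 36 and len(webhook_secret) == 64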
@@ -1,7 +1,7 @@
import logging

from backend.data import integrations
from backend.data.model import Credentials
from backend.data.model import APIKeyCredentials, Credentials, OAuth2Credentials

from ._base import WT, BaseWebhooksManager

@@ -25,6 +25,6 @@ class ManualWebhookManagerBase(BaseWebhooksManager[WT]):
    async def _deregister_webhook(
        self,
        webhook: integrations.Webhook,
        credentials: Credentials,
        credentials: OAuth2Credentials | APIKeyCredentials,
    ) -> None:
        pass

@@ -67,7 +67,7 @@ class GithubWebhooksManager(BaseWebhooksManager):

        headers = {
            **self.GITHUB_API_DEFAULT_HEADERS,
            "Authorization": credentials.auth_header(),
            "Authorization": credentials.bearer(),
        }

        repo, github_hook_id = webhook.resource, webhook.provider_webhook_id
@@ -96,7 +96,7 @@ class GithubWebhooksManager(BaseWebhooksManager):

        headers = {
            **self.GITHUB_API_DEFAULT_HEADERS,
            "Authorization": credentials.auth_header(),
            "Authorization": credentials.bearer(),
        }
        webhook_data = {
            "name": "web",
@@ -142,7 +142,7 @@ class GithubWebhooksManager(BaseWebhooksManager):

        headers = {
            **self.GITHUB_API_DEFAULT_HEADERS,
            "Authorization": credentials.auth_header(),
            "Authorization": credentials.bearer(),
        }

        if webhook_type == self.WebhookType.REPO:
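In each hunk the old auth_header() call and the new bearer() call appear side by side; either way, the GitHub REST API expects the resulting header to carry the token in the standard bearer form. Purely illustrative, with a placeholder token and the assumption that both helpers produce a string like this:

headers = {
    "Accept": "application/vnd.github+json",
    "Authorization": "Bearer ghp_xxxxxxxxxxxxxxxxxxxx",  # assumed output of auth_header()/bearer()
}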
@@ -1,5 +0,0 @@
from .notifications import NotificationManager

__all__ = [
    "NotificationManager",
]
@@ -1,103 +0,0 @@
import logging
import pathlib

from postmarker.core import PostmarkClient
from postmarker.models.emails import EmailManager
from prisma.enums import NotificationType
from pydantic import BaseModel

from backend.data.notifications import (
    NotificationEventModel,
    NotificationTypeOverride,
    T_co,
)
from backend.util.settings import Settings
from backend.util.text import TextFormatter

logger = logging.getLogger(__name__)
settings = Settings()


# The following is a workaround to get the type checker to recognize the EmailManager type.
# It is a temporary solution and should be removed once the Postmark library is updated
# to support type annotations.
class TypedPostmarkClient(PostmarkClient):
    emails: EmailManager


class Template(BaseModel):
    subject_template: str
    body_template: str
    base_template: str


class EmailSender:
    def __init__(self):
        if settings.secrets.postmark_server_api_token:
            self.postmark = TypedPostmarkClient(
                server_token=settings.secrets.postmark_server_api_token
            )
        else:
            logger.warning(
                "Postmark server API token not found, email sending disabled"
            )
            self.postmark = None
        self.formatter = TextFormatter()

    def send_templated(
        self,
        notification: NotificationType,
        user_email: str,
        data: NotificationEventModel[T_co] | list[NotificationEventModel[T_co]],
    ):
        """Send an email to a user using a template pulled from the notification type"""
        if not self.postmark:
            logger.warning("Postmark client not initialized, email not sent")
            return
        template = self._get_template(notification)

        try:
            subject, full_message = self.formatter.format_email(
                base_template=template.base_template,
                subject_template=template.subject_template,
                content_template=template.body_template,
                data=data,
                unsubscribe_link="https://platform.agpt.co/profile/settings",
            )

        except Exception as e:
            logger.error(f"Error formatting full message: {e}")
            raise e

        self._send_email(user_email, subject, full_message)

    def _get_template(self, notification: NotificationType):
        # convert the notification type to a notification type override
        notification_type_override = NotificationTypeOverride(notification)
        # the template lives at templates/<name>.jinja2 (the .template property
        # already includes the ".html" part of the name)
        template_path = f"templates/{notification_type_override.template}.jinja2"
        logger.debug(
            f"Template full path: {pathlib.Path(__file__).parent / template_path}"
        )
        base_template_path = "templates/base.html.jinja2"
        with open(pathlib.Path(__file__).parent / base_template_path, "r") as file:
            base_template = file.read()
        with open(pathlib.Path(__file__).parent / template_path, "r") as file:
            template = file.read()
        return Template(
            subject_template=notification_type_override.subject,
            body_template=template,
            base_template=base_template,
        )

    def _send_email(self, user_email: str, subject: str, body: str):
        if not self.postmark:
            logger.warning("Email tried to send without postmark configured")
            return
        logger.debug(f"Sending email to {user_email} with subject {subject}")
        self.postmark.emails.send(
            From=settings.config.postmark_sender_email,
            To=user_email,
            Subject=subject,
            HtmlBody=body,
        )
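A hypothetical caller sketch for the class above; the NotificationType member name and the event model instance are assumptions standing in for real values built elsewhere in the service:

sender = EmailSender()
sender.send_templated(
    notification=NotificationType.AGENT_RUN,  # assumed member name
    user_email="user@example.com",
    data=some_event_model,  # a NotificationEventModel built by the caller
)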
@@ -1,275 +0,0 @@
import logging
import time
from typing import Callable

import aio_pika
from aio_pika.exceptions import QueueEmpty
from prisma.enums import NotificationType
from pydantic import BaseModel

from backend.data.notifications import (
    NotificationEventDTO,
    NotificationEventModel,
    NotificationResult,
    QueueType,
    get_data_type,
)
from backend.data.rabbitmq import Exchange, ExchangeType, Queue, RabbitMQConfig
from backend.data.user import (
    get_user_email_by_id,
    get_user_email_verification,
    get_user_notification_preference,
)
from backend.notifications.email import EmailSender
from backend.util.service import AppService, expose
from backend.util.settings import Settings

logger = logging.getLogger(__name__)
settings = Settings()


class NotificationEvent(BaseModel):
    event: NotificationEventDTO
    model: NotificationEventModel


def create_notification_config() -> RabbitMQConfig:
    """Create RabbitMQ configuration for notifications"""
    notification_exchange = Exchange(name="notifications", type=ExchangeType.TOPIC)

    dead_letter_exchange = Exchange(name="dead_letter", type=ExchangeType.TOPIC)

    queues = [
        # Main notification queues
        Queue(
            name="immediate_notifications",
            exchange=notification_exchange,
            routing_key="notification.immediate.#",
            arguments={
                "x-dead-letter-exchange": dead_letter_exchange.name,
                "x-dead-letter-routing-key": "failed.immediate",
            },
        ),
        Queue(
            name="admin_notifications",
            exchange=notification_exchange,
            routing_key="notification.admin.#",
            arguments={
                "x-dead-letter-exchange": dead_letter_exchange.name,
                "x-dead-letter-routing-key": "failed.admin",
            },
        ),
        # Failed notifications queue
        Queue(
            name="failed_notifications",
            exchange=dead_letter_exchange,
            routing_key="failed.#",
        ),
    ]

    return RabbitMQConfig(
        exchanges=[
            notification_exchange,
            dead_letter_exchange,
        ],
        queues=queues,
    )


class NotificationManager(AppService):
    """Service for handling notifications with batching support"""

    def __init__(self):
        super().__init__()
        self.use_db = True
        self.rabbitmq_config = create_notification_config()
        self.running = True
        self.email_sender = EmailSender()

    @classmethod
    def get_port(cls) -> int:
        return settings.config.notification_service_port

    def get_routing_key(self, event: NotificationEventModel) -> str:
        """Get the appropriate routing key for an event"""
        if event.strategy == QueueType.IMMEDIATE:
            return f"notification.immediate.{event.type.value}"
        elif event.strategy == QueueType.BACKOFF:
            return f"notification.backoff.{event.type.value}"
        elif event.strategy == QueueType.ADMIN:
            return f"notification.admin.{event.type.value}"
        elif event.strategy == QueueType.HOURLY:
            return f"notification.hourly.{event.type.value}"
        elif event.strategy == QueueType.DAILY:
            return f"notification.daily.{event.type.value}"
        return f"notification.{event.type.value}"
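    # Topic-matching sketch for the keys above, against the bindings declared in
    # create_notification_config():
    #   "notification.immediate.<type>" -> matches "notification.immediate.#" (immediate_notifications)
    #   "notification.admin.<type>"     -> matches "notification.admin.#" (admin_notifications)
    #   "notification.backoff/hourly/daily.<type>" -> matches neither binding above,
    #   so such messages are only routable if a queue is bound elsewhere.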

    @expose
    def queue_notification(self, event: NotificationEventDTO) -> NotificationResult:
        """Queue a notification - exposed method for other services to call"""
        try:
            logger.info(f"Received request to queue {event=}")
            # Workaround for not being able to serialize generics over the expose bus
            parsed_event = NotificationEventModel[
                get_data_type(event.type)
            ].model_validate(event.model_dump())
            routing_key = self.get_routing_key(parsed_event)
            message = parsed_event.model_dump_json()

            logger.info(f"Received request to queue {message=}")

            exchange = "notifications"

            # Publish to RabbitMQ
            self.run_and_wait(
                self.rabbit.publish_message(
                    routing_key=routing_key,
                    message=message,
                    exchange=next(
                        ex for ex in self.rabbit_config.exchanges if ex.name == exchange
                    ),
                )
            )

            return NotificationResult(
                success=True,
                message=f"Notification queued with routing key: {routing_key}",
            )

        except Exception as e:
            logger.exception(f"Error queueing notification: {e}")
            return NotificationResult(success=False, message=str(e))

    def _should_email_user_based_on_preference(
        self, user_id: str, event_type: NotificationType
    ) -> bool:
        """Check if a user wants to receive a notification based on their preferences and email verification status"""
        validated_email = self.run_and_wait(get_user_email_verification(user_id))
        preference = self.run_and_wait(
            get_user_notification_preference(user_id)
        ).preferences.get(event_type, True)
        # only if both are true should we email this person
        return validated_email and preference
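    # Truth table for the check above (the preference defaults to True when no
    # explicit value is stored for the event type):
    #   email verified | preference | send?
    #   True           | True       | True
    #   True           | False      | False
    #   False          | anything   | False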

    def _parse_message(self, message: str) -> NotificationEvent | None:
        try:
            event = NotificationEventDTO.model_validate_json(message)
            model = NotificationEventModel[
                get_data_type(event.type)
            ].model_validate_json(message)
            return NotificationEvent(event=event, model=model)
        except Exception as e:
            logger.error(f"Error parsing message due to non-matching schema: {e}")
            return None

    def _process_admin_message(self, message: str) -> bool:
        """Process a single notification, sending to an admin, returning whether to put into the failed queue"""
        try:
            parsed = self._parse_message(message)
            if not parsed:
                return False
            event = parsed.event
            model = parsed.model
            logger.debug(f"Processing notification for admin: {model}")
            recipient_email = settings.config.refund_notification_email
            self.email_sender.send_templated(event.type, recipient_email, model)
            return True
        except Exception as e:
            logger.exception(f"Error processing notification: {e}")
            return False

    def _process_immediate(self, message: str) -> bool:
        """Process a single notification immediately, returning whether to put into the failed queue"""
        try:
            parsed = self._parse_message(message)
            if not parsed:
                return False
            event = parsed.event
            model = parsed.model
            logger.debug(f"Processing immediate notification: {model}")

            recipient_email = self.run_and_wait(get_user_email_by_id(event.user_id))
            if not recipient_email:
                logger.error(f"User email not found for user {event.user_id}")
                return False

            should_send = self._should_email_user_based_on_preference(
                event.user_id, event.type
            )
            if not should_send:
                logger.debug(
                    f"User {event.user_id} does not want to receive {event.type} notifications"
                )
                return True

            self.email_sender.send_templated(event.type, recipient_email, model)
            return True
        except Exception as e:
            logger.exception(f"Error processing notification: {e}")
            return False

    def _run_queue(
        self,
        queue: aio_pika.abc.AbstractQueue,
        process_func: Callable[[str], bool],
        error_queue_name: str,
    ):
        message: aio_pika.abc.AbstractMessage | None = None
        try:
            # "no_ack" is a confusingly named parameter; think of it as "auto_ack"
            message = self.run_and_wait(queue.get(timeout=1.0, no_ack=False))
            result = process_func(message.body.decode())
            if result:
                self.run_and_wait(message.ack())
            else:
                self.run_and_wait(message.reject(requeue=False))

        except QueueEmpty:
            logger.debug(f"Queue {error_queue_name} empty")
        except Exception as e:
            if message:
                logger.error(
                    f"Error in notification service loop, message rejected: {e}"
                )
                self.run_and_wait(message.reject(requeue=False))
            else:
                logger.error(
                    f"Error in notification service loop; the message could not be rejected and will have to be removed manually to free space in the queue: {e}"
                )

    def run_service(self):
        logger.info(f"[{self.service_name}] Started notification service")

        # Set up queue consumers
        channel = self.run_and_wait(self.rabbit.get_channel())

        immediate_queue = self.run_and_wait(
            channel.get_queue("immediate_notifications")
        )

        admin_queue = self.run_and_wait(channel.get_queue("admin_notifications"))

        while self.running:
            try:
                self._run_queue(
                    queue=immediate_queue,
                    process_func=self._process_immediate,
                    error_queue_name="immediate_notifications",
                )
                self._run_queue(
                    queue=admin_queue,
                    process_func=self._process_admin_message,
                    error_queue_name="admin_notifications",
                )

                time.sleep(0.1)

            except QueueEmpty as e:
                logger.debug(f"Queue empty: {e}")
            except Exception as e:
                logger.error(f"Error in notification service loop: {e}")

    def cleanup(self):
        """Cleanup service resources"""
        self.running = False
        super().cleanup()
@@ -1,96 +0,0 @@
{# Agent Run #}
{# Template variables:
    data.agent_name: the name of the agent
    data.credits_used: the number of credits used by the agent
    data.node_count: the number of nodes the agent executed
    data.execution_time: the time it took to run the agent
    data.graph_id: the id of the graph the agent ran on
    data.outputs: the list of outputs of the agent
#}
<p style="
    font-family: 'Poppins', sans-serif;
    color: #070629;
    font-size: 16px;
    line-height: 165%;
    margin-top: 0;
    margin-bottom: 10px;
">
    Your agent, <strong>{{ data.agent_name }}</strong>, has completed its run!
</p>
<div style="
    font-family: 'Poppins', sans-serif;
    color: #070629;
    font-size: 16px;
    line-height: 165%;
    margin-top: 0;
    margin-bottom: 20px;
    padding-left: 20px;
">
    <p style="margin-bottom: 10px;"><strong>Time Taken:</strong> {{ data.execution_time | int }} seconds</p>
    <p style="margin-bottom: 10px;"><strong>Nodes Used:</strong> {{ data.node_count }}</p>
    <p style="margin-bottom: 10px;"><strong>Cost:</strong> ${{ "{:.2f}".format((data.credits_used|float)/100) }}</p>
</div>
{% if data.outputs and data.outputs|length > 0 %}
<div style="
    margin-left: 15px;
    margin-bottom: 20px;
">
    <p style="
        font-family: 'Poppins', sans-serif;
        color: #070629;
        font-weight: 600;
        font-size: 16px;
        margin-bottom: 10px;
    ">
        Results:
    </p>

    {% for output in data.outputs %}
    <div style="
        margin-left: 15px;
        margin-bottom: 15px;
    ">
        <p style="
            font-family: 'Poppins', sans-serif;
            color: #5D23BB;
            font-weight: 500;
            font-size: 16px;
            margin-top: 0;
            margin-bottom: 8px;
        ">
            {{ output.name }}
        </p>

        {% for key, value in output.items() %}
        {% if key != 'name' %}
        <div style="
            margin-left: 15px;
            background-color: #f5f5ff;
            padding: 8px 12px;
            border-radius: 4px;
            font-family: 'Roboto Mono', monospace;
            white-space: pre-wrap;
            word-break: break-word;
            overflow-wrap: break-word;
            max-width: 100%;
            overflow-x: auto;
            margin-top: 5px;
            margin-bottom: 10px;
            line-height: 1.4;
        ">
            {% if value is iterable and value is not string %}
            {% if value|length == 1 %}
            {{ value[0] }}
            {% else %}
            [{% for item in value %}{{ item }}{% if not loop.last %}, {% endif %}{% endfor %}]
            {% endif %}
            {% else %}
            {{ value }}
            {% endif %}
        </div>
        {% endif %}
        {% endfor %}
    </div>
    {% endfor %}
</div>
{% endif %}
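The fragment above only renders the message body; email.py wraps it in the base template below via TextFormatter.format_email. A minimal standalone render sketch, assuming Jinja2 is available and using illustrative field values (the dict keys mirror the documented template variables; "fragment_source" stands for the template text above):

from jinja2 import Template

html = Template(fragment_source).render(
    data={
        "agent_name": "Example Agent",
        "credits_used": 150,        # rendered as $1.50
        "node_count": 7,
        "execution_time": 12.4,     # rendered as 12 seconds
        "graph_id": "graph-123",
        "outputs": [{"name": "result", "text": "done"}],
    }
)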
@@ -1,343 +0,0 @@
{# Base Template #}
{# Template variables:
    data.message: the message to display in the email
    data.title: the title of the email
    data.unsubscribe_link: the link to unsubscribe from the email
#}
<!doctype html>
<html lang="en" dir="ltr" xmlns:v="urn:schemas-microsoft-com:vml" xmlns:o="urn:schemas-microsoft-com:office:office">

<head>
  <meta charset="utf-8">
  <meta http-equiv="X-UA-Compatible" content="IE=edge">
  <meta name="viewport" content="width=device-width, initial-scale=1, user-scalable=yes">
  <meta name="format-detection" content="telephone=no, date=no, address=no, email=no, url=no">
  <meta name="x-apple-disable-message-reformatting">
  <!--[if !mso]>
    <meta http-equiv="X-UA-Compatible" content="IE=edge">
  <![endif]-->
  <!--[if mso]>
    <style>
      * { font-family: sans-serif !important; }
    </style>
    <noscript>
      <xml>
        <o:OfficeDocumentSettings>
          <o:PixelsPerInch>96</o:PixelsPerInch>
        </o:OfficeDocumentSettings>
      </xml>
    </noscript>
  <![endif]-->
  <style type="text/css">
    /* RESET STYLES */
    html,
    body {
      margin: 0 !important;
      padding: 0 !important;
      width: 100% !important;
      height: 100% !important;
    }

    body {
      -webkit-font-smoothing: antialiased;
      -moz-osx-font-smoothing: grayscale;
      text-rendering: optimizeLegibility;
    }

    .document {
      margin: 0 !important;
      padding: 0 !important;
      width: 100% !important;
    }

    img {
      border: 0;
      outline: none;
      text-decoration: none;
      -ms-interpolation-mode: bicubic;
    }

    table {
      border-collapse: collapse;
    }

    table,
    td {
      mso-table-lspace: 0pt;
      mso-table-rspace: 0pt;
    }

    body,
    table,
    td,
    a {
      -webkit-text-size-adjust: 100%;
      -ms-text-size-adjust: 100%;
    }

    h1,
    h2,
    h3,
    h4,
    h5,
    p {
      margin: 0;
      word-break: break-word;
    }

    /* iOS BLUE LINKS */
    a[x-apple-data-detectors] {
      color: inherit !important;
      text-decoration: none !important;
      font-size: inherit !important;
      font-family: inherit !important;
      font-weight: inherit !important;
      line-height: inherit !important;
    }

    /* ANDROID CENTER FIX */
    div[style*="margin: 16px 0;"] {
      margin: 0 !important;
    }

    /* MEDIA QUERIES */
    @media all and (max-width:639px) {
      .wrapper {
        width: 100% !important;
      }

      .container {
        width: 100% !important;
        min-width: 100% !important;
        padding: 0 !important;
      }

      .row {
        padding-left: 20px !important;
        padding-right: 20px !important;
      }

      .col-mobile {
        width: 20px !important;
      }

      .col {
        display: block !important;
        width: 100% !important;
      }

      .mobile-center {
        text-align: center !important;
        float: none !important;
      }

      .mobile-mx-auto {
        margin: 0 auto !important;
        float: none !important;
      }

      .mobile-left {
        text-align: center !important;
        float: left !important;
      }

      .mobile-hide {
        display: none !important;
      }

      .img {
        width: 100% !important;
        height: auto !important;
      }

      .ml-btn {
        width: 100% !important;
        max-width: 100% !important;
      }

      .ml-btn-container {
        width: 100% !important;
        max-width: 100% !important;
      }
    }
  </style>
  <style type="text/css">
    @import url("https://assets.mlcdn.com/fonts-v2.css?version=1729862");
  </style>
  <style type="text/css">
    @media screen {
      body {
        font-family: 'Poppins', sans-serif;
      }
    }
  </style>
  <title>{{data.title}}</title>
</head>

<body style="margin: 0 !important; padding: 0 !important; background-color:#070629;">
  <div class="document" role="article" aria-roledescription="email" lang="en" dir="ltr"
    style="background-color:#070629; line-height: 100%; font-size:medium; font-size:max(16px, 1rem);">
    <!-- Main Content -->
    <table width="100%" align="center" cellspacing="0" cellpadding="0" border="0">
      <tr>
        <td class="background" bgcolor="#070629" align="center" valign="top" style="padding: 0 8px;">
          <!-- Email Content -->
          <table class="container" align="center" width="640" cellpadding="0" cellspacing="0" border="0"
            style="max-width: 640px;">
            <tr>
              <td align="center">
                <!-- Logo Section -->
                <table class="container ml-4 ml-default-border" width="640" bgcolor="#E2ECFD" align="center" border="0"
                  cellspacing="0" cellpadding="0" style="width: 640px; min-width: 640px;">
                  <tr>
                    <td class="ml-default-border container" height="40" style="line-height: 40px; min-width: 640px;">
                    </td>
                  </tr>
                  <tr>
                    <td>
                      <table align="center" width="100%" border="0" cellspacing="0" cellpadding="0">
                        <tr>
                          <td class="row" align="center" style="padding: 0 50px;">
                            <img
                              src="https://storage.mlcdn.com/account_image/597379/8QJ8kOjXakVvfe1kJLY2wWCObU1mp5EiDLfBlbQa.png"
                              border="0" alt="" width="120" class="logo"
                              style="max-width: 120px; display: inline-block;">
                          </td>
                        </tr>
                      </table>
                    </td>
                  </tr>
                </table>

                <!-- Main Content Section -->
                <table class="container ml-6 ml-default-border" width="640" bgcolor="#E2ECFD" align="center" border="0"
                  cellspacing="0" cellpadding="0" style="color: #070629; width: 640px; min-width: 640px;">
                  <tr>
                    <td class="row" style="padding: 0 50px;">
                      {{data.message|safe}}
                    </td>
                  </tr>
                </table>

                <!-- Signature Section -->
                <table class="container ml-8 ml-default-border" width="640" bgcolor="#E2ECFD" align="center" border="0"
                  cellspacing="0" cellpadding="0" style="color: #070629; width: 640px; min-width: 640px;">
                  <tr>
                    <td class="row mobile-center" align="left" style="padding: 0 50px;">
                      <table class="ml-8 wrapper" border="0" cellspacing="0" cellpadding="0"
                        style="color: #070629; text-align: left;">
                        <tr>
                          <td class="col center mobile-center">
                            <p
                              style="font-family: 'Poppins', sans-serif; color: #070629; font-size: 16px; line-height: 165%; margin-top: 0; margin-bottom: 0;">
                              Thank you for being a part of the AutoGPT community! Join the conversation on our Discord <a href="https://discord.gg/autogpt" style="color: #4285F4; text-decoration: underline;">here</a> and share your thoughts with us anytime.
                            </p>
                          </td>
                        </tr>
                      </table>
                    </td>
                  </tr>
                </table>

                <!-- Footer Section -->
                <table class="container ml-10 ml-default-border" width="640" bgcolor="#ffffff" align="center" border="0"
                  cellspacing="0" cellpadding="0" style="width: 640px; min-width: 640px;">
                  <tr>
                    <td class="row" style="padding: 0 50px;">
                      <table align="center" width="100%" border="0" cellspacing="0" cellpadding="0">
                        <tr>
                          <td>
                            <!-- Footer Content -->
                            <table align="center" width="100%" border="0" cellspacing="0" cellpadding="0">
                              <tr>
                                <td class="col" align="left" valign="middle" width="120">
                                  <img
                                    src="https://storage.mlcdn.com/account_image/597379/8QJ8kOjXakVvfe1kJLY2wWCObU1mp5EiDLfBlbQa.png"
                                    border="0" alt="" width="120" class="logo"
                                    style="max-width: 120px; display: inline-block;">
                                </td>
                                <td class="col" width="40" height="30" style="line-height: 30px;"></td>
                                <td class="col mobile-left" align="right" valign="middle" width="250">
                                  <table role="presentation" cellpadding="0" cellspacing="0" border="0">
                                    <tr>
                                      <td align="center" valign="middle" width="18" style="padding: 0 5px 0 0;">
                                        <a href="https://x.com/auto_gpt" target="_blank" style="text-decoration: none;">
                                          <img
                                            src="https://assets.mlcdn.com/ml/images/icons/default/rounded_corners/black/x.png"
                                            width="18" alt="x">
                                        </a>
                                      </td>
                                      <td align="center" valign="middle" width="18" style="padding: 0 5px;">
                                        <a href="https://discord.gg/autogpt" target="_blank"
                                          style="text-decoration: none;">
                                          <img
                                            src="https://assets.mlcdn.com/ml/images/icons/default/rounded_corners/black/discord.png"
                                            width="18" alt="discord">
                                        </a>
                                      </td>
                                      <td align="center" valign="middle" width="18" style="padding: 0 0 0 5px;">
                                        <a href="https://agpt.co/" target="_blank" style="text-decoration: none;">
                                          <img
                                            src="https://assets.mlcdn.com/ml/images/icons/default/rounded_corners/black/website.png"
                                            width="18" alt="website">
                                        </a>
                                      </td>
                                    </tr>
                                  </table>
                                </td>
                              </tr>
                            </table>
                          </td>
                        </tr>
                        <tr>
                          <td align="center" style="text-align: left!important;">
                            <h5
                              style="font-family: 'Poppins', sans-serif; color: #070629; font-size: 15px; line-height: 125%; font-weight: bold; font-style: normal; text-decoration: none; margin-bottom: 6px;">
                              AutoGPT
                            </h5>
                          </td>
                        </tr>
                        <tr>
                          <td align="center" style="text-align: left!important;">
                            <p
                              style="font-family: 'Poppins', sans-serif; color: #070629; font-size: 14px; line-height: 150%; display: inline-block; margin-bottom: 0;">
                              3rd Floor, 1 Ashley Road, Altrincham, Cheshire, WA14 2DT<br>United Kingdom
                            </p>
                          </td>
                        </tr>
                        <tr>
                          <td height="8" style="line-height: 8px;"></td>
                        </tr>
                        <tr>
                          <td align="left" style="text-align: left!important;">
                            <p
                              style="font-family: 'Poppins', sans-serif; color: #070629; font-size: 14px; line-height: 150%; display: inline-block; margin-bottom: 0;">
                              You received this email because you signed up on our website.</p>
                          </td>
                        </tr>
                        <tr>
                          <td height="12" style="line-height: 12px;"></td>
                        </tr>
                        <tr>
                          <td align="left">
                            <p
                              style="font-family: 'Poppins', sans-serif; color: #070629; font-size: 14px; line-height: 150%; display: inline-block; margin-bottom: 0;">
                              <a href="{{data.unsubscribe_link}}"
                                style="color: #4285F4; font-weight: normal; font-style: normal; text-decoration: underline;">Unsubscribe</a>
                            </p>
                          </td>
                        </tr>
                      </table>
                    </td>
                  </tr>
                </table>
              </td>
            </tr>
          </table>
        </td>
      </tr>
    </table>
  </div>
</body>

</html>