mirror of https://github.com/simstudioai/sim.git (synced 2026-01-10 23:48:09 -05:00)

Compare commits: 18 commits
| SHA1 |
| --- |
| 7f82ed381a |
| 3dd36a8a35 |
| 09cccd5487 |
| 1773530325 |
| 2da7a6755c |
| 1e81cd6850 |
| ec73e2e9ce |
| 4937d72d70 |
| 219a065a7c |
| 8f06aec68b |
| 1de6f09069 |
| b10b2461a5 |
| 34fc8f84f5 |
| ee77dea2d6 |
| bba407b507 |
| ab63978ce8 |
| e697e50d4e |
| 41ec229431 |
.devcontainer/.bashrc (deleted)

@@ -1,69 +0,0 @@
# Sim Development Environment Bashrc
# This gets sourced by post-create.sh

# Enhanced prompt with git branch info
parse_git_branch() {
    git branch 2> /dev/null | sed -e '/^[^*]/d' -e 's/* \(.*\)/ (\1)/'
}

export PS1="\[\033[01;32m\]\u@simstudio\[\033[00m\]:\[\033[01;34m\]\w\[\033[33m\]\$(parse_git_branch)\[\033[00m\]\$ "

# Helpful aliases
alias ll="ls -la"
alias ..="cd .."
alias ...="cd ../.."

# Database aliases
alias pgc="PGPASSWORD=postgres psql -h db -U postgres -d simstudio"
alias check-db="PGPASSWORD=postgres psql -h db -U postgres -c '\l'"

# Sim specific aliases
alias logs="cd /workspace/apps/sim && tail -f logs/*.log 2>/dev/null || echo 'No log files found'"
alias sim-start="cd /workspace && bun run dev"
alias sim-migrate="cd /workspace/apps/sim && bunx drizzle-kit push"
alias sim-generate="cd /workspace/apps/sim && bunx drizzle-kit generate"
alias sim-rebuild="cd /workspace && bun run build && bun run start"
alias docs-dev="cd /workspace/apps/docs && bun run dev"

# Turbo related commands
alias turbo-build="cd /workspace && bunx turbo run build"
alias turbo-dev="cd /workspace && bunx turbo run dev"
alias turbo-test="cd /workspace && bunx turbo run test"

# Bun specific commands
alias bun-update="cd /workspace && bun update"
alias bun-add="cd /workspace && bun add"
alias bun-pm="cd /workspace && bun pm"
alias bun-canary="bun upgrade --canary"

# Default to workspace directory
cd /workspace 2>/dev/null || true

# Welcome message - only show once per session
if [ -z "$SIM_WELCOME_SHOWN" ]; then
    export SIM_WELCOME_SHOWN=1

    echo ""
    echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
    echo "🚀 Welcome to Sim development environment!"
    echo ""
    echo "Available commands:"
    echo "  sim-start    - Start all apps in development mode"
    echo "  sim-migrate  - Push schema changes to the database for sim app"
    echo "  sim-generate - Generate new migrations for sim app"
    echo "  sim-rebuild  - Build and start all apps"
    echo "  docs-dev     - Start only the docs app in development mode"
    echo ""
    echo "Turbo commands:"
    echo "  turbo-build  - Build all apps using Turborepo"
    echo "  turbo-dev    - Start development mode for all apps"
    echo "  turbo-test   - Run tests for all packages"
    echo ""
    echo "Bun commands:"
    echo "  bun-update   - Update dependencies"
    echo "  bun-add      - Add a new dependency"
    echo "  bun-pm       - Manage dependencies"
    echo "  bun-canary   - Upgrade to the latest canary version of Bun"
    echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
    echo ""
fi
.devcontainer/Dockerfile

@@ -1,38 +1,43 @@
# Use the latest Bun canary image for development
FROM oven/bun:canary

# Avoid warnings by switching to noninteractive
ENV DEBIAN_FRONTEND=noninteractive
FROM oven/bun:1.2.22-alpine

# Install necessary packages for development
RUN apt-get update \
    && apt-get -y install --no-install-recommends \
    git curl wget jq sudo postgresql-client vim nano \
    bash-completion ca-certificates lsb-release gnupg \
    && apt-get clean -y \
    && rm -rf /var/lib/apt/lists/*
RUN apk add --no-cache \
    git \
    curl \
    wget \
    jq \
    sudo \
    postgresql-client \
    vim \
    nano \
    bash \
    bash-completion \
    zsh \
    zsh-vcs \
    ca-certificates \
    shadow

# Create a non-root user
# Create a non-root user with matching UID/GID
ARG USERNAME=bun
ARG USER_UID=1000
ARG USER_GID=$USER_UID

# Create user group if it doesn't exist
RUN if ! getent group $USER_GID >/dev/null; then \
        addgroup -g $USER_GID $USERNAME; \
    fi

# Create user if it doesn't exist
RUN if ! getent passwd $USER_UID >/dev/null; then \
        adduser -D -u $USER_UID -G $(getent group $USER_GID | cut -d: -f1) $USERNAME; \
    fi

# Add sudo support
RUN echo "$USERNAME ALL=(ALL) NOPASSWD: ALL" > /etc/sudoers.d/$USERNAME \
    && chmod 0440 /etc/sudoers.d/$USERNAME

# Install global packages for development
RUN bun install -g turbo drizzle-kit typescript @types/node

# Install bun completions
RUN bun completions > /etc/bash_completion.d/bun

# Set up shell environment
RUN echo "export PATH=$PATH:/home/$USERNAME/.bun/bin" >> /etc/profile
RUN echo "source /etc/profile" >> /etc/bash.bashrc

# Switch back to dialog for any ad-hoc use of apt-get
ENV DEBIAN_FRONTEND=dialog
RUN echo "export PATH=\$PATH:/home/$USERNAME/.bun/bin" >> /etc/profile

WORKDIR /workspace
.devcontainer/README.md

@@ -1,78 +1,75 @@
# Sim Development Container

This directory contains configuration files for Visual Studio Code Dev Containers / GitHub Codespaces. Dev containers provide a consistent, isolated development environment for this project.
Development container configuration for VS Code Dev Containers and GitHub Codespaces.

## Contents

- `devcontainer.json` - The main configuration file that defines the development container settings
- `Dockerfile` - Defines the container image and development environment
- `docker-compose.yml` - Sets up the application and database containers
- `post-create.sh` - Script that runs when the container is created
- `.bashrc` - Custom shell configuration with helpful aliases

## Usage

### Prerequisites
## Prerequisites

- Visual Studio Code
- Docker installation:
  - Docker Desktop (Windows/macOS)
  - Docker Engine (Linux)
- VS Code Remote - Containers extension
- Docker Desktop or Podman Desktop
- VS Code Dev Containers extension

### Getting Started
## Getting Started

1. Open this project in Visual Studio Code
2. When prompted, click "Reopen in Container"
   - Alternatively, press `F1` and select "Remote-Containers: Reopen in Container"
1. Open this project in VS Code
2. Click "Reopen in Container" when prompted (or press `F1` → "Dev Containers: Reopen in Container")
3. Wait for the container to build and initialize
4. The post-creation script will automatically:
4. Start developing with `sim-start`

   - Install dependencies
   - Set up environment variables
   - Run database migrations
   - Configure helpful aliases
The setup script will automatically install dependencies and run migrations.

5. Start the application with `sim-start` (alias for `bun run dev`)
## Development Commands

### Development Commands
### Running Services

The development environment includes these helpful aliases:
You have two options for running the development environment:

**Option 1: Run everything together (recommended for most development)**
```bash
sim-start  # Runs both app and socket server using concurrently
```

**Option 2: Run services separately (useful for debugging individual services)**
- In the **app** container terminal: `sim-app` (starts Next.js app on port 3000)
- In the **realtime** container terminal: `sim-sockets` (starts socket server on port 3002)

### Other Commands

- `sim-start` - Start the development server
- `sim-migrate` - Push schema changes to the database
- `sim-generate` - Generate new migrations
- `sim-rebuild` - Build and start the production version
- `pgc` - Connect to the PostgreSQL database
- `check-db` - List all databases

### Using GitHub Codespaces

This project is also configured for GitHub Codespaces. To use it:

1. Go to the GitHub repository
2. Click the "Code" button
3. Select the "Codespaces" tab
4. Click "Create codespace on main"

This will start a new Codespace with the development environment already set up.

## Customization

You can customize the development environment by:

- Modifying `devcontainer.json` to add VS Code extensions or settings
- Updating the `Dockerfile` to install additional packages
- Editing `docker-compose.yml` to add services or change configuration
- Modifying `.bashrc` to add custom aliases or configurations
- `build` - Build the application
- `pgc` - Connect to PostgreSQL database

## Troubleshooting

If you encounter issues:
**Build errors**: Rebuild the container with `F1` → "Dev Containers: Rebuild Container"

1. Rebuild the container: `F1` → "Remote-Containers: Rebuild Container"
2. Check Docker logs for build errors
3. Verify Docker Desktop is running
4. Ensure all prerequisites are installed
**Port conflicts**: Ensure ports 3000, 3002, and 5432 are available

For more information, see the [VS Code Remote Development documentation](https://code.visualstudio.com/docs/remote/containers).
**Container runtime issues**: Verify Docker Desktop or Podman Desktop is running

## Technical Details

Services:
- **App container** (8GB memory limit) - Main Next.js application
- **Realtime container** (4GB memory limit) - Socket.io server for real-time features
- **Database** - PostgreSQL with pgvector extension
- **Migrations** - Runs automatically on container creation

You can develop with services running together or independently.

### Personalization

**Project commands** (`sim-start`, `sim-app`, etc.) are automatically available via `/workspace/.devcontainer/sim-commands.sh`.

**Personal shell customization** (aliases, prompts, etc.) should use VS Code's dotfiles feature:
1. Create a dotfiles repository (e.g., `github.com/youruser/dotfiles`)
2. Add your `.bashrc`, `.zshrc`, or other configs
3. Configure in VS Code Settings:
```json
{
  "dotfiles.repository": "youruser/dotfiles",
  "dotfiles.installCommand": "install.sh"
}
```

This separates project-specific commands from personal preferences, following VS Code best practices.
.devcontainer/devcontainer.json

@@ -13,13 +13,6 @@
      "source.fixAll.biome": "explicit",
      "source.organizeImports.biome": "explicit"
    },
    "terminal.integrated.defaultProfile.linux": "bash",
    "terminal.integrated.profiles.linux": {
      "bash": {
        "path": "/bin/bash",
        "args": ["--login"]
      }
    },
    "terminal.integrated.shellIntegration.enabled": true
  },
  "extensions": [

@@ -36,18 +29,9 @@
    }
  },

  "forwardPorts": [3000, 5432],
  "forwardPorts": [3000, 3002, 5432],

  "postCreateCommand": "bash -c 'bash .devcontainer/post-create.sh || true'",

  "postStartCommand": "bash -c 'if [ ! -f ~/.bashrc ] || ! grep -q \"sim-start\" ~/.bashrc; then cp .devcontainer/.bashrc ~/.bashrc; fi'",

  "remoteUser": "bun",

  "features": {
    "ghcr.io/devcontainers/features/git:1": {},
    "ghcr.io/prulloac/devcontainer-features/bun:1": {
      "version": "latest"
    }
  }
  "remoteUser": "bun"
}
.devcontainer/docker-compose.yml

@@ -7,52 +7,56 @@ services:
      - ..:/workspace:cached
      - bun-cache:/home/bun/.bun/cache:delegated
    command: sleep infinity
    deploy:
      resources:
        limits:
          memory: 8G
    environment:
      - NODE_ENV=development
      - DATABASE_URL=postgresql://postgres:postgres@db:5432/simstudio
      - BETTER_AUTH_URL=http://localhost:3000
      - NEXT_PUBLIC_APP_URL=http://localhost:3000
      - BETTER_AUTH_SECRET=${BETTER_AUTH_SECRET:-your_auth_secret_here}
      - ENCRYPTION_KEY=${ENCRYPTION_KEY:-your_encryption_key_here}
      - COPILOT_API_KEY=${COPILOT_API_KEY}
      - SIM_AGENT_API_URL=${SIM_AGENT_API_URL}
      - OLLAMA_URL=${OLLAMA_URL:-http://localhost:11434}
      - NEXT_PUBLIC_SOCKET_URL=${NEXT_PUBLIC_SOCKET_URL:-http://localhost:3002}
      - BUN_INSTALL_CACHE_DIR=/home/bun/.bun/cache
    depends_on:
      db:
        condition: service_healthy
      realtime:
        condition: service_healthy
      migrations:
        condition: service_completed_successfully
    ports:
      - "3000:3000"
      - "3001:3001"
    working_dir: /workspace
    healthcheck:
      test: ['CMD', 'wget', '--spider', '--quiet', 'http://127.0.0.1:3000']
      interval: 90s
      timeout: 5s
      retries: 3
      start_period: 10s

  realtime:
    build:
      context: ..
      dockerfile: .devcontainer/Dockerfile
    volumes:
      - ..:/workspace:cached
      - bun-cache:/home/bun/.bun/cache:delegated
    command: sleep infinity
    deploy:
      resources:
        limits:
          memory: 4G
    environment:
      - NODE_ENV=development
      - DATABASE_URL=postgresql://postgres:postgres@db:5432/simstudio
      - BETTER_AUTH_URL=http://localhost:3000
      - NEXT_PUBLIC_APP_URL=http://localhost:3000
      - BETTER_AUTH_SECRET=${BETTER_AUTH_SECRET:-your_auth_secret_here}
    depends_on:
      db:
        condition: service_healthy
    ports:
      - "3002:3002"
    working_dir: /workspace
    healthcheck:
      test: ['CMD', 'wget', '--spider', '--quiet', 'http://127.0.0.1:3002']
      interval: 90s
      timeout: 5s
      retries: 3
      start_period: 10s

  migrations:
    build:
.devcontainer/post-create.sh

@@ -8,11 +8,43 @@ echo "🔧 Setting up Sim development environment..."
# Change to the workspace root directory
cd /workspace

# Setup .bashrc
echo "📄 Setting up .bashrc with aliases..."
cp /workspace/.devcontainer/.bashrc ~/.bashrc
# Add to .profile to ensure .bashrc is sourced in non-interactive shells
echo 'if [ -f ~/.bashrc ]; then . ~/.bashrc; fi' >> ~/.profile
# Install global packages for development (done at runtime, not build time)
echo "📦 Installing global development tools..."
bun install -g turbo drizzle-kit typescript @types/node 2>/dev/null || {
    echo "⚠️ Some global packages may already be installed, continuing..."
}

# Set up bun completions (with proper shell detection)
echo "🔧 Setting up shell completions..."
if [ -n "$SHELL" ] && [ -f "$SHELL" ]; then
    SHELL=/bin/bash bun completions 2>/dev/null | sudo tee /etc/bash_completion.d/bun > /dev/null || {
        echo "⚠️ Could not install bun completions, but continuing..."
    }
fi

# Add project commands to shell profile
echo "📄 Setting up project commands..."
# Add sourcing of sim-commands.sh to user's shell config files if they exist
for rcfile in ~/.bashrc ~/.zshrc; do
    if [ -f "$rcfile" ]; then
        # Check if already added
        if ! grep -q "sim-commands.sh" "$rcfile"; then
            echo "" >> "$rcfile"
            echo "# Sim project commands" >> "$rcfile"
            echo "if [ -f /workspace/.devcontainer/sim-commands.sh ]; then" >> "$rcfile"
            echo "  source /workspace/.devcontainer/sim-commands.sh" >> "$rcfile"
            echo "fi" >> "$rcfile"
        fi
    fi
done

# If no rc files exist yet, create a minimal one
if [ ! -f ~/.bashrc ] && [ ! -f ~/.zshrc ]; then
    echo "# Source Sim project commands" > ~/.bashrc
    echo "if [ -f /workspace/.devcontainer/sim-commands.sh ]; then" >> ~/.bashrc
    echo "  source /workspace/.devcontainer/sim-commands.sh" >> ~/.bashrc
    echo "fi" >> ~/.bashrc
fi

# Clean and reinstall dependencies to ensure platform compatibility
echo "📦 Cleaning and reinstalling dependencies..."

@@ -29,18 +61,12 @@ chmod 700 ~/.bun ~/.bun/cache

# Install dependencies with platform-specific binaries
echo "Installing dependencies with Bun..."
bun install || {
    echo "⚠️ bun install had issues but continuing setup..."
}
bun install

# Check for native dependencies
echo "Checking for native dependencies compatibility..."
NATIVE_DEPS=$(grep '"trustedDependencies"' apps/sim/package.json || echo "")
if [ ! -z "$NATIVE_DEPS" ]; then
    echo "⚠️ Native dependencies detected. Ensuring compatibility with Bun..."
    for pkg in $(echo $NATIVE_DEPS | grep -oP '"[^"]*"' | tr -d '"' | grep -v "trustedDependencies"); do
        echo "Checking compatibility for $pkg..."
    done
if grep -q '"trustedDependencies"' apps/sim/package.json 2>/dev/null; then
    echo "⚠️ Native dependencies detected. Bun will handle compatibility during install."
fi

# Set up environment variables if .env doesn't exist for the sim app

@@ -82,23 +108,6 @@ echo "Waiting for database to be ready..."
    fi
) || echo "⚠️ Database setup had issues but continuing..."

# Add additional helpful aliases to .bashrc
cat << EOF >> ~/.bashrc

# Additional Sim Development Aliases
alias migrate="cd /workspace/apps/sim && DATABASE_URL=postgresql://postgres:postgres@db:5432/simstudio bunx drizzle-kit push"
alias generate="cd /workspace/apps/sim && bunx drizzle-kit generate"
alias dev="cd /workspace && bun run dev"
alias build="cd /workspace && bun run build"
alias start="cd /workspace && bun run dev"
alias lint="cd /workspace/apps/sim && bun run lint"
alias test="cd /workspace && bun run test"
alias bun-update="cd /workspace && bun update"
EOF

# Source the .bashrc to make aliases available immediately
. ~/.bashrc

# Clear the welcome message flag to ensure it shows after setup
unset SIM_WELCOME_SHOWN
.devcontainer/sim-commands.sh (new executable file, 42 lines)

@@ -0,0 +1,42 @@
#!/bin/bash
# Sim Project Commands
# Source this file to add project-specific commands to your shell
# Add to your ~/.bashrc or ~/.zshrc: source /workspace/.devcontainer/sim-commands.sh

# Project-specific aliases for Sim development
alias sim-start="cd /workspace && bun run dev:full"
alias sim-app="cd /workspace && bun run dev"
alias sim-sockets="cd /workspace && bun run dev:sockets"
alias sim-migrate="cd /workspace/apps/sim && bunx drizzle-kit push"
alias sim-generate="cd /workspace/apps/sim && bunx drizzle-kit generate"
alias sim-rebuild="cd /workspace && bun run build && bun run start"
alias docs-dev="cd /workspace/apps/docs && bun run dev"

# Database connection helpers
alias pgc="PGPASSWORD=postgres psql -h db -U postgres -d simstudio"
alias check-db="PGPASSWORD=postgres psql -h db -U postgres -c '\l'"

# Default to workspace directory
cd /workspace 2>/dev/null || true

# Welcome message - show once per session
if [ -z "$SIM_WELCOME_SHOWN" ]; then
    export SIM_WELCOME_SHOWN=1

    echo ""
    echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
    echo "🚀 Sim Development Environment"
    echo ""
    echo "Project commands:"
    echo "  sim-start    - Start app + socket server"
    echo "  sim-app      - Start only main app"
    echo "  sim-sockets  - Start only socket server"
    echo "  sim-migrate  - Push schema changes"
    echo "  sim-generate - Generate migrations"
    echo ""
    echo "Database:"
    echo "  pgc          - Connect to PostgreSQL"
    echo "  check-db     - List databases"
    echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
    echo ""
fi
.github/workflows/publish-ts-sdk.yml (vendored, 2 lines changed)

@@ -21,7 +21,7 @@ jobs:
      - name: Setup Node.js for npm publishing
        uses: actions/setup-node@v4
        with:
          node-version: '18'
          node-version: '22'
          registry-url: 'https://registry.npmjs.org/'

      - name: Install dependencies
apps/docs/content/docs/en/blocks/guardrails.mdx (new file, 251 lines)

@@ -0,0 +1,251 @@
---
title: Guardrails
---

import { Callout } from 'fumadocs-ui/components/callout'
import { Step, Steps } from 'fumadocs-ui/components/steps'
import { Tab, Tabs } from 'fumadocs-ui/components/tabs'
import { Image } from '@/components/ui/image'
import { Video } from '@/components/ui/video'

The Guardrails block validates and protects your AI workflows by checking content against multiple validation types. Ensure data quality, prevent hallucinations, detect PII, and enforce format requirements before content moves through your workflow.

<div className="flex justify-center">
  <Image
    src="/static/blocks/guardrails.png"
    alt="Guardrails Block"
    width={500}
    height={350}
    className="my-6"
  />
</div>

## Overview

The Guardrails block enables you to:

<Steps>
  <Step>
    <strong>Validate JSON Structure</strong>: Ensure LLM outputs are valid JSON before parsing
  </Step>
  <Step>
    <strong>Match Regex Patterns</strong>: Verify content matches specific formats (emails, phone numbers, URLs, etc.)
  </Step>
  <Step>
    <strong>Detect Hallucinations</strong>: Use RAG + LLM scoring to validate AI outputs against knowledge base content
  </Step>
  <Step>
    <strong>Detect PII</strong>: Identify and optionally mask personally identifiable information across 40+ entity types
  </Step>
</Steps>

## Validation Types
### JSON Validation

Validates that content is properly formatted JSON. Perfect for ensuring structured LLM outputs can be safely parsed.

**Use Cases:**
- Validate JSON responses from Agent blocks before parsing
- Ensure API payloads are properly formatted
- Check structured data integrity

**Output:**
- `passed`: `true` if valid JSON, `false` otherwise
- `error`: Error message if validation fails (e.g., "Invalid JSON: Unexpected token...")
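Conceptually, the check is just a guarded parse. A minimal TypeScript sketch follows; the `passed`/`error` result shape mirrors the outputs listed above, but this is an illustration, not the block's actual implementation:

```ts
// Sketch of JSON validation: try to parse, report success or the parse error.
function validateJson(content: string): { passed: boolean; error?: string } {
  try {
    JSON.parse(content)
    return { passed: true }
  } catch (e) {
    return { passed: false, error: `Invalid JSON: ${(e as Error).message}` }
  }
}
```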
### Regex Validation

Checks if content matches a specified regular expression pattern.

**Use Cases:**
- Validate email addresses
- Check phone number formats
- Verify URLs or custom identifiers
- Enforce specific text patterns

**Configuration:**
- **Regex Pattern**: The regular expression to match against (e.g., `^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}$` for emails)

**Output:**
- `passed`: `true` if content matches pattern, `false` otherwise
- `error`: Error message if validation fails
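In TypeScript terms this reduces to a `RegExp.test` call. A sketch under the same assumed result shape, using the email pattern from above:

```ts
// Sketch of regex validation against a configurable pattern.
// Note: an invalid pattern string would throw when constructing the RegExp.
function validateRegex(content: string, pattern: string): { passed: boolean; error?: string } {
  const passed = new RegExp(pattern).test(content)
  return passed ? { passed } : { passed, error: `Content does not match pattern: ${pattern}` }
}

validateRegex('user@example.com', '^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\\.[a-zA-Z]{2,}$') // passed: true
```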
### Hallucination Detection

Uses Retrieval-Augmented Generation (RAG) with LLM scoring to detect when AI-generated content contradicts or isn't grounded in your knowledge base.

**How It Works:**
1. Queries your knowledge base for relevant context
2. Sends both the AI output and retrieved context to an LLM
3. LLM assigns a confidence score (0-10 scale)
   - **0** = Full hallucination (completely ungrounded)
   - **10** = Fully grounded (completely supported by knowledge base)
4. Validation passes if score ≥ threshold (default: 3)

**Configuration:**
- **Knowledge Base**: Select from your existing knowledge bases
- **Model**: Choose LLM for scoring (requires strong reasoning - GPT-4o, Claude 3.7 Sonnet recommended)
- **API Key**: Authentication for selected LLM provider (auto-hidden for hosted/Ollama models)
- **Confidence Threshold**: Minimum score to pass (0-10, default: 3)
- **Top K** (Advanced): Number of knowledge base chunks to retrieve (default: 10)

**Output:**
- `passed`: `true` if confidence score ≥ threshold
- `score`: Confidence score (0-10)
- `reasoning`: LLM's explanation for the score
- `error`: Error message if validation fails

**Use Cases:**
- Validate Agent responses against documentation
- Ensure customer support answers are factually accurate
- Verify generated content matches source material
- Quality control for RAG applications
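The pass/fail decision at steps 3-4 is a plain threshold comparison. A sketch of the control flow, where `retrieveChunks` and `scoreWithLlm` are hypothetical stand-ins for the block's internal RAG query and LLM scoring call:

```ts
// Hypothetical helpers standing in for the block's internal RAG + LLM calls.
declare function retrieveChunks(kbId: string, query: string, topK: number): Promise<string[]>
declare function scoreWithLlm(
  output: string,
  context: string[]
): Promise<{ score: number; reasoning: string }>

// Sketch of steps 1-4: retrieve context, score groundedness, compare to threshold.
async function checkHallucination(
  output: string,
  knowledgeBaseId: string,
  threshold = 3, // default Confidence Threshold
  topK = 10 // default Top K
): Promise<{ passed: boolean; score: number; reasoning: string }> {
  const context = await retrieveChunks(knowledgeBaseId, output, topK)
  const { score, reasoning } = await scoreWithLlm(output, context) // 0-10 scale
  return { passed: score >= threshold, score, reasoning }
}
```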
### PII Detection

Detects personally identifiable information using Microsoft Presidio. Supports 40+ entity types across multiple countries and languages.

<div className="mx-auto w-3/5 overflow-hidden rounded-lg">
  <Video src="guardrails.mp4" width={500} height={350} />
</div>

**How It Works:**
1. Scans content for PII entities using pattern matching and NLP
2. Returns detected entities with locations and confidence scores
3. Optionally masks detected PII in the output

**Configuration:**
- **PII Types to Detect**: Select from grouped categories via modal selector
  - **Common**: Person name, Email, Phone, Credit card, IP address, etc.
  - **USA**: SSN, Driver's license, Passport, etc.
  - **UK**: NHS number, National insurance number
  - **Spain**: NIF, NIE, CIF
  - **Italy**: Fiscal code, Driver's license, VAT code
  - **Poland**: PESEL, NIP, REGON
  - **Singapore**: NRIC/FIN, UEN
  - **Australia**: ABN, ACN, TFN, Medicare
  - **India**: Aadhaar, PAN, Passport, Voter number
- **Mode**:
  - **Detect**: Only identify PII (default)
  - **Mask**: Replace detected PII with masked values
- **Language**: Detection language (default: English)

**Output:**
- `passed`: `false` if any selected PII types are detected
- `detectedEntities`: Array of detected PII with type, location, and confidence
- `maskedText`: Content with PII masked (only if mode = "Mask")
- `error`: Error message if validation fails

**Use Cases:**
- Block content containing sensitive personal information
- Mask PII before logging or storing data
- Compliance with GDPR, HIPAA, and other privacy regulations
- Sanitize user inputs before processing
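To illustrate the Detect/Mask output shapes, here is a deliberately simplified TypeScript sketch that recognizes a single entity type (email) with one regex; the real block relies on Presidio's NLP-based recognizers, not this pattern:

```ts
// Toy single-entity recognizer, only to show the passed/detectedEntities/maskedText shape.
const EMAIL = /[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}/g

function detectPii(content: string, mode: 'detect' | 'mask') {
  const detectedEntities = [...content.matchAll(EMAIL)].map((m) => ({
    type: 'EMAIL_ADDRESS',
    start: m.index ?? 0,
    end: (m.index ?? 0) + m[0].length,
  }))
  return {
    passed: detectedEntities.length === 0, // fails when PII is found
    detectedEntities,
    maskedText: mode === 'mask' ? content.replace(EMAIL, '<EMAIL_ADDRESS>') : undefined,
  }
}
```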
## Configuration

### Content to Validate

The input content to validate. This typically comes from:
- Agent block outputs: `<agent.content>`
- Function block results: `<function.output>`
- API responses: `<api.output>`
- Any other block output

### Validation Type

Choose from four validation types:
- **Valid JSON**: Check if content is properly formatted JSON
- **Regex Match**: Verify content matches a regex pattern
- **Hallucination Check**: Validate against knowledge base with LLM scoring
- **PII Detection**: Detect and optionally mask personally identifiable information

## Outputs

All validation types return:

- **`<guardrails.passed>`**: Boolean indicating if validation passed
- **`<guardrails.validationType>`**: The type of validation performed
- **`<guardrails.input>`**: The original input that was validated
- **`<guardrails.error>`**: Error message if validation failed (optional)

Additional outputs by type:

**Hallucination Check:**
- **`<guardrails.score>`**: Confidence score (0-10)
- **`<guardrails.reasoning>`**: LLM's explanation

**PII Detection:**
- **`<guardrails.detectedEntities>`**: Array of detected PII entities
- **`<guardrails.maskedText>`**: Content with PII masked (if mode = "Mask")

## Example Use Cases

### Validate JSON Before Parsing

<div className="mb-4 rounded-md border p-4">
  <h4 className="font-medium">Scenario: Ensure Agent output is valid JSON</h4>
  <ol className="list-decimal pl-5 text-sm">
    <li>Agent generates structured JSON response</li>
    <li>Guardrails validates JSON format</li>
    <li>Condition block checks `<guardrails.passed>`</li>
    <li>If passed → Parse and use data; if failed → Retry or handle error</li>
  </ol>
</div>

### Prevent Hallucinations

<div className="mb-4 rounded-md border p-4">
  <h4 className="font-medium">Scenario: Validate customer support responses</h4>
  <ol className="list-decimal pl-5 text-sm">
    <li>Agent generates response to customer question</li>
    <li>Guardrails checks against support documentation knowledge base</li>
    <li>If confidence score ≥ 3 → Send response</li>
    <li>If confidence score \< 3 → Flag for human review</li>
  </ol>
</div>

### Block PII in User Inputs

<div className="mb-4 rounded-md border p-4">
  <h4 className="font-medium">Scenario: Sanitize user-submitted content</h4>
  <ol className="list-decimal pl-5 text-sm">
    <li>User submits form with text content</li>
    <li>Guardrails detects PII (emails, phone numbers, SSN, etc.)</li>
    <li>If PII detected → Reject submission or mask sensitive data</li>
    <li>If no PII → Process normally</li>
  </ol>
</div>

<div className="mx-auto w-3/5 overflow-hidden rounded-lg">
  <Video src="guardrails-example.mp4" width={500} height={350} />
</div>

### Validate Email Format

<div className="mb-4 rounded-md border p-4">
  <h4 className="font-medium">Scenario: Check email address format</h4>
  <ol className="list-decimal pl-5 text-sm">
    <li>Agent extracts email from text</li>
    <li>Guardrails validates with regex pattern</li>
    <li>If valid → Use email for notification</li>
    <li>If invalid → Request correction</li>
  </ol>
</div>

## Best Practices

- **Chain with Condition blocks**: Use `<guardrails.passed>` to branch workflow logic based on validation results
- **Use JSON validation before parsing**: Always validate JSON structure before attempting to parse LLM outputs
- **Choose appropriate PII types**: Only select the PII entity types relevant to your use case for better performance
- **Set reasonable confidence thresholds**: For hallucination detection, adjust threshold based on your accuracy requirements (higher = stricter)
- **Use strong models for hallucination detection**: GPT-4o or Claude 3.7 Sonnet provide more accurate confidence scoring
- **Mask PII for logging**: Use "Mask" mode when you need to log or store content that may contain PII
- **Test regex patterns**: Validate your regex patterns thoroughly before deploying to production
- **Monitor validation failures**: Track `<guardrails.error>` messages to identify common validation issues

<Callout type="info">
  Guardrails validation happens synchronously in your workflow. For hallucination detection, choose faster models (like GPT-4o-mini) if latency is critical.
</Callout>
@@ -57,7 +57,7 @@ In Sim, the Airtable integration enables your agents to interact with your Airta

## Usage Instructions

Integrates Airtable into the workflow. Can create, get, list, or update Airtable records. Requires OAuth. Can be used in trigger mode to trigger a workflow when an update is made to an Airtable table.
Integrates Airtable into the workflow. Can create, get, list, or update Airtable records. Can be used in trigger mode to trigger a workflow when an update is made to an Airtable table.

@@ -57,7 +57,7 @@ In Sim, the BrowserUse integration allows your agents to interact with the web a

## Usage Instructions

Integrate Browser Use into the workflow. Can navigate the web and perform actions as if a real user was interacting with the browser. Requires API Key.
Integrate Browser Use into the workflow. Can navigate the web and perform actions as if a real user was interacting with the browser.

@@ -198,7 +198,7 @@ In Sim, the Clay integration allows your agents to push structured data into Cla

## Usage Instructions

Integrate Clay into the workflow. Can populate a table with data. Requires an API Key.
Integrate Clay into the workflow. Can populate a table with data.

@@ -43,7 +43,7 @@ In Sim, the Confluence integration enables your agents to access and leverage yo

## Usage Instructions

Integrate Confluence into the workflow. Can read and update a page. Requires OAuth.
Integrate Confluence into the workflow. Can read and update a page.

@@ -57,7 +57,7 @@ Discord components in Sim use efficient lazy loading, only fetching data when ne

## Usage Instructions

Integrate Discord into the workflow. Can send and get messages, get server information, and get a user's information. Requires bot API key.
Integrate Discord into the workflow. Can send and get messages, get server information, and get a user's information.

@@ -39,7 +39,7 @@ In Sim, the ElevenLabs integration enables your agents to convert text to lifeli

## Usage Instructions

Integrate ElevenLabs into the workflow. Can convert text to speech. Requires API key.
Integrate ElevenLabs into the workflow. Can convert text to speech.

@@ -44,7 +44,7 @@ In Sim, the Exa integration allows your agents to search the web for information

## Usage Instructions

Integrate Exa into the workflow. Can search, get contents, find similar links, answer a question, and perform research. Requires API Key.
Integrate Exa into the workflow. Can search, get contents, find similar links, answer a question, and perform research.

@@ -59,7 +59,7 @@ This allows your agents to gather information from websites, extract structured

## Usage Instructions

Integrate Firecrawl into the workflow. Can search, scrape, or crawl websites. Requires API Key.
Integrate Firecrawl into the workflow. Can search, scrape, or crawl websites.
@@ -35,7 +35,7 @@ In Sim, the GitHub integration enables your agents to interact directly with Git

## Usage Instructions

Integrate GitHub into the workflow. Can get PR details, create PR comment, get repository info, and get latest commit. Requires GitHub token API Key. Can be used in trigger mode to trigger a workflow when a PR is created, commented on, or a commit is pushed.
Integrate GitHub into the workflow. Can get PR details, create PR comment, get repository info, and get latest commit. Can be used in trigger mode to trigger a workflow when a PR is created, commented on, or a commit is pushed.
@@ -51,7 +51,7 @@ In Sim, the Gmail integration enables your agents to send, read, and search emai

## Usage Instructions

Integrate Gmail into the workflow. Can send, read, and search emails. Requires OAuth. Can be used in trigger mode to trigger a workflow when a new email is received.
Integrate Gmail into the workflow. Can send, read, and search emails. Can be used in trigger mode to trigger a workflow when a new email is received.

@@ -90,7 +90,7 @@ In Sim, the Google Calendar integration enables your agents to programmatically

## Usage Instructions

Integrate Google Calendar into the workflow. Can create, read, update, and list calendar events. Requires OAuth.
Integrate Google Calendar into the workflow. Can create, read, update, and list calendar events.

@@ -81,7 +81,7 @@ In Sim, the Google Docs integration enables your agents to interact directly wit

## Usage Instructions

Integrate Google Docs into the workflow. Can read, write, and create documents. Requires OAuth.
Integrate Google Docs into the workflow. Can read, write, and create documents.

@@ -73,7 +73,7 @@ In Sim, the Google Drive integration enables your agents to interact directly wi

## Usage Instructions

Integrate Google Drive into the workflow. Can create, upload, and list files. Requires OAuth.
Integrate Google Drive into the workflow. Can create, upload, and list files.

@@ -69,9 +69,6 @@ Integrate Google Forms into your workflow. Provide a Form ID to list responses,

| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| formId | string | Yes | The ID of the Google Form |
| responseId | string | No | If provided, returns this specific response |
| pageSize | number | No | Max responses to return (service may return fewer). Defaults to 5000 |

#### Output

@@ -58,7 +58,7 @@ In Sim, the Google Search integration enables your agents to search the web prog

## Usage Instructions

Integrate Google Search into the workflow. Can search the web. Requires API Key.
Integrate Google Search into the workflow. Can search the web.

@@ -96,7 +96,7 @@ In Sim, the Google Sheets integration enables your agents to interact directly w

## Usage Instructions

Integrate Google Sheets into the workflow. Can read, write, append, and update data. Requires OAuth.
Integrate Google Sheets into the workflow. Can read, write, append, and update data.

@@ -66,7 +66,7 @@ In Sim, the HuggingFace integration enables your agents to programmatically gene

## Usage Instructions

Integrate Hugging Face into the workflow. Can generate completions using the Hugging Face Inference API. Requires API Key.
Integrate Hugging Face into the workflow. Can generate completions using the Hugging Face Inference API.

@@ -41,7 +41,7 @@ In Sim, the Hunter.io integration enables your agents to programmatically search

## Usage Instructions

Integrate Hunter into the workflow. Can search domains, find email addresses, verify email addresses, discover companies, find companies, and count email addresses. Requires API Key.
Integrate Hunter into the workflow. Can search domains, find email addresses, verify email addresses, discover companies, find companies, and count email addresses.

@@ -46,7 +46,7 @@ In Sim, the DALL-E integration enables your agents to generate images programmat

## Usage Instructions

Integrate Image Generator into the workflow. Can generate images using DALL-E 3 or GPT Image. Requires API Key.
Integrate Image Generator into the workflow. Can generate images using DALL-E 3 or GPT Image.

@@ -63,7 +63,7 @@ This integration is particularly valuable for building agents that need to gathe

## Usage Instructions

Integrate Jina into the workflow. Extracts content from websites. Requires API Key.
Integrate Jina into the workflow. Extracts content from websites.

@@ -43,7 +43,7 @@ In Sim, the Jira integration allows your agents to seamlessly interact with your

## Usage Instructions

Integrate Jira into the workflow. Can read, write, and update issues. Requires OAuth.
Integrate Jira into the workflow. Can read, write, and update issues.
@@ -10,8 +10,11 @@ import { BlockInfoCard } from "@/components/ui/block-info-card"
  color="#5E6AD2"
  icon={true}
  iconSvg={`<svg className="block-icon"

    xmlns='http://www.w3.org/2000/svg'
    fill='currentColor'

    viewBox='0 0 100 100'
  >
    <path

@@ -39,7 +42,7 @@ In Sim, the Linear integration allows your agents to seamlessly interact with yo

## Usage Instructions

Integrate Linear into the workflow. Can read and create issues. Requires OAuth.
Integrate Linear into the workflow. Can read and create issues.
@@ -43,7 +43,7 @@ To implement Linkup in your agent, simply add the tool to your agent's configura

## Usage Instructions

Integrate Linkup into the workflow. Can search the web. Requires API Key.
Integrate Linkup into the workflow. Can search the web.

@@ -44,7 +44,7 @@ In Sim, the Mem0 integration enables your agents to maintain persistent memory a

## Usage Instructions

Integrate Mem0 into the workflow. Can add, search, and retrieve memories. Requires API Key.
Integrate Mem0 into the workflow. Can add, search, and retrieve memories.

@@ -20,6 +20,7 @@
    "google_forms",
    "google_search",
    "google_sheets",
    "google_vault",
    "huggingface",
    "hunter",
    "image_generator",
@@ -94,7 +94,7 @@ In Sim, the Microsoft Excel integration provides seamless access to spreadsheet

## Usage Instructions

Integrate Microsoft Excel into the workflow. Can read, write, update, and add to table. Requires OAuth.
Integrate Microsoft Excel into the workflow. Can read, write, update, and add to table.

@@ -122,7 +122,7 @@ In Sim, the Microsoft Planner integration allows your agents to programmatically

## Usage Instructions

Integrate Microsoft Planner into the workflow. Can read and create tasks. Requires OAuth.
Integrate Microsoft Planner into the workflow. Can read and create tasks.

@@ -98,7 +98,7 @@ In Sim, the Microsoft Teams integration enables your agents to interact directly

## Usage Instructions

Integrate Microsoft Teams into the workflow. Can read and write chat messages, and read and write channel messages. Requires OAuth. Can be used in trigger mode to trigger a workflow when a message is sent to a chat or channel.
Integrate Microsoft Teams into the workflow. Can read and write chat messages, and read and write channel messages. Can be used in trigger mode to trigger a workflow when a message is sent to a chat or channel.

@@ -79,7 +79,7 @@ The Mistral Parse tool is particularly useful for scenarios where your agents ne

## Usage Instructions

Integrate Mistral Parse into the workflow. Can extract text from uploaded PDF documents, or from a URL. Requires API Key.
Integrate Mistral Parse into the workflow. Can extract text from uploaded PDF documents, or from a URL.

@@ -45,7 +45,7 @@ This integration bridges the gap between your AI workflows and your knowledge ba

## Usage Instructions

Integrate Notion into the workflow. Can read page, read database, create page, create database, append content, query database, and search workspace. Requires OAuth.
Integrate Notion into the workflow. Can read page, read database, create page, create database, append content, query database, and search workspace.

@@ -51,7 +51,7 @@ In Sim, the OneDrive integration enables your agents to directly interact with y

## Usage Instructions

Integrate OneDrive into the workflow. Can create, upload, and list files. Requires OAuth.
Integrate OneDrive into the workflow. Can create, upload, and list files.

@@ -43,7 +43,7 @@ In Sim, the OpenAI integration enables your agents to leverage these powerful AI

## Usage Instructions

Integrate Embeddings into the workflow. Can generate embeddings from text. Requires API Key.
Integrate Embeddings into the workflow. Can generate embeddings from text.

@@ -140,7 +140,7 @@ In Sim, the Microsoft Outlook integration enables your agents to interact direct

## Usage Instructions

Integrate Outlook into the workflow. Can read, draft, and send email messages. Requires OAuth. Can be used in trigger mode to trigger a workflow when a new email is received.
Integrate Outlook into the workflow. Can read, draft, and send email messages. Can be used in trigger mode to trigger a workflow when a new email is received.

@@ -71,7 +71,7 @@ In Sim, the Parallel AI integration empowers your agents to perform web searches

## Usage Instructions

Integrate Parallel AI into the workflow. Can search the web. Requires API Key.
Integrate Parallel AI into the workflow. Can search the web.

@@ -37,7 +37,7 @@ In Sim, the Perplexity integration enables your agents to leverage these powerfu

## Usage Instructions

Integrate Perplexity into the workflow. Can generate completions using Perplexity AI chat models. Requires API Key.
Integrate Perplexity into the workflow. Can generate completions using Perplexity AI chat models.

@@ -45,7 +45,7 @@ In Sim, the Pinecone integration enables your agents to leverage vector search c

## Usage Instructions

Integrate Pinecone into the workflow. Can generate embeddings, upsert text, search with text, fetch vectors, and search with vectors. Requires API Key.
Integrate Pinecone into the workflow. Can generate embeddings, upsert text, search with text, fetch vectors, and search with vectors.

@@ -103,7 +103,7 @@ This integration allows your agents to leverage powerful vector search and manag

## Usage Instructions

Integrate Qdrant into the workflow. Can upsert, search, and fetch points. Requires API Key.
Integrate Qdrant into the workflow. Can upsert, search, and fetch points.

@@ -39,7 +39,7 @@ These operations let your agents access and analyze Reddit content as part of yo

## Usage Instructions

Integrate Reddit into the workflow. Can get posts and comments from a subreddit. Requires OAuth.
Integrate Reddit into the workflow. Can get posts and comments from a subreddit.
@@ -7,7 +7,7 @@ import { BlockInfoCard } from "@/components/ui/block-info-card"

<BlockInfoCard
  type="schedule"
  color="#7B68EE"
  color="#6366F1"
  icon={true}
  iconSvg={`<svg className="block-icon"
@@ -78,7 +78,7 @@ In Sim, the Serper integration enables your agents to leverage the power of web

## Usage Instructions

Integrate Serper into the workflow. Can search the web. Requires API Key.
Integrate Serper into the workflow. Can search the web.

@@ -64,7 +64,7 @@ This allows for powerful automation scenarios such as sending notifications, ale

## Usage Instructions

Integrate Slack into the workflow. Can send messages, create canvases, and read messages. Requires OAuth. Can be used in trigger mode to trigger a workflow when a message is sent to a channel.
Integrate Slack into the workflow. Can send messages, create canvases, and read messages. Requires Bot Token instead of OAuth in advanced mode. Can be used in trigger mode to trigger a workflow when a message is sent to a channel.

@@ -191,7 +191,7 @@ In Sim, the Stagehand integration enables your agents to extract structured data

## Usage Instructions

Integrate Stagehand into the workflow. Can extract structured data from webpages. Requires API Key.
Integrate Stagehand into the workflow. Can extract structured data from webpages.

@@ -195,7 +195,7 @@ In Sim, the Stagehand integration enables your agents to seamlessly interact wit

## Usage Instructions

Integrate Stagehand Agent into the workflow. Can navigate the web and perform tasks. Requires API Key.
Integrate Stagehand Agent into the workflow. Can navigate the web and perform tasks.

@@ -47,7 +47,7 @@ In Sim, the Vision integration enables your agents to analyze images with vision

## Usage Instructions

Integrate Vision into the workflow. Can analyze images with vision models. Requires API Key.
Integrate Vision into the workflow. Can analyze images with vision models.

@@ -42,7 +42,7 @@ In Sim, the Wealthbox integration enables your agents to seamlessly interact wit

## Usage Instructions

Integrate Wealthbox into the workflow. Can read and write notes, read and write contacts, and read and write tasks. Requires OAuth.
Integrate Wealthbox into the workflow. Can read and write notes, read and write contacts, and read and write tasks.

@@ -38,7 +38,7 @@ In Sim, the X integration enables sophisticated social media automation scenario

## Usage Instructions

Integrate X into the workflow. Can post a new tweet, get tweet details, search tweets, and get user profile. Requires OAuth.
Integrate X into the workflow. Can post a new tweet, get tweet details, search tweets, and get user profile.

@@ -40,7 +40,7 @@ In Sim, the YouTube integration enables your agents to programmatically search a

## Usage Instructions

Integrate YouTube into the workflow. Can search for videos. Requires API Key.
Integrate YouTube into the workflow. Can search for videos.
apps/docs/public/static/blocks/guardrails.png (new binary file, 107 KiB; not shown)
@@ -1,9 +1,6 @@
import { Footer, Nav } from '@/app/(landing)/components'

/**
 * Layout for the building/blog section with navigation and footer
 */
export default function BuildingLayout({ children }: { children: React.ReactNode }) {
export default function BlogLayout({ children }: { children: React.ReactNode }) {
  return (
    <>
      <Nav hideAuthButtons={false} variant='landing' />
@@ -2,14 +2,9 @@ import Image from 'next/image'
import { Avatar, AvatarFallback, AvatarImage } from '@/components/ui/avatar'
import { soehne } from '@/app/fonts/soehne/soehne'

/**
 * Blog post component comparing OpenAI AgentKit, n8n, and Sim workflow builders for building AI agents.
 * Layout inspired by Anthropic's engineering blog posts.
 * Includes structured data (JSON-LD) for enhanced SEO and LLM discoverability.
 */
export default function OpenAiN8nSim() {
  const baseUrl = 'https://sim.ai'
  const articleUrl = `${baseUrl}/building/openai-vs-n8n-vs-sim`
  const articleUrl = `${baseUrl}/blog/openai-vs-n8n-vs-sim`

  const articleStructuredData = {
    '@context': 'https://schema.org',

@@ -17,7 +12,7 @@ export default function OpenAiN8nSim() {
    headline: 'OpenAI AgentKit vs n8n vs Sim: AI Agent Workflow Builder Comparison',
    description:
      'Compare OpenAI AgentKit, n8n, and Sim for building AI agent workflows. Explore key differences in capabilities, integrations, collaboration, and which platform best fits your production AI agent needs.',
    image: `${baseUrl}/building/openai-vs-n8n-vs-sim/workflow.png`,
    image: `${baseUrl}/blog/openai-vs-n8n-vs-sim/workflow.png`,
    datePublished: '2025-10-06T00:00:00.000Z',
    dateModified: '2025-10-06T00:00:00.000Z',
    author: {

@@ -80,8 +75,8 @@ export default function OpenAiN8nSim() {
      {
        '@type': 'ListItem',
        position: 2,
        name: 'Building',
        item: `${baseUrl}/building`,
        name: 'Blog',
        item: `${baseUrl}/blog`,
      },
      {
        '@type': 'ListItem',

@@ -120,7 +115,7 @@ export default function OpenAiN8nSim() {
      <div className='h-[180px] w-full flex-shrink-0 sm:h-[200px] md:h-auto md:w-[300px]'>
        <div className='relative h-full w-full overflow-hidden rounded-lg md:aspect-[5/4]'>
          <Image
            src='/building/openai-vs-n8n-vs-sim/workflow.png'
            src='/blog/openai-vs-n8n-vs-sim/workflow.png'
            alt='Sim AI agent workflow builder interface'
            width={300}
            height={240}

@@ -149,7 +144,7 @@ export default function OpenAiN8nSim() {
        >
          <Avatar className='size-6'>
            <AvatarImage
              src='/building/openai-vs-n8n-vs-sim/emir-karabeg.png'
              src='/blog/openai-vs-n8n-vs-sim/emir-karabeg.png'
              alt='Emir Karabeg'
            />
            <AvatarFallback>EK</AvatarFallback>

@@ -195,7 +190,7 @@ export default function OpenAiN8nSim() {
        >
          <Avatar className='size-6'>
            <AvatarImage
              src='/building/openai-vs-n8n-vs-sim/emir-karabeg.png'
              src='/blog/openai-vs-n8n-vs-sim/emir-karabeg.png'
              alt='Emir Karabeg'
            />
            <AvatarFallback>EK</AvatarFallback>

@@ -253,7 +248,7 @@ export default function OpenAiN8nSim() {

        <figure className='my-8 overflow-hidden rounded-lg'>
          <Image
            src='/building/openai-vs-n8n-vs-sim/openai.png'
            src='/blog/openai-vs-n8n-vs-sim/openai.png'
            alt='OpenAI AgentKit workflow interface'
            width={800}
            height={450}

@@ -288,7 +283,7 @@ export default function OpenAiN8nSim() {

        <figure className='my-8 overflow-hidden rounded-lg'>
          <Image
            src='/building/openai-vs-n8n-vs-sim/widgets.png'
            src='/blog/openai-vs-n8n-vs-sim/widgets.png'
            alt='OpenAI AgentKit custom widgets interface'
            width={800}
            height={450}

@@ -371,7 +366,7 @@ export default function OpenAiN8nSim() {

        <figure className='my-8 overflow-hidden rounded-lg'>
          <Image
            src='/building/openai-vs-n8n-vs-sim/n8n.png'
            src='/blog/openai-vs-n8n-vs-sim/n8n.png'
            alt='n8n workflow automation interface'
            width={800}
            height={450}

@@ -489,7 +484,7 @@ export default function OpenAiN8nSim() {

        <figure className='my-8 overflow-hidden rounded-lg'>
          <Image
            src='/building/openai-vs-n8n-vs-sim/sim.png'
            src='/blog/openai-vs-n8n-vs-sim/sim.png'
            alt='Sim visual workflow builder with AI agent blocks'
            width={800}
            height={450}

@@ -515,7 +510,7 @@ export default function OpenAiN8nSim() {

        <figure className='my-8 overflow-hidden rounded-lg'>
          <Image
            src='/building/openai-vs-n8n-vs-sim/copilot.png'
            src='/blog/openai-vs-n8n-vs-sim/copilot.png'
            alt='Sim AI Copilot assisting with workflow development'
            width={800}
            height={450}

@@ -540,7 +535,7 @@ export default function OpenAiN8nSim() {

        <figure className='my-8 overflow-hidden rounded-lg'>
          <Image
            src='/building/openai-vs-n8n-vs-sim/templates.png'
            src='/blog/openai-vs-n8n-vs-sim/templates.png'
            alt='Sim workflow templates gallery'
            width={800}
            height={450}

@@ -621,7 +616,7 @@ export default function OpenAiN8nSim() {

        <figure className='my-8 overflow-hidden rounded-lg'>
          <Image
            src='/building/openai-vs-n8n-vs-sim/logs.png'
            src='/blog/openai-vs-n8n-vs-sim/logs.png'
            alt='Sim execution logs and monitoring dashboard'
            width={800}
            height={450}

@@ -2,7 +2,7 @@ import type { Metadata } from 'next'
import OpenAiN8nSim from './openai-n8n-sim'

const baseUrl = 'https://sim.ai'
const canonicalUrl = `${baseUrl}/building/openai-vs-n8n-vs-sim`
const canonicalUrl = `${baseUrl}/blog/openai-vs-n8n-vs-sim`

export const metadata: Metadata = {
  title: 'OpenAI AgentKit vs n8n vs Sim: AI Agent Workflow Builder Comparison | Sim',

@@ -106,7 +106,7 @@ export const metadata: Metadata = {
    ],
    images: [
      {
        url: `${baseUrl}/building/openai-vs-n8n-vs-sim/workflow.png`,
        url: `${baseUrl}/blog/openai-vs-n8n-vs-sim/workflow.png`,
        width: 1200,
        height: 630,
        alt: 'Sim AI agent workflow builder interface comparison',

@@ -118,7 +118,7 @@ export const metadata: Metadata = {
    title: 'OpenAI AgentKit vs n8n vs Sim: AI Agent Workflow Builder Comparison',
    description:
      'Compare OpenAI AgentKit, n8n, and Sim for building AI agent workflows. Explore key differences in capabilities, integrations, and which platform fits your production needs.',
    images: ['/building/openai-vs-n8n-vs-sim/workflow.png'],
    images: ['/blog/openai-vs-n8n-vs-sim/workflow.png'],
    creator: '@karabegemir',
    site: '@simai',
  },

@@ -130,10 +130,6 @@ export const metadata: Metadata = {
  },
}

/**
 * Blog post page comparing OpenAI AgentKit, n8n, and Sim workflow builders for AI agents.
 * Optimized for SEO with structured data, canonical URLs, and comprehensive metadata.
 */
export default function Page() {
  return <OpenAiN8nSim />
}
apps/sim/app/(landing)/blog/page.tsx (new file, +5)
@@ -0,0 +1,5 @@
+ import { redirect } from 'next/navigation'
+
+ export default function BlogPage() {
+ redirect('/blog/openai-vs-n8n-vs-sim')
+ }
@@ -1,8 +0,0 @@
- import { redirect } from 'next/navigation'
-
- /**
-  * Redirects /building to the latest blog post
-  */
- export default function BuildingPage() {
- redirect('/building/openai-vs-n8n-vs-sim')
- }
@@ -216,10 +216,10 @@ export default function Footer({ fullWidth = false }: FooterProps) {
Enterprise
</Link>
<Link
- href='/building'
+ href='/blog'
className='text-[14px] text-muted-foreground transition-colors hover:text-foreground'
>
- Building
+ Blog
</Link>
<Link
href='/changelog'
@@ -15,15 +15,16 @@ export default function PrivacyPolicy() {
return (
<LegalLayout title='Privacy Policy'>
<section>
- <p className='mb-4'>Last Updated: September 10, 2025</p>
+ <p className='mb-4'>Last Updated: October 11, 2025</p>
<p>
- This Privacy Policy describes how your personal information is collected, used, and shared
- when you visit or use Sim ("the Service", "we", "us", or "our").
+ This Privacy Policy describes how Sim ("we", "us", "our", or "the Service") collects,
+ uses, discloses, and protects personal data — including data obtained from Google APIs
+ (including Google Workspace APIs) — and your rights and controls regarding that data.
</p>
<p className='mt-4'>
- By using the Service, you agree to the collection and use of information in accordance
- with this policy. Unless otherwise defined in this Privacy Policy, terms used in this
- Privacy Policy have the same meanings as in our Terms of Service.
+ By using or accessing the Service, you confirm that you have read and understood this
+ Privacy Policy, and you consent to the collection, use, and disclosure of your information
+ as described herein.
</p>
</section>

@@ -39,6 +40,10 @@ export default function PrivacyPolicy() {
<h3 className='mb-2 font-medium text-xl'>Definitions</h3>
<p className='mb-4'>For the purposes of this Privacy Policy:</p>
<ul className='mb-4 list-disc space-y-2 pl-6'>
+ <li>
+ <strong>Application</strong> or <strong>Service</strong> means the Sim web or mobile
+ application or related services.
+ </li>
<li>
<strong>Account</strong> means a unique account created for You to access our Service or
parts of our Service.
@@ -49,10 +54,6 @@ export default function PrivacyPolicy() {
shares, equity interest or other securities entitled to vote for election of directors
or other managing authority.
</li>
- <li>
- <strong>Application</strong> means the software program provided by the Company
- downloaded by You on any electronic device.
- </li>
<li>
<strong>Business</strong>, for the purpose of the CCPA (California Consumer Privacy
Act), refers to the Company as the legal entity that collects Consumers' personal
@@ -90,13 +91,26 @@ export default function PrivacyPolicy() {
tracking of their online activities across websites.
</li>
<li>
- <strong>Personal Data</strong> is any information that relates to an identified or
- identifiable individual. For the purposes for GDPR, Personal Data means any information
- relating to You such as a name, an identification number, location data, online
- identifier or to one or more factors specific to the physical, physiological, genetic,
- mental, economic, cultural or social identity. For the purposes of the CCPA, Personal
- Data means any information that identifies, relates to, describes or is capable of being
- associated with, or could reasonably be linked, directly or indirectly, with You.
+ <strong>Personal Data</strong> (or "Personal Information") is any information that
+ relates to an identified or identifiable individual. For the purposes of GDPR, Personal
+ Data means any information relating to You such as a name, an identification number,
+ location data, online identifier or to one or more factors specific to the physical,
+ physiological, genetic, mental, economic, cultural or social identity. For the purposes
+ of the CCPA, Personal Data means any information that identifies, relates to, describes
+ or is capable of being associated with, or could reasonably be linked, directly or
+ indirectly, with You.
</li>
+ <li>
+ <strong>Google Data</strong> means any data, content, or metadata obtained via Google
+ APIs (including Google Workspace APIs).
+ </li>
+ <li>
+ <strong>Generalized AI/ML model</strong> means an AI or ML model intended to be broadly
+ trained across multiple users, not specific to a single user's data or behavior.
+ </li>
+ <li>
+ <strong>User-facing features</strong> means features directly visible or used by the
+ individual user through the app UI.
+ </li>
<li>
<strong>Sale</strong>, for the purpose of the CCPA (California Consumer Privacy Act),
@@ -105,9 +119,6 @@ export default function PrivacyPolicy() {
means, a Consumer's Personal information to another business or a third party for
monetary or other valuable consideration.
</li>
- <li>
- <strong>Service</strong> refers to the Application or the Website or both.
- </li>
<li>
<strong>Service Provider</strong> means any natural or legal person who processes the
data on behalf of the Company. It refers to third-party companies or individuals
@@ -140,20 +151,37 @@ export default function PrivacyPolicy() {

<section>
<h2 className='mb-4 font-semibold text-2xl'>1. Information We Collect</h2>
- <h3 className='mb-2 font-medium text-xl'>Personal Information</h3>
+ <h3 className='mb-2 font-medium text-xl'>Personal Data You Provide</h3>
<p className='mb-4'>
- While using our Service, we may ask you to provide us with certain personally identifiable
- information that can be used to contact or identify you ("Personal Information").
- Personally identifiable information may include, but is not limited to:
+ When you sign up, link accounts, or use features, you may provide Personal Data such as:
</p>
<ul className='mb-4 list-disc space-y-2 pl-6'>
- <li>Email address</li>
- <li>First name and last name</li>
- <li>Phone number</li>
- <li>Address, State, Province, ZIP/Postal code, City</li>
- <li>Cookies and Usage Data</li>
+ <li>Name and email address</li>
+ <li>Phone number and mailing address</li>
+ <li>Profile picture, settings, and preferences</li>
+ <li>Content you upload (e.g., documents, files) within Sim</li>
+ <li>Any data you explicitly input or connect, including via Google integrations</li>
</ul>
+
+ <h3 className='mb-2 font-medium text-xl'>Google Data via API Scopes</h3>
+ <p className='mb-4'>
+ If you choose to connect your Google account (e.g., Google Workspace, Gmail, Drive,
+ Calendar, Contacts), we may request specific scopes. Types of Google Data we may access
+ include:
+ </p>
+ <ul className='mb-4 list-disc space-y-2 pl-6'>
+ <li>Basic profile (name, email)</li>
+ <li>Drive files and documents</li>
+ <li>Calendar events</li>
+ <li>Contacts</li>
+ <li>Gmail messages (only if explicitly requested for a specific feature)</li>
+ <li>Other Google Workspace content or metadata as needed per feature</li>
+ </ul>
+ <p className='mb-4'>
+ We only request the minimal scopes necessary for the features you enable. We do not
+ request scopes for unimplemented features.
+ </p>

<h3 className='mb-2 font-medium text-xl'>Usage Data</h3>
<p className='mb-4'>
We may also collect information on how the Service is accessed and used ("Usage Data").
@@ -212,7 +240,21 @@ export default function PrivacyPolicy() {
To contact You by email, telephone calls, SMS, or other equivalent forms of electronic
communication
</li>
+ <li>
+ To enable and support user-enabled integrations with Google services (e.g., syncing
+ files or calendar) and provide personalization, suggestions, and user-specific
+ automation for that individual user.
+ </li>
+ <li>
+ To detect and prevent fraud, abuse, or security incidents and to comply with legal
+ obligations.
+ </li>
</ul>
+ <p className='mt-4'>
+ <strong>Importantly:</strong> any Google Data used within Sim is used only for features
+ tied to that specific user (user-facing features), and <strong>never</strong> for
+ generalized AI/ML training or shared model improvement across users.
+ </p>
</section>

<section>
@@ -290,32 +332,10 @@ export default function PrivacyPolicy() {
<section>
<h2 className='mb-4 font-semibold text-2xl'>7. Analytics</h2>
<p className='mb-4'>
We may use third-party Service Providers to monitor and analyze the use of our Service.
</p>
- <h3 className='mb-2 font-medium text-xl'>Google Analytics</h3>
- <p className='mb-4'>
- Google Analytics is a web analytics service offered by Google that tracks and reports
- website traffic. Google uses the data collected to track and monitor the use of our
- Service. This data is shared with other Google services. Google may use the collected data
- to contextualize and personalize the ads of its own advertising network.
- </p>
- <p className='mb-4'>
- You can opt-out of having made your activity on the Service available to Google Analytics
- by installing the Google Analytics opt-out browser add-on. The add-on prevents the Google
- Analytics JavaScript (ga.js, analytics.js, and dc.js) from sharing information with Google
- Analytics about visits activity.
- </p>
<p>
- For more information on the privacy practices of Google, please visit the Google Privacy &
- Terms web page:{' '}
- <Link
- href='https://policies.google.com/privacy?hl=en'
- className='text-[var(--brand-primary-hex)] underline hover:text-[var(--brand-primary-hover-hex)]'
- target='_blank'
- rel='noopener noreferrer'
- >
- https://policies.google.com/privacy
- </Link>
+ We may aggregate or anonymize non-Google data (not tied to personal identity) for internal
+ analytics, product improvement, usage trends, or performance monitoring. This data cannot
+ be tied back to individual users and is not used for generalized AI/ML training with
+ Google Data.
</p>
</section>

@@ -366,29 +386,117 @@ export default function PrivacyPolicy() {
</ul>
</section>

- <section>
- <h2 className='mb-4 font-semibold text-2xl'>10. Google Workspace APIs</h2>
- <p className='mb-4'>
- We want to explicitly affirm that any user data obtained through Google Workspace APIs is{' '}
- <strong>not</strong> used to develop, improve, or train generalized AI and/or machine
- learning models. We use data obtained through Google Workspace APIs solely for the purpose
- of providing and improving the specific functionality of our Service for which the API
- access was granted.
- </p>
- </section>
-
<section>
<h2 className='mb-4 font-semibold text-2xl'>
- 11. Information Collected while Using Google APIs
+ 10. Use of Google / Workspace APIs & Data — Limited Use
</h2>
+ <h3 className='mb-2 font-medium text-xl'>Affirmative Statement & Compliance</h3>
<p className='mb-4'>
- Sim's use and transfer to any other app of information received from Google APIs will
- adhere to Google API Services User Data Policy, including the Limited Use requirements.
+ Sim's use, storage, processing, and transfer of Google Data (raw or derived) strictly
+ adheres to the Google API Services User Data Policy, including the Limited Use
+ requirements, and to the Google Workspace API user data policy (when applicable). We
+ explicitly affirm that:
</p>
+ <ul className='mb-4 list-disc space-y-2 pl-6'>
+ <li>
+ Sim does not use, transfer, or allow Google Data to be used to train, improve, or
+ develop generalized or non-personalized AI/ML models.
+ </li>
+ <li>
+ Any processing of Google Data is limited to providing or improving user-facing features
+ visible in the app UI.
+ </li>
+ <li>
+ We do not allow third parties to access Google Data for purposes of training or model
+ improvement.
+ </li>
+ <li>Transfers of Google Data are disallowed except in limited permitted cases.</li>
+ </ul>
+
+ <h3 className='mb-2 font-medium text-xl'>Permitted Transfers & Data Use</h3>
+ <p className='mb-4'>
+ We may only transfer Google Data (raw or derived) to third parties under the following
+ limited conditions and always aligned with user disclosures and consent:
+ </p>
+ <ul className='mb-4 list-disc space-y-2 pl-6'>
+ <li>To provide or improve user-facing features (with the user's explicit consent)</li>
+ <li>For security, abuse investigation, or system integrity</li>
+ <li>To comply with laws or legal obligations</li>
+ <li>
+ As part of a merger, acquisition, divestiture, or sale of assets, with explicit user
+ consent
+ </li>
+ </ul>
+
+ <h3 className='mb-2 font-medium text-xl'>Human Access Restrictions</h3>
+ <p className='mb-4'>
+ We restrict human review of Google Data strictly. No employee, contractor, or agent may
+ view Google Data unless one of the following is true:
+ </p>
+ <ul className='mb-4 list-disc space-y-2 pl-6'>
+ <li>
+ The user gave explicit, documented consent to view specific items (e.g., "Let customer
+ support view this email/file").
+ </li>
+ <li>It is necessary for security, abuse investigation, or legal process.</li>
+ <li>
+ Data is aggregated, anonymized, and used for internal operations only (without
+ re-identification).
+ </li>
+ </ul>
+
+ <h3 className='mb-2 font-medium text-xl'>Scope Minimization & Justification</h3>
+ <p className='mb-4'>
+ We only request scopes essential to features you opt into; we do not request broad or
+ unused permissions. For each Google API scope we request, we maintain internal
+ documentation justifying why that scope is needed and why narrower scopes are
+ insufficient. Where possible, we follow incremental authorization and request additional
+ scopes only when needed in context.
+ </p>
+
+ <h3 className='mb-2 font-medium text-xl'>Secure Handling & Storage</h3>
+ <ul className='mb-4 list-disc space-y-2 pl-6'>
+ <li>Google Data is encrypted in transit (TLS/HTTPS) and at rest.</li>
+ <li>Access controls, role-based permissions, logging, and auditing protect data.</li>
+ <li>
+ OAuth tokens and credentials are stored securely (e.g., encrypted vault, hardware or
+ secure key management).
+ </li>
+ <li>We regularly review security practices and infrastructure.</li>
+ <li>
+ If a security incident affects Google Data, we will notify Google as required and
+ cooperate fully.
+ </li>
+ </ul>
+
+ <h3 className='mb-2 font-medium text-xl'>Retention & Deletion</h3>
+ <p className='mb-4'>We retain data only as long as necessary for the purposes disclosed:</p>
+ <ul className='mb-4 list-disc space-y-2 pl-6'>
+ <li>
+ <strong>Account Data:</strong> Retained during active account + 30 days after deletion
+ request
+ </li>
+ <li>
+ <strong>Google API Data:</strong> Retained during feature use + 7 days after revocation
+ or account deletion
+ </li>
+ <li>
+ <strong>Usage Logs:</strong> 90 days for analytics; up to 1 year for security
+ investigations
+ </li>
+ <li>
+ <strong>Transaction Records:</strong> Up to 7 years for legal and tax compliance
+ </li>
+ </ul>
+ <p className='mb-4'>
+ When you revoke access, delete your account, or stop using a feature, we remove associated
+ data within the timeframes above. You may request deletion via in-app settings or by
+ contacting us; we will comply promptly.
+ </p>
</section>

<section>
- <h2 className='mb-4 font-semibold text-2xl'>12. Links To Other Sites</h2>
+ <h2 className='mb-4 font-semibold text-2xl'>11. Links To Other Sites</h2>
<p className='mb-4'>
Our Service may contain links to other sites that are not operated by us. If you click on
a third party link, you will be directed to that third party's site. We strongly advise
@@ -401,7 +509,7 @@ export default function PrivacyPolicy() {
</section>

<section>
- <h2 className='mb-4 font-semibold text-2xl'>13. Children's Privacy</h2>
+ <h2 className='mb-4 font-semibold text-2xl'>12. Children's Privacy</h2>
<p className='mb-4'>
Our Service does not address anyone under the age of 18 ("Children").
</p>
@@ -415,7 +523,7 @@ export default function PrivacyPolicy() {
</section>

<section>
- <h2 className='mb-4 font-semibold text-2xl'>14. Changes To This Privacy Policy</h2>
+ <h2 className='mb-4 font-semibold text-2xl'>13. Changes To This Privacy Policy</h2>
<p className='mb-4'>
We may update our Privacy Policy from time to time. We will notify you of any changes by
posting the new Privacy Policy on this page.
@@ -433,7 +541,7 @@ export default function PrivacyPolicy() {

<section>
<h2 className='mb-4 font-semibold text-2xl'>
- 15. Your Data Protection Rights Under General Data Protection Regulation (GDPR)
+ 14. Your Data Protection Rights Under General Data Protection Regulation (GDPR)
</h2>
<p className='mb-4'>
If you are a resident of the European Economic Area (EEA), you have certain data
@@ -482,23 +590,50 @@ export default function PrivacyPolicy() {
</section>

<section>
- <h2 className='mb-4 font-semibold text-2xl'>16. California Privacy Rights</h2>
+ <h2 className='mb-4 font-semibold text-2xl'>15. California Privacy Rights</h2>
<p className='mb-4'>
- California Civil Code Section 1798.83, also known as the "Shine The Light" law, permits
- our users who are California residents to request and obtain from us, once a year and free
- of charge, information about categories of personal information (if any) we disclosed to
- third parties for direct marketing purposes and the names and addresses of all third
- parties with which we shared personal information in the immediately preceding calendar
- year.
+ If you are a California resident, you have specific rights under the California Consumer
+ Privacy Act (CCPA) and California Privacy Rights Act (CPRA), including the right to know
+ what personal information we collect, the right to delete your information, and the right
+ to opt-out of the sale or sharing of your personal information.
</p>
+
+ <h3 className='mb-2 font-medium text-xl'>Do Not Sell or Share My Personal Information</h3>
+ <p className='mb-4'>
+ We do not sell your personal information for monetary consideration. However, some data
+ sharing practices (such as analytics or advertising services) may be considered a "sale"
+ or "share" under CCPA/CPRA. You have the right to opt-out of such data sharing. To
+ exercise this right, contact us at{' '}
+ <Link
+ href='mailto:privacy@sim.ai'
+ className='text-[var(--brand-primary-hex)] underline hover:text-[var(--brand-primary-hover-hex)]'
+ >
+ privacy@sim.ai
+ </Link>
+ .
+ </p>
+
+ <h3 className='mb-2 font-medium text-xl'>Global Privacy Control (GPC)</h3>
+ <p className='mb-4'>
+ We recognize and honor Global Privacy Control (GPC) signals. When your browser sends a GPC
+ signal, we will treat it as a valid request to opt-out of the sale or sharing of your
+ personal information.
+ </p>
+
+ <h3 className='mb-2 font-medium text-xl'>Shine The Light Law</h3>
+ <p className='mb-4'>
+ California Civil Code Section 1798.83 permits California residents to request information
+ about categories of personal information we disclosed to third parties for direct
+ marketing purposes in the preceding calendar year.
+ </p>
<p>
- If you are a California resident and would like to make such a request, please submit your
- request in writing to us using the contact information provided below.
+ To make a request under CCPA or the Shine The Light law, please submit your request using
+ the contact information provided below.
</p>
</section>

<section>
- <h2 className='mb-4 font-semibold text-2xl'>17. Vulnerability Disclosure Policy</h2>
+ <h2 className='mb-4 font-semibold text-2xl'>16. Vulnerability Disclosure Policy</h2>

<h3 className='mb-2 font-medium text-xl'>Introduction</h3>
<p className='mb-4'>
@@ -617,16 +752,24 @@ export default function PrivacyPolicy() {
</section>

<section>
- <h2 className='mb-4 font-semibold text-2xl'>18. Contact Us</h2>
- <p>
- If you have any questions about this Privacy Policy, please contact us at:{' '}
- <Link
- href='mailto:privacy@sim.ai'
- className='text-[var(--brand-primary-hex)] underline hover:text-[var(--brand-primary-hover-hex)]'
- >
- privacy@sim.ai
- </Link>
+ <h2 className='mb-4 font-semibold text-2xl'>17. Contact & Dispute Resolution</h2>
+ <p className='mb-4'>
+ If you have questions, requests, or complaints regarding this Privacy Policy or our data
+ practices, you may contact us at:
+ </p>
+ <ul className='mb-4 list-disc space-y-2 pl-6'>
+ <li>
+ Email:{' '}
+ <Link
+ href='mailto:privacy@sim.ai'
+ className='text-[var(--brand-primary-hex)] underline hover:text-[var(--brand-primary-hover-hex)]'
+ >
+ privacy@sim.ai
+ </Link>
+ </li>
+ <li>Mailing Address: Sim, 80 Langton St, San Francisco, CA 94133, USA</li>
+ </ul>
+ <p>We will respond to your request within a reasonable timeframe.</p>
</section>
</LegalLayout>
)
@@ -15,7 +15,7 @@ export default function TermsOfService() {
return (
<LegalLayout title='Terms of Service'>
<section>
- <p className='mb-4'>Last Updated: September 10, 2025</p>
+ <p className='mb-4'>Last Updated: October 11, 2025</p>
<p>
Please read these Terms of Service ("Terms") carefully before using the Sim platform (the
"Service") operated by Sim, Inc ("us", "we", or "our").
@@ -44,7 +44,71 @@ export default function TermsOfService() {
</section>

<section>
- <h2 className='mb-4 font-semibold text-2xl'>2. Intellectual Property</h2>
+ <h2 className='mb-4 font-semibold text-2xl'>2. License to Use Service</h2>
+ <p className='mb-4'>
+ Subject to your compliance with these Terms, we grant you a limited, non-exclusive,
+ non-transferable, revocable license to access and use the Service for your internal
+ business or personal purposes.
+ </p>
+ <p>
+ This license does not permit you to resell, redistribute, or make the Service available to
+ third parties, or to use the Service to build a competitive product or service.
+ </p>
+ </section>
+
+ <section>
+ <h2 className='mb-4 font-semibold text-2xl'>3. Subscription Plans & Payment Terms</h2>
+ <p className='mb-4'>
+ We offer Free, Pro, Team, and Enterprise subscription plans. Paid plans include a base
+ subscription fee plus usage-based charges for inference and other services that exceed
+ your plan's included limits.
+ </p>
+ <p className='mb-4'>
+ You agree to pay all fees associated with your account. Your base subscription fee is
+ charged at the beginning of each billing cycle (monthly or annually). Inference overages
+ are charged incrementally every $50 during your billing period, which may result in
+ multiple invoices within a single billing cycle. Payment is due upon receipt of invoice.
+ If payment fails, we may suspend or terminate your access to paid features.
+ </p>
+ <p>
+ We reserve the right to change our pricing with 30 days' notice to paid subscribers. Price
+ changes will take effect at your next renewal.
+ </p>
+ </section>
+
+ <section>
+ <h2 className='mb-4 font-semibold text-2xl'>4. Auto-Renewal & Cancellation</h2>
+ <p className='mb-4'>
+ Paid subscriptions automatically renew at the end of each billing period unless you cancel
+ before the renewal date. You can cancel your subscription at any time through your account
+ settings or by contacting us.
+ </p>
+ <p className='mb-4'>
+ Cancellations take effect at the end of the current billing period. You will retain access
+ to paid features until that time. We do not provide refunds for partial billing periods.
+ </p>
+ <p>
+ Upon cancellation or termination, you may export your data within 30 days. After 30 days,
+ we may delete your data in accordance with our data retention policies.
+ </p>
+ </section>
+
+ <section>
+ <h2 className='mb-4 font-semibold text-2xl'>5. Data Ownership & Retention</h2>
+ <p className='mb-4'>
+ You retain all ownership rights to data, content, and information you submit to the
+ Service ("Your Data"). You grant us a limited license to process, store, and transmit Your
+ Data solely to provide and improve the Service as described in our Privacy Policy.
+ </p>
+ <p>
+ We retain Your Data while your account is active and for 30 days after account termination
+ or cancellation. You may request data export or deletion at any time through your account
+ settings.
+ </p>
+ </section>
+
+ <section>
+ <h2 className='mb-4 font-semibold text-2xl'>6. Intellectual Property</h2>
<p className='mb-4'>
The Service and its original content, features, and functionality are and will remain the
exclusive property of Sim, Inc and its licensors. The Service is protected by copyright,
@@ -57,7 +121,7 @@ export default function TermsOfService() {
</section>

<section>
- <h2 className='mb-4 font-semibold text-2xl'>3. User Content</h2>
+ <h2 className='mb-4 font-semibold text-2xl'>7. User Content</h2>
<p className='mb-4'>
Our Service allows you to post, link, store, share and otherwise make available certain
information, text, graphics, videos, or other material ("User Content"). You are
@@ -84,7 +148,21 @@ export default function TermsOfService() {
</section>

<section>
- <h2 className='mb-4 font-semibold text-2xl'>4. Acceptable Use</h2>
+ <h2 className='mb-4 font-semibold text-2xl'>8. Third-Party Services</h2>
+ <p className='mb-4'>
+ The Service may integrate with third-party services (such as Google Workspace, cloud
+ storage providers, and AI model providers). Your use of third-party services is subject to
+ their respective terms and privacy policies.
+ </p>
+ <p>
+ We are not responsible for the availability, functionality, or actions of third-party
+ services. Any issues with third-party integrations should be directed to the respective
+ provider.
+ </p>
+ </section>
+
+ <section>
+ <h2 className='mb-4 font-semibold text-2xl'>9. Acceptable Use</h2>
<p className='mb-4'>You agree not to use the Service:</p>
<ul className='mb-4 list-disc space-y-2 pl-6'>
<li>
@@ -115,7 +193,7 @@ export default function TermsOfService() {
</section>

<section>
- <h2 className='mb-4 font-semibold text-2xl'>5. Termination</h2>
+ <h2 className='mb-4 font-semibold text-2xl'>10. Termination</h2>
<p className='mb-4'>
We may terminate or suspend your account immediately, without prior notice or liability,
for any reason whatsoever, including without limitation if you breach the Terms.
@@ -127,7 +205,7 @@ export default function TermsOfService() {
</section>

<section>
- <h2 className='mb-4 font-semibold text-2xl'>6. Limitation of Liability</h2>
+ <h2 className='mb-4 font-semibold text-2xl'>11. Limitation of Liability</h2>
<p className='mb-4'>
In no event shall Sim, Inc, nor its directors, employees, partners, agents, suppliers, or
affiliates, be liable for any indirect, incidental, special, consequential or punitive
@@ -147,7 +225,7 @@ export default function TermsOfService() {
</section>

<section>
- <h2 className='mb-4 font-semibold text-2xl'>7. Disclaimer</h2>
+ <h2 className='mb-4 font-semibold text-2xl'>12. Disclaimer</h2>
<p className='mb-4'>
Your use of the Service is at your sole risk. The Service is provided on an "AS IS" and
"AS AVAILABLE" basis. The Service is provided without warranties of any kind, whether
@@ -167,7 +245,17 @@ export default function TermsOfService() {
</section>

<section>
- <h2 className='mb-4 font-semibold text-2xl'>8. Governing Law</h2>
+ <h2 className='mb-4 font-semibold text-2xl'>13. Indemnification</h2>
+ <p>
+ You agree to indemnify, defend, and hold harmless Sim, Inc and its officers, directors,
+ employees, and agents from any claims, damages, losses, liabilities, and expenses
+ (including reasonable attorneys' fees) arising from your use of the Service, your
+ violation of these Terms, or your violation of any rights of another party.
+ </p>
+ </section>
+
+ <section>
+ <h2 className='mb-4 font-semibold text-2xl'>14. Governing Law</h2>
<p>
These Terms shall be governed and construed in accordance with the laws of the United
States, without regard to its conflict of law provisions.
@@ -180,7 +268,7 @@ export default function TermsOfService() {
</section>

<section>
- <h2 className='mb-4 font-semibold text-2xl'>9. Arbitration Agreement</h2>
+ <h2 className='mb-4 font-semibold text-2xl'>15. Arbitration Agreement</h2>
<p className='mb-4'>
Please read the following arbitration agreement carefully. It requires you to arbitrate
disputes with Sim, Inc, its parent companies, subsidiaries, affiliates, successors and
@@ -221,7 +309,7 @@ export default function TermsOfService() {
</section>

<section>
- <h2 className='mb-4 font-semibold text-2xl'>10. Changes to Terms</h2>
+ <h2 className='mb-4 font-semibold text-2xl'>16. Changes to Terms</h2>
<p>
We reserve the right, at our sole discretion, to modify or replace these Terms at any
time. If a revision is material, we will try to provide at least 30 days' notice prior to
@@ -236,7 +324,7 @@ export default function TermsOfService() {
</section>

<section>
- <h2 className='mb-4 font-semibold text-2xl'>11. Copyright Policy</h2>
+ <h2 className='mb-4 font-semibold text-2xl'>17. Copyright Policy</h2>
<p className='mb-4'>
We respect the intellectual property of others and ask that users of our Service do the
same. If you believe that one of our users is, through the use of our Service, unlawfully
@@ -270,7 +358,7 @@ export default function TermsOfService() {
</section>

<section>
- <h2 className='mb-4 font-semibold text-2xl'>12. Contact Us</h2>
+ <h2 className='mb-4 font-semibold text-2xl'>18. Contact Us</h2>
<p>
If you have any questions about these Terms, please contact us at:{' '}
<Link
@@ -1,31 +0,0 @@
- /**
-  * API Test Setup
-  */
- import { afterEach, beforeEach, vi } from 'vitest'
-
- vi.mock('next/headers', () => ({
- cookies: () => ({
- get: vi.fn().mockReturnValue({ value: 'test-session-token' }),
- }),
- headers: () => ({
- get: vi.fn().mockReturnValue('test-value'),
- }),
- }))
-
- vi.mock('@/lib/auth/session', () => ({
- getSession: vi.fn().mockResolvedValue({
- user: {
- id: 'user-id',
- email: 'test@example.com',
- },
- sessionToken: 'test-session-token',
- }),
- }))
-
- beforeEach(() => {
- vi.clearAllMocks()
- })
-
- afterEach(() => {
- vi.restoreAllMocks()
- })
@@ -1364,24 +1364,6 @@ export function setupKnowledgeApiMocks(
}
}

- // Legacy functions for backward compatibility (DO NOT REMOVE - still used in tests)
-
- /**
-  * @deprecated Use mockAuth instead - provides same functionality with improved interface
-  */
- export function mockAuthSession(isAuthenticated = true, user: MockUser = mockUser) {
- const authMocks = mockAuth(user)
- if (isAuthenticated) {
- authMocks.setAuthenticated(user)
- } else {
- authMocks.setUnauthenticated()
- }
- return authMocks
- }
-
/**
 * @deprecated Use setupComprehensiveTestMocks instead - provides better organization and features
 */
export function setupApiTestMocks(
options: {
authenticated?: boolean
@@ -1412,9 +1394,6 @@ export function setupApiTestMocks(
})
}

- /**
-  * @deprecated Use createStorageProviderMocks instead
-  */
export function mockUploadUtils(
options: { isCloudStorage?: boolean; uploadResult?: any; uploadError?: boolean } = {}
) {
@@ -1452,10 +1431,6 @@ export function mockUploadUtils(
}))
}

- /**
-  * Create a mock transaction function for database testing
-  * @deprecated Use createMockDatabase instead
-  */
export function createMockTransaction(
mockData: {
selectData?: DatabaseSelectResult[]
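Migration note (not part of the diff): a minimal sketch of moving a test off the deprecated wrapper above, assuming mockAuth exposes the setAuthenticated/setUnauthenticated setters seen in the wrapper body.

// Sketch only - mockAuth's exact surface is inferred from the deprecated wrapper above.
// Before (deprecated):
const auth = mockAuthSession(false)

// After: call mockAuth directly and set the auth state the test needs.
const authMocks = mockAuth(mockUser)
authMocks.setUnauthenticated()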
@@ -4,6 +4,7 @@ import { eq } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { getSession } from '@/lib/auth'
import { createLogger } from '@/lib/logs/console/logger'
+ import { validateMicrosoftGraphId } from '@/lib/security/input-validation'
import { generateRequestId } from '@/lib/utils'
import { refreshAccessTokenIfNeeded } from '@/app/api/auth/oauth/utils'

@@ -11,21 +12,15 @@ export const dynamic = 'force-dynamic'

const logger = createLogger('MicrosoftFileAPI')

- /**
-  * Get a single file from Microsoft OneDrive
-  */
export async function GET(request: NextRequest) {
const requestId = generateRequestId()
try {
- // Get the session
const session = await getSession()

- // Check if the user is authenticated
if (!session?.user?.id) {
return NextResponse.json({ error: 'User not authenticated' }, { status: 401 })
}

- // Get the credential ID and file ID from the query params
const { searchParams } = new URL(request.url)
const credentialId = searchParams.get('credentialId')
const fileId = searchParams.get('fileId')
@@ -34,7 +29,12 @@ export async function GET(request: NextRequest) {
return NextResponse.json({ error: 'Credential ID and File ID are required' }, { status: 400 })
}

- // Get the credential from the database
+ const fileIdValidation = validateMicrosoftGraphId(fileId, 'fileId')
+ if (!fileIdValidation.isValid) {
+ logger.warn(`[${requestId}] Invalid file ID: ${fileIdValidation.error}`)
+ return NextResponse.json({ error: fileIdValidation.error }, { status: 400 })
+ }
+
const credentials = await db.select().from(account).where(eq(account.id, credentialId)).limit(1)

if (!credentials.length) {
@@ -43,12 +43,10 @@ export async function GET(request: NextRequest) {

const credential = credentials[0]

- // Check if the credential belongs to the user
if (credential.userId !== session.user.id) {
return NextResponse.json({ error: 'Unauthorized' }, { status: 403 })
}

- // Refresh access token if needed using the utility function
const accessToken = await refreshAccessTokenIfNeeded(credentialId, session.user.id, requestId)

if (!accessToken) {
@@ -80,7 +78,6 @@ export async function GET(request: NextRequest) {

const file = await response.json()

- // Transform the response to match expected format
const transformedFile = {
id: file.id,
name: file.name,
@@ -4,6 +4,7 @@ import { eq } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { getSession } from '@/lib/auth'
import { createLogger } from '@/lib/logs/console/logger'
+ import { validateEnum, validatePathSegment } from '@/lib/security/input-validation'
import { generateRequestId } from '@/lib/utils'
import { refreshAccessTokenIfNeeded } from '@/app/api/auth/oauth/utils'

@@ -38,16 +39,24 @@ export async function GET(request: NextRequest) {
return NextResponse.json({ error: 'Credential ID and Item ID are required' }, { status: 400 })
}

- // Validate item type - only handle contacts now
- if (type !== 'contact') {
- logger.warn(`[${requestId}] Invalid item type: ${type}`)
- return NextResponse.json(
- { error: 'Invalid item type. Only contact is supported.' },
- { status: 400 }
- )
+ const typeValidation = validateEnum(type, ['contact'] as const, 'type')
+ if (!typeValidation.isValid) {
+ logger.warn(`[${requestId}] Invalid item type: ${typeValidation.error}`)
+ return NextResponse.json({ error: typeValidation.error }, { status: 400 })
}

+ const itemIdValidation = validatePathSegment(itemId, {
+ paramName: 'itemId',
+ maxLength: 100,
+ allowHyphens: true,
+ allowUnderscores: true,
+ allowDots: false,
+ })
+ if (!itemIdValidation.isValid) {
+ logger.warn(`[${requestId}] Invalid item ID: ${itemIdValidation.error}`)
+ return NextResponse.json({ error: itemIdValidation.error }, { status: 400 })
+ }
+
// Get the credential from the database
const credentials = await db.select().from(account).where(eq(account.id, credentialId)).limit(1)

if (!credentials.length) {
@@ -57,7 +66,6 @@ export async function GET(request: NextRequest) {

const credential = credentials[0]

- // Check if the credential belongs to the user
if (credential.userId !== session.user.id) {
logger.warn(`[${requestId}] Unauthorized credential access attempt`, {
credentialUserId: credential.userId,
@@ -66,7 +74,6 @@ export async function GET(request: NextRequest) {
return NextResponse.json({ error: 'Unauthorized' }, { status: 403 })
}

- // Refresh access token if needed
const accessToken = await refreshAccessTokenIfNeeded(credentialId, session.user.id, requestId)

if (!accessToken) {
@@ -74,7 +81,6 @@ export async function GET(request: NextRequest) {
return NextResponse.json({ error: 'Failed to obtain valid access token' }, { status: 401 })
}

- // Determine the endpoint based on item type - only contacts
const endpoints = {
contact: 'contacts',
}
@@ -82,7 +88,6 @@ export async function GET(request: NextRequest) {

logger.info(`[${requestId}] Fetching ${type} ${itemId} from Wealthbox`)

- // Make request to Wealthbox API
const response = await fetch(`https://api.crmworkspace.com/v1/${endpoint}/${itemId}`, {
headers: {
Authorization: `Bearer ${accessToken}`,
@@ -120,13 +125,10 @@ export async function GET(request: NextRequest) {
totalCount: data.meta?.total_count,
})

- // Transform the response to match our expected format
let items: any[] = []

if (type === 'contact') {
- // Handle single contact response - API returns contact data directly when fetching by ID
if (data?.id) {
- // Single contact response
const item = {
id: data.id?.toString() || '',
name: `${data.first_name || ''} ${data.last_name || ''}`.trim() || `Contact ${data.id}`,
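Aside (not part of the diff): both routes above lean on validators from '@/lib/security/input-validation' that return an { isValid, error } result. A minimal sketch of what a validatePathSegment-style helper could look like; the real implementation may differ, and the allowlist details here are assumptions for illustration only.

// Hypothetical sketch of the validation-result pattern used above; not the real helper.
interface ValidationResult {
  isValid: boolean
  error?: string
}

interface PathSegmentOptions {
  paramName: string
  maxLength?: number
  allowHyphens?: boolean
  allowUnderscores?: boolean
  allowDots?: boolean
}

function validatePathSegment(value: string, opts: PathSegmentOptions): ValidationResult {
  const { paramName, maxLength = 100, allowHyphens, allowUnderscores, allowDots } = opts
  if (!value || value.length > maxLength) {
    return { isValid: false, error: `${paramName} must be 1-${maxLength} characters` }
  }
  // Build an allowlist rather than trying to blocklist dangerous characters
  let allowed = 'a-zA-Z0-9'
  if (allowHyphens) allowed += '\\-'
  if (allowUnderscores) allowed += '_'
  if (allowDots) allowed += '.'
  if (!new RegExp(`^[${allowed}]+$`).test(value)) {
    return { isValid: false, error: `${paramName} contains invalid characters` }
  }
  return { isValid: true }
}

Validating path segments this way before interpolating them into an upstream URL (as both routes do) keeps untrusted IDs from smuggling in slashes or query syntax.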
@@ -6,7 +6,7 @@ import { z } from 'zod'
import { renderOTPEmail } from '@/components/emails/render-email'
import { sendEmail } from '@/lib/email/mailer'
import { createLogger } from '@/lib/logs/console/logger'
- import { getRedisClient } from '@/lib/redis'
+ import { getRedisClient, markMessageAsProcessed, releaseLock } from '@/lib/redis'
import { generateRequestId } from '@/lib/utils'
import { addCorsHeaders, setChatAuthCookie } from '@/app/api/chat/utils'
import { createErrorResponse, createSuccessResponse } from '@/app/api/workflows/utils'
@@ -21,52 +21,83 @@ function generateOTP() {
// We use 15 minutes (900 seconds) expiry for OTPs
const OTP_EXPIRY = 15 * 60

- async function storeOTP(email: string, chatId: string, otp: string): Promise<boolean> {
+ // Store OTP in Redis
+ async function storeOTP(email: string, chatId: string, otp: string): Promise<void> {
const key = `otp:${email}:${chatId}`
const redis = getRedisClient()

- if (!redis) {
- logger.warn('Redis not available, OTP functionality requires Redis')
- return false
- }
-
- try {
+ if (redis) {
+ // Use Redis if available
await redis.set(key, otp, 'EX', OTP_EXPIRY)
- return true
- } catch (error) {
- logger.error('Error storing OTP in Redis:', error)
- return false
+ } else {
+ // Use the existing function as fallback to mark that an OTP exists
+ await markMessageAsProcessed(key, OTP_EXPIRY)
+
+ // For the fallback case, we need to handle storing the OTP value separately
+ // since markMessageAsProcessed only stores "1"
+ const valueKey = `${key}:value`
+ try {
+ // Access the in-memory cache directly - hacky but works for fallback
+ const inMemoryCache = (global as any).inMemoryCache
+ if (inMemoryCache) {
+ const fullKey = `processed:${valueKey}`
+ const expiry = OTP_EXPIRY ? Date.now() + OTP_EXPIRY * 1000 : null
+ inMemoryCache.set(fullKey, { value: otp, expiry })
+ }
+ } catch (error) {
+ logger.error('Error storing OTP in fallback cache:', error)
+ }
+ }
}

+ // Get OTP from Redis
async function getOTP(email: string, chatId: string): Promise<string | null> {
const key = `otp:${email}:${chatId}`
const redis = getRedisClient()

- if (!redis) {
- return null
- }
-
- try {
+ if (redis) {
+ // Use Redis if available
return await redis.get(key)
- } catch (error) {
- logger.error('Error getting OTP from Redis:', error)
}
+ // Use the existing function as fallback - check if it exists
+ const exists = await new Promise((resolve) => {
+ try {
+ // Check the in-memory cache directly - hacky but works for fallback
+ const inMemoryCache = (global as any).inMemoryCache
+ const fullKey = `processed:${key}`
+ const cacheEntry = inMemoryCache?.get(fullKey)
+ resolve(!!cacheEntry)
+ } catch {
+ resolve(false)
+ }
+ })
+
+ if (!exists) return null
+
+ // Try to get the value key
+ const valueKey = `${key}:value`
+ try {
+ const inMemoryCache = (global as any).inMemoryCache
+ const fullKey = `processed:${valueKey}`
+ const cacheEntry = inMemoryCache?.get(fullKey)
+ return cacheEntry?.value || null
+ } catch {
+ return null
+ }
}

+ // Delete OTP from Redis
async function deleteOTP(email: string, chatId: string): Promise<void> {
const key = `otp:${email}:${chatId}`
const redis = getRedisClient()

- if (!redis) {
- return
- }
-
- try {
+ if (redis) {
+ // Use Redis if available
await redis.del(key)
- } catch (error) {
- logger.error('Error deleting OTP from Redis:', error)
+ } else {
+ // Use the existing function as fallback
+ await releaseLock(`processed:${key}`)
+ await releaseLock(`processed:${key}:value`)
}
}

@@ -146,17 +177,7 @@ export async function POST(

const otp = generateOTP()

- const stored = await storeOTP(email, deployment.id, otp)
- if (!stored) {
- logger.error(`[${requestId}] Failed to store OTP - Redis unavailable`)
- return addCorsHeaders(
- createErrorResponse(
- 'Email verification temporarily unavailable, please try again later',
- 503
- ),
- request
- )
- }
+ await storeOTP(email, deployment.id, otp)

const emailHtml = await renderOTPEmail(
otp,
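Aside (not part of the diff): the verification side of these helpers is implied but not shown in this hunk. A hedged sketch of how storeOTP, getOTP, and deleteOTP would compose, assuming a verify step that treats codes as single-use; the route's actual verification logic may differ.

// Illustrative only - composes the helpers defined in the hunk above.
async function verifyOTP(email: string, chatId: string, submitted: string): Promise<boolean> {
  const stored = await getOTP(email, chatId)
  if (!stored || stored !== submitted) return false
  // One-time use: remove the code as soon as it matches
  await deleteOTP(email, chatId)
  return true
}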
@@ -168,7 +168,7 @@ describe('Copilot Checkpoints Revert API Route', () => {
|
||||
// Mock checkpoint found but workflow not found
|
||||
const mockCheckpoint = {
|
||||
id: 'checkpoint-123',
|
||||
workflowId: 'workflow-456',
|
||||
workflowId: 'a1b2c3d4-e5f6-4a78-b9c0-d1e2f3a4b5c6',
|
||||
userId: 'user-123',
|
||||
workflowState: { blocks: {}, edges: [] },
|
||||
}
|
||||
@@ -196,13 +196,13 @@ describe('Copilot Checkpoints Revert API Route', () => {
|
||||
// Mock checkpoint found but workflow belongs to different user
|
||||
const mockCheckpoint = {
|
||||
id: 'checkpoint-123',
|
||||
workflowId: 'workflow-456',
|
||||
workflowId: 'b2c3d4e5-f6a7-4b89-a0d1-e2f3a4b5c6d7',
|
||||
userId: 'user-123',
|
||||
workflowState: { blocks: {}, edges: [] },
|
||||
}
|
||||
|
||||
const mockWorkflow = {
|
||||
id: 'workflow-456',
|
||||
id: 'b2c3d4e5-f6a7-4b89-a0d1-e2f3a4b5c6d7',
|
||||
userId: 'different-user',
|
||||
}
|
||||
|
||||
@@ -228,7 +228,7 @@ describe('Copilot Checkpoints Revert API Route', () => {
|
||||
|
||||
const mockCheckpoint = {
|
||||
id: 'checkpoint-123',
|
||||
workflowId: 'workflow-456',
|
||||
workflowId: 'c3d4e5f6-a7b8-4c09-a1e2-f3a4b5c6d7e8',
|
||||
userId: 'user-123',
|
||||
workflowState: {
|
||||
blocks: { block1: { type: 'start' } },
|
||||
@@ -241,7 +241,7 @@ describe('Copilot Checkpoints Revert API Route', () => {
|
||||
}
|
||||
|
||||
const mockWorkflow = {
|
||||
id: 'workflow-456',
|
||||
id: 'c3d4e5f6-a7b8-4c09-a1e2-f3a4b5c6d7e8',
|
||||
userId: 'user-123',
|
||||
}
|
||||
|
||||
@@ -274,7 +274,7 @@ describe('Copilot Checkpoints Revert API Route', () => {
|
||||
const responseData = await response.json()
|
||||
expect(responseData).toEqual({
|
||||
success: true,
|
||||
workflowId: 'workflow-456',
|
||||
workflowId: 'c3d4e5f6-a7b8-4c09-a1e2-f3a4b5c6d7e8',
|
||||
checkpointId: 'checkpoint-123',
|
||||
revertedAt: '2024-01-01T00:00:00.000Z',
|
||||
checkpoint: {
|
||||
@@ -293,7 +293,7 @@ describe('Copilot Checkpoints Revert API Route', () => {
|
||||
|
||||
// Verify fetch was called with correct parameters
|
||||
expect(global.fetch).toHaveBeenCalledWith(
|
||||
'http://localhost:3000/api/workflows/workflow-456/state',
|
||||
'http://localhost:3000/api/workflows/c3d4e5f6-a7b8-4c09-a1e2-f3a4b5c6d7e8/state',
|
||||
{
|
||||
method: 'PUT',
|
||||
headers: {
|
||||
@@ -319,7 +319,7 @@ describe('Copilot Checkpoints Revert API Route', () => {
|
||||
|
||||
const mockCheckpoint = {
|
||||
id: 'checkpoint-with-date',
|
||||
workflowId: 'workflow-456',
|
||||
workflowId: 'd4e5f6a7-b8c9-4d10-a2e3-a4b5c6d7e8f9',
|
||||
userId: 'user-123',
|
||||
workflowState: {
|
||||
blocks: {},
|
||||
@@ -330,7 +330,7 @@ describe('Copilot Checkpoints Revert API Route', () => {
|
||||
}
|
||||
|
||||
const mockWorkflow = {
|
||||
id: 'workflow-456',
|
||||
id: 'd4e5f6a7-b8c9-4d10-a2e3-a4b5c6d7e8f9',
|
||||
userId: 'user-123',
|
||||
}
|
||||
|
||||
@@ -360,7 +360,7 @@ describe('Copilot Checkpoints Revert API Route', () => {
|
||||
|
||||
const mockCheckpoint = {
|
||||
id: 'checkpoint-invalid-date',
|
||||
workflowId: 'workflow-456',
|
||||
workflowId: 'e5f6a7b8-c9d0-4e11-a3f4-b5c6d7e8f9a0',
|
||||
userId: 'user-123',
|
||||
workflowState: {
|
||||
blocks: {},
|
||||
@@ -371,7 +371,7 @@ describe('Copilot Checkpoints Revert API Route', () => {
|
||||
}
|
||||
|
||||
const mockWorkflow = {
|
||||
id: 'workflow-456',
|
||||
id: 'e5f6a7b8-c9d0-4e11-a3f4-b5c6d7e8f9a0',
|
||||
userId: 'user-123',
|
||||
}
|
||||
|
||||
@@ -401,7 +401,7 @@ describe('Copilot Checkpoints Revert API Route', () => {
|
||||
|
||||
const mockCheckpoint = {
|
||||
id: 'checkpoint-null-values',
|
||||
workflowId: 'workflow-456',
|
||||
workflowId: 'f6a7b8c9-d0e1-4f23-a4b5-c6d7e8f9a0b1',
|
||||
userId: 'user-123',
|
||||
workflowState: {
|
||||
      blocks: null,

@@ -413,7 +413,7 @@ describe('Copilot Checkpoints Revert API Route', () => {
      }

      const mockWorkflow = {
-       id: 'workflow-456',
+       id: 'f6a7b8c9-d0e1-4f23-a4b5-c6d7e8f9a0b1',
        userId: 'user-123',
      }

@@ -452,13 +452,13 @@ describe('Copilot Checkpoints Revert API Route', () => {

      const mockCheckpoint = {
        id: 'checkpoint-123',
-       workflowId: 'workflow-456',
+       workflowId: 'a7b8c9d0-e1f2-4a34-b5c6-d7e8f9a0b1c2',
        userId: 'user-123',
        workflowState: { blocks: {}, edges: [] },
      }

      const mockWorkflow = {
-       id: 'workflow-456',
+       id: 'a7b8c9d0-e1f2-4a34-b5c6-d7e8f9a0b1c2',
        userId: 'user-123',
      }

@@ -510,7 +510,7 @@ describe('Copilot Checkpoints Revert API Route', () => {

      const mockCheckpoint = {
        id: 'checkpoint-123',
-       workflowId: 'workflow-456',
+       workflowId: 'b8c9d0e1-f2a3-4b45-a6d7-e8f9a0b1c2d3',
        userId: 'user-123',
        workflowState: { blocks: {}, edges: [] },
      }
@@ -537,13 +537,13 @@ describe('Copilot Checkpoints Revert API Route', () => {

      const mockCheckpoint = {
        id: 'checkpoint-123',
-       workflowId: 'workflow-456',
+       workflowId: 'c9d0e1f2-a3b4-4c56-a7e8-f9a0b1c2d3e4',
        userId: 'user-123',
        workflowState: { blocks: {}, edges: [] },
      }

      const mockWorkflow = {
-       id: 'workflow-456',
+       id: 'c9d0e1f2-a3b4-4c56-a7e8-f9a0b1c2d3e4',
        userId: 'user-123',
      }

@@ -594,13 +594,13 @@ describe('Copilot Checkpoints Revert API Route', () => {

      const mockCheckpoint = {
        id: 'checkpoint-123',
-       workflowId: 'workflow-456',
+       workflowId: 'd0e1f2a3-b4c5-4d67-a8f9-a0b1c2d3e4f5',
        userId: 'user-123',
        workflowState: { blocks: {}, edges: [] },
      }

      const mockWorkflow = {
-       id: 'workflow-456',
+       id: 'd0e1f2a3-b4c5-4d67-a8f9-a0b1c2d3e4f5',
        userId: 'user-123',
      }

@@ -626,7 +626,7 @@ describe('Copilot Checkpoints Revert API Route', () => {
      await POST(req)

      expect(global.fetch).toHaveBeenCalledWith(
-       'http://localhost:3000/api/workflows/workflow-456/state',
+       'http://localhost:3000/api/workflows/d0e1f2a3-b4c5-4d67-a8f9-a0b1c2d3e4f5/state',
        {
          method: 'PUT',
          headers: {
@@ -644,13 +644,13 @@ describe('Copilot Checkpoints Revert API Route', () => {

      const mockCheckpoint = {
        id: 'checkpoint-123',
-       workflowId: 'workflow-456',
+       workflowId: 'e1f2a3b4-c5d6-4e78-a9a0-b1c2d3e4f5a6',
        userId: 'user-123',
        workflowState: { blocks: {}, edges: [] },
      }

      const mockWorkflow = {
-       id: 'workflow-456',
+       id: 'e1f2a3b4-c5d6-4e78-a9a0-b1c2d3e4f5a6',
        userId: 'user-123',
      }

@@ -677,7 +677,7 @@ describe('Copilot Checkpoints Revert API Route', () => {

      expect(response.status).toBe(200)
      expect(global.fetch).toHaveBeenCalledWith(
-       'http://localhost:3000/api/workflows/workflow-456/state',
+       'http://localhost:3000/api/workflows/e1f2a3b4-c5d6-4e78-a9a0-b1c2d3e4f5a6/state',
        {
          method: 'PUT',
          headers: {
@@ -695,7 +695,7 @@ describe('Copilot Checkpoints Revert API Route', () => {

      const mockCheckpoint = {
        id: 'checkpoint-complex',
-       workflowId: 'workflow-456',
+       workflowId: 'f2a3b4c5-d6e7-4f89-a0b1-c2d3e4f5a6b7',
        userId: 'user-123',
        workflowState: {
          blocks: {
@@ -723,7 +723,7 @@ describe('Copilot Checkpoints Revert API Route', () => {
      }

      const mockWorkflow = {
-       id: 'workflow-456',
+       id: 'f2a3b4c5-d6e7-4f89-a0b1-c2d3e4f5a6b7',
        userId: 'user-123',
      }

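Note: the tests above swap the placeholder id 'workflow-456' for RFC 4122 v4 UUIDs because the revert route now runs checkpoint.workflowId through validateUUID before building the internal state URL. As a rough sketch of the shape such a guard typically takes (the regex and return type here are assumptions, not the actual implementation in @/lib/security/input-validation):

    // Hypothetical sketch of a v4-UUID guard; the real validateUUID may
    // differ in shape and strictness.
    interface ValidationResult {
      isValid: boolean
      error?: string
    }

    const UUID_V4 =
      /^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$/i

    function validateUUIDSketch(value: string, fieldName: string): ValidationResult {
      return UUID_V4.test(value)
        ? { isValid: true }
        : { isValid: false, error: `${fieldName} must be a valid v4 UUID` }
    }

Every replacement id in the tests (version nibble 4, variant nibble 8-b) satisfies a check of this form, which is why 'workflow-456' could no longer be used.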
@@ -11,6 +11,7 @@ import {
   createUnauthorizedResponse,
 } from '@/lib/copilot/auth'
 import { createLogger } from '@/lib/logs/console/logger'
+import { validateUUID } from '@/lib/security/input-validation'

 const logger = createLogger('CheckpointRevertAPI')

@@ -36,7 +37,6 @@ export async function POST(request: NextRequest) {

    logger.info(`[${tracker.requestId}] Reverting to checkpoint ${checkpointId}`)

-   // Get the checkpoint and verify ownership
    const checkpoint = await db
      .select()
      .from(workflowCheckpoints)
@@ -47,7 +47,6 @@ export async function POST(request: NextRequest) {
      return createNotFoundResponse('Checkpoint not found or access denied')
    }

-   // Verify user still has access to the workflow
    const workflowData = await db
      .select()
      .from(workflowTable)
@@ -62,10 +61,8 @@ export async function POST(request: NextRequest) {
      return createUnauthorizedResponse()
    }

-   // Apply the checkpoint state to the workflow using the existing state endpoint
    const checkpointState = checkpoint.workflowState as any // Cast to any for property access

-   // Clean the checkpoint state to remove any null/undefined values that could cause validation errors
    const cleanedState = {
      blocks: checkpointState?.blocks || {},
      edges: checkpointState?.edges || [],
@@ -74,7 +71,6 @@ export async function POST(request: NextRequest) {
      isDeployed: checkpointState?.isDeployed || false,
      deploymentStatuses: checkpointState?.deploymentStatuses || {},
      lastSaved: Date.now(),
-     // Only include deployedAt if it's a valid date string that can be converted
      ...(checkpointState?.deployedAt &&
        checkpointState.deployedAt !== null &&
        checkpointState.deployedAt !== undefined &&
@@ -90,13 +86,19 @@ export async function POST(request: NextRequest) {
      isDeployed: cleanedState.isDeployed,
    })

+   const workflowIdValidation = validateUUID(checkpoint.workflowId, 'workflowId')
+   if (!workflowIdValidation.isValid) {
+     logger.error(`[${tracker.requestId}] Invalid workflow ID: ${workflowIdValidation.error}`)
+     return NextResponse.json({ error: 'Invalid workflow ID format' }, { status: 400 })
+   }
+
    const stateResponse = await fetch(
      `${request.nextUrl.origin}/api/workflows/${checkpoint.workflowId}/state`,
      {
        method: 'PUT',
        headers: {
          'Content-Type': 'application/json',
-         Cookie: request.headers.get('Cookie') || '', // Forward auth cookies
+         Cookie: request.headers.get('Cookie') || '',
        },
        body: JSON.stringify(cleanedState),
      }
@@ -123,7 +125,7 @@ describe('Copilot Checkpoints Revert API Route', () => {
      revertedAt: new Date().toISOString(),
      checkpoint: {
        id: checkpoint.id,
-       workflowState: cleanedState, // Return the reverted state for frontend use
+       workflowState: cleanedState,
      },
    })
  } catch (error) {

@@ -6,6 +6,7 @@ import binaryExtensionsList from 'binary-extensions'
 import { type NextRequest, NextResponse } from 'next/server'
 import { isSupportedFileType, parseFile } from '@/lib/file-parsers'
 import { createLogger } from '@/lib/logs/console/logger'
+import { validateExternalUrl } from '@/lib/security/input-validation'
 import { downloadFile, isUsingCloudStorage } from '@/lib/uploads'
 import { UPLOAD_DIR_SERVER } from '@/lib/uploads/setup.server'
 import '@/lib/uploads/setup.server'
@@ -220,6 +221,16 @@ async function handleExternalUrl(url: string, fileType?: string): Promise<ParseR
   try {
     logger.info('Fetching external URL:', url)

+    const urlValidation = validateExternalUrl(url, 'fileUrl')
+    if (!urlValidation.isValid) {
+      logger.warn(`Blocked external URL request: ${urlValidation.error}`)
+      return {
+        success: false,
+        error: urlValidation.error || 'Invalid external URL',
+        filePath: url,
+      }
+    }
+
     const response = await fetch(url, {
       signal: AbortSignal.timeout(DOWNLOAD_TIMEOUT_MS),
     })

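The validateExternalUrl guard above is what stands between the route and an SSRF-style fetch of an arbitrary URL. A minimal sketch of what such a check usually covers, assuming a scheme allowlist plus private/metadata address ranges (the exact rules of the real helper are not shown in this diff, and a production check would also need to handle DNS resolution and redirects):

    // Illustrative only; names and rules below are assumptions.
    function validateExternalUrlSketch(raw: string, fieldName: string) {
      let url: URL
      try {
        url = new URL(raw)
      } catch {
        return { isValid: false, error: `${fieldName} is not a valid URL` }
      }
      if (url.protocol !== 'http:' && url.protocol !== 'https:') {
        return { isValid: false, error: `${fieldName} must use http(s)` }
      }
      const host = url.hostname
      const isPrivate =
        host === 'localhost' ||
        host === '169.254.169.254' || // cloud metadata endpoint
        /^127\./.test(host) ||
        /^10\./.test(host) ||
        /^192\.168\./.test(host) ||
        /^172\.(1[6-9]|2\d|3[01])\./.test(host)
      return isPrivate
        ? { isValid: false, error: `${fieldName} resolves to a private address` }
        : { isValid: true }
    }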
@@ -9,23 +9,22 @@ import {
   InvalidRequestError,
 } from '@/app/api/files/utils'

-// Allowlist of permitted file extensions for security
 const ALLOWED_EXTENSIONS = new Set([
   // Documents
   'pdf',
   'doc',
   'docx',
   'txt',
   'md',
   // Images (safe formats)
   'png',
   'jpg',
   'jpeg',
   'gif',
   // Data files
   'csv',
   'xlsx',
   'xls',
   'json',
   'yaml',
   'yml',
 ])

 /**
@@ -50,19 +49,16 @@ export async function POST(request: NextRequest) {

    const formData = await request.formData()

-   // Check if multiple files are being uploaded or a single file
    const files = formData.getAll('file') as File[]

    if (!files || files.length === 0) {
      throw new InvalidRequestError('No files provided')
    }

-   // Get optional scoping parameters for execution-scoped storage
    const workflowId = formData.get('workflowId') as string | null
    const executionId = formData.get('executionId') as string | null
    const workspaceId = formData.get('workspaceId') as string | null

-   // Log storage mode
    const usingCloudStorage = isUsingCloudStorage()
    logger.info(`Using storage mode: ${usingCloudStorage ? 'Cloud' : 'Local'} for file upload`)

@@ -74,7 +70,6 @@ export async function POST(request: NextRequest) {

    const uploadResults = []

-   // Process each file
    for (const file of files) {
      const originalName = file.name

@@ -88,9 +83,7 @@ export async function POST(request: NextRequest) {
      const bytes = await file.arrayBuffer()
      const buffer = Buffer.from(bytes)

-     // For execution-scoped files, use the dedicated execution file storage
      if (workflowId && executionId) {
-       // Use the dedicated execution file storage system
        const { uploadExecutionFile } = await import('@/lib/workflows/execution-file-storage')
        const userFile = await uploadExecutionFile(
          {
@@ -107,13 +100,10 @@ export async function POST(request: NextRequest) {
        continue
      }

-     // Upload to cloud or local storage using the standard uploadFile function
      try {
        logger.info(`Uploading file: ${originalName}`)
        const result = await uploadFile(buffer, originalName, file.type, file.size)

-       // Generate a presigned URL for cloud storage with appropriate expiry
-       // Regular files get 24 hours (execution files are handled above)
        let presignedUrl: string | undefined
        if (usingCloudStorage) {
          try {
@@ -144,7 +134,6 @@ export async function POST(request: NextRequest) {
      }
    }

-   // Return all file information
    if (uploadResults.length === 1) {
      return NextResponse.json(uploadResults[0])
    }
@@ -155,7 +144,6 @@ export async function POST(request: NextRequest) {
    }
  }

-// Handle preflight requests
 export async function OPTIONS() {
   return createOptionsResponse()
 }

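The ALLOWED_EXTENSIONS allowlist above only pays off if every uploaded filename is checked against it before any write. A plausible guard of the kind the route would run inside its upload loop (checkExtension is a hypothetical name for illustration, not a function from the repo; where exactly the route performs this check is not visible in the hunks shown):

    // Sketch: reject files whose extension is not in the allowlist.
    function checkExtension(originalName: string, allowed: Set<string>): boolean {
      const ext = originalName.split('.').pop()?.toLowerCase() || ''
      return ext.length > 0 && allowed.has(ext)
    }

    // Hypothetical usage inside the upload loop:
    // if (!checkExtension(file.name, ALLOWED_EXTENSIONS)) {
    //   throw new InvalidRequestError('File type not allowed')
    // }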
@@ -6,9 +6,6 @@ import { UPLOAD_DIR } from '@/lib/uploads/setup'

 const logger = createLogger('FilesUtils')

-/**
- * Response type definitions
- */
 export interface ApiSuccessResponse {
   success: true
   [key: string]: any
@@ -25,9 +22,6 @@ export interface FileResponse {
   filename: string
 }

-/**
- * Custom error types
- */
 export class FileNotFoundError extends Error {
   constructor(message: string) {
     super(message)
@@ -42,9 +36,6 @@ export class InvalidRequestError extends Error {
   }
 }

-/**
- * Maps file extensions to MIME types
- */
 export const contentTypeMap: Record<string, string> = {
   // Text formats
   txt: 'text/plain',
@@ -79,9 +70,6 @@ export const contentTypeMap: Record<string, string> = {
   googleFolder: 'application/vnd.google-apps.folder',
 }

-/**
- * List of binary file extensions
- */
 export const binaryExtensions = [
   'doc',
   'docx',
@@ -97,38 +85,23 @@ export const binaryExtensions = [
   'pdf',
 ]

-/**
- * Determine content type from file extension
- */
 export function getContentType(filename: string): string {
   const extension = filename.split('.').pop()?.toLowerCase() || ''
   return contentTypeMap[extension] || 'application/octet-stream'
 }

-/**
- * Check if a path is an S3 path
- */
 export function isS3Path(path: string): boolean {
   return path.includes('/api/files/serve/s3/')
 }

-/**
- * Check if a path is a Blob path
- */
 export function isBlobPath(path: string): boolean {
   return path.includes('/api/files/serve/blob/')
 }

-/**
- * Check if a path points to cloud storage (S3, Blob, or generic cloud)
- */
 export function isCloudPath(path: string): boolean {
   return isS3Path(path) || isBlobPath(path)
 }

-/**
- * Generic function to extract storage key from a path
- */
 export function extractStorageKey(path: string, storageType: 's3' | 'blob'): string {
   const prefix = `/api/files/serve/${storageType}/`
   if (path.includes(prefix)) {
@@ -137,23 +110,14 @@ export function extractStorageKey(path: string, storageType: 's3' | 'blob'): str
   return path
 }

-/**
- * Extract S3 key from a path
- */
 export function extractS3Key(path: string): string {
   return extractStorageKey(path, 's3')
 }

-/**
- * Extract Blob key from a path
- */
 export function extractBlobKey(path: string): string {
   return extractStorageKey(path, 'blob')
 }

-/**
- * Extract filename from a serve path
- */
 export function extractFilename(path: string): string {
   let filename: string

@@ -168,25 +132,20 @@ export function extractFilename(path: string): string {
     .replace(/\/\.\./g, '')
     .replace(/\.\.\//g, '')

-  // Handle cloud storage paths (s3/key, blob/key) - preserve forward slashes for these
   if (filename.startsWith('s3/') || filename.startsWith('blob/')) {
-    // For cloud paths, only sanitize the key portion after the prefix
     const parts = filename.split('/')
     const prefix = parts[0] // 's3' or 'blob'
     const keyParts = parts.slice(1)

-    // Sanitize each part of the key to prevent traversal
     const sanitizedKeyParts = keyParts
       .map((part) => part.replace(/\.\./g, '').replace(/^\./g, '').trim())
       .filter((part) => part.length > 0)

     filename = `${prefix}/${sanitizedKeyParts.join('/')}`
   } else {
-    // For regular filenames, remove any remaining path separators
     filename = filename.replace(/[/\\]/g, '')
   }

-  // Additional validation: ensure filename is not empty after sanitization
   if (!filename || filename.trim().length === 0) {
     throw new Error('Invalid or empty filename after sanitization')
   }
@@ -194,19 +153,12 @@ export function extractFilename(path: string): string {
   return filename
 }

-/**
- * Sanitize filename to prevent path traversal attacks
- */
 function sanitizeFilename(filename: string): string {
   if (!filename || typeof filename !== 'string') {
     throw new Error('Invalid filename provided')
   }

-  const sanitized = filename
-    .replace(/\.\./g, '') // Remove .. sequences
-    .replace(/[/\\]/g, '') // Remove path separators
-    .replace(/^\./g, '') // Remove leading dots
-    .trim()
+  const sanitized = filename.replace(/\.\./g, '').replace(/[/\\]/g, '').replace(/^\./g, '').trim()

   if (!sanitized || sanitized.length === 0) {
     throw new Error('Invalid or empty filename after sanitization')
@@ -217,8 +169,8 @@ function sanitizeFilename(filename: string): string {
     sanitized.includes('|') ||
     sanitized.includes('?') ||
     sanitized.includes('*') ||
-    sanitized.includes('\x00') || // Null bytes
-    /[\x00-\x1F\x7F]/.test(sanitized) // Control characters
+    sanitized.includes('\x00') ||
+    /[\x00-\x1F\x7F]/.test(sanitized)
   ) {
     throw new Error('Filename contains invalid characters')
   }
@@ -226,9 +178,6 @@ function sanitizeFilename(filename: string): string {
   return sanitized
 }

-/**
- * Find a file in possible local storage locations with proper path validation
- */
 export function findLocalFile(filename: string): string | null {
   try {
     const sanitizedFilename = sanitizeFilename(filename)
@@ -247,7 +196,7 @@ export function findLocalFile(filename: string): string | null {
     )

     if (!isWithinAllowedDir) {
-      continue // Skip this path as it's outside allowed directories
+      continue
     }

     if (existsSync(resolvedPath)) {
@@ -273,32 +222,24 @@ const SAFE_INLINE_TYPES = new Set([
   'application/json',
 ])

-// File extensions that should always be served as attachment for security
 const FORCE_ATTACHMENT_EXTENSIONS = new Set(['html', 'htm', 'svg', 'js', 'css', 'xml'])

-/**
- * Determines safe content type and disposition for file serving
- */
 function getSecureFileHeaders(filename: string, originalContentType: string) {
   const extension = filename.split('.').pop()?.toLowerCase() || ''

-  // Force attachment for potentially dangerous file types
   if (FORCE_ATTACHMENT_EXTENSIONS.has(extension)) {
     return {
-      contentType: 'application/octet-stream', // Force download
+      contentType: 'application/octet-stream',
       disposition: 'attachment',
     }
   }

-  // Override content type for safety while preserving legitimate use cases
   let safeContentType = originalContentType

-  // Handle potentially dangerous content types
   if (originalContentType === 'text/html' || originalContentType === 'image/svg+xml') {
-    safeContentType = 'text/plain' // Prevent browser rendering
+    safeContentType = 'text/plain'
   }

-  // Use inline only for verified safe content types
   const disposition = SAFE_INLINE_TYPES.has(safeContentType) ? 'inline' : 'attachment'

   return {
@@ -307,10 +248,6 @@ function getSecureFileHeaders(filename: string, originalContentType: string) {
   }
 }

-/**
- * Encode filename for Content-Disposition header to support non-ASCII characters
- * Uses RFC 5987 encoding for international characters
- */
 function encodeFilenameForHeader(filename: string): string {
   const hasNonAscii = /[^\x00-\x7F]/.test(filename)

@@ -323,9 +260,6 @@ function encodeFilenameForHeader(filename: string): string {
   return `filename="${asciiSafe}"; filename*=UTF-8''${encodedFilename}`
 }

-/**
- * Create a file response with appropriate security headers
- */
 export function createFileResponse(file: FileResponse): NextResponse {
   const { contentType, disposition } = getSecureFileHeaders(file.filename, file.contentType)

@@ -334,18 +268,14 @@ export function createFileResponse(file: FileResponse): NextResponse {
     headers: {
       'Content-Type': contentType,
       'Content-Disposition': `${disposition}; ${encodeFilenameForHeader(file.filename)}`,
-      'Cache-Control': 'public, max-age=31536000', // Cache for 1 year
+      'Cache-Control': 'public, max-age=31536000',
       'X-Content-Type-Options': 'nosniff',
       'Content-Security-Policy': "default-src 'none'; style-src 'unsafe-inline'; sandbox;",
     },
   })
 }

-/**
- * Create a standardized error response
- */
 export function createErrorResponse(error: Error, status = 500): NextResponse {
-  // Map error types to appropriate status codes
   const statusCode =
     error instanceof FileNotFoundError ? 404 : error instanceof InvalidRequestError ? 400 : status

@@ -358,16 +288,10 @@ export function createErrorResponse(error: Error, status = 500): NextResponse {
   )
 }

-/**
- * Create a standardized success response
- */
 export function createSuccessResponse(data: ApiSuccessResponse): NextResponse {
   return NextResponse.json(data)
 }

-/**
- * Handle CORS preflight requests
- */
 export function createOptionsResponse(): NextResponse {
   return new NextResponse(null, {
     status: 204,

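encodeFilenameForHeader above emits both a plain filename and an RFC 5987 filename* parameter so non-ASCII names survive the Content-Disposition header. A sketch consistent with the return statement visible in the diff (the strategy for computing the asciiSafe fallback is an assumption, since that part of the function is not shown):

    // Sketch of RFC 5987 Content-Disposition encoding. Only the return
    // line is confirmed by the diff; the fallback computation is assumed.
    function encodeFilenameForHeaderSketch(filename: string): string {
      const hasNonAscii = /[^\x00-\x7F]/.test(filename)
      if (!hasNonAscii) {
        return `filename="${filename}"`
      }
      const asciiSafe = filename.replace(/[^\x00-\x7F]/g, '_') // legacy clients
      const encodedFilename = encodeURIComponent(filename) // RFC 5987 clients
      return `filename="${asciiSafe}"; filename*=UTF-8''${encodedFilename}`
    }

Browsers that understand filename* prefer it, while older clients fall back to the plain ASCII-safe filename.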
@@ -67,7 +67,7 @@ describe('Function Execute API Route', () => {
   })

   it.concurrent('should block SSRF attacks through secure fetch wrapper', async () => {
-    const { validateProxyUrl } = await import('@/lib/security/url-validation')
+    const { validateProxyUrl } = await import('@/lib/security/input-validation')

     expect(validateProxyUrl('http://169.254.169.254/latest/meta-data/').isValid).toBe(false)
     expect(validateProxyUrl('http://127.0.0.1:8080/admin').isValid).toBe(false)
@@ -76,15 +76,15 @@ describe('Function Execute API Route', () => {
   })

   it.concurrent('should allow legitimate external URLs', async () => {
-    const { validateProxyUrl } = await import('@/lib/security/url-validation')
+    const { validateProxyUrl } = await import('@/lib/security/input-validation')

     expect(validateProxyUrl('https://api.github.com/user').isValid).toBe(true)
     expect(validateProxyUrl('https://httpbin.org/get').isValid).toBe(true)
-    expect(validateProxyUrl('http://example.com/api').isValid).toBe(true)
+    expect(validateProxyUrl('https://example.com/api').isValid).toBe(true)
   })

   it.concurrent('should block dangerous protocols', async () => {
-    const { validateProxyUrl } = await import('@/lib/security/url-validation')
+    const { validateProxyUrl } = await import('@/lib/security/input-validation')

     expect(validateProxyUrl('file:///etc/passwd').isValid).toBe(false)
     expect(validateProxyUrl('ftp://internal.server/files').isValid).toBe(false)

@@ -4,7 +4,7 @@ import { env, isTruthy } from '@/lib/env'
 import { executeInE2B } from '@/lib/execution/e2b'
 import { CodeLanguage, DEFAULT_CODE_LANGUAGE, isValidCodeLanguage } from '@/lib/execution/languages'
 import { createLogger } from '@/lib/logs/console/logger'
-import { validateProxyUrl } from '@/lib/security/url-validation'
+import { validateProxyUrl } from '@/lib/security/input-validation'
 import { generateRequestId } from '@/lib/utils'
 export const dynamic = 'force-dynamic'
 export const runtime = 'nodejs'

apps/sim/app/api/guardrails/validate/route.ts (new file, 239 lines)
@@ -0,0 +1,239 @@
import { type NextRequest, NextResponse } from 'next/server'
import { validateHallucination } from '@/lib/guardrails/validate_hallucination'
import { validateJson } from '@/lib/guardrails/validate_json'
import { validatePII } from '@/lib/guardrails/validate_pii'
import { validateRegex } from '@/lib/guardrails/validate_regex'
import { createLogger } from '@/lib/logs/console/logger'
import { generateRequestId } from '@/lib/utils'

const logger = createLogger('GuardrailsValidateAPI')

export async function POST(request: NextRequest) {
  const requestId = generateRequestId()
  logger.info(`[${requestId}] Guardrails validation request received`)

  try {
    const body = await request.json()
    const {
      validationType,
      input,
      regex,
      knowledgeBaseId,
      threshold,
      topK,
      model,
      apiKey,
      workflowId,
      piiEntityTypes,
      piiMode,
      piiLanguage,
    } = body

    if (!validationType) {
      return NextResponse.json({
        success: true,
        output: {
          passed: false,
          validationType: 'unknown',
          input: input || '',
          error: 'Missing required field: validationType',
        },
      })
    }

    if (input === undefined || input === null) {
      return NextResponse.json({
        success: true,
        output: {
          passed: false,
          validationType,
          input: '',
          error: 'Input is missing or undefined',
        },
      })
    }

    if (
      validationType !== 'json' &&
      validationType !== 'regex' &&
      validationType !== 'hallucination' &&
      validationType !== 'pii'
    ) {
      return NextResponse.json({
        success: true,
        output: {
          passed: false,
          validationType,
          input: input || '',
          error: 'Invalid validationType. Must be "json", "regex", "hallucination", or "pii"',
        },
      })
    }

    if (validationType === 'regex' && !regex) {
      return NextResponse.json({
        success: true,
        output: {
          passed: false,
          validationType,
          input: input || '',
          error: 'Regex pattern is required for regex validation',
        },
      })
    }

    if (validationType === 'hallucination' && !model) {
      return NextResponse.json({
        success: true,
        output: {
          passed: false,
          validationType,
          input: input || '',
          error: 'Model is required for hallucination validation',
        },
      })
    }

    const inputStr = convertInputToString(input)

    logger.info(`[${requestId}] Executing validation locally`, {
      validationType,
      inputType: typeof input,
    })

    const validationResult = await executeValidation(
      validationType,
      inputStr,
      regex,
      knowledgeBaseId,
      threshold,
      topK,
      model,
      apiKey,
      workflowId,
      piiEntityTypes,
      piiMode,
      piiLanguage,
      requestId
    )

    logger.info(`[${requestId}] Validation completed`, {
      passed: validationResult.passed,
      hasError: !!validationResult.error,
      score: validationResult.score,
    })

    return NextResponse.json({
      success: true,
      output: {
        passed: validationResult.passed,
        validationType,
        input,
        error: validationResult.error,
        score: validationResult.score,
        reasoning: validationResult.reasoning,
        detectedEntities: validationResult.detectedEntities,
        maskedText: validationResult.maskedText,
      },
    })
  } catch (error: any) {
    logger.error(`[${requestId}] Guardrails validation failed`, { error })
    return NextResponse.json({
      success: true,
      output: {
        passed: false,
        validationType: 'unknown',
        input: '',
        error: error.message || 'Validation failed due to unexpected error',
      },
    })
  }
}

/**
 * Convert input to string for validation
 */
function convertInputToString(input: any): string {
  if (typeof input === 'string') {
    return input
  }
  if (input === null || input === undefined) {
    return ''
  }
  if (typeof input === 'object') {
    return JSON.stringify(input)
  }
  return String(input)
}

/**
 * Execute validation using TypeScript validators
 */
async function executeValidation(
  validationType: string,
  inputStr: string,
  regex: string | undefined,
  knowledgeBaseId: string | undefined,
  threshold: string | undefined,
  topK: string | undefined,
  model: string,
  apiKey: string | undefined,
  workflowId: string | undefined,
  piiEntityTypes: string[] | undefined,
  piiMode: string | undefined,
  piiLanguage: string | undefined,
  requestId: string
): Promise<{
  passed: boolean
  error?: string
  score?: number
  reasoning?: string
  detectedEntities?: any[]
  maskedText?: string
}> {
  // Use TypeScript validators for all validation types
  if (validationType === 'json') {
    return validateJson(inputStr)
  }
  if (validationType === 'regex') {
    if (!regex) {
      return {
        passed: false,
        error: 'Regex pattern is required',
      }
    }
    return validateRegex(inputStr, regex)
  }
  if (validationType === 'hallucination') {
    if (!knowledgeBaseId) {
      return {
        passed: false,
        error: 'Knowledge base ID is required for hallucination check',
      }
    }

    return await validateHallucination({
      userInput: inputStr,
      knowledgeBaseId,
      threshold: threshold != null ? Number.parseFloat(threshold) : 3, // Default threshold is 3 (confidence score, scores < 3 fail)
      topK: topK ? Number.parseInt(topK) : 10, // Default topK is 10
      model: model,
      apiKey,
      workflowId,
      requestId,
    })
  }
  if (validationType === 'pii') {
    return await validatePII({
      text: inputStr,
      entityTypes: piiEntityTypes || [], // Empty array = detect all PII types
      mode: (piiMode as 'block' | 'mask') || 'block', // Default to block mode
      language: piiLanguage || 'en',
      requestId,
    })
  }
  return {
    passed: false,
    error: 'Unknown validation type',
  }
}

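Given the request shape the new route destructures, a call looks like the following. The endpoint path and field names come straight from the file above; the host is illustrative:

    // Example request against the new endpoint.
    const res = await fetch('http://localhost:3000/api/guardrails/validate', {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify({
        validationType: 'regex',
        input: 'order-12345',
        regex: '^order-\\d+$',
      }),
    })
    const { output } = await res.json()
    // output.passed is false on any failure. Note the route responds with
    // success: true even for invalid requests, signalling problems through
    // output.error rather than HTTP status codes.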
@@ -32,6 +32,7 @@ vi.stubGlobal(

 vi.mock('@/lib/env', () => ({
   env: {},
+  getEnv: (key: string) => process.env[key],
   isTruthy: (value: string | boolean | number | undefined) =>
     typeof value === 'string' ? value === 'true' || value === '1' : Boolean(value),
 }))

@@ -17,6 +17,7 @@ vi.mock('drizzle-orm', () => ({

 vi.mock('@/lib/env', () => ({
   env: { OPENAI_API_KEY: 'test-key' },
+  getEnv: (key: string) => process.env[key],
   isTruthy: (value: string | boolean | number | undefined) =>
     typeof value === 'string' ? value === 'true' || value === '1' : Boolean(value),
 }))

@@ -1,6 +1,6 @@
 import { type NextRequest, NextResponse } from 'next/server'
 import { createLogger } from '@/lib/logs/console/logger'
-import { validateImageUrl } from '@/lib/security/url-validation'
+import { validateImageUrl } from '@/lib/security/input-validation'
 import { generateRequestId } from '@/lib/utils'

 const logger = createLogger('ImageProxyAPI')

@@ -2,7 +2,7 @@ import { NextResponse } from 'next/server'
 import { generateInternalToken } from '@/lib/auth/internal'
 import { isDev } from '@/lib/environment'
 import { createLogger } from '@/lib/logs/console/logger'
-import { validateProxyUrl } from '@/lib/security/url-validation'
+import { validateProxyUrl } from '@/lib/security/input-validation'
 import { getBaseUrl } from '@/lib/urls/utils'
 import { generateRequestId } from '@/lib/utils'
 import { executeTool } from '@/tools'

@@ -1,5 +1,6 @@
 import { NextResponse } from 'next/server'
 import { createLogger } from '@/lib/logs/console/logger'
+import { validateAlphanumericId } from '@/lib/security/input-validation'
 import { uploadFile } from '@/lib/uploads/storage-client'
 import { getBaseUrl } from '@/lib/urls/utils'

@@ -14,6 +15,12 @@ export async function POST(request: Request) {
     return new NextResponse('Missing required parameters', { status: 400 })
   }

+  const voiceIdValidation = validateAlphanumericId(voiceId, 'voiceId', 255)
+  if (!voiceIdValidation.isValid) {
+    logger.error(`Invalid voice ID: ${voiceIdValidation.error}`)
+    return new NextResponse(voiceIdValidation.error, { status: 400 })
+  }
+
   logger.info('Proxying TTS request for voice:', voiceId)

   const endpoint = `https://api.elevenlabs.io/v1/text-to-speech/${voiceId}`
@@ -46,13 +53,11 @@ export async function POST(request: Request) {
     return new NextResponse('Empty audio received', { status: 422 })
   }

-  // Upload the audio file to storage and return multiple URL options
   const audioBuffer = Buffer.from(await audioBlob.arrayBuffer())
   const timestamp = Date.now()
   const fileName = `elevenlabs-tts-${timestamp}.mp3`
   const fileInfo = await uploadFile(audioBuffer, fileName, 'audio/mpeg')

-  // Generate the full URL for external use using the configured base URL
   const audioUrl = `${getBaseUrl()}${fileInfo.path}`

   return NextResponse.json({

@@ -1,6 +1,7 @@
 import type { NextRequest } from 'next/server'
 import { env } from '@/lib/env'
 import { createLogger } from '@/lib/logs/console/logger'
+import { validateAlphanumericId } from '@/lib/security/input-validation'

 const logger = createLogger('ProxyTTSStreamAPI')

@@ -13,6 +14,12 @@ export async function POST(request: NextRequest) {
     return new Response('Missing required parameters', { status: 400 })
   }

+  const voiceIdValidation = validateAlphanumericId(voiceId, 'voiceId', 255)
+  if (!voiceIdValidation.isValid) {
+    logger.error(`Invalid voice ID: ${voiceIdValidation.error}`)
+    return new Response(voiceIdValidation.error, { status: 400 })
+  }
+
   const apiKey = env.ELEVENLABS_API_KEY
   if (!apiKey) {
     logger.error('ELEVENLABS_API_KEY not configured on server')
@@ -31,7 +38,6 @@ export async function POST(request: NextRequest) {
     body: JSON.stringify({
       text,
       model_id: modelId,
-      // Maximum performance settings
       optimize_streaming_latency: 4,
       output_format: 'mp3_22050_32', // Fastest format
       voice_settings: {
@@ -42,9 +48,7 @@ export async function POST(request: NextRequest) {
       },
       enable_ssml_parsing: false,
       apply_text_normalization: 'off',
-      // Use auto mode for fastest possible streaming
-      // Note: This may sacrifice some quality for speed
-      use_pvc_as_ivc: false, // Use fastest voice processing
+      use_pvc_as_ivc: false,
     }),
   })

@@ -60,14 +64,11 @@ export async function POST(request: NextRequest) {
     return new Response('No audio stream received', { status: 422 })
   }

-  // Create optimized streaming response
   const { readable, writable } = new TransformStream({
     transform(chunk, controller) {
-      // Pass through chunks immediately without buffering
       controller.enqueue(chunk)
     },
     flush(controller) {
-      // Ensure all data is flushed immediately
       controller.terminate()
     },
   })
@@ -83,7 +84,6 @@ export async function POST(request: NextRequest) {
       await writer.close()
       break
     }
-    // Write immediately without waiting
     writer.write(value).catch(logger.error)
   }
 } catch (error) {
@@ -102,19 +102,15 @@ export async function POST(request: NextRequest) {
       'X-Content-Type-Options': 'nosniff',
       'Access-Control-Allow-Origin': '*',
       Connection: 'keep-alive',
-      // Stream headers for better streaming
-      'X-Accel-Buffering': 'no', // Disable nginx buffering
+      'X-Accel-Buffering': 'no',
       'X-Stream-Type': 'real-time',
     },
   })
 } catch (error) {
   logger.error('Error in Stream TTS:', error)

-  return new Response(
-    `Internal Server Error: ${error instanceof Error ? error.message : 'Unknown error'}`,
-    {
-      status: 500,
-    }
-  )
+  return new Response('Internal Server Error', {
+    status: 500,
+  })
 }
}

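The TransformStream in the handler above is a deliberate pass-through: chunks from the upstream TTS response are enqueued as they arrive rather than buffered, and 'X-Accel-Buffering: no' keeps nginx from re-buffering downstream. A stripped-down version of the same pattern (a sketch, not the route's exact code; the fire-and-forget write is the latency trick):

    // Minimal pass-through streaming sketch.
    async function pipeUpstream(upstream: Response): Promise<Response> {
      const { readable, writable } = new TransformStream<Uint8Array, Uint8Array>()
      const writer = writable.getWriter()
      const reader = upstream.body!.getReader()
      ;(async () => {
        while (true) {
          const { done, value } = await reader.read()
          if (done) {
            await writer.close()
            break
          }
          // Write without awaiting so the next chunk is read immediately.
          writer.write(value).catch(console.error)
        }
      })()
      return new Response(readable, {
        headers: { 'Content-Type': 'audio/mpeg', 'X-Accel-Buffering': 'no' },
      })
    }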
@@ -3,81 +3,50 @@
 *
 * @vitest-environment node
 */
+import type { NextRequest } from 'next/server'
 import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
-import {
-  mockExecutionDependencies,
-  mockScheduleExecuteDb,
-  sampleWorkflowState,
-} from '@/app/api/__test-utils__/utils'

+function createMockRequest(): NextRequest {
+  const mockHeaders = new Map([
+    ['authorization', 'Bearer test-cron-secret'],
+    ['content-type', 'application/json'],
+  ])
+
+  return {
+    headers: {
+      get: (key: string) => mockHeaders.get(key.toLowerCase()) || null,
+    },
+    url: 'http://localhost:3000/api/schedules/execute',
+  } as NextRequest
+}

 describe('Scheduled Workflow Execution API Route', () => {
   beforeEach(() => {
     vi.clearAllMocks()
     vi.resetModules()
   })

-    mockExecutionDependencies()
+  afterEach(() => {
+    vi.clearAllMocks()
+    vi.resetModules()
+  })

-    // Mock all dependencies
-    vi.doMock('@/services/queue', () => ({
-      RateLimiter: vi.fn().mockImplementation(() => ({
-        checkRateLimitWithSubscription: vi.fn().mockResolvedValue({
-          allowed: true,
-          remaining: 100,
-          resetAt: new Date(Date.now() + 60000),
-        }),
-      })),
+  it('should execute scheduled workflows with Trigger.dev disabled', async () => {
+    const mockExecuteScheduleJob = vi.fn().mockResolvedValue(undefined)
+
+    vi.doMock('@/lib/auth/internal', () => ({
+      verifyCronAuth: vi.fn().mockReturnValue(null),
     }))

-    vi.doMock('@/lib/billing', () => ({
-      checkServerSideUsageLimits: vi.fn().mockResolvedValue({ isExceeded: false }),
+    vi.doMock('@/background/schedule-execution', () => ({
+      executeScheduleJob: mockExecuteScheduleJob,
    }))

-    vi.doMock('@/lib/billing/core/subscription', () => ({
-      getHighestPrioritySubscription: vi.fn().mockResolvedValue({
-        plan: 'pro',
-        status: 'active',
-      }),
-    }))
-
-    vi.doMock('@/lib/environment/utils', () => ({
-      getPersonalAndWorkspaceEnv: vi.fn().mockResolvedValue({
-        personalEncrypted: {},
-        workspaceEncrypted: {},
-      }),
-    }))
-
-    vi.doMock('@/lib/logs/execution/logging-session', () => ({
-      LoggingSession: vi.fn().mockImplementation(() => ({
-        safeStart: vi.fn().mockResolvedValue(undefined),
-        safeComplete: vi.fn().mockResolvedValue(undefined),
-        safeCompleteWithError: vi.fn().mockResolvedValue(undefined),
-        setupExecutor: vi.fn(),
-      })),
-    }))
-
-    vi.doMock('@/lib/workflows/db-helpers', () => ({
-      loadDeployedWorkflowState: vi.fn().mockResolvedValue({
-        blocks: sampleWorkflowState.blocks,
-        edges: sampleWorkflowState.edges || [],
-        loops: sampleWorkflowState.loops || {},
-        parallels: sampleWorkflowState.parallels || {},
-      }),
-      loadWorkflowFromNormalizedTables: vi.fn().mockResolvedValue({
-        blocks: sampleWorkflowState.blocks,
-        edges: sampleWorkflowState.edges || [],
-        loops: sampleWorkflowState.loops || {},
-        parallels: {},
-        isFromNormalizedTables: true,
-      }),
-    }))
-
-    vi.doMock('@/stores/workflows/server-utils', () => ({
-      mergeSubblockState: vi.fn().mockReturnValue(sampleWorkflowState.blocks),
-    }))
-
-    vi.doMock('@/lib/schedules/utils', () => ({
-      calculateNextRunTime: vi.fn().mockReturnValue(new Date(Date.now() + 60000)),
-      getScheduleTimeValues: vi.fn().mockReturnValue({}),
-      getSubBlockValue: vi.fn().mockReturnValue('manual'),
+    vi.doMock('@/lib/env', () => ({
+      env: {
+        TRIGGER_DEV_ENABLED: false,
+      },
+      isTruthy: vi.fn(() => false),
     }))

     vi.doMock('drizzle-orm', () => ({
@@ -85,198 +54,209 @@ describe('Scheduled Workflow Execution API Route', () => {
       eq: vi.fn((field, value) => ({ field, value, type: 'eq' })),
       lte: vi.fn((field, value) => ({ field, value, type: 'lte' })),
       not: vi.fn((condition) => ({ type: 'not', condition })),
-      sql: vi.fn((strings, ...values) => ({ strings, values, type: 'sql' })),
     }))

-    vi.doMock('croner', () => ({
-      Cron: vi.fn().mockImplementation(() => ({
-        nextRun: vi.fn().mockReturnValue(new Date(Date.now() + 60000)), // Next run in 1 minute
-      })),
-    }))
-
     vi.doMock('@sim/db', () => {
       const mockDb = {
         select: vi.fn().mockImplementation(() => ({
-          from: vi.fn().mockImplementation((_table: any) => ({
-            where: vi.fn().mockImplementation((_cond: any) => ({
-              limit: vi.fn().mockImplementation((n?: number) => {
-                // Always return empty array - no due schedules
-                return []
-              }),
-            })),
-          })),
-        })),
-        update: vi.fn().mockImplementation(() => ({
-          set: vi.fn().mockImplementation(() => ({
-            where: vi.fn().mockResolvedValue([]),
+          from: vi.fn().mockImplementation(() => ({
+            where: vi.fn().mockImplementation(() => [
+              {
+                id: 'schedule-1',
+                workflowId: 'workflow-1',
+                blockId: null,
+                cronExpression: null,
+                lastRanAt: null,
+                failedCount: 0,
+              },
+            ]),
           })),
         })),
       }

       return {
         db: mockDb,
-        userStats: {
-          userId: 'userId',
-          totalScheduledExecutions: 'totalScheduledExecutions',
-          lastActive: 'lastActive',
-        },
-        workflow: { id: 'id', userId: 'userId', state: 'state' },
-        workflowSchedule: {
-          id: 'id',
-          workflowId: 'workflowId',
-          nextRunAt: 'nextRunAt',
-          status: 'status',
-        },
+        workflowSchedule: {},
       }
     })

     const { GET } = await import('@/app/api/schedules/execute/route')
+    const response = await GET(createMockRequest())

     expect(response).toBeDefined()
     expect(response.status).toBe(200)
     const data = await response.json()
     expect(data).toHaveProperty('message')
+    expect(data).toHaveProperty('executedCount', 1)
   })

-  afterEach(() => {
-    vi.clearAllMocks()
-  })
+  it('should queue schedules to Trigger.dev when enabled', async () => {
+    const mockTrigger = vi.fn().mockResolvedValue({ id: 'task-id-123' })

-  it('should execute scheduled workflows successfully', async () => {
-    const executeMock = vi.fn().mockResolvedValue({
-      success: true,
-      output: { response: 'Scheduled execution completed' },
-      logs: [],
-      metadata: {
-        duration: 100,
-        startTime: new Date().toISOString(),
-        endTime: new Date().toISOString(),
+    vi.doMock('@/lib/auth/internal', () => ({
+      verifyCronAuth: vi.fn().mockReturnValue(null),
+    }))
+
+    vi.doMock('@trigger.dev/sdk', () => ({
+      tasks: {
+        trigger: mockTrigger,
       },
-    })
+    }))

-    vi.doMock('@/executor', () => ({
-      Executor: vi.fn().mockImplementation(() => ({
-        execute: executeMock,
-      })),
-    }))
-
-    const { GET } = await import('@/app/api/schedules/execute/route')
-    const response = await GET()
-    expect(response).toBeDefined()
-
-    const data = await response.json()
-    expect(data).toHaveProperty('message')
-    expect(data).toHaveProperty('executedCount')
-  })
-
-  it('should handle errors during scheduled execution gracefully', async () => {
-    vi.doMock('@/executor', () => ({
-      Executor: vi.fn().mockImplementation(() => ({
-        execute: vi.fn().mockRejectedValue(new Error('Execution failed')),
-      })),
+    vi.doMock('@/lib/env', () => ({
+      env: {
+        TRIGGER_DEV_ENABLED: true,
+      },
+      isTruthy: vi.fn(() => true),
     }))

-    const { GET } = await import('@/app/api/schedules/execute/route')
-    const response = await GET()
+    vi.doMock('drizzle-orm', () => ({
+      and: vi.fn((...conditions) => ({ type: 'and', conditions })),
+      eq: vi.fn((field, value) => ({ field, value, type: 'eq' })),
+      lte: vi.fn((field, value) => ({ field, value, type: 'lte' })),
+      not: vi.fn((condition) => ({ type: 'not', condition })),
+    }))

-    expect(response).toBeDefined()
-
-    const data = await response.json()
-    expect(data).toHaveProperty('message')
-  })
-
-  it('should handle case with no due schedules', async () => {
     vi.doMock('@sim/db', () => {
       const mockDb = {
         select: vi.fn().mockImplementation(() => ({
           from: vi.fn().mockImplementation(() => ({
-            where: vi.fn().mockImplementation(() => ({
-              limit: vi.fn().mockImplementation(() => []),
-            })),
-          })),
-        })),
-        update: vi.fn().mockImplementation(() => ({
-          set: vi.fn().mockImplementation(() => ({
-            where: vi.fn().mockResolvedValue([]),
+            where: vi.fn().mockImplementation(() => [
+              {
+                id: 'schedule-1',
+                workflowId: 'workflow-1',
+                blockId: null,
+                cronExpression: null,
+                lastRanAt: null,
+                failedCount: 0,
+              },
+            ]),
           })),
         })),
       }

-      return { db: mockDb }
+      return {
+        db: mockDb,
+        workflowSchedule: {},
+      }
     })

     const { GET } = await import('@/app/api/schedules/execute/route')
-    const response = await GET()
+    const response = await GET(createMockRequest())

     expect(response).toBeDefined()
     expect(response.status).toBe(200)
     const data = await response.json()
-    expect(data).toHaveProperty('executedCount', 0)
-
-    const executeMock = vi.fn()
-    vi.doMock('@/executor', () => ({
-      Executor: vi.fn().mockImplementation(() => ({
-        execute: executeMock,
-      })),
-    }))
-
-    expect(executeMock).not.toHaveBeenCalled()
+    expect(data).toHaveProperty('executedCount', 1)
   })

+  // Removed: Test isolation issues with mocks make this unreliable

-  it('should execute schedules that are explicitly marked as active', async () => {
-    const executeMock = vi.fn().mockResolvedValue({ success: true, metadata: {} })
-
-    vi.doMock('@/executor', () => ({
-      Executor: vi.fn().mockImplementation(() => ({
-        execute: executeMock,
-      })),
+  it('should handle case with no due schedules', async () => {
+    vi.doMock('@/lib/auth/internal', () => ({
+      verifyCronAuth: vi.fn().mockReturnValue(null),
     }))

-    mockScheduleExecuteDb({
-      schedules: [
-        {
-          id: 'schedule-active',
-          workflowId: 'workflow-id',
-          userId: 'user-id',
-          status: 'active',
-          nextRunAt: new Date(Date.now() - 60_000),
-          lastRanAt: null,
-          cronExpression: null,
-          failedCount: 0,
-        },
-      ],
+    vi.doMock('@/background/schedule-execution', () => ({
+      executeScheduleJob: vi.fn().mockResolvedValue(undefined),
+    }))
+
+    vi.doMock('@/lib/env', () => ({
+      env: {
+        TRIGGER_DEV_ENABLED: false,
+      },
+      isTruthy: vi.fn(() => false),
+    }))
+
+    vi.doMock('drizzle-orm', () => ({
+      and: vi.fn((...conditions) => ({ type: 'and', conditions })),
+      eq: vi.fn((field, value) => ({ field, value, type: 'eq' })),
+      lte: vi.fn((field, value) => ({ field, value, type: 'lte' })),
+      not: vi.fn((condition) => ({ type: 'not', condition })),
+    }))
+
+    vi.doMock('@sim/db', () => {
+      const mockDb = {
+        select: vi.fn().mockImplementation(() => ({
+          from: vi.fn().mockImplementation(() => ({
+            where: vi.fn().mockImplementation(() => []),
+          })),
+        })),
+      }
+
+      return {
+        db: mockDb,
+        workflowSchedule: {},
+      }
     })

     const { GET } = await import('@/app/api/schedules/execute/route')
     const response = await GET()

     expect(response.status).toBe(200)
   })

-  it('should not execute schedules that are disabled', async () => {
-    const executeMock = vi.fn()
-
-    vi.doMock('@/executor', () => ({
-      Executor: vi.fn().mockImplementation(() => ({
-        execute: executeMock,
-      })),
-    }))
-
-    mockScheduleExecuteDb({
-      schedules: [
-        {
-          id: 'schedule-disabled',
-          workflowId: 'workflow-id',
-          userId: 'user-id',
-          status: 'disabled',
-          nextRunAt: new Date(Date.now() - 60_000),
-          lastRanAt: null,
-          cronExpression: null,
-          failedCount: 0,
-        },
-      ],
-    })
-
     const { GET } = await import('@/app/api/schedules/execute/route')
-    const response = await GET()
+    const response = await GET(createMockRequest())

     expect(response.status).toBe(200)
     const data = await response.json()
     expect(data).toHaveProperty('message')
     expect(data).toHaveProperty('executedCount', 0)
   })

-    expect(executeMock).not.toHaveBeenCalled()
+  it('should execute multiple schedules in parallel', async () => {
+    vi.doMock('@/lib/auth/internal', () => ({
+      verifyCronAuth: vi.fn().mockReturnValue(null),
+    }))
+
+    vi.doMock('@/background/schedule-execution', () => ({
+      executeScheduleJob: vi.fn().mockResolvedValue(undefined),
+    }))
+
+    vi.doMock('@/lib/env', () => ({
+      env: {
+        TRIGGER_DEV_ENABLED: false,
+      },
+      isTruthy: vi.fn(() => false),
+    }))
+
+    vi.doMock('drizzle-orm', () => ({
+      and: vi.fn((...conditions) => ({ type: 'and', conditions })),
+      eq: vi.fn((field, value) => ({ field, value, type: 'eq' })),
+      lte: vi.fn((field, value) => ({ field, value, type: 'lte' })),
+      not: vi.fn((condition) => ({ type: 'not', condition })),
+    }))
+
+    vi.doMock('@sim/db', () => {
+      const mockDb = {
+        select: vi.fn().mockImplementation(() => ({
+          from: vi.fn().mockImplementation(() => ({
+            where: vi.fn().mockImplementation(() => [
+              {
+                id: 'schedule-1',
+                workflowId: 'workflow-1',
+                blockId: null,
+                cronExpression: null,
+                lastRanAt: null,
+                failedCount: 0,
+              },
+              {
+                id: 'schedule-2',
+                workflowId: 'workflow-2',
+                blockId: null,
+                cronExpression: null,
+                lastRanAt: null,
+                failedCount: 0,
+              },
+            ]),
+          })),
+        })),
+      }
+
+      return {
+        db: mockDb,
+        workflowSchedule: {},
+      }
+    })
+
+    const { GET } = await import('@/app/api/schedules/execute/route')
+    const response = await GET(createMockRequest())
+
+    expect(response.status).toBe(200)
+    const data = await response.json()
+    expect(data).toHaveProperty('executedCount', 2)
+  })
 })

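Every call in the rewritten tests is authenticated with a 'Bearer test-cron-secret' header, matching the verifyCronAuth guard the new route invokes before touching the database. What such a guard plausibly does, as a sketch (the real helper in @/lib/auth/internal is not shown in this diff, and CRON_SECRET is an assumed variable name; the only confirmed contract is what the mocks encode, namely that it returns an error response on failure and null on success):

    import { type NextRequest, NextResponse } from 'next/server'

    // Hypothetical cron-auth check. Returns null when the caller presents
    // the shared secret, otherwise a 401 response the route can return.
    function verifyCronAuthSketch(request: NextRequest, context: string): NextResponse | null {
      const header = request.headers.get('authorization') || ''
      const expected = `Bearer ${process.env.CRON_SECRET}`
      if (!process.env.CRON_SECRET || header !== expected) {
        return NextResponse.json({ error: `${context}: unauthorized` }, { status: 401 })
      }
      return null
    }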
@@ -1,673 +1,108 @@
|
||||
import { db, userStats, workflow, workflowSchedule } from '@sim/db'
|
||||
import { Cron } from 'croner'
|
||||
import { and, eq, lte, not, sql } from 'drizzle-orm'
|
||||
import { NextResponse } from 'next/server'
|
||||
import { v4 as uuidv4 } from 'uuid'
|
||||
import { z } from 'zod'
|
||||
import { getApiKeyOwnerUserId } from '@/lib/api-key/service'
|
||||
import { checkServerSideUsageLimits } from '@/lib/billing'
|
||||
import { getHighestPrioritySubscription } from '@/lib/billing/core/subscription'
|
||||
import { getPersonalAndWorkspaceEnv } from '@/lib/environment/utils'
|
||||
import { db, workflowSchedule } from '@sim/db'
|
||||
import { tasks } from '@trigger.dev/sdk'
|
||||
import { and, eq, lte, not } from 'drizzle-orm'
|
||||
import { type NextRequest, NextResponse } from 'next/server'
|
||||
import { verifyCronAuth } from '@/lib/auth/internal'
|
||||
import { env, isTruthy } from '@/lib/env'
|
||||
import { createLogger } from '@/lib/logs/console/logger'
|
||||
import { LoggingSession } from '@/lib/logs/execution/logging-session'
|
||||
import { buildTraceSpans } from '@/lib/logs/execution/trace-spans/trace-spans'
|
||||
import {
|
||||
type BlockState,
|
||||
calculateNextRunTime as calculateNextTime,
|
||||
getScheduleTimeValues,
|
||||
getSubBlockValue,
|
||||
} from '@/lib/schedules/utils'
|
||||
import { decryptSecret, generateRequestId } from '@/lib/utils'
|
||||
import { blockExistsInDeployment, loadDeployedWorkflowState } from '@/lib/workflows/db-helpers'
|
||||
import { updateWorkflowRunCounts } from '@/lib/workflows/utils'
|
||||
import { Executor } from '@/executor'
|
||||
import { Serializer } from '@/serializer'
|
||||
import { RateLimiter } from '@/services/queue'
|
||||
import { mergeSubblockState } from '@/stores/workflows/server-utils'
|
||||
import { generateRequestId } from '@/lib/utils'
|
||||
import { executeScheduleJob } from '@/background/schedule-execution'
|
||||
|
||||
export const dynamic = 'force-dynamic'
|
||||
|
||||
const logger = createLogger('ScheduledExecuteAPI')
|
||||
|
||||
// Maximum number of consecutive failures before disabling a schedule
|
||||
const MAX_CONSECUTIVE_FAILURES = 3
|
||||
export async function GET(request: NextRequest) {
|
||||
const requestId = generateRequestId()
|
||||
logger.info(`[${requestId}] Scheduled execution triggered at ${new Date().toISOString()}`)
|
||||
|
||||
/**
|
||||
* Calculate the next run time for a schedule
|
||||
* This is a wrapper around the utility function in schedule-utils.ts
|
||||
*/
|
||||
function calculateNextRunTime(
|
||||
schedule: typeof workflowSchedule.$inferSelect,
|
||||
blocks: Record<string, BlockState>
|
||||
): Date {
|
||||
// Look for either starter block or schedule trigger block
|
||||
const scheduleBlock = Object.values(blocks).find(
|
||||
(block) => block.type === 'starter' || block.type === 'schedule'
|
||||
)
|
||||
if (!scheduleBlock) throw new Error('No starter or schedule block found')
|
||||
const scheduleType = getSubBlockValue(scheduleBlock, 'scheduleType')
|
||||
const scheduleValues = getScheduleTimeValues(scheduleBlock)
|
||||
|
||||
if (schedule.cronExpression) {
|
||||
const cron = new Cron(schedule.cronExpression)
|
||||
const nextDate = cron.nextRun()
|
||||
if (!nextDate) throw new Error('Invalid cron expression or no future occurrences')
|
||||
return nextDate
|
||||
const authError = verifyCronAuth(request, 'Schedule execution')
|
||||
if (authError) {
|
||||
return authError
|
||||
}
|
||||
|
||||
const lastRanAt = schedule.lastRanAt ? new Date(schedule.lastRanAt) : null
|
||||
return calculateNextTime(scheduleType, scheduleValues, lastRanAt)
|
||||
}
|
||||
|
||||
const EnvVarsSchema = z.record(z.string())
|
||||
|
||||
const runningExecutions = new Set<string>()
|
||||
|
||||
export async function GET() {
|
||||
logger.info(`Scheduled execution triggered at ${new Date().toISOString()}`)
|
||||
const requestId = generateRequestId()
|
||||
const now = new Date()
|
||||
|
||||
let dueSchedules: (typeof workflowSchedule.$inferSelect)[] = []
|
||||
|
||||
try {
|
||||
dueSchedules = await db
|
||||
const dueSchedules = await db
|
||||
.select()
|
||||
.from(workflowSchedule)
|
||||
.where(
|
||||
and(lte(workflowSchedule.nextRunAt, now), not(eq(workflowSchedule.status, 'disabled')))
|
||||
)
|
||||
.limit(10)
|
||||
|
||||
logger.debug(`[${requestId}] Successfully queried schedules: ${dueSchedules.length} found`)
|
||||
|
||||
logger.info(`[${requestId}] Processing ${dueSchedules.length} due scheduled workflows`)
|
||||
|
||||
for (const schedule of dueSchedules) {
|
||||
const executionId = uuidv4()
|
||||
|
||||
try {
|
||||
if (runningExecutions.has(schedule.workflowId)) {
|
||||
logger.debug(`[${requestId}] Skipping workflow ${schedule.workflowId} - already running`)
|
||||
continue
|
||||
}
|
||||
|
||||
runningExecutions.add(schedule.workflowId)
|
||||
logger.debug(`[${requestId}] Starting execution of workflow ${schedule.workflowId}`)
|
||||
|
||||
const [workflowRecord] = await db
|
||||
.select()
|
||||
.from(workflow)
|
||||
.where(eq(workflow.id, schedule.workflowId))
|
||||
.limit(1)
|
||||
|
||||
if (!workflowRecord) {
|
||||
logger.warn(`[${requestId}] Workflow ${schedule.workflowId} not found`)
|
||||
runningExecutions.delete(schedule.workflowId)
|
||||
continue
|
||||
}
|
||||
|
||||
const actorUserId = await getApiKeyOwnerUserId(workflowRecord.pinnedApiKeyId)
|
||||
|
||||
if (!actorUserId) {
|
||||
logger.warn(
|
||||
`[${requestId}] Skipping schedule ${schedule.id}: pinned API key required to attribute usage.`
|
||||
)
|
||||
runningExecutions.delete(schedule.workflowId)
|
||||
continue
|
||||
}
|
||||
|
||||
// Check rate limits for scheduled execution (checks both personal and org subscriptions)
|
||||
const userSubscription = await getHighestPrioritySubscription(actorUserId)
|
||||
|
||||
const rateLimiter = new RateLimiter()
|
||||
const rateLimitCheck = await rateLimiter.checkRateLimitWithSubscription(
|
||||
actorUserId,
|
||||
userSubscription,
|
||||
'schedule',
|
||||
false // schedules are always sync
|
||||
)
|
||||
|
||||
if (!rateLimitCheck.allowed) {
|
||||
logger.warn(
|
||||
`[${requestId}] Rate limit exceeded for scheduled workflow ${schedule.workflowId}`,
|
||||
{
|
||||
userId: workflowRecord.userId,
|
||||
remaining: rateLimitCheck.remaining,
|
||||
resetAt: rateLimitCheck.resetAt,
|
||||
}
|
||||
)
|
||||
|
||||
// Retry in 5 minutes for rate limit
|
||||
const retryDelay = 5 * 60 * 1000 // 5 minutes
|
||||
const nextRetryAt = new Date(now.getTime() + retryDelay)
|
||||
|
||||
try {
|
||||
await db
|
||||
.update(workflowSchedule)
|
||||
.set({
|
||||
updatedAt: now,
|
||||
nextRunAt: nextRetryAt,
|
||||
})
|
||||
.where(eq(workflowSchedule.id, schedule.id))
|
||||
|
||||
logger.debug(`[${requestId}] Updated next retry time due to rate limit`)
|
||||
} catch (updateError) {
|
||||
logger.error(`[${requestId}] Error updating schedule for rate limit:`, updateError)
|
||||
}
|
||||
|
||||
runningExecutions.delete(schedule.workflowId)
|
||||
continue
|
||||
}
|
||||
|
||||
const usageCheck = await checkServerSideUsageLimits(actorUserId)
|
||||
if (usageCheck.isExceeded) {
|
||||
logger.warn(
|
||||
`[${requestId}] User ${workflowRecord.userId} has exceeded usage limits. Skipping scheduled execution.`,
|
||||
{
|
||||
currentUsage: usageCheck.currentUsage,
|
||||
limit: usageCheck.limit,
|
||||
workflowId: schedule.workflowId,
|
||||
}
|
||||
)
|
||||
try {
|
||||
const deployedData = await loadDeployedWorkflowState(schedule.workflowId)
|
||||
const nextRunAt = calculateNextRunTime(schedule, deployedData.blocks as any)
|
||||
await db
|
||||
.update(workflowSchedule)
|
||||
.set({ updatedAt: now, nextRunAt })
|
||||
.where(eq(workflowSchedule.id, schedule.id))
|
||||
} catch (calcErr) {
|
||||
logger.warn(
|
||||
`[${requestId}] Unable to calculate nextRunAt while skipping schedule ${schedule.id}`,
|
||||
calcErr
|
||||
)
|
||||
}
|
||||
runningExecutions.delete(schedule.workflowId)
|
||||
continue
|
||||
}
|
||||
|
||||
// Execute scheduled workflow immediately (no queuing)
|
||||
logger.info(`[${requestId}] Executing scheduled workflow ${schedule.workflowId}`)
|
||||
const useTrigger = isTruthy(env.TRIGGER_DEV_ENABLED)
|
||||
|
||||
    if (useTrigger) {
      const triggerPromises = dueSchedules.map(async (schedule) => {
        // NOTE: an assumed fix for this excerpt: `payload` is declared with `let`
        // up front so the Trigger.dev call after the try/catch/finally below can
        // still see it.
        let payload: Record<string, unknown> | undefined
        try {
          const executionSuccess = await (async () => {
            // Create logging session inside the execution callback
            const loggingSession = new LoggingSession(
              schedule.workflowId,
              executionId,
              'schedule',
              requestId
            )

            try {
              logger.debug(`[${requestId}] Loading deployed workflow ${schedule.workflowId}`)
              const deployedData = await loadDeployedWorkflowState(schedule.workflowId)

              const blocks = deployedData.blocks
              const edges = deployedData.edges
              const loops = deployedData.loops
              const parallels = deployedData.parallels
              logger.info(`[${requestId}] Loaded deployed workflow ${schedule.workflowId}`)

              // Validate that the schedule's trigger block exists in the deployed state
              if (schedule.blockId) {
                const blockExists = await blockExistsInDeployment(
                  schedule.workflowId,
                  schedule.blockId
                )
                if (!blockExists) {
                  logger.warn(
                    `[${requestId}] Schedule trigger block ${schedule.blockId} not found in deployed workflow ${schedule.workflowId}. Skipping execution.`
                  )
                  return { skip: true, blocks: {} as Record<string, BlockState> }
                }
              }

              const mergedStates = mergeSubblockState(blocks)

              // Retrieve environment variables with workspace precedence
              const { personalEncrypted, workspaceEncrypted } = await getPersonalAndWorkspaceEnv(
                actorUserId,
                workflowRecord.workspaceId || undefined
              )
              const variables = EnvVarsSchema.parse({
                ...personalEncrypted,
                ...workspaceEncrypted,
              })

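              // Sub-block values may embed environment variable references as
              // {{VAR_NAME}}; each reference is decrypted and substituted inline
              // before execution, and a missing variable aborts the run.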
              const currentBlockStates = await Object.entries(mergedStates).reduce(
                async (accPromise, [id, block]) => {
                  const acc = await accPromise
                  acc[id] = await Object.entries(block.subBlocks).reduce(
                    async (subAccPromise, [key, subBlock]) => {
                      const subAcc = await subAccPromise
                      let value = subBlock.value

                      if (
                        typeof value === 'string' &&
                        value.includes('{{') &&
                        value.includes('}}')
                      ) {
                        const matches = value.match(/{{([^}]+)}}/g)
                        if (matches) {
                          for (const match of matches) {
                            const varName = match.slice(2, -2)
                            const encryptedValue = variables[varName]
                            if (!encryptedValue) {
                              throw new Error(`Environment variable "${varName}" was not found`)
                            }

                            try {
                              const { decrypted } = await decryptSecret(encryptedValue)
                              value = (value as string).replace(match, decrypted)
                            } catch (error: any) {
                              logger.error(
                                `[${requestId}] Error decrypting value for variable "${varName}"`,
                                error
                              )
                              throw new Error(
                                `Failed to decrypt environment variable "${varName}": ${error.message}`
                              )
                            }
                          }
                        }
                      }

                      subAcc[key] = value
                      return subAcc
                    },
                    Promise.resolve({} as Record<string, any>)
                  )
                  return acc
                },
                Promise.resolve({} as Record<string, Record<string, any>>)
              )

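              // Decrypt the full environment map as well, so the executor
              // receives plaintext values for any runtime lookups.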
              const decryptedEnvVars: Record<string, string> = {}
              for (const [key, encryptedValue] of Object.entries(variables)) {
                try {
                  const { decrypted } = await decryptSecret(encryptedValue)
                  decryptedEnvVars[key] = decrypted
                } catch (error: any) {
                  logger.error(
                    `[${requestId}] Failed to decrypt environment variable "${key}"`,
                    error
                  )
                  throw new Error(
                    `Failed to decrypt environment variable "${key}": ${error.message}`
                  )
                }
              }

              // Process the block states to ensure response formats are properly parsed
              const processedBlockStates = Object.entries(currentBlockStates).reduce(
                (acc, [blockId, blockState]) => {
                  // Check if this block has a responseFormat that needs to be parsed
                  if (blockState.responseFormat && typeof blockState.responseFormat === 'string') {
                    const responseFormatValue = blockState.responseFormat.trim()

                    // Check for variable references like <start.input>
                    if (responseFormatValue.startsWith('<') && responseFormatValue.includes('>')) {
                      logger.debug(
                        `[${requestId}] Response format contains variable reference for block ${blockId}`
                      )
                      // Keep variable references as-is - they will be resolved during execution
                      acc[blockId] = blockState
                    } else if (responseFormatValue === '') {
                      // Empty string - remove response format
                      acc[blockId] = {
                        ...blockState,
                        responseFormat: undefined,
                      }
                    } else {
                      try {
                        logger.debug(`[${requestId}] Parsing responseFormat for block ${blockId}`)
                        // Attempt to parse the responseFormat if it's a string
                        const parsedResponseFormat = JSON.parse(responseFormatValue)

                        acc[blockId] = {
                          ...blockState,
                          responseFormat: parsedResponseFormat,
                        }
                      } catch (error) {
                        logger.warn(
                          `[${requestId}] Failed to parse responseFormat for block ${blockId}, using undefined`,
                          error
                        )
                        // Set to undefined instead of keeping malformed JSON - this allows execution to continue
                        acc[blockId] = {
                          ...blockState,
                          responseFormat: undefined,
                        }
                      }
                    }
                  } else {
                    acc[blockId] = blockState
                  }
                  return acc
                },
                {} as Record<string, Record<string, any>>
              )

              // Get workflow variables
              let workflowVariables = {}
              if (workflowRecord.variables) {
                try {
                  if (typeof workflowRecord.variables === 'string') {
                    workflowVariables = JSON.parse(workflowRecord.variables)
                  } else {
                    workflowVariables = workflowRecord.variables
                  }
                } catch (error) {
                  logger.error(`Failed to parse workflow variables: ${schedule.workflowId}`, error)
                }
              }

              const serializedWorkflow = new Serializer().serializeWorkflow(
                mergedStates,
                edges,
                loops,
                parallels,
                true // Enable validation during execution
              )

              const input = {
                _context: {
                  workflowId: schedule.workflowId,
                },
              }

              // Start logging with environment variables
              await loggingSession.safeStart({
                userId: actorUserId,
                workspaceId: workflowRecord.workspaceId || '',
                variables: variables || {},
              })

              const executor = new Executor({
                workflow: serializedWorkflow,
                currentBlockStates: processedBlockStates,
                envVarValues: decryptedEnvVars,
                workflowInput: input,
                workflowVariables,
                contextExtensions: {
                  executionId,
                  workspaceId: workflowRecord.workspaceId || '',
                  isDeployedContext: true,
                },
              })

              // Set up logging on the executor
              loggingSession.setupExecutor(executor)

              const result = await executor.execute(
                schedule.workflowId,
                schedule.blockId || undefined
              )

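              // executor.execute may return a streaming wrapper; unwrap it to
              // the final execution result either way.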
              const executionResult =
                'stream' in result && 'execution' in result ? result.execution : result

              logger.info(`[${requestId}] Workflow execution completed: ${schedule.workflowId}`, {
                success: executionResult.success,
                executionTime: executionResult.metadata?.duration,
              })

              if (executionResult.success) {
                await updateWorkflowRunCounts(schedule.workflowId)

                try {
                  await db
                    .update(userStats)
                    .set({
                      totalScheduledExecutions: sql`total_scheduled_executions + 1`,
                      lastActive: now,
                    })
                    .where(eq(userStats.userId, actorUserId))

                  logger.debug(`[${requestId}] Updated user stats for scheduled execution`)
                } catch (statsError) {
                  logger.error(`[${requestId}] Error updating user stats:`, statsError)
                }
              }

              const { traceSpans, totalDuration } = buildTraceSpans(executionResult)

              // Complete logging
              await loggingSession.safeComplete({
                endedAt: new Date().toISOString(),
                totalDurationMs: totalDuration || 0,
                finalOutput: executionResult.output || {},
                traceSpans: (traceSpans || []) as any,
              })

              return { success: executionResult.success, blocks, executionResult }
            } catch (earlyError: any) {
              // Handle errors that occur before workflow execution (e.g., missing data, env vars, etc.)
              logger.error(
                `[${requestId}] Early failure in scheduled workflow ${schedule.workflowId}`,
                earlyError
              )

              // Create a minimal log entry for early failures
              try {
                await loggingSession.safeStart({
                  userId: workflowRecord.userId,
                  workspaceId: workflowRecord.workspaceId || '',
                  variables: {},
                })

                await loggingSession.safeCompleteWithError({
                  error: {
                    message: `Schedule execution failed before workflow started: ${earlyError.message}`,
                    stackTrace: earlyError.stack,
                  },
                  traceSpans: [],
                })
              } catch (loggingError) {
                logger.error(
                  `[${requestId}] Failed to create log entry for early schedule failure`,
                  loggingError
                )
              }

              // Re-throw the error to be handled by the outer catch block
              throw earlyError
            }
          })()

          // Check if execution was skipped (e.g., trigger block not found)
          if ('skip' in executionSuccess && executionSuccess.skip) {
            runningExecutions.delete(schedule.workflowId)
            return null
          }

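          // Payload handed to the schedule-execution task (and to the direct
          // job runner below); Date fields are serialized as ISO strings.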
          payload = {
            scheduleId: schedule.id,
            workflowId: schedule.workflowId,
            blockId: schedule.blockId || undefined,
            cronExpression: schedule.cronExpression || undefined,
            lastRanAt: schedule.lastRanAt?.toISOString(),
            failedCount: schedule.failedCount || 0,
            now: now.toISOString(),
          }

          if (executionSuccess.success) {
            logger.info(`[${requestId}] Workflow ${schedule.workflowId} executed successfully`)

            const nextRunAt = calculateNextRunTime(schedule, executionSuccess.blocks)

            logger.debug(
              `[${requestId}] Calculated next run time: ${nextRunAt.toISOString()} for workflow ${schedule.workflowId}`
            )

            try {
              await db
                .update(workflowSchedule)
                .set({
                  lastRanAt: now,
                  updatedAt: now,
                  nextRunAt,
                  failedCount: 0, // Reset failure count on success
                })
                .where(eq(workflowSchedule.id, schedule.id))

              logger.debug(
                `[${requestId}] Updated next run time for workflow ${schedule.workflowId} to ${nextRunAt.toISOString()}`
              )
            } catch (updateError) {
              logger.error(`[${requestId}] Error updating schedule after success:`, updateError)
            }
          } else {
            logger.warn(`[${requestId}] Workflow ${schedule.workflowId} execution failed`)

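            // Consecutive-failure accounting: success resets failedCount to 0;
            // each failure increments it, and the schedule is disabled once it
            // reaches MAX_CONSECUTIVE_FAILURES.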
            const newFailedCount = (schedule.failedCount || 0) + 1
            const shouldDisable = newFailedCount >= MAX_CONSECUTIVE_FAILURES
            const nextRunAt = calculateNextRunTime(schedule, executionSuccess.blocks)

            if (shouldDisable) {
              logger.warn(
                `[${requestId}] Disabling schedule for workflow ${schedule.workflowId} after ${MAX_CONSECUTIVE_FAILURES} consecutive failures`
              )
            }

            try {
              await db
                .update(workflowSchedule)
                .set({
                  updatedAt: now,
                  nextRunAt,
                  failedCount: newFailedCount,
                  lastFailedAt: now,
                  status: shouldDisable ? 'disabled' : 'active',
                })
                .where(eq(workflowSchedule.id, schedule.id))

              logger.debug(`[${requestId}] Updated schedule after failure`)
            } catch (updateError) {
              logger.error(`[${requestId}] Error updating schedule after failure:`, updateError)
            }
          }
        } catch (error: any) {
          // Handle sync queue overload
          if (error.message?.includes('Service overloaded')) {
            logger.warn(`[${requestId}] Service overloaded, retrying schedule in 5 minutes`)

            const retryDelay = 5 * 60 * 1000 // 5 minutes
            const nextRetryAt = new Date(now.getTime() + retryDelay)

            try {
              await db
                .update(workflowSchedule)
                .set({
                  updatedAt: now,
                  nextRunAt: nextRetryAt,
                })
                .where(eq(workflowSchedule.id, schedule.id))

              logger.debug(`[${requestId}] Updated schedule retry time due to service overload`)
            } catch (updateError) {
              logger.error(
                `[${requestId}] Error updating schedule for service overload:`,
                updateError
              )
            }
          } else {
            logger.error(
              `[${requestId}] Error executing scheduled workflow ${schedule.workflowId}`,
              error
            )

            // Ensure we create a log entry for this failed execution
            try {
              const failureLoggingSession = new LoggingSession(
                schedule.workflowId,
                executionId,
                'schedule',
                requestId
              )

              await failureLoggingSession.safeStart({
                userId: workflowRecord.userId,
                workspaceId: workflowRecord.workspaceId || '',
                variables: {},
              })

              await failureLoggingSession.safeCompleteWithError({
                error: {
                  message: `Schedule execution failed: ${error.message}`,
                  stackTrace: error.stack,
                },
                traceSpans: [],
              })
            } catch (loggingError) {
              logger.error(
                `[${requestId}] Failed to create log entry for failed schedule execution`,
                loggingError
              )
            }

            let nextRunAt: Date
            try {
              const [workflowRecord] = await db
                .select()
                .from(workflow)
                .where(eq(workflow.id, schedule.workflowId))
                .limit(1)

              if (workflowRecord?.isDeployed) {
                try {
                  const deployedData = await loadDeployedWorkflowState(schedule.workflowId)
                  nextRunAt = calculateNextRunTime(schedule, deployedData.blocks as any)
                } catch {
                  nextRunAt = new Date(now.getTime() + 24 * 60 * 60 * 1000)
                }
              } else {
                nextRunAt = new Date(now.getTime() + 24 * 60 * 60 * 1000)
              }
            } catch (workflowError) {
              logger.error(
                `[${requestId}] Error retrieving workflow for next run calculation`,
                workflowError
              )
              nextRunAt = new Date(now.getTime() + 24 * 60 * 60 * 1000) // 24 hours as a fallback
            }

            const newFailedCount = (schedule.failedCount || 0) + 1
            const shouldDisable = newFailedCount >= MAX_CONSECUTIVE_FAILURES

            if (shouldDisable) {
              logger.warn(
                `[${requestId}] Disabling schedule for workflow ${schedule.workflowId} after ${MAX_CONSECUTIVE_FAILURES} consecutive failures`
              )
            }

            try {
              await db
                .update(workflowSchedule)
                .set({
                  updatedAt: now,
                  nextRunAt,
                  failedCount: newFailedCount,
                  lastFailedAt: now,
                  status: shouldDisable ? 'disabled' : 'active',
                })
                .where(eq(workflowSchedule.id, schedule.id))

              logger.debug(`[${requestId}] Updated schedule after execution error`)
            } catch (updateError) {
              logger.error(
                `[${requestId}] Error updating schedule after execution error:`,
                updateError
              )
            }
          }
        } finally {
          runningExecutions.delete(schedule.workflowId)
        }

        // Queue the Trigger.dev task once local bookkeeping is done. (The guard
        // is an assumed fix: payload stays unset if execution failed before it
        // was built.)
        if (!payload) return null

        try {
          const handle = await tasks.trigger('schedule-execution', payload)
          logger.info(
            `[${requestId}] Queued schedule execution task ${handle.id} for workflow ${schedule.workflowId}`
          )
          return handle
        } catch (error) {
          logger.error(
            `[${requestId}] Failed to trigger schedule execution for workflow ${schedule.workflowId}`,
            error
          )
          return null
        }
      })

      await Promise.allSettled(triggerPromises)

      logger.info(`[${requestId}] Queued ${dueSchedules.length} schedule executions to Trigger.dev`)
    } else {
      const directExecutionPromises = dueSchedules.map(async (schedule) => {
        const payload = {
          scheduleId: schedule.id,
          workflowId: schedule.workflowId,
          blockId: schedule.blockId || undefined,
          cronExpression: schedule.cronExpression || undefined,
          lastRanAt: schedule.lastRanAt?.toISOString(),
          failedCount: schedule.failedCount || 0,
          now: now.toISOString(),
        }

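        // `void` marks this as intentional fire-and-forget: the job runs in the
        // background and only failures are logged here.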
        void executeScheduleJob(payload).catch((error) => {
          logger.error(
            `[${requestId}] Direct schedule execution failed for workflow ${schedule.workflowId}`,
            error
          )
        })

        logger.info(
          `[${requestId}] Queued direct schedule execution for workflow ${schedule.workflowId} (Trigger.dev disabled)`
        )
      })

      await Promise.allSettled(directExecutionPromises)

      logger.info(
        `[${requestId}] Queued ${dueSchedules.length} direct schedule executions (Trigger.dev disabled)`
      )
    }

    return NextResponse.json({
      message: 'Scheduled workflow executions processed',
      executedCount: dueSchedules.length,
    })
  } catch (error: any) {
    logger.error(`[${requestId}] Error in scheduled execution handler`, error)
    return NextResponse.json({ error: error.message }, { status: 500 })
  }
}

@@ -1,4 +1,5 @@
import { NextResponse } from 'next/server'
import { validateAlphanumericId, validateJiraCloudId } from '@/lib/security/input-validation'
import { getConfluenceCloudId } from '@/tools/confluence/utils'

export const dynamic = 'force-dynamic'

@@ -19,13 +20,20 @@ export async function POST(request: Request) {
      return NextResponse.json({ error: 'Page ID is required' }, { status: 400 })
    }

    const pageIdValidation = validateAlphanumericId(pageId, 'pageId', 255)
    if (!pageIdValidation.isValid) {
      return NextResponse.json({ error: pageIdValidation.error }, { status: 400 })
    }

    // Use provided cloudId or fetch it if not provided
    const cloudId = providedCloudId || (await getConfluenceCloudId(domain, accessToken))

    const cloudIdValidation = validateJiraCloudId(cloudId, 'cloudId')
    if (!cloudIdValidation.isValid) {
      return NextResponse.json({ error: cloudIdValidation.error }, { status: 400 })
    }

    // Build the URL for the Confluence API
    const url = `https://api.atlassian.com/ex/confluence/${cloudId}/wiki/api/v2/pages/${pageId}?expand=body.storage,body.view,body.atlas_doc_format`

    // Make the request to Confluence API
    const response = await fetch(url, {
      method: 'GET',
      headers: {

@@ -52,7 +60,6 @@ export async function POST(request: Request) {

    const data = await response.json()

    // If body is empty, try to provide a minimal valid response
    return NextResponse.json({
      id: data.id,
      title: data.title,

@@ -103,9 +110,18 @@ export async function PUT(request: Request) {
      return NextResponse.json({ error: 'Page ID is required' }, { status: 400 })
    }

    const pageIdValidation = validateAlphanumericId(pageId, 'pageId', 255)
    if (!pageIdValidation.isValid) {
      return NextResponse.json({ error: pageIdValidation.error }, { status: 400 })
    }

    const cloudId = providedCloudId || (await getConfluenceCloudId(domain, accessToken))

    const cloudIdValidation = validateJiraCloudId(cloudId, 'cloudId')
    if (!cloudIdValidation.isValid) {
      return NextResponse.json({ error: cloudIdValidation.error }, { status: 400 })
    }

    // First, get the current page to check its version
    const currentPageUrl = `https://api.atlassian.com/ex/confluence/${cloudId}/wiki/api/v2/pages/${pageId}`
    const currentPageResponse = await fetch(currentPageUrl, {
      headers: {

@@ -121,7 +137,6 @@ export async function PUT(request: Request) {
    const currentPage = await currentPageResponse.json()
    const currentVersion = currentPage.version.number

    // Build the update body with incremented version
    const updateBody: any = {
      id: pageId,
      version: {

@@ -1,5 +1,6 @@
import { NextResponse } from 'next/server'
import { createLogger } from '@/lib/logs/console/logger'
import { validateNumericId } from '@/lib/security/input-validation'

interface DiscordChannel {
  id: string

@@ -26,11 +27,21 @@ export async function POST(request: Request) {
      return NextResponse.json({ error: 'Server ID is required' }, { status: 400 })
    }

    const serverIdValidation = validateNumericId(serverId, 'serverId')
    if (!serverIdValidation.isValid) {
      logger.error(`Invalid server ID: ${serverIdValidation.error}`)
      return NextResponse.json({ error: serverIdValidation.error }, { status: 400 })
    }

    // If channelId is provided, we'll fetch just that specific channel
    if (channelId) {
      const channelIdValidation = validateNumericId(channelId, 'channelId')
      if (!channelIdValidation.isValid) {
        logger.error(`Invalid channel ID: ${channelIdValidation.error}`)
        return NextResponse.json({ error: channelIdValidation.error }, { status: 400 })
      }

      logger.info(`Fetching single Discord channel: ${channelId}`)

      // Fetch a specific channel by ID
      const response = await fetch(`https://discord.com/api/v10/channels/${channelId}`, {
        method: 'GET',
        headers: {

@@ -58,7 +69,6 @@ export async function POST(request: Request) {

      const channel = (await response.json()) as DiscordChannel

      // Verify this is a text channel and belongs to the requested server
      if (channel.guild_id !== serverId) {
        logger.error('Channel does not belong to the specified server')
        return NextResponse.json(

@@ -85,8 +95,6 @@ export async function POST(request: Request) {

    logger.info(`Fetching all Discord channels for server: ${serverId}`)

    // Listing guild channels with a bot token is allowed if the bot is in the guild.
    // Keep the request, but if unauthorized, return an empty list so the selector doesn't hard fail.
    const response = await fetch(`https://discord.com/api/v10/guilds/${serverId}/channels`, {
      method: 'GET',
      headers: {

@@ -108,7 +116,6 @@ export async function POST(request: Request) {

    const channels = (await response.json()) as DiscordChannel[]

    // Filter to just text channels (type 0)
    const textChannels = channels.filter((channel: DiscordChannel) => channel.type === 0)

    logger.info(`Successfully fetched ${textChannels.length} text channels`)

@@ -1,5 +1,6 @@
import { NextResponse } from 'next/server'
import { createLogger } from '@/lib/logs/console/logger'
import { validateNumericId } from '@/lib/security/input-validation'

interface DiscordServer {
  id: string

@@ -20,11 +21,15 @@ export async function POST(request: Request) {
      return NextResponse.json({ error: 'Bot token is required' }, { status: 400 })
    }

    // If serverId is provided, we'll fetch just that server
    if (serverId) {
      const serverIdValidation = validateNumericId(serverId, 'serverId')
      if (!serverIdValidation.isValid) {
        logger.error(`Invalid server ID: ${serverIdValidation.error}`)
        return NextResponse.json({ error: serverIdValidation.error }, { status: 400 })
      }

      logger.info(`Fetching single Discord server: ${serverId}`)

      // Fetch a specific server by ID
      const response = await fetch(`https://discord.com/api/v10/guilds/${serverId}`, {
        method: 'GET',
        headers: {

@@ -64,10 +69,6 @@ export async function POST(request: Request) {
      })
    }

    // Listing guilds via REST requires a user OAuth2 access token with the 'guilds' scope.
    // A bot token cannot call /users/@me/guilds and will return 401.
    // Since this selector only has a bot token, return an empty list instead of erroring
    // and let users provide a Server ID in advanced mode.
    logger.info(
      'Skipping guild listing: bot token cannot list /users/@me/guilds; returning empty list'
    )

@@ -1,6 +1,7 @@
import { type NextRequest, NextResponse } from 'next/server'
import { authorizeCredentialUse } from '@/lib/auth/credential-access'
import { createLogger } from '@/lib/logs/console/logger'
import { validateAlphanumericId } from '@/lib/security/input-validation'
import { generateRequestId } from '@/lib/utils'
import { refreshAccessTokenIfNeeded } from '@/app/api/auth/oauth/utils'
export const dynamic = 'force-dynamic'

@@ -25,6 +26,12 @@ export async function GET(request: NextRequest) {
      return NextResponse.json({ error: 'Credential ID and File ID are required' }, { status: 400 })
    }

    const fileIdValidation = validateAlphanumericId(fileId, 'fileId', 255)
    if (!fileIdValidation.isValid) {
      logger.warn(`[${requestId}] Invalid file ID: ${fileIdValidation.error}`)
      return NextResponse.json({ error: fileIdValidation.error }, { status: 400 })
    }

    const authz = await authorizeCredentialUse(request, { credentialId: credentialId, workflowId })
    if (!authz.ok || !authz.credentialOwnerUserId) {
      return NextResponse.json({ error: authz.error || 'Unauthorized' }, { status: 403 })

@@ -67,10 +74,10 @@ export async function GET(request: NextRequest) {
    const file = await response.json()

    const exportFormats: { [key: string]: string } = {
-     'application/vnd.google-apps.document': 'application/pdf', // Google Docs to PDF
+     'application/vnd.google-apps.document': 'application/pdf',
      'application/vnd.google-apps.spreadsheet':
-       'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet', // Google Sheets to XLSX
-     'application/vnd.google-apps.presentation': 'application/pdf', // Google Slides to PDF
+       'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
+     'application/vnd.google-apps.presentation': 'application/pdf',
    }

    if (

@@ -8,9 +8,10 @@ export const dynamic = 'force-dynamic'

const logger = createLogger('GoogleDriveFilesAPI')

/**
 * Get files from Google Drive
 */
function escapeForDriveQuery(value: string): string {
  return value.replace(/\\/g, '\\\\').replace(/'/g, "\\'")
}
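// Escapes backslashes first, then single quotes. For example, a value like
// Bob's \ files becomes Bob\'s \\ files, which is safe to embed between the
// single quotes of a Drive query term.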

export async function GET(request: NextRequest) {
  const requestId = generateRequestId()
  logger.info(`[${requestId}] Google Drive files request received`)

@@ -53,13 +54,13 @@ export async function GET(request: NextRequest) {

    const qParts: string[] = ['trashed = false']
    if (folderId) {
-     qParts.push(`'${folderId.replace(/'/g, "\\'")}' in parents`)
+     qParts.push(`'${escapeForDriveQuery(folderId)}' in parents`)
    }
    if (mimeType) {
-     qParts.push(`mimeType = '${mimeType.replace(/'/g, "\\'")}'`)
+     qParts.push(`mimeType = '${escapeForDriveQuery(mimeType)}'`)
    }
    if (query) {
-     qParts.push(`name contains '${query.replace(/'/g, "\\'")}'`)
+     qParts.push(`name contains '${escapeForDriveQuery(query)}'`)
    }
    const q = encodeURIComponent(qParts.join(' and '))

@@ -4,6 +4,7 @@ import { and, eq } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { getSession } from '@/lib/auth'
import { createLogger } from '@/lib/logs/console/logger'
import { validateAlphanumericId } from '@/lib/security/input-validation'
import { generateRequestId } from '@/lib/utils'
import { refreshAccessTokenIfNeeded } from '@/app/api/auth/oauth/utils'

@@ -15,10 +16,8 @@ export async function GET(request: NextRequest) {
  const requestId = generateRequestId()

  try {
    // Get the session
    const session = await getSession()

    // Check if the user is authenticated
    if (!session?.user?.id) {
      logger.warn(`[${requestId}] Unauthenticated label request rejected`)
      return NextResponse.json({ error: 'User not authenticated' }, { status: 401 })

@@ -36,7 +35,12 @@ export async function GET(request: NextRequest) {
      )
    }

    const labelIdValidation = validateAlphanumericId(labelId, 'labelId', 255)
    if (!labelIdValidation.isValid) {
      logger.warn(`[${requestId}] Invalid label ID: ${labelIdValidation.error}`)
      return NextResponse.json({ error: labelIdValidation.error }, { status: 400 })
    }

    // Get the credential from the database
    const credentials = await db
      .select()
      .from(account)

@@ -50,19 +54,16 @@ export async function GET(request: NextRequest) {

    const credential = credentials[0]

    // Log the credential info (without exposing sensitive data)
    logger.info(
      `[${requestId}] Using credential: ${credential.id}, provider: ${credential.providerId}`
    )

    // Refresh access token if needed using the utility function
    const accessToken = await refreshAccessTokenIfNeeded(credentialId, session.user.id, requestId)

    if (!accessToken) {
      return NextResponse.json({ error: 'Failed to obtain valid access token' }, { status: 401 })
    }

    // Fetch specific label from Gmail API
    logger.info(`[${requestId}] Fetching label ${labelId} from Gmail API`)
    const response = await fetch(
      `https://gmail.googleapis.com/gmail/v1/users/me/labels/${labelId}`,

@@ -73,7 +74,6 @@ export async function GET(request: NextRequest) {
      }
    )

    // Log the response status
    logger.info(`[${requestId}] Gmail API response status: ${response.status}`)

    if (!response.ok) {

@@ -90,13 +90,9 @@ export async function GET(request: NextRequest) {

    const label = await response.json()

    // Transform the label to a more usable format
    // Format the label name with proper capitalization
    let formattedName = label.name

    // Handle system labels (INBOX, SENT, etc.)
    if (label.type === 'system') {
      // Convert to title case (first letter uppercase, rest lowercase)
      formattedName = label.name.charAt(0).toUpperCase() + label.name.slice(1).toLowerCase()
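      // e.g. "INBOX" -> "Inbox", "SENT" -> "Sent"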
    }

@@ -22,10 +22,8 @@ export async function GET(request: NextRequest) {
  const requestId = generateRequestId()

  try {
    // Get the session
    const session = await getSession()

    // Check if the user is authenticated
    if (!session?.user?.id) {
      logger.warn(`[${requestId}] Unauthenticated labels request rejected`)
      return NextResponse.json({ error: 'User not authenticated' }, { status: 401 })

@@ -40,8 +38,6 @@ export async function GET(request: NextRequest) {
      return NextResponse.json({ error: 'Credential ID is required' }, { status: 400 })
    }

    // Get the credential from the database. Prefer session-owned credential, but
    // if not found, resolve by credential ID to support collaborator-owned credentials.
    let credentials = await db
      .select()
      .from(account)

@@ -58,26 +54,22 @@ export async function GET(request: NextRequest) {

    const credential = credentials[0]

    // Log the credential info (without exposing sensitive data)
    logger.info(
      `[${requestId}] Using credential: ${credential.id}, provider: ${credential.providerId}`
    )

    // Refresh access token if needed using the utility function
    const accessToken = await refreshAccessTokenIfNeeded(credentialId, credential.userId, requestId)

    if (!accessToken) {
      return NextResponse.json({ error: 'Failed to obtain valid access token' }, { status: 401 })
    }

    // Fetch labels from Gmail API
    const response = await fetch('https://gmail.googleapis.com/gmail/v1/users/me/labels', {
      headers: {
        Authorization: `Bearer ${accessToken}`,
      },
    })

    // Log the response status
    logger.info(`[${requestId}] Gmail API response status: ${response.status}`)

    if (!response.ok) {

@@ -98,14 +90,10 @@ export async function GET(request: NextRequest) {
      return NextResponse.json({ error: 'Invalid labels response' }, { status: 500 })
    }

    // Transform the labels to a more usable format
    const labels = data.labels.map((label: GmailLabel) => {
      // Format the label name with proper capitalization
      let formattedName = label.name

      // Handle system labels (INBOX, SENT, etc.)
      if (label.type === 'system') {
        // Convert to title case (first letter uppercase, rest lowercase)
        formattedName = label.name.charAt(0).toUpperCase() + label.name.slice(1).toLowerCase()
      }

@@ -118,7 +106,6 @@ export async function GET(request: NextRequest) {
      }
    })

    // Filter labels if a query is provided
    const filteredLabels = query
      ? labels.filter((label: GmailLabel) =>
          label.name.toLowerCase().includes((query as string).toLowerCase())

@@ -1,5 +1,6 @@
import { NextResponse } from 'next/server'
import { createLogger } from '@/lib/logs/console/logger'
import { validateJiraCloudId, validateJiraIssueKey } from '@/lib/security/input-validation'
import { getJiraCloudId } from '@/tools/jira/utils'

export const dynamic = 'force-dynamic'

@@ -9,7 +10,6 @@ const logger = createLogger('JiraIssueAPI')

export async function POST(request: Request) {
  try {
    const { domain, accessToken, issueId, cloudId: providedCloudId } = await request.json()
    // Add detailed request logging
    if (!domain) {
      logger.error('Missing domain in request')
      return NextResponse.json({ error: 'Domain is required' }, { status: 400 })

@@ -25,16 +25,23 @@ export async function POST(request: Request) {
      return NextResponse.json({ error: 'Issue ID is required' }, { status: 400 })
    }

    // Use provided cloudId or fetch it if not provided
    const cloudId = providedCloudId || (await getJiraCloudId(domain, accessToken))
    logger.info('Using cloud ID:', cloudId)

    const cloudIdValidation = validateJiraCloudId(cloudId, 'cloudId')
    if (!cloudIdValidation.isValid) {
      return NextResponse.json({ error: cloudIdValidation.error }, { status: 400 })
    }

    const issueIdValidation = validateJiraIssueKey(issueId, 'issueId')
    if (!issueIdValidation.isValid) {
      return NextResponse.json({ error: issueIdValidation.error }, { status: 400 })
    }

    // Build the URL using cloudId for Jira API
    const url = `https://api.atlassian.com/ex/jira/${cloudId}/rest/api/3/issue/${issueId}`

    logger.info('Fetching Jira issue from:', url)

    // Make the request to Jira API
    const response = await fetch(url, {
      method: 'GET',
      headers: {

@@ -63,7 +70,6 @@ export async function POST(request: Request) {
    const data = await response.json()
    logger.info('Successfully fetched issue:', data.key)

    // Transform the Jira issue data into our expected format
    const issueInfo: any = {
      id: data.key,
      name: data.fields.summary,

@@ -71,7 +77,6 @@ export async function POST(request: Request) {
      url: `https://${domain}/browse/${data.key}`,
      modifiedTime: data.fields.updated,
      webViewLink: `https://${domain}/browse/${data.key}`,
      // Add additional fields that might be needed for the workflow
      status: data.fields.status?.name,
      description: data.fields.description,
      priority: data.fields.priority?.name,

@@ -85,11 +90,10 @@ export async function POST(request: Request) {

    return NextResponse.json({
      issue: issueInfo,
-     cloudId, // Return the cloudId so it can be cached
+     cloudId,
    })
  } catch (error) {
    logger.error('Error processing request:', error)
    // Add more context to the error response
    return NextResponse.json(
      {
        error: 'Failed to retrieve Jira issue',

@@ -1,12 +1,12 @@
import { NextResponse } from 'next/server'
import { createLogger } from '@/lib/logs/console/logger'
import { validateAlphanumericId, validateJiraCloudId } from '@/lib/security/input-validation'
import { getJiraCloudId } from '@/tools/jira/utils'

export const dynamic = 'force-dynamic'

const logger = createLogger('JiraIssuesAPI')

// Helper functions
const createErrorResponse = async (response: Response, defaultMessage: string) => {
  try {
    const errorData = await response.json()

@@ -38,13 +38,15 @@ export async function POST(request: Request) {
      return NextResponse.json({ issues: [] })
    }

    // Use provided cloudId or fetch it if not provided
    const cloudId = providedCloudId || (await getJiraCloudId(domain!, accessToken!))

    const cloudIdValidation = validateJiraCloudId(cloudId, 'cloudId')
    if (!cloudIdValidation.isValid) {
      return NextResponse.json({ error: cloudIdValidation.error }, { status: 400 })
    }

    // Build the URL using cloudId for Jira API
    const url = `https://api.atlassian.com/ex/jira/${cloudId}/rest/api/3/issue/bulkfetch`

    // Prepare the request body for bulk fetch
    const requestBody = {
      expand: ['names'],
      fields: ['summary', 'status', 'assignee', 'updated', 'project'],

@@ -53,7 +55,6 @@ export async function POST(request: Request) {
      properties: [],
    }

    // Make the request to Jira API with OAuth Bearer token
    const requestConfig = {
      method: 'POST',
      headers: {

@@ -112,6 +113,29 @@ export async function GET(request: Request) {
    if (validationError) return validationError

    const cloudId = providedCloudId || (await getJiraCloudId(domain!, accessToken!))

    const cloudIdValidation = validateJiraCloudId(cloudId, 'cloudId')
    if (!cloudIdValidation.isValid) {
      return NextResponse.json({ error: cloudIdValidation.error }, { status: 400 })
    }

    if (projectId) {
      const projectIdValidation = validateAlphanumericId(projectId, 'projectId', 100)
      if (!projectIdValidation.isValid) {
        return NextResponse.json({ error: projectIdValidation.error }, { status: 400 })
      }
    }
    if (manualProjectId) {
      const manualProjectIdValidation = validateAlphanumericId(
        manualProjectId,
        'manualProjectId',
        100
      )
      if (!manualProjectIdValidation.isValid) {
        return NextResponse.json({ error: manualProjectIdValidation.error }, { status: 400 })
      }
    }

    let data: any

    if (query) {

@@ -1,5 +1,6 @@
import { NextResponse } from 'next/server'
import { createLogger } from '@/lib/logs/console/logger'
import { validateAlphanumericId, validateJiraCloudId } from '@/lib/security/input-validation'
import { getJiraCloudId } from '@/tools/jira/utils'

export const dynamic = 'force-dynamic'

@@ -22,19 +23,20 @@ export async function GET(request: Request) {
      return NextResponse.json({ error: 'Access token is required' }, { status: 400 })
    }

    // Use provided cloudId or fetch it if not provided
    const cloudId = providedCloudId || (await getJiraCloudId(domain, accessToken))
    logger.info(`Using cloud ID: ${cloudId}`)

    const cloudIdValidation = validateJiraCloudId(cloudId, 'cloudId')
    if (!cloudIdValidation.isValid) {
      return NextResponse.json({ error: cloudIdValidation.error }, { status: 400 })
    }

    // Build the URL for the Jira API projects endpoint
    const apiUrl = `https://api.atlassian.com/ex/jira/${cloudId}/rest/api/3/project/search`

    // Add query parameters if searching
    const queryParams = new URLSearchParams()
    if (query) {
      queryParams.append('query', query)
    }
    // Add other useful parameters
    queryParams.append('orderBy', 'name')
    queryParams.append('expand', 'description,lead,url,projectKeys')

@@ -66,18 +68,16 @@ export async function GET(request: Request) {

    const data = await response.json()

    // Add detailed logging
    logger.info(`Jira API Response Status: ${response.status}`)
    logger.info(`Found projects: ${data.values?.length || 0}`)

    // Transform the response to match our expected format
    const projects =
      data.values?.map((project: any) => ({
        id: project.id,
        key: project.key,
        name: project.name,
        url: project.self,
-       avatarUrl: project.avatarUrls?.['48x48'], // Use the medium size avatar
+       avatarUrl: project.avatarUrls?.['48x48'],
        description: project.description,
        projectTypeKey: project.projectTypeKey,
        simplified: project.simplified,

@@ -87,7 +87,7 @@ export async function GET(request: Request) {

    return NextResponse.json({
      projects,
-     cloudId, // Return the cloudId so it can be cached
+     cloudId,
    })
  } catch (error) {
    logger.error('Error fetching Jira projects:', error)

@@ -98,7 +98,6 @@ export async function GET(request: Request) {
  }
}

// For individual project retrieval if needed
export async function POST(request: Request) {
  try {
    const { domain, accessToken, projectId, cloudId: providedCloudId } = await request.json()

@@ -115,9 +114,18 @@ export async function POST(request: Request) {
      return NextResponse.json({ error: 'Project ID is required' }, { status: 400 })
    }

    // Use provided cloudId or fetch it if not provided
    const cloudId = providedCloudId || (await getJiraCloudId(domain, accessToken))

    const cloudIdValidation = validateJiraCloudId(cloudId, 'cloudId')
    if (!cloudIdValidation.isValid) {
      return NextResponse.json({ error: cloudIdValidation.error }, { status: 400 })
    }

    const projectIdValidation = validateAlphanumericId(projectId, 'projectId', 100)
    if (!projectIdValidation.isValid) {
      return NextResponse.json({ error: projectIdValidation.error }, { status: 400 })
    }

    const apiUrl = `https://api.atlassian.com/ex/jira/${cloudId}/rest/api/3/project/${projectId}`

    const response = await fetch(apiUrl, {

@@ -1,5 +1,6 @@
import { NextResponse } from 'next/server'
import { createLogger } from '@/lib/logs/console/logger'
import { validateJiraCloudId, validateJiraIssueKey } from '@/lib/security/input-validation'
import { getJiraCloudId } from '@/tools/jira/utils'

export const dynamic = 'force-dynamic'

@@ -13,7 +14,7 @@ export async function PUT(request: Request) {
      accessToken,
      issueKey,
      summary,
-     title, // Support both summary and title for backwards compatibility
+     title,
      description,
      status,
      priority,

@@ -21,7 +22,6 @@ export async function PUT(request: Request) {
      cloudId: providedCloudId,
    } = await request.json()

    // Validate required parameters
    if (!domain) {
      logger.error('Missing domain in request')
      return NextResponse.json({ error: 'Domain is required' }, { status: 400 })

@@ -37,16 +37,23 @@ export async function PUT(request: Request) {
      return NextResponse.json({ error: 'Issue key is required' }, { status: 400 })
    }

    // Use provided cloudId or fetch it if not provided
    const cloudId = providedCloudId || (await getJiraCloudId(domain, accessToken))
    logger.info('Using cloud ID:', cloudId)

    const cloudIdValidation = validateJiraCloudId(cloudId, 'cloudId')
    if (!cloudIdValidation.isValid) {
      return NextResponse.json({ error: cloudIdValidation.error }, { status: 400 })
    }

    const issueKeyValidation = validateJiraIssueKey(issueKey, 'issueKey')
    if (!issueKeyValidation.isValid) {
      return NextResponse.json({ error: issueKeyValidation.error }, { status: 400 })
    }

    // Build the URL using cloudId for Jira API
    const url = `https://api.atlassian.com/ex/jira/${cloudId}/rest/api/3/issue/${issueKey}`

    logger.info('Updating Jira issue at:', url)

    // Map the summary from either summary or title field
    const summaryValue = summary || title
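    // `summary` wins when both are present; `title` is the legacy field kept
    // for backwards compatibility.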
    const fields: Record<string, any> = {}

@@ -92,7 +99,6 @@ export async function PUT(request: Request) {

    const body = { fields }

    // Make the request to Jira API
    const response = await fetch(url, {
      method: 'PUT',
      headers: {

@@ -117,7 +123,6 @@ export async function PUT(request: Request) {
      )
    }

    // Note: Jira update API typically returns 204 No Content on success
    const responseData = response.status === 204 ? {} : await response.json()
    logger.info('Successfully updated Jira issue:', issueKey)

@@ -1,5 +1,6 @@
import { NextResponse } from 'next/server'
import { createLogger } from '@/lib/logs/console/logger'
import { validateAlphanumericId, validateJiraCloudId } from '@/lib/security/input-validation'
import { getJiraCloudId } from '@/tools/jira/utils'

export const dynamic = 'force-dynamic'

@@ -21,7 +22,6 @@ export async function POST(request: Request) {
      parent,
    } = await request.json()

    // Validate required parameters
    if (!domain) {
      logger.error('Missing domain in request')
      return NextResponse.json({ error: 'Domain is required' }, { status: 400 })

@@ -44,16 +44,23 @@ export async function POST(request: Request) {

    const normalizedIssueType = issueType || 'Task'

    // Use provided cloudId or fetch it if not provided
    const cloudId = providedCloudId || (await getJiraCloudId(domain, accessToken))
    logger.info('Using cloud ID:', cloudId)

    const cloudIdValidation = validateJiraCloudId(cloudId, 'cloudId')
    if (!cloudIdValidation.isValid) {
      return NextResponse.json({ error: cloudIdValidation.error }, { status: 400 })
    }

    const projectIdValidation = validateAlphanumericId(projectId, 'projectId', 100)
    if (!projectIdValidation.isValid) {
      return NextResponse.json({ error: projectIdValidation.error }, { status: 400 })
    }

    // Build the URL using cloudId for Jira API
    const url = `https://api.atlassian.com/ex/jira/${cloudId}/rest/api/3/issue`

    logger.info('Creating Jira issue at:', url)

    // Construct fields object with only the necessary fields
    const fields: Record<string, any> = {
      project: {
        id: projectId,

@@ -64,7 +71,6 @@ export async function POST(request: Request) {
      summary: summary,
    }

    // Only add description if it exists
    if (description) {
      fields.description = {
        type: 'doc',

@@ -83,19 +89,16 @@ export async function POST(request: Request) {
      }
    }

    // Only add parent if it exists
    if (parent) {
      fields.parent = parent
    }

    // Only add priority if it exists
    if (priority) {
      fields.priority = {
        name: priority,
      }
    }

    // Only add assignee if it exists
    if (assignee) {
      fields.assignee = {
        id: assignee,

@@ -104,7 +107,6 @@ export async function POST(request: Request) {

    const body = { fields }

    // Make the request to Jira API
    const response = await fetch(url, {
      method: 'POST',
      headers: {

@@ -5,6 +5,7 @@ import { eq } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { getSession } from '@/lib/auth'
import { createLogger } from '@/lib/logs/console/logger'
import { validateMicrosoftGraphId } from '@/lib/security/input-validation'
import { refreshAccessTokenIfNeeded } from '@/app/api/auth/oauth/utils'
import type { PlannerTask } from '@/tools/microsoft_planner/types'

@@ -35,7 +36,12 @@ export async function GET(request: NextRequest) {
      return NextResponse.json({ error: 'Plan ID is required' }, { status: 400 })
    }

    const planIdValidation = validateMicrosoftGraphId(planId, 'planId')
    if (!planIdValidation.isValid) {
      logger.error(`[${requestId}] Invalid planId: ${planIdValidation.error}`)
      return NextResponse.json({ error: planIdValidation.error }, { status: 400 })
    }

    // Get the credential from the database
    const credentials = await db.select().from(account).where(eq(account.id, credentialId)).limit(1)

    if (!credentials.length) {

@@ -45,7 +51,6 @@ export async function GET(request: NextRequest) {

    const credential = credentials[0]

    // Check if the credential belongs to the user
    if (credential.userId !== session.user.id) {
      logger.warn(`[${requestId}] Unauthorized credential access attempt`, {
        credentialUserId: credential.userId,

@@ -54,7 +59,6 @@ export async function GET(request: NextRequest) {
      return NextResponse.json({ error: 'Unauthorized' }, { status: 403 })
    }

    // Refresh access token if needed
    const accessToken = await refreshAccessTokenIfNeeded(credentialId, session.user.id, requestId)

    if (!accessToken) {

@@ -62,7 +66,6 @@ export async function GET(request: NextRequest) {

      return NextResponse.json({ error: 'Failed to obtain valid access token' }, { status: 401 })
    }

    // Fetch tasks directly from Microsoft Graph API
    const response = await fetch(`https://graph.microsoft.com/v1.0/planner/plans/${planId}/tasks`, {
      headers: {
        Authorization: `Bearer ${accessToken}`,

@@ -81,7 +84,6 @@ export async function GET(request: NextRequest) {
    const data = await response.json()
    const tasks = data.value || []

    // Filter tasks to only include useful fields (matching our read_task tool)
    const filteredTasks = tasks.map((task: PlannerTask) => ({
      id: task.id,
      title: task.title,

@@ -5,15 +5,13 @@ import { eq } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { getSession } from '@/lib/auth'
import { createLogger } from '@/lib/logs/console/logger'
import { validateMicrosoftGraphId } from '@/lib/security/input-validation'
import { refreshAccessTokenIfNeeded } from '@/app/api/auth/oauth/utils'

export const dynamic = 'force-dynamic'

const logger = createLogger('OneDriveFolderAPI')

/**
 * Get a single folder from Microsoft OneDrive
 */
export async function GET(request: NextRequest) {
  const requestId = randomUUID().slice(0, 8)

@@ -31,6 +29,11 @@ export async function GET(request: NextRequest) {
      return NextResponse.json({ error: 'Credential ID and File ID are required' }, { status: 400 })
    }

    const fileIdValidation = validateMicrosoftGraphId(fileId, 'fileId')
    if (!fileIdValidation.isValid) {
      return NextResponse.json({ error: fileIdValidation.error }, { status: 400 })
    }

    const credentials = await db.select().from(account).where(eq(account.id, credentialId)).limit(1)
    if (!credentials.length) {
      return NextResponse.json({ error: 'Credential not found' }, { status: 404 })

@@ -65,7 +68,6 @@ export async function GET(request: NextRequest) {

    const folder = await response.json()

    // Transform the response to match expected format
    const transformedFolder = {
      id: folder.id,
      name: folder.name,

Some files were not shown because too many files have changed in this diff.