Merge remote-tracking branch 'origin/main' into feat/ai-product

Sheen Capadngan committed 2025-12-19 18:18:34 +08:00
323 changed files with 7279 additions and 18461 deletions

.env.dev.example (new file, +150 lines)

@@ -0,0 +1,150 @@
# Keys
# Required key for platform encryption/decryption ops
# THIS IS A SAMPLE ENCRYPTION KEY AND SHOULD NEVER BE USED FOR PRODUCTION
ENCRYPTION_KEY=VVHnGZ0w98WLgISK4XSJcagezuG6EWRFTk48KE4Y5Mw=
# JWT
# Required secrets to sign JWT tokens
# THIS IS A SAMPLE AUTH_SECRET KEY AND SHOULD NEVER BE USED FOR PRODUCTION
AUTH_SECRET=5lrMXKKWCVocS/uerPsl7V+TX/aaUaI7iDkgl3tSmLE=
# Postgres creds
POSTGRES_PASSWORD=infisical
POSTGRES_USER=infisical
POSTGRES_DB=infisical
# Required
DB_CONNECTION_URI=postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@db:5432/${POSTGRES_DB}
# Redis
REDIS_URL=redis://redis:6379
# Website URL
# Required
SITE_URL=http://localhost:8080
# Mail/SMTP
SMTP_HOST=
SMTP_PORT=
SMTP_FROM_ADDRESS=
SMTP_FROM_NAME=
SMTP_USERNAME=
SMTP_PASSWORD=
# Integration
# Optional only if integration is used
CLIENT_ID_HEROKU=
CLIENT_ID_VERCEL=
CLIENT_ID_NETLIFY=
CLIENT_ID_GITHUB=
CLIENT_ID_GITHUB_APP=
CLIENT_SLUG_GITHUB_APP=
CLIENT_ID_GITLAB=
CLIENT_ID_BITBUCKET=
CLIENT_SECRET_HEROKU=
CLIENT_SECRET_VERCEL=
CLIENT_SECRET_NETLIFY=
CLIENT_SECRET_GITHUB=
CLIENT_SECRET_GITHUB_APP=
CLIENT_SECRET_GITLAB=
CLIENT_SECRET_BITBUCKET=
CLIENT_SLUG_VERCEL=
CLIENT_PRIVATE_KEY_GITHUB_APP=
CLIENT_APP_ID_GITHUB_APP=
# Sentry (optional) for monitoring errors
SENTRY_DSN=
# Infisical Cloud-specific configs
# Ignore - Not applicable for self-hosted version
POSTHOG_HOST=
POSTHOG_PROJECT_API_KEY=
# SSO-specific variables
CLIENT_ID_GOOGLE_LOGIN=
CLIENT_SECRET_GOOGLE_LOGIN=
CLIENT_ID_GITHUB_LOGIN=
CLIENT_SECRET_GITHUB_LOGIN=
CLIENT_ID_GITLAB_LOGIN=
CLIENT_SECRET_GITLAB_LOGIN=
CAPTCHA_SECRET=
NEXT_PUBLIC_CAPTCHA_SITE_KEY=
OTEL_TELEMETRY_COLLECTION_ENABLED=false
OTEL_EXPORT_TYPE=prometheus
OTEL_EXPORT_OTLP_ENDPOINT=
OTEL_OTLP_PUSH_INTERVAL=
OTEL_COLLECTOR_BASIC_AUTH_USERNAME=
OTEL_COLLECTOR_BASIC_AUTH_PASSWORD=
PLAIN_API_KEY=
PLAIN_WISH_LABEL_IDS=
SSL_CLIENT_CERTIFICATE_HEADER_KEY=
ENABLE_MSSQL_SECRET_ROTATION_ENCRYPT=true
# App Connections
# aws assume-role connection
INF_APP_CONNECTION_AWS_ACCESS_KEY_ID=
INF_APP_CONNECTION_AWS_SECRET_ACCESS_KEY=
# github oauth connection
INF_APP_CONNECTION_GITHUB_OAUTH_CLIENT_ID=
INF_APP_CONNECTION_GITHUB_OAUTH_CLIENT_SECRET=
#github app connection
INF_APP_CONNECTION_GITHUB_APP_CLIENT_ID=
INF_APP_CONNECTION_GITHUB_APP_CLIENT_SECRET=
INF_APP_CONNECTION_GITHUB_APP_PRIVATE_KEY=
INF_APP_CONNECTION_GITHUB_APP_SLUG=
INF_APP_CONNECTION_GITHUB_APP_ID=
#gitlab app connection
INF_APP_CONNECTION_GITLAB_OAUTH_CLIENT_ID=
INF_APP_CONNECTION_GITLAB_OAUTH_CLIENT_SECRET=
#github radar app connection
INF_APP_CONNECTION_GITHUB_RADAR_APP_CLIENT_ID=
INF_APP_CONNECTION_GITHUB_RADAR_APP_CLIENT_SECRET=
INF_APP_CONNECTION_GITHUB_RADAR_APP_PRIVATE_KEY=
INF_APP_CONNECTION_GITHUB_RADAR_APP_SLUG=
INF_APP_CONNECTION_GITHUB_RADAR_APP_ID=
INF_APP_CONNECTION_GITHUB_RADAR_APP_WEBHOOK_SECRET=
#gcp app connection
INF_APP_CONNECTION_GCP_SERVICE_ACCOUNT_CREDENTIAL=
# azure app connections
INF_APP_CONNECTION_AZURE_APP_CONFIGURATION_CLIENT_ID=
INF_APP_CONNECTION_AZURE_APP_CONFIGURATION_CLIENT_SECRET=
INF_APP_CONNECTION_AZURE_KEY_VAULT_CLIENT_ID=
INF_APP_CONNECTION_AZURE_KEY_VAULT_CLIENT_SECRET=
INF_APP_CONNECTION_AZURE_CLIENT_SECRETS_CLIENT_ID=
INF_APP_CONNECTION_AZURE_CLIENT_SECRETS_CLIENT_SECRET=
INF_APP_CONNECTION_AZURE_DEVOPS_CLIENT_ID=
INF_APP_CONNECTION_AZURE_DEVOPS_CLIENT_SECRET=
# heroku app connection
INF_APP_CONNECTION_HEROKU_OAUTH_CLIENT_ID=
INF_APP_CONNECTION_HEROKU_OAUTH_CLIENT_SECRET=
# datadog
SHOULD_USE_DATADOG_TRACER=
DATADOG_PROFILING_ENABLED=
DATADOG_ENV=
DATADOG_SERVICE=
DATADOG_HOSTNAME=
# kubernetes
KUBERNETES_AUTO_FETCH_SERVICE_ACCOUNT_TOKEN=false
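The ENCRYPTION_KEY and AUTH_SECRET values above are published samples and must be regenerated for any real deployment. A minimal sketch of generating replacements, assuming a Node.js runtime and the same 32-byte base64 shape as the samples:

```ts
import { randomBytes } from "node:crypto";

// 32 random bytes, base64-encoded: the same shape as the sample keys above.
console.log(`ENCRYPTION_KEY=${randomBytes(32).toString("base64")}`);
console.log(`AUTH_SECRET=${randomBytes(32).toString("base64")}`);
```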


@@ -1,7 +1,7 @@
# Keys
# Required key for platform encryption/decryption ops
# THIS IS A SAMPLE ENCRYPTION KEY AND SHOULD NEVER BE USED FOR PRODUCTION
-ENCRYPTION_KEY=VVHnGZ0w98WLgISK4XSJcagezuG6EWRFTk48KE4Y5Mw=
+ENCRYPTION_KEY=f13dbc92aaaf86fa7cb0ed8ac3265f47
# JWT
# Required secrets to sign JWT tokens
@@ -21,7 +21,7 @@ REDIS_URL=redis://redis:6379
# Website URL
# Required
-SITE_URL=http://localhost:8080
+SITE_URL=http://localhost:80
# Mail/SMTP
SMTP_HOST=


@@ -1,59 +0,0 @@
name: Release K8 Operator Helm Chart

on:
  workflow_dispatch:

jobs:
  test-helm:
    name: Test Helm Chart
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v3
        with:
          fetch-depth: 0
      - name: Set up Helm
        uses: azure/setup-helm@v4.2.0
        with:
          version: v3.17.0
      - uses: actions/setup-python@v5.3.0
        with:
          python-version: "3.x"
          check-latest: true
      - name: Set up chart-testing
        uses: helm/chart-testing-action@v2.7.0
      - name: Run chart-testing (lint)
        run: ct lint --config ct.yaml --charts helm-charts/secrets-operator
      - name: Create kind cluster
        uses: helm/kind-action@v1.12.0
      - name: Run chart-testing (install)
        run: ct install --config ct.yaml --charts helm-charts/secrets-operator

  release-helm:
    name: Release Helm Chart
    needs: test-helm
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v2
      - name: Install Helm
        uses: azure/setup-helm@v3
        with:
          version: v3.10.0
      - name: Install python
        uses: actions/setup-python@v4
      - name: Install Cloudsmith CLI
        run: pip install --upgrade cloudsmith-cli
      - name: Build and push helm package to CloudSmith
        run: cd helm-charts && sh upload-k8s-operator-cloudsmith.sh
        env:
          CLOUDSMITH_API_KEY: ${{ secrets.CLOUDSMITH_API_KEY }}


@@ -1,104 +0,0 @@
name: Release K8 Operator Docker Image

on:
  push:
    tags:
      - "infisical-k8-operator/v*.*.*"

permissions:
  contents: write
  pull-requests: write

jobs:
  release-image:
    name: Generate Helm Chart PR
    runs-on: ubuntu-latest
    outputs:
      pr_number: ${{ steps.create-pr.outputs.pull-request-number }}
    steps:
      - name: Extract version from tag
        id: extract_version
        run: echo "::set-output name=version::${GITHUB_REF_NAME#infisical-k8-operator/}"
      - name: Checkout code
        uses: actions/checkout@v2

      # Dependency for helm generation
      - name: Install Helm
        uses: azure/setup-helm@v3
        with:
          version: v3.10.0

      # Dependency for helm generation
      - name: Install Go
        uses: actions/setup-go@v4
        with:
          go-version: 1.21

      # Install binaries for helm generation
      - name: Install dependencies
        working-directory: k8-operator
        run: |
          make helmify
          make kustomize
          make controller-gen

      - name: Generate Helm Chart
        working-directory: k8-operator
        run: make helm VERSION=${{ steps.extract_version.outputs.version }}

      - name: Debug - Check file changes
        run: |
          echo "Current git status:"
          git status
          echo ""
          echo "Modified files:"
          git diff --name-only

          # If there is no diff, exit with error. Version should always be changed, so if there is no diff, something is wrong and we should exit.
          if [ -z "$(git diff --name-only)" ]; then
            echo "No helm changes or version changes. Invalid release detected, Exiting."
            exit 1
          fi

      - name: Create Helm Chart PR
        id: create-pr
        uses: peter-evans/create-pull-request@v5
        with:
          token: ${{ secrets.GITHUB_TOKEN }}
          commit-message: "Update Helm chart to version ${{ steps.extract_version.outputs.version }}"
          committer: GitHub <noreply@github.com>
          author: ${{ github.actor }} <${{ github.actor }}@users.noreply.github.com>
          branch: helm-update-${{ steps.extract_version.outputs.version }}
          delete-branch: true
          title: "Update Helm chart to version ${{ steps.extract_version.outputs.version }}"
          body: |
            This PR updates the Helm chart to version `${{ steps.extract_version.outputs.version }}`.
            Additionally the helm chart has been updated to match the latest operator code changes.
            Associated Release Workflow: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}
            Once you have approved this PR, you can trigger the helm release workflow manually.
          base: main

      - name: 🔧 Set up QEMU
        uses: docker/setup-qemu-action@v1
      - name: 🔧 Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
      - name: 🐋 Login to Docker Hub
        uses: docker/login-action@v1
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: Build and push
        id: docker_build
        uses: docker/build-push-action@v2
        with:
          context: k8-operator
          push: true
          platforms: linux/amd64,linux/arm64
          tags: |
            infisical/kubernetes-operator:latest
            infisical/kubernetes-operator:${{ steps.extract_version.outputs.version }}


@@ -47,10 +47,13 @@ jobs:
      - name: Output .env file and enable feature flags for BDD tests
        run: |
-         cp .env.example .env
+         cp .env.dev.example .env
          echo "ACME_DEVELOPMENT_MODE=true" >> .env
          echo "ACME_DEVELOPMENT_HTTP01_CHALLENGE_HOST_OVERRIDES={\"localhost\": \"host.docker.internal:8087\", \"infisical.com\": \"host.docker.internal:8087\", \"example.com\": \"host.docker.internal:8087\"}" >> .env
          echo "BDD_NOCK_API_ENABLED=true" >> .env
+         # use Technitium DNS server for BDD tests
+         echo "ACME_DNS_RESOLVE_RESOLVER_SERVERS_HOST_ENABLED=true" >> .env
+         echo "ACME_DNS_RESOLVER_SERVERS=technitium" >> .env
          # Skip upstream validation, otherwise the ACME client for the upstream will try to
          # validate the DNS records, which will fail because the DNS records are not actually created.
          echo "ACME_SKIP_UPSTREAM_VALIDATION=true" >> .env


@@ -1,38 +0,0 @@
name: Run Helm Chart Tests for Secret Operator

on:
  pull_request:
    paths:
      - "helm-charts/secrets-operator/**"
      - ".github/workflows/run-helm-chart-tests-secret-operator.yml"

jobs:
  test-helm:
    name: Test Helm Chart
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v3
        with:
          fetch-depth: 0
      - name: Set up Helm
        uses: azure/setup-helm@v4.2.0
        with:
          version: v3.17.0
      - uses: actions/setup-python@v5.3.0
        with:
          python-version: "3.x"
          check-latest: true
      - name: Set up chart-testing
        uses: helm/chart-testing-action@v2.7.0
      - name: Run chart-testing (lint)
        run: ct lint --config ct.yaml --charts helm-charts/secrets-operator
      - name: Create kind cluster
        uses: helm/kind-action@v1.12.0
      - name: Run chart-testing (install)
        run: ct install --config ct.yaml --charts helm-charts/secrets-operator


@@ -16,7 +16,7 @@ jobs:
const title = context.payload.pull_request.title;
// Valid PR types based on pull_request_template.md
-const validTypes = ['fix', 'feature', 'improvement', 'breaking', 'docs', 'chore'];
+const validTypes = ['fix', 'feature', 'improvement', 'breaking', 'docs', 'chore', 'feat'];
// Regex pattern: type(optional-scope): short description
// - Type must be one of the valid types
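For illustration, a sketch of the title check this script performs, reconstructed from the comment above; the exact regex in the workflow may differ:

```ts
// Hypothetical reconstruction: type, optional (scope), colon, space, description.
const validTypes = ["fix", "feature", "improvement", "breaking", "docs", "chore", "feat"];
const titlePattern = new RegExp(`^(${validTypes.join("|")})(\\([^)]+\\))?: .+`);

console.log(titlePattern.test("feat(api): add dynamic secret audit logs")); // true
console.log(titlePattern.test("update stuff")); // false
```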


@@ -100,13 +100,13 @@ To set up and run Infisical locally, make sure you have Git and Docker installed
Linux/macOS:
```console
-git clone https://github.com/Infisical/infisical && cd "$(basename $_ .git)" && cp .env.example .env && docker compose -f docker-compose.prod.yml up
+git clone https://github.com/Infisical/infisical && cd "$(basename $_ .git)" && cp .env.dev.example .env && docker compose -f docker-compose.prod.yml up
```
Windows Command Prompt:
```console
-git clone https://github.com/Infisical/infisical && cd infisical && copy .env.example .env && docker compose -f docker-compose.prod.yml up
+git clone https://github.com/Infisical/infisical && cd infisical && copy .env.dev.example .env && docker compose -f docker-compose.prod.yml up
```
Create an account at `http://localhost:80`


@@ -23,6 +23,9 @@ CERT_CA_ID = os.environ.get("CERT_CA_ID")
CERT_TEMPLATE_ID = os.environ.get("CERT_TEMPLATE_ID")
AUTH_TOKEN = os.environ.get("INFISICAL_TOKEN")
BOOTSTRAP_INFISICAL = int(os.environ.get("BOOTSTRAP_INFISICAL", 0))
+TECHNITIUM_URL = os.environ.get("TECHNITIUM_URL", "http://localhost:5380")
+TECHNITIUM_USER = os.environ.get("TECHNITIUM_USER", "admin")
+TECHNITIUM_PASSWORD = os.environ.get("TECHNITIUM_PASSWORD", "infisical")
# Called mostly from a CI to setup the new Infisical instance to get it ready for BDD tests
@@ -188,6 +191,9 @@ def before_all(context: Context):
    base_vars = {
        "BASE_URL": BASE_URL,
        "PEBBLE_URL": PEBBLE_URL,
+       "TECHNITIUM_URL": TECHNITIUM_URL,
+       "TECHNITIUM_USER": TECHNITIUM_USER,
+       "TECHNITIUM_PASSWORD": TECHNITIUM_PASSWORD,
    }
    if BOOTSTRAP_INFISICAL:
        details = bootstrap_infisical(context)
@@ -206,6 +212,7 @@ def before_all(context: Context):
    }
    context._initial_vars = vars
    context.http_client = httpx.Client(base_url=BASE_URL)
+   context.technitium_http_client = httpx.Client(base_url=TECHNITIUM_URL)


def before_scenario(context: Context, scenario: typing.Any):


@@ -19,13 +19,17 @@ Feature: Authorization
    And the value order.authorizations[0].body with jq ".challenges | map(pick(.type, .status)) | sort_by(.type)" should be equal to json
      """
      [
        {
          "type": "dns-01",
          "status": "pending"
        },
        {
          "type": "http-01",
          "status": "pending"
        }
      ]
      """
-   And the value order.authorizations[0].body with jq ".challenges | map(.status) | sort" should be equal to ["pending"]
+   And the value order.authorizations[0].body with jq ".challenges | map(.status) | sort" should be equal to ["pending", "pending"]
    And the value order.authorizations[0].body with jq ".identifier" should be equal to json
      """
      {


@@ -1,6 +1,6 @@
Feature: Challenge

- Scenario: Validate challenge
+ Scenario: Validate challenge with HTTP-01
    Given I have an ACME cert profile as "acme_profile"
    When I have an ACME client connecting to "{BASE_URL}/api/v1/cert-manager/acme/profiles/{acme_profile.id}/directory"
    Then I register a new ACME account with email fangpen@infisical.com and EAB key id "{acme_profile.eab_kid}" with secret "{acme_profile.eab_secret}" as acme_account

@@ -22,6 +22,28 @@ Feature: Challenge
    And I parse the full-chain certificate from order finalized_order as cert
    And the value cert with jq ".subject.common_name" should be equal to "localhost"

  Scenario: Validate challenge with DNS-01
    Given I have an ACME cert profile as "acme_profile"
    When I have an ACME client connecting to "{BASE_URL}/api/v1/cert-manager/acme/profiles/{acme_profile.id}/directory"
    Then I register a new ACME account with email fangpen@infisical.com and EAB key id "{acme_profile.eab_kid}" with secret "{acme_profile.eab_secret}" as acme_account
    When I create certificate signing request as csr
    Then I add names to certificate signing request csr
      """
      {
        "COMMON_NAME": "example.com"
      }
      """
    And I create a RSA private key pair as cert_key
    And I sign the certificate signing request csr with private key cert_key and output it as csr_pem in PEM format
    And I submit the certificate signing request PEM csr_pem certificate order to the ACME server as order
    And I select challenge with type dns-01 for domain example.com from order in order as challenge
    Then I add domain example.com challenge response DNS records for challenge
    And I tell ACME server that challenge is ready to be verified
    And I poll and finalize the ACME order order as finalized_order
    And the value finalized_order.body with jq ".status" should be equal to "valid"
    And I parse the full-chain certificate from order finalized_order as cert
    And the value cert with jq ".subject.common_name" should be equal to "example.com"

  Scenario: Validate challenge with retry
    Given I have an ACME cert profile as "acme_profile"
    When I have an ACME client connecting to "{BASE_URL}/api/v1/cert-manager/acme/profiles/{acme_profile.id}/directory"


@@ -266,9 +266,7 @@ def step_impl(context: Context, ca_id: str, template_id: str, profile_var: str):
)


-@given(
-    'I create an ACME profile with config as "{profile_var}"'
-)
+@given('I create an ACME profile with config as "{profile_var}"')
def step_impl(context: Context, profile_var: str):
    profile_slug = faker.slug()
    jwt_token = context.vars["AUTH_TOKEN"]

@@ -1030,6 +1028,58 @@ def step_impl(context: Context, var_path: str):
    serve_challenges(context=context, challenges=[challenge])


@then("I add domain {domain} challenge response DNS records for {var_path}")
def step_impl(context: Context, domain: str, var_path: str):
    client = context.technitium_http_client
    challenge = eval_var(context, var_path, as_json=False)
    zone = domain
    domain = f"{challenge.chall.LABEL}.{domain}"
    value = challenge.chall.validation(context.acme_client.net.key)

    resp = client.post(
        "/api/user/login",
        data={
            "user": context.vars["TECHNITIUM_USER"],
            "pass": context.vars["TECHNITIUM_PASSWORD"],
        },
    )
    resp.raise_for_status()
    token = resp.json()["token"]

    resp = client.post(
        "/api/zones/create",
        params=dict(
            token=token,
            zone=zone,
            type="Primary",
        ),
    )
    resp.raise_for_status()
    error_msg = resp.json().get("errorMessage")
    if error_msg is not None and not error_msg.startswith("Zone already exists:"):
        raise RuntimeError(f"Unexpected error while creating zone {zone}: {error_msg}")

    resp = client.post(
        "/api/zones/records/add",
        params=dict(
            token=token,
            zone=zone,
            domain=domain,
            type="TXT",
            text=value,
        ),
    )
    resp.raise_for_status()
    error_msg = resp.json().get("errorMessage")
    if error_msg is not None and not error_msg.startswith(
        "Cannot add record: record already exists"
    ):
        raise RuntimeError(
            f"Unexpected error while creating TXT record {domain} for zone {zone}: {error_msg}"
        )


@then("I tell ACME server that {var_path} is ready to be verified")
def step_impl(context: Context, var_path: str):
    challenge = eval_var(context, var_path, as_json=False)


@@ -98,9 +98,11 @@ const main = async () => {
(el) =>
!el.tableName.includes("_migrations") &&
!el.tableName.includes("audit_logs_") &&
!el.tableName.includes("certificate_requests_") &&
!el.tableName.includes("user_notifications_") &&
!el.tableName.includes("active_locks") &&
-el.tableName !== "intermediate_audit_logs"
+el.tableName !== "intermediate_audit_logs" &&
+el.tableName !== "intermediate_certificate_requests"
);
for (let i = 0; i < tables.length; i += 1) {


@@ -0,0 +1,171 @@
/* eslint-disable no-console */
import { Knex } from "knex";
import { TableName } from "../schemas";
import { createOnUpdateTrigger } from "../utils";
const INTERMEDIATE_CERTIFICATE_REQUESTS_TABLE = "intermediate_certificate_requests";
const formatPartitionDate = (date: Date) => {
const year = date.getFullYear();
const month = String(date.getMonth() + 1).padStart(2, "0");
const day = String(date.getDate()).padStart(2, "0");
return `${year}-${month}-${day}`;
};
const createCertificateRequestPartition = async (knex: Knex, startDate: Date, endDate: Date) => {
const startDateStr = formatPartitionDate(startDate);
const endDateStr = formatPartitionDate(endDate);
const partitionName = `${TableName.CertificateRequests}_${startDateStr.replace(/-/g, "")}_${endDateStr.replace(/-/g, "")}`;
await knex.schema.raw(
`CREATE TABLE ${partitionName} PARTITION OF ${TableName.CertificateRequests} FOR VALUES FROM ('${startDateStr}') TO ('${endDateStr}')`
);
};
export async function up(knex: Knex): Promise<void> {
// Check if table is already partitioned by looking for partition information
const partitionInfo: { rows: { schemaname: string; tablename: string }[] } = await knex.raw(
`
SELECT schemaname, tablename
FROM pg_tables
WHERE tablename LIKE '${TableName.CertificateRequests}_%'
AND schemaname = 'public'
`
);
if (partitionInfo.rows.length > 0) {
console.info("Certificate requests table is already partitioned, skipping migration...");
return;
}
if (await knex.schema.hasTable(TableName.CertificateRequests)) {
console.info("Converting existing certificate_requests table to partitioned table...");
// Drop primary key constraint
console.info("Dropping primary key of certificate_requests table...");
await knex.schema.alterTable(TableName.CertificateRequests, (t) => {
t.dropPrimary();
});
// Get all indices of the certificate_requests table and drop them
const indexNames: { rows: { indexname: string }[] } = await knex.raw(
`
SELECT indexname
FROM pg_indexes
WHERE tablename = '${TableName.CertificateRequests}'
`
);
console.log(
"Deleting existing certificate_requests indices:",
indexNames.rows.map((e) => e.indexname)
);
for await (const row of indexNames.rows) {
await knex.raw(`DROP INDEX IF EXISTS ??`, [row.indexname]);
}
// Rename existing table to intermediate name
console.log("Renaming certificate_requests table to intermediate name");
await knex.schema.renameTable(TableName.CertificateRequests, INTERMEDIATE_CERTIFICATE_REQUESTS_TABLE);
// Create new partitioned table with same schema - MUST MATCH EXACTLY the original table
const createTableSql = knex.schema
.createTable(TableName.CertificateRequests, (t) => {
t.uuid("id").defaultTo(knex.fn.uuid());
t.timestamps(true, true, true);
t.string("status").notNullable();
t.string("projectId").notNullable();
t.uuid("profileId").nullable();
t.uuid("caId").nullable();
t.uuid("certificateId").nullable();
t.text("csr").nullable();
t.string("commonName").nullable();
t.text("altNames").nullable();
t.specificType("keyUsages", "text[]").nullable();
t.specificType("extendedKeyUsages", "text[]").nullable();
t.datetime("notBefore").nullable();
t.datetime("notAfter").nullable();
t.string("keyAlgorithm").nullable();
t.string("signatureAlgorithm").nullable();
t.text("errorMessage").nullable();
t.text("metadata").nullable();
t.uuid("acmeOrderId").nullable();
t.primary(["id", "createdAt"]);
})
.toString();
console.info("Creating partitioned certificate_requests table...");
await knex.schema.raw(`${createTableSql} PARTITION BY RANGE ("createdAt")`);
console.log("Adding indices...");
await knex.schema.alterTable(TableName.CertificateRequests, (t) => {
t.foreign("projectId").references("id").inTable(TableName.Project).onDelete("CASCADE");
t.foreign("profileId").references("id").inTable(TableName.PkiCertificateProfile).onDelete("SET NULL");
t.foreign("caId").references("id").inTable(TableName.CertificateAuthority).onDelete("SET NULL");
t.foreign("certificateId").references("id").inTable(TableName.Certificate).onDelete("SET NULL");
t.index("status");
t.index(["projectId", "status"]);
t.index(["projectId", "createdAt"]);
t.index("acmeOrderId", "certificate_requests_acme_order_id_idx");
});
// Create default partition
console.log("Creating default partition...");
await knex.schema.raw(
`CREATE TABLE ${TableName.CertificateRequests}_default PARTITION OF ${TableName.CertificateRequests} DEFAULT`
);
const nextDate = new Date();
nextDate.setDate(nextDate.getDate() + 1);
const nextDateStr = formatPartitionDate(nextDate);
console.log("Attaching existing certificate_requests table as a partition...");
await knex.schema.raw(
`
ALTER TABLE ${INTERMEDIATE_CERTIFICATE_REQUESTS_TABLE} ADD CONSTRAINT certificate_requests_old
CHECK ( "createdAt" < DATE '${nextDateStr}' );
ALTER TABLE ${TableName.CertificateRequests} ATTACH PARTITION ${INTERMEDIATE_CERTIFICATE_REQUESTS_TABLE}
FOR VALUES FROM (MINVALUE) TO ('${nextDateStr}' );
`
);
// Create partition from next day until end of month
console.log("Creating certificate_requests partitions ahead of time... next date:", nextDateStr);
await createCertificateRequestPartition(
knex,
nextDate,
new Date(nextDate.getFullYear(), nextDate.getMonth() + 1, 1)
);
// Create partitions 20 years ahead for certificate requests
const partitionMonths = 20 * 12;
const partitionPromises: Promise<void>[] = [];
for (let x = 1; x <= partitionMonths; x += 1) {
partitionPromises.push(
createCertificateRequestPartition(
knex,
new Date(nextDate.getFullYear(), nextDate.getMonth() + x, 1),
new Date(nextDate.getFullYear(), nextDate.getMonth() + (x + 1), 1)
)
);
}
await Promise.all(partitionPromises);
await createOnUpdateTrigger(knex, TableName.CertificateRequests);
console.log("Certificate requests partition migration complete");
} else {
console.log("Certificate requests table does not exist, skipping partitioning migration");
}
}
export async function down(): Promise<void> {
// skip
}
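For clarity, this is roughly the DDL string the `createCertificateRequestPartition` helper above builds for one monthly partition, assuming `TableName.CertificateRequests` resolves to `certificate_requests`:

```ts
// Illustrative only: the SQL emitted for a February 2026 partition.
const sql =
  `CREATE TABLE certificate_requests_20260201_20260301 ` +
  `PARTITION OF certificate_requests ` +
  `FOR VALUES FROM ('2026-02-01') TO ('2026-03-01')`;
```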


@@ -1,6 +1,7 @@
import { z } from "zod";
import { DynamicSecretLeasesSchema } from "@app/db/schemas";
+import { EventType } from "@app/ee/services/audit-log/audit-log-types";
import { ApiDocsTags, DYNAMIC_SECRET_LEASES } from "@app/lib/api-docs";
import { removeTrailingSlash } from "@app/lib/fn";
import { ms } from "@app/lib/ms";
@@ -48,14 +49,35 @@ export const registerDynamicSecretLeaseRouter = async (server: FastifyZodProvide
},
onRequest: verifyAuth([AuthMode.JWT, AuthMode.IDENTITY_ACCESS_TOKEN]),
handler: async (req) => {
const { data, lease, dynamicSecret } = await server.services.dynamicSecretLease.create({
actor: req.permission.type,
actorId: req.permission.id,
actorAuthMethod: req.permission.authMethod,
actorOrgId: req.permission.orgId,
name: req.body.dynamicSecretName,
...req.body
const { data, lease, dynamicSecret, projectId, environment, secretPath } =
await server.services.dynamicSecretLease.create({
actor: req.permission.type,
actorId: req.permission.id,
actorAuthMethod: req.permission.authMethod,
actorOrgId: req.permission.orgId,
name: req.body.dynamicSecretName,
...req.body
});
await server.services.auditLog.createAuditLog({
...req.auditLogInfo,
projectId,
event: {
type: EventType.CREATE_DYNAMIC_SECRET_LEASE,
metadata: {
dynamicSecretName: dynamicSecret.name,
dynamicSecretType: dynamicSecret.type,
dynamicSecretId: dynamicSecret.id,
projectId,
environment,
secretPath,
leaseId: lease.id,
leaseExternalEntityId: lease.externalEntityId,
leaseExpireAt: lease.expireAt
}
}
});
return { lease, data, dynamicSecret };
}
});
@@ -92,14 +114,36 @@ export const registerDynamicSecretLeaseRouter = async (server: FastifyZodProvide
},
onRequest: verifyAuth([AuthMode.JWT, AuthMode.IDENTITY_ACCESS_TOKEN]),
handler: async (req) => {
const lease = await server.services.dynamicSecretLease.revokeLease({
actor: req.permission.type,
actorId: req.permission.id,
actorAuthMethod: req.permission.authMethod,
actorOrgId: req.permission.orgId,
leaseId: req.params.leaseId,
...req.body
const { lease, dynamicSecret, projectId, environment, secretPath } =
await server.services.dynamicSecretLease.revokeLease({
actor: req.permission.type,
actorId: req.permission.id,
actorAuthMethod: req.permission.authMethod,
actorOrgId: req.permission.orgId,
leaseId: req.params.leaseId,
...req.body
});
await server.services.auditLog.createAuditLog({
...req.auditLogInfo,
projectId,
event: {
type: EventType.DELETE_DYNAMIC_SECRET_LEASE,
metadata: {
dynamicSecretName: dynamicSecret.name,
dynamicSecretType: dynamicSecret.type,
dynamicSecretId: dynamicSecret.id,
leaseId: lease.id,
leaseExternalEntityId: lease.externalEntityId,
leaseStatus: lease.status,
environment,
secretPath,
projectId,
isForced: req.body.isForced
}
}
});
return { lease };
}
});
@@ -147,14 +191,35 @@ export const registerDynamicSecretLeaseRouter = async (server: FastifyZodProvide
},
onRequest: verifyAuth([AuthMode.JWT, AuthMode.IDENTITY_ACCESS_TOKEN]),
handler: async (req) => {
const lease = await server.services.dynamicSecretLease.renewLease({
actor: req.permission.type,
actorId: req.permission.id,
actorAuthMethod: req.permission.authMethod,
actorOrgId: req.permission.orgId,
leaseId: req.params.leaseId,
...req.body
const { lease, dynamicSecret, projectId, environment, secretPath } =
await server.services.dynamicSecretLease.renewLease({
actor: req.permission.type,
actorId: req.permission.id,
actorAuthMethod: req.permission.authMethod,
actorOrgId: req.permission.orgId,
leaseId: req.params.leaseId,
...req.body
});
await server.services.auditLog.createAuditLog({
...req.auditLogInfo,
projectId,
event: {
type: EventType.RENEW_DYNAMIC_SECRET_LEASE,
metadata: {
dynamicSecretName: dynamicSecret.name,
dynamicSecretType: dynamicSecret.type,
dynamicSecretId: dynamicSecret.id,
leaseId: lease.id,
leaseExternalEntityId: lease.externalEntityId,
newLeaseExpireAt: lease.expireAt,
environment,
secretPath,
projectId
}
}
});
return { lease };
}
});
@@ -191,15 +256,41 @@ export const registerDynamicSecretLeaseRouter = async (server: FastifyZodProvide
},
onRequest: verifyAuth([AuthMode.JWT, AuthMode.IDENTITY_ACCESS_TOKEN]),
handler: async (req) => {
const lease = await server.services.dynamicSecretLease.getLeaseDetails({
actor: req.permission.type,
actorId: req.permission.id,
actorAuthMethod: req.permission.authMethod,
actorOrgId: req.permission.orgId,
leaseId: req.params.leaseId,
...req.query
const { lease, dynamicSecret, projectId, environment, secretPath } =
await server.services.dynamicSecretLease.getLeaseDetails({
actor: req.permission.type,
actorId: req.permission.id,
actorAuthMethod: req.permission.authMethod,
actorOrgId: req.permission.orgId,
leaseId: req.params.leaseId,
...req.query
});
await server.services.auditLog.createAuditLog({
...req.auditLogInfo,
projectId,
event: {
type: EventType.GET_DYNAMIC_SECRET_LEASE,
metadata: {
dynamicSecretName: dynamicSecret.name,
dynamicSecretId: dynamicSecret.id,
dynamicSecretType: dynamicSecret.type,
leaseId: lease.id,
leaseExternalEntityId: lease.externalEntityId,
leaseExpireAt: lease.expireAt,
environment,
secretPath,
projectId
}
}
});
-return { lease };
+return {
+  lease: {
+    ...lease,
+    dynamicSecret
+  }
+};
}
});
};


@@ -1,6 +1,7 @@
import { z } from "zod";
import { DynamicSecretLeasesSchema } from "@app/db/schemas";
+import { EventType } from "@app/ee/services/audit-log/audit-log-types";
import { DynamicSecretProviderSchema } from "@app/ee/services/dynamic-secret/providers/models";
import { ApiDocsTags, DYNAMIC_SECRETS } from "@app/lib/api-docs";
import { removeTrailingSlash } from "@app/lib/fn";
@@ -98,6 +99,27 @@ export const registerDynamicSecretRouter = async (server: FastifyZodProvider) =>
actorOrgId: req.permission.orgId,
...req.body
});
await server.services.auditLog.createAuditLog({
...req.auditLogInfo,
projectId: dynamicSecretCfg.projectId,
event: {
type: EventType.CREATE_DYNAMIC_SECRET,
metadata: {
dynamicSecretName: dynamicSecretCfg.name,
dynamicSecretType: dynamicSecretCfg.type,
dynamicSecretId: dynamicSecretCfg.id,
defaultTTL: dynamicSecretCfg.defaultTTL,
maxTTL: dynamicSecretCfg.maxTTL,
gatewayV2Id: dynamicSecretCfg.gatewayV2Id,
usernameTemplate: dynamicSecretCfg.usernameTemplate,
environment: dynamicSecretCfg.environment,
secretPath: dynamicSecretCfg.secretPath,
projectId: dynamicSecretCfg.projectId
}
}
});
return { dynamicSecret: dynamicSecretCfg };
}
});
@@ -160,18 +182,36 @@ export const registerDynamicSecretRouter = async (server: FastifyZodProvider) =>
},
onRequest: verifyAuth([AuthMode.JWT, AuthMode.IDENTITY_ACCESS_TOKEN]),
handler: async (req) => {
const dynamicSecretCfg = await server.services.dynamicSecret.updateByName({
actor: req.permission.type,
actorId: req.permission.id,
actorAuthMethod: req.permission.authMethod,
actorOrgId: req.permission.orgId,
name: req.params.name,
path: req.body.path,
projectSlug: req.body.projectSlug,
environmentSlug: req.body.environmentSlug,
...req.body.data
const { dynamicSecret, updatedFields, projectId, environment, secretPath } =
await server.services.dynamicSecret.updateByName({
actor: req.permission.type,
actorId: req.permission.id,
actorAuthMethod: req.permission.authMethod,
actorOrgId: req.permission.orgId,
name: req.params.name,
path: req.body.path,
projectSlug: req.body.projectSlug,
environmentSlug: req.body.environmentSlug,
...req.body.data
});
await server.services.auditLog.createAuditLog({
...req.auditLogInfo,
projectId,
event: {
type: EventType.UPDATE_DYNAMIC_SECRET,
metadata: {
dynamicSecretName: dynamicSecret.name,
dynamicSecretType: dynamicSecret.type,
dynamicSecretId: dynamicSecret.id,
environment,
secretPath,
projectId,
updatedFields
}
}
});
-return { dynamicSecret: dynamicSecretCfg };
+return { dynamicSecret };
}
});
@@ -209,6 +249,23 @@ export const registerDynamicSecretRouter = async (server: FastifyZodProvider) =>
name: req.params.name,
...req.body
});
await server.services.auditLog.createAuditLog({
...req.auditLogInfo,
projectId: dynamicSecretCfg.projectId,
event: {
type: EventType.DELETE_DYNAMIC_SECRET,
metadata: {
dynamicSecretName: dynamicSecretCfg.name,
dynamicSecretType: dynamicSecretCfg.type,
dynamicSecretId: dynamicSecretCfg.id,
environment: dynamicSecretCfg.environment,
secretPath: dynamicSecretCfg.secretPath,
projectId: dynamicSecretCfg.projectId
}
}
});
return { dynamicSecret: dynamicSecretCfg };
}
});
@@ -249,6 +306,22 @@ export const registerDynamicSecretRouter = async (server: FastifyZodProvider) =>
...req.query
});
await server.services.auditLog.createAuditLog({
...req.auditLogInfo,
projectId: dynamicSecretCfg.projectId,
event: {
type: EventType.GET_DYNAMIC_SECRET,
metadata: {
dynamicSecretName: dynamicSecretCfg.name,
dynamicSecretType: dynamicSecretCfg.type,
dynamicSecretId: dynamicSecretCfg.id,
environment: dynamicSecretCfg.environment,
secretPath: dynamicSecretCfg.secretPath,
projectId: dynamicSecretCfg.projectId
}
}
});
return { dynamicSecret: dynamicSecretCfg };
}
});
@@ -275,14 +348,29 @@ export const registerDynamicSecretRouter = async (server: FastifyZodProvider) =>
},
onRequest: verifyAuth([AuthMode.JWT, AuthMode.IDENTITY_ACCESS_TOKEN]),
handler: async (req) => {
const dynamicSecretCfgs = await server.services.dynamicSecret.listDynamicSecretsByEnv({
actor: req.permission.type,
actorId: req.permission.id,
actorAuthMethod: req.permission.authMethod,
actorOrgId: req.permission.orgId,
...req.query
const { dynamicSecrets, environment, secretPath, projectId } =
await server.services.dynamicSecret.listDynamicSecretsByEnv({
actor: req.permission.type,
actorId: req.permission.id,
actorAuthMethod: req.permission.authMethod,
actorOrgId: req.permission.orgId,
...req.query
});
await server.services.auditLog.createAuditLog({
...req.auditLogInfo,
projectId,
event: {
type: EventType.LIST_DYNAMIC_SECRETS,
metadata: {
environment,
secretPath,
projectId
}
}
});
-return { dynamicSecrets: dynamicSecretCfgs };
+return { dynamicSecrets };
}
});
@@ -316,14 +404,33 @@ export const registerDynamicSecretRouter = async (server: FastifyZodProvider) =>
},
onRequest: verifyAuth([AuthMode.JWT, AuthMode.IDENTITY_ACCESS_TOKEN]),
handler: async (req) => {
const leases = await server.services.dynamicSecretLease.listLeases({
actor: req.permission.type,
actorId: req.permission.id,
actorAuthMethod: req.permission.authMethod,
actorOrgId: req.permission.orgId,
name: req.params.name,
...req.query
const { leases, dynamicSecret, projectId, environment, secretPath } =
await server.services.dynamicSecretLease.listLeases({
actor: req.permission.type,
actorId: req.permission.id,
actorAuthMethod: req.permission.authMethod,
actorOrgId: req.permission.orgId,
name: req.params.name,
...req.query
});
await server.services.auditLog.createAuditLog({
...req.auditLogInfo,
projectId,
event: {
type: EventType.LIST_DYNAMIC_SECRET_LEASES,
metadata: {
dynamicSecretName: dynamicSecret.name,
dynamicSecretType: dynamicSecret.type,
dynamicSecretId: dynamicSecret.id,
environment,
secretPath,
projectId,
leaseCount: leases.length
}
}
});
return { leases };
}
});


@@ -395,6 +395,7 @@ export enum EventType {
CREATE_CERTIFICATE_REQUEST = "create-certificate-request",
GET_CERTIFICATE_REQUEST = "get-certificate-request",
GET_CERTIFICATE_FROM_REQUEST = "get-certificate-from-request",
+LIST_CERTIFICATE_REQUESTS = "list-certificate-requests",
ATTEMPT_CREATE_SLACK_INTEGRATION = "attempt-create-slack-integration",
ATTEMPT_REINSTALL_SLACK_INTEGRATION = "attempt-reinstall-slack-integration",
GET_PROJECT_SLACK_CONFIG = "get-project-slack-config",
@@ -615,7 +616,21 @@ export enum EventType {
MCP_SERVER_SYNC_TOOLS = "mcp-server-sync-tools",
// MCP Activity Logs
-MCP_ACTIVITY_LOG_LIST = "mcp-activity-log-list"
+MCP_ACTIVITY_LOG_LIST = "mcp-activity-log-list",
// Dynamic Secrets
CREATE_DYNAMIC_SECRET = "create-dynamic-secret",
UPDATE_DYNAMIC_SECRET = "update-dynamic-secret",
DELETE_DYNAMIC_SECRET = "delete-dynamic-secret",
GET_DYNAMIC_SECRET = "get-dynamic-secret",
LIST_DYNAMIC_SECRETS = "list-dynamic-secrets",
// Dynamic Secret Leases
CREATE_DYNAMIC_SECRET_LEASE = "create-dynamic-secret-lease",
DELETE_DYNAMIC_SECRET_LEASE = "delete-dynamic-secret-lease",
RENEW_DYNAMIC_SECRET_LEASE = "renew-dynamic-secret-lease",
LIST_DYNAMIC_SECRET_LEASES = "list-dynamic-secret-leases",
GET_DYNAMIC_SECRET_LEASE = "get-dynamic-secret-lease"
}
export const filterableSecretEvents: EventType[] = [
@@ -4316,6 +4331,18 @@ interface GetCertificateFromRequestEvent {
};
}
interface ListCertificateRequestsEvent {
type: EventType.LIST_CERTIFICATE_REQUESTS;
metadata: {
offset: number;
limit: number;
search?: string;
status?: string;
count: number;
certificateRequestIds: string[];
};
}
interface ApprovalPolicyCreateEvent {
type: EventType.APPROVAL_POLICY_CREATE;
metadata: {
@@ -4701,6 +4728,157 @@ interface McpActivityLogListEvent {
};
}
interface GetDynamicSecretLeaseEvent {
type: EventType.GET_DYNAMIC_SECRET_LEASE;
metadata: {
dynamicSecretName: string;
dynamicSecretId: string;
dynamicSecretType: string;
leaseId: string;
leaseExternalEntityId: string;
leaseExpireAt: Date;
projectId: string;
environment: string;
secretPath: string;
};
}
interface RenewDynamicSecretLeaseEvent {
type: EventType.RENEW_DYNAMIC_SECRET_LEASE;
metadata: {
dynamicSecretName: string;
dynamicSecretId: string;
dynamicSecretType: string;
leaseId: string;
leaseExternalEntityId: string;
newLeaseExpireAt: Date;
environment: string;
secretPath: string;
projectId: string;
};
}
interface CreateDynamicSecretLeaseEvent {
type: EventType.CREATE_DYNAMIC_SECRET_LEASE;
metadata: {
dynamicSecretName: string;
dynamicSecretId: string;
dynamicSecretType: string;
leaseId: string;
leaseExternalEntityId: string;
leaseExpireAt: Date;
environment: string;
secretPath: string;
projectId: string;
};
}
interface DeleteDynamicSecretLeaseEvent {
type: EventType.DELETE_DYNAMIC_SECRET_LEASE;
metadata: {
dynamicSecretName: string;
dynamicSecretId: string;
dynamicSecretType: string;
leaseId: string;
leaseExternalEntityId: string;
leaseStatus?: string | null;
environment: string;
secretPath: string;
projectId: string;
isForced: boolean;
};
}
interface CreateDynamicSecretEvent {
type: EventType.CREATE_DYNAMIC_SECRET;
metadata: {
dynamicSecretName: string;
dynamicSecretType: string;
dynamicSecretId: string;
defaultTTL: string;
maxTTL?: string | null;
gatewayV2Id?: string | null;
usernameTemplate?: string | null;
environment: string;
secretPath: string;
projectId: string;
};
}
interface UpdateDynamicSecretEvent {
type: EventType.UPDATE_DYNAMIC_SECRET;
metadata: {
dynamicSecretName: string;
dynamicSecretId: string;
dynamicSecretType: string;
updatedFields: string[];
environment: string;
secretPath: string;
projectId: string;
};
}
interface DeleteDynamicSecretEvent {
type: EventType.DELETE_DYNAMIC_SECRET;
metadata: {
dynamicSecretName: string;
dynamicSecretId: string;
dynamicSecretType: string;
environment: string;
secretPath: string;
projectId: string;
};
}
interface GetDynamicSecretEvent {
type: EventType.GET_DYNAMIC_SECRET;
metadata: {
dynamicSecretName: string;
dynamicSecretId: string;
dynamicSecretType: string;
environment: string;
secretPath: string;
projectId: string;
};
}
interface ListDynamicSecretsEvent {
type: EventType.LIST_DYNAMIC_SECRETS;
metadata: {
environment: string;
secretPath: string;
projectId: string;
};
}
interface ListDynamicSecretLeasesEvent {
type: EventType.LIST_DYNAMIC_SECRET_LEASES;
metadata: {
dynamicSecretName: string;
dynamicSecretId: string;
dynamicSecretType: string;
environment: string;
secretPath: string;
projectId: string;
leaseCount: number;
};
}
export type Event =
| CreateSubOrganizationEvent
| UpdateSubOrganizationEvent
@@ -5083,6 +5261,7 @@ export type Event =
| CreateCertificateRequestEvent
| GetCertificateRequestEvent
| GetCertificateFromRequestEvent
+| ListCertificateRequestsEvent
| AutomatedRenewCertificate
| AutomatedRenewCertificateFailed
| UserLoginEvent
@@ -5131,4 +5310,14 @@ export type Event =
| McpServerListEvent
| McpServerListToolsEvent
| McpServerSyncToolsEvent
-| McpActivityLogListEvent;
+| McpActivityLogListEvent
| CreateDynamicSecretEvent
| UpdateDynamicSecretEvent
| DeleteDynamicSecretEvent
| GetDynamicSecretEvent
| ListDynamicSecretsEvent
| ListDynamicSecretLeasesEvent
| CreateDynamicSecretLeaseEvent
| DeleteDynamicSecretLeaseEvent
| RenewDynamicSecretLeaseEvent
| GetDynamicSecretLeaseEvent;


@@ -179,7 +179,14 @@ export const dynamicSecretLeaseServiceFactory = ({
});
await dynamicSecretQueueService.setLeaseRevocation(dynamicSecretLease.id, dynamicSecretCfg.id, expireAt);
-return { lease: dynamicSecretLease, dynamicSecret: dynamicSecretCfg, data };
+return {
+  lease: dynamicSecretLease,
+  dynamicSecret: dynamicSecretCfg,
+  data,
+  projectId,
+  environment: environmentSlug,
+  secretPath: path
+};
};
const renewLease: TDynamicSecretLeaseServiceFactory["renewLease"] = async ({
@@ -277,7 +284,13 @@ export const dynamicSecretLeaseServiceFactory = ({
expireAt,
externalEntityId: entityId
});
-return updatedDynamicSecretLease;
+return {
+  lease: updatedDynamicSecretLease,
+  dynamicSecret: dynamicSecretCfg,
+  projectId,
+  environment: environmentSlug,
+  secretPath: path
+};
};
const revokeLease: TDynamicSecretLeaseServiceFactory["revokeLease"] = async ({
@@ -364,12 +377,24 @@ export const dynamicSecretLeaseServiceFactory = ({
});
// queue a job to retry the revocation at a later time
await dynamicSecretQueueService.queueFailedRevocation(dynamicSecretLease.id, dynamicSecretCfg.id);
-return updatedDynamicSecretLease;
+return {
+  lease: updatedDynamicSecretLease,
+  dynamicSecret: dynamicSecretCfg,
+  projectId,
+  environment: environmentSlug,
+  secretPath: path
+};
}
await dynamicSecretQueueService.unsetLeaseRevocation(dynamicSecretLease.id);
const deletedDynamicSecretLease = await dynamicSecretLeaseDAL.deleteById(dynamicSecretLease.id);
-return deletedDynamicSecretLease;
+return {
+  lease: deletedDynamicSecretLease,
+  dynamicSecret: dynamicSecretCfg,
+  projectId,
+  environment: environmentSlug,
+  secretPath: path
+};
};
const listLeases: TDynamicSecretLeaseServiceFactory["listLeases"] = async ({
@@ -417,7 +442,13 @@ export const dynamicSecretLeaseServiceFactory = ({
);
const dynamicSecretLeases = await dynamicSecretLeaseDAL.find({ dynamicSecretId: dynamicSecretCfg.id });
-return dynamicSecretLeases;
+return {
+  leases: dynamicSecretLeases,
+  dynamicSecret: dynamicSecretCfg,
+  projectId,
+  environment: environmentSlug,
+  secretPath: path
+};
};
const getLeaseDetails: TDynamicSecretLeaseServiceFactory["getLeaseDetails"] = async ({
@@ -469,7 +500,13 @@ export const dynamicSecretLeaseServiceFactory = ({
})
);
-return dynamicSecretLease;
+return {
+  lease: dynamicSecretLease,
+  dynamicSecret: dynamicSecretCfg,
+  projectId,
+  environment: environmentSlug,
+  secretPath: path
+};
};
return {


@@ -55,34 +55,36 @@ export type TDynamicSecretLeaseServiceFactory = {
    lease: TDynamicSecretLeases;
    dynamicSecret: TDynamicSecretWithMetadata;
    data: unknown;
+   projectId: string;
+   environment: string;
+   secretPath: string;
  }>;
+ listLeases: (arg: TListDynamicSecretLeasesDTO) => Promise<{
+   leases: TDynamicSecretLeases[];
+   dynamicSecret: TDynamicSecretWithMetadata;
+   projectId: string;
+   environment: string;
+   secretPath: string;
+ }>;
+ revokeLease: (arg: TDeleteDynamicSecretLeaseDTO) => Promise<{
+   lease: TDynamicSecretLeases;
+   dynamicSecret: TDynamicSecretWithMetadata;
+   projectId: string;
+   environment: string;
+   secretPath: string;
+ }>;
+ renewLease: (arg: TRenewDynamicSecretLeaseDTO) => Promise<{
+   lease: TDynamicSecretLeases;
+   dynamicSecret: TDynamicSecretWithMetadata;
+   projectId: string;
+   environment: string;
+   secretPath: string;
+ }>;
- listLeases: (arg: TListDynamicSecretLeasesDTO) => Promise<TDynamicSecretLeases[]>;
- revokeLease: (arg: TDeleteDynamicSecretLeaseDTO) => Promise<TDynamicSecretLeases>;
- renewLease: (arg: TRenewDynamicSecretLeaseDTO) => Promise<TDynamicSecretLeases>;
  getLeaseDetails: (arg: TDetailsDynamicSecretLeaseDTO) => Promise<{
-   dynamicSecret: {
-     id: string;
-     name: string;
-     version: number;
-     type: string;
-     defaultTTL: string;
-     maxTTL: string | null | undefined;
-     encryptedInput: Buffer;
-     folderId: string;
-     status: string | null | undefined;
-     statusDetails: string | null | undefined;
-     createdAt: Date;
-     updatedAt: Date;
-   };
-   version: number;
-   id: string;
-   createdAt: Date;
-   updatedAt: Date;
-   externalEntityId: string;
-   expireAt: Date;
-   dynamicSecretId: string;
-   status?: string | null | undefined;
-   config?: unknown;
-   statusDetails?: string | null | undefined;
+   dynamicSecret: TDynamicSecretWithMetadata;
+   lease: TDynamicSecretLeases;
+   projectId: string;
+   environment: string;
+   secretPath: string;
  }>;
};


@@ -6,6 +6,8 @@ import { BadRequestError } from "@app/lib/errors";
import { isPrivateIp } from "@app/lib/ip/ipRange";
import { getDbConnectionHost } from "@app/lib/knex";
+const ERROR_MESSAGE = "Invalid host";
export const verifyHostInputValidity = async (host: string, isGateway = false) => {
const appCfg = getConfig();
@@ -40,13 +42,13 @@ export const verifyHostInputValidity = async (host: string, isGateway = false) =
}
}
-const normalizedHost = host.split(":")[0];
+const normalizedHost = host.split(":")[0].toLowerCase();
const inputHostIps: string[] = [];
if (net.isIPv4(host)) {
inputHostIps.push(host);
} else {
if (normalizedHost === "localhost" || normalizedHost === "host.docker.internal") {
-throw new BadRequestError({ message: "Invalid db host" });
+throw new BadRequestError({ message: ERROR_MESSAGE });
}
try {
const resolvedIps = await dns.resolve4(host);
@@ -62,10 +64,10 @@ export const verifyHostInputValidity = async (host: string, isGateway = false) =
if (!(appCfg.DYNAMIC_SECRET_ALLOW_INTERNAL_IP || appCfg.ALLOW_INTERNAL_IP_CONNECTIONS)) {
const isInternalIp = inputHostIps.some((el) => isPrivateIp(el));
-if (isInternalIp) throw new BadRequestError({ message: "Invalid db host" });
+if (isInternalIp) throw new BadRequestError({ message: ERROR_MESSAGE });
}
const isAppUsedIps = inputHostIps.some((el) => exclusiveIps.includes(el));
-if (isAppUsedIps) throw new BadRequestError({ message: "Invalid db host" });
+if (isAppUsedIps) throw new BadRequestError({ message: ERROR_MESSAGE });
return inputHostIps;
};
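A usage sketch of the validator above with hypothetical hosts, assuming the internal-IP allow flags are unset:

```ts
// Resolves and returns the host's public IPv4 addresses.
await verifyHostInputValidity("db.example.com");

// Rejected by the normalized-hostname check above:
await verifyHostInputValidity("localhost"); // throws BadRequestError("Invalid host")

// Rejected by the private-IP check unless DYNAMIC_SECRET_ALLOW_INTERNAL_IP
// or ALLOW_INTERNAL_IP_CONNECTIONS is enabled:
await verifyHostInputValidity("10.0.0.5");
```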


@@ -9,6 +9,7 @@ import {
} from "@app/ee/services/permission/project-permission";
import { crypto } from "@app/lib/crypto";
import { BadRequestError, NotFoundError } from "@app/lib/errors";
+import { extractObjectFieldPaths } from "@app/lib/fn";
import { OrderByDirection } from "@app/lib/types";
import { TKmsServiceFactory } from "@app/services/kms/kms-service";
import { KmsDataKey } from "@app/services/kms/kms-types";
@@ -44,6 +45,34 @@ type TDynamicSecretServiceFactoryDep = {
resourceMetadataDAL: Pick<TResourceMetadataDALFactory, "insertMany" | "delete">;
};
const getUpdatedFieldPaths = (
oldData: Record<string, unknown> | null | undefined,
newData: Record<string, unknown> | null | undefined
): string[] => {
const updatedPaths = new Set<string>();
if (!newData || typeof newData !== "object") {
return [];
}
if (!oldData || typeof oldData !== "object") {
return [];
}
Object.keys(newData).forEach((key) => {
const oldValue = oldData?.[key];
const newValue = newData[key];
if (JSON.stringify(oldValue) !== JSON.stringify(newValue)) {
// Extract paths from the new value
const paths = extractObjectFieldPaths(newValue, key);
paths.forEach((path) => updatedPaths.add(path));
}
});
return Array.from(updatedPaths).sort();
};
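A sketch of what the helper above reports, assuming `extractObjectFieldPaths` expands a changed value into dotted field paths (e.g. `connection.host`); the inputs here are hypothetical:

```ts
getUpdatedFieldPaths(
  { defaultTTL: "1h", connection: { host: "old-host" } },
  { defaultTTL: "2h", connection: { host: "old-host" } }
);
// => ["defaultTTL"]; "connection" is skipped because its JSON serialization is unchanged.
```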
export const dynamicSecretServiceFactory = ({
dynamicSecretDAL,
dynamicSecretLeaseDAL,
@@ -191,7 +220,13 @@ export const dynamicSecretServiceFactory = ({
return cfg;
});
-return { ...dynamicSecretCfg, inputs };
+return {
+  ...dynamicSecretCfg,
+  inputs,
+  projectId: project.id,
+  environment: environmentSlug,
+  secretPath: path
+};
};
const updateByName: TDynamicSecretServiceFactory["updateByName"] = async ({
@@ -278,8 +313,26 @@ export const dynamicSecretServiceFactory = ({
secretManagerDecryptor({ cipherTextBlob: dynamicSecretCfg.encryptedInput }).toString()
) as object;
const newInput = { ...decryptedStoredInput, ...(inputs || {}) };
const oldInput = await selectedProvider.validateProviderInputs(decryptedStoredInput, { projectId });
const updatedInput = await selectedProvider.validateProviderInputs(newInput, { projectId });
const updatedFields = getUpdatedFieldPaths(
{
...(oldInput as object),
maxTTL: dynamicSecretCfg.maxTTL,
defaultTTL: dynamicSecretCfg.defaultTTL,
name: dynamicSecretCfg.name,
usernameTemplate
},
{
...(updatedInput as object),
maxTTL,
defaultTTL,
name: newName ?? name,
usernameTemplate
}
);
let selectedGatewayId: string | null = null;
let isGatewayV1 = true;
if (updatedInput && typeof updatedInput === "object" && "gatewayId" in updatedInput && updatedInput?.gatewayId) {
@@ -357,7 +410,13 @@ export const dynamicSecretServiceFactory = ({
return cfg;
});
-return { ...updatedDynamicCfg, inputs: updatedInput };
+return {
+  dynamicSecret: updatedDynamicCfg,
+  updatedFields,
+  projectId: project.id,
+  environment: environmentSlug,
+  secretPath: path
+};
};
const deleteByName: TDynamicSecretServiceFactory["deleteByName"] = async ({
@@ -412,7 +471,12 @@ export const dynamicSecretServiceFactory = ({
await Promise.all(leases.map(({ id: leaseId }) => dynamicSecretQueueService.unsetLeaseRevocation(leaseId)));
const deletedDynamicSecretCfg = await dynamicSecretDAL.deleteById(dynamicSecretCfg.id);
-return deletedDynamicSecretCfg;
+return {
+  ...deletedDynamicSecretCfg,
+  environment: environmentSlug,
+  secretPath: path,
+  projectId: project.id
+};
}
// if leases exist we should flag it as deleting and then remove leases in background
// then delete the main one
@@ -421,11 +485,21 @@ export const dynamicSecretServiceFactory = ({
status: DynamicSecretStatus.Deleting
});
await dynamicSecretQueueService.pruneDynamicSecret(updatedDynamicSecretCfg.id);
-return updatedDynamicSecretCfg;
+return {
+  ...updatedDynamicSecretCfg,
+  environment: environmentSlug,
+  secretPath: path,
+  projectId: project.id
+};
}
// if no leases just delete the config
const deletedDynamicSecretCfg = await dynamicSecretDAL.deleteById(dynamicSecretCfg.id);
-return deletedDynamicSecretCfg;
+return {
+  ...deletedDynamicSecretCfg,
+  projectId: project.id,
+  environment: environmentSlug,
+  secretPath: path
+};
};
const getDetails: TDynamicSecretServiceFactory["getDetails"] = async ({
@@ -491,7 +565,13 @@ export const dynamicSecretServiceFactory = ({
projectId
})) as object;
-return { ...dynamicSecretCfg, inputs: providerInputs };
+return {
+  ...dynamicSecretCfg,
+  inputs: providerInputs,
+  projectId: project.id,
+  environment: environmentSlug,
+  secretPath: path
+};
};
// get unique dynamic secret count across multiple envs
@@ -622,16 +702,21 @@ export const dynamicSecretServiceFactory = ({
}
);
-return dynamicSecretCfg.filter((dynamicSecret) => {
-  return permission.can(
-    ProjectPermissionDynamicSecretActions.ReadRootCredential,
-    subject(ProjectPermissionSub.DynamicSecrets, {
-      environment: environmentSlug,
-      secretPath: path,
-      metadata: dynamicSecret.metadata
-    })
-  );
-});
+return {
+  dynamicSecrets: dynamicSecretCfg.filter((dynamicSecret) => {
+    return permission.can(
+      ProjectPermissionDynamicSecretActions.ReadRootCredential,
+      subject(ProjectPermissionSub.DynamicSecrets, {
+        environment: environmentSlug,
+        secretPath: path,
+        metadata: dynamicSecret.metadata
+      })
+    );
+  }),
+  environment: environmentSlug,
+  secretPath: path,
+  projectId
+};
};
const listDynamicSecretsByFolderIds: TDynamicSecretServiceFactory["listDynamicSecretsByFolderIds"] = async (


@@ -86,11 +86,28 @@ export type TGetDynamicSecretsCountDTO = Omit<TListDynamicSecretsDTO, "projectSl
};
export type TDynamicSecretServiceFactory = {
- create: (arg: TCreateDynamicSecretDTO) => Promise<TDynamicSecrets>;
- updateByName: (arg: TUpdateDynamicSecretDTO) => Promise<TDynamicSecrets>;
- deleteByName: (arg: TDeleteDynamicSecretDTO) => Promise<TDynamicSecrets>;
- getDetails: (arg: TDetailsDynamicSecretDTO) => Promise<TDynamicSecretWithMetadata>;
- listDynamicSecretsByEnv: (arg: TListDynamicSecretsDTO) => Promise<TDynamicSecretWithMetadata[]>;
+ create: (
+   arg: TCreateDynamicSecretDTO
+ ) => Promise<TDynamicSecrets & { projectId: string; environment: string; secretPath: string }>;
+ updateByName: (arg: TUpdateDynamicSecretDTO) => Promise<{
+   dynamicSecret: TDynamicSecrets;
+   updatedFields: string[];
+   projectId: string;
+   environment: string;
+   secretPath: string;
+ }>;
+ deleteByName: (
+   arg: TDeleteDynamicSecretDTO
+ ) => Promise<TDynamicSecrets & { projectId: string; environment: string; secretPath: string }>;
+ getDetails: (
+   arg: TDetailsDynamicSecretDTO
+ ) => Promise<TDynamicSecretWithMetadata & { projectId: string; environment: string; secretPath: string }>;
+ listDynamicSecretsByEnv: (arg: TListDynamicSecretsDTO) => Promise<{
+   dynamicSecrets: Array<TDynamicSecretWithMetadata>;
+   environment: string;
+   secretPath: string;
+   projectId: string;
+ }>;
listDynamicSecretsByEnvs: (
arg: TListDynamicSecretsMultiEnvDTO
) => Promise<Array<TDynamicSecretWithMetadata & { environment: string }>>;


@@ -1,7 +1,10 @@
import { resolve4, Resolver } from "node:dns/promises";
import axios, { AxiosError } from "axios";
import { TPkiAcmeChallenges } from "@app/db/schemas/pki-acme-challenges";
import { getConfig } from "@app/lib/config/env";
import { crypto } from "@app/lib/crypto/cryptography";
import { BadRequestError, NotFoundError } from "@app/lib/errors";
import { isPrivateIp } from "@app/lib/ip/ipRange";
import { logger } from "@app/lib/logger";
@@ -17,6 +20,7 @@ import {
} from "./pki-acme-errors";
import { AcmeAuthStatus, AcmeChallengeStatus, AcmeChallengeType } from "./pki-acme-schemas";
import { TPkiAcmeChallengeServiceFactory } from "./pki-acme-types";
import { isValidIp } from "@app/lib/ip";
type TPkiAcmeChallengeServiceFactoryDep = {
acmeChallengeDAL: Pick<
@@ -35,6 +39,9 @@ export const pkiAcmeChallengeServiceFactory = ({
auditLogService
}: TPkiAcmeChallengeServiceFactoryDep): TPkiAcmeChallengeServiceFactory => {
const appCfg = getConfig();
type ChallengeWithAuth = NonNullable<Awaited<ReturnType<typeof acmeChallengeDAL.findByIdForChallengeValidation>>>;
const markChallengeAsReady = async (challengeId: string): Promise<TPkiAcmeChallenges> => {
return acmeChallengeDAL.transaction(async (tx) => {
logger.info({ challengeId }, "Validating ACME challenge response");
@@ -55,20 +62,163 @@ export const pkiAcmeChallengeServiceFactory = ({
message: `ACME auth status is ${challenge.auth.status} instead of ${AcmeAuthStatus.Pending}`
});
}
-// TODO: support other challenge types here. Currently only HTTP-01 is supported
-if (challenge.type !== AcmeChallengeType.HTTP_01) {
-  throw new BadRequestError({ message: "Only HTTP-01 challenges are supported for now" });
-}
-const host = challenge.auth.identifierValue;
-// check if host is a private ip address
-if (isPrivateIp(host)) {
-  throw new BadRequestError({ message: "Private IP addresses are not allowed" });
-}
+if (challenge.type !== AcmeChallengeType.HTTP_01 && challenge.type !== AcmeChallengeType.DNS_01) {
+  throw new BadRequestError({ message: "Only HTTP-01 or DNS-01 challenges are supported for now" });
+}
return acmeChallengeDAL.updateById(challengeId, { status: AcmeChallengeStatus.Processing }, tx);
});
};
const validateHttp01Challenge = async (challenge: ChallengeWithAuth): Promise<void> => {
let host = challenge.auth.identifierValue;
if (appCfg.isAcmeDevelopmentMode && appCfg.ACME_DEVELOPMENT_HTTP01_CHALLENGE_HOST_OVERRIDES[host]) {
host = appCfg.ACME_DEVELOPMENT_HTTP01_CHALLENGE_HOST_OVERRIDES[host];
logger.warn(
{ srcHost: challenge.auth.identifierValue, dstHost: host },
"Using ACME development HTTP-01 challenge host override"
);
}
const challengeUrl = new URL(`/.well-known/acme-challenge/${challenge.auth.token}`, `http://${host}`);
logger.info({ challengeUrl }, "Performing ACME HTTP-01 challenge validation");
// TODO: read config from the profile to get the timeout instead
const timeoutMs = 10 * 1000; // 10 seconds
// Notice: well, we are in a transaction, ideally we should not hold transaction and perform
// a long running operation for long time. But assuming we are not performing a tons of
// challenge validation at the same time, it should be fine.
const challengeResponse = await axios.get<string>(challengeUrl.toString(), {
// In case if we override the host in the development mode, still provide the original host in the header
// to help the upstream server to validate the request
headers: { Host: challenge.auth.identifierValue },
timeout: timeoutMs,
responseType: "text",
validateStatus: () => true
});
if (challengeResponse.status !== 200) {
throw new AcmeIncorrectResponseError({
message: `ACME challenge response is not 200: ${challengeResponse.status}`
});
}
const challengeResponseBody: string = challengeResponse.data;
const thumbprint = challenge.auth.account.publicKeyThumbprint;
const expectedChallengeResponseBody = `${challenge.auth.token}.${thumbprint}`;
if (challengeResponseBody.trimEnd() !== expectedChallengeResponseBody) {
throw new AcmeIncorrectResponseError({ message: "ACME HTTP-01 challenge response is not correct" });
}
};
const validateDns01Challenge = async (challenge: ChallengeWithAuth): Promise<void> => {
const resolver = new Resolver();
if (appCfg.ACME_DNS_RESOLVER_SERVERS.length > 0) {
const servers = appCfg.ACME_DNS_RESOLVE_RESOLVER_SERVERS_HOST_ENABLED
? await Promise.all(
appCfg.ACME_DNS_RESOLVER_SERVERS.map(async (server) => {
if (isValidIp(server)) {
return server;
}
const ips = await resolve4(server);
return ips[0];
})
)
: appCfg.ACME_DNS_RESOLVER_SERVERS;
resolver.setServers(servers);
}
const recordName = `_acme-challenge.${challenge.auth.identifierValue}`;
const records = await resolver.resolveTxt(recordName);
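// Node's resolveTxt returns each TXT record as an array of character-string chunks
// (DNS caps each chunk at 255 bytes), so the chunks are re-joined before comparing.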
const recordValues = records.map((chunks) => chunks.join(""));
const thumbprint = challenge.auth.account.publicKeyThumbprint;
const keyAuthorization = `${challenge.auth.token}.${thumbprint}`;
const digest = crypto.nativeCrypto.createHash("sha256").update(keyAuthorization).digest();
const expectedChallengeResponseValue = Buffer.from(digest).toString("base64url");
if (!recordValues.some((recordValue) => recordValue.trim() === expectedChallengeResponseValue)) {
throw new AcmeIncorrectResponseError({ message: "ACME DNS-01 challenge response is not correct" });
}
};
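// Reference sketch (illustrative) of the DNS-01 record computation per RFC 8555 §8.4,
// mirroring the code above:
//
//   const keyAuth = `${token}.${thumbprint}`;
//   const txtValue = createHash("sha256").update(keyAuth).digest().toString("base64url");
//   // published as: _acme-challenge.<identifier> TXT <txtValue>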
const handleChallengeValidationError = async (
exp: unknown,
challenge: ChallengeWithAuth,
challengeId: string,
retryCount: number
): Promise<never> => {
let finalAttempt = false;
if (retryCount >= 2) {
logger.error(
exp,
`Last attempt to validate ACME challenge response failed, marking ${challengeId} challenge as invalid`
);
// This is the last attempt to validate the challenge response; if it fails, we mark the challenge as invalid
await acmeChallengeDAL.markAsInvalidCascadeById(challengeId);
finalAttempt = true;
}
try {
// Properly type and inspect the error
if (axios.isAxiosError(exp)) {
const axiosError = exp as AxiosError;
const errorCode = axiosError.code;
const errorMessage = axiosError.message;
if (errorCode === "ECONNREFUSED" || errorMessage.includes("ECONNREFUSED")) {
throw new AcmeConnectionError({ message: "Connection refused" });
}
if (errorCode === "ENOTFOUND" || errorMessage.includes("ENOTFOUND")) {
throw new AcmeDnsFailureError({ message: "Hostname could not be resolved (DNS failure)" });
}
if (errorCode === "ECONNRESET" || errorMessage.includes("ECONNRESET")) {
throw new AcmeConnectionError({ message: "Connection reset by peer" });
}
if (errorCode === "ECONNABORTED" || errorMessage.includes("timeout")) {
logger.error(exp, "Connection timed out while validating ACME challenge response");
throw new AcmeConnectionError({ message: "Connection timed out" });
}
logger.error(exp, "Unknown error validating ACME challenge response");
throw new AcmeServerInternalError({ message: "Unknown error validating ACME challenge response" });
}
if (exp instanceof Error) {
if ((exp as unknown as { code?: string })?.code === "ENOTFOUND") {
throw new AcmeDnsFailureError({ message: "Hostname could not be resolved (DNS failure)" });
}
logger.error(exp, "Error validating ACME challenge response");
throw exp;
}
logger.error(exp, "Unknown error validating ACME challenge response");
throw new AcmeServerInternalError({ message: "Unknown error validating ACME challenge response" });
} catch (outerExp) {
await auditLogService.createAuditLog({
projectId: challenge.auth.account.project.id,
actor: {
type: ActorType.ACME_ACCOUNT,
metadata: {
profileId: challenge.auth.account.profileId,
accountId: challenge.auth.account.id
}
},
event: {
type: finalAttempt ? EventType.FAIL_ACME_CHALLENGE : EventType.ATTEMPT_ACME_CHALLENGE,
metadata: {
challengeId,
type: challenge.type as AcmeChallengeType,
retryCount,
errorMessage: exp instanceof Error ? exp.message : "Unknown error"
}
}
});
throw outerExp;
}
};
const validateChallengeResponse = async (challengeId: string, retryCount: number): Promise<void> => {
logger.info({ challengeId, retryCount }, "Validating ACME challenge response");
const challenge = await acmeChallengeDAL.findByIdForChallengeValidation(challengeId);
@@ -80,41 +230,16 @@ export const pkiAcmeChallengeServiceFactory = ({
message: `ACME challenge is ${challenge.status} instead of ${AcmeChallengeStatus.Processing}`
});
}
let host = challenge.auth.identifierValue;
if (appCfg.isAcmeDevelopmentMode && appCfg.ACME_DEVELOPMENT_HTTP01_CHALLENGE_HOST_OVERRIDES[host]) {
host = appCfg.ACME_DEVELOPMENT_HTTP01_CHALLENGE_HOST_OVERRIDES[host];
logger.warn(
{ srcHost: challenge.auth.identifierValue, dstHost: host },
"Using ACME development HTTP-01 challenge host override"
);
}
const challengeUrl = new URL(`/.well-known/acme-challenge/${challenge.auth.token}`, `http://${host}`);
logger.info({ challengeUrl }, "Performing ACME HTTP-01 challenge validation");
try {
// TODO: read the timeout from the profile config instead
const timeoutMs = 10 * 1000; // 10 seconds
// Note: we are inside a transaction; ideally we would not hold it open while
// performing a long-running network call. But assuming we are not running many
// challenge validations concurrently, this should be fine.
const challengeResponse = await axios.get<string>(challengeUrl.toString(), {
// If we override the host in development mode, still send the original host in the
// Host header so the upstream server can validate the request
headers: { Host: challenge.auth.identifierValue },
timeout: timeoutMs,
responseType: "text",
validateStatus: () => true
});
if (challengeResponse.status !== 200) {
throw new AcmeIncorrectResponseError({
message: `ACME challenge response is not 200: ${challengeResponse.status}`
});
}
const challengeResponseBody: string = challengeResponse.data;
const thumbprint = challenge.auth.account.publicKeyThumbprint;
const expectedChallengeResponseBody = `${challenge.auth.token}.${thumbprint}`;
if (challengeResponseBody.trimEnd() !== expectedChallengeResponseBody) {
throw new AcmeIncorrectResponseError({ message: "ACME challenge response is not correct" });
if (challenge.type === AcmeChallengeType.HTTP_01) {
await validateHttp01Challenge(challenge);
} else if (challenge.type === AcmeChallengeType.DNS_01) {
await validateDns01Challenge(challenge);
} else {
throw new BadRequestError({ message: `Unsupported challenge type: ${challenge.type}` });
}
logger.info({ challengeId }, "ACME challenge response is correct, marking challenge as valid");
await acmeChallengeDAL.markAsValidCascadeById(challengeId);
await auditLogService.createAuditLog({
@@ -135,67 +260,7 @@ export const pkiAcmeChallengeServiceFactory = ({
}
});
} catch (exp) {
let finalAttempt = false;
if (retryCount >= 2) {
logger.error(
exp,
`Last attempt to validate ACME challenge response failed, marking ${challengeId} challenge as invalid`
);
// This is the last attempt to validate the challenge response; if it fails, we mark the challenge as invalid
await acmeChallengeDAL.markAsInvalidCascadeById(challengeId);
finalAttempt = true;
}
try {
// Properly type and inspect the error
if (axios.isAxiosError(exp)) {
const axiosError = exp as AxiosError;
const errorCode = axiosError.code;
const errorMessage = axiosError.message;
if (errorCode === "ECONNREFUSED" || errorMessage.includes("ECONNREFUSED")) {
throw new AcmeConnectionError({ message: "Connection refused" });
}
if (errorCode === "ENOTFOUND" || errorMessage.includes("ENOTFOUND")) {
throw new AcmeDnsFailureError({ message: "Hostname could not be resolved (DNS failure)" });
}
if (errorCode === "ECONNRESET" || errorMessage.includes("ECONNRESET")) {
throw new AcmeConnectionError({ message: "Connection reset by peer" });
}
if (errorCode === "ECONNABORTED" || errorMessage.includes("timeout")) {
logger.error(exp, "Connection timed out while validating ACME challenge response");
throw new AcmeConnectionError({ message: "Connection timed out" });
}
logger.error(exp, "Unknown error validating ACME challenge response");
throw new AcmeServerInternalError({ message: "Unknown error validating ACME challenge response" });
}
if (exp instanceof Error) {
logger.error(exp, "Error validating ACME challenge response");
throw exp;
}
logger.error(exp, "Unknown error validating ACME challenge response");
throw new AcmeServerInternalError({ message: "Unknown error validating ACME challenge response" });
} catch (outerExp) {
await auditLogService.createAuditLog({
projectId: challenge.auth.account.project.id,
actor: {
type: ActorType.ACME_ACCOUNT,
metadata: {
profileId: challenge.auth.account.profileId,
accountId: challenge.auth.account.id
}
},
event: {
type: finalAttempt ? EventType.FAIL_ACME_CHALLENGE : EventType.ATTEMPT_ACME_CHALLENGE,
metadata: {
challengeId,
type: challenge.type as AcmeChallengeType,
retryCount,
errorMessage: exp instanceof Error ? exp.message : "Unknown error"
}
}
});
throw outerExp;
}
await handleChallengeValidationError(exp, challenge, challengeId, retryCount);
}
};

View File

@@ -707,15 +707,17 @@ export const pkiAcmeServiceFactory = ({
tx
);
if (!skipDnsOwnershipVerification) {
// TODO: support other challenge types here. Currently only HTTP-01 is supported.
await acmeChallengeDAL.create(
{
authId: auth.id,
status: AcmeChallengeStatus.Pending,
type: AcmeChallengeType.HTTP_01
},
tx
);
for (const challengeType of [AcmeChallengeType.HTTP_01, AcmeChallengeType.DNS_01]) {
// eslint-disable-next-line no-await-in-loop
await acmeChallengeDAL.create(
{
authId: auth.id,
status: AcmeChallengeStatus.Pending,
type: challengeType
},
tx
);
}
}
return auth;
})

View File

@@ -77,6 +77,7 @@ export enum ApiDocsTags {
OidcSso = "OIDC SSO",
SamlSso = "SAML SSO",
LdapSso = "LDAP SSO",
Scim = "SCIM",
Events = "Event Subscriptions"
}
@@ -2549,6 +2550,10 @@ export const AppConnections = {
orgName: "The short name of the Chef organization to connect to.",
userName: "The username used to access Chef.",
privateKey: "The private key used to access Chef."
},
OCTOPUS_DEPLOY: {
instanceUrl: "The Octopus Deploy instance URL to connect to.",
apiKey: "The API key used to authenticate with Octopus Deploy."
}
}
};
@@ -2709,6 +2714,14 @@ export const SecretSyncs = {
siteId: "The ID of the Laravel Forge site to sync secrets to.",
siteName: "The name of the Laravel Forge site to sync secrets to."
},
OCTOPUS_DEPLOY: {
spaceId: "The ID of the Octopus Deploy space to sync secrets to.",
spaceName: "The name of the Octopus Deploy space to sync secrets to.",
projectId: "The ID of the Octopus Deploy project to sync secrets to.",
projectName: "The name of the Octopus Deploy project to sync secrets to.",
scope: "The Octopus Deploy scope that secrets should be synced to.",
scopeValues: "The Octopus Deploy scope values that secrets should be synced to."
},
WINDMILL: {
workspace: "The Windmill workspace to sync secrets to.",
path: "The Windmill workspace path to sync secrets to."
@@ -3151,6 +3164,13 @@ export const LdapSso = {
}
};
export const Scim = {
UPDATE_GROUP_ORG_ROLE_MAPPINGS: {
groupName: "The name of the group in the SCIM provider.",
roleSlug: "The slug of the role that group members should be assigned when provisioned."
}
};
export const EventSubscriptions = {
SUBSCRIBE_PROJECT_EVENTS: {
projectId: "The ID of the project to subscribe to events for.",

View File

@@ -119,6 +119,16 @@ const envSchema = z
})
.default("{}")
),
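// e.g. ACME_DNS_RESOLVER_SERVERS="1.1.1.1,8.8.8.8" (illustrative): parsed by splitting
// on commas; entries may be hostnames when ACME_DNS_RESOLVE_RESOLVER_SERVERS_HOST_ENABLED
// is set, in which case they are resolved to IPv4 addresses before use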
ACME_DNS_RESOLVER_SERVERS: zpStr(
z
.string()
.optional()
.transform((val) => {
if (!val) return [];
return val.split(",");
})
),
ACME_DNS_RESOLVE_RESOLVER_SERVERS_HOST_ENABLED: zodStrBool.default("false").optional(),
DNS_MADE_EASY_SANDBOX_ENABLED: zodStrBool.default("false").optional(),
// smtp options
SMTP_HOST: zpStr(z.string().optional()),
@@ -229,6 +239,7 @@ const envSchema = z
CAPTCHA_SECRET: zpStr(z.string().optional()),
CAPTCHA_SITE_KEY: zpStr(z.string().optional()),
INTERCOM_ID: zpStr(z.string().optional()),
CDN_HOST: zpStr(z.string().optional()),
// TELEMETRY
OTEL_TELEMETRY_COLLECTION_ENABLED: zodStrBool.default("false"),

View File

@@ -134,3 +134,67 @@ export const deterministicStringify = (value: unknown): string => {
return JSON.stringify(value);
};
/**
* Recursively extracts all field paths from a nested object structure.
* Returns an array of dot-notation paths (e.g., ["password", "username", "field.nestedField"])
*/
export const extractObjectFieldPaths = (obj: unknown, prefix = ""): string[] => {
const paths: string[] = [];
if (obj === null || obj === undefined) {
return paths;
}
if (typeof obj !== "object") {
// return the path if it exists
if (prefix) {
paths.push(prefix);
}
return paths;
}
if (Array.isArray(obj)) {
// for arrays, record the array's own path and any nested object paths
if (prefix) {
paths.push(prefix);
}
// only recurse into object items; for primitives, knowing the array changed is enough
obj.forEach((item, index) => {
if (typeof item === "object" && item !== null) {
const nestedPaths = extractObjectFieldPaths(item, `${prefix}[${index}]`);
paths.push(...nestedPaths);
}
});
return paths;
}
// for objects, extract all keys and recurse
const keys = Object.keys(obj);
if (keys.length === 0 && prefix) {
// empty object with prefix
paths.push(prefix);
}
keys.forEach((key) => {
const currentPath = prefix ? `${prefix}.${key}` : key;
const value = (obj as Record<string, unknown>)[key];
if (value === null || value === undefined) {
paths.push(currentPath);
} else if (typeof value === "object") {
// recurse into nested objects/arrays
const nestedPaths = extractObjectFieldPaths(value, currentPath);
if (nestedPaths.length === 0) {
// if nested object is empty, add the path itself
paths.push(currentPath);
} else {
paths.push(...nestedPaths);
}
} else {
paths.push(currentPath);
}
});
return paths;
};
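// Illustrative usage (hypothetical input):
//   extractObjectFieldPaths({ password: "x", db: { host: "h", ports: [5432] } })
//   // => ["password", "db.host", "db.ports"]
// primitive array items are not expanded; only object items contribute indexed
// paths such as "items[0].name"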

View File

@@ -33,3 +33,7 @@ export const sanitizeString = (dto: { unsanitizedString: string; tokens: string[
});
return sanitizedWords.join("");
};
export const sanitizeSqlLikeString = (value: string): string => {
return String(value).replace(new RE2("[%_\\\\]", "g"), "\\$&");
};
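// Illustrative: sanitizeSqlLikeString("50%_off") returns the literal string 50\%\_off,
// so the value can be embedded in a LIKE pattern (with ESCAPE '\') without % or _
// acting as wildcards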

View File

@@ -1,6 +1,8 @@
import fs from "node:fs";
import path from "node:path";
import staticServe from "@fastify/static";
import RE2 from "re2";
import { getConfig, IS_PACKAGED } from "@app/lib/config/env";
@@ -15,6 +17,9 @@ export const registerServeUI = async (
dir: string;
}
) => {
const appCfg = getConfig();
const cdnHost = appCfg.CDN_HOST || "";
// use this only for non-sensitive, static frontend runtime configuration in standalone mode
// that the app needs before loading, like the posthog dsn key
// for most other use cases, use the server config
@@ -25,15 +30,26 @@ export const registerServeUI = async (
hide: true
},
handler: (_req, res) => {
const appCfg = getConfig();
void res.type("application/javascript");
const config = {
CAPTCHA_SITE_KEY: appCfg.CAPTCHA_SITE_KEY,
POSTHOG_API_KEY: appCfg.POSTHOG_PROJECT_API_KEY,
INTERCOM_ID: appCfg.INTERCOM_ID,
TELEMETRY_CAPTURING_ENABLED: appCfg.TELEMETRY_ENABLED
TELEMETRY_CAPTURING_ENABLED: appCfg.TELEMETRY_ENABLED,
CDN_HOST: cdnHost
};
const js = `window.__INFISICAL_RUNTIME_ENV__ = Object.freeze(${JSON.stringify(config)});`;
// Define window.__toCdnUrl for Vite's experimental.renderBuiltUrl runtime support
// This function is called by dynamically imported chunks to resolve CDN URLs
const js = `
window.__INFISICAL_RUNTIME_ENV__ = Object.freeze(${JSON.stringify(config)});
window.__toCdnUrl = function(filename) {
var cdnHost = window.__INFISICAL_RUNTIME_ENV__.CDN_HOST || "";
if (cdnHost && filename.startsWith("assets/")) {
return cdnHost + "/" + filename;
}
return "/" + filename;
};
`.trim();
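// Illustrative resolution (hypothetical host): with CDN_HOST="https://cdn.example.com",
// window.__toCdnUrl("assets/index-abc123.js") yields
// "https://cdn.example.com/assets/index-abc123.js"; without CDN_HOST it falls back
// to "/assets/index-abc123.js"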
return res.send(js);
}
});
@@ -41,11 +57,30 @@ export const registerServeUI = async (
if (standaloneMode) {
const frontendName = IS_PACKAGED ? "frontend" : "frontend-build";
const frontendPath = path.join(dir, frontendName);
const indexHtmlPath = path.join(frontendPath, "index.html");
let indexHtml = fs.readFileSync(indexHtmlPath, "utf-8");
if (cdnHost) {
// Replace relative asset paths with CDN URLs in script and link tags
indexHtml = indexHtml
.replace(/src="\/assets\//g, `src="${cdnHost}/assets/`)
.replace(/href="\/assets\//g, `href="${cdnHost}/assets/`);
// Inject CDN host into CSP directives that need it
const cspDirectives = ["script-src", "style-src", "font-src", "connect-src", "media-src"];
for (const directive of cspDirectives) {
const regex = new RE2(`(${directive}\\s+'self')`, "g");
indexHtml = indexHtml.replace(regex, `$1 ${cdnHost}`);
}
}
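// e.g. (hypothetical host) `script-src 'self'` becomes
// `script-src 'self' https://cdn.example.com`, so assets served from the CDN
// still satisfy the page's CSP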
await server.register(staticServe, {
root: frontendPath,
wildcard: false,
maxAge: "30d",
immutable: true
immutable: true,
index: false
});
server.route({
@@ -60,12 +95,12 @@ export const registerServeUI = async (
return;
}
return reply.sendFile("index.html", {
immutable: false,
maxAge: 0,
lastModified: false,
etag: false
});
return reply
.type("text/html")
.header("Cache-Control", "no-cache, no-store, must-revalidate")
.header("Pragma", "no-cache")
.header("Expires", "0")
.send(indexHtml);
}
});
}

View File

@@ -1375,10 +1375,7 @@ export const registerRoutes = async (
permissionService,
projectDAL,
projectSshConfigDAL,
secretDAL,
secretV2BridgeDAL,
projectQueue: projectQueueService,
projectBotService,
userDAL,
projectEnvDAL,
orgDAL,
@@ -1405,7 +1402,6 @@ export const registerRoutes = async (
microsoftTeamsIntegrationDAL,
projectTemplateService,
smtpService,
reminderService,
notificationService,
membershipGroupDAL,
membershipIdentityDAL,

View File

@@ -101,6 +101,10 @@ import {
NorthflankConnectionListItemSchema,
SanitizedNorthflankConnectionSchema
} from "@app/services/app-connection/northflank";
import {
OctopusDeployConnectionListItemSchema,
SanitizedOctopusDeployConnectionSchema
} from "@app/services/app-connection/octopus-deploy";
import { OktaConnectionListItemSchema, SanitizedOktaConnectionSchema } from "@app/services/app-connection/okta";
import {
PostgresConnectionListItemSchema,
@@ -180,7 +184,8 @@ const SanitizedAppConnectionSchema = z.union([
...SanitizedMongoDBConnectionSchema.options,
...SanitizedLaravelForgeConnectionSchema.options,
...SanitizedChefConnectionSchema.options,
...SanitizedDNSMadeEasyConnectionSchema.options
...SanitizedDNSMadeEasyConnectionSchema.options,
...SanitizedOctopusDeployConnectionSchema.options
]);
const AppConnectionOptionsSchema = z.discriminatedUnion("app", [
@@ -227,7 +232,8 @@ const AppConnectionOptionsSchema = z.discriminatedUnion("app", [
MongoDBConnectionListItemSchema,
LaravelForgeConnectionListItemSchema,
ChefConnectionListItemSchema,
DNSMadeEasyConnectionListItemSchema
DNSMadeEasyConnectionListItemSchema,
OctopusDeployConnectionListItemSchema
]);
export const registerAppConnectionRouter = async (server: FastifyZodProvider) => {

View File

@@ -33,6 +33,7 @@ import { registerMsSqlConnectionRouter } from "./mssql-connection-router";
import { registerMySqlConnectionRouter } from "./mysql-connection-router";
import { registerNetlifyConnectionRouter } from "./netlify-connection-router";
import { registerNorthflankConnectionRouter } from "./northflank-connection-router";
import { registerOctopusDeployConnectionRouter } from "./octopus-deploy-connection-router";
import { registerOktaConnectionRouter } from "./okta-connection-router";
import { registerPostgresConnectionRouter } from "./postgres-connection-router";
import { registerRailwayConnectionRouter } from "./railway-connection-router";
@@ -92,5 +93,6 @@ export const APP_CONNECTION_REGISTER_ROUTER_MAP: Record<AppConnection, (server:
[AppConnection.Okta]: registerOktaConnectionRouter,
[AppConnection.Redis]: registerRedisConnectionRouter,
[AppConnection.MongoDB]: registerMongoDBConnectionRouter,
[AppConnection.Chef]: registerChefConnectionRouter
[AppConnection.Chef]: registerChefConnectionRouter,
[AppConnection.OctopusDeploy]: registerOctopusDeployConnectionRouter
};

View File

@@ -0,0 +1,168 @@
import { z } from "zod";
import { BadRequestError } from "@app/lib/errors";
import { readLimit } from "@app/server/config/rateLimiter";
import { verifyAuth } from "@app/server/plugins/auth/verify-auth";
import { AppConnection } from "@app/services/app-connection/app-connection-enums";
import {
CreateOctopusDeployConnectionSchema,
SanitizedOctopusDeployConnectionSchema,
UpdateOctopusDeployConnectionSchema
} from "@app/services/app-connection/octopus-deploy";
import { AuthMode } from "@app/services/auth/auth-type";
import { registerAppConnectionEndpoints } from "./app-connection-endpoints";
export const registerOctopusDeployConnectionRouter = async (server: FastifyZodProvider) => {
registerAppConnectionEndpoints({
app: AppConnection.OctopusDeploy,
server,
sanitizedResponseSchema: SanitizedOctopusDeployConnectionSchema,
createSchema: CreateOctopusDeployConnectionSchema,
updateSchema: UpdateOctopusDeployConnectionSchema
});
server.route({
method: "GET",
url: `/:connectionId/spaces`,
config: {
rateLimit: readLimit
},
schema: {
params: z.object({
connectionId: z.string().uuid()
}),
response: {
200: z.array(
z.object({
id: z.string(),
name: z.string(),
slug: z.string(),
isDefault: z.boolean()
})
)
}
},
onRequest: verifyAuth([AuthMode.JWT]),
handler: async (req) => {
const { connectionId } = req.params;
const spaces = await server.services.appConnection.octopusDeploy.listSpaces(connectionId, req.permission);
return spaces;
}
});
server.route({
method: "GET",
url: `/:connectionId/projects`,
config: {
rateLimit: readLimit
},
schema: {
params: z.object({
connectionId: z.string().uuid()
}),
querystring: z.object({
spaceId: z.string().min(1, "Space ID is required")
}),
response: {
200: z.array(
z.object({
id: z.string(),
name: z.string(),
slug: z.string()
})
)
}
},
onRequest: verifyAuth([AuthMode.JWT]),
handler: async (req) => {
const { connectionId } = req.params;
const { spaceId } = req.query;
const projects = await server.services.appConnection.octopusDeploy.listProjects(
connectionId,
spaceId,
req.permission
);
return projects;
}
});
server.route({
method: "GET",
url: `/:connectionId/scope-values`,
config: {
rateLimit: readLimit
},
schema: {
params: z.object({
connectionId: z.string().uuid()
}),
querystring: z.object({
spaceId: z.string().min(1, "Space ID is required"),
projectId: z.string().min(1, "Project ID is required")
}),
response: {
200: z.object({
environments: z
.object({
id: z.string(),
name: z.string()
})
.array(),
roles: z
.object({
id: z.string(),
name: z.string()
})
.array(),
machines: z
.object({
id: z.string(),
name: z.string()
})
.array(),
processes: z
.object({
id: z.string(),
name: z.string()
})
.array(),
actions: z
.object({
id: z.string(),
name: z.string()
})
.array(),
channels: z
.object({
id: z.string(),
name: z.string()
})
.array()
})
}
},
onRequest: verifyAuth([AuthMode.JWT]),
handler: async (req) => {
const { connectionId } = req.params;
const { spaceId, projectId } = req.query;
const scopeValues = await server.services.appConnection.octopusDeploy.getScopeValues(
connectionId,
spaceId,
projectId,
req.permission
);
if (!scopeValues) {
throw new BadRequestError({ message: "Unable to get Octopus Deploy scope values" });
}
return scopeValues;
}
});
};

View File

@@ -7,6 +7,7 @@ import { verifyAuth } from "@app/server/plugins/auth/verify-auth";
import { ApprovalPolicyType } from "@app/services/approval-policy/approval-policy-enums";
import {
TApprovalPolicy,
TApprovalPolicyInputs,
TCreatePolicyDTO,
TCreateRequestDTO,
TUpdatePolicyDTO
@@ -21,7 +22,8 @@ export const registerApprovalPolicyEndpoints = <P extends TApprovalPolicy>({
policyResponseSchema,
createRequestSchema,
requestResponseSchema,
grantResponseSchema
grantResponseSchema,
inputsSchema
}: {
server: FastifyZodProvider;
policyType: ApprovalPolicyType;
@@ -41,6 +43,7 @@ export const registerApprovalPolicyEndpoints = <P extends TApprovalPolicy>({
createRequestSchema: z.ZodType<TCreateRequestDTO>;
requestResponseSchema: z.ZodTypeAny;
grantResponseSchema: z.ZodTypeAny;
inputsSchema: z.ZodType<TApprovalPolicyInputs>;
}) => {
// Policies
server.route({
@@ -622,4 +625,31 @@ export const registerApprovalPolicyEndpoints = <P extends TApprovalPolicy>({
return { grant };
}
});
server.route({
method: "POST",
url: "/check-policy-match",
config: {
rateLimit: readLimit
},
schema: {
description: "Check if a resource path matches any approval policy and if the user has an active grant",
body: z.object({
projectId: z.string().uuid(),
inputs: inputsSchema
}),
response: {
200: z.object({
requiresApproval: z.boolean(),
hasActiveGrant: z.boolean()
})
}
},
onRequest: verifyAuth([AuthMode.JWT]),
handler: async (req) => {
const result = await server.services.approvalPolicy.checkPolicyMatch(policyType, req.body, req.permission);
return result;
}
});
};

View File

@@ -2,6 +2,7 @@ import { ApprovalPolicyType } from "@app/services/approval-policy/approval-polic
import {
CreatePamAccessPolicySchema,
CreatePamAccessRequestSchema,
PamAccessPolicyInputsSchema,
PamAccessPolicySchema,
PamAccessRequestGrantSchema,
PamAccessRequestSchema,
@@ -23,7 +24,8 @@ export const APPROVAL_POLICY_REGISTER_ROUTER_MAP: Record<
policyResponseSchema: PamAccessPolicySchema,
createRequestSchema: CreatePamAccessRequestSchema,
requestResponseSchema: PamAccessRequestSchema,
grantResponseSchema: PamAccessRequestGrantSchema
grantResponseSchema: PamAccessRequestGrantSchema,
inputsSchema: PamAccessPolicyInputsSchema
});
}
};

View File

@@ -25,6 +25,7 @@ import { EnrollmentType } from "@app/services/certificate-profile/certificate-pr
import { CertificateRequestStatus } from "@app/services/certificate-request/certificate-request-types";
import { validateTemplateRegexField } from "@app/services/certificate-template/certificate-template-validators";
import { TCertificateFromProfileResponse } from "@app/services/certificate-v3/certificate-v3-types";
import { ProjectFilterType } from "@app/services/project/project-types";
import { booleanSchema } from "../sanitizedSchemas";
@@ -353,6 +354,123 @@ export const registerCertificateRouter = async (server: FastifyZodProvider) => {
}
});
server.route({
method: "GET",
url: "/certificate-requests",
config: {
rateLimit: readLimit
},
schema: {
hide: false,
tags: [ApiDocsTags.PkiCertificates],
querystring: z.object({
projectSlug: z.string().min(1).trim(),
offset: z.coerce.number().min(0).default(0),
limit: z.coerce.number().min(1).max(100).default(20),
search: z.string().trim().optional(),
status: z.nativeEnum(CertificateRequestStatus).optional(),
fromDate: z.coerce.date().optional(),
toDate: z.coerce.date().optional(),
profileIds: z
.string()
.transform((val) => val.split(",").map((id) => id.trim()))
.pipe(z.array(z.string().uuid()))
.optional()
.describe("Comma-separated list of profile IDs"),
sortBy: z.string().trim().optional(),
sortOrder: z.enum(["asc", "desc"]).optional()
}),
response: {
200: z.object({
certificateRequests: z.array(
z.object({
id: z.string(),
status: z.nativeEnum(CertificateRequestStatus),
commonName: z.string().nullable(),
altNames: z.string().nullable(),
profileId: z.string().nullable(),
profileName: z.string().nullable(),
caId: z.string().nullable(),
certificateId: z.string().nullable(),
errorMessage: z.string().nullable(),
createdAt: z.date(),
updatedAt: z.date(),
certificate: z
.object({
id: z.string(),
serialNumber: z.string(),
status: z.string()
})
.nullable()
})
),
totalCount: z.number()
})
}
},
onRequest: verifyAuth([AuthMode.JWT, AuthMode.IDENTITY_ACCESS_TOKEN]),
handler: async (req) => {
const project = await server.services.project.getAProject({
actor: req.permission.type,
actorId: req.permission.id,
actorOrgId: req.permission.orgId,
actorAuthMethod: req.permission.authMethod,
filter: {
type: ProjectFilterType.SLUG,
slug: req.query.projectSlug,
orgId: req.permission.orgId
}
});
const { certificateRequests, totalCount } = await server.services.certificateRequest.listCertificateRequests({
actor: req.permission.type,
actorId: req.permission.id,
actorAuthMethod: req.permission.authMethod,
actorOrgId: req.permission.orgId,
projectId: project.id,
offset: req.query.offset,
limit: req.query.limit,
search: req.query.search,
status: req.query.status,
fromDate: req.query.fromDate,
toDate: req.query.toDate,
profileIds: req.query.profileIds,
sortBy: req.query.sortBy,
sortOrder: req.query.sortOrder
});
await server.services.auditLog.createAuditLog({
...req.auditLogInfo,
projectId: project.id,
event: {
type: EventType.LIST_CERTIFICATE_REQUESTS,
metadata: {
offset: req.query.offset,
limit: req.query.limit,
search: req.query.search,
status: req.query.status,
count: certificateRequests.length,
certificateRequestIds: certificateRequests.map((certReq) => certReq.id)
}
}
});
return {
certificateRequests: certificateRequests.map((certReq) => ({
...certReq,
profileId: certReq.profileId ?? null,
caId: certReq.caId ?? null,
certificateId: certReq.certificateId ?? null,
commonName: certReq.commonName ?? null,
altNames: certReq.altNames ?? null,
errorMessage: certReq.errorMessage ?? null,
profileName: certReq.profileName ?? null
})),
totalCount
};
}
});
server.route({
method: "POST",
url: "/issue-certificate",

View File

@@ -359,6 +359,21 @@ export const registerDashboardRouter = async (server: FastifyZodProvider) => {
// get the count of unique dynamic secret names to properly adjust remaining limit
const uniqueDynamicSecretsCount = new Set(dynamicSecrets.map((dynamicSecret) => dynamicSecret.name)).size;
if (dynamicSecrets.length) {
await server.services.auditLog.createAuditLog({
...req.auditLogInfo,
projectId,
event: {
type: EventType.LIST_DYNAMIC_SECRETS,
metadata: {
environment: [...new Set(dynamicSecrets.map((dynamicSecret) => dynamicSecret.environment))].join(","),
secretPath,
projectId
}
}
});
}
remainingLimit -= uniqueDynamicSecretsCount;
adjustedOffset = 0;
} else {
@@ -738,7 +753,9 @@ export const registerDashboardRouter = async (server: FastifyZodProvider) => {
reminder: Awaited<ReturnType<typeof server.services.reminder.getRemindersForDashboard>>[string] | null;
})[]
| undefined;
let dynamicSecrets: Awaited<ReturnType<typeof server.services.dynamicSecret.listDynamicSecretsByEnv>> | undefined;
let dynamicSecrets:
| Awaited<ReturnType<typeof server.services.dynamicSecret.listDynamicSecretsByEnv>>["dynamicSecrets"]
| undefined;
let secretRotations:
| (Awaited<ReturnType<typeof server.services.secretRotationV2.getDashboardSecretRotations>>[number] & {
secrets: (NonNullable<
@@ -923,7 +940,7 @@ export const registerDashboardRouter = async (server: FastifyZodProvider) => {
});
if (remainingLimit > 0 && totalDynamicSecretCount > adjustedOffset) {
dynamicSecrets = await server.services.dynamicSecret.listDynamicSecretsByEnv({
const { dynamicSecrets: dynamicSecretCfgs } = await server.services.dynamicSecret.listDynamicSecretsByEnv({
actor: req.permission.type,
actorId: req.permission.id,
actorAuthMethod: req.permission.authMethod,
@@ -938,6 +955,23 @@ export const registerDashboardRouter = async (server: FastifyZodProvider) => {
offset: adjustedOffset
});
if (dynamicSecretCfgs.length) {
await server.services.auditLog.createAuditLog({
...req.auditLogInfo,
projectId,
event: {
type: EventType.LIST_DYNAMIC_SECRETS,
metadata: {
environment,
secretPath,
projectId
}
}
});
}
dynamicSecrets = dynamicSecretCfgs;
remainingLimit -= dynamicSecrets.length;
adjustedOffset = 0;
} else {
@@ -1263,6 +1297,27 @@ export const registerDashboardRouter = async (server: FastifyZodProvider) => {
const sliceQuickSearch = <T>(array: T[]) => array.slice(0, 25);
const filteredDynamicSecrets = sliceQuickSearch(
searchPath ? dynamicSecrets.filter((dynamicSecret) => dynamicSecret.path.endsWith(searchPath)) : dynamicSecrets
);
if (filteredDynamicSecrets?.length) {
await server.services.auditLog.createAuditLog({
projectId,
...req.auditLogInfo,
event: {
type: EventType.LIST_DYNAMIC_SECRETS,
metadata: {
environment: [...new Set(filteredDynamicSecrets.map((dynamicSecret) => dynamicSecret.environment))].join(
","
),
secretPath: [...new Set(filteredDynamicSecrets.map((dynamicSecret) => dynamicSecret.path))].join(","),
projectId
}
}
});
}
return {
secrets: sliceQuickSearch(
searchPath ? secrets.filter((secret) => secret.secretPath.endsWith(searchPath)) : secrets

View File

@@ -2,6 +2,7 @@ import { z } from "zod";
import { ExternalGroupOrgRoleMappingsSchema } from "@app/db/schemas/external-group-org-role-mappings";
import { EventType } from "@app/ee/services/audit-log/audit-log-types";
import { ApiDocsTags, Scim } from "@app/lib/api-docs";
import { readLimit, writeLimit } from "@app/server/config/rateLimiter";
import { slugSchema } from "@app/server/lib/schemas";
import { verifyAuth } from "@app/server/plugins/auth/verify-auth";
@@ -16,6 +17,8 @@ export const registerExternalGroupOrgRoleMappingRouter = async (server: FastifyZ
rateLimit: readLimit
},
schema: {
hide: false,
tags: [ApiDocsTags.Scim],
response: {
200: ExternalGroupOrgRoleMappingsSchema.array()
}
@@ -44,11 +47,13 @@ export const registerExternalGroupOrgRoleMappingRouter = async (server: FastifyZ
rateLimit: writeLimit
},
schema: {
hide: false,
tags: [ApiDocsTags.Scim],
body: z.object({
mappings: z
.object({
groupName: z.string().trim().min(1),
roleSlug: slugSchema({ max: 64 })
groupName: z.string().trim().min(1).describe(Scim.UPDATE_GROUP_ORG_ROLE_MAPPINGS.groupName),
roleSlug: slugSchema({ max: 64 }).describe(Scim.UPDATE_GROUP_ORG_ROLE_MAPPINGS.roleSlug)
})
.array()
}),

View File

@@ -236,7 +236,7 @@ export const registerV1Routes = async (server: FastifyZodProvider) => {
await server.register(registerUserEngagementRouter, { prefix: "/user-engagement" });
await server.register(registerDashboardRouter, { prefix: "/dashboard" });
await server.register(registerCmekRouter, { prefix: "/kms" });
await server.register(registerExternalGroupOrgRoleMappingRouter, { prefix: "/external-group-mappings" });
await server.register(registerExternalGroupOrgRoleMappingRouter, { prefix: "/scim/group-org-role-mappings" });
await server.register(
async (appConnectionRouter) => {

View File

@@ -1209,7 +1209,16 @@ export const registerProjectRouter = async (server: FastifyZodProvider) => {
.boolean()
.default(false)
.optional()
.describe("Retrieve only certificates available for PKI sync")
.describe("Retrieve only certificates available for PKI sync"),
search: z.string().trim().optional().describe("Search by SAN, CN, certificate ID, or serial number"),
status: z.string().optional().describe("Filter by certificate status"),
profileIds: z
.union([z.string().uuid(), z.array(z.string().uuid())])
.transform((val) => (Array.isArray(val) ? val : [val]))
.optional()
.describe("Filter by profile IDs"),
fromDate: z.coerce.date().optional().describe("Filter certificates created from this date"),
toDate: z.coerce.date().optional().describe("Filter certificates created until this date")
}),
response: {
200: z.object({

View File

@@ -25,6 +25,7 @@ import { registerHumanitecSyncRouter } from "./humanitec-sync-router";
import { registerLaravelForgeSyncRouter } from "./laravel-forge-sync-router";
import { registerNetlifySyncRouter } from "./netlify-sync-router";
import { registerNorthflankSyncRouter } from "./northflank-sync-router";
import { registerOctopusDeploySyncRouter } from "./octopus-deploy-sync-router";
import { registerRailwaySyncRouter } from "./railway-sync-router";
import { registerRenderSyncRouter } from "./render-sync-router";
import { registerSupabaseSyncRouter } from "./supabase-sync-router";
@@ -69,5 +70,6 @@ export const SECRET_SYNC_REGISTER_ROUTER_MAP: Record<SecretSync, (server: Fastif
[SecretSync.Northflank]: registerNorthflankSyncRouter,
[SecretSync.Bitbucket]: registerBitbucketSyncRouter,
[SecretSync.LaravelForge]: registerLaravelForgeSyncRouter,
[SecretSync.Chef]: registerChefSyncRouter
[SecretSync.Chef]: registerChefSyncRouter,
[SecretSync.OctopusDeploy]: registerOctopusDeploySyncRouter
};

View File

@@ -0,0 +1,17 @@
import {
CreateOctopusDeploySyncSchema,
OctopusDeploySyncSchema,
UpdateOctopusDeploySyncSchema
} from "@app/services/secret-sync/octopus-deploy";
import { SecretSync } from "@app/services/secret-sync/secret-sync-enums";
import { registerSyncSecretsEndpoints } from "./secret-sync-endpoints";
export const registerOctopusDeploySyncRouter = async (server: FastifyZodProvider) =>
registerSyncSecretsEndpoints({
destination: SecretSync.OctopusDeploy,
server,
responseSchema: OctopusDeploySyncSchema,
createSchema: CreateOctopusDeploySyncSchema,
updateSchema: UpdateOctopusDeploySyncSchema
});

View File

@@ -48,6 +48,7 @@ import { HumanitecSyncListItemSchema, HumanitecSyncSchema } from "@app/services/
import { LaravelForgeSyncListItemSchema, LaravelForgeSyncSchema } from "@app/services/secret-sync/laravel-forge";
import { NetlifySyncListItemSchema, NetlifySyncSchema } from "@app/services/secret-sync/netlify";
import { NorthflankSyncListItemSchema, NorthflankSyncSchema } from "@app/services/secret-sync/northflank";
import { OctopusDeploySyncListItemSchema, OctopusDeploySyncSchema } from "@app/services/secret-sync/octopus-deploy";
import { RailwaySyncListItemSchema, RailwaySyncSchema } from "@app/services/secret-sync/railway/railway-sync-schemas";
import { RenderSyncListItemSchema, RenderSyncSchema } from "@app/services/secret-sync/render/render-sync-schemas";
import { SupabaseSyncListItemSchema, SupabaseSyncSchema } from "@app/services/secret-sync/supabase";
@@ -90,7 +91,8 @@ const SecretSyncSchema = z.discriminatedUnion("destination", [
NorthflankSyncSchema,
BitbucketSyncSchema,
LaravelForgeSyncSchema,
ChefSyncSchema
ChefSyncSchema,
OctopusDeploySyncSchema
]);
const SecretSyncOptionsSchema = z.discriminatedUnion("destination", [
@@ -126,7 +128,8 @@ const SecretSyncOptionsSchema = z.discriminatedUnion("destination", [
NorthflankSyncListItemSchema,
BitbucketSyncListItemSchema,
LaravelForgeSyncListItemSchema,
ChefSyncListItemSchema
ChefSyncListItemSchema,
OctopusDeploySyncListItemSchema
]);
export const registerSecretSyncRouter = async (server: FastifyZodProvider) => {

View File

@@ -42,7 +42,8 @@ export enum AppConnection {
MongoDB = "mongodb",
LaravelForge = "laravel-forge",
Chef = "chef",
Northflank = "northflank"
Northflank = "northflank",
OctopusDeploy = "octopus-deploy"
}
export enum AWSRegion {

View File

@@ -129,6 +129,11 @@ import {
NorthflankConnectionMethod,
validateNorthflankConnectionCredentials
} from "./northflank";
import {
getOctopusDeployConnectionListItem,
OctopusDeployConnectionMethod,
validateOctopusDeployConnectionCredentials
} from "./octopus-deploy";
import { getOktaConnectionListItem, OktaConnectionMethod, validateOktaConnectionCredentials } from "./okta";
import { getPostgresConnectionListItem, PostgresConnectionMethod } from "./postgres";
import { getRailwayConnectionListItem, validateRailwayConnectionCredentials } from "./railway";
@@ -211,6 +216,7 @@ export const listAppConnectionOptions = (projectType?: ProjectType) => {
getHerokuConnectionListItem(),
getRenderConnectionListItem(),
getLaravelForgeConnectionListItem(),
getOctopusDeployConnectionListItem(),
getFlyioConnectionListItem(),
getGitLabConnectionListItem(),
getCloudflareConnectionListItem(),
@@ -360,7 +366,8 @@ export const validateAppConnectionCredentials = async (
[AppConnection.Okta]: validateOktaConnectionCredentials as TAppConnectionCredentialsValidator,
[AppConnection.Chef]: validateChefConnectionCredentials as TAppConnectionCredentialsValidator,
[AppConnection.Redis]: validateRedisConnectionCredentials as TAppConnectionCredentialsValidator,
[AppConnection.MongoDB]: validateMongoDBConnectionCredentials as TAppConnectionCredentialsValidator
[AppConnection.MongoDB]: validateMongoDBConnectionCredentials as TAppConnectionCredentialsValidator,
[AppConnection.OctopusDeploy]: validateOctopusDeployConnectionCredentials as TAppConnectionCredentialsValidator
};
return VALIDATE_APP_CONNECTION_CREDENTIALS_MAP[appConnection.app](appConnection, gatewayService, gatewayV2Service);
@@ -430,6 +437,7 @@ export const getAppConnectionMethodName = (method: TAppConnection["method"]) =>
return "Simple Bind";
case RenderConnectionMethod.ApiKey:
case ChecklyConnectionMethod.ApiKey:
case OctopusDeployConnectionMethod.ApiKey:
return "API Key";
case ChefConnectionMethod.UserKey:
return "User Key";
@@ -510,7 +518,8 @@ export const TRANSITION_CONNECTION_CREDENTIALS_TO_PLATFORM: Record<
[AppConnection.Redis]: platformManagedCredentialsNotSupported,
[AppConnection.MongoDB]: platformManagedCredentialsNotSupported,
[AppConnection.LaravelForge]: platformManagedCredentialsNotSupported,
[AppConnection.Chef]: platformManagedCredentialsNotSupported
[AppConnection.Chef]: platformManagedCredentialsNotSupported,
[AppConnection.OctopusDeploy]: platformManagedCredentialsNotSupported
};
export const enterpriseAppCheck = async (

View File

@@ -44,7 +44,8 @@ export const APP_CONNECTION_NAME_MAP: Record<AppConnection, string> = {
[AppConnection.Redis]: "Redis",
[AppConnection.MongoDB]: "MongoDB",
[AppConnection.Chef]: "Chef",
[AppConnection.Northflank]: "Northflank"
[AppConnection.Northflank]: "Northflank",
[AppConnection.OctopusDeploy]: "Octopus Deploy"
};
export const APP_CONNECTION_PLAN_MAP: Record<AppConnection, AppConnectionPlanType> = {
@@ -91,5 +92,6 @@ export const APP_CONNECTION_PLAN_MAP: Record<AppConnection, AppConnectionPlanTyp
[AppConnection.Redis]: AppConnectionPlanType.Regular,
[AppConnection.MongoDB]: AppConnectionPlanType.Regular,
[AppConnection.Chef]: AppConnectionPlanType.Enterprise,
[AppConnection.Northflank]: AppConnectionPlanType.Regular
[AppConnection.Northflank]: AppConnectionPlanType.Regular,
[AppConnection.OctopusDeploy]: AppConnectionPlanType.Regular
};

View File

@@ -103,6 +103,8 @@ import { ValidateNetlifyConnectionCredentialsSchema } from "./netlify";
import { netlifyConnectionService } from "./netlify/netlify-connection-service";
import { ValidateNorthflankConnectionCredentialsSchema } from "./northflank";
import { northflankConnectionService } from "./northflank/northflank-connection-service";
import { ValidateOctopusDeployConnectionCredentialsSchema } from "./octopus-deploy";
import { octopusDeployConnectionService } from "./octopus-deploy/octopus-deploy-connection-service";
import { ValidateOktaConnectionCredentialsSchema } from "./okta";
import { oktaConnectionService } from "./okta/okta-connection-service";
import { ValidatePostgresConnectionCredentialsSchema } from "./postgres";
@@ -182,7 +184,8 @@ const VALIDATE_APP_CONNECTION_CREDENTIALS_MAP: Record<AppConnection, TValidateAp
[AppConnection.Okta]: ValidateOktaConnectionCredentialsSchema,
[AppConnection.Redis]: ValidateRedisConnectionCredentialsSchema,
[AppConnection.MongoDB]: ValidateMongoDBConnectionCredentialsSchema,
[AppConnection.Chef]: ValidateChefConnectionCredentialsSchema
[AppConnection.Chef]: ValidateChefConnectionCredentialsSchema,
[AppConnection.OctopusDeploy]: ValidateOctopusDeployConnectionCredentialsSchema
};
export const appConnectionServiceFactory = ({
@@ -891,6 +894,7 @@ export const appConnectionServiceFactory = ({
northflank: northflankConnectionService(connectAppConnectionById),
okta: oktaConnectionService(connectAppConnectionById),
laravelForge: laravelForgeConnectionService(connectAppConnectionById),
chef: chefConnectionService(connectAppConnectionById, licenseService)
chef: chefConnectionService(connectAppConnectionById, licenseService),
octopusDeploy: octopusDeployConnectionService(connectAppConnectionById)
};
};

View File

@@ -192,6 +192,12 @@ import {
TNorthflankConnectionInput,
TValidateNorthflankConnectionCredentialsSchema
} from "./northflank";
import {
TOctopusDeployConnection,
TOctopusDeployConnectionConfig,
TOctopusDeployConnectionInput,
TValidateOctopusDeployConnectionCredentialsSchema
} from "./octopus-deploy";
import {
TOktaConnection,
TOktaConnectionConfig,
@@ -303,6 +309,7 @@ export type TAppConnection = { id: string } & (
| TRedisConnection
| TMongoDBConnection
| TChefConnection
| TOctopusDeployConnection
);
export type TAppConnectionRaw = NonNullable<Awaited<ReturnType<TAppConnectionDALFactory["findById"]>>>;
@@ -354,6 +361,7 @@ export type TAppConnectionInput = { id: string } & (
| TRedisConnectionInput
| TMongoDBConnectionInput
| TChefConnectionInput
| TOctopusDeployConnectionInput
);
export type TSqlConnectionInput =
@@ -422,7 +430,8 @@ export type TAppConnectionConfig =
| TOktaConnectionConfig
| TRedisConnectionConfig
| TMongoDBConnectionConfig
| TChefConnectionConfig;
| TChefConnectionConfig
| TOctopusDeployConnectionConfig;
export type TValidateAppConnectionCredentialsSchema =
| TValidateAwsConnectionCredentialsSchema
@@ -468,7 +477,8 @@ export type TValidateAppConnectionCredentialsSchema =
| TValidateOktaConnectionCredentialsSchema
| TValidateRedisConnectionCredentialsSchema
| TValidateMongoDBConnectionCredentialsSchema
| TValidateChefConnectionCredentialsSchema;
| TValidateChefConnectionCredentialsSchema
| TValidateOctopusDeployConnectionCredentialsSchema;
export type TListAwsConnectionKmsKeys = {
connectionId: string;

View File

@@ -0,0 +1,4 @@
export * from "./octopus-deploy-connection-enums";
export * from "./octopus-deploy-connection-fns";
export * from "./octopus-deploy-connection-schemas";
export * from "./octopus-deploy-connection-types";

View File

@@ -0,0 +1,3 @@
export enum OctopusDeployConnectionMethod {
ApiKey = "api-key"
}

View File

@@ -0,0 +1,204 @@
import { AxiosError } from "axios";
import { request } from "@app/lib/config/request";
import { BadRequestError } from "@app/lib/errors";
import { removeTrailingSlash } from "@app/lib/fn";
import { blockLocalAndPrivateIpAddresses } from "@app/lib/validator";
import { AppConnection } from "../app-connection-enums";
import { OctopusDeployConnectionMethod } from "./octopus-deploy-connection-enums";
import {
TOctopusDeployConnection,
TOctopusDeployConnectionConfig,
TOctopusDeployProject,
TOctopusDeployProjectResponse,
TOctopusDeployScopeValues,
TOctopusDeployScopeValuesResponse,
TOctopusDeploySpace,
TOctopusDeploySpaceResponse
} from "./octopus-deploy-connection-types";
export const getOctopusDeployInstanceUrl = async (config: TOctopusDeployConnectionConfig) => {
const instanceUrl = removeTrailingSlash(config.credentials.instanceUrl);
await blockLocalAndPrivateIpAddresses(instanceUrl);
return instanceUrl;
};
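// Illustrative: "https://octopus.example.com/" (hypothetical) normalizes to
// "https://octopus.example.com"; blockLocalAndPrivateIpAddresses rejects instance
// URLs resolving to loopback or private ranges, guarding against SSRF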
export const getOctopusDeployConnectionListItem = () => {
return {
name: "Octopus Deploy" as const,
app: AppConnection.OctopusDeploy as const,
methods: Object.values(OctopusDeployConnectionMethod) as [OctopusDeployConnectionMethod.ApiKey]
};
};
export const validateOctopusDeployConnectionCredentials = async (config: TOctopusDeployConnectionConfig) => {
const instanceUrl = await getOctopusDeployInstanceUrl(config);
const { apiKey } = config.credentials;
try {
await request.get(`${instanceUrl}/api/users/me`, {
headers: {
"X-Octopus-ApiKey": apiKey,
"X-NuGet-ApiKey": apiKey,
Accept: "application/json"
}
});
} catch (error: unknown) {
if (error instanceof AxiosError) {
throw new BadRequestError({
message: `Failed to validate Octopus Deploy credentials: ${error.message || "Unknown error"}`
});
}
throw new BadRequestError({
message: `Failed to validate Octopus Deploy credentials - verify API key is correct`
});
}
return config.credentials;
};
export const getOctopusDeploySpaces = async (
appConnection: TOctopusDeployConnection
): Promise<TOctopusDeploySpace[]> => {
const instanceUrl = await getOctopusDeployInstanceUrl(appConnection);
const { apiKey } = appConnection.credentials;
try {
const { data } = await request.get<TOctopusDeploySpaceResponse[]>(`${instanceUrl}/api/spaces/all`, {
headers: {
"X-Octopus-ApiKey": apiKey,
"X-NuGet-ApiKey": apiKey,
Accept: "application/json"
}
});
return data.map((space) => ({
id: space.Id,
name: space.Name,
slug: space.Slug,
isDefault: space.IsDefault
}));
} catch (error: unknown) {
if (error instanceof AxiosError) {
const errorMessage = (error.response?.data as { error: { ErrorMessage: string } })?.error?.ErrorMessage;
throw new BadRequestError({
message: `Failed to list Octopus Deploy spaces: ${errorMessage || "Unknown error"}`,
error: error.response?.data
});
}
throw new BadRequestError({
message: "Unable to list Octopus Deploy spaces",
error
});
}
};
export const getOctopusDeployProjects = async (
appConnection: TOctopusDeployConnection,
spaceId: string
): Promise<TOctopusDeployProject[]> => {
const instanceUrl = await getOctopusDeployInstanceUrl(appConnection);
const { apiKey } = appConnection.credentials;
try {
const { data } = await request.get<TOctopusDeployProjectResponse[]>(`${instanceUrl}/api/${spaceId}/projects/all`, {
headers: {
"X-Octopus-ApiKey": apiKey,
"X-NuGet-ApiKey": apiKey,
Accept: "application/json"
}
});
return data.map((project) => ({
id: project.Id,
name: project.Name,
slug: project.Slug
}));
} catch (error: unknown) {
if (error instanceof AxiosError) {
const errorMessage = (error.response?.data as { error: { ErrorMessage: string } })?.error?.ErrorMessage;
throw new BadRequestError({
message: `Failed to list Octopus Deploy projects: ${errorMessage || "Unknown error"}`,
error: error.response?.data
});
}
throw new BadRequestError({
message: "Unable to list Octopus Deploy projects",
error
});
}
};
export const getOctopusDeployScopeValues = async (
appConnection: TOctopusDeployConnection,
spaceId: string,
projectId: string
): Promise<TOctopusDeployScopeValues> => {
const instanceUrl = await getOctopusDeployInstanceUrl(appConnection);
const { apiKey } = appConnection.credentials;
try {
const { data } = await request.get<TOctopusDeployScopeValuesResponse>(
`${instanceUrl}/api/${spaceId}/projects/${projectId}/variables`,
{
headers: {
"X-Octopus-ApiKey": apiKey,
"X-NuGet-ApiKey": apiKey,
Accept: "application/json"
}
}
);
const { ScopeValues } = data;
const scopeValues: TOctopusDeployScopeValues = {
environments: ScopeValues.Environments.map((environment) => ({
id: environment.Id,
name: environment.Name
})),
roles: ScopeValues.Roles.map((role) => ({
id: role.Id,
name: role.Name
})),
machines: ScopeValues.Machines.map((machine) => ({
id: machine.Id,
name: machine.Name
})),
processes: ScopeValues.Processes.map((process) => ({
id: process.Id,
name: process.Name
})),
actions: ScopeValues.Actions.map((action) => ({
id: action.Id,
name: action.Name
})),
channels: ScopeValues.Channels.map((channel) => ({
id: channel.Id,
name: channel.Name
}))
};
return scopeValues;
} catch (error: unknown) {
if (error instanceof AxiosError) {
const errorMessage = (error.response?.data as { error: { ErrorMessage: string } })?.error?.ErrorMessage;
throw new BadRequestError({
message: `Failed to get Octopus Deploy scope values: ${errorMessage || "Unknown error"}`,
error: error.response?.data
});
}
throw new BadRequestError({
message: "Unable to get Octopus Deploy scope values",
error
});
}
};

View File

@@ -0,0 +1,72 @@
import z from "zod";
import { AppConnections } from "@app/lib/api-docs";
import { AppConnection } from "@app/services/app-connection/app-connection-enums";
import {
BaseAppConnectionSchema,
GenericCreateAppConnectionFieldsSchema,
GenericUpdateAppConnectionFieldsSchema
} from "@app/services/app-connection/app-connection-schemas";
import { APP_CONNECTION_NAME_MAP } from "../app-connection-maps";
import { OctopusDeployConnectionMethod } from "./octopus-deploy-connection-enums";
export const OctopusDeployConnectionApiKeyCredentialsSchema = z.object({
instanceUrl: z
.string()
.trim()
.url("Invalid Instance URL")
.min(1, "Instance URL required")
.max(255)
.describe(AppConnections.CREDENTIALS.OCTOPUS_DEPLOY.instanceUrl),
apiKey: z.string().trim().min(1, "API key required").describe(AppConnections.CREDENTIALS.OCTOPUS_DEPLOY.apiKey)
});
const BaseOctopusDeployConnectionSchema = BaseAppConnectionSchema.extend({
app: z.literal(AppConnection.OctopusDeploy)
});
export const OctopusDeployConnectionSchema = z.discriminatedUnion("method", [
BaseOctopusDeployConnectionSchema.extend({
method: z.literal(OctopusDeployConnectionMethod.ApiKey),
credentials: OctopusDeployConnectionApiKeyCredentialsSchema
})
]);
export const SanitizedOctopusDeployConnectionSchema = z.discriminatedUnion("method", [
BaseOctopusDeployConnectionSchema.extend({
method: z.literal(OctopusDeployConnectionMethod.ApiKey),
credentials: OctopusDeployConnectionApiKeyCredentialsSchema.pick({ instanceUrl: true })
}).describe(JSON.stringify({ title: `${APP_CONNECTION_NAME_MAP[AppConnection.OctopusDeploy]} (API Key)` }))
]);
export const ValidateOctopusDeployConnectionCredentialsSchema = z.discriminatedUnion("method", [
z.object({
method: z
.literal(OctopusDeployConnectionMethod.ApiKey)
.describe(AppConnections.CREATE(AppConnection.OctopusDeploy).method),
credentials: OctopusDeployConnectionApiKeyCredentialsSchema.describe(
AppConnections.CREATE(AppConnection.OctopusDeploy).credentials
)
})
]);
export const CreateOctopusDeployConnectionSchema = ValidateOctopusDeployConnectionCredentialsSchema.and(
GenericCreateAppConnectionFieldsSchema(AppConnection.OctopusDeploy)
);
export const UpdateOctopusDeployConnectionSchema = z
.object({
credentials: OctopusDeployConnectionApiKeyCredentialsSchema.optional().describe(
AppConnections.UPDATE(AppConnection.OctopusDeploy).credentials
)
})
.and(GenericUpdateAppConnectionFieldsSchema(AppConnection.OctopusDeploy));
export const OctopusDeployConnectionListItemSchema = z
.object({
name: z.literal("Octopus Deploy"),
app: z.literal(AppConnection.OctopusDeploy),
methods: z.nativeEnum(OctopusDeployConnectionMethod).array()
})
.describe(JSON.stringify({ title: APP_CONNECTION_NAME_MAP[AppConnection.OctopusDeploy] }));

View File

@@ -0,0 +1,65 @@
import { logger } from "@app/lib/logger";
import { OrgServiceActor } from "@app/lib/types";
import { AppConnection } from "@app/services/app-connection/app-connection-enums";
import {
getOctopusDeployProjects,
getOctopusDeployScopeValues,
getOctopusDeploySpaces
} from "./octopus-deploy-connection-fns";
import { TOctopusDeployConnection } from "./octopus-deploy-connection-types";
type TGetAppConnectionFunc = (
app: AppConnection,
connectionId: string,
actor: OrgServiceActor
) => Promise<TOctopusDeployConnection>;
export const octopusDeployConnectionService = (getAppConnection: TGetAppConnectionFunc) => {
const listSpaces = async (connectionId: string, actor: OrgServiceActor) => {
const appConnection = await getAppConnection(AppConnection.OctopusDeploy, connectionId, actor);
try {
const spaces = await getOctopusDeploySpaces(appConnection);
return spaces;
} catch (error) {
logger.error({ error, connectionId, actor: actor.type }, "Failed to list Octopus Deploy spaces");
return [];
}
};
const listProjects = async (connectionId: string, spaceId: string, actor: OrgServiceActor) => {
const appConnection = await getAppConnection(AppConnection.OctopusDeploy, connectionId, actor);
try {
const projects = await getOctopusDeployProjects(appConnection, spaceId);
return projects;
} catch (error) {
logger.error({ error, connectionId, spaceId, actor: actor.type }, "Failed to list Octopus Deploy projects");
return [];
}
};
const getScopeValues = async (connectionId: string, spaceId: string, projectId: string, actor: OrgServiceActor) => {
const appConnection = await getAppConnection(AppConnection.OctopusDeploy, connectionId, actor);
try {
const scopeValues = await getOctopusDeployScopeValues(appConnection, spaceId, projectId);
return scopeValues;
} catch (error) {
logger.error(
{ error, connectionId, spaceId, projectId, actor: actor.type },
"Failed to get Octopus Deploy scope values"
);
return null;
}
};
return {
listSpaces,
listProjects,
getScopeValues
};
};

View File

@@ -0,0 +1,69 @@
import z from "zod";
import { DiscriminativePick } from "@app/lib/types";
import { AppConnection } from "../app-connection-enums";
import {
CreateOctopusDeployConnectionSchema,
OctopusDeployConnectionSchema,
ValidateOctopusDeployConnectionCredentialsSchema
} from "./octopus-deploy-connection-schemas";
export type TOctopusDeployConnection = z.infer<typeof OctopusDeployConnectionSchema>;
export type TOctopusDeployConnectionInput = z.infer<typeof CreateOctopusDeployConnectionSchema> & {
app: AppConnection.OctopusDeploy;
};
export type TValidateOctopusDeployConnectionCredentialsSchema = typeof ValidateOctopusDeployConnectionCredentialsSchema;
export type TOctopusDeployConnectionConfig = DiscriminativePick<
TOctopusDeployConnectionInput,
"method" | "app" | "credentials"
>;
export type TOctopusDeploySpaceResponse = {
Id: string;
Name: string;
Slug: string;
IsDefault: boolean;
};
export type TOctopusDeploySpace = {
id: string;
name: string;
slug: string;
isDefault: boolean;
};
export type TOctopusDeployProjectResponse = {
Id: string;
Name: string;
Slug: string;
};
export type TOctopusDeployProject = {
id: string;
name: string;
slug: string;
};
export type TOctopusDeployScopeValuesResponse = {
ScopeValues: {
Environments: { Id: string; Name: string }[];
Roles: { Id: string; Name: string }[];
Machines: { Id: string; Name: string }[];
Processes: { Id: string; Name: string }[];
Actions: { Id: string; Name: string }[];
Channels: { Id: string; Name: string }[];
};
};
export type TOctopusDeployScopeValues = {
environments: { id: string; name: string }[];
roles: { id: string; name: string }[];
machines: { id: string; name: string }[];
processes: { id: string; name: string }[];
actions: { id: string; name: string }[];
channels: { id: string; name: string }[];
};

View File

@@ -31,6 +31,7 @@ import {
import { APPROVAL_POLICY_FACTORY_MAP } from "./approval-policy-factory";
import {
ApprovalPolicyStep,
TApprovalPolicyInputs,
TApprovalRequest,
TCreatePolicyDTO,
TCreateRequestDTO,
@@ -819,7 +820,18 @@ export const approvalPolicyServiceFactory = ({
);
const grants = await approvalRequestGrantsDAL.find({ projectId, type: policyType });
return { grants };
const updatedGrants = grants.map((grant) => {
if (
grant.status === ApprovalRequestGrantStatus.Active &&
grant.expiresAt &&
new Date(grant.expiresAt) < new Date()
) {
return { ...grant, status: ApprovalRequestGrantStatus.Expired };
}
return grant;
});
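// Note: the Expired status here is computed at read time and is not persisted;
// the stored row keeps its Active status until explicitly updated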
return { grants: updatedGrants };
};
const getGrantById = async (grantId: string, actor: OrgServiceActor) => {
@@ -842,7 +854,15 @@ export const approvalPolicyServiceFactory = ({
ProjectPermissionSub.ApprovalRequestGrants
);
return { grant };
let { status } = grant;
if (
grant.status === ApprovalRequestGrantStatus.Active &&
grant.expiresAt &&
new Date(grant.expiresAt) < new Date()
) {
status = ApprovalRequestGrantStatus.Expired;
}
return { grant: { ...grant, status } };
};
const revokeGrant = async (
@@ -883,6 +903,36 @@ export const approvalPolicyServiceFactory = ({
return { grant: updatedGrant };
};
const checkPolicyMatch = async (
policyType: ApprovalPolicyType,
{ projectId, inputs }: { projectId: string; inputs: TApprovalPolicyInputs },
actor: OrgServiceActor
) => {
await permissionService.getProjectPermission({
actor: actor.type,
actorAuthMethod: actor.authMethod,
actorId: actor.id,
actorOrgId: actor.orgId,
projectId,
actionProjectType: ActionProjectType.Any
});
const fac = APPROVAL_POLICY_FACTORY_MAP[policyType](policyType);
const policy = await fac.matchPolicy(approvalPolicyDAL, projectId, inputs);
if (!policy) {
return { requiresApproval: false, hasActiveGrant: false };
}
const hasActiveGrant = await fac.canAccess(approvalRequestGrantsDAL, projectId, actor.id, inputs);
return {
requiresApproval: !hasActiveGrant,
hasActiveGrant
};
};
return {
create,
list,
@@ -897,6 +947,7 @@ export const approvalPolicyServiceFactory = ({
cancelRequest,
listGrants,
getGrantById,
revokeGrant
revokeGrant,
checkPolicyMatch
};
};

View File

@@ -26,13 +26,16 @@ export const pamAccessPolicyFactory: TApprovalResourceFactory<
let bestMatch: { policy: TPamAccessPolicy; wildcardCount: number; pathLength: number } | null = null;
const normalizedAccountPath = inputs.accountPath.startsWith("/") ? inputs.accountPath.slice(1) : inputs.accountPath;
for (const policy of policies) {
const p = policy as TPamAccessPolicy;
for (const c of p.conditions.conditions) {
// Find the most specific path pattern
// TODO(andrey): Make matching logic more advanced by accounting for wildcard positions
for (const pathPattern of c.accountPaths) {
if (picomatch(pathPattern)(inputs.accountPath)) {
const normalizedPathPattern = pathPattern.startsWith("/") ? pathPattern.slice(1) : pathPattern;
if (picomatch(normalizedPathPattern)(normalizedAccountPath)) {
const wildcardCount = (pathPattern.match(/\*/g) || []).length;
const pathLength = pathPattern.length;
@@ -65,11 +68,16 @@ export const pamAccessPolicyFactory: TApprovalResourceFactory<
revokedAt: null
});
const normalizedAccountPath = inputs.accountPath.startsWith("/") ? inputs.accountPath.slice(1) : inputs.accountPath;
// TODO(andrey): Move some of this check to be part of SQL query
return grants.some((grant) => {
const grantAttributes = grant.attributes as TPamAccessPolicyInputs;
const isMatch = picomatch(grantAttributes.accountPath);
return isMatch(inputs.accountPath) && (!grant.expiresAt || grant.expiresAt > new Date());
const normalizedGrantPath = grantAttributes.accountPath.startsWith("/")
? grantAttributes.accountPath.slice(1)
: grantAttributes.accountPath;
const isMatch = picomatch(normalizedGrantPath);
return isMatch(normalizedAccountPath) && (!grant.expiresAt || grant.expiresAt > new Date());
});
};

View File

@@ -3,10 +3,23 @@ import { Knex } from "knex";
import { TDbClient } from "@app/db";
import { TableName, TCertificateRequests, TCertificates } from "@app/db/schemas";
import { DatabaseError } from "@app/lib/errors";
import { sanitizeSqlLikeString } from "@app/lib/fn/string";
import { ormify, selectAllTableCols } from "@app/lib/knex";
import {
applyProcessedPermissionRulesToQuery,
type ProcessedPermissionRules
} from "@app/lib/knex/permission-filter-utils";
type TCertificateRequestWithCertificate = TCertificateRequests & {
certificate: TCertificates | null;
profileName: string | null;
};
type TCertificateRequestQueryResult = TCertificateRequests & {
certId: string | null;
certSerialNumber: string | null;
certStatus: string | null;
profileName: string | null;
};
export type TCertificateRequestDALFactory = ReturnType<typeof certificateRequestDALFactory>;
@@ -16,24 +29,41 @@ export const certificateRequestDALFactory = (db: TDbClient) => {
const findByIdWithCertificate = async (id: string): Promise<TCertificateRequestWithCertificate | null> => {
try {
const certificateRequest = await certificateRequestOrm.findById(id);
if (!certificateRequest) return null;
const result = (await db(TableName.CertificateRequests)
.leftJoin(
TableName.Certificate,
`${TableName.CertificateRequests}.certificateId`,
`${TableName.Certificate}.id`
)
.leftJoin(
TableName.PkiCertificateProfile,
`${TableName.CertificateRequests}.profileId`,
`${TableName.PkiCertificateProfile}.id`
)
.where(`${TableName.CertificateRequests}.id`, id)
.select(selectAllTableCols(TableName.CertificateRequests))
.select(db.ref("slug").withSchema(TableName.PkiCertificateProfile).as("profileName"))
.select(db.ref("id").withSchema(TableName.Certificate).as("certId"))
.select(db.ref("serialNumber").withSchema(TableName.Certificate).as("certSerialNumber"))
.select(db.ref("status").withSchema(TableName.Certificate).as("certStatus"))
.first()) as TCertificateRequestQueryResult | undefined;
if (!certificateRequest.certificateId) {
return {
...certificateRequest,
certificate: null
};
}
if (!result) return null;
const certificate = await db(TableName.Certificate)
.where("id", certificateRequest.certificateId)
.select(selectAllTableCols(TableName.Certificate))
.first();
const { certId, certSerialNumber, certStatus, profileName, ...certificateRequestData } = result;
const certificate: TCertificates | null = certId
? ({
id: certId,
serialNumber: certSerialNumber,
status: certStatus
} as TCertificates)
: null;
return {
...certificateRequest,
certificate: certificate || null
...certificateRequestData,
profileName: profileName || null,
certificate
};
} catch (error) {
throw new DatabaseError({ error, name: "Find certificate request by ID with certificate" });
@@ -82,11 +112,259 @@ export const certificateRequestDALFactory = (db: TDbClient) => {
}
};
const findByProjectId = async (
projectId: string,
options: {
offset?: number;
limit?: number;
search?: string;
status?: string;
fromDate?: Date;
toDate?: Date;
profileIds?: string[];
} = {},
processedRules?: ProcessedPermissionRules,
tx?: Knex
): Promise<TCertificateRequests[]> => {
try {
const { offset = 0, limit = 20, search, status, fromDate, toDate, profileIds } = options;
let query = (tx || db)(TableName.CertificateRequests)
.leftJoin(
TableName.PkiCertificateProfile,
`${TableName.CertificateRequests}.profileId`,
`${TableName.PkiCertificateProfile}.id`
)
.where(`${TableName.CertificateRequests}.projectId`, projectId);
if (profileIds && profileIds.length > 0) {
query = query.whereIn(`${TableName.CertificateRequests}.profileId`, profileIds);
}
if (search) {
// eslint-disable-next-line @typescript-eslint/no-unsafe-assignment, @typescript-eslint/no-unsafe-call
const sanitizedSearch = sanitizeSqlLikeString(search);
query = query.where((builder) => {
void builder
.whereILike(`${TableName.CertificateRequests}.commonName`, `%${sanitizedSearch}%`)
.orWhereILike(`${TableName.CertificateRequests}.altNames`, `%${sanitizedSearch}%`);
});
}
if (status) {
query = query.where(`${TableName.CertificateRequests}.status`, status);
}
if (fromDate) {
query = query.where(`${TableName.CertificateRequests}.createdAt`, ">=", fromDate);
}
if (toDate) {
query = query.where(`${TableName.CertificateRequests}.createdAt`, "<=", toDate);
}
query = query
.select(selectAllTableCols(TableName.CertificateRequests))
.select(db.ref("slug").withSchema(TableName.PkiCertificateProfile).as("profileName"));
if (processedRules) {
query = applyProcessedPermissionRulesToQuery(
query,
TableName.CertificateRequests,
processedRules
) as typeof query;
}
const certificateRequests = await query.orderBy("createdAt", "desc").offset(offset).limit(limit);
return certificateRequests;
} catch (error) {
throw new DatabaseError({ error, name: "Find certificate requests by project ID" });
}
};
const countByProjectId = async (
projectId: string,
options: {
search?: string;
status?: string;
fromDate?: Date;
toDate?: Date;
profileIds?: string[];
} = {},
processedRules?: ProcessedPermissionRules,
tx?: Knex
): Promise<number> => {
try {
const { search, status, fromDate, toDate, profileIds } = options;
let query = (tx || db)(TableName.CertificateRequests)
.leftJoin(
TableName.PkiCertificateProfile,
`${TableName.CertificateRequests}.profileId`,
`${TableName.PkiCertificateProfile}.id`
)
.where(`${TableName.CertificateRequests}.projectId`, projectId);
if (profileIds && profileIds.length > 0) {
query = query.whereIn(`${TableName.CertificateRequests}.profileId`, profileIds);
}
if (search) {
// eslint-disable-next-line @typescript-eslint/no-unsafe-assignment, @typescript-eslint/no-unsafe-call
const sanitizedSearch = sanitizeSqlLikeString(search);
query = query.where((builder) => {
void builder
.whereILike(`${TableName.CertificateRequests}.commonName`, `%${sanitizedSearch}%`)
.orWhereILike(`${TableName.CertificateRequests}.altNames`, `%${sanitizedSearch}%`);
});
}
if (status) {
query = query.where(`${TableName.CertificateRequests}.status`, status);
}
if (fromDate) {
query = query.where(`${TableName.CertificateRequests}.createdAt`, ">=", fromDate);
}
if (toDate) {
query = query.where(`${TableName.CertificateRequests}.createdAt`, "<=", toDate);
}
if (processedRules) {
query = applyProcessedPermissionRulesToQuery(
query,
TableName.CertificateRequests,
processedRules
) as typeof query;
}
const result = await query.count("*").first();
const count = (result as unknown as Record<string, unknown>)?.count;
return parseInt(String(count || "0"), 10);
} catch (error) {
throw new DatabaseError({ error, name: "Count certificate requests by project ID" });
}
};
const findByProjectIdWithCertificate = async (
projectId: string,
options: {
offset?: number;
limit?: number;
search?: string;
status?: string;
fromDate?: Date;
toDate?: Date;
profileIds?: string[];
sortBy?: string;
sortOrder?: "asc" | "desc";
} = {},
processedRules?: ProcessedPermissionRules,
tx?: Knex
): Promise<TCertificateRequestWithCertificate[]> => {
try {
const {
offset = 0,
limit = 20,
search,
status,
fromDate,
toDate,
profileIds,
sortBy = "createdAt",
sortOrder = "desc"
} = options;
let query: Knex.QueryBuilder = (tx || db)(TableName.CertificateRequests)
.leftJoin(
TableName.Certificate,
`${TableName.CertificateRequests}.certificateId`,
`${TableName.Certificate}.id`
)
.leftJoin(
TableName.PkiCertificateProfile,
`${TableName.CertificateRequests}.profileId`,
`${TableName.PkiCertificateProfile}.id`
);
if (profileIds && profileIds.length > 0) {
query = query.whereIn(`${TableName.CertificateRequests}.profileId`, profileIds);
}
query = query
.select(selectAllTableCols(TableName.CertificateRequests))
.select(db.ref("slug").withSchema(TableName.PkiCertificateProfile).as("profileName"))
.select(db.ref("id").withSchema(TableName.Certificate).as("certId"))
.select(db.ref("serialNumber").withSchema(TableName.Certificate).as("certSerialNumber"))
.select(db.ref("status").withSchema(TableName.Certificate).as("certStatus"))
.where(`${TableName.CertificateRequests}.projectId`, projectId);
if (search) {
// eslint-disable-next-line @typescript-eslint/no-unsafe-assignment, @typescript-eslint/no-unsafe-call
const sanitizedSearch = sanitizeSqlLikeString(search);
query = query.where((builder) => {
void builder
.whereILike(`${TableName.CertificateRequests}.commonName`, `%${sanitizedSearch}%`)
.orWhereILike(`${TableName.CertificateRequests}.altNames`, `%${sanitizedSearch}%`);
});
}
if (status) {
query = query.where(`${TableName.CertificateRequests}.status`, status);
}
if (fromDate) {
query = query.where(`${TableName.CertificateRequests}.createdAt`, ">=", fromDate);
}
if (toDate) {
query = query.where(`${TableName.CertificateRequests}.createdAt`, "<=", toDate);
}
if (processedRules) {
query = applyProcessedPermissionRulesToQuery(query, TableName.CertificateRequests, processedRules);
}
const allowedSortColumns = ["createdAt", "updatedAt", "status", "commonName"];
const safeSortBy = allowedSortColumns.includes(sortBy) ? sortBy : "createdAt";
const safeSortOrder = sortOrder === "asc" || sortOrder === "desc" ? sortOrder : "desc";
const results = (await query
.orderBy(`${TableName.CertificateRequests}.${safeSortBy}`, safeSortOrder)
.offset(offset)
.limit(limit)) as TCertificateRequestQueryResult[];
return results.map((row): TCertificateRequestWithCertificate => {
const { certId, certSerialNumber, certStatus, profileName: rowProfileName, ...certificateRequestData } = row;
const certificate: TCertificates | null = certId
? ({
id: certId,
serialNumber: certSerialNumber,
status: certStatus
} as TCertificates)
: null;
return {
...certificateRequestData,
profileName: rowProfileName || null,
certificate
};
});
} catch (error) {
throw new DatabaseError({ error, name: "Find certificate requests by project ID with certificates" });
}
};
return {
...certificateRequestOrm,
findByIdWithCertificate,
findPendingByProjectId,
updateStatus,
attachCertificate
attachCertificate,
findByProjectId,
countByProjectId,
findByProjectIdWithCertificate
};
};

View File

@@ -9,6 +9,7 @@ import {
ProjectPermissionCertificateProfileActions,
ProjectPermissionSub
} from "@app/ee/services/permission/project-permission";
import { getProcessedPermissionRules } from "@app/lib/casl/permission-filter-utils";
import { BadRequestError, NotFoundError } from "@app/lib/errors";
import { TCertificateDALFactory } from "@app/services/certificate/certificate-dal";
import { TCertificateServiceFactory } from "@app/services/certificate/certificate-service";
@@ -21,6 +22,7 @@ import {
TCreateCertificateRequestDTO,
TGetCertificateFromRequestDTO,
TGetCertificateRequestDTO,
TListCertificateRequestsDTO,
TUpdateCertificateRequestStatusDTO
} from "./certificate-request-types";
@@ -285,11 +287,76 @@ export const certificateRequestServiceFactory = ({
return certificateRequestDAL.attachCertificate(certificateRequestId, certificateId);
};
const listCertificateRequests = async ({
actor,
actorId,
actorAuthMethod,
actorOrgId,
projectId,
offset = 0,
limit = 20,
search,
status,
fromDate,
toDate,
profileIds,
sortBy,
sortOrder
}: TListCertificateRequestsDTO) => {
const { permission } = await permissionService.getProjectPermission({
actor,
actorId,
projectId,
actorAuthMethod,
actorOrgId,
actionProjectType: ActionProjectType.CertificateManager
});
ForbiddenError.from(permission).throwUnlessCan(
ProjectPermissionCertificateActions.Read,
ProjectPermissionSub.Certificates
);
const processedRules = getProcessedPermissionRules(
permission,
ProjectPermissionCertificateActions.Read,
ProjectPermissionSub.Certificates
);
const options: Parameters<typeof certificateRequestDAL.findByProjectIdWithCertificate>[1] = {
offset,
limit,
search,
status,
fromDate,
toDate,
profileIds,
sortBy,
sortOrder
};
const [certificateRequests, totalCount] = await Promise.all([
certificateRequestDAL.findByProjectIdWithCertificate(projectId, options, processedRules),
certificateRequestDAL.countByProjectId(projectId, options, processedRules)
]);
const mappedCertificateRequests = certificateRequests.map((request) => ({
...request,
status: request.status as CertificateRequestStatus
}));
return {
certificateRequests: mappedCertificateRequests,
totalCount
};
};
return {
createCertificateRequest,
getCertificateRequest,
getCertificateFromRequest,
updateCertificateRequestStatus,
attachCertificateToRequest
attachCertificateToRequest,
listCertificateRequests
};
};

View File

@@ -42,3 +42,15 @@ export type TAttachCertificateToRequestDTO = {
certificateRequestId: string;
certificateId: string;
};
export type TListCertificateRequestsDTO = TProjectPermission & {
offset?: number;
limit?: number;
search?: string;
status?: CertificateRequestStatus;
fromDate?: Date;
toDate?: Date;
profileIds?: string[];
sortBy?: string;
sortOrder?: "asc" | "desc";
};

View File

@@ -1,13 +1,13 @@
import RE2 from "re2";
import { TDbClient } from "@app/db";
import { TableName, TCertificates } from "@app/db/schemas";
import { DatabaseError } from "@app/lib/errors";
import { sanitizeSqlLikeString } from "@app/lib/fn/string";
import { ormify, selectAllTableCols } from "@app/lib/knex";
import {
applyProcessedPermissionRulesToQuery,
type ProcessedPermissionRules
} from "@app/lib/knex/permission-filter-utils";
import { isUuidV4 } from "@app/lib/validator";
import { CertStatus } from "./certificate-types";
@@ -48,11 +48,21 @@ export const certificateDALFactory = (db: TDbClient) => {
const countCertificatesInProject = async ({
projectId,
friendlyName,
commonName
commonName,
search,
status,
profileIds,
fromDate,
toDate
}: {
projectId: string;
friendlyName?: string;
commonName?: string;
search?: string;
status?: string | string[];
profileIds?: string[];
fromDate?: Date;
toDate?: Date;
}) => {
try {
interface CountResult {
@@ -66,15 +76,69 @@ export const certificateDALFactory = (db: TDbClient) => {
.where(`${TableName.Project}.id`, projectId);
if (friendlyName) {
const sanitizedValue = String(friendlyName).replace(new RE2("[%_\\\\]", "g"), "\\$&");
const sanitizedValue = sanitizeSqlLikeString(friendlyName);
query = query.andWhere(`${TableName.Certificate}.friendlyName`, "like", `%${sanitizedValue}%`);
}
if (commonName) {
const sanitizedValue = String(commonName).replace(new RE2("[%_\\\\]", "g"), "\\$&");
const sanitizedValue = sanitizeSqlLikeString(commonName);
query = query.andWhere(`${TableName.Certificate}.commonName`, "like", `%${sanitizedValue}%`);
}
if (search) {
const sanitizedValue = sanitizeSqlLikeString(search);
query = query.andWhere((qb) => {
void qb
.where(`${TableName.Certificate}.commonName`, "like", `%${sanitizedValue}%`)
.orWhere(`${TableName.Certificate}.altNames`, "like", `%${sanitizedValue}%`)
.orWhere(`${TableName.Certificate}.serialNumber`, "like", `%${sanitizedValue}%`)
.orWhere(`${TableName.Certificate}.friendlyName`, "like", `%${sanitizedValue}%`);
if (isUuidV4(sanitizedValue)) {
void qb.orWhere(`${TableName.Certificate}.id`, sanitizedValue);
}
});
}
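      // ACTIVE and EXPIRED are derived from notAfter at query time; any other
      // status value (e.g. REVOKED) is matched against the stored column directly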
if (status) {
const now = new Date();
const statuses = Array.isArray(status) ? status : [status];
query = query.andWhere((qb) => {
statuses.forEach((statusValue, index) => {
const whereMethod = index === 0 ? "where" : "orWhere";
if (statusValue === CertStatus.ACTIVE) {
void qb[whereMethod]((innerQb) => {
void innerQb
.where(`${TableName.Certificate}.notAfter`, ">", now)
.andWhere(`${TableName.Certificate}.status`, "!=", CertStatus.REVOKED);
});
} else if (statusValue === CertStatus.EXPIRED) {
void qb[whereMethod]((innerQb) => {
void innerQb
.where(`${TableName.Certificate}.notAfter`, "<=", now)
.andWhere(`${TableName.Certificate}.status`, "!=", CertStatus.REVOKED);
});
} else {
void qb[whereMethod](`${TableName.Certificate}.status`, statusValue);
}
});
});
}
if (fromDate) {
query = query.andWhere(`${TableName.Certificate}.createdAt`, ">=", fromDate);
}
if (toDate) {
query = query.andWhere(`${TableName.Certificate}.createdAt`, "<=", toDate);
}
if (profileIds) {
query = query.whereIn(`${TableName.Certificate}.profileId`, profileIds);
}
const count = await query.count("*").first();
return parseInt((count as unknown as CountResult).count || "0", 10);
@@ -160,7 +224,7 @@ export const certificateDALFactory = (db: TDbClient) => {
Object.entries(filter).forEach(([key, value]) => {
if (value !== undefined && value !== null) {
if (key === "friendlyName" || key === "commonName") {
const sanitizedValue = String(value).replace(new RE2("[%_\\\\]", "g"), "\\$&");
const sanitizedValue = sanitizeSqlLikeString(String(value));
query = query.andWhere(`${TableName.Certificate}.${key}`, "like", `%${sanitizedValue}%`);
} else {
query = query.andWhere(`${TableName.Certificate}.${key}`, value);
@@ -213,12 +277,12 @@ export const certificateDALFactory = (db: TDbClient) => {
.whereNull(`${TableName.Certificate}.renewedByCertificateId`);
if (friendlyName) {
const sanitizedValue = String(friendlyName).replace(new RE2("[%_\\\\]", "g"), "\\$&");
const sanitizedValue = sanitizeSqlLikeString(friendlyName);
query = query.andWhere(`${TableName.Certificate}.friendlyName`, "like", `%${sanitizedValue}%`);
}
if (commonName) {
const sanitizedValue = String(commonName).replace(new RE2("[%_\\\\]", "g"), "\\$&");
const sanitizedValue = sanitizeSqlLikeString(commonName);
query = query.andWhere(`${TableName.Certificate}.commonName`, "like", `%${sanitizedValue}%`);
}
@@ -275,7 +339,17 @@ export const certificateDALFactory = (db: TDbClient) => {
};
const findWithPrivateKeyInfo = async (
filter: Partial<TCertificates & { friendlyName?: string; commonName?: string }>,
filter: Partial<
TCertificates & {
friendlyName?: string;
commonName?: string;
search?: string;
status?: string | string[];
profileIds?: string[];
fromDate?: Date;
toDate?: Date;
}
>,
options?: { offset?: number; limit?: number; sort?: [string, "asc" | "desc"][] },
permissionFilters?: ProcessedPermissionRules
): Promise<(TCertificates & { hasPrivateKey: boolean })[]> => {
@@ -286,17 +360,78 @@ export const certificateDALFactory = (db: TDbClient) => {
.select(selectAllTableCols(TableName.Certificate))
.select(db.ref(`${TableName.CertificateSecret}.certId`).as("privateKeyRef"));
Object.entries(filter).forEach(([key, value]) => {
const { friendlyName, commonName, search, status, profileIds, fromDate, toDate, ...regularFilters } = filter;
Object.entries(regularFilters).forEach(([key, value]) => {
if (value !== undefined && value !== null) {
if (key === "friendlyName" || key === "commonName") {
const sanitizedValue = String(value).replace(new RE2("[%_\\\\]", "g"), "\\$&");
query = query.andWhere(`${TableName.Certificate}.${key}`, "like", `%${sanitizedValue}%`);
} else {
query = query.andWhere(`${TableName.Certificate}.${key}`, value);
}
query = query.andWhere(`${TableName.Certificate}.${key}`, value);
}
});
if (friendlyName) {
const sanitizedValue = sanitizeSqlLikeString(friendlyName);
query = query.andWhere(`${TableName.Certificate}.friendlyName`, "like", `%${sanitizedValue}%`);
}
if (commonName) {
const sanitizedValue = sanitizeSqlLikeString(commonName);
query = query.andWhere(`${TableName.Certificate}.commonName`, "like", `%${sanitizedValue}%`);
}
if (search) {
const sanitizedValue = sanitizeSqlLikeString(search);
query = query.andWhere((qb) => {
void qb
.where(`${TableName.Certificate}.commonName`, "like", `%${sanitizedValue}%`)
.orWhere(`${TableName.Certificate}.altNames`, "like", `%${sanitizedValue}%`)
.orWhere(`${TableName.Certificate}.serialNumber`, "like", `%${sanitizedValue}%`)
.orWhere(`${TableName.Certificate}.friendlyName`, "like", `%${sanitizedValue}%`);
if (isUuidV4(sanitizedValue)) {
void qb.orWhere(`${TableName.Certificate}.id`, sanitizedValue);
}
});
}
if (status) {
const now = new Date();
const statuses = Array.isArray(status) ? status : [status];
query = query.andWhere((qb) => {
statuses.forEach((statusValue, index) => {
const whereMethod = index === 0 ? "where" : "orWhere";
if (statusValue === CertStatus.ACTIVE) {
void qb[whereMethod]((innerQb) => {
void innerQb
.where(`${TableName.Certificate}.notAfter`, ">", now)
.andWhere(`${TableName.Certificate}.status`, "!=", CertStatus.REVOKED);
});
} else if (statusValue === CertStatus.EXPIRED) {
void qb[whereMethod]((innerQb) => {
void innerQb
.where(`${TableName.Certificate}.notAfter`, "<=", now)
.andWhere(`${TableName.Certificate}.status`, "!=", CertStatus.REVOKED);
});
} else {
void qb[whereMethod](`${TableName.Certificate}.status`, statusValue);
}
});
});
}
if (fromDate) {
query = query.andWhere(`${TableName.Certificate}.createdAt`, ">=", fromDate);
}
if (toDate) {
query = query.andWhere(`${TableName.Certificate}.createdAt`, "<=", toDate);
}
if (profileIds) {
query = query.whereIn(`${TableName.Certificate}.profileId`, profileIds);
}
if (permissionFilters) {
query = applyProcessedPermissionRulesToQuery(query, TableName.Certificate, permissionFilters) as typeof query;
}

View File

@@ -68,16 +68,11 @@ import { NotificationType } from "../notification/notification-types";
import { TOrgDALFactory } from "../org/org-dal";
import { TPkiAlertDALFactory } from "../pki-alert/pki-alert-dal";
import { TPkiCollectionDALFactory } from "../pki-collection/pki-collection-dal";
import { TProjectBotServiceFactory } from "../project-bot/project-bot-service";
import { TProjectEnvDALFactory } from "../project-env/project-env-dal";
import { TProjectMembershipDALFactory } from "../project-membership/project-membership-dal";
import { getPredefinedRoles } from "../project-role/project-role-fns";
import { TReminderServiceFactory } from "../reminder/reminder-types";
import { TRoleDALFactory } from "../role/role-dal";
import { TSecretDALFactory } from "../secret/secret-dal";
import { fnDeleteProjectSecretReminders } from "../secret/secret-fns";
import { ROOT_FOLDER_NAME, TSecretFolderDALFactory } from "../secret-folder/secret-folder-dal";
import { TSecretV2BridgeDALFactory } from "../secret-v2-bridge/secret-v2-bridge-dal";
import { TProjectSlackConfigDALFactory } from "../slack/project-slack-config-dal";
import { validateSlackChannelsField } from "../slack/slack-auth-validators";
import { TSlackIntegrationDALFactory } from "../slack/slack-integration-dal";
@@ -133,10 +128,7 @@ type TProjectServiceFactoryDep = {
projectSshConfigDAL: Pick<TProjectSshConfigDALFactory, "transaction" | "create" | "findOne" | "updateById">;
projectQueue: TProjectQueueFactory;
userDAL: TUserDALFactory;
projectBotService: Pick<TProjectBotServiceFactory, "getBotKey">;
folderDAL: Pick<TSecretFolderDALFactory, "insertMany" | "findByProjectId">;
secretDAL: Pick<TSecretDALFactory, "find">;
secretV2BridgeDAL: Pick<TSecretV2BridgeDALFactory, "find">;
projectEnvDAL: Pick<TProjectEnvDALFactory, "insertMany" | "find">;
projectMembershipDAL: Pick<TProjectMembershipDALFactory, "findProjectGhostUser" | "findAllProjectMembers">;
membershipUserDAL: Pick<TMembershipUserDALFactory, "create" | "findOne" | "delete">;
@@ -192,7 +184,6 @@ type TProjectServiceFactoryDep = {
| "createCipherPairWithDataKey"
>;
projectTemplateService: TProjectTemplateServiceFactory;
reminderService: Pick<TReminderServiceFactory, "deleteReminderBySecretId">;
notificationService: Pick<TNotificationServiceFactory, "createUserNotifications">;
};
@@ -201,11 +192,8 @@ export type TProjectServiceFactory = ReturnType<typeof projectServiceFactory>;
export const projectServiceFactory = ({
projectDAL,
projectSshConfigDAL,
secretDAL,
secretV2BridgeDAL,
projectQueue,
permissionService,
projectBotService,
orgDAL,
userDAL,
folderDAL,
@@ -232,7 +220,6 @@ export const projectServiceFactory = ({
microsoftTeamsIntegrationDAL,
projectTemplateService,
smtpService,
reminderService,
notificationService,
membershipIdentityDAL,
membershipUserDAL,
@@ -521,14 +508,6 @@ export const projectServiceFactory = ({
await userDAL.deleteById(projectGhostUser.id, tx);
}
await fnDeleteProjectSecretReminders(project.id, {
secretDAL,
secretV2BridgeDAL,
reminderService,
projectBotService,
folderDAL
});
return delProject;
});
@@ -942,6 +921,11 @@ export const projectServiceFactory = ({
friendlyName,
commonName,
forPkiSync = false,
search,
status,
profileIds,
fromDate,
toDate,
actorId,
actorOrgId,
actorAuthMethod,
@@ -968,7 +952,12 @@ export const projectServiceFactory = ({
const regularFilters = {
projectId,
...(friendlyName && { friendlyName }),
...(commonName && { commonName })
...(commonName && { commonName }),
...(search && { search }),
...(status && { status: Array.isArray(status) ? status[0] : status }),
...(profileIds && { profileIds }),
...(fromDate && { fromDate }),
...(toDate && { toDate })
};
const permissionFilters = getProcessedPermissionRules(
permission,
@@ -991,7 +980,12 @@ export const projectServiceFactory = ({
const countFilter = {
projectId,
...(regularFilters.friendlyName && { friendlyName: String(regularFilters.friendlyName) }),
...(regularFilters.commonName && { commonName: String(regularFilters.commonName) })
...(regularFilters.commonName && { commonName: String(regularFilters.commonName) }),
...(regularFilters.search && { search: String(regularFilters.search) }),
...(regularFilters.status && { status: String(regularFilters.status) }),
...(regularFilters.profileIds && { profileIds: regularFilters.profileIds }),
...(regularFilters.fromDate && { fromDate: regularFilters.fromDate }),
...(regularFilters.toDate && { toDate: regularFilters.toDate })
};
const count = forPkiSync

View File

@@ -144,6 +144,11 @@ export type TListProjectCertsDTO = {
friendlyName?: string;
commonName?: string;
forPkiSync?: boolean;
search?: string;
status?: string | string[];
profileIds?: string[];
fromDate?: Date;
toDate?: Date;
} & Omit<TProjectPermission, "projectId">;
export type TListProjectAlertsDTO = TProjectPermission;

View File

@@ -0,0 +1,4 @@
export * from "./octopus-deploy-sync-constants";
export * from "./octopus-deploy-sync-fns";
export * from "./octopus-deploy-sync-schemas";
export * from "./octopus-deploy-sync-types";

View File

@@ -0,0 +1,10 @@
import { AppConnection } from "@app/services/app-connection/app-connection-enums";
import { SecretSync } from "@app/services/secret-sync/secret-sync-enums";
import { TSecretSyncListItem } from "@app/services/secret-sync/secret-sync-types";
export const OCTOPUS_DEPLOY_SYNC_LIST_OPTION: TSecretSyncListItem = {
name: "Octopus Deploy",
destination: SecretSync.OctopusDeploy,
connection: AppConnection.OctopusDeploy,
canImportSecrets: false
};

View File

@@ -0,0 +1,151 @@
import { request } from "@app/lib/config/request";
import { BadRequestError } from "@app/lib/errors";
import { getOctopusDeployInstanceUrl } from "@app/services/app-connection/octopus-deploy";
import { matchesSchema } from "@app/services/secret-sync/secret-sync-fns";
import { TSecretMap } from "@app/services/secret-sync/secret-sync-types";
import { SECRET_SYNC_NAME_MAP } from "../secret-sync-maps";
import {
TOctopusDeploySyncWithCredentials,
TOctopusDeployVariable,
TOctopusDeployVariableSet
} from "./octopus-deploy-sync-types";
export const OctopusDeploySyncFns = {
getAuthHeader(apiKey: string): Record<string, string> {
return {
"X-NuGet-ApiKey": apiKey,
"X-Octopus-ApiKey": apiKey,
Accept: "application/json",
"Content-Type": "application/json"
};
},
buildVariableUrl(instanceUrl: string, spaceId: string, projectId: string, scope: string): string {
switch (scope) {
case "project":
return `${instanceUrl}/api/${spaceId}/projects/${projectId}/variables`;
default:
throw new BadRequestError({
message: `Unsupported Octopus Deploy scope: ${scope}`
});
}
},
async syncSecrets(secretSync: TOctopusDeploySyncWithCredentials, secretMap: TSecretMap) {
const {
connection,
environment,
syncOptions: { disableSecretDeletion, keySchema }
} = secretSync;
const instanceUrl = await getOctopusDeployInstanceUrl(connection);
const { apiKey } = connection.credentials;
const { spaceId, projectId, scope } = secretSync.destinationConfig;
const url = this.buildVariableUrl(instanceUrl, spaceId, projectId, scope);
const { data: variableSet } = await request.get<TOctopusDeployVariableSet>(url, {
headers: this.getAuthHeader(apiKey)
});
// Get scope values from destination config (if configured)
const scopeValues = secretSync.destinationConfig.scopeValues || {};
const nonSensitiveVariables: TOctopusDeployVariable[] = [];
let sensitiveVariables: TOctopusDeployVariable[] = [];
variableSet.Variables.forEach((variable) => {
if (!variable.IsSensitive && variable.Type !== "Sensitive") {
nonSensitiveVariables.push(variable);
} else {
        // Sensitive variables; these could contain Infisical-managed secrets
sensitiveVariables.push(variable);
}
});
// Build new variables array from secret map
const newVariables: TOctopusDeployVariable[] = Object.entries(secretMap).map(([key, secret]) => ({
Name: key,
Value: secret.value,
Description: secret.comment || "",
Scope: {
Environment: scopeValues.environments,
Role: scopeValues.roles,
Machine: scopeValues.machines,
ProcessOwner: scopeValues.processes,
Action: scopeValues.actions,
Channel: scopeValues.channels
},
IsEditable: false,
Prompt: null,
Type: "Sensitive",
IsSensitive: true
}));
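    // Prune previously-synced sensitive variables: only keys that match the key
    // schema and are absent from the current secret map are marked for deletion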
const keysToDelete = new Set<string>();
if (!disableSecretDeletion) {
sensitiveVariables.forEach((variable) => {
if (!matchesSchema(variable.Name, environment?.slug || "", keySchema)) return;
if (!secretMap[variable.Name]) {
keysToDelete.add(variable.Name);
}
});
}
sensitiveVariables = sensitiveVariables.filter((variable) => !keysToDelete.has(variable.Name));
const newVariableKeys = newVariables.map((variable) => variable.Name);
// Keep sensitive variables that are not in the new variables array, to avoid duplication
sensitiveVariables = sensitiveVariables.filter((variable) => !newVariableKeys.includes(variable.Name));
await request.put(
url,
{
...variableSet,
Variables: [...nonSensitiveVariables, ...sensitiveVariables, ...newVariables]
},
{
headers: this.getAuthHeader(apiKey)
}
);
},
async removeSecrets(secretSync: TOctopusDeploySyncWithCredentials, secretMap: TSecretMap) {
const {
connection,
destinationConfig: { spaceId, projectId, scope }
} = secretSync;
const instanceUrl = await getOctopusDeployInstanceUrl(connection);
const { apiKey } = connection.credentials;
const url = this.buildVariableUrl(instanceUrl, spaceId, projectId, scope);
const { data: variableSet } = await request.get<TOctopusDeployVariableSet>(url, {
headers: this.getAuthHeader(apiKey)
});
const infisicalSecretKeys = Object.keys(secretMap);
const variablesToDelete = variableSet.Variables.filter(
(variable) =>
infisicalSecretKeys.includes(variable.Name) && variable.IsSensitive === true && variable.Type === "Sensitive"
).map((variable) => variable.Id);
await request.put(
url,
{
...variableSet,
Variables: variableSet.Variables.filter((variable) => !variablesToDelete.includes(variable.Id))
},
{
headers: this.getAuthHeader(apiKey)
}
);
},
async getSecrets(secretSync: TOctopusDeploySyncWithCredentials): Promise<TSecretMap> {
throw new Error(`${SECRET_SYNC_NAME_MAP[secretSync.destination]} does not support importing secrets.`);
}
};

View File

@@ -0,0 +1,80 @@
import { z } from "zod";
import { SecretSyncs } from "@app/lib/api-docs";
import { AppConnection } from "@app/services/app-connection/app-connection-enums";
import { SecretSync } from "@app/services/secret-sync/secret-sync-enums";
import {
BaseSecretSyncSchema,
GenericCreateSecretSyncFieldsSchema,
GenericUpdateSecretSyncFieldsSchema
} from "@app/services/secret-sync/secret-sync-schemas";
import { TSyncOptionsConfig } from "@app/services/secret-sync/secret-sync-types";
import { SECRET_SYNC_NAME_MAP } from "../secret-sync-maps";
export enum OctopusDeploySyncScope {
Project = "project"
}
const OctopusDeploySyncDestinationConfigBaseSchema = z.object({
spaceId: z.string().min(1, "Space ID is required").describe(SecretSyncs.DESTINATION_CONFIG.OCTOPUS_DEPLOY.spaceId),
spaceName: z.string().optional().describe(SecretSyncs.DESTINATION_CONFIG.OCTOPUS_DEPLOY.spaceName),
scope: z.nativeEnum(OctopusDeploySyncScope).default(OctopusDeploySyncScope.Project)
});
export const OctopusDeploySyncDestinationConfigSchema = z.intersection(
OctopusDeploySyncDestinationConfigBaseSchema,
z.discriminatedUnion("scope", [
z.object({
scope: z.literal(OctopusDeploySyncScope.Project),
projectId: z
.string()
.min(1, "Project ID is required")
.describe(SecretSyncs.DESTINATION_CONFIG.OCTOPUS_DEPLOY.projectId),
projectName: z.string().optional().describe(SecretSyncs.DESTINATION_CONFIG.OCTOPUS_DEPLOY.projectName),
scopeValues: z
.object({
environments: z.array(z.string()).optional(),
roles: z.array(z.string()).optional(),
machines: z.array(z.string()).optional(),
processes: z.array(z.string()).optional(),
actions: z.array(z.string()).optional(),
channels: z.array(z.string()).optional()
})
.optional()
.describe(SecretSyncs.DESTINATION_CONFIG.OCTOPUS_DEPLOY.scopeValues)
})
])
);
const OctopusDeploySyncOptionsConfig: TSyncOptionsConfig = { canImportSecrets: false };
export const OctopusDeploySyncSchema = BaseSecretSyncSchema(SecretSync.OctopusDeploy, OctopusDeploySyncOptionsConfig)
.extend({
destination: z.literal(SecretSync.OctopusDeploy),
destinationConfig: OctopusDeploySyncDestinationConfigSchema
})
.describe(JSON.stringify({ title: SECRET_SYNC_NAME_MAP[SecretSync.OctopusDeploy] }));
export const CreateOctopusDeploySyncSchema = GenericCreateSecretSyncFieldsSchema(
SecretSync.OctopusDeploy,
OctopusDeploySyncOptionsConfig
).extend({
destinationConfig: OctopusDeploySyncDestinationConfigSchema
});
export const UpdateOctopusDeploySyncSchema = GenericUpdateSecretSyncFieldsSchema(
SecretSync.OctopusDeploy,
OctopusDeploySyncOptionsConfig
).extend({
destinationConfig: OctopusDeploySyncDestinationConfigSchema.optional()
});
export const OctopusDeploySyncListItemSchema = z
.object({
name: z.literal("Octopus Deploy"),
connection: z.literal(AppConnection.OctopusDeploy),
destination: z.literal(SecretSync.OctopusDeploy),
canImportSecrets: z.literal(false)
})
.describe(JSON.stringify({ title: SECRET_SYNC_NAME_MAP[SecretSync.OctopusDeploy] }));

View File

@@ -0,0 +1,67 @@
import z from "zod";
import { TOctopusDeployConnection } from "@app/services/app-connection/octopus-deploy";
import {
CreateOctopusDeploySyncSchema,
OctopusDeploySyncListItemSchema,
OctopusDeploySyncSchema
} from "./octopus-deploy-sync-schemas";
export type TOctopusDeploySyncListItem = z.infer<typeof OctopusDeploySyncListItemSchema>;
export type TOctopusDeploySync = z.infer<typeof OctopusDeploySyncSchema>;
export type TOctopusDeploySyncInput = z.infer<typeof CreateOctopusDeploySyncSchema>;
export type TOctopusDeploySyncWithCredentials = Omit<TOctopusDeploySync, "connection"> & {
connection: TOctopusDeployConnection;
};
export type TOctopusDeployVariable = {
Id?: string;
Name: string;
Value: string;
Description: string;
Scope: {
Environment?: string[];
Machine?: string[];
Role?: string[];
Action?: string[];
Channel?: string[];
ProcessOwner?: string[];
Tenant?: string[];
TenantTag?: string[];
};
IsEditable: boolean;
Prompt: {
Description: string;
DisplaySettings: Record<string, string>;
Label: string;
Required: boolean;
} | null;
Type: "String" | "Sensitive";
IsSensitive: boolean;
};
export type TOctopusDeployVariableSet = {
Id: string;
OwnerId: string;
Version: number;
Variables: TOctopusDeployVariable[];
ScopeValues: {
Environments: { Id: string; Name: string }[];
Machines: { Id: string; Name: string }[];
Actions: { Id: string; Name: string }[];
Roles: { Id: string; Name: string }[];
Channels: { Id: string; Name: string }[];
TenantTags: { Id: string; Name: string }[];
Processes: {
ProcessType: string;
Id: string;
Name: string;
}[];
};
SpaceId: string;
Links: {
Self: string;
};
};

View File

@@ -31,7 +31,8 @@ export enum SecretSync {
Northflank = "northflank",
Bitbucket = "bitbucket",
LaravelForge = "laravel-forge",
Chef = "chef"
Chef = "chef",
OctopusDeploy = "octopus-deploy"
}
export enum SecretSyncInitialSyncBehavior {

View File

@@ -53,6 +53,7 @@ import { HumanitecSyncFns } from "./humanitec/humanitec-sync-fns";
import { LARAVEL_FORGE_SYNC_LIST_OPTION, LaravelForgeSyncFns } from "./laravel-forge";
import { NETLIFY_SYNC_LIST_OPTION, NetlifySyncFns } from "./netlify";
import { NORTHFLANK_SYNC_LIST_OPTION, NorthflankSyncFns } from "./northflank";
import { OCTOPUS_DEPLOY_SYNC_LIST_OPTION, OctopusDeploySyncFns } from "./octopus-deploy";
import { RAILWAY_SYNC_LIST_OPTION } from "./railway/railway-sync-constants";
import { RailwaySyncFns } from "./railway/railway-sync-fns";
import { RENDER_SYNC_LIST_OPTION, RenderSyncFns } from "./render";
@@ -97,7 +98,8 @@ const SECRET_SYNC_LIST_OPTIONS: Record<SecretSync, TSecretSyncListItem> = {
[SecretSync.Northflank]: NORTHFLANK_SYNC_LIST_OPTION,
[SecretSync.Bitbucket]: BITBUCKET_SYNC_LIST_OPTION,
[SecretSync.LaravelForge]: LARAVEL_FORGE_SYNC_LIST_OPTION,
[SecretSync.Chef]: CHEF_SYNC_LIST_OPTION
[SecretSync.Chef]: CHEF_SYNC_LIST_OPTION,
[SecretSync.OctopusDeploy]: OCTOPUS_DEPLOY_SYNC_LIST_OPTION
};
export const listSecretSyncOptions = () => {
@@ -289,6 +291,8 @@ export const SecretSyncFns = {
return LaravelForgeSyncFns.syncSecrets(secretSync, schemaSecretMap);
case SecretSync.Chef:
return ChefSyncFns.syncSecrets(secretSync, schemaSecretMap);
case SecretSync.OctopusDeploy:
return OctopusDeploySyncFns.syncSecrets(secretSync, schemaSecretMap);
default:
throw new Error(
`Unhandled sync destination for sync secrets fns: ${(secretSync as TSecretSyncWithCredentials).destination}`
@@ -414,6 +418,9 @@ export const SecretSyncFns = {
case SecretSync.Chef:
secretMap = await ChefSyncFns.getSecrets(secretSync);
break;
case SecretSync.OctopusDeploy:
secretMap = await OctopusDeploySyncFns.getSecrets(secretSync);
break;
default:
throw new Error(
`Unhandled sync destination for get secrets fns: ${(secretSync as TSecretSyncWithCredentials).destination}`
@@ -513,6 +520,8 @@ export const SecretSyncFns = {
return LaravelForgeSyncFns.removeSecrets(secretSync, schemaSecretMap);
case SecretSync.Chef:
return ChefSyncFns.removeSecrets(secretSync, schemaSecretMap);
case SecretSync.OctopusDeploy:
return OctopusDeploySyncFns.removeSecrets(secretSync, schemaSecretMap);
default:
throw new Error(
`Unhandled sync destination for remove secrets fns: ${(secretSync as TSecretSyncWithCredentials).destination}`

View File

@@ -35,7 +35,8 @@ export const SECRET_SYNC_NAME_MAP: Record<SecretSync, string> = {
[SecretSync.Northflank]: "Northflank",
[SecretSync.Bitbucket]: "Bitbucket",
[SecretSync.LaravelForge]: "Laravel Forge",
[SecretSync.Chef]: "Chef"
[SecretSync.Chef]: "Chef",
[SecretSync.OctopusDeploy]: "Octopus Deploy"
};
export const SECRET_SYNC_CONNECTION_MAP: Record<SecretSync, AppConnection> = {
@@ -71,7 +72,8 @@ export const SECRET_SYNC_CONNECTION_MAP: Record<SecretSync, AppConnection> = {
[SecretSync.Northflank]: AppConnection.Northflank,
[SecretSync.Bitbucket]: AppConnection.Bitbucket,
[SecretSync.LaravelForge]: AppConnection.LaravelForge,
[SecretSync.Chef]: AppConnection.Chef
[SecretSync.Chef]: AppConnection.Chef,
[SecretSync.OctopusDeploy]: AppConnection.OctopusDeploy
};
export const SECRET_SYNC_PLAN_MAP: Record<SecretSync, SecretSyncPlanType> = {
@@ -107,7 +109,8 @@ export const SECRET_SYNC_PLAN_MAP: Record<SecretSync, SecretSyncPlanType> = {
[SecretSync.Northflank]: SecretSyncPlanType.Regular,
[SecretSync.Bitbucket]: SecretSyncPlanType.Regular,
[SecretSync.LaravelForge]: SecretSyncPlanType.Regular,
[SecretSync.Chef]: SecretSyncPlanType.Enterprise
[SecretSync.Chef]: SecretSyncPlanType.Enterprise,
[SecretSync.OctopusDeploy]: SecretSyncPlanType.Regular
};
export const SECRET_SYNC_SKIP_FIELDS_MAP: Record<SecretSync, string[]> = {
@@ -152,7 +155,8 @@ export const SECRET_SYNC_SKIP_FIELDS_MAP: Record<SecretSync, string[]> = {
[SecretSync.Northflank]: [],
[SecretSync.Bitbucket]: [],
[SecretSync.LaravelForge]: [],
[SecretSync.Chef]: []
[SecretSync.Chef]: [],
[SecretSync.OctopusDeploy]: []
};
const defaultDuplicateCheck: DestinationDuplicateCheckFn = () => true;
@@ -214,5 +218,6 @@ export const DESTINATION_DUPLICATE_CHECK_MAP: Record<SecretSync, DestinationDupl
[SecretSync.Northflank]: defaultDuplicateCheck,
[SecretSync.Bitbucket]: defaultDuplicateCheck,
[SecretSync.LaravelForge]: defaultDuplicateCheck,
[SecretSync.Chef]: defaultDuplicateCheck
[SecretSync.Chef]: defaultDuplicateCheck,
[SecretSync.OctopusDeploy]: defaultDuplicateCheck
};

View File

@@ -136,6 +136,12 @@ import {
TNorthflankSyncListItem,
TNorthflankSyncWithCredentials
} from "./northflank";
import {
TOctopusDeploySync,
TOctopusDeploySyncInput,
TOctopusDeploySyncListItem,
TOctopusDeploySyncWithCredentials
} from "./octopus-deploy";
import {
TRailwaySync,
TRailwaySyncInput,
@@ -201,7 +207,8 @@ export type TSecretSync =
| TSupabaseSync
| TNetlifySync
| TNorthflankSync
| TBitbucketSync;
| TBitbucketSync
| TOctopusDeploySync;
export type TSecretSyncWithCredentials =
| TAwsParameterStoreSyncWithCredentials
@@ -236,7 +243,8 @@ export type TSecretSyncWithCredentials =
| TNetlifySyncWithCredentials
| TNorthflankSyncWithCredentials
| TBitbucketSyncWithCredentials
| TLaravelForgeSyncWithCredentials;
| TLaravelForgeSyncWithCredentials
| TOctopusDeploySyncWithCredentials;
export type TSecretSyncInput =
| TAwsParameterStoreSyncInput
@@ -271,7 +279,8 @@ export type TSecretSyncInput =
| TNetlifySyncInput
| TNorthflankSyncInput
| TBitbucketSyncInput
| TLaravelForgeSyncInput;
| TLaravelForgeSyncInput
| TOctopusDeploySyncInput;
export type TSecretSyncListItem =
| TAwsParameterStoreSyncListItem
@@ -306,7 +315,8 @@ export type TSecretSyncListItem =
| TDigitalOceanAppPlatformSyncListItem
| TNetlifySyncListItem
| TNorthflankSyncListItem
| TBitbucketSyncListItem;
| TBitbucketSyncListItem
| TOctopusDeploySyncListItem;
export type TSyncOptionsConfig = {
canImportSecrets: boolean;

View File

@@ -94,6 +94,13 @@ services:
volumes:
- ./backend/bdd/pebble/:/var/data/pebble:ro
technitium:
image: technitium/dns-server:14.2.0
ports:
- "5380:5380/tcp"
environment:
- DNS_SERVER_ADMIN_PASSWORD=infisical
volumes:
postgres-data:
driver: local

View File

@@ -0,0 +1,4 @@
---
title: "Available"
openapi: "GET /api/v1/app-connections/octopus-deploy/available"
---

View File

@@ -0,0 +1,10 @@
---
title: "Create"
openapi: "POST /api/v1/app-connections/octopus-deploy"
---
<Note>
Check out the configuration docs for [Octopus Deploy
Connections](/integrations/app-connections/octopus-deploy) to learn how to
obtain the required credentials.
</Note>

View File

@@ -0,0 +1,4 @@
---
title: "Delete"
openapi: "DELETE /api/v1/app-connections/octopus-deploy/{connectionId}"
---

View File

@@ -0,0 +1,4 @@
---
title: "Get by ID"
openapi: "GET /api/v1/app-connections/octopus-deploy/{connectionId}"
---

View File

@@ -0,0 +1,4 @@
---
title: "Get by Name"
openapi: "GET /api/v1/app-connections/octopus-deploy/connection-name/{connectionName}"
---

View File

@@ -0,0 +1,4 @@
---
title: "List"
openapi: "GET /api/v1/app-connections/octopus-deploy"
---

View File

@@ -0,0 +1,10 @@
---
title: "Update"
openapi: "PATCH /api/v1/app-connections/octopus-deploy/{connectionId}"
---
<Note>
Check out the configuration docs for [Octopus Deploy
Connections](/integrations/app-connections/octopus-deploy) to learn how to
obtain the required credentials.
</Note>

View File

@@ -0,0 +1,4 @@
---
title: "List"
openapi: "GET /api/v1/scim/group-org-role-mappings"
---

View File

@@ -0,0 +1,4 @@
---
title: "Update"
openapi: "PUT /api/v1/scim/group-org-role-mappings"
---

View File

@@ -0,0 +1,4 @@
---
title: "Create"
openapi: "POST /api/v1/secret-syncs/octopus-deploy"
---

View File

@@ -0,0 +1,4 @@
---
title: "Delete"
openapi: "DELETE /api/v1/secret-syncs/octopus-deploy/{syncId}"
---

View File

@@ -0,0 +1,4 @@
---
title: "Get by ID"
openapi: "GET /api/v1/secret-syncs/octopus-deploy/{syncId}"
---

View File

@@ -0,0 +1,4 @@
---
title: "Get by Name"
openapi: "GET /api/v1/secret-syncs/octopus-deploy/sync-name/{syncName}"
---

View File

@@ -0,0 +1,4 @@
---
title: "List"
openapi: "GET /api/v1/secret-syncs/octopus-deploy"
---

View File

@@ -0,0 +1,4 @@
---
title: "Remove Secrets"
openapi: "POST /api/v1/secret-syncs/octopus-deploy/{syncId}/remove-secrets"
---

View File

@@ -0,0 +1,4 @@
---
title: "Sync Secrets"
openapi: "POST /api/v1/secret-syncs/octopus-deploy/{syncId}/sync-secrets"
---

View File

@@ -0,0 +1,4 @@
---
title: "Update"
openapi: "PATCH /api/v1/secret-syncs/octopus-deploy/{syncId}"
---

View File

@@ -15,7 +15,7 @@ git checkout -b MY_BRANCH_NAME
## Set up environment variables
Start by creating a `.env` file at the root of the Infisical directory then copy the contents of the file linked [here](https://github.com/Infisical/infisical/blob/main/.env.example). View all available [environment variables](https://infisical.com/docs/self-hosting/configuration/envars) and guidance for each.
Start by creating a `.env` file at the root of the Infisical directory, then copy the contents of the file linked [here](https://github.com/Infisical/infisical/blob/main/.env.dev.example). View all available [environment variables](https://infisical.com/docs/self-hosting/configuration/envars) and guidance for each.
## Starting Infisical for development

View File

@@ -33,7 +33,10 @@
},
{
"group": "Guides",
"pages": ["documentation/guides/organization-structure"]
"pages": [
"documentation/guides/governance-models",
"documentation/guides/organization-structure"
]
}
]
},
@@ -135,6 +138,7 @@
"integrations/app-connections/netlify",
"integrations/app-connections/northflank",
"integrations/app-connections/oci",
"integrations/app-connections/octopus-deploy",
"integrations/app-connections/okta",
"integrations/app-connections/oracledb",
"integrations/app-connections/postgres",
@@ -563,6 +567,7 @@
"integrations/secret-syncs/netlify",
"integrations/secret-syncs/northflank",
"integrations/secret-syncs/oci-vault",
"integrations/secret-syncs/octopus-deploy",
"integrations/secret-syncs/railway",
"integrations/secret-syncs/render",
"integrations/secret-syncs/supabase",
@@ -928,6 +933,18 @@
"api-reference/endpoints/organizations/saml-sso/update-saml-config",
"api-reference/endpoints/organizations/saml-sso/create-saml-config"
]
},
{
"group": "SCIM",
"pages": [
{
"group": " Group to Org Role Mappings",
"pages": [
"api-reference/endpoints/organizations/scim/group-org-role-mappings/list",
"api-reference/endpoints/organizations/scim/group-org-role-mappings/update"
]
}
]
}
]
},
@@ -1470,6 +1487,18 @@
"api-reference/endpoints/app-connections/oci/delete"
]
},
{
"group": "Octopus Deploy",
"pages": [
"api-reference/endpoints/app-connections/octopus-deploy/list",
"api-reference/endpoints/app-connections/octopus-deploy/available",
"api-reference/endpoints/app-connections/octopus-deploy/get-by-id",
"api-reference/endpoints/app-connections/octopus-deploy/get-by-name",
"api-reference/endpoints/app-connections/octopus-deploy/create",
"api-reference/endpoints/app-connections/octopus-deploy/update",
"api-reference/endpoints/app-connections/octopus-deploy/delete"
]
},
{
"group": "Okta",
"pages": [
@@ -1482,6 +1511,7 @@
"api-reference/endpoints/app-connections/okta/delete"
]
},
{
"group": "OracleDB",
"pages": [
@@ -2381,6 +2411,19 @@
"api-reference/endpoints/secret-syncs/oci-vault/remove-secrets"
]
},
{
"group": "Octopus Deploy",
"pages": [
"api-reference/endpoints/secret-syncs/octopus-deploy/list",
"api-reference/endpoints/secret-syncs/octopus-deploy/get-by-id",
"api-reference/endpoints/secret-syncs/octopus-deploy/get-by-name",
"api-reference/endpoints/secret-syncs/octopus-deploy/create",
"api-reference/endpoints/secret-syncs/octopus-deploy/update",
"api-reference/endpoints/secret-syncs/octopus-deploy/delete",
"api-reference/endpoints/secret-syncs/octopus-deploy/sync-secrets",
"api-reference/endpoints/secret-syncs/octopus-deploy/remove-secrets"
]
},
{
"group": "Railway",
"pages": [

View File

@@ -0,0 +1,479 @@
---
title: "Centralized vs. Self-Service Governance"
sidebarTitle: "Governance Models"
description: "Learn how to structure Infisical for centralized platform administration or team self-service autonomy"
---
Organizations adopt different approaches to secrets management governance based on their security requirements, compliance obligations, and team structures. Infisical supports a spectrum of governance models—from fully centralized platform administration to team-driven self-service.
This guide covers how to configure Infisical for different governance approaches and what tradeoffs to consider.
## Understanding the Spectrum
Most organizations don't operate at the extremes. Instead, they land somewhere on a spectrum between two models:
**Centralized Administration**: A dedicated platform or security team controls project creation, access policies, integrations, and secret lifecycle management. Application teams consume secrets but don't manage the underlying infrastructure.
**Self-Service**: Teams have autonomy to create projects, manage their own access, configure integrations, and operate independently. Central teams provide guardrails and standards rather than direct management.
<Note>
The right model depends on your regulatory environment, team maturity, organizational scale, and security posture. Highly regulated industries often lean toward centralized control, while organizations with mature DevOps practices may benefit from self-service with guardrails.
</Note>
## Organizational Structure
Project and environment structure is where governance decisions start to take shape.
### Project Ownership
| Approach | Centralized | Self-Service |
|----------|-------------|--------------|
| **Project creation** | Platform team creates all projects on behalf of application teams | Teams create their own projects as needed |
| **Naming conventions** | Enforced through process and templates | Documented standards, team-enforced |
| **Folder structure** | Predefined conventions (e.g., `/apps/{app-name}/{component}`) | Teams define hierarchies that fit their needs |
### Project Templates
[Project Templates](/documentation/platform/project-templates) allow you to define standard environments, roles, and settings that are applied when creating new projects. This feature supports both governance models:
- **Centralized**: Require all projects to use approved templates, ensuring consistent environment structures and role definitions across the organization
- **Self-Service**: Provide templates as a starting point that teams can build upon, reducing setup time while allowing customization
<Info>
Project Templates apply at creation time and don't propagate changes to existing projects. Plan your template strategy before widespread adoption.
</Info>
### Environment Strategy
Environments define the deployment stages where secrets are managed.
- **Standardized environments** (e.g., `dev`, `staging`, `prod`) provide consistency and simplify cross-team collaboration
- **Custom environments** allow teams to model their specific deployment pipelines (e.g., `qa`, `uat`, `perf-test`, `prod-eu`, `prod-us`)
With Project Templates, you can enforce a base set of environments while optionally allowing teams to add additional ones.
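As a concrete sketch, a platform team might capture this baseline in a template definition like the following (illustrative only; the exact field names of the Project Templates API may differ):
```typescript
// Hypothetical template: a standard environment set plus a predefined role.
// Field names are illustrative, not the exact Project Templates API shape.
const standardAppTemplate = {
  name: "standard-app",
  environments: [
    { name: "Development", slug: "dev" },
    { name: "Staging", slug: "staging" },
    { name: "Production", slug: "prod" }
  ],
  roles: [
    {
      name: "App Developer",
      slug: "app-developer",
      permissions: [{ action: "read", subject: "secrets" }]
    }
  ]
};
```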
## Authentication and Identity
How you manage identity—both for users and machines—significantly affects your governance strategy.
### User Authentication
| Approach | Centralized | Self-Service |
|----------|-------------|--------------|
| **Login methods** | SSO enforced, local accounts disabled | SSO available, local accounts permitted |
| **MFA** | Required organization-wide | Encouraged or optional |
| **Session duration** | Short sessions enforced | Longer sessions permitted |
Infisical supports multiple authentication methods that can be configured based on your requirements:
- [SAML SSO](/documentation/platform/sso/overview) with providers like Okta, Azure AD, Google Workspace, and JumpCloud
- [OIDC SSO](/documentation/platform/sso/general-oidc) for standards-based authentication
- [LDAP](/documentation/platform/ldap/overview) for directory-based authentication
### User Provisioning
| Approach | Centralized | Self-Service |
|----------|-------------|--------------|
| **User onboarding** | Automatic via SCIM from identity provider | Direct invitations by project admins |
| **Role assignment** | Mapped from IdP groups | Assigned manually per project |
| **Offboarding** | Automatic deprovisioning via SCIM | Manual removal required |
[SCIM provisioning](/documentation/platform/scim/overview) enables automatic user lifecycle management synced with your identity provider. Combined with [group mappings](/documentation/platform/scim/group-mappings), you can automatically assign organization roles based on IdP group membership.
For organizations using SAML, [group membership mapping](/documentation/platform/sso/google-saml#saml-group-membership-mapping) synchronizes group memberships when users log in, ensuring access reflects current IdP state.
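The group-to-role mapping endpoints added in this release (`GET`/`PUT /api/v1/scim/group-org-role-mappings`) can also be driven programmatically. A minimal sketch, assuming a bearer token and an illustrative request body (the exact payload shape is an assumption based on the endpoint's purpose):
```typescript
// Map IdP groups to Infisical organization roles via the SCIM mapping endpoint.
// The endpoint path matches the API reference above; the body shape is assumed.
const response = await fetch("https://app.infisical.com/api/v1/scim/group-org-role-mappings", {
  method: "PUT",
  headers: {
    Authorization: `Bearer ${process.env.INFISICAL_TOKEN}`,
    "Content-Type": "application/json"
  },
  body: JSON.stringify({
    mappings: [
      { scimGroupName: "platform-admins", role: "admin" },
      { scimGroupName: "engineering", role: "member" }
    ]
  })
});
if (!response.ok) throw new Error(`Mapping update failed: ${response.status}`);
```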
### Machine Identity Management
[Machine identities](/documentation/platform/identities/machine-identities) authenticate applications, services, and automated systems with Infisical. Your governance model shapes how these identities are managed:
| Approach | Centralized | Self-Service |
|----------|-------------|--------------|
| **Identity creation** | Platform team creates all identities; teams submit requests | Teams create identities for their own projects |
| **Auth method selection** | Standardized methods enforced (e.g., "Kubernetes Auth only in production") | Teams choose methods appropriate to their infrastructure |
| **Credential management** | Platform team manages and distributes credentials | Teams manage their own identity credentials |
Infisical supports multiple machine identity authentication methods:
- [Universal Auth](/documentation/platform/identities/universal-auth) — Client ID/secret authentication for any environment
- [Kubernetes Auth](/documentation/platform/identities/kubernetes-auth) — Native authentication using Kubernetes service accounts
- [AWS Auth](/documentation/platform/identities/aws-auth) — Authentication using AWS IAM roles
- [Azure Auth](/documentation/platform/identities/azure-auth) — Authentication using Azure managed identities
- [GCP Auth](/documentation/platform/identities/gcp-auth) — Authentication using GCP service accounts
- [OIDC Auth](/documentation/platform/identities/oidc-auth) — Authentication using OIDC identity tokens
Centralized organizations often standardize on platform-native authentication methods (Kubernetes Auth, cloud provider auth) to eliminate static credentials, while self-service models may permit Universal Auth for flexibility.
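For example, a service using Universal Auth can authenticate through the CLI and inject secrets at startup. A minimal sketch, assuming current Infisical CLI flags (`--plain --silent` print only the token); the project ID and credential variables are placeholders:

```bash
# Authenticate a machine identity with Universal Auth and capture a short-lived access token
export INFISICAL_TOKEN=$(infisical login --method=universal-auth \
  --client-id="$INFISICAL_CLIENT_ID" \
  --client-secret="$INFISICAL_CLIENT_SECRET" \
  --plain --silent)

# Inject secrets from the project's prod environment into the process environment
infisical run --projectId="<project-id>" --env=prod -- ./start-server.sh
```

Under a centralized model, the platform team would issue the client credentials above; under self-service, the team creates the identity and scopes it to its own project.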
## Access Control
Infisical's [role-based access control](/documentation/platform/access-controls/role-based-access-controls) operates at two levels: organization and project. How you configure these controls determines who can do what across your secrets infrastructure.
### Organization-Level Roles
[Organization roles](/documentation/platform/access-controls/role-based-access-controls#organization-level-access-controls) govern access to organization-wide resources like billing, member management, and identity provider configuration.
| Role | Capabilities |
|------|--------------|
| **Admin** | Full access to all organization settings and all projects |
| **Member** | Basic organization access; project access determined separately |
| **Custom roles** | Tailored permissions for specific administrative functions |
<Warning>
The Admin role grants access to all projects in the organization. In both governance models, this role should be assigned sparingly to prevent unintended access to sensitive secrets.
</Warning>
### Project-Level Roles
[Project roles](/documentation/platform/access-controls/role-based-access-controls#project-level-access-controls) control what users and machine identities can do within a specific project.
| Approach | Centralized | Self-Service |
|----------|-------------|--------------|
| **Role definition** | Custom roles defined by platform team; teams assigned predefined roles | Teams create project-level custom roles as needed |
| **Production access** | Restricted to specific roles; requires approval | Teams determine their own access patterns |
| **Role assignment** | Managed through groups synced from IdP | Project admins assign roles directly |
Built-in project roles include:
- **Admin**: Full access to all environments, folders, secrets, and project settings
- **Developer**: Standard access with restrictions on project administration and policy management
- **Viewer**: Read-only access to secrets and project resources
[Custom roles](/documentation/platform/access-controls/role-based-access-controls#creating-custom-roles) let you define granular permissions for specific environments, folder paths, and actions—useful for implementing least-privilege access.
### Groups
[Groups](/documentation/platform/groups) simplify access management by allowing you to assign roles to collections of users rather than individuals.
Key behaviors:
- Adding a group to a project grants all group members access with the assigned role(s)
- Users inherit composite permissions from all groups they belong to
- Group membership can be managed locally or synced from your identity provider via SCIM
| Approach | Centralized | Self-Service |
|----------|-------------|--------------|
| **Group management** | Groups defined in IdP, synced via SCIM | Project admins create and manage local groups |
| **Project membership** | Controlled through IdP group assignments | Direct group/user additions by project admins |
### Temporary and Just-in-Time Access
For sensitive environments, both governance models benefit from time-limited access:
- [Temporary access](/documentation/platform/access-controls/temporary-access) grants permissions that automatically expire after a defined period
- [Additional privileges](/documentation/platform/access-controls/additional-privileges) allow temporary elevation beyond a user's base role
Centralized organizations typically require temporary access for production environments, while self-service models may use it selectively for high-risk operations.
## Approval Workflows
[Approval workflows](/documentation/platform/pr-workflows) add oversight to sensitive operations, supporting compliance requirements and change management practices.
### Change Policies
Change policies require approval before secrets can be modified in specific environments or folder paths. When a policy applies, proposed changes enter a review queue where designated approvers can approve and merge—or reject—the changes.
| Approach | Centralized | Self-Service |
|----------|-------------|--------------|
| **Policy scope** | Required for all production environments | Teams define policies for their sensitive environments |
| **Approvers** | Security team members or designated reviewers | Team leads or senior engineers |
| **Bypass permissions** | Strictly limited | May allow emergency bypass for on-call |
### Access Requests
[Access requests](/documentation/platform/access-controls/access-requests) formalize the process of granting access to sensitive resources. Combined with temporary access, this enables just-in-time access patterns where users request and receive time-limited permissions.
| Approach | Centralized | Self-Service |
|----------|-------------|--------------|
| **Request requirement** | Mandatory for production access | Optional or environment-specific |
| **Approval workflow** | Formal review by security team | Peer approval or team lead sign-off |
| **Access duration** | Strictly time-limited | Flexible based on need |
### Notifications
Approval workflows integrate with [Slack](/documentation/platform/workflow-integrations/slack-integration) and [Microsoft Teams](/documentation/platform/workflow-integrations/microsoft-teams-integration) to notify approvers in real-time, reducing delays in the approval process.
## Secret Lifecycle
Who creates, rotates, and retires secrets—and how—depends on your governance model.
### App Connections
[App Connections](/integrations/app-connections/overview) are reusable integrations with third-party platforms like AWS, GCP, Azure, databases, and other services. They're required for secret rotation, dynamic secrets, and secret syncs—so how you manage them affects multiple workflows.
| Approach | Centralized | Self-Service |
|----------|-------------|--------------|
| **Connection creation** | Platform team creates connections at the organization level and distributes access to projects | Teams create their own connections at the project level |
| **Credential management** | Platform team manages service accounts and API keys used by connections | Teams manage credentials for their own connections |
| **Access distribution** | Connections shared across multiple projects as needed | Each team maintains their own set of connections |
### Secret Creation and Ownership
| Approach | Centralized | Self-Service |
|----------|-------------|--------------|
| **Shared secrets** | Platform team provisions infrastructure secrets (databases, APIs) | Teams request or create their own |
| **Application secrets** | Teams manage within their designated paths | Teams have full ownership |
| **Secret standards** | Naming conventions and metadata requirements enforced | Guidelines provided, team-enforced |
### Secret Rotation
[Secret rotation](/documentation/platform/secret-rotation/overview) automates credential lifecycle management, reducing the risk of long-lived secrets.
| Approach | Centralized | Self-Service |
|----------|-------------|--------------|
| **Rotation policies** | Defined and managed by platform team | Teams configure for their services |
| **Rotation schedules** | Standardized intervals based on secret classification | Teams determine appropriate intervals |
### Dynamic Secrets
[Dynamic secrets](/documentation/platform/dynamic-secrets/overview) generate short-lived credentials on demand, eliminating standing access to sensitive systems.
| Approach | Centralized | Self-Service |
|----------|-------------|--------------|
| **Configuration** | Platform team sets up dynamic secret sources | Teams configure for their databases and services |
| **Lease duration** | Standardized TTLs based on use case | Teams determine appropriate durations |
| **Access control** | Restricted to specific roles | Available to authorized team members |
### Secret Referencing Within Projects
[Secret referencing and imports](/documentation/platform/secret-references) allow secrets to be shared across environments and folders within the same project. This helps reduce duplication when the same secret is needed in multiple environments.
| Approach | Centralized | Self-Service |
|----------|-------------|--------------|
| **Reference patterns** | Standardized import structures across projects | Teams define their own reference hierarchies |
| **Base environment** | Platform team designates source environments for imports | Teams choose which environments to reference from |
<Note>
Projects in Infisical are isolated from one another. Secret referencing and imports work within a single project—you cannot reference secrets across different projects.
</Note>
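As an illustration of the reference syntax (the `${...}` forms below follow the secret-referencing docs; the secret names and values are placeholders):

```bash
# Set a base secret in the dev environment (root folder)
infisical secrets set DB_PASSWORD='s3cr3t-value' --env=dev

# Reference it from another secret in the same project. Reference forms:
#   ${SECRET_NAME}              -> same environment and folder
#   ${dev.SECRET_NAME}          -> root folder of the dev environment
#   ${dev.backend.SECRET_NAME}  -> /backend folder of the dev environment
infisical secrets set DB_URL='postgres://app:${dev.DB_PASSWORD}@db.internal:5432/app' --env=staging
```

Single quotes keep the shell from expanding `${...}` locally; Infisical resolves the reference when the secret is read.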
## Integrations and Secret Delivery
Infisical offers multiple methods for delivering secrets to applications and infrastructure.
### Secret Syncs
[Secret Syncs](/integrations/secret-syncs/overview) push secrets to third-party platforms like AWS Secrets Manager, Azure Key Vault, GCP Secret Manager, and others. Syncs keep external secret stores updated when values change in Infisical.
| Approach | Centralized | Self-Service |
|----------|-------------|--------------|
| **Sync setup** | Platform team configures syncs to approved destinations | Teams configure syncs for their projects |
| **Target platforms** | Limited to approved platforms | Teams choose appropriate destinations |
| **Sync scope** | Standardized patterns (e.g., sync prod to AWS SM only) | Teams determine what to sync and where |
### Kubernetes Integration
For Kubernetes environments, two primary integration patterns are available:
- [Infisical Kubernetes Operator](/integrations/platforms/kubernetes/infisical-operator) — Syncs secrets to Kubernetes Secrets resources
- [Infisical Secrets Injector](/integrations/platforms/kubernetes-injector) — Injects secrets directly into pods at runtime
| Approach | Centralized | Self-Service |
|----------|-------------|--------------|
| **Operator deployment** | Single cluster-wide instance managed by platform team | Teams may deploy namespace-scoped instances |
| **Secret sync patterns** | Standardized CRD configurations | Teams define their own InfisicalSecret resources |
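As referenced in the table above, teams declare what the operator should sync by defining `InfisicalSecret` resources. A minimal sketch, with field names following the operator's `v1alpha1` CRD; the slugs, namespaces, and credential secret are placeholders to adapt and verify against the operator docs for your version:

```bash
kubectl apply -f - <<'EOF'
apiVersion: secrets.infisical.com/v1alpha1
kind: InfisicalSecret
metadata:
  name: backend-secrets
  namespace: backend
spec:
  hostAPI: https://app.infisical.com/api
  authentication:
    universalAuth:
      secretsScope:
        projectSlug: backend-project   # placeholder project slug
        envSlug: prod
        secretsPath: "/"
      credentialsRef:
        secretName: universal-auth-credentials   # K8s Secret holding the identity's clientId/clientSecret
        secretNamespace: backend
  managedSecretReference:
    secretName: backend-managed-secret   # Kubernetes Secret the operator creates and keeps in sync
    secretNamespace: backend
EOF
```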
### Agent and CLI
The [Infisical Agent](/infisical-agent/overview) and [CLI](/cli/overview) provide flexible secret consumption patterns:
| Approach | Centralized | Self-Service |
|----------|-------------|--------------|
| **Agent deployment** | Managed by platform team as infrastructure | Teams deploy and configure their own agents |
| **CLI usage** | Standardized configurations provided | Teams use CLI as needed in their workflows |
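An agent deployment typically pairs a config file with a template to render. A minimal sketch; the section names (`infisical`, `auth`, `sinks`, `templates`) follow the agent docs, but the paths are placeholders and key names should be verified against your agent version:

```bash
cat > agent-config.yaml <<'EOF'
infisical:
  address: "https://app.infisical.com"
auth:
  type: "universal-auth"
  config:
    client-id: "/etc/infisical/client-id"         # file containing the machine identity's client ID
    client-secret: "/etc/infisical/client-secret" # file containing the client secret
sinks:
  - type: "file"
    config:
      path: "/etc/infisical/access-token"         # where the agent writes the fetched token
templates:
  - source-path: "/etc/infisical/app.tpl"         # secret template to render
    destination-path: "/etc/app/.env"             # rendered output consumed by the application
EOF

infisical agent --config agent-config.yaml
```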
### Gateways
[Gateways](/documentation/platform/gateways/overview) enable Infisical to securely access private resources—such as databases in isolated VPCs—without exposing them to the public internet. Gateways are lightweight components deployed within your private network that establish secure, outbound-only connections to Infisical.
Gateways are essential for features that require direct access to private infrastructure:
- [Dynamic secrets](/documentation/platform/dynamic-secrets/overview) for databases in private networks
- [Secret rotation](/documentation/platform/secret-rotation/overview) for credentials stored in isolated systems
- [Kubernetes Auth](/documentation/platform/identities/kubernetes-auth) token review for private clusters
#### Gateway Architecture
Gateways operate at two levels within Infisical:
1. **Organization-level registration**: Gateways are registered and visible in Organization Settings → Access Control → Gateways. This provides central visibility into all gateway infrastructure.
2. **Project-level linking**: When configuring features like dynamic secrets, teams select from available gateways to route requests through private networks.
This architecture naturally supports a hybrid governance model where infrastructure teams manage gateway deployment while application teams consume them.
#### Governance Considerations
| Approach | Centralized | Self-Service |
|----------|-------------|--------------|
| **Gateway deployment** | Platform/infrastructure team deploys gateways in shared network zones | Teams deploy gateways in their own VPCs or network segments |
| **Machine identity management** | Platform team creates and manages identities used by gateways | Teams create identities for gateways they deploy |
| **Network configuration** | Central team manages firewall rules and network connectivity | Teams responsible for their own network access |
| **Gateway selection** | Platform team links gateways to projects | Teams select from available gateways when configuring features |
<Info>
Each gateway requires a [machine identity](/documentation/platform/identities/machine-identities) for authentication. Your gateway governance model should align with your broader machine identity strategy.
</Info>
#### Common Patterns
**Shared Gateway Model** (Centralized)
A platform team deploys gateways in shared network zones that can reach common infrastructure (e.g., a central database cluster). Multiple projects link to these shared gateways, reducing deployment overhead and centralizing network management.
This pattern works well when:
- Multiple applications share common database infrastructure
- Network access is controlled by a central team
- You want to minimize the number of gateway deployments to manage
**Team-Owned Gateway Model** (Self-Service)
Each team deploys gateways within their own network boundaries (e.g., per-team VPCs or Kubernetes namespaces). Teams manage the full lifecycle of their gateways, including the machine identities that authenticate them.
This pattern works well when:
- Teams have isolated network environments
- Teams have infrastructure expertise to deploy and maintain gateways
- Strict network segmentation requires dedicated gateways per team
**Hybrid Model**
Platform team deploys and registers gateways, but application teams independently select which gateway to use when configuring dynamic secrets or rotation. This provides central oversight of infrastructure while giving teams flexibility in how they use it.
For Kubernetes environments, gateways can also serve as token reviewers for [Kubernetes Auth](/documentation/platform/identities/kubernetes-auth), eliminating the need for long-lived service account tokens. In this scenario, the gateway deployment often aligns with whoever manages the Kubernetes cluster.
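As a deployment sketch, a gateway runs anywhere in the private network that can reach both the target resources and Infisical over outbound HTTPS. The token flow below is an assumption based on the gateway docs; confirm the exact `infisical gateway` flags for your CLI version:

```bash
# Authenticate the gateway's machine identity, then start the gateway.
# The connection is outbound-only; no inbound firewall rules are required.
export GATEWAY_TOKEN=$(infisical login --method=universal-auth \
  --client-id="$GATEWAY_CLIENT_ID" \
  --client-secret="$GATEWAY_CLIENT_SECRET" \
  --plain --silent)

infisical gateway --token "$GATEWAY_TOKEN"
```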
## Audit and Compliance
Visibility into secrets access and changes is critical for security and compliance. Infisical provides audit capabilities at both organization and project levels.
### Audit Logs
[Audit logs](/documentation/platform/audit-logs) capture all platform activity including secret access, modifications, and administrative actions.
| Level | Scope | Typical Access |
|-------|-------|----------------|
| **Organization** | All activity across all projects | Security team, compliance officers |
| **Project** | Activity within a specific project | Project admins, team leads |
| Approach | Centralized | Self-Service |
|----------|-------------|--------------|
| **Log access** | Security team has organization-wide visibility | Teams access only their project logs |
| **Log retention** | Centrally managed retention policies | Platform-defined retention |
| **Compliance reporting** | Platform team generates reports | Teams may generate their own project reports |
### Audit Log Streaming
[Audit log streaming](/documentation/platform/audit-log-streams/audit-log-streams) exports logs to external systems for long-term retention and analysis.
Supported destinations include:
- SIEM platforms (Splunk, Datadog, Elastic)
- Cloud storage (AWS S3, Azure Blob Storage)
- Log aggregators (Better Stack, generic HTTP endpoints)
| Approach | Centralized | Self-Service |
|----------|-------------|--------------|
| **Stream configuration** | Platform team manages all log streams | N/A (organization-level feature) |
| **SIEM integration** | Centralized security monitoring | Teams may not have direct SIEM access |
## Security Controls
Beyond access control, Infisical offers additional security settings.
### Security Policies
Organization-level [security policies](/documentation/platform/organization) allow you to enforce:
- MFA requirements for all users
- Session duration limits
- Login restrictions
### IP Access Controls
Restrict API and dashboard access to specific IP ranges, useful for:
- Limiting production access to corporate networks or VPNs
- Restricting machine identity authentication to known infrastructure IPs
### Encryption and Key Management
| Feature | Description | Governance Consideration |
|---------|-------------|-------------------------|
| **External KMS** | Integrate with AWS KMS, GCP KMS, or Azure Key Vault | Centralized key management |
| **BYOK** | Bring your own encryption keys | Enterprise key management policies |
| **KMIP** | Connect to KMIP-compatible HSMs | Hardware-backed security requirements |
These features are typically managed centrally regardless of overall governance model, as encryption infrastructure requires specialized expertise.
## Choosing Your Model
A few factors tend to push organizations toward one end of the spectrum or the other:
### Factors Favoring Centralized Control
- **Regulatory requirements**: SOC 2, HIPAA, PCI-DSS, and similar frameworks often require demonstrated control over secrets management
- **Limited security expertise**: When application teams lack security experience, central management reduces risk
- **Consistency requirements**: Large organizations benefit from standardized patterns across teams
- **High-risk environments**: Financial services, healthcare, and government contexts often require strict oversight
### Factors Favoring Self-Service
- **Mature DevOps culture**: Teams with strong security awareness can manage their own secrets responsibly
- **Speed of delivery**: Self-service reduces bottlenecks and accelerates development cycles
- **Diverse technology stacks**: Teams using different platforms benefit from flexibility in integration choices
- **Distributed organizations**: Global teams may need autonomy to operate across time zones
### The Hybrid Approach
Most organizations benefit from a hybrid model that combines central guardrails with team autonomy:
**Platform team responsibilities:**
- SSO and SCIM configuration
- Project template creation and maintenance
- Organization-wide security policies
- Audit log streaming and compliance reporting
- Approval workflow policies for production environments
- Shared infrastructure secrets (databases, external APIs)
**Application team responsibilities:**
- Project creation (from approved templates)
- Application-specific secret management
- Integration configuration within their projects
- Team-level access control within policy bounds
- Secret rotation for team-owned credentials
This balances compliance requirements with team velocity—central teams handle the infrastructure and guardrails, while application teams own their day-to-day secrets operations.
## Implementation Considerations
### Starting Centralized, Moving to Self-Service
Organizations often begin with centralized control and gradually extend autonomy as teams demonstrate security maturity:
1. **Phase 1**: Platform team manages all aspects; teams consume secrets via provided integrations
2. **Phase 2**: Teams gain ability to manage secrets within their projects; platform team controls project creation and policies
3. **Phase 3**: Teams can create projects from templates and configure integrations; platform team focuses on guardrails and compliance
### Starting Self-Service, Adding Controls
Organizations scaling from startup to enterprise may need to add centralization:
1. **Phase 1**: Establish SSO and basic security policies
2. **Phase 2**: Introduce project templates and approval workflows for production
3. **Phase 3**: Implement SCIM provisioning and comprehensive audit streaming
### Documentation and Training
Regardless of model, invest in:
- Clear documentation of secrets management standards and processes
- Training for teams on Infisical features and security best practices
- Runbooks for common operations (secret rotation, access requests, incident response)
## Summary
Here's a quick reference for how key Infisical features map to each governance model:
| Feature | Centralized Use | Self-Service Use |
|---------|-----------------|------------------|
| [Project Templates](/documentation/platform/project-templates) | Enforce standards | Provide starting points |
| [SCIM](/documentation/platform/scim/overview) | Automate user lifecycle | Supplement direct invitations |
| [Groups](/documentation/platform/groups) | IdP-synced membership | Local team management |
| [Custom Roles](/documentation/platform/access-controls/role-based-access-controls) | Define organization-wide | Create project-specific |
| [Approval Workflows](/documentation/platform/pr-workflows) | Require for all changes | Apply selectively |
| [App Connections](/integrations/app-connections/overview) | Org-level connections distributed to projects | Teams create project-level connections |
| [Secret Syncs](/integrations/secret-syncs/overview) | Platform-managed syncs to approved destinations | Teams configure their own syncs |
| [Gateways](/documentation/platform/gateways/overview) | Shared infrastructure for private access | Team-deployed per network zone |
| [Audit Logs](/documentation/platform/audit-logs) | Centralized monitoring | Project-level visibility |
Most organizations land somewhere in between—central control over identity, policies, and infrastructure with team ownership of secrets and integrations. You can start at either end of the spectrum and adjust as your needs change.