Merge branch 'main' into ENG-3723

This commit is contained in:
x032205
2025-09-22 22:01:00 -04:00
244 changed files with 13666 additions and 1904 deletions

View File

@@ -63,6 +63,8 @@ jobs:
build-args: |
POSTHOG_API_KEY=${{ secrets.PUBLIC_POSTHOG_API_KEY }}
INFISICAL_PLATFORM_VERSION=${{ steps.extract_version.outputs.version }}
DD_GIT_REPOSITORY_URL=${{ github.server_url }}/${{ github.repository }}
DD_GIT_COMMIT_SHA=${{ github.sha }}
infisical-fips-standalone:
name: Build infisical standalone image postgres

View File

@@ -35,7 +35,7 @@ jobs:
run: kubectl create namespace infisical-gateway
- name: Create gateway secret
run: kubectl create secret generic infisical-gateway-environment --from-literal=TOKEN=my-test-token -n infisical-gateway
run: kubectl create secret generic infisical-gateway-environment --from-literal=TOKEN=my-test-token --from-literal=INFISICAL_RELAY_NAME=my-test-relay -n infisical-gateway
- name: Run chart-testing (install)
run: |

View File

@@ -173,6 +173,12 @@ COPY --from=frontend-runner /app ./backend/frontend-build
ARG INFISICAL_PLATFORM_VERSION
ENV INFISICAL_PLATFORM_VERSION $INFISICAL_PLATFORM_VERSION
ARG DD_GIT_REPOSITORY_URL
ENV DD_GIT_REPOSITORY_URL $DD_GIT_REPOSITORY_URL
ARG DD_GIT_COMMIT_SHA
ENV DD_GIT_COMMIT_SHA $DD_GIT_COMMIT_SHA
ENV PORT 8080
ENV HOST=0.0.0.0
ENV HTTPS_ENABLED false

View File

@@ -93,6 +93,7 @@ import { TOrgAdminServiceFactory } from "@app/services/org-admin/org-admin-servi
import { TPkiAlertServiceFactory } from "@app/services/pki-alert/pki-alert-service";
import { TPkiCollectionServiceFactory } from "@app/services/pki-collection/pki-collection-service";
import { TPkiSubscriberServiceFactory } from "@app/services/pki-subscriber/pki-subscriber-service";
import { TPkiSyncServiceFactory } from "@app/services/pki-sync/pki-sync-service";
import { TPkiTemplatesServiceFactory } from "@app/services/pki-templates/pki-templates-service";
import { TProjectServiceFactory } from "@app/services/project/project-service";
import { TProjectBotServiceFactory } from "@app/services/project-bot/project-bot-service";
@@ -267,6 +268,7 @@ declare module "fastify" {
certificateEst: TCertificateEstServiceFactory;
pkiCollection: TPkiCollectionServiceFactory;
pkiSubscriber: TPkiSubscriberServiceFactory;
pkiSync: TPkiSyncServiceFactory;
secretScanning: TSecretScanningServiceFactory;
license: TLicenseServiceFactory;
trustedIp: TTrustedIpServiceFactory;

View File

@@ -263,6 +263,9 @@ import {
TPkiSubscribers,
TPkiSubscribersInsert,
TPkiSubscribersUpdate,
TPkiSyncs,
TPkiSyncsInsert,
TPkiSyncsUpdate,
TProjectBots,
TProjectBotsInsert,
TProjectBotsUpdate,
@@ -680,6 +683,7 @@ declare module "knex/types/tables" {
TPkiSubscribersInsert,
TPkiSubscribersUpdate
>;
[TableName.PkiSync]: KnexOriginal.CompositeTableType<TPkiSyncs, TPkiSyncsInsert, TPkiSyncsUpdate>;
[TableName.UserGroupMembership]: KnexOriginal.CompositeTableType<
TUserGroupMembership,
TUserGroupMembershipInsert,

View File

@@ -0,0 +1,47 @@
import { Knex } from "knex";
import { TableName } from "@app/db/schemas";
import { createOnUpdateTrigger, dropOnUpdateTrigger } from "@app/db/utils";
/**
 * Creates the `pki_syncs` table, which tracks certificate sync destinations
 * for a project along with per-operation (sync / import / remove) status
 * bookkeeping, and installs the shared `updatedAt` trigger.
 */
export async function up(knex: Knex): Promise<void> {
  const tableExists = await knex.schema.hasTable(TableName.PkiSync);
  if (tableExists) return;

  await knex.schema.createTable(TableName.PkiSync, (table) => {
    table.uuid("id", { primaryKey: true }).defaultTo(knex.fn.uuid());
    table.string("name", 32).notNullable();
    table.string("description");
    table.string("destination").notNullable();
    table.boolean("isAutoSyncEnabled").notNullable().defaultTo(true);
    table.integer("version").defaultTo(1).notNullable();
    table.jsonb("destinationConfig").notNullable();
    table.jsonb("syncOptions").notNullable();
    // Owning project — deleting the project cascades to its syncs.
    table.string("projectId").notNullable();
    table.foreign("projectId").references("id").inTable(TableName.Project).onDelete("CASCADE");
    // Optional subscriber source — detached (set NULL), not deleted, with the subscriber.
    table.uuid("subscriberId");
    table.foreign("subscriberId").references("id").inTable(TableName.PkiSubscriber).onDelete("SET NULL");
    table.uuid("connectionId").notNullable();
    table.foreign("connectionId").references("id").inTable(TableName.AppConnection);
    table.timestamps(true, true, true);
    // Status of the most recent sync job.
    table.string("syncStatus");
    table.string("lastSyncJobId");
    table.string("lastSyncMessage");
    table.datetime("lastSyncedAt");
    // Status of the most recent import job.
    table.string("importStatus");
    table.string("lastImportJobId");
    table.string("lastImportMessage");
    table.datetime("lastImportedAt");
    // Status of the most recent remove job.
    table.string("removeStatus");
    table.string("lastRemoveJobId");
    table.string("lastRemoveMessage");
    table.datetime("lastRemovedAt");
    // A sync name must be unique within its project.
    table.unique(["name", "projectId"], { indexName: "pki_syncs_name_project_id_unique" });
  });

  await createOnUpdateTrigger(knex, TableName.PkiSync);
}
/**
 * Reverts the `up` migration: removes the auto-update trigger, then drops the
 * `pki_syncs` table.
 *
 * The trigger is dropped BEFORE the table on purpose: in Postgres,
 * `DROP TRIGGER IF EXISTS ... ON <table>` still raises an error when the
 * table itself does not exist, so dropping the table first would make the
 * subsequent trigger drop fail.
 */
export async function down(knex: Knex): Promise<void> {
  await dropOnUpdateTrigger(knex, TableName.PkiSync);
  await knex.schema.dropTableIfExists(TableName.PkiSync);
}

View File

@@ -0,0 +1,31 @@
import { Knex } from "knex";
import { TableName } from "../schemas";
/**
 * Widens the Kubernetes-auth allow-list columns (`allowedNames`,
 * `allowedNamespaces`, `allowedAudience`) to 1000 characters.
 */
export async function up(knex: Knex): Promise<void> {
  const namespacesExists = await knex.schema.hasColumn(TableName.IdentityKubernetesAuth, "allowedNamespaces");
  const namesExists = await knex.schema.hasColumn(TableName.IdentityKubernetesAuth, "allowedNames");
  const audienceExists = await knex.schema.hasColumn(TableName.IdentityKubernetesAuth, "allowedAudience");

  // Nothing to do when none of the columns are present.
  if (!namespacesExists && !namesExists && !audienceExists) return;

  await knex.schema.alterTable(TableName.IdentityKubernetesAuth, (table) => {
    // NOTE(review): .alter() replaces the full column definition, so
    // notNullable() is restated here — this presumes the columns were already
    // NOT NULL; confirm against the original table definition.
    if (namesExists) table.string("allowedNames", 1000).notNullable().alter();
    if (namespacesExists) table.string("allowedNamespaces", 1000).notNullable().alter();
    if (audienceExists) table.string("allowedAudience", 1000).notNullable().alter();
  });
}
/**
 * Reverts the allow-list columns back to their previous 255-character width.
 * NOTE(review): values longer than 255 written while `up` was live may be
 * truncated or rejected by this revert — acceptable for a down migration,
 * but worth confirming.
 */
export async function down(knex: Knex): Promise<void> {
  const namespacesExists = await knex.schema.hasColumn(TableName.IdentityKubernetesAuth, "allowedNamespaces");
  const namesExists = await knex.schema.hasColumn(TableName.IdentityKubernetesAuth, "allowedNames");
  const audienceExists = await knex.schema.hasColumn(TableName.IdentityKubernetesAuth, "allowedAudience");

  // Nothing to do when none of the columns are present.
  if (!namespacesExists && !namesExists && !audienceExists) return;

  await knex.schema.alterTable(TableName.IdentityKubernetesAuth, (table) => {
    if (namesExists) table.string("allowedNames", 255).notNullable().alter();
    if (namespacesExists) table.string("allowedNamespaces", 255).notNullable().alter();
    if (audienceExists) table.string("allowedAudience", 255).notNullable().alter();
  });
}

View File

@@ -0,0 +1,19 @@
import { Knex } from "knex";
import { TableName } from "@app/db/schemas";
/**
 * Drops the DEFAULT on `skipMultilineEncoding` so rows inserted without an
 * explicit value store NULL instead of false.
 */
export async function up(knex: Knex): Promise<void> {
  const tableExists = await knex.schema.hasTable(TableName.SecretApprovalRequestSecretV2);
  if (!tableExists) return;

  await knex.schema.alterTable(TableName.SecretApprovalRequestSecretV2, (table) => {
    // .alter() re-declares the column without defaultTo, removing the default.
    table.boolean("skipMultilineEncoding").alter();
  });
}
/**
 * Reverts the `up` migration by restoring DEFAULT false on
 * `skipMultilineEncoding`.
 */
export async function down(knex: Knex): Promise<void> {
  const tableExists = await knex.schema.hasTable(TableName.SecretApprovalRequestSecretV2);
  if (!tableExists) return;

  await knex.schema.alterTable(TableName.SecretApprovalRequestSecretV2, (table) => {
    table.boolean("skipMultilineEncoding").defaultTo(false).alter();
  });
}

View File

@@ -87,6 +87,7 @@ export * from "./pki-alerts";
export * from "./pki-collection-items";
export * from "./pki-collections";
export * from "./pki-subscribers";
export * from "./pki-syncs";
export * from "./project-bots";
export * from "./project-environments";
export * from "./project-gateways";

View File

@@ -156,6 +156,7 @@ export enum TableName {
ProjectSlackConfigs = "project_slack_configs",
AppConnection = "app_connections",
SecretSync = "secret_syncs",
PkiSync = "pki_syncs",
KmipClient = "kmip_clients",
KmipOrgConfig = "kmip_org_configs",
KmipOrgServerCertificates = "kmip_org_server_certificates",

View File

@@ -0,0 +1,40 @@
// Code generated by automation script, DO NOT EDIT.
// Automated by pulling database and generating zod schema
// To update. Just run npm run generate:schema
// Written by akhilmhdh.
import { z } from "zod";
import { TImmutableDBKeys } from "./models";
// Zod schema mirroring the `pki_syncs` table. Generated — regenerate with
// `npm run generate:schema` rather than editing by hand.
export const PkiSyncsSchema = z.object({
  id: z.string().uuid(),
  name: z.string(),
  description: z.string().nullable().optional(),
  destination: z.string(),
  isAutoSyncEnabled: z.boolean().default(true),
  version: z.number().default(1),
  destinationConfig: z.unknown(),
  syncOptions: z.unknown(),
  projectId: z.string(),
  subscriberId: z.string().uuid().nullable().optional(),
  connectionId: z.string().uuid(),
  createdAt: z.date(),
  updatedAt: z.date(),
  // Bookkeeping for the most recent sync job.
  syncStatus: z.string().nullable().optional(),
  lastSyncJobId: z.string().nullable().optional(),
  lastSyncMessage: z.string().nullable().optional(),
  lastSyncedAt: z.date().nullable().optional(),
  // Bookkeeping for the most recent import job.
  importStatus: z.string().nullable().optional(),
  lastImportJobId: z.string().nullable().optional(),
  lastImportMessage: z.string().nullable().optional(),
  lastImportedAt: z.date().nullable().optional(),
  // Bookkeeping for the most recent remove job.
  removeStatus: z.string().nullable().optional(),
  lastRemoveJobId: z.string().nullable().optional(),
  lastRemoveMessage: z.string().nullable().optional(),
  lastRemovedAt: z.date().nullable().optional()
});

// Full row type as read from the database.
export type TPkiSyncs = z.infer<typeof PkiSyncsSchema>;
// Insert type: immutable DB-managed keys (see TImmutableDBKeys) are omitted.
export type TPkiSyncsInsert = Omit<z.input<typeof PkiSyncsSchema>, TImmutableDBKeys>;
// Update type: same as insert but every field is optional.
export type TPkiSyncsUpdate = Partial<Omit<z.input<typeof PkiSyncsSchema>, TImmutableDBKeys>>;

View File

@@ -17,7 +17,7 @@ export const SecretApprovalRequestsSecretsV2Schema = z.object({
encryptedComment: zodBuffer.nullable().optional(),
reminderNote: z.string().nullable().optional(),
reminderRepeatDays: z.number().nullable().optional(),
skipMultilineEncoding: z.boolean().default(false).nullable().optional(),
skipMultilineEncoding: z.boolean().nullable().optional(),
metadata: z.unknown().nullable().optional(),
createdAt: z.date(),
updatedAt: z.date(),

View File

@@ -2,6 +2,7 @@ import { packRules } from "@casl/ability/extra";
import { z } from "zod";
import { ProjectMembershipRole, ProjectRolesSchema } from "@app/db/schemas";
import { EventType } from "@app/ee/services/audit-log/audit-log-types";
import {
backfillPermissionV1SchemaToV2Schema,
ProjectPermissionV1Schema
@@ -50,6 +51,10 @@ export const registerDeprecatedProjectRoleRouter = async (server: FastifyZodProv
},
onRequest: verifyAuth([AuthMode.JWT, AuthMode.IDENTITY_ACCESS_TOKEN]),
handler: async (req) => {
const stringifiedPermissions = JSON.stringify(
packRules(backfillPermissionV1SchemaToV2Schema(req.body.permissions, true))
);
const role = await server.services.projectRole.createRole({
actorAuthMethod: req.permission.authMethod,
actorId: req.permission.id,
@@ -61,7 +66,23 @@ export const registerDeprecatedProjectRoleRouter = async (server: FastifyZodProv
},
data: {
...req.body,
permissions: JSON.stringify(packRules(backfillPermissionV1SchemaToV2Schema(req.body.permissions, true)))
permissions: stringifiedPermissions
}
});
await server.services.auditLog.createAuditLog({
...req.auditLogInfo,
orgId: req.permission.orgId,
projectId: role.projectId,
event: {
type: EventType.CREATE_PROJECT_ROLE,
metadata: {
roleId: role.id,
slug: req.body.slug,
name: req.body.name,
description: req.body.description,
permissions: stringifiedPermissions
}
}
});
@@ -106,6 +127,10 @@ export const registerDeprecatedProjectRoleRouter = async (server: FastifyZodProv
},
onRequest: verifyAuth([AuthMode.JWT, AuthMode.IDENTITY_ACCESS_TOKEN]),
handler: async (req) => {
const stringifiedPermissions = req.body.permissions
? JSON.stringify(packRules(backfillPermissionV1SchemaToV2Schema(req.body.permissions, true)))
: undefined;
const role = await server.services.projectRole.updateRole({
actorAuthMethod: req.permission.authMethod,
actorId: req.permission.id,
@@ -114,11 +139,26 @@ export const registerDeprecatedProjectRoleRouter = async (server: FastifyZodProv
roleId: req.params.roleId,
data: {
...req.body,
permissions: req.body.permissions
? JSON.stringify(packRules(backfillPermissionV1SchemaToV2Schema(req.body.permissions, true)))
: undefined
permissions: stringifiedPermissions
}
});
await server.services.auditLog.createAuditLog({
...req.auditLogInfo,
orgId: req.permission.orgId,
projectId: role.projectId,
event: {
type: EventType.UPDATE_PROJECT_ROLE,
metadata: {
roleId: role.id,
slug: req.body.slug,
name: req.body.name,
description: req.body.description,
permissions: stringifiedPermissions
}
}
});
return { role };
}
});
@@ -155,6 +195,21 @@ export const registerDeprecatedProjectRoleRouter = async (server: FastifyZodProv
actor: req.permission.type,
roleId: req.params.roleId
});
await server.services.auditLog.createAuditLog({
...req.auditLogInfo,
orgId: req.permission.orgId,
projectId: role.projectId,
event: {
type: EventType.DELETE_PROJECT_ROLE,
metadata: {
roleId: role.id,
slug: role.slug,
name: role.name
}
}
});
return { role };
}
});

View File

@@ -1,6 +1,7 @@
import { z } from "zod";
import { OrgMembershipRole, OrgMembershipsSchema, OrgRolesSchema } from "@app/db/schemas";
import { EventType } from "@app/ee/services/audit-log/audit-log-types";
import { readLimit, writeLimit } from "@app/server/config/rateLimiter";
import { slugSchema } from "@app/server/lib/schemas";
import { verifyAuth } from "@app/server/plugins/auth/verify-auth";
@@ -42,6 +43,22 @@ export const registerOrgRoleRouter = async (server: FastifyZodProvider) => {
req.permission.authMethod,
req.permission.orgId
);
await server.services.auditLog.createAuditLog({
...req.auditLogInfo,
orgId: req.permission.orgId,
event: {
type: EventType.CREATE_ORG_ROLE,
metadata: {
roleId: role.id,
slug: req.body.slug,
name: req.body.name,
description: req.body.description,
permissions: JSON.stringify(req.body.permissions)
}
}
});
return { role };
}
});
@@ -116,6 +133,22 @@ export const registerOrgRoleRouter = async (server: FastifyZodProvider) => {
req.permission.authMethod,
req.permission.orgId
);
await server.services.auditLog.createAuditLog({
...req.auditLogInfo,
orgId: req.permission.orgId,
event: {
type: EventType.UPDATE_ORG_ROLE,
metadata: {
roleId: role.id,
slug: req.body.slug,
name: req.body.name,
description: req.body.description,
permissions: req.body.permissions ? JSON.stringify(req.body.permissions) : undefined
}
}
});
return { role };
}
});
@@ -146,6 +179,16 @@ export const registerOrgRoleRouter = async (server: FastifyZodProvider) => {
req.permission.authMethod,
req.permission.orgId
);
await server.services.auditLog.createAuditLog({
...req.auditLogInfo,
orgId: req.permission.orgId,
event: {
type: EventType.DELETE_ORG_ROLE,
metadata: { roleId: role.id, slug: role.slug, name: role.name }
}
});
return { role };
}
});

View File

@@ -2,6 +2,7 @@ import { packRules } from "@casl/ability/extra";
import { z } from "zod";
import { ProjectMembershipRole, ProjectRolesSchema } from "@app/db/schemas";
import { EventType } from "@app/ee/services/audit-log/audit-log-types";
import { checkForInvalidPermissionCombination } from "@app/ee/services/permission/permission-fns";
import { ProjectPermissionV2Schema } from "@app/ee/services/permission/project-permission";
import { ApiDocsTags, PROJECT_ROLE } from "@app/lib/api-docs";
@@ -52,6 +53,8 @@ export const registerProjectRoleRouter = async (server: FastifyZodProvider) => {
},
onRequest: verifyAuth([AuthMode.JWT, AuthMode.IDENTITY_ACCESS_TOKEN]),
handler: async (req) => {
const stringifiedPermissions = JSON.stringify(packRules(req.body.permissions));
const role = await server.services.projectRole.createRole({
actorAuthMethod: req.permission.authMethod,
actorId: req.permission.id,
@@ -63,9 +66,26 @@ export const registerProjectRoleRouter = async (server: FastifyZodProvider) => {
},
data: {
...req.body,
permissions: JSON.stringify(packRules(req.body.permissions))
permissions: stringifiedPermissions
}
});
await server.services.auditLog.createAuditLog({
...req.auditLogInfo,
orgId: req.permission.orgId,
projectId: role.projectId,
event: {
type: EventType.CREATE_PROJECT_ROLE,
metadata: {
roleId: role.id,
slug: req.body.slug,
name: req.body.name,
description: req.body.description,
permissions: stringifiedPermissions
}
}
});
return { role };
}
});
@@ -112,6 +132,7 @@ export const registerProjectRoleRouter = async (server: FastifyZodProvider) => {
},
onRequest: verifyAuth([AuthMode.JWT, AuthMode.IDENTITY_ACCESS_TOKEN]),
handler: async (req) => {
const stringifiedPermissions = req.body.permissions ? JSON.stringify(packRules(req.body.permissions)) : undefined;
const role = await server.services.projectRole.updateRole({
actorAuthMethod: req.permission.authMethod,
actorId: req.permission.id,
@@ -120,9 +141,26 @@ export const registerProjectRoleRouter = async (server: FastifyZodProvider) => {
roleId: req.params.roleId,
data: {
...req.body,
permissions: req.body.permissions ? JSON.stringify(packRules(req.body.permissions)) : undefined
permissions: stringifiedPermissions
}
});
await server.services.auditLog.createAuditLog({
...req.auditLogInfo,
orgId: req.permission.orgId,
projectId: role.projectId,
event: {
type: EventType.UPDATE_PROJECT_ROLE,
metadata: {
roleId: role.id,
slug: req.body.slug,
name: req.body.name,
description: req.body.description,
permissions: stringifiedPermissions
}
}
});
return { role };
}
});
@@ -161,6 +199,21 @@ export const registerProjectRoleRouter = async (server: FastifyZodProvider) => {
actor: req.permission.type,
roleId: req.params.roleId
});
await server.services.auditLog.createAuditLog({
...req.auditLogInfo,
orgId: req.permission.orgId,
projectId: role.projectId,
event: {
type: EventType.DELETE_PROJECT_ROLE,
metadata: {
roleId: role.id,
slug: role.slug,
name: role.name
}
}
});
return { role };
}
});

View File

@@ -1,9 +1,10 @@
import { z } from "zod";
import { RelaysSchema } from "@app/db/schemas";
import { getConfig } from "@app/lib/config/env";
import { crypto } from "@app/lib/crypto/cryptography";
import { BadRequestError, UnauthorizedError } from "@app/lib/errors";
import { writeLimit } from "@app/server/config/rateLimiter";
import { UnauthorizedError } from "@app/lib/errors";
import { readLimit, writeLimit } from "@app/server/config/rateLimiter";
import { slugSchema } from "@app/server/lib/schemas";
import { verifyAuth } from "@app/server/plugins/auth/verify-auth";
import { AuthMode } from "@app/services/auth/auth-type";
@@ -89,14 +90,59 @@ export const registerRelayRouter = async (server: FastifyZodProvider) => {
},
onRequest: verifyAuth([AuthMode.IDENTITY_ACCESS_TOKEN]),
handler: async (req) => {
throw new BadRequestError({
message: "Org relay registration is not yet supported"
});
return server.services.relay.registerRelay({
...req.body,
identityId: req.permission.id,
orgId: req.permission.orgId
orgId: req.permission.orgId,
actorAuthMethod: req.permission.authMethod
});
}
});
server.route({
method: "GET",
url: "/",
schema: {
response: {
200: RelaysSchema.array()
}
},
config: {
rateLimit: readLimit
},
onRequest: verifyAuth([AuthMode.JWT, AuthMode.IDENTITY_ACCESS_TOKEN]),
handler: async (req) => {
return server.services.relay.getRelays({
actorId: req.permission.id,
actor: req.permission.type,
actorAuthMethod: req.permission.authMethod,
actorOrgId: req.permission.orgId
});
}
});
server.route({
method: "DELETE",
url: "/:id",
config: {
rateLimit: writeLimit
},
schema: {
params: z.object({
id: z.string()
}),
response: {
200: RelaysSchema
}
},
onRequest: verifyAuth([AuthMode.JWT, AuthMode.IDENTITY_ACCESS_TOKEN]),
handler: async (req) => {
return server.services.relay.deleteRelay({
id: req.params.id,
actorId: req.permission.id,
actor: req.permission.type,
actorAuthMethod: req.permission.authMethod,
actorOrgId: req.permission.orgId
});
}
});

View File

@@ -320,10 +320,20 @@ export const registerSecretApprovalRequestRouter = async (server: FastifyZodProv
.array(),
secretPath: z.string(),
commits: secretRawSchema
.omit({ _id: true, environment: true, workspace: true, type: true, version: true, secretValue: true })
.omit({
_id: true,
environment: true,
workspace: true,
type: true,
version: true,
secretValue: true,
secretComment: true
})
.extend({
secretValueHidden: z.boolean(),
secretValue: z.string().optional(),
secretComment: z.string().optional(),
skipMultilineEncoding: z.boolean().nullish(),
isRotatedSecret: z.boolean().optional(),
op: z.string(),
tags: SanitizedTagSchema.array().optional(),
@@ -348,7 +358,8 @@ export const registerSecretApprovalRequestRouter = async (server: FastifyZodProv
secretValueHidden: z.boolean(),
secretComment: z.string().optional(),
tags: SanitizedTagSchema.array().optional(),
secretMetadata: ResourceMetadataSchema.nullish()
secretMetadata: ResourceMetadataSchema.nullish(),
skipMultilineEncoding: z.boolean().nullish()
})
.optional()
})

View File

@@ -2,6 +2,7 @@ import { packRules } from "@casl/ability/extra";
import { z } from "zod";
import { ProjectMembershipRole, ProjectRolesSchema } from "@app/db/schemas";
import { EventType } from "@app/ee/services/audit-log/audit-log-types";
import { checkForInvalidPermissionCombination } from "@app/ee/services/permission/permission-fns";
import { ProjectPermissionV2Schema } from "@app/ee/services/permission/project-permission";
import { ApiDocsTags, PROJECT_ROLE } from "@app/lib/api-docs";
@@ -52,6 +53,8 @@ export const registerDeprecatedProjectRoleRouter = async (server: FastifyZodProv
},
onRequest: verifyAuth([AuthMode.JWT, AuthMode.IDENTITY_ACCESS_TOKEN]),
handler: async (req) => {
const stringifiedPermissions = JSON.stringify(packRules(req.body.permissions));
const role = await server.services.projectRole.createRole({
actorAuthMethod: req.permission.authMethod,
actorId: req.permission.id,
@@ -63,9 +66,26 @@ export const registerDeprecatedProjectRoleRouter = async (server: FastifyZodProv
},
data: {
...req.body,
permissions: JSON.stringify(packRules(req.body.permissions))
permissions: stringifiedPermissions
}
});
await server.services.auditLog.createAuditLog({
...req.auditLogInfo,
orgId: req.permission.orgId,
projectId: role.projectId,
event: {
type: EventType.CREATE_PROJECT_ROLE,
metadata: {
roleId: role.id,
slug: req.body.slug,
name: req.body.name,
description: req.body.description,
permissions: stringifiedPermissions
}
}
});
return { role };
}
});
@@ -112,6 +132,7 @@ export const registerDeprecatedProjectRoleRouter = async (server: FastifyZodProv
},
onRequest: verifyAuth([AuthMode.JWT, AuthMode.IDENTITY_ACCESS_TOKEN]),
handler: async (req) => {
const stringifiedPermissions = req.body.permissions ? JSON.stringify(packRules(req.body.permissions)) : undefined;
const role = await server.services.projectRole.updateRole({
actorAuthMethod: req.permission.authMethod,
actorId: req.permission.id,
@@ -120,9 +141,26 @@ export const registerDeprecatedProjectRoleRouter = async (server: FastifyZodProv
roleId: req.params.roleId,
data: {
...req.body,
permissions: req.body.permissions ? JSON.stringify(packRules(req.body.permissions)) : undefined
permissions: stringifiedPermissions
}
});
await server.services.auditLog.createAuditLog({
...req.auditLogInfo,
orgId: req.permission.orgId,
projectId: role.projectId,
event: {
type: EventType.UPDATE_PROJECT_ROLE,
metadata: {
roleId: role.id,
slug: req.body.slug,
name: req.body.name,
description: req.body.description,
permissions: stringifiedPermissions
}
}
});
return { role };
}
});
@@ -161,6 +199,21 @@ export const registerDeprecatedProjectRoleRouter = async (server: FastifyZodProv
actor: req.permission.type,
roleId: req.params.roleId
});
await server.services.auditLog.createAuditLog({
...req.auditLogInfo,
orgId: req.permission.orgId,
projectId: role.projectId,
event: {
type: EventType.DELETE_PROJECT_ROLE,
metadata: {
roleId: role.id,
slug: role.slug,
name: role.name
}
}
});
return { role };
}
});

View File

@@ -407,6 +407,14 @@ export enum EventType {
SECRET_SYNC_SYNC_SECRETS = "secret-sync-sync-secrets",
SECRET_SYNC_IMPORT_SECRETS = "secret-sync-import-secrets",
SECRET_SYNC_REMOVE_SECRETS = "secret-sync-remove-secrets",
GET_PKI_SYNCS = "get-pki-syncs",
GET_PKI_SYNC = "get-pki-sync",
CREATE_PKI_SYNC = "create-pki-sync",
UPDATE_PKI_SYNC = "update-pki-sync",
DELETE_PKI_SYNC = "delete-pki-sync",
PKI_SYNC_SYNC_CERTIFICATES = "pki-sync-sync-certificates",
PKI_SYNC_IMPORT_CERTIFICATES = "pki-sync-import-certificates",
PKI_SYNC_REMOVE_CERTIFICATES = "pki-sync-remove-certificates",
OIDC_GROUP_MEMBERSHIP_MAPPING_ASSIGN_USER = "oidc-group-membership-mapping-assign-user",
OIDC_GROUP_MEMBERSHIP_MAPPING_REMOVE_USER = "oidc-group-membership-mapping-remove-user",
CREATE_KMIP_CLIENT = "create-kmip-client",
@@ -478,9 +486,21 @@ export enum EventType {
UPDATE_PROJECT = "update-project",
DELETE_PROJECT = "delete-project",
CREATE_PROJECT_ROLE = "create-project-role",
UPDATE_PROJECT_ROLE = "update-project-role",
DELETE_PROJECT_ROLE = "delete-project-role",
CREATE_ORG_ROLE = "create-org-role",
UPDATE_ORG_ROLE = "update-org-role",
DELETE_ORG_ROLE = "delete-org-role",
CREATE_SECRET_REMINDER = "create-secret-reminder",
GET_SECRET_REMINDER = "get-secret-reminder",
DELETE_SECRET_REMINDER = "delete-secret-reminder"
DELETE_SECRET_REMINDER = "delete-secret-reminder",
DASHBOARD_LIST_SECRETS = "dashboard-list-secrets",
DASHBOARD_GET_SECRET_VALUE = "dashboard-get-secret-value",
DASHBOARD_GET_SECRET_VERSION_VALUE = "dashboard-get-secret-version-value"
}
export const filterableSecretEvents: EventType[] = [
@@ -591,6 +611,7 @@ interface CreateSecretEvent {
secretKey: string;
secretVersion: number;
secretMetadata?: TSecretMetadata;
secretTags?: string[];
};
}
@@ -605,6 +626,7 @@ interface CreateSecretBatchEvent {
secretPath?: string;
secretVersion: number;
secretMetadata?: TSecretMetadata;
secretTags?: string[];
}>;
};
}
@@ -618,6 +640,7 @@ interface UpdateSecretEvent {
secretKey: string;
secretVersion: number;
secretMetadata?: TSecretMetadata;
secretTags?: string[];
};
}
@@ -632,6 +655,7 @@ interface UpdateSecretBatchEvent {
secretVersion: number;
secretMetadata?: TSecretMetadata;
secretPath?: string;
secretTags?: string[];
}>;
};
}
@@ -2943,6 +2967,77 @@ interface SecretSyncRemoveSecretsEvent {
};
}
/** Audit-log payload for EventType.GET_PKI_SYNCS (listing a project's PKI syncs). */
interface GetPkiSyncsEvent {
  type: EventType.GET_PKI_SYNCS;
  metadata: {
    projectId: string;
  };
}

/** Audit-log payload for EventType.GET_PKI_SYNC (fetching a single PKI sync). */
interface GetPkiSyncEvent {
  type: EventType.GET_PKI_SYNC;
  metadata: {
    destination: string;
    syncId: string;
  };
}

/** Audit-log payload for EventType.CREATE_PKI_SYNC. */
interface CreatePkiSyncEvent {
  type: EventType.CREATE_PKI_SYNC;
  metadata: {
    pkiSyncId: string;
    name: string;
    destination: string;
  };
}

/** Audit-log payload for EventType.UPDATE_PKI_SYNC. */
interface UpdatePkiSyncEvent {
  type: EventType.UPDATE_PKI_SYNC;
  metadata: {
    pkiSyncId: string;
    name: string;
  };
}

/** Audit-log payload for EventType.DELETE_PKI_SYNC. */
interface DeletePkiSyncEvent {
  type: EventType.DELETE_PKI_SYNC;
  metadata: {
    pkiSyncId: string;
    name: string;
    destination: string;
  };
}

/** Audit-log payload for a certificate sync job run (EventType.PKI_SYNC_SYNC_CERTIFICATES). */
interface PkiSyncSyncCertificatesEvent {
  type: EventType.PKI_SYNC_SYNC_CERTIFICATES;
  metadata: {
    syncId: string;
    // null when the job produced no message.
    syncMessage: string | null;
    jobId: string;
    jobRanAt: Date;
  };
}

/** Audit-log payload for a certificate import job run (EventType.PKI_SYNC_IMPORT_CERTIFICATES). */
interface PkiSyncImportCertificatesEvent {
  type: EventType.PKI_SYNC_IMPORT_CERTIFICATES;
  metadata: {
    syncId: string;
    importMessage: string | null;
    jobId: string;
    jobRanAt: Date;
  };
}

/** Audit-log payload for a certificate removal job run (EventType.PKI_SYNC_REMOVE_CERTIFICATES). */
interface PkiSyncRemoveCertificatesEvent {
  type: EventType.PKI_SYNC_REMOVE_CERTIFICATES;
  metadata: {
    syncId: string;
    removeMessage: string | null;
    jobId: string;
    jobRanAt: Date;
  };
}
interface OidcGroupMembershipMappingAssignUserEvent {
type: EventType.OIDC_GROUP_MEMBERSHIP_MAPPING_ASSIGN_USER;
metadata: {
@@ -3502,6 +3597,96 @@ interface ProjectDeleteEvent {
};
}
/** Audit-log payload for EventType.DASHBOARD_LIST_SECRETS (secrets listed in the dashboard). */
interface DashboardListSecretsEvent {
  type: EventType.DASHBOARD_LIST_SECRETS;
  metadata: {
    environment: string;
    secretPath: string;
    numberOfSecrets: number;
    secretIds: string[];
  };
}

/** Audit-log payload for EventType.DASHBOARD_GET_SECRET_VALUE (a secret value revealed in the dashboard). */
interface DashboardGetSecretValueEvent {
  type: EventType.DASHBOARD_GET_SECRET_VALUE;
  metadata: {
    secretId: string;
    secretKey: string;
    environment: string;
    secretPath: string;
  };
}

/** Audit-log payload for EventType.DASHBOARD_GET_SECRET_VERSION_VALUE (a historical secret version revealed). */
interface DashboardGetSecretVersionValueEvent {
  type: EventType.DASHBOARD_GET_SECRET_VERSION_VALUE;
  metadata: {
    secretId: string;
    version: string;
  };
}

/** Audit-log payload for EventType.CREATE_PROJECT_ROLE (emitted by the project-role routers). */
interface ProjectRoleCreateEvent {
  type: EventType.CREATE_PROJECT_ROLE;
  metadata: {
    roleId: string;
    slug: string;
    name: string;
    description?: string | null;
    // JSON-stringified packed CASL permission rules.
    permissions: string;
  };
}

/** Audit-log payload for EventType.UPDATE_PROJECT_ROLE; fields are optional because updates are partial. */
interface ProjectRoleUpdateEvent {
  type: EventType.UPDATE_PROJECT_ROLE;
  metadata: {
    roleId: string;
    slug?: string;
    name?: string;
    description?: string | null;
    permissions?: string;
  };
}

/** Audit-log payload for EventType.DELETE_PROJECT_ROLE. */
interface ProjectRoleDeleteEvent {
  type: EventType.DELETE_PROJECT_ROLE;
  metadata: {
    roleId: string;
    slug: string;
    name: string;
  };
}

/** Audit-log payload for EventType.CREATE_ORG_ROLE (emitted by the org-role router). */
interface OrgRoleCreateEvent {
  type: EventType.CREATE_ORG_ROLE;
  metadata: {
    roleId: string;
    slug: string;
    name: string;
    description?: string | null;
    // JSON-stringified permission rules.
    permissions: string;
  };
}

/** Audit-log payload for EventType.UPDATE_ORG_ROLE; fields are optional because updates are partial. */
interface OrgRoleUpdateEvent {
  type: EventType.UPDATE_ORG_ROLE;
  metadata: {
    roleId: string;
    slug?: string;
    name?: string;
    description?: string | null;
    permissions?: string;
  };
}

/** Audit-log payload for EventType.DELETE_ORG_ROLE. */
interface OrgRoleDeleteEvent {
  type: EventType.DELETE_ORG_ROLE;
  metadata: {
    roleId: string;
    slug: string;
    name: string;
  };
}
export type Event =
| GetSecretsEvent
| GetSecretEvent
@@ -3753,6 +3938,14 @@ export type Event =
| SecretSyncSyncSecretsEvent
| SecretSyncImportSecretsEvent
| SecretSyncRemoveSecretsEvent
| GetPkiSyncsEvent
| GetPkiSyncEvent
| CreatePkiSyncEvent
| UpdatePkiSyncEvent
| DeletePkiSyncEvent
| PkiSyncSyncCertificatesEvent
| PkiSyncImportCertificatesEvent
| PkiSyncRemoveCertificatesEvent
| OidcGroupMembershipMappingAssignUserEvent
| OidcGroupMembershipMappingRemoveUserEvent
| CreateKmipClientEvent
@@ -3818,4 +4011,13 @@ export type Event =
| ProjectDeleteEvent
| SecretReminderCreateEvent
| SecretReminderGetEvent
| SecretReminderDeleteEvent;
| SecretReminderDeleteEvent
| DashboardListSecretsEvent
| DashboardGetSecretValueEvent
| DashboardGetSecretVersionValueEvent
| ProjectRoleCreateEvent
| ProjectRoleUpdateEvent
| ProjectRoleDeleteEvent
| OrgRoleCreateEvent
| OrgRoleUpdateEvent
| OrgRoleDeleteEvent;

View File

@@ -395,7 +395,8 @@ export const gatewayV2ServiceFactory = ({
relayId: gateway.relayId,
orgId: gateway.orgId,
orgName: gateway.orgName,
gatewayId
gatewayId,
gatewayName: gateway.name
});
return {
@@ -508,7 +509,8 @@ export const gatewayV2ServiceFactory = ({
const relayCredentials = await relayService.getCredentialsForGateway({
relayName,
orgId,
gatewayId: gateway.id
gatewayId: gateway.id,
gatewayName: gateway.name
});
return {

View File

@@ -31,6 +31,7 @@ export const getDefaultOnPremFeatures = () => {
caCrl: false,
sshHostGroups: false,
enterpriseSecretSyncs: false,
enterpriseCertificateSyncs: false,
enterpriseAppConnections: true,
machineIdentityAuthTemplates: false
};

View File

@@ -62,6 +62,7 @@ export const getDefaultOnPremFeatures = (): TFeatureSet => ({
sshHostGroups: false,
secretScanning: false,
enterpriseSecretSyncs: false,
enterpriseCertificateSyncs: false,
enterpriseAppConnections: false,
fips: false,
eventSubscriptions: false,

View File

@@ -75,6 +75,7 @@ export type TFeatureSet = {
sshHostGroups: false;
secretScanning: false;
enterpriseSecretSyncs: false;
enterpriseCertificateSyncs: false;
enterpriseAppConnections: false;
machineIdentityAuthTemplates: false;
fips: false;

View File

@@ -13,6 +13,7 @@ import {
ProjectPermissionKmipActions,
ProjectPermissionMemberActions,
ProjectPermissionPkiSubscriberActions,
ProjectPermissionPkiSyncActions,
ProjectPermissionPkiTemplateActions,
ProjectPermissionSecretActions,
ProjectPermissionSecretEventActions,
@@ -209,6 +210,19 @@ const buildAdminPermissionRules = () => {
ProjectPermissionSub.SecretSyncs
);
can(
[
ProjectPermissionPkiSyncActions.Create,
ProjectPermissionPkiSyncActions.Edit,
ProjectPermissionPkiSyncActions.Delete,
ProjectPermissionPkiSyncActions.Read,
ProjectPermissionPkiSyncActions.SyncCertificates,
ProjectPermissionPkiSyncActions.ImportCertificates,
ProjectPermissionPkiSyncActions.RemoveCertificates
],
ProjectPermissionSub.PkiSyncs
);
can(
[
ProjectPermissionKmipActions.CreateClients,
@@ -462,6 +476,19 @@ const buildMemberPermissionRules = () => {
ProjectPermissionSub.SecretSyncs
);
can(
[
ProjectPermissionPkiSyncActions.Create,
ProjectPermissionPkiSyncActions.Edit,
ProjectPermissionPkiSyncActions.Delete,
ProjectPermissionPkiSyncActions.Read,
ProjectPermissionPkiSyncActions.SyncCertificates,
ProjectPermissionPkiSyncActions.ImportCertificates,
ProjectPermissionPkiSyncActions.RemoveCertificates
],
ProjectPermissionSub.PkiSyncs
);
can(
[
ProjectPermissionSecretScanningDataSourceActions.Read,
@@ -526,6 +553,7 @@ const buildViewerPermissionRules = () => {
can(ProjectPermissionActions.Read, ProjectPermissionSub.SshCertificates);
can(ProjectPermissionActions.Read, ProjectPermissionSub.SshCertificateTemplates);
can(ProjectPermissionSecretSyncActions.Read, ProjectPermissionSub.SecretSyncs);
can(ProjectPermissionPkiSyncActions.Read, ProjectPermissionSub.PkiSyncs);
can(ProjectPermissionCommitsActions.Read, ProjectPermissionSub.Commits);
can(

View File

@@ -58,6 +58,13 @@ export enum OrgPermissionGatewayActions {
AttachGateways = "attach-gateways"
}
/**
 * Org-level actions an actor may perform on relays.
 * Paired with `OrgPermissionSubjects.Relay` in `OrgPermissionSet` and checked
 * via CASL (e.g. `throwUnlessCan(OrgPermissionRelayActions.ListRelays, ...)`).
 */
export enum OrgPermissionRelayActions {
  CreateRelays = "create-relays",
  ListRelays = "list-relays",
  EditRelays = "edit-relays",
  DeleteRelays = "delete-relays"
}
export enum OrgPermissionIdentityActions {
Read = "read",
Create = "create",
@@ -109,6 +116,7 @@ export enum OrgPermissionSubjects {
AppConnections = "app-connections",
Kmip = "kmip",
Gateway = "gateway",
Relay = "relay",
SecretShare = "secret-share"
}
@@ -136,6 +144,7 @@ export type OrgPermissionSet =
| [OrgPermissionAuditLogsActions, OrgPermissionSubjects.AuditLogs]
| [OrgPermissionActions, OrgPermissionSubjects.ProjectTemplates]
| [OrgPermissionGatewayActions, OrgPermissionSubjects.Gateway]
| [OrgPermissionRelayActions, OrgPermissionSubjects.Relay]
| [
OrgPermissionAppConnectionActions,
(
@@ -279,6 +288,12 @@ export const OrgPermissionSchema = z.discriminatedUnion("subject", [
action: CASL_ACTION_SCHEMA_NATIVE_ENUM(OrgPermissionGatewayActions).describe(
"Describe what action an entity can take."
)
}),
z.object({
subject: z.literal(OrgPermissionSubjects.Relay).describe("The entity this permission pertains to."),
action: CASL_ACTION_SCHEMA_NATIVE_ENUM(OrgPermissionRelayActions).describe(
"Describe what action an entity can take."
)
})
]);
@@ -383,6 +398,11 @@ const buildAdminPermission = () => {
can(OrgPermissionGatewayActions.DeleteGateways, OrgPermissionSubjects.Gateway);
can(OrgPermissionGatewayActions.AttachGateways, OrgPermissionSubjects.Gateway);
can(OrgPermissionRelayActions.ListRelays, OrgPermissionSubjects.Relay);
can(OrgPermissionRelayActions.CreateRelays, OrgPermissionSubjects.Relay);
can(OrgPermissionRelayActions.EditRelays, OrgPermissionSubjects.Relay);
can(OrgPermissionRelayActions.DeleteRelays, OrgPermissionSubjects.Relay);
can(OrgPermissionAdminConsoleAction.AccessAllProjects, OrgPermissionSubjects.AdminConsole);
can(OrgPermissionKmipActions.Setup, OrgPermissionSubjects.Kmip);
@@ -445,6 +465,10 @@ const buildMemberPermission = () => {
can(OrgPermissionGatewayActions.CreateGateways, OrgPermissionSubjects.Gateway);
can(OrgPermissionGatewayActions.AttachGateways, OrgPermissionSubjects.Gateway);
can(OrgPermissionRelayActions.ListRelays, OrgPermissionSubjects.Relay);
can(OrgPermissionRelayActions.CreateRelays, OrgPermissionSubjects.Relay);
can(OrgPermissionRelayActions.EditRelays, OrgPermissionSubjects.Relay);
can(OrgPermissionMachineIdentityAuthTemplateActions.ListTemplates, OrgPermissionSubjects.MachineIdentityAuthTemplate);
can(
OrgPermissionMachineIdentityAuthTemplateActions.UnlinkTemplates,

View File

@@ -120,6 +120,16 @@ export enum ProjectPermissionSecretSyncActions {
RemoveSecrets = "remove-secrets"
}
/**
 * Project-level actions an actor may perform on PKI syncs.
 * Paired with `ProjectPermissionSub.PkiSyncs`; mirrors the shape of
 * `ProjectPermissionSecretSyncActions` (CRUD plus the three sync operations).
 */
export enum ProjectPermissionPkiSyncActions {
  Read = "read",
  Create = "create",
  Edit = "edit",
  Delete = "delete",
  SyncCertificates = "sync-certificates",
  ImportCertificates = "import-certificates",
  RemoveCertificates = "remove-certificates"
}
export enum ProjectPermissionSecretRotationActions {
Read = "read",
ReadGeneratedCredentials = "read-generated-credentials",
@@ -212,6 +222,7 @@ export enum ProjectPermissionSub {
Kms = "kms",
Cmek = "cmek",
SecretSyncs = "secret-syncs",
PkiSyncs = "pki-syncs",
Kmip = "kmip",
SecretScanningDataSources = "secret-scanning-data-sources",
SecretScanningFindings = "secret-scanning-findings",
@@ -244,6 +255,10 @@ export type SecretSyncSubjectFields = {
secretPath: string;
};
export type PkiSyncSubjectFields = {
subscriberName: string;
};
export type DynamicSecretSubjectFields = {
environment: string;
secretPath: string;
@@ -308,6 +323,10 @@ export type ProjectPermissionSet =
ProjectPermissionSecretSyncActions,
ProjectPermissionSub.SecretSyncs | (ForcedSubject<ProjectPermissionSub.SecretSyncs> & SecretSyncSubjectFields)
]
| [
ProjectPermissionPkiSyncActions,
ProjectPermissionSub.PkiSyncs | (ForcedSubject<ProjectPermissionSub.PkiSyncs> & PkiSyncSubjectFields)
]
| [
ProjectPermissionActions,
(
@@ -480,6 +499,22 @@ const SecretSyncConditionV2Schema = z
})
.partial();
// Operator-based matcher for a PKI sync's subscriber name: any subset of
// $eq / $neq / $in / $glob comparisons (all keys optional via .partial()).
const pkiSyncSubscriberNameOperatorsSchema = z
  .object({
    [PermissionConditionOperators.$EQ]: PermissionConditionSchema[PermissionConditionOperators.$EQ],
    [PermissionConditionOperators.$NEQ]: PermissionConditionSchema[PermissionConditionOperators.$NEQ],
    [PermissionConditionOperators.$IN]: PermissionConditionSchema[PermissionConditionOperators.$IN],
    [PermissionConditionOperators.$GLOB]: PermissionConditionSchema[PermissionConditionOperators.$GLOB]
  })
  .partial();

// Condition schema for PkiSyncs permission rules: scope a rule to subscriber
// names given either as a literal string or as an operator object.
const PkiSyncConditionSchema = z
  .object({
    subscriberName: z.union([z.string(), pkiSyncSubscriberNameOperatorsSchema])
  })
  .partial();
const SecretImportConditionSchema = z
.object({
environment: z.union([
@@ -943,6 +978,16 @@ export const ProjectPermissionV2Schema = z.discriminatedUnion("subject", [
"When specified, only matching conditions will be allowed to access given resource."
).optional()
}),
z.object({
subject: z.literal(ProjectPermissionSub.PkiSyncs).describe("The entity this permission pertains to."),
inverted: z.boolean().optional().describe("Whether rule allows or forbids."),
action: CASL_ACTION_SCHEMA_NATIVE_ENUM(ProjectPermissionPkiSyncActions).describe(
"Describe what action an entity can take."
),
conditions: PkiSyncConditionSchema.describe(
"When specified, only matching conditions will be allowed to access given resource."
).optional()
}),
z.object({
subject: z.literal(ProjectPermissionSub.SecretEvents).describe("The entity this permission pertains to."),
inverted: z.boolean().optional().describe("Whether rule allows or forbids."),

View File

@@ -754,7 +754,8 @@ export const pitServiceFactory = ({
secrets: newSecrets.map((secret) => ({
secretId: secret.id,
secretKey: secret.secretKey,
secretVersion: secret.version
secretVersion: secret.version,
secretTags: secret.tags?.map((tag) => tag.name)
}))
}
});
@@ -781,7 +782,8 @@ export const pitServiceFactory = ({
secrets: updatedSecrets.map((secret) => ({
secretId: secret.id,
secretKey: secret.secretKey,
secretVersion: secret.version
secretVersion: secret.version,
secretTags: secret.tags?.map((tag) => tag.name)
}))
}
});

View File

@@ -0,0 +1 @@
// Custom X.509 extension OID embedded in relay client certificates to carry
// connecting-gateway metadata as a JSON payload (currently `{ name }` — see
// the `connectingGatewayInfoExtension` built in the relay service).
// NOTE(review): 1.3.6.1.4.1.12345.… uses private-enterprise arc 12345, which
// looks like a placeholder PEN — confirm it is an intentionally reserved value.
export const RELAY_CONNECTING_GATEWAY_INFO = "1.3.6.1.4.1.12345.100.3";

View File

@@ -1,9 +1,13 @@
import { isIP } from "node:net";
import { ForbiddenError } from "@casl/ability";
import * as x509 from "@peculiar/x509";
import { TRelays } from "@app/db/schemas";
import { PgSqlLock } from "@app/keystore/keystore";
import { crypto } from "@app/lib/crypto";
import { BadRequestError, NotFoundError } from "@app/lib/errors";
import { ActorAuthMethod, ActorType } from "@app/services/auth/auth-type";
import { constructPemChainFromCerts, prependCertToPemChain } from "@app/services/certificate/certificate-fns";
import { CertExtendedKeyUsage, CertKeyAlgorithm, CertKeyUsage } from "@app/services/certificate/certificate-types";
import {
@@ -14,11 +18,15 @@ import { TKmsServiceFactory } from "@app/services/kms/kms-service";
import { KmsDataKey } from "@app/services/kms/kms-types";
import { verifyHostInputValidity } from "../dynamic-secret/dynamic-secret-fns";
import { TLicenseServiceFactory } from "../license/license-service";
import { OrgPermissionRelayActions, OrgPermissionSubjects } from "../permission/org-permission";
import { TPermissionServiceFactory } from "../permission/permission-service-types";
import { createSshCert, createSshKeyPair } from "../ssh/ssh-certificate-authority-fns";
import { SshCertType } from "../ssh/ssh-certificate-authority-types";
import { SshCertKeyAlgorithm } from "../ssh-certificate/ssh-certificate-types";
import { TInstanceRelayConfigDALFactory } from "./instance-relay-config-dal";
import { TOrgRelayConfigDALFactory } from "./org-relay-config-dal";
import { RELAY_CONNECTING_GATEWAY_INFO } from "./relay-constants";
import { TRelayDALFactory } from "./relay-dal";
export type TRelayServiceFactory = ReturnType<typeof relayServiceFactory>;
@@ -29,12 +37,16 @@ export const relayServiceFactory = ({
instanceRelayConfigDAL,
orgRelayConfigDAL,
relayDAL,
kmsService
kmsService,
licenseService,
permissionService
}: {
instanceRelayConfigDAL: TInstanceRelayConfigDALFactory;
orgRelayConfigDAL: TOrgRelayConfigDALFactory;
relayDAL: TRelayDALFactory;
kmsService: TKmsServiceFactory;
licenseService: TLicenseServiceFactory;
permissionService: TPermissionServiceFactory;
}) => {
const $getInstanceCAs = async () => {
const instanceConfig = await instanceRelayConfigDAL.transaction(async (tx) => {
@@ -639,8 +651,9 @@ export const relayServiceFactory = ({
true
),
new x509.ExtendedKeyUsageExtension([x509.ExtendedKeyUsage[CertExtendedKeyUsage.SERVER_AUTH]], true),
// san
new x509.SubjectAlternativeNameExtension([{ type: "ip", value: host }], false)
new x509.SubjectAlternativeNameExtension([{ type: isIP(host) ? "ip" : "dns", value: host }], false)
];
const relayServerSerialNumber = createSerialNumber();
@@ -689,6 +702,7 @@ export const relayServiceFactory = ({
const $generateRelayClientCredentials = async ({
gatewayId,
gatewayName,
orgId,
orgName,
relayPkiClientCaCertificate,
@@ -697,6 +711,7 @@ export const relayServiceFactory = ({
relayPkiServerCaCertificateChain
}: {
gatewayId: string;
gatewayName: string;
orgId: string;
orgName: string;
relayPkiClientCaCertificate: Buffer;
@@ -727,6 +742,16 @@ export const relayServiceFactory = ({
const clientCertPrivateKey = crypto.nativeCrypto.KeyObject.from(clientKeys.privateKey);
const clientCertSerialNumber = createSerialNumber();
const connectingGatewayInfoExtension = new x509.Extension(
RELAY_CONNECTING_GATEWAY_INFO,
false,
Buffer.from(
JSON.stringify({
name: gatewayName
})
)
);
// Build standard extensions
const extensions: x509.Extension[] = [
new x509.BasicConstraintsExtension(false),
@@ -740,7 +765,8 @@ export const relayServiceFactory = ({
x509.KeyUsageFlags[CertKeyUsage.KEY_AGREEMENT],
true
),
new x509.ExtendedKeyUsageExtension([x509.ExtendedKeyUsage[CertExtendedKeyUsage.CLIENT_AUTH]], true)
new x509.ExtendedKeyUsageExtension([x509.ExtendedKeyUsage[CertExtendedKeyUsage.CLIENT_AUTH]], true),
connectingGatewayInfoExtension
];
const clientCert = await x509.X509CertificateGenerator.create({
@@ -768,11 +794,13 @@ export const relayServiceFactory = ({
const getCredentialsForGateway = async ({
relayName,
orgId,
gatewayId
gatewayId,
gatewayName
}: {
relayName: string;
orgId: string;
gatewayId: string;
gatewayName: string;
}) => {
let relay: TRelays | null = await relayDAL.findOne({
orgId,
@@ -819,10 +847,10 @@ export const relayServiceFactory = ({
const relayClientSshCert = await createSshCert({
caPrivateKey: orgCAs.relaySshClientCaPrivateKey.toString("utf8"),
clientPublicKey: relayClientSshPublicKey,
keyId: `relay-client-${relay.id}`,
principals: [gatewayId],
keyId: `client-${relayName}`,
principals: [gatewayId, gatewayName],
certType: SshCertType.USER,
requestedTtl: "30d"
requestedTtl: "1d"
});
return {
@@ -837,12 +865,14 @@ export const relayServiceFactory = ({
relayId,
orgId,
orgName,
gatewayId
gatewayId,
gatewayName
}: {
relayId: string;
orgId: string;
orgName: string;
gatewayId: string;
gatewayName: string;
}) => {
const relay = await relayDAL.findOne({
id: relayId
@@ -860,6 +890,7 @@ export const relayServiceFactory = ({
const instanceCAs = await $getInstanceCAs();
const relayCertificateCredentials = await $generateRelayClientCredentials({
gatewayId,
gatewayName,
orgId,
orgName,
relayPkiClientCaCertificate: instanceCAs.instanceRelayPkiClientCaCertificate,
@@ -877,6 +908,7 @@ export const relayServiceFactory = ({
const orgCAs = await $getOrgCAs(orgId);
const relayCertificateCredentials = await $generateRelayClientCredentials({
gatewayId,
gatewayName,
orgId,
orgName,
relayPkiClientCaCertificate: orgCAs.relayPkiClientCaCertificate,
@@ -895,11 +927,13 @@ export const relayServiceFactory = ({
host,
name,
identityId,
actorAuthMethod,
orgId
}: {
host: string;
name: string;
identityId?: string;
actorAuthMethod?: ActorAuthMethod;
orgId?: string;
}) => {
let relay: TRelays;
@@ -908,6 +942,27 @@ export const relayServiceFactory = ({
await verifyHostInputValidity(host);
if (isOrgRelay) {
const orgLicensePlan = await licenseService.getPlan(orgId);
if (!orgLicensePlan.gateway) {
throw new BadRequestError({
message:
"Relay registration failed due to organization plan restrictions. Please upgrade your instance to Infisical's Enterprise plan."
});
}
const { permission } = await permissionService.getOrgPermission(
ActorType.IDENTITY,
identityId,
orgId,
actorAuthMethod!,
orgId
);
ForbiddenError.from(permission).throwUnlessCan(
OrgPermissionRelayActions.CreateRelays,
OrgPermissionSubjects.Relay
);
relay = await relayDAL.transaction(async (tx) => {
const existingRelay = await relayDAL.findOne(
{
@@ -995,9 +1050,75 @@ export const relayServiceFactory = ({
});
};
/**
 * Lists every relay visible to the calling org: instance-wide relays
 * (orgId = null) plus relays owned by the caller's own org.
 * Requires the org-level ListRelays permission; throws ForbiddenError otherwise.
 */
const getRelays = async ({
  actorId,
  actor,
  actorAuthMethod,
  actorOrgId
}: {
  actorId: string;
  actor: ActorType;
  actorAuthMethod: ActorAuthMethod;
  actorOrgId: string;
}) => {
  // Authorize before touching the DAL.
  const { permission } = await permissionService.getOrgPermission(
    actor,
    actorId,
    actorOrgId,
    actorAuthMethod,
    actorOrgId
  );
  ForbiddenError.from(permission).throwUnlessCan(OrgPermissionRelayActions.ListRelays, OrgPermissionSubjects.Relay);

  // Instance-wide relays are shared across all orgs; append the org's own.
  const sharedInstanceRelays = await relayDAL.find({
    orgId: null
  });
  const ownOrgRelays = await relayDAL.find({
    orgId: actorOrgId
  });

  return sharedInstanceRelays.concat(ownOrgRelays);
};
/**
 * Deletes an org-owned relay by id.
 * Requires the org-level DeleteRelays permission; throws ForbiddenError otherwise.
 * Instance-wide relays (orgId = null) and relays belonging to other orgs are
 * reported as NotFoundError so their existence is not leaked to the caller.
 * Returns the deleted relay row.
 */
const deleteRelay = async ({
  id,
  actorId,
  actor,
  actorAuthMethod,
  actorOrgId
}: {
  id: string;
  actorId: string;
  actor: ActorType;
  actorAuthMethod: ActorAuthMethod;
  actorOrgId: string;
}) => {
  const { permission } = await permissionService.getOrgPermission(
    actor,
    actorId,
    actorOrgId,
    actorAuthMethod,
    actorOrgId
  );
  ForbiddenError.from(permission).throwUnlessCan(OrgPermissionRelayActions.DeleteRelays, OrgPermissionSubjects.Relay);

  const relay = await relayDAL.findById(id);

  // orgId !== actorOrgId already rejects instance relays (orgId = null) since
  // actorOrgId is a non-null string, so no separate null check is needed.
  if (!relay || relay.orgId !== actorOrgId) {
    throw new NotFoundError({ message: "Relay not found" });
  }

  const deletedRelay = await relayDAL.deleteById(id);

  return deletedRelay;
};
return {
registerRelay,
getCredentialsForGateway,
getCredentialsForClient
getCredentialsForClient,
getRelays,
deleteRelay
};
};

View File

@@ -284,7 +284,8 @@ export const secretApprovalRequestSecretDALFactory = (db: TDbClient) => {
db.ref("version").withSchema(TableName.SecretVersionV2).as("secVerVersion"),
db.ref("key").withSchema(TableName.SecretVersionV2).as("secVerKey"),
db.ref("encryptedValue").withSchema(TableName.SecretVersionV2).as("secVerValue"),
db.ref("encryptedComment").withSchema(TableName.SecretVersionV2).as("secVerComment")
db.ref("encryptedComment").withSchema(TableName.SecretVersionV2).as("secVerComment"),
db.ref("skipMultilineEncoding").withSchema(TableName.SecretVersionV2).as("secVerSkipMultilineEncoding")
)
.select(
db.ref("id").withSchema(TableName.ResourceMetadata).as("metadataId"),
@@ -326,14 +327,22 @@ export const secretApprovalRequestSecretDALFactory = (db: TDbClient) => {
{
key: "secretVersion",
label: "secretVersion" as const,
mapper: ({ secretVersion, secVerVersion, secVerKey, secVerValue, secVerComment }) =>
mapper: ({
secretVersion,
secVerVersion,
secVerKey,
secVerValue,
secVerComment,
secVerSkipMultilineEncoding
}) =>
secretVersion
? {
version: secVerVersion,
id: secretVersion,
key: secVerKey,
encryptedValue: secVerValue,
encryptedComment: secVerComment
encryptedComment: secVerComment,
skipMultilineEncoding: secVerSkipMultilineEncoding
}
: undefined,
childrenMapper: [

View File

@@ -337,12 +337,17 @@ export const secretApprovalRequestServiceFactory = ({
? INFISICAL_SECRET_VALUE_HIDDEN_MASK
: el.secret && el.secret.isRotatedSecret
? undefined
: el.encryptedValue
: el.encryptedValue !== undefined && el.encryptedValue !== null
? secretManagerDecryptor({ cipherTextBlob: el.encryptedValue }).toString()
: "",
secretComment: el.encryptedComment
? secretManagerDecryptor({ cipherTextBlob: el.encryptedComment }).toString()
: "",
: undefined,
secretComment:
el.encryptedComment !== undefined && el.encryptedComment !== null
? secretManagerDecryptor({ cipherTextBlob: el.encryptedComment }).toString()
: undefined,
skipMultilineEncoding:
el.skipMultilineEncoding !== undefined && el.skipMultilineEncoding !== null
? el.skipMultilineEncoding
: undefined,
secret: el.secret
? {
secretKey: el.secret.key,
@@ -394,7 +399,8 @@ export const secretApprovalRequestServiceFactory = ({
? secretManagerDecryptor({ cipherTextBlob: el.secretVersion.encryptedComment }).toString()
: "",
tags: el.secretVersion.tags,
secretMetadata: el.oldSecretMetadata as ResourceMetadataDTO
secretMetadata: el.oldSecretMetadata as ResourceMetadataDTO,
skipMultilineEncoding: el.secretVersion.skipMultilineEncoding
}
: undefined
}));
@@ -733,9 +739,9 @@ export const secretApprovalRequestServiceFactory = ({
tx,
inputSecrets: secretUpdationCommits.map((el) => {
const encryptedValue =
!el.secret?.isRotatedSecret && typeof el.encryptedValue !== "undefined"
!el.secret?.isRotatedSecret && el.encryptedValue !== null && el.encryptedValue !== undefined
? {
encryptedValue: el.encryptedValue as Buffer,
encryptedValue: el.encryptedValue,
references: el.encryptedValue
? getAllSecretReferencesV2Bridge(
secretManagerDecryptor({
@@ -749,9 +755,9 @@ export const secretApprovalRequestServiceFactory = ({
filter: { id: el.secretId as string, type: SecretType.Shared },
data: {
reminderRepeatDays: el.reminderRepeatDays,
encryptedComment: el.encryptedComment,
encryptedComment: el.encryptedComment !== null ? el.encryptedComment : undefined,
reminderNote: el.reminderNote,
skipMultilineEncoding: el.skipMultilineEncoding,
skipMultilineEncoding: el.skipMultilineEncoding !== null ? el.skipMultilineEncoding : undefined,
key: el.key,
tags: el?.tags.map(({ id }) => id),
secretMetadata: el.secretMetadata as ResourceMetadataDTO,
@@ -1084,7 +1090,9 @@ export const secretApprovalRequestServiceFactory = ({
// @ts-expect-error not present on v1 secrets
secretKey: secret.key as string,
// @ts-expect-error not present on v1 secrets
secretMetadata: secret.secretMetadata as ResourceMetadataDTO
secretMetadata: secret.secretMetadata as ResourceMetadataDTO,
// @ts-expect-error not present on v1 secrets
secretTags: (secret.tags as { name: string }[])?.map((tag) => tag.name)
}))
}
});
@@ -1100,7 +1108,9 @@ export const secretApprovalRequestServiceFactory = ({
// @ts-expect-error not present on v1 secrets
secretKey: secret.key as string,
// @ts-expect-error not present on v1 secrets
secretMetadata: secret.secretMetadata as ResourceMetadataDTO
secretMetadata: secret.secretMetadata as ResourceMetadataDTO,
// @ts-expect-error not present on v1 secrets
secretTags: (secret.tags as { name: string }[])?.map((tag) => tag.name)
}
});
}
@@ -1119,7 +1129,9 @@ export const secretApprovalRequestServiceFactory = ({
// @ts-expect-error not present on v1 secrets
secretKey: secret.key as string,
// @ts-expect-error not present on v1 secrets
secretMetadata: secret.secretMetadata as ResourceMetadataDTO
secretMetadata: secret.secretMetadata as ResourceMetadataDTO,
// @ts-expect-error not present on v1 secrets
secretTags: (secret.tags as { name: string }[])?.map((tag) => tag.name)
}))
}
});
@@ -1135,7 +1147,9 @@ export const secretApprovalRequestServiceFactory = ({
// @ts-expect-error not present on v1 secrets
secretKey: secret.key as string,
// @ts-expect-error not present on v1 secrets
secretMetadata: secret.secretMetadata as ResourceMetadataDTO
secretMetadata: secret.secretMetadata as ResourceMetadataDTO,
// @ts-expect-error not present on v1 secrets
secretTags: (secret.tags as { name: string }[])?.map((tag) => tag.name)
}
});
}
@@ -1625,11 +1639,13 @@ export const secretApprovalRequestServiceFactory = ({
key: newSecretName || secretKey,
encryptedComment: setKnexStringValue(
secretComment,
(value) => secretManagerEncryptor({ plainText: Buffer.from(value) }).cipherTextBlob
(value) => secretManagerEncryptor({ plainText: Buffer.from(value) }).cipherTextBlob,
true // scott: we need to encrypt empty string on update to differentiate not updating comment vs clearing comment
),
encryptedValue: setKnexStringValue(
secretValue,
(value) => secretManagerEncryptor({ plainText: Buffer.from(value) }).cipherTextBlob
(value) => secretManagerEncryptor({ plainText: Buffer.from(value) }).cipherTextBlob,
true // scott: we need to encrypt empty string on update to differentiate not updating value vs clearing value
),
reminderRepeatDays,
reminderNote,

View File

@@ -47,6 +47,7 @@ export const KeyStorePrefixes = {
SyncSecretIntegrationLastRunTimestamp: (projectId: string, environmentSlug: string, secretPath: string) =>
`sync-integration-last-run-${projectId}-${environmentSlug}-${secretPath}` as const,
SecretSyncLock: (syncId: string) => `secret-sync-mutex-${syncId}` as const,
PkiSyncLock: (syncId: string) => `pki-sync-mutex-${syncId}` as const,
AppConnectionConcurrentJobs: (connectionId: string) => `app-connection-concurrency-${connectionId}` as const,
SecretRotationLock: (rotationId: string) => `secret-rotation-v2-mutex-${rotationId}` as const,
SecretScanningLock: (dataSourceId: string, resourceExternalId: string) =>

View File

@@ -50,6 +50,7 @@ export enum ApiDocsTags {
IdentitySpecificPrivilegesV2 = "Identity Specific Privileges V2",
AppConnections = "App Connections",
SecretSyncs = "Secret Syncs",
PkiSyncs = "PKI Syncs",
Integrations = "Integrations",
ServiceTokens = "Service Tokens",
AuditLogs = "Audit Logs",

View File

@@ -16,8 +16,12 @@ export const stripUndefinedInWhere = <T extends object>(val: T): Exclude<T, unde
// if its undefined its skipped in knex
// if its empty string its set as null
// else pass to the required one
export const setKnexStringValue = <T>(value: string | null | undefined, cb: (arg: string) => T) => {
export const setKnexStringValue = <T>(
value: string | null | undefined,
cb: (arg: string) => T,
allowEmptyString?: boolean
) => {
if (typeof value === "undefined") return;
if (value === "" || value === null) return null;
if ((value === "" && !allowEmptyString) || value === null) return null;
return cb(value);
};

View File

@@ -24,6 +24,11 @@ import { QueueWorkerProfile } from "@app/lib/types";
import { CaType } from "@app/services/certificate-authority/certificate-authority-enums";
import { ExternalPlatforms } from "@app/services/external-migration/external-migration-types";
import { TCreateUserNotificationDTO } from "@app/services/notification/notification-types";
import {
TQueuePkiSyncImportCertificatesByIdDTO,
TQueuePkiSyncRemoveCertificatesByIdDTO,
TQueuePkiSyncSyncCertificatesByIdDTO
} from "@app/services/pki-sync/pki-sync-types";
import {
TFailedIntegrationSyncEmailsPayload,
TIntegrationSyncPayload,
@@ -46,6 +51,7 @@ export enum QueueName {
AuditLogPrune = "audit-log-prune",
DailyResourceCleanUp = "daily-resource-cleanup",
DailyExpiringPkiItemAlert = "daily-expiring-pki-item-alert",
PkiSyncCleanup = "pki-sync-cleanup",
PkiSubscriber = "pki-subscriber",
TelemetryInstanceStats = "telemtry-self-hosted-stats",
IntegrationSync = "sync-integrations",
@@ -58,6 +64,7 @@ export enum QueueName {
CaLifecycle = "ca-lifecycle", // parent queue to ca-order-certificate-for-subscriber
SecretReplication = "secret-replication",
SecretSync = "secret-sync", // parent queue to push integration sync, webhook, and secret replication
PkiSync = "pki-sync",
ProjectV3Migration = "project-v3-migration",
AccessTokenStatusUpdate = "access-token-status-update",
ImportSecretsFromExternalSource = "import-secrets-from-external-source",
@@ -80,6 +87,7 @@ export enum QueueJobs {
AuditLogPrune = "audit-log-prune-job",
DailyResourceCleanUp = "daily-resource-cleanup-job",
DailyExpiringPkiItemAlert = "daily-expiring-pki-item-alert",
PkiSyncCleanup = "pki-sync-cleanup-job",
SecWebhook = "secret-webhook-trigger",
TelemetryInstanceStats = "telemetry-self-hosted-stats",
IntegrationSync = "secret-integration-pull",
@@ -91,6 +99,7 @@ export enum QueueJobs {
CaCrlRotation = "ca-crl-rotation-job",
SecretReplication = "secret-replication",
SecretSync = "secret-sync", // parent queue to push integration sync, webhook, and secret replication
PkiSync = "pki-sync",
ProjectV3Migration = "project-v3-migration",
IdentityAccessTokenStatusUpdate = "identity-access-token-status-update",
ServiceTokenStatusUpdate = "service-token-status-update",
@@ -99,6 +108,9 @@ export enum QueueJobs {
SecretSyncImportSecrets = "secret-sync-import-secrets",
SecretSyncRemoveSecrets = "secret-sync-remove-secrets",
SecretSyncSendActionFailedNotifications = "secret-sync-send-action-failed-notifications",
PkiSyncSyncCertificates = "pki-sync-sync-certificates",
PkiSyncImportCertificates = "pki-sync-import-certificates",
PkiSyncRemoveCertificates = "pki-sync-remove-certificates",
SecretRotationV2QueueRotations = "secret-rotation-v2-queue-rotations",
SecretRotationV2RotateSecrets = "secret-rotation-v2-rotate-secrets",
SecretRotationV2SendNotification = "secret-rotation-v2-send-notification",
@@ -141,6 +153,10 @@ export type TQueueJobTypes = {
name: QueueJobs.DailyExpiringPkiItemAlert;
payload: undefined;
};
[QueueName.PkiSyncCleanup]: {
name: QueueJobs.PkiSyncCleanup;
payload: undefined;
};
[QueueName.AuditLogPrune]: {
name: QueueJobs.AuditLogPrune;
payload: undefined;
@@ -218,6 +234,19 @@ export type TQueueJobTypes = {
name: QueueJobs.SecretSync;
payload: TSyncSecretsDTO;
};
[QueueName.PkiSync]:
| {
name: QueueJobs.PkiSyncSyncCertificates;
payload: TQueuePkiSyncSyncCertificatesByIdDTO;
}
| {
name: QueueJobs.PkiSyncImportCertificates;
payload: TQueuePkiSyncImportCertificatesByIdDTO;
}
| {
name: QueueJobs.PkiSyncRemoveCertificates;
payload: TQueuePkiSyncRemoveCertificatesByIdDTO;
};
[QueueName.ProjectV3Migration]: {
name: QueueJobs.ProjectV3Migration;
payload: { projectId: string };

View File

@@ -248,6 +248,10 @@ import { pkiCollectionServiceFactory } from "@app/services/pki-collection/pki-co
import { pkiSubscriberDALFactory } from "@app/services/pki-subscriber/pki-subscriber-dal";
import { pkiSubscriberQueueServiceFactory } from "@app/services/pki-subscriber/pki-subscriber-queue";
import { pkiSubscriberServiceFactory } from "@app/services/pki-subscriber/pki-subscriber-service";
import { pkiSyncCleanupQueueServiceFactory } from "@app/services/pki-sync/pki-sync-cleanup-queue";
import { pkiSyncDALFactory } from "@app/services/pki-sync/pki-sync-dal";
import { pkiSyncQueueFactory } from "@app/services/pki-sync/pki-sync-queue";
import { pkiSyncServiceFactory } from "@app/services/pki-sync/pki-sync-service";
import { pkiTemplatesDALFactory } from "@app/services/pki-templates/pki-templates-dal";
import { pkiTemplatesServiceFactory } from "@app/services/pki-templates/pki-templates-service";
import { projectDALFactory } from "@app/services/project/project-dal";
@@ -979,6 +983,7 @@ export const registerRoutes = async (
const pkiCollectionDAL = pkiCollectionDALFactory(db);
const pkiCollectionItemDAL = pkiCollectionItemDALFactory(db);
const pkiSubscriberDAL = pkiSubscriberDALFactory(db);
const pkiSyncDAL = pkiSyncDALFactory(db);
const pkiTemplatesDAL = pkiTemplatesDALFactory(db);
const instanceRelayConfigDAL = instanceRelayConfigDalFactory(db);
@@ -988,21 +993,6 @@ export const registerRoutes = async (
const orgGatewayConfigV2DAL = orgGatewayConfigV2DalFactory(db);
const certificateService = certificateServiceFactory({
certificateDAL,
certificateBodyDAL,
certificateSecretDAL,
certificateAuthorityDAL,
certificateAuthorityCertDAL,
certificateAuthorityCrlDAL,
certificateAuthoritySecretDAL,
projectDAL,
kmsService,
permissionService,
pkiCollectionDAL,
pkiCollectionItemDAL
});
const sshCertificateAuthorityService = sshCertificateAuthorityServiceFactory({
sshCertificateAuthorityDAL,
sshCertificateAuthoritySecretDAL,
@@ -1110,7 +1100,9 @@ export const registerRoutes = async (
instanceRelayConfigDAL,
orgRelayConfigDAL,
relayDAL,
kmsService
kmsService,
licenseService,
permissionService
});
const gatewayV2Service = gatewayV2ServiceFactory({
@@ -1862,52 +1854,6 @@ export const registerRoutes = async (
licenseService
});
const certificateAuthorityQueue = certificateAuthorityQueueFactory({
certificateAuthorityCrlDAL,
certificateAuthorityDAL,
certificateAuthoritySecretDAL,
certificateDAL,
projectDAL,
kmsService,
queueService,
pkiSubscriberDAL,
certificateBodyDAL,
certificateSecretDAL,
externalCertificateAuthorityDAL,
keyStore,
appConnectionDAL,
appConnectionService
});
const internalCertificateAuthorityService = internalCertificateAuthorityServiceFactory({
certificateAuthorityDAL,
certificateAuthorityCertDAL,
certificateAuthoritySecretDAL,
certificateAuthorityCrlDAL,
certificateTemplateDAL,
certificateAuthorityQueue,
certificateDAL,
certificateBodyDAL,
certificateSecretDAL,
pkiCollectionDAL,
pkiCollectionItemDAL,
projectDAL,
internalCertificateAuthorityDAL,
kmsService,
permissionService
});
const certificateEstService = certificateEstServiceFactory({
internalCertificateAuthorityService,
certificateTemplateService,
certificateTemplateDAL,
certificateAuthorityCertDAL,
certificateAuthorityDAL,
projectDAL,
kmsService,
licenseService
});
const kmipService = kmipServiceFactory({
kmipClientDAL,
permissionService,
@@ -1950,6 +1896,79 @@ export const registerRoutes = async (
gatewayV2Service
});
const pkiSyncQueue = pkiSyncQueueFactory({
queueService,
kmsService,
appConnectionDAL,
keyStore,
pkiSyncDAL,
auditLogService,
projectDAL,
licenseService,
certificateDAL,
certificateBodyDAL,
certificateSecretDAL,
certificateAuthorityDAL,
certificateAuthorityCertDAL
});
const pkiSyncCleanup = pkiSyncCleanupQueueServiceFactory({
queueService,
pkiSyncDAL,
pkiSyncQueue
});
const internalCaFns = InternalCertificateAuthorityFns({
certificateAuthorityDAL,
certificateAuthorityCertDAL,
certificateAuthoritySecretDAL,
certificateAuthorityCrlDAL,
certificateDAL,
certificateBodyDAL,
certificateSecretDAL,
projectDAL,
kmsService,
pkiSyncDAL,
pkiSyncQueue
});
const certificateAuthorityQueue = certificateAuthorityQueueFactory({
certificateAuthorityCrlDAL,
certificateAuthorityDAL,
certificateAuthoritySecretDAL,
certificateDAL,
projectDAL,
kmsService,
queueService,
pkiSubscriberDAL,
certificateBodyDAL,
certificateSecretDAL,
externalCertificateAuthorityDAL,
keyStore,
appConnectionDAL,
appConnectionService,
pkiSyncDAL,
pkiSyncQueue
});
const internalCertificateAuthorityService = internalCertificateAuthorityServiceFactory({
certificateAuthorityDAL,
certificateAuthorityCertDAL,
certificateAuthoritySecretDAL,
certificateAuthorityCrlDAL,
certificateTemplateDAL,
certificateAuthorityQueue,
certificateDAL,
certificateBodyDAL,
certificateSecretDAL,
pkiCollectionDAL,
pkiCollectionItemDAL,
projectDAL,
internalCertificateAuthorityDAL,
kmsService,
permissionService
});
const certificateAuthorityService = certificateAuthorityServiceFactory({
certificateAuthorityDAL,
permissionService,
@@ -1962,19 +1981,20 @@ export const registerRoutes = async (
certificateSecretDAL,
kmsService,
pkiSubscriberDAL,
projectDAL
projectDAL,
pkiSyncDAL,
pkiSyncQueue
});
const internalCaFns = InternalCertificateAuthorityFns({
certificateAuthorityDAL,
const certificateEstService = certificateEstServiceFactory({
internalCertificateAuthorityService,
certificateTemplateService,
certificateTemplateDAL,
certificateAuthorityCertDAL,
certificateAuthoritySecretDAL,
certificateAuthorityCrlDAL,
certificateDAL,
certificateBodyDAL,
certificateSecretDAL,
certificateAuthorityDAL,
projectDAL,
kmsService
kmsService,
licenseService
});
const pkiSubscriberQueue = pkiSubscriberQueueServiceFactory({
@@ -1987,6 +2007,23 @@ export const registerRoutes = async (
internalCaFns
});
const certificateService = certificateServiceFactory({
certificateDAL,
certificateBodyDAL,
certificateSecretDAL,
certificateAuthorityDAL,
certificateAuthorityCertDAL,
certificateAuthorityCrlDAL,
certificateAuthoritySecretDAL,
projectDAL,
kmsService,
permissionService,
pkiCollectionDAL,
pkiCollectionItemDAL,
pkiSyncDAL,
pkiSyncQueue
});
const pkiSubscriberService = pkiSubscriberServiceFactory({
pkiSubscriberDAL,
certificateAuthorityDAL,
@@ -2000,7 +2037,18 @@ export const registerRoutes = async (
kmsService,
permissionService,
certificateAuthorityQueue,
internalCaFns
internalCaFns,
pkiSyncDAL,
pkiSyncQueue
});
const pkiSyncService = pkiSyncServiceFactory({
pkiSyncDAL,
pkiSubscriberDAL,
appConnectionService,
permissionService,
licenseService,
pkiSyncQueue
});
const pkiTemplateService = pkiTemplatesServiceFactory({
@@ -2065,6 +2113,7 @@ export const registerRoutes = async (
await telemetryQueue.startTelemetryCheck();
await telemetryQueue.startAggregatedEventsJob();
await dailyResourceCleanUp.init();
await pkiSyncCleanup.init();
await dailyReminderQueueService.startDailyRemindersJob();
await dailyReminderQueueService.startSecretReminderMigrationJob();
await dailyExpiringPkiItemAlert.startSendingAlerts();
@@ -2148,6 +2197,7 @@ export const registerRoutes = async (
pkiAlert: pkiAlertService,
pkiCollection: pkiCollectionService,
pkiSubscriber: pkiSubscriberService,
pkiSync: pkiSyncService,
pkiTemplate: pkiTemplateService,
secretScanning: secretScanningService,
license: licenseService,

View File

@@ -1,16 +1,16 @@
import { ForbiddenError } from "@casl/ability";
import { z } from "zod";
import { SecretFoldersSchema, SecretImportsSchema, UsersSchema } from "@app/db/schemas";
import { SecretFoldersSchema, SecretImportsSchema, SecretType, UsersSchema } from "@app/db/schemas";
import { RemindersSchema } from "@app/db/schemas/reminders";
import { EventType, UserAgentType } from "@app/ee/services/audit-log/audit-log-types";
import { ProjectPermissionSecretActions } from "@app/ee/services/permission/project-permission";
import { SecretRotationV2Schema } from "@app/ee/services/secret-rotation-v2/secret-rotation-v2-union-schema";
import { DASHBOARD } from "@app/lib/api-docs";
import { BadRequestError } from "@app/lib/errors";
import { BadRequestError, NotFoundError } from "@app/lib/errors";
import { removeTrailingSlash } from "@app/lib/fn";
import { OrderByDirection } from "@app/lib/types";
import { secretsLimit } from "@app/server/config/rateLimiter";
import { readLimit, secretsLimit } from "@app/server/config/rateLimiter";
import { getTelemetryDistinctId } from "@app/server/lib/telemetry";
import { getUserAgentType } from "@app/server/plugins/audit-log";
import { verifyAuth } from "@app/server/plugins/auth/verify-auth";
@@ -111,6 +111,7 @@ export const registerDashboardRouter = async (server: FastifyZodProvider) => {
SecretRotationV2Schema,
z.object({
secrets: secretRawSchema
.omit({ secretValue: true })
.extend({
secretValueHidden: z.boolean(),
secretPath: z.string().optional(),
@@ -124,7 +125,9 @@ export const registerDashboardRouter = async (server: FastifyZodProvider) => {
.array()
.optional(),
secrets: secretRawSchema
.omit({ secretValue: true })
.extend({
isEmpty: z.boolean(),
secretValueHidden: z.boolean(),
secretPath: z.string().optional(),
secretMetadata: ResourceMetadataSchema.optional(),
@@ -219,7 +222,9 @@ export const registerDashboardRouter = async (server: FastifyZodProvider) => {
let imports: Awaited<ReturnType<typeof server.services.secretImport.getImportsMultiEnv>> | undefined;
let folders: Awaited<ReturnType<typeof server.services.folder.getFoldersMultiEnv>> | undefined;
let secrets: Awaited<ReturnType<typeof server.services.secret.getSecretsRawMultiEnv>> | undefined;
let secrets:
| (Awaited<ReturnType<typeof server.services.secret.getSecretsRawMultiEnv>>[number] & { isEmpty: boolean })[]
| undefined;
let dynamicSecrets:
| Awaited<ReturnType<typeof server.services.dynamicSecret.listDynamicSecretsByEnvs>>
| undefined;
@@ -426,43 +431,51 @@ export const registerDashboardRouter = async (server: FastifyZodProvider) => {
});
if (remainingLimit > 0 && totalSecretCount > adjustedOffset) {
secrets = await server.services.secret.getSecretsRawMultiEnv({
viewSecretValue: true,
actorId: req.permission.id,
actor: req.permission.type,
actorOrgId: req.permission.orgId,
environments,
actorAuthMethod: req.permission.authMethod,
projectId,
path: secretPath,
orderBy,
orderDirection,
search,
limit: remainingLimit,
offset: adjustedOffset,
isInternal: true
});
secrets = (
await server.services.secret.getSecretsRawMultiEnv({
viewSecretValue: true,
actorId: req.permission.id,
actor: req.permission.type,
actorOrgId: req.permission.orgId,
environments,
actorAuthMethod: req.permission.authMethod,
projectId,
path: secretPath,
orderBy,
orderDirection,
search,
limit: remainingLimit,
offset: adjustedOffset,
isInternal: true
})
).map((secret) => ({ ...secret, isEmpty: !secret.secretValue }));
}
}
if (secrets?.length || secretRotations?.length) {
for await (const environment of environments) {
const secretCountFromEnv =
(secrets?.filter((secret) => secret.environment === environment).length ?? 0) +
(secretRotations
?.filter((rotation) => rotation.environment.slug === environment)
.flatMap((rotation) => rotation.secrets.filter((secret) => Boolean(secret))).length ?? 0);
const secretIds = [
...new Set(
[
...(secrets?.filter((secret) => secret.environment === environment) ?? []),
...(secretRotations
?.filter((rotation) => rotation.environment.slug === environment)
.flatMap((rotation) => rotation.secrets.filter((secret) => Boolean(secret))) ?? [])
].map((secret) => secret.id)
)
];
if (secretCountFromEnv) {
if (secretIds) {
await server.services.auditLog.createAuditLog({
projectId,
...req.auditLogInfo,
event: {
type: EventType.GET_SECRETS,
type: EventType.DASHBOARD_LIST_SECRETS,
metadata: {
environment,
secretPath,
numberOfSecrets: secretCountFromEnv
numberOfSecrets: secretIds.length,
secretIds
}
}
});
@@ -473,7 +486,7 @@ export const registerDashboardRouter = async (server: FastifyZodProvider) => {
distinctId: getTelemetryDistinctId(req),
organizationId: req.permission.orgId,
properties: {
numberOfSecrets: secretCountFromEnv,
numberOfSecrets: secretIds.length,
projectId,
environment,
secretPath,
@@ -584,7 +597,6 @@ export const registerDashboardRouter = async (server: FastifyZodProvider) => {
.optional(),
search: z.string().trim().describe(DASHBOARD.SECRET_DETAILS_LIST.search).optional(),
tags: z.string().trim().transform(decodeURIComponent).describe(DASHBOARD.SECRET_DETAILS_LIST.tags).optional(),
viewSecretValue: booleanSchema.default(true),
includeSecrets: booleanSchema.describe(DASHBOARD.SECRET_DETAILS_LIST.includeSecrets),
includeFolders: booleanSchema.describe(DASHBOARD.SECRET_DETAILS_LIST.includeFolders),
includeDynamicSecrets: booleanSchema.describe(DASHBOARD.SECRET_DETAILS_LIST.includeDynamicSecrets),
@@ -606,7 +618,9 @@ export const registerDashboardRouter = async (server: FastifyZodProvider) => {
SecretRotationV2Schema,
z.object({
secrets: secretRawSchema
.omit({ secretValue: true })
.extend({
isEmpty: z.boolean(),
secretValueHidden: z.boolean(),
secretPath: z.string().optional(),
secretMetadata: ResourceMetadataSchema.optional(),
@@ -619,7 +633,9 @@ export const registerDashboardRouter = async (server: FastifyZodProvider) => {
.array()
.optional(),
secrets: secretRawSchema
.omit({ secretValue: true })
.extend({
isEmpty: z.boolean(),
secretReminderRecipients: z
.object({
user: UsersSchema.pick({ id: true, email: true, username: true }),
@@ -715,12 +731,21 @@ export const registerDashboardRouter = async (server: FastifyZodProvider) => {
let folders: Awaited<ReturnType<typeof server.services.folder.getFolders>> | undefined;
let secrets:
| (Awaited<ReturnType<typeof server.services.secret.getSecretsRaw>>["secrets"][number] & {
isEmpty: boolean;
reminder: Awaited<ReturnType<typeof server.services.reminder.getRemindersForDashboard>>[string] | null;
})[]
| undefined;
let dynamicSecrets: Awaited<ReturnType<typeof server.services.dynamicSecret.listDynamicSecretsByEnv>> | undefined;
let secretRotations:
| Awaited<ReturnType<typeof server.services.secretRotationV2.getDashboardSecretRotations>>
| (Awaited<ReturnType<typeof server.services.secretRotationV2.getDashboardSecretRotations>>[number] & {
secrets: (NonNullable<
Awaited<
ReturnType<typeof server.services.secretRotationV2.getDashboardSecretRotations>
>[number]["secrets"][number] & {
isEmpty: boolean;
}
> | null)[];
})[]
| undefined;
let totalImportCount: number | undefined;
@@ -822,19 +847,31 @@ export const registerDashboardRouter = async (server: FastifyZodProvider) => {
);
if (remainingLimit > 0 && totalSecretRotationCount > adjustedOffset) {
secretRotations = await server.services.secretRotationV2.getDashboardSecretRotations(
{
projectId,
search,
orderBy,
orderDirection,
environments: [environment],
secretPath,
limit: remainingLimit,
offset: adjustedOffset
},
req.permission
);
secretRotations = (
await server.services.secretRotationV2.getDashboardSecretRotations(
{
projectId,
search,
orderBy,
orderDirection,
environments: [environment],
secretPath,
limit: remainingLimit,
offset: adjustedOffset
},
req.permission
)
).map((rotation) => ({
...rotation,
secrets: rotation.secrets.map((secret) =>
secret
? {
...secret,
isEmpty: !secret.secretValue
}
: secret
)
}));
await server.services.auditLog.createAuditLog({
projectId,
@@ -919,7 +956,7 @@ export const registerDashboardRouter = async (server: FastifyZodProvider) => {
await server.services.secret.getSecretsRaw({
actorId: req.permission.id,
actor: req.permission.type,
viewSecretValue: req.query.viewSecretValue,
viewSecretValue: true,
throwOnMissingReadValuePermission: false,
actorOrgId: req.permission.orgId,
environment,
@@ -943,6 +980,7 @@ export const registerDashboardRouter = async (server: FastifyZodProvider) => {
secrets = rawSecrets.map((secret) => ({
...secret,
isEmpty: !secret.secretValue,
reminder: reminders[secret.id] ?? null
}));
}
@@ -977,19 +1015,25 @@ export const registerDashboardRouter = async (server: FastifyZodProvider) => {
}));
if (secrets?.length || secretRotations?.length) {
const secretCount =
(secrets?.length ?? 0) +
(secretRotations?.flatMap((rotation) => rotation.secrets.filter((secret) => Boolean(secret))).length ?? 0);
const secretIds = [
...new Set(
[
...(secrets ?? []),
...(secretRotations?.flatMap((rotation) => rotation.secrets.filter((secret) => Boolean(secret))) ?? [])
].map((secret) => secret.id)
)
];
await server.services.auditLog.createAuditLog({
projectId,
...req.auditLogInfo,
event: {
type: EventType.GET_SECRETS,
type: EventType.DASHBOARD_LIST_SECRETS,
metadata: {
environment,
secretPath,
numberOfSecrets: secretCount
numberOfSecrets: secretIds.length,
secretIds
}
}
});
@@ -1000,7 +1044,7 @@ export const registerDashboardRouter = async (server: FastifyZodProvider) => {
distinctId: getTelemetryDistinctId(req),
organizationId: req.permission.orgId,
properties: {
numberOfSecrets: secretCount,
numberOfSecrets: secretIds.length,
projectId,
environment,
secretPath,
@@ -1060,6 +1104,7 @@ export const registerDashboardRouter = async (server: FastifyZodProvider) => {
.array()
.optional(),
secrets: secretRawSchema
.omit({ secretValue: true })
.extend({
secretValueHidden: z.boolean(),
secretPath: z.string().optional(),
@@ -1145,18 +1190,20 @@ export const registerDashboardRouter = async (server: FastifyZodProvider) => {
);
for await (const environment of environments) {
const secretCountForEnv = secrets.filter((secret) => secret.environment === environment).length;
const envSecrets = secrets.filter((secret) => secret.environment === environment);
const secretCountForEnv = envSecrets.length;
if (secretCountForEnv) {
await server.services.auditLog.createAuditLog({
projectId,
...req.auditLogInfo,
event: {
type: EventType.GET_SECRETS,
type: EventType.DASHBOARD_LIST_SECRETS,
metadata: {
environment,
secretPath,
numberOfSecrets: secretCountForEnv
numberOfSecrets: secretCountForEnv,
secretIds: envSecrets.map((secret) => secret.id)
}
}
});
@@ -1259,6 +1306,7 @@ export const registerDashboardRouter = async (server: FastifyZodProvider) => {
}),
response: {
200: z.object({
// TODO(scott): omit secretValue here, but requires refactor of uploading env/copy from board
secrets: secretRawSchema
.extend({
secretPath: z.string().optional(),
@@ -1310,6 +1358,7 @@ export const registerDashboardRouter = async (server: FastifyZodProvider) => {
}),
response: {
200: z.object({
// TODO(scott): omit secretValue here, but requires refactor of uploading env/copy from board
secrets: secretRawSchema
.extend({
secretValueHidden: z.boolean(),
@@ -1345,11 +1394,12 @@ export const registerDashboardRouter = async (server: FastifyZodProvider) => {
projectId,
...req.auditLogInfo,
event: {
type: EventType.GET_SECRETS,
type: EventType.DASHBOARD_LIST_SECRETS,
metadata: {
environment,
secretPath,
numberOfSecrets: secrets.length
numberOfSecrets: secrets.length,
secretIds: secrets.map((secret) => secret.id)
}
}
});
@@ -1373,4 +1423,256 @@ export const registerDashboardRouter = async (server: FastifyZodProvider) => {
return { secrets };
}
});
// Returns the decrypted value of a single secret (shared, or the caller's
// personal override) for the dashboard's reveal-on-demand flow.
server.route({
  method: "GET",
  url: "/secret-value",
  config: {
    rateLimit: secretsLimit
  },
  schema: {
    security: [
      {
        bearerAuth: []
      }
    ],
    querystring: z.object({
      projectId: z.string().trim(),
      environment: z.string().trim(),
      secretPath: z.string().trim().default("/").transform(removeTrailingSlash),
      secretKey: z.string().trim(),
      // query params arrive as strings; coerce "true"/"false" to a boolean
      isOverride: z
        .enum(["true", "false"])
        .transform((value) => value === "true")
        .optional()
    }),
    response: {
      200: z.object({
        valueOverride: z.string().optional(),
        value: z.string().optional()
      })
    }
  },
  onRequest: verifyAuth([AuthMode.JWT]),
  handler: async (req) => {
    const { secretPath, projectId, environment, secretKey, isOverride } = req.query;

    // TODO (scott): just get the secret instead of searching for it in list
    const { secrets } = await server.services.secret.getSecretsRaw({
      actorId: req.permission.id,
      actor: req.permission.type,
      viewSecretValue: true,
      throwOnMissingReadValuePermission: false,
      actorOrgId: req.permission.orgId,
      environment,
      actorAuthMethod: req.permission.authMethod,
      projectId,
      path: secretPath,
      search: secretKey,
      includeTagsInSearch: true,
      includeMetadataInSearch: true
    });

    if (isOverride) {
      const personalSecret = secrets.find(
        (secret) => secret.type === SecretType.Personal && secret.secretKey === secretKey
      );

      if (!personalSecret)
        throw new BadRequestError({
          message: `Could not find personal secret with key "${secretKey}" at secret path "${secretPath}" in environment "${environment}" for project with ID "${projectId}"`
        });

      // fix: the original re-checked `if (personalSecret)` after the throw above —
      // a dead guard that left an implicit `undefined` fall-through; the throw
      // already guarantees personalSecret is defined here
      return {
        valueOverride: personalSecret.secretValue
      };
    }

    const sharedSecret = secrets.find(
      (secret) => secret.type === SecretType.Shared && secret.secretKey === secretKey
    );

    if (!sharedSecret)
      throw new BadRequestError({
        message: `Could not find secret with key "${secretKey}" at secret path "${secretPath}" in environment "${environment}" for project with ID "${projectId}"`
      });

    // only audit if not personal
    await server.services.auditLog.createAuditLog({
      projectId,
      ...req.auditLogInfo,
      event: {
        type: EventType.DASHBOARD_GET_SECRET_VALUE,
        metadata: {
          environment: req.query.environment,
          secretPath: req.query.secretPath,
          secretKey,
          secretId: sharedSecret.id
        }
      }
    });

    return { value: sharedSecret.secretValue };
  }
});
// Lists secrets brought in through secret imports at the given path. Secret
// values are not returned; each secret instead carries an `isEmpty` flag.
server.route({
  url: "/secret-imports",
  method: "GET",
  config: {
    rateLimit: secretsLimit
  },
  schema: {
    querystring: z.object({
      projectId: z.string().trim(),
      environment: z.string().trim(),
      path: z.string().trim().default("/").transform(removeTrailingSlash)
    }),
    response: {
      200: z.object({
        secrets: z
          .object({
            secretPath: z.string(),
            environment: z.string(),
            environmentInfo: z.object({
              id: z.string(),
              name: z.string(),
              slug: z.string()
            }),
            folderId: z.string().optional(),
            secrets: secretRawSchema.omit({ secretValue: true }).extend({ isEmpty: z.boolean() }).array()
          })
          .array()
      })
    }
  },
  onRequest: verifyAuth([AuthMode.JWT]),
  handler: async (req) => {
    const importedSecrets = await server.services.secretImport.getRawSecretsFromImports({
      actorId: req.permission.id,
      actor: req.permission.type,
      actorAuthMethod: req.permission.authMethod,
      actorOrgId: req.permission.orgId,
      ...req.query
    });

    // NOTE(review): these ids come from the import entries themselves, not the
    // individual secrets inside them — confirm this is the intended audit payload
    await server.services.auditLog.createAuditLog({
      projectId: req.query.projectId,
      ...req.auditLogInfo,
      event: {
        type: EventType.DASHBOARD_LIST_SECRETS,
        metadata: {
          environment: req.query.environment,
          secretPath: req.query.path,
          numberOfSecrets: importedSecrets.length,
          secretIds: importedSecrets.map((secret) => secret.id)
        }
      }
    });

    const secretsWithEmptyFlag = importedSecrets.map((importGroup) => {
      const maskedSecrets = importGroup.secrets.map((secret) => ({
        ...secret,
        isEmpty: !secret.secretValue
      }));
      return { ...importGroup, secrets: maskedSecrets };
    });

    return { secrets: secretsWithEmptyFlag };
  }
});
// Paginated version history for a single secret. Values are omitted from the
// response; each entry carries a `secretValueHidden` marker instead.
server.route({
  method: "GET",
  url: "/secret-versions/:secretId",
  config: {
    rateLimit: readLimit
  },
  schema: {
    params: z.object({
      secretId: z.string()
    }),
    querystring: z.object({
      offset: z.coerce.number(),
      limit: z.coerce.number()
    }),
    response: {
      200: z.object({
        secretVersions: secretRawSchema
          .omit({ secretValue: true })
          .extend({
            secretValueHidden: z.boolean()
          })
          .array()
      })
    }
  },
  onRequest: verifyAuth([AuthMode.JWT]),
  handler: async (req) => {
    const { limit, offset } = req.query;

    const secretVersions = await server.services.secret.getSecretVersions({
      actor: req.permission.type,
      actorId: req.permission.id,
      actorAuthMethod: req.permission.authMethod,
      actorOrgId: req.permission.orgId,
      limit,
      offset,
      secretId: req.params.secretId
    });

    return { secretVersions };
  }
});
// Returns the decrypted value of a specific historical version of a secret,
// writing an audit log entry for the access.
server.route({
  method: "GET",
  url: "/secret-versions/:secretId/value/:version",
  config: {
    rateLimit: readLimit
  },
  schema: {
    params: z.object({
      secretId: z.string(),
      version: z.string()
    }),
    response: {
      200: z.object({
        value: z.string()
      })
    }
  },
  onRequest: verifyAuth([AuthMode.JWT]),
  handler: async (req) => {
    const { version, secretId } = req.params;

    // fetch only the requested version via the `secretVersions` filter
    const [secretVersion] = await server.services.secret.getSecretVersions({
      actor: req.permission.type,
      actorId: req.permission.id,
      actorAuthMethod: req.permission.authMethod,
      actorOrgId: req.permission.orgId,
      secretId,
      secretVersions: [version]
    });

    if (!secretVersion)
      throw new NotFoundError({
        // fix: the original message was missing the closing quote after the secret ID
        message: `Could not find secret version "${version}" for secret with ID "${secretId}"`
      });

    await server.services.auditLog.createAuditLog({
      projectId: secretVersion.workspace,
      ...req.auditLogInfo,
      event: {
        type: EventType.DASHBOARD_GET_SECRET_VERSION_VALUE,
        metadata: {
          secretId,
          version
        }
      }
    });

    return { value: secretVersion.secretValue };
  }
});
};

View File

@@ -48,6 +48,7 @@ import { registerPasswordRouter } from "./password-router";
import { registerPkiAlertRouter } from "./pki-alert-router";
import { registerPkiCollectionRouter } from "./pki-collection-router";
import { registerPkiSubscriberRouter } from "./pki-subscriber-router";
import { PKI_SYNC_REGISTER_ROUTER_MAP, registerPkiSyncRouter } from "./pki-sync-routers";
import { registerProjectEnvRouter } from "./project-env-router";
import { registerProjectKeyRouter } from "./project-key-router";
import { registerProjectMembershipRouter } from "./project-membership-router";
@@ -147,6 +148,15 @@ export const registerV1Routes = async (server: FastifyZodProvider) => {
await pkiRouter.register(registerPkiAlertRouter, { prefix: "/alerts" });
await pkiRouter.register(registerPkiCollectionRouter, { prefix: "/collections" });
await pkiRouter.register(registerPkiSubscriberRouter, { prefix: "/subscribers" });
await pkiRouter.register(
async (pkiSyncRouter) => {
await pkiSyncRouter.register(registerPkiSyncRouter);
for await (const [destination, router] of Object.entries(PKI_SYNC_REGISTER_ROUTER_MAP)) {
await pkiSyncRouter.register(router, { prefix: `/${destination}` });
}
},
{ prefix: "/syncs" }
);
},
{ prefix: "/pki" }
);

View File

@@ -0,0 +1,22 @@
import {
AZURE_KEY_VAULT_PKI_SYNC_LIST_OPTION,
AzureKeyVaultPkiSyncSchema,
CreateAzureKeyVaultPkiSyncSchema,
UpdateAzureKeyVaultPkiSyncSchema
} from "@app/services/pki-sync/azure-key-vault";
import { PkiSync } from "@app/services/pki-sync/pki-sync-enums";
import { registerSyncPkiEndpoints } from "./pki-sync-endpoints";
/**
 * Registers the Azure Key Vault flavour of the shared PKI sync endpoints,
 * wiring in its destination-specific create/update/response schemas and
 * capability flags.
 */
export const registerAzureKeyVaultPkiSyncRouter = async (server: FastifyZodProvider) => {
  const { canImportCertificates, canRemoveCertificates } = AZURE_KEY_VAULT_PKI_SYNC_LIST_OPTION;

  return registerSyncPkiEndpoints({
    destination: PkiSync.AzureKeyVault,
    server,
    responseSchema: AzureKeyVaultPkiSyncSchema,
    createSchema: CreateAzureKeyVaultPkiSyncSchema,
    updateSchema: UpdateAzureKeyVaultPkiSyncSchema,
    syncOptions: { canImportCertificates, canRemoveCertificates }
  });
};

View File

@@ -0,0 +1,9 @@
import { PkiSync } from "@app/services/pki-sync/pki-sync-enums";
import { registerAzureKeyVaultPkiSyncRouter } from "./azure-key-vault-pki-sync-router";
export * from "./pki-sync-router";
// Maps each PKI sync destination to the router that registers its
// destination-specific endpoints; the v1 route registrar mounts each entry
// under `/pki/syncs/<destination>`.
export const PKI_SYNC_REGISTER_ROUTER_MAP: Record<PkiSync, (server: FastifyZodProvider) => Promise<void>> = {
  [PkiSync.AzureKeyVault]: registerAzureKeyVaultPkiSyncRouter
};

View File

@@ -0,0 +1,341 @@
import { z } from "zod";
import { EventType } from "@app/ee/services/audit-log/audit-log-types";
import { ApiDocsTags } from "@app/lib/api-docs";
import { readLimit, writeLimit } from "@app/server/config/rateLimiter";
import { verifyAuth } from "@app/server/plugins/auth/verify-auth";
import { AuthMode } from "@app/services/auth/auth-type";
import { PkiSync } from "@app/services/pki-sync/pki-sync-enums";
import { PKI_SYNC_NAME_MAP } from "@app/services/pki-sync/pki-sync-maps";
/**
 * Registers the generic PKI sync endpoints for a single destination:
 * list/get/create/update/delete plus the sync, import, and remove-certificates
 * trigger routes. Destination-specific routers (e.g. Azure Key Vault) call
 * this with their own request/response schemas; route shapes and audit
 * logging are shared here.
 */
export const registerSyncPkiEndpoints = ({
  server,
  destination,
  createSchema,
  updateSchema,
  responseSchema,
  syncOptions
}: {
  destination: PkiSync;
  server: FastifyZodProvider;
  // create body; destinationConfig/syncOptions are validated per destination
  // by the concrete schema passed in
  createSchema: z.ZodType<{
    name: string;
    projectId: string;
    connectionId: string;
    destinationConfig: Record<string, unknown>;
    syncOptions?: Record<string, unknown>;
    description?: string;
    isAutoSyncEnabled?: boolean;
    subscriberId?: string;
  }>;
  // PATCH body; every field optional
  updateSchema: z.ZodType<{
    connectionId?: string;
    name?: string;
    destinationConfig?: Record<string, unknown>;
    syncOptions?: Record<string, unknown>;
    description?: string;
    isAutoSyncEnabled?: boolean;
    subscriberId?: string;
  }>;
  responseSchema: z.ZodTypeAny;
  // capability flags controlling which optional routes are registered
  syncOptions: {
    canImportCertificates: boolean;
    canRemoveCertificates: boolean;
  };
}) => {
  // human-readable destination name used in the generated API docs text
  const destinationName = PKI_SYNC_NAME_MAP[destination];

  // List all syncs of this destination within a project.
  server.route({
    method: "GET",
    url: `/`,
    config: {
      rateLimit: readLimit
    },
    schema: {
      hide: false,
      tags: [ApiDocsTags.PkiSyncs],
      description: `List the ${destinationName} PKI Syncs for the specified project.`,
      querystring: z.object({
        projectId: z.string().trim().min(1, "Project ID required")
      }),
      response: {
        200: z.object({ pkiSyncs: responseSchema.array() })
      }
    },
    onRequest: verifyAuth([AuthMode.JWT, AuthMode.IDENTITY_ACCESS_TOKEN]),
    handler: async (req) => {
      const {
        query: { projectId }
      } = req;

      const pkiSyncs = await server.services.pkiSync.listPkiSyncsByProjectId({ projectId }, req.permission);

      await server.services.auditLog.createAuditLog({
        ...req.auditLogInfo,
        projectId,
        event: {
          type: EventType.GET_PKI_SYNCS,
          metadata: {
            projectId
          }
        }
      });

      return { pkiSyncs };
    }
  });

  // Fetch a single sync by id.
  server.route({
    method: "GET",
    url: "/:pkiSyncId",
    config: {
      rateLimit: readLimit
    },
    schema: {
      hide: false,
      tags: [ApiDocsTags.PkiSyncs],
      description: `Get the specified ${destinationName} PKI Sync by ID.`,
      params: z.object({
        pkiSyncId: z.string()
      }),
      response: {
        200: responseSchema
      }
    },
    onRequest: verifyAuth([AuthMode.JWT, AuthMode.IDENTITY_ACCESS_TOKEN]),
    handler: async (req) => {
      const { pkiSyncId } = req.params;

      const pkiSync = await server.services.pkiSync.findPkiSyncById({ id: pkiSyncId }, req.permission);

      await server.services.auditLog.createAuditLog({
        ...req.auditLogInfo,
        projectId: pkiSync.projectId,
        event: {
          type: EventType.GET_PKI_SYNC,
          metadata: {
            syncId: pkiSyncId,
            destination
          }
        }
      });

      return pkiSync;
    }
  });

  // Create a sync for this destination; `destination` is injected server-side.
  server.route({
    method: "POST",
    url: "/",
    config: {
      rateLimit: writeLimit
    },
    schema: {
      hide: false,
      tags: [ApiDocsTags.PkiSyncs],
      description: `Create a ${destinationName} PKI Sync for the specified project.`,
      body: createSchema,
      response: {
        200: responseSchema
      }
    },
    onRequest: verifyAuth([AuthMode.JWT, AuthMode.IDENTITY_ACCESS_TOKEN]),
    handler: async (req) => {
      const pkiSync = await server.services.pkiSync.createPkiSync({ ...req.body, destination }, req.permission);

      await server.services.auditLog.createAuditLog({
        ...req.auditLogInfo,
        projectId: pkiSync.projectId,
        event: {
          type: EventType.CREATE_PKI_SYNC,
          metadata: {
            pkiSyncId: pkiSync.id,
            name: pkiSync.name,
            destination
          }
        }
      });

      return pkiSync;
    }
  });

  // Partial update of an existing sync.
  server.route({
    method: "PATCH",
    url: "/:pkiSyncId",
    config: {
      rateLimit: writeLimit
    },
    schema: {
      hide: false,
      tags: [ApiDocsTags.PkiSyncs],
      description: `Update the specified ${destinationName} PKI Sync.`,
      params: z.object({
        pkiSyncId: z.string()
      }),
      body: updateSchema,
      response: {
        200: responseSchema
      }
    },
    onRequest: verifyAuth([AuthMode.JWT, AuthMode.IDENTITY_ACCESS_TOKEN]),
    handler: async (req) => {
      const { pkiSyncId } = req.params;

      const pkiSync = await server.services.pkiSync.updatePkiSync({ ...req.body, id: pkiSyncId }, req.permission);

      await server.services.auditLog.createAuditLog({
        ...req.auditLogInfo,
        projectId: pkiSync.projectId,
        event: {
          type: EventType.UPDATE_PKI_SYNC,
          metadata: {
            pkiSyncId,
            name: pkiSync.name
          }
        }
      });

      return pkiSync;
    }
  });

  // Delete a sync; the deleted record is echoed back in the response.
  server.route({
    method: "DELETE",
    url: `/:pkiSyncId`,
    config: {
      rateLimit: writeLimit
    },
    schema: {
      hide: false,
      tags: [ApiDocsTags.PkiSyncs],
      description: `Delete the specified ${destinationName} PKI Sync.`,
      params: z.object({
        pkiSyncId: z.string()
      }),
      response: {
        200: responseSchema
      }
    },
    onRequest: verifyAuth([AuthMode.JWT, AuthMode.IDENTITY_ACCESS_TOKEN]),
    handler: async (req) => {
      const { pkiSyncId } = req.params;

      const pkiSync = await server.services.pkiSync.deletePkiSync({ id: pkiSyncId }, req.permission);

      await server.services.auditLog.createAuditLog({
        ...req.auditLogInfo,
        projectId: pkiSync.projectId,
        event: {
          type: EventType.DELETE_PKI_SYNC,
          metadata: {
            pkiSyncId,
            name: pkiSync.name,
            destination: pkiSync.destination
          }
        }
      });

      return pkiSync;
    }
  });

  // Trigger a certificate sync to the destination.
  // NOTE(review): the trigger routes below do not write audit logs — confirm
  // this is intentional (the CRUD routes above all do).
  server.route({
    method: "POST",
    url: "/:pkiSyncId/sync",
    config: {
      rateLimit: writeLimit
    },
    schema: {
      hide: false,
      tags: [ApiDocsTags.PkiSyncs],
      description: `Trigger a sync for the specified ${destinationName} PKI Sync.`,
      params: z.object({
        pkiSyncId: z.string()
      }),
      response: {
        200: z.object({ message: z.string() })
      }
    },
    onRequest: verifyAuth([AuthMode.JWT, AuthMode.IDENTITY_ACCESS_TOKEN]),
    handler: async (req) => {
      const { pkiSyncId } = req.params;

      const result = await server.services.pkiSync.triggerPkiSyncSyncCertificatesById(
        {
          id: pkiSyncId
        },
        req.permission
      );

      return result;
    }
  });

  // Only register import route if the destination supports it
  if (syncOptions.canImportCertificates) {
    server.route({
      method: "POST",
      url: "/:pkiSyncId/import",
      config: {
        rateLimit: writeLimit
      },
      schema: {
        hide: false,
        tags: [ApiDocsTags.PkiSyncs],
        description: `Import certificates from the specified ${destinationName} PKI Sync destination.`,
        params: z.object({
          pkiSyncId: z.string()
        }),
        response: {
          200: z.object({ message: z.string() })
        }
      },
      onRequest: verifyAuth([AuthMode.JWT, AuthMode.IDENTITY_ACCESS_TOKEN]),
      handler: async (req) => {
        const { pkiSyncId } = req.params;

        const result = await server.services.pkiSync.triggerPkiSyncImportCertificatesById(
          {
            id: pkiSyncId
          },
          req.permission
        );

        return result;
      }
    });
  }

  // NOTE(review): `syncOptions.canRemoveCertificates` is never consulted — the
  // remove route is registered unconditionally, unlike the gated import route
  // above. Confirm whether it should be gated the same way.
  server.route({
    method: "POST",
    url: "/:pkiSyncId/remove-certificates",
    config: {
      rateLimit: writeLimit
    },
    schema: {
      hide: false,
      tags: [ApiDocsTags.PkiSyncs],
      description: `Remove certificates from the specified ${destinationName} PKI Sync destination.`,
      params: z.object({
        pkiSyncId: z.string()
      }),
      response: {
        200: z.object({ message: z.string() })
      }
    },
    onRequest: verifyAuth([AuthMode.JWT, AuthMode.IDENTITY_ACCESS_TOKEN]),
    handler: async (req) => {
      const { pkiSyncId } = req.params;

      const result = await server.services.pkiSync.triggerPkiSyncRemoveCertificatesById(
        {
          id: pkiSyncId
        },
        req.permission
      );

      return result;
    }
  });
};

View File

@@ -0,0 +1,182 @@
import { z } from "zod";
import { EventType } from "@app/ee/services/audit-log/audit-log-types";
import { ApiDocsTags } from "@app/lib/api-docs";
import { readLimit } from "@app/server/config/rateLimiter";
import { verifyAuth } from "@app/server/plugins/auth/verify-auth";
import { AppConnection } from "@app/services/app-connection/app-connection-enums";
import { AuthMode } from "@app/services/auth/auth-type";
import { PkiSync } from "@app/services/pki-sync/pki-sync-enums";
// Response shape for a PKI sync: the sync row plus joined app-connection and
// subscriber info. Used by the generic (destination-agnostic) routes below.
const PkiSyncSchema = z.object({
  id: z.string().uuid(),
  name: z.string(),
  description: z.string().nullable().optional(),
  destination: z.nativeEnum(PkiSync),
  isAutoSyncEnabled: z.boolean(),
  destinationConfig: z.record(z.unknown()),
  syncOptions: z.record(z.unknown()),
  projectId: z.string().uuid(),
  subscriberId: z.string().uuid().nullable().optional(),
  connectionId: z.string().uuid(),
  createdAt: z.date(),
  updatedAt: z.date(),
  // Sync status fields
  syncStatus: z.string().nullable().optional(),
  lastSyncJobId: z.string().nullable().optional(),
  lastSyncMessage: z.string().nullable().optional(),
  lastSyncedAt: z.date().nullable().optional(),
  // Import status fields
  importStatus: z.string().nullable().optional(),
  lastImportJobId: z.string().nullable().optional(),
  lastImportMessage: z.string().nullable().optional(),
  lastImportedAt: z.date().nullable().optional(),
  // Remove status fields
  removeStatus: z.string().nullable().optional(),
  lastRemoveJobId: z.string().nullable().optional(),
  lastRemoveMessage: z.string().nullable().optional(),
  lastRemovedAt: z.date().nullable().optional(),
  // App connection info
  appConnectionName: z.string(),
  appConnectionApp: z.string(),
  connection: z.object({
    id: z.string(),
    name: z.string(),
    app: z.string(),
    // fix: `encryptedCredentials` removed from the response schema so the
    // connection's credential blob is never serialized to API clients
    orgId: z.string().uuid(),
    projectId: z.string().uuid().nullable().optional(),
    method: z.string(),
    description: z.string().nullable().optional(),
    version: z.number(),
    gatewayId: z.string().uuid().nullable().optional(),
    createdAt: z.date(),
    updatedAt: z.date(),
    isPlatformManagedCredentials: z.boolean().nullable().optional()
  }),
  subscriber: z
    .object({
      id: z.string(),
      name: z.string()
    })
    .nullable()
    .optional()
});
// Describes one destination's sync capabilities and certificate-naming
// constraints, as returned by the `/options` discovery endpoint.
const PkiSyncOptionsSchema = z.object({
  name: z.string(),
  connection: z.nativeEnum(AppConnection),
  destination: z.nativeEnum(PkiSync),
  canImportCertificates: z.boolean(),
  canRemoveCertificates: z.boolean(),
  // Optional certificate-name constraints imposed by the destination.
  defaultCertificateNameSchema: z.string().optional(),
  forbiddenCharacters: z.string().optional(),
  allowedCharacterPattern: z.string().optional(),
  maxCertificateNameLength: z.number().optional(),
  minCertificateNameLength: z.number().optional()
});
/**
 * Registers the destination-agnostic PKI sync routes: option discovery,
 * listing all syncs in a project, and fetching a single sync by id.
 * Destination-specific CRUD routes are registered separately per destination.
 */
export const registerPkiSyncRouter = async (server: FastifyZodProvider) => {
  // Discovery endpoint describing every supported destination's capabilities.
  server.route({
    method: "GET",
    url: "/options",
    config: {
      rateLimit: readLimit
    },
    schema: {
      hide: false,
      tags: [ApiDocsTags.PkiSyncs],
      description: "List the available PKI Sync Options.",
      response: {
        200: z.object({
          pkiSyncOptions: PkiSyncOptionsSchema.array()
        })
      }
    },
    onRequest: verifyAuth([AuthMode.JWT, AuthMode.IDENTITY_ACCESS_TOKEN]),
    handler: () => ({ pkiSyncOptions: server.services.pkiSync.getPkiSyncOptions() })
  });

  // List every sync (any destination) within a project.
  server.route({
    method: "GET",
    url: "/",
    config: {
      rateLimit: readLimit
    },
    schema: {
      hide: false,
      tags: [ApiDocsTags.PkiSyncs],
      description: "List all the PKI Syncs for the specified project.",
      querystring: z.object({
        projectId: z.string().trim().min(1)
      }),
      response: {
        200: z.object({ pkiSyncs: PkiSyncSchema.array() })
      }
    },
    onRequest: verifyAuth([AuthMode.JWT, AuthMode.IDENTITY_ACCESS_TOKEN]),
    handler: async (req) => {
      const { projectId } = req.query;

      const pkiSyncs = await server.services.pkiSync.listPkiSyncsByProjectId({ projectId }, req.permission);

      await server.services.auditLog.createAuditLog({
        ...req.auditLogInfo,
        projectId,
        event: {
          type: EventType.GET_PKI_SYNCS,
          metadata: {
            projectId
          }
        }
      });

      return { pkiSyncs };
    }
  });

  // Fetch a single sync by id, regardless of destination.
  server.route({
    method: "GET",
    url: "/:pkiSyncId",
    config: {
      rateLimit: readLimit
    },
    schema: {
      hide: false,
      tags: [ApiDocsTags.PkiSyncs],
      description: "Get a PKI Sync by ID.",
      params: z.object({
        pkiSyncId: z.string()
      }),
      response: {
        200: PkiSyncSchema
      }
    },
    onRequest: verifyAuth([AuthMode.JWT, AuthMode.IDENTITY_ACCESS_TOKEN]),
    handler: async (req) => {
      const syncId = req.params.pkiSyncId;

      const pkiSync = await server.services.pkiSync.findPkiSyncById({ id: syncId }, req.permission);

      await server.services.auditLog.createAuditLog({
        ...req.auditLogInfo,
        projectId: pkiSync.projectId,
        event: {
          type: EventType.GET_PKI_SYNC,
          metadata: {
            syncId,
            destination: pkiSync.destination
          }
        }
      });

      return pkiSync;
    }
  });
};

View File

@@ -316,7 +316,7 @@ export const registerProjectRouter = async (server: FastifyZodProvider) => {
}
],
params: z.object({
slug: slugSchema({ max: 36 }).describe("The slug of the project to get.")
slug: slugSchema({ max: 64 }).describe("The slug of the project to get.")
}),
response: {
200: projectWithEnv

View File

@@ -180,7 +180,7 @@ export const registerDeprecatedProjectRouter = async (server: FastifyZodProvider
}
],
params: z.object({
slug: slugSchema({ min: 5, max: 36 }).describe("The slug of the project to delete.")
slug: slugSchema({ min: 5, max: 64 }).describe("The slug of the project to delete.")
}),
response: {
200: SanitizedProjectSchema
@@ -233,7 +233,7 @@ export const registerDeprecatedProjectRouter = async (server: FastifyZodProvider
}
],
params: z.object({
slug: slugSchema({ max: 36 }).describe("The slug of the project to get.")
slug: slugSchema({ max: 64 }).describe("The slug of the project to get.")
}),
response: {
200: projectWithEnv
@@ -266,7 +266,7 @@ export const registerDeprecatedProjectRouter = async (server: FastifyZodProvider
},
schema: {
params: z.object({
slug: slugSchema({ min: 5, max: 36 }).describe("The slug of the project to update.")
slug: slugSchema({ min: 5, max: 64 }).describe("The slug of the project to update.")
}),
body: z.object({
name: z.string().trim().optional().describe(PROJECTS.UPDATE.name),
@@ -322,7 +322,7 @@ export const registerDeprecatedProjectRouter = async (server: FastifyZodProvider
hide: false,
tags: [ApiDocsTags.PkiCertificateAuthorities],
params: z.object({
slug: slugSchema({ min: 5, max: 36 }).describe(PROJECTS.LIST_CAS.slug)
slug: slugSchema({ min: 5, max: 64 }).describe(PROJECTS.LIST_CAS.slug)
}),
querystring: z.object({
status: z.enum([CaStatus.ACTIVE, CaStatus.PENDING_CERTIFICATE]).optional().describe(PROJECTS.LIST_CAS.status),
@@ -365,7 +365,7 @@ export const registerDeprecatedProjectRouter = async (server: FastifyZodProvider
hide: false,
tags: [ApiDocsTags.PkiCertificates],
params: z.object({
slug: slugSchema({ min: 5, max: 36 }).describe(PROJECTS.LIST_CERTIFICATES.slug)
slug: slugSchema({ min: 5, max: 64 }).describe(PROJECTS.LIST_CERTIFICATES.slug)
}),
querystring: z.object({
friendlyName: z.string().optional().describe(PROJECTS.LIST_CERTIFICATES.friendlyName),

View File

@@ -627,7 +627,8 @@ export const registerDeprecatedSecretRouter = async (server: FastifyZodProvider)
secretId: secret.id,
secretKey: req.params.secretName,
secretVersion: secret.version,
secretMetadata: req.body.secretMetadata
secretMetadata: req.body.secretMetadata,
secretTags: secret.tags?.map((tag) => tag.name)
}
}
});
@@ -780,7 +781,8 @@ export const registerDeprecatedSecretRouter = async (server: FastifyZodProvider)
secretId: secret.id,
secretKey: req.params.secretName,
secretVersion: secret.version,
secretMetadata: req.body.secretMetadata
secretMetadata: req.body.secretMetadata,
secretTags: secret.tags?.map((tag) => tag.name)
}
}
});
@@ -2154,7 +2156,8 @@ export const registerDeprecatedSecretRouter = async (server: FastifyZodProvider)
secretId: secret.id,
secretKey: secret.secretKey,
secretVersion: secret.version,
secretMetadata: secretMetadataMap.get(secret.secretKey)
secretMetadata: secretMetadataMap.get(secret.secretKey),
secretTags: secret.tags?.map((tag) => tag.name)
}))
}
}
@@ -2288,7 +2291,6 @@ export const registerDeprecatedSecretRouter = async (server: FastifyZodProvider)
return { approval: secretOperation.approval };
}
const { secrets } = secretOperation;
const secretMetadataMap = new Map(
inputSecrets.map(({ secretKey, secretMetadata }) => [secretKey, secretMetadata])
);
@@ -2308,7 +2310,8 @@ export const registerDeprecatedSecretRouter = async (server: FastifyZodProvider)
secretPath: secret.secretPath,
secretKey: secret.secretKey,
secretVersion: secret.version,
secretMetadata: secretMetadataMap.get(secret.secretKey)
secretMetadata: secretMetadataMap.get(secret.secretKey),
secretTags: secret.tags?.map((tag) => tag.name)
}))
}
}
@@ -2328,7 +2331,8 @@ export const registerDeprecatedSecretRouter = async (server: FastifyZodProvider)
secretPath: secret.secretPath,
secretKey: secret.secretKey,
secretVersion: secret.version,
secretMetadata: secretMetadataMap.get(secret.secretKey)
secretMetadata: secretMetadataMap.get(secret.secretKey),
secretTags: secret.tags?.map((tag) => tag.name)
}))
}
}

View File

@@ -478,7 +478,8 @@ export const registerSecretRouter = async (server: FastifyZodProvider) => {
secretId: secret.id,
secretKey: req.params.secretName,
secretVersion: secret.version,
secretMetadata: req.body.secretMetadata
secretMetadata: req.body.secretMetadata,
secretTags: secret.tags?.map((tag) => tag.name)
}
}
});
@@ -621,7 +622,8 @@ export const registerSecretRouter = async (server: FastifyZodProvider) => {
secretId: secret.id,
secretKey: req.params.secretName,
secretVersion: secret.version,
secretMetadata: req.body.secretMetadata
secretMetadata: req.body.secretMetadata,
secretTags: secret.tags?.map((tag) => tag.name)
}
}
});
@@ -911,7 +913,8 @@ export const registerSecretRouter = async (server: FastifyZodProvider) => {
secretId: secret.id,
secretKey: secret.secretKey,
secretVersion: secret.version,
secretMetadata: secretMetadataMap.get(secret.secretKey)
secretMetadata: secretMetadataMap.get(secret.secretKey),
secretTags: secret.tags?.map((tag) => tag.name)
}))
}
}
@@ -1063,7 +1066,8 @@ export const registerSecretRouter = async (server: FastifyZodProvider) => {
secretPath: secret.secretPath,
secretKey: secret.secretKey,
secretVersion: secret.version,
secretMetadata: secretMetadataMap.get(secret.secretKey)
secretMetadata: secretMetadataMap.get(secret.secretKey),
secretTags: secret.tags?.map((tag) => tag.name)
}))
}
}
@@ -1262,7 +1266,7 @@ export const registerSecretRouter = async (server: FastifyZodProvider) => {
handler: async (req) => {
const { secretName } = req.params;
const { secretPath, environment, projectId } = req.query;
const { tree, value } = await server.services.secret.getSecretReferenceTree({
const { tree, value, secret } = await server.services.secret.getSecretReferenceTree({
actorId: req.permission.id,
actor: req.permission.type,
actorAuthMethod: req.permission.authMethod,
@@ -1273,6 +1277,21 @@ export const registerSecretRouter = async (server: FastifyZodProvider) => {
environment
});
await server.services.auditLog.createAuditLog({
projectId,
...req.auditLogInfo,
event: {
type: EventType.GET_SECRET,
metadata: {
environment,
secretPath,
secretId: secret.id,
secretKey: secretName,
secretVersion: secret.version
}
}
});
return { tree, value };
}
});

View File

@@ -150,7 +150,12 @@ const SECRET_SCANNING_APP_CONNECTION_MAP = Object.fromEntries(
);
// scott: ideally this would be derived from a utilized map like the above
const PKI_APP_CONNECTIONS = [AppConnection.AWS, AppConnection.Cloudflare, AppConnection.AzureADCS];
const PKI_APP_CONNECTIONS = [
AppConnection.AWS,
AppConnection.Cloudflare,
AppConnection.AzureADCS,
AppConnection.AzureKeyVault
];
export const listAppConnectionOptions = (projectType?: ProjectType) => {
return [

View File

@@ -266,9 +266,9 @@ export const appConnectionServiceFactory = ({
const { permission } = await permissionService.getOrgPermission(
actor.type,
actor.id,
actor.orgId,
appConnection.orgId,
actor.authMethod,
appConnection.orgId
actor.orgId
);
ForbiddenError.from(permission).throwUnlessCan(
@@ -316,9 +316,9 @@ export const appConnectionServiceFactory = ({
const { permission } = await permissionService.getOrgPermission(
actor.type,
actor.id,
actor.orgId,
appConnection.orgId,
actor.authMethod,
appConnection.orgId
actor.orgId
);
ForbiddenError.from(permission).throwUnlessCan(
@@ -475,9 +475,9 @@ export const appConnectionServiceFactory = ({
const { permission: orgPermission } = await permissionService.getOrgPermission(
actor.type,
actor.id,
actor.orgId,
appConnection.orgId,
actor.authMethod,
appConnection.orgId
actor.orgId
);
if (appConnection.projectId) {
@@ -633,9 +633,9 @@ export const appConnectionServiceFactory = ({
const { permission } = await permissionService.getOrgPermission(
actor.type,
actor.id,
actor.orgId,
appConnection.orgId,
actor.authMethod,
appConnection.orgId
actor.orgId
);
ForbiddenError.from(permission).throwUnlessCan(
@@ -803,9 +803,9 @@ export const appConnectionServiceFactory = ({
const { permission } = await permissionService.getOrgPermission(
actor.type,
actor.id,
actor.orgId,
appConnection.orgId,
actor.authMethod,
appConnection.orgId
actor.orgId
);
ForbiddenError.from(permission).throwUnlessCan(

View File

@@ -23,6 +23,9 @@ import {
} from "@app/services/certificate/certificate-types";
import { TKmsServiceFactory } from "@app/services/kms/kms-service";
import { TPkiSubscriberDALFactory } from "@app/services/pki-subscriber/pki-subscriber-dal";
import { TPkiSyncDALFactory } from "@app/services/pki-sync/pki-sync-dal";
import { triggerAutoSyncForSubscriber } from "@app/services/pki-sync/pki-sync-utils";
import { TPkiSyncQueueFactory } from "@app/services/pki-sync/pki-sync-queue";
import { TProjectDALFactory } from "@app/services/project/project-dal";
import { getProjectKmsCertificateKeyId } from "@app/services/project/project-fns";
@@ -56,6 +59,8 @@ type TAcmeCertificateAuthorityFnsDeps = {
"encryptWithKmsKey" | "generateKmsKey" | "createCipherPairWithDataKey" | "decryptWithKmsKey"
>;
pkiSubscriberDAL: Pick<TPkiSubscriberDALFactory, "findById">;
pkiSyncDAL: Pick<TPkiSyncDALFactory, "find">;
pkiSyncQueue: Pick<TPkiSyncQueueFactory, "queuePkiSyncSyncCertificatesById">;
projectDAL: Pick<TProjectDALFactory, "findById" | "findOne" | "updateById" | "transaction">;
};
@@ -109,7 +114,9 @@ export const AcmeCertificateAuthorityFns = ({
certificateSecretDAL,
kmsService,
projectDAL,
pkiSubscriberDAL
pkiSubscriberDAL,
pkiSyncDAL,
pkiSyncQueue
}: TAcmeCertificateAuthorityFnsDeps) => {
const createCertificateAuthority = async ({
name,
@@ -524,6 +531,8 @@ export const AcmeCertificateAuthorityFns = ({
tx
);
});
await triggerAutoSyncForSubscriber(subscriber.id, { pkiSyncDAL, pkiSyncQueue });
};
return {

View File

@@ -26,6 +26,9 @@ import {
import { TKmsServiceFactory } from "@app/services/kms/kms-service";
import { TPkiSubscriberDALFactory } from "@app/services/pki-subscriber/pki-subscriber-dal";
import { TPkiSubscriberProperties } from "@app/services/pki-subscriber/pki-subscriber-types";
import { TPkiSyncDALFactory } from "@app/services/pki-sync/pki-sync-dal";
import { triggerAutoSyncForSubscriber } from "@app/services/pki-sync/pki-sync-utils";
import { TPkiSyncQueueFactory } from "@app/services/pki-sync/pki-sync-queue";
import { TProjectDALFactory } from "@app/services/project/project-dal";
import { getProjectKmsCertificateKeyId } from "@app/services/project/project-fns";
@@ -55,6 +58,8 @@ type TAzureAdCsCertificateAuthorityFnsDeps = {
"encryptWithKmsKey" | "generateKmsKey" | "createCipherPairWithDataKey" | "decryptWithKmsKey"
>;
pkiSubscriberDAL: Pick<TPkiSubscriberDALFactory, "findById">;
pkiSyncDAL: Pick<TPkiSyncDALFactory, "find">;
pkiSyncQueue: Pick<TPkiSyncQueueFactory, "queuePkiSyncSyncCertificatesById">;
projectDAL: Pick<TProjectDALFactory, "findById" | "findOne" | "updateById" | "transaction">;
};
@@ -584,7 +589,9 @@ export const AzureAdCsCertificateAuthorityFns = ({
certificateSecretDAL,
kmsService,
projectDAL,
pkiSubscriberDAL
pkiSubscriberDAL,
pkiSyncDAL,
pkiSyncQueue
}: TAzureAdCsCertificateAuthorityFnsDeps) => {
const createCertificateAuthority = async ({
name,
@@ -1024,6 +1031,8 @@ export const AzureAdCsCertificateAuthorityFns = ({
);
});
await triggerAutoSyncForSubscriber(subscriber.id, { pkiSyncDAL, pkiSyncQueue });
return {
certificate: certificatePem,
certificateChain: certificateChainPem,

View File

@@ -20,6 +20,8 @@ import { TCertificateBodyDALFactory } from "../certificate/certificate-body-dal"
import { TCertificateSecretDALFactory } from "../certificate/certificate-secret-dal";
import { TPkiSubscriberDALFactory } from "../pki-subscriber/pki-subscriber-dal";
import { SubscriberOperationStatus } from "../pki-subscriber/pki-subscriber-types";
import { TPkiSyncDALFactory } from "../pki-sync/pki-sync-dal";
import { TPkiSyncQueueFactory } from "../pki-sync/pki-sync-queue";
import { AcmeCertificateAuthorityFns } from "./acme/acme-certificate-authority-fns";
import { AzureAdCsCertificateAuthorityFns } from "./azure-ad-cs/azure-ad-cs-certificate-authority-fns";
import { TCertificateAuthorityDALFactory } from "./certificate-authority-dal";
@@ -50,6 +52,8 @@ type TCertificateAuthorityQueueFactoryDep = {
certificateSecretDAL: Pick<TCertificateSecretDALFactory, "create">;
queueService: TQueueServiceFactory;
pkiSubscriberDAL: Pick<TPkiSubscriberDALFactory, "findById" | "updateById">;
pkiSyncDAL: Pick<TPkiSyncDALFactory, "find">;
pkiSyncQueue: Pick<TPkiSyncQueueFactory, "queuePkiSyncSyncCertificatesById">;
};
export type TCertificateAuthorityQueueFactory = ReturnType<typeof certificateAuthorityQueueFactory>;
@@ -68,7 +72,9 @@ export const certificateAuthorityQueueFactory = ({
externalCertificateAuthorityDAL,
certificateBodyDAL,
certificateSecretDAL,
pkiSubscriberDAL
pkiSubscriberDAL,
pkiSyncDAL,
pkiSyncQueue
}: TCertificateAuthorityQueueFactoryDep) => {
const acmeFns = AcmeCertificateAuthorityFns({
appConnectionDAL,
@@ -80,7 +86,9 @@ export const certificateAuthorityQueueFactory = ({
certificateSecretDAL,
kmsService,
pkiSubscriberDAL,
projectDAL
projectDAL,
pkiSyncDAL,
pkiSyncQueue
});
const azureAdCsFns = AzureAdCsCertificateAuthorityFns({
@@ -93,7 +101,9 @@ export const certificateAuthorityQueueFactory = ({
certificateSecretDAL,
kmsService,
pkiSubscriberDAL,
projectDAL
projectDAL,
pkiSyncDAL,
pkiSyncQueue
});
// TODO 1: auto-periodic rotation

View File

@@ -13,6 +13,8 @@ import { TCertificateDALFactory } from "../certificate/certificate-dal";
import { TCertificateSecretDALFactory } from "../certificate/certificate-secret-dal";
import { TKmsServiceFactory } from "../kms/kms-service";
import { TPkiSubscriberDALFactory } from "../pki-subscriber/pki-subscriber-dal";
import { TPkiSyncDALFactory } from "../pki-sync/pki-sync-dal";
import { TPkiSyncQueueFactory } from "../pki-sync/pki-sync-queue";
import { TProjectDALFactory } from "../project/project-dal";
import {
AcmeCertificateAuthorityFns,
@@ -68,6 +70,8 @@ type TCertificateAuthorityServiceFactoryDep = {
"encryptWithKmsKey" | "generateKmsKey" | "createCipherPairWithDataKey" | "decryptWithKmsKey"
>;
pkiSubscriberDAL: Pick<TPkiSubscriberDALFactory, "findById">;
pkiSyncDAL: Pick<TPkiSyncDALFactory, "find">;
pkiSyncQueue: Pick<TPkiSyncQueueFactory, "queuePkiSyncSyncCertificatesById">;
};
export type TCertificateAuthorityServiceFactory = ReturnType<typeof certificateAuthorityServiceFactory>;
@@ -84,7 +88,9 @@ export const certificateAuthorityServiceFactory = ({
certificateBodyDAL,
certificateSecretDAL,
kmsService,
pkiSubscriberDAL
pkiSubscriberDAL,
pkiSyncDAL,
pkiSyncQueue
}: TCertificateAuthorityServiceFactoryDep) => {
const acmeFns = AcmeCertificateAuthorityFns({
appConnectionDAL,
@@ -96,7 +102,9 @@ export const certificateAuthorityServiceFactory = ({
certificateSecretDAL,
kmsService,
pkiSubscriberDAL,
projectDAL
projectDAL,
pkiSyncDAL,
pkiSyncQueue
});
const azureAdCsFns = AzureAdCsCertificateAuthorityFns({
@@ -109,7 +117,9 @@ export const certificateAuthorityServiceFactory = ({
certificateSecretDAL,
kmsService,
pkiSubscriberDAL,
projectDAL
projectDAL,
pkiSyncDAL,
pkiSyncQueue
});
const createCertificateAuthority = async (

View File

@@ -19,6 +19,9 @@ import {
TAltNameMapping
} from "@app/services/certificate/certificate-types";
import { TKmsServiceFactory } from "@app/services/kms/kms-service";
import { TPkiSyncDALFactory } from "@app/services/pki-sync/pki-sync-dal";
import { triggerAutoSyncForSubscriber } from "@app/services/pki-sync/pki-sync-utils";
import { TPkiSyncQueueFactory } from "@app/services/pki-sync/pki-sync-queue";
import { TProjectDALFactory } from "@app/services/project/project-dal";
import { getProjectKmsCertificateKeyId } from "@app/services/project/project-fns";
@@ -51,6 +54,8 @@ type TInternalCertificateAuthorityFnsDeps = {
certificateDAL: Pick<TCertificateDALFactory, "create" | "transaction">;
certificateBodyDAL: Pick<TCertificateBodyDALFactory, "create">;
certificateSecretDAL: Pick<TCertificateSecretDALFactory, "create">;
pkiSyncDAL: Pick<TPkiSyncDALFactory, "find">;
pkiSyncQueue: Pick<TPkiSyncQueueFactory, "queuePkiSyncSyncCertificatesById">;
};
export const InternalCertificateAuthorityFns = ({
@@ -62,7 +67,9 @@ export const InternalCertificateAuthorityFns = ({
certificateAuthorityCrlDAL,
certificateDAL,
certificateBodyDAL,
certificateSecretDAL
certificateSecretDAL,
pkiSyncDAL,
pkiSyncQueue
}: TInternalCertificateAuthorityFnsDeps) => {
const issueCertificate = async (
subscriber: TPkiSubscribers,
@@ -251,6 +258,8 @@ export const InternalCertificateAuthorityFns = ({
);
});
await triggerAutoSyncForSubscriber(subscriber.id, { pkiSyncDAL, pkiSyncQueue });
return {
certificate: leafCert.toString("pem"),
certificateChain: certificateChainPem,

View File

@@ -1,5 +1,5 @@
import { TDbClient } from "@app/db";
import { TableName } from "@app/db/schemas";
import { TableName, TCertificates } from "@app/db/schemas";
import { DatabaseError } from "@app/lib/errors";
import { ormify } from "@app/lib/knex";
@@ -25,6 +25,20 @@ export const certificateDALFactory = (db: TDbClient) => {
}
};
const findAllActiveCertsForSubscriber = async ({ subscriberId }: { subscriberId: string }) => {
try {
const certs = await db
.replicaNode()(TableName.Certificate)
.where({ pkiSubscriberId: subscriberId, status: CertStatus.ACTIVE })
.where("notAfter", ">", new Date())
.orderBy("notBefore", "desc");
return certs;
} catch (error) {
throw new DatabaseError({ error, name: "Find all active certificates for subscriber" });
}
};
const countCertificatesInProject = async ({
projectId,
friendlyName,
@@ -79,10 +93,33 @@ export const certificateDALFactory = (db: TDbClient) => {
}
};
const findExpiredSyncedCertificates = async (): Promise<TCertificates[]> => {
try {
const yesterday = new Date();
yesterday.setDate(yesterday.getDate() - 1);
yesterday.setHours(0, 0, 0, 0);
const today = new Date();
today.setHours(0, 0, 0, 0);
const certs = await db
.replicaNode()(TableName.Certificate)
.where("notAfter", ">=", yesterday)
.where("notAfter", "<", today)
.whereNotNull("pkiSubscriberId");
return certs;
} catch (error) {
throw new DatabaseError({ error, name: "Find expired synced certificates" });
}
};
return {
...certificateOrm,
countCertificatesInProject,
countCertificatesForPkiSubscriber,
findLatestActiveCertForSubscriber
findLatestActiveCertForSubscriber,
findAllActiveCertsForSubscriber,
findExpiredSyncedCertificates
};
};

View File

@@ -1,3 +1,4 @@
/* eslint-disable no-await-in-loop */
import { ForbiddenError } from "@casl/ability";
import * as x509 from "@peculiar/x509";
@@ -20,6 +21,9 @@ import { TCertificateAuthoritySecretDALFactory } from "@app/services/certificate
import { TKmsServiceFactory } from "@app/services/kms/kms-service";
import { TPkiCollectionDALFactory } from "@app/services/pki-collection/pki-collection-dal";
import { TPkiCollectionItemDALFactory } from "@app/services/pki-collection/pki-collection-item-dal";
import { TPkiSyncDALFactory } from "@app/services/pki-sync/pki-sync-dal";
import { TPkiSyncQueueFactory } from "@app/services/pki-sync/pki-sync-queue";
import { triggerAutoSyncForSubscriber } from "@app/services/pki-sync/pki-sync-utils";
import { TProjectDALFactory } from "@app/services/project/project-dal";
import { getProjectKmsCertificateKeyId } from "@app/services/project/project-fns";
@@ -53,6 +57,8 @@ type TCertificateServiceFactoryDep = {
projectDAL: Pick<TProjectDALFactory, "findProjectBySlug" | "findOne" | "updateById" | "findById" | "transaction">;
kmsService: Pick<TKmsServiceFactory, "generateKmsKey" | "encryptWithKmsKey" | "decryptWithKmsKey">;
permissionService: Pick<TPermissionServiceFactory, "getProjectPermission">;
pkiSyncDAL: Pick<TPkiSyncDALFactory, "find">;
pkiSyncQueue: Pick<TPkiSyncQueueFactory, "queuePkiSyncSyncCertificatesById">;
};
export type TCertificateServiceFactory = ReturnType<typeof certificateServiceFactory>;
@@ -69,7 +75,9 @@ export const certificateServiceFactory = ({
pkiCollectionItemDAL,
projectDAL,
kmsService,
permissionService
permissionService,
pkiSyncDAL,
pkiSyncQueue
}: TCertificateServiceFactoryDep) => {
/**
* Return details for certificate with serial number [serialNumber]
@@ -158,6 +166,11 @@ export const certificateServiceFactory = ({
const deletedCert = await certificateDAL.deleteById(cert.id);
// Trigger auto sync for PKI syncs connected to this certificate's subscriber
if (cert.pkiSubscriberId) {
await triggerAutoSyncForSubscriber(cert.pkiSubscriberId, { pkiSyncDAL, pkiSyncQueue });
}
return {
deletedCert
};
@@ -222,6 +235,11 @@ export const certificateServiceFactory = ({
}
);
// Trigger auto sync for PKI syncs connected to this certificate's subscriber
if (cert.pkiSubscriberId) {
await triggerAutoSyncForSubscriber(cert.pkiSubscriberId, { pkiSyncDAL, pkiSyncQueue });
}
// Note: External CA revocation handling would go here for supported CA types
// Currently, only internal CAs and ACME CAs support revocation

View File

@@ -0,0 +1,129 @@
/* eslint-disable no-await-in-loop */
import { AxiosError } from "axios";
import { logger } from "@app/lib/logger";
// Tunables for client-side rate limiting against an external connection API.
export type RateLimitConfig = {
  MAX_CONCURRENT_REQUESTS: number; // max requests processed per batch
  BASE_DELAY: number; // initial backoff delay in ms (doubled on each retry)
  MAX_DELAY: number; // upper bound on the backoff delay in ms
  MAX_RETRIES: number; // retries attempted on rate-limit errors before rethrowing
  RATE_LIMIT_STATUS_CODES: number[]; // HTTP statuses treated as rate limiting (e.g. 429, 503)
};
// Logging context attached to a single rate-limited call.
export type RateLimitContext = {
  operation: string; // human-readable operation name for log lines
  identifier?: string; // optional per-item identifier (e.g. position within a batch)
  syncId: string; // sync the call belongs to
};
// Logging context for a batched, concurrency-limited run.
export type ConcurrencyContext = {
  operation: string; // human-readable operation name for log lines
  syncId: string; // sync the run belongs to
};
/** Promise-based delay helper: resolves (with no value) after `ms` milliseconds. */
export const sleep = (ms: number): Promise<void> =>
  new Promise<void>((resolve) => void setTimeout(resolve, ms));
/**
 * Builds a predicate that decides whether an error represents provider rate
 * limiting: either its HTTP status is in the configured list, or its message
 * mentions "rate limit" / "throttl…". Non-Axios errors never qualify.
 */
export const createRateLimitErrorChecker =
  (config: RateLimitConfig) =>
  (error: unknown): boolean => {
    if (!(error instanceof AxiosError)) {
      return false;
    }
    if (config.RATE_LIMIT_STATUS_CODES.includes(error.response?.status || 0)) {
      return true;
    }
    const message = error.message.toLowerCase();
    return message.includes("rate limit") || message.includes("throttl");
  };
/**
 * Builds an async wrapper that retries `fn` with exponential backoff whenever
 * `isRateLimitError` classifies the failure as rate limiting. Non-rate-limit
 * errors, and rate-limit errors past MAX_RETRIES attempts, are rethrown.
 * `retryCount` lets callers resume from a prior attempt count (defaults to 0).
 */
export const createRateLimitRetry =
  (config: RateLimitConfig, isRateLimitError: (error: unknown) => boolean) =>
  async <T>(fn: () => Promise<T>, context: RateLimitContext, retryCount = 0): Promise<T> => {
    let attempt = retryCount;
    for (;;) {
      try {
        return await fn();
      } catch (error) {
        if (!isRateLimitError(error) || attempt >= config.MAX_RETRIES) {
          throw error;
        }
        // Exponential backoff: BASE_DELAY * 2^attempt, capped at MAX_DELAY.
        const delay = Math.min(config.BASE_DELAY * 2 ** attempt, config.MAX_DELAY);
        logger.warn(
          {
            syncId: context.syncId,
            operation: context.operation,
            identifier: context.identifier,
            retryCount: attempt + 1,
            delayMs: delay,
            error: error instanceof AxiosError ? error.message : String(error)
          },
          "Rate limit hit, retrying with exponential backoff"
        );
        await sleep(delay);
        attempt += 1;
      }
    }
  };
/**
 * Builds an executor that processes `items` in fixed-size batches so that at
 * most `concurrencyLimit` requests run concurrently. Every item is wrapped in
 * the supplied rate-limit retry helper, and results are returned as settled
 * promises so one failure does not abort the rest. A short 100ms pause
 * separates consecutive batches.
 */
export const createConcurrencyLimitExecutor =
  (
    config: RateLimitConfig,
    withRateLimitRetry: <T>(fn: () => Promise<T>, context: RateLimitContext, retryCount?: number) => Promise<T>
  ) =>
  async <T, R>(
    items: T[],
    executor: (item: T) => Promise<R>,
    context: ConcurrencyContext,
    concurrencyLimit = config.MAX_CONCURRENT_REQUESTS
  ): Promise<PromiseSettledResult<R>[]> => {
    const settled: PromiseSettledResult<R>[] = [];

    for (let offset = 0; offset < items.length; offset += concurrencyLimit) {
      const batch = items.slice(offset, offset + concurrencyLimit);

      logger.debug(
        {
          syncId: context.syncId,
          operation: context.operation,
          batchStart: offset + 1,
          batchEnd: Math.min(offset + concurrencyLimit, items.length),
          totalItems: items.length
        },
        "Processing batch with rate limit protection"
      );

      // Each batch member gets its own retry wrapper with a 1-based identifier.
      const wrapped = batch.map((item, idx) =>
        withRateLimitRetry(() => executor(item), {
          operation: context.operation,
          identifier: `batch-${offset + idx + 1}`,
          syncId: context.syncId
        })
      );
      settled.push(...(await Promise.allSettled(wrapped)));

      // Brief pause between batches to avoid bursting the provider API.
      if (offset + concurrencyLimit < items.length) {
        await sleep(100);
      }
    }

    return settled;
  };
/**
 * Wires the rate-limit helpers together for one provider configuration and
 * exposes them as a single queue-like utility object.
 */
export const createConnectionQueue = (config: RateLimitConfig) => {
  const isRateLimitError = createRateLimitErrorChecker(config);
  const withRateLimitRetry = createRateLimitRetry(config, isRateLimitError);

  return {
    sleep,
    isRateLimitError,
    withRateLimitRetry,
    executeWithConcurrencyLimit: createConcurrencyLimitExecutor(config, withRateLimitRetry)
  };
};

View File

@@ -0,0 +1 @@
export * from "./connection-queue-fns";

View File

@@ -1,3 +1,4 @@
/* eslint-disable no-await-in-loop */
/* eslint-disable no-bitwise */
import { ForbiddenError, subject } from "@casl/ability";
import * as x509 from "@peculiar/x509";
@@ -36,6 +37,9 @@ import {
import { TCertificateAuthoritySecretDALFactory } from "@app/services/certificate-authority/certificate-authority-secret-dal";
import { TKmsServiceFactory } from "@app/services/kms/kms-service";
import { TPkiSubscriberDALFactory } from "@app/services/pki-subscriber/pki-subscriber-dal";
import { TPkiSyncDALFactory } from "@app/services/pki-sync/pki-sync-dal";
import { triggerAutoSyncForSubscriber } from "@app/services/pki-sync/pki-sync-utils";
import { TPkiSyncQueueFactory } from "@app/services/pki-sync/pki-sync-queue";
import { TProjectDALFactory } from "@app/services/project/project-dal";
import { getProjectKmsCertificateKeyId } from "@app/services/project/project-fns";
@@ -79,6 +83,8 @@ type TPkiSubscriberServiceFactoryDep = {
kmsService: Pick<TKmsServiceFactory, "generateKmsKey" | "decryptWithKmsKey" | "encryptWithKmsKey">;
permissionService: Pick<TPermissionServiceFactory, "getProjectPermission">;
internalCaFns: ReturnType<typeof InternalCertificateAuthorityFns>;
pkiSyncDAL: Pick<TPkiSyncDALFactory, "find">;
pkiSyncQueue: Pick<TPkiSyncQueueFactory, "queuePkiSyncSyncCertificatesById">;
};
export type TPkiSubscriberServiceFactory = ReturnType<typeof pkiSubscriberServiceFactory>;
@@ -96,7 +102,9 @@ export const pkiSubscriberServiceFactory = ({
kmsService,
permissionService,
certificateAuthorityQueue,
internalCaFns
internalCaFns,
pkiSyncDAL,
pkiSyncQueue
}: TPkiSubscriberServiceFactoryDep) => {
const createSubscriber = async ({
name,
@@ -413,7 +421,12 @@ export const pkiSubscriberServiceFactory = ({
const ca = await certificateAuthorityDAL.findByIdWithAssociatedCa(subscriber.caId);
if (ca.internalCa?.id) {
return internalCaFns.issueCertificate(subscriber, ca);
const result = await internalCaFns.issueCertificate(subscriber, ca);
// Trigger auto sync for PKI syncs connected to this subscriber after certificate issuance
await triggerAutoSyncForSubscriber(subscriber.id, { pkiSyncDAL, pkiSyncQueue });
return result;
}
throw new BadRequestError({ message: "CA does not support immediate issuance of certificates" });
@@ -671,6 +684,9 @@ export const pkiSubscriberServiceFactory = ({
return cert;
});
// Trigger auto sync for PKI syncs connected to this subscriber after certificate signing
await triggerAutoSyncForSubscriber(subscriber.id, { pkiSyncDAL, pkiSyncQueue });
return {
certificate: leafCert.toString("pem"),
certificateChain: `${issuingCaCertificate}\n${caCertChain}`.trim(),

View File

@@ -0,0 +1,52 @@
import RE2 from "re2";
import { AppConnection } from "@app/services/app-connection/app-connection-enums";
import { PkiSync } from "@app/services/pki-sync/pki-sync-enums";
/**
 * Azure Key Vault naming constraints for certificates.
 * Used both to validate generated names and to surface the rules to clients.
 */
export const AZURE_KEY_VAULT_CERTIFICATE_NAMING = {
  /**
   * Regular expression pattern for valid Azure Key Vault certificate names
   * Must contain only alphanumeric characters and hyphens (a-z, A-Z, 0-9, -)
   * Must be 1-127 characters long
   * (compiled with RE2 for linear-time, backtracking-safe matching)
   */
  NAME_PATTERN: new RE2("^[a-zA-Z0-9-]{1,127}$"),
  /**
   * String of characters that are forbidden in Azure Key Vault certificate names
   * (note: the trailing characters are backtick, space and underscore)
   */
  FORBIDDEN_CHARACTERS: "!@#$%^&*()+=[]{}|\\:;\"'<>,.?/~` _",
  /**
   * Maximum length for certificate names in Azure Key Vault
   */
  MAX_NAME_LENGTH: 127,
  /**
   * Minimum length for certificate names in Azure Key Vault
   */
  MIN_NAME_LENGTH: 1,
  /**
   * String representation of the allowed character pattern (for UI display);
   * kept in sync with NAME_PATTERN above
   */
  ALLOWED_CHARACTER_PATTERN: "^[a-zA-Z0-9-]{1,127}$"
} as const;
/**
 * Azure Key Vault PKI Sync list option configuration.
 * Describes this destination's capabilities and naming constraints for the
 * PKI Sync options listing endpoint.
 */
export const AZURE_KEY_VAULT_PKI_SYNC_LIST_OPTION = {
  name: "Azure Key Vault" as const,
  connection: AppConnection.AzureKeyVault,
  destination: PkiSync.AzureKeyVault,
  // This destination only pushes certificates out; importing is not supported.
  canImportCertificates: false,
  canRemoveCertificates: true,
  // Name template applied when no custom certificate name schema is configured.
  defaultCertificateNameSchema: "Infisical-PKI-Sync-{{certificateId}}",
  // Naming constraints surfaced to clients; sourced from the constants above.
  forbiddenCharacters: AZURE_KEY_VAULT_CERTIFICATE_NAMING.FORBIDDEN_CHARACTERS,
  allowedCharacterPattern: AZURE_KEY_VAULT_CERTIFICATE_NAMING.ALLOWED_CHARACTER_PATTERN,
  maxCertificateNameLength: AZURE_KEY_VAULT_CERTIFICATE_NAMING.MAX_NAME_LENGTH,
  minCertificateNameLength: AZURE_KEY_VAULT_CERTIFICATE_NAMING.MIN_NAME_LENGTH
} as const;

View File

@@ -0,0 +1,680 @@
/* eslint-disable no-await-in-loop */
import { AxiosError } from "axios";
import * as crypto from "crypto";
import { request } from "@app/lib/config/request";
import { logger } from "@app/lib/logger";
import { TAppConnectionDALFactory } from "@app/services/app-connection/app-connection-dal";
import { getAzureConnectionAccessToken } from "@app/services/app-connection/azure-key-vault";
import { createConnectionQueue, RateLimitConfig } from "@app/services/connection-queue";
import { TKmsServiceFactory } from "@app/services/kms/kms-service";
import { matchesCertificateNameSchema } from "@app/services/pki-sync/pki-sync-fns";
import { TCertificateMap } from "@app/services/pki-sync/pki-sync-types";
import { PkiSyncError } from "../pki-sync-errors";
import { TPkiSyncWithCredentials } from "../pki-sync-types";
import { GetAzureKeyVaultCertificate, TAzureKeyVaultPkiSyncConfig } from "./azure-key-vault-pki-sync-types";
// Azure-specific rate limiting: up to 10 concurrent requests, exponential
// backoff starting at 1s (capped at 30s), 3 retries on HTTP 429/503.
const AZURE_RATE_LIMIT_CONFIG: RateLimitConfig = {
  MAX_CONCURRENT_REQUESTS: 10,
  BASE_DELAY: 1000,
  MAX_DELAY: 30000,
  MAX_RETRIES: 3,
  RATE_LIMIT_STATUS_CODES: [429, 503]
};
// Module-wide queue helpers bound to the Azure rate-limit configuration.
const azureConnectionQueue = createConnectionQueue(AZURE_RATE_LIMIT_CONFIG);
const { withRateLimitRetry, executeWithConcurrencyLimit } = azureConnectionQueue;
/**
 * Returns the final path segment of an Azure certificate identifier URL
 * (e.g. "https://vault.../certificates/my-cert" -> "my-cert"). An input with
 * no slash is returned unchanged.
 */
const extractCertificateNameFromId = (certificateId: string): string => {
  const lastSlash = certificateId.lastIndexOf("/");
  return lastSlash === -1 ? certificateId : certificateId.slice(lastSlash + 1);
};
const isInfisicalManagedCertificate = (certificateName: string, pkiSync: TPkiSyncWithCredentials): boolean => {
const syncOptions = pkiSync.syncOptions as { certificateNameSchema?: string } | undefined;
const certificateNameSchema = syncOptions?.certificateNameSchema;
if (certificateNameSchema) {
const environment = "global";
return matchesCertificateNameSchema(certificateName, environment, certificateNameSchema);
}
return certificateName.startsWith("Infisical-PKI-Sync-");
};
// Dependencies injected into the Azure Key Vault PKI sync factory:
// connection persistence (token refresh updates) and KMS data-key ciphers.
type TAzureKeyVaultPkiSyncFactoryDeps = {
  appConnectionDAL: Pick<TAppConnectionDALFactory, "findById" | "updateById">;
  kmsService: Pick<TKmsServiceFactory, "createCipherPairWithDataKey">;
};
/**
 * Extracts the subject and subject-alternative-names (SANs) from a PEM
 * certificate using Node's built-in X509Certificate parser.
 *
 * Returns `{ subject, sans }` where `sans` buckets DNS names, email addresses
 * and UPNs. Best-effort: on any parse failure a warning is logged and empty
 * values are returned instead of throwing.
 */
const parseCertificateX509Props = (certPem: string) => {
  try {
    const cert = new crypto.X509Certificate(certPem);
    const { subject } = cert;
    const sans = {
      dns_names: [] as string[],
      emails: [] as string[],
      upns: [] as string[]
    };
    // Node renders subjectAltName as a comma-separated list of
    // "<type>:<value>" entries, e.g. "DNS:example.com, email:a@b.c".
    // NOTE(review): assumes the ", " separator never appears inside a SAN
    // value — confirm acceptable for the inputs synced here.
    if (cert.subjectAltName) {
      const sanEntries = cert.subjectAltName.split(", ");
      for (const entry of sanEntries) {
        if (entry.startsWith("DNS:")) {
          sans.dns_names.push(entry.substring(4));
        } else if (entry.startsWith("email:")) {
          sans.emails.push(entry.substring(6));
        } else if (entry.startsWith("othername:UPN:")) {
          // UPN SANs are rendered as "othername:UPN:<value>" (prefix length 14)
          sans.upns.push(entry.substring(14));
        }
      }
    }
    return {
      subject,
      sans
    };
  } catch (error) {
    // Swallow parse errors: callers get empty subject/SANs rather than a throw.
    logger.warn(
      { error: error instanceof Error ? error.message : String(error) },
      "Failed to parse certificate X.509 properties, using empty values"
    );
    return {
      subject: "",
      sans: {
        dns_names: [],
        emails: [],
        upns: []
      }
    };
  }
};
/**
 * Derives Azure Key Vault `key_props` (key type plus size or curve) from a
 * PEM-encoded certificate's public key.
 *
 * Fallback behavior: returns RSA 2048 whenever key details are unavailable or
 * parsing fails; throws only for key types Azure Key Vault cannot store
 * (anything other than RSA/EC).
 */
const parseCertificateKeyProps = (certPem: string) => {
  try {
    const publicKeyObject = crypto.createPublicKey(certPem);
    const keyDetails = publicKeyObject.asymmetricKeyDetails;
    if (!keyDetails) {
      // Key details missing (not exposed by this runtime/key) — estimate instead.
      if (publicKeyObject.asymmetricKeyType === "rsa") {
        // Rough size estimate: hex chars of the SPKI DER x 4 bits. This counts
        // DER/SPKI overhead too, so it over-estimates slightly; the bucket
        // thresholds below (4000/3000/2000/1000) absorb that slack.
        // NOTE(review): heuristic, not exact — confirm against real keys.
        const pubKeyStr = publicKeyObject.export({ type: "spki", format: "der" }).toString("hex");
        const estimatedBits = pubKeyStr.length * 4;
        let keySize = 2048;
        if (estimatedBits >= 4000) {
          keySize = 4096;
        } else if (estimatedBits >= 3000) {
          keySize = 3072;
        } else if (estimatedBits >= 2000) {
          keySize = 2048;
        } else if (estimatedBits >= 1000) {
          keySize = 1024;
        }
        return {
          kty: "RSA",
          key_size: keySize
        };
      }
      if (publicKeyObject.asymmetricKeyType === "ec") {
        // Curve unknown without key details; default to the most common one.
        return {
          kty: "EC",
          curve: "P-256"
        };
      }
      // Unknown type with no details: fall back to a safe RSA default.
      return {
        kty: "RSA",
        key_size: 2048
      };
    }
    if (publicKeyObject.asymmetricKeyType === "rsa") {
      const modulusLength = keyDetails.modulusLength || 2048;
      return {
        kty: "RSA",
        key_size: modulusLength
      };
    }
    if (publicKeyObject.asymmetricKeyType === "ec") {
      // Map OpenSSL curve names to the NIST names Azure expects.
      const { namedCurve } = keyDetails;
      let curveName = "P-256";
      switch (namedCurve) {
        case "prime256v1":
        case "secp256r1":
          curveName = "P-256";
          break;
        case "secp384r1":
          curveName = "P-384";
          break;
        case "secp521r1":
          curveName = "P-521";
          break;
        default:
          curveName = "P-256";
      }
      return {
        kty: "EC",
        curve: curveName
      };
    }
    // Explicitly reject key types Azure Key Vault cannot import (e.g. Ed25519).
    const keyType = publicKeyObject.asymmetricKeyType;
    if (keyType && !["rsa", "ec"].includes(keyType)) {
      throw new Error(`Unsupported certificate key type: ${keyType}. Azure Key Vault only supports RSA and EC keys.`);
    }
    logger.warn({ keyType }, "Unable to determine certificate key type, defaulting to RSA 2048");
    return {
      kty: "RSA",
      key_size: 2048
    };
  } catch (error) {
    // Parsing failure (including the unsupported-type Error above) degrades to
    // a safe default rather than aborting the sync.
    logger.warn(
      { error: error instanceof Error ? error.message : String(error) },
      "Failed to parse certificate key properties, defaulting to RSA 2048"
    );
    return {
      kty: "RSA",
      key_size: 2048
    };
  }
};
/**
 * Factory for the Azure Key Vault PKI sync implementation.
 *
 * Fixes over the previous revision:
 * - failed-upload details now map each rejection back to its certificate by
 *   its position in the results array (previously the index into the filtered
 *   failure list was used against `setCertificates`, attributing errors to the
 *   wrong certificate names);
 * - failed-removal details report the exact names that failed (previously
 *   `certificatesToRemove.slice(-failedRemovals)` assumed failures were the
 *   last N entries);
 * - the per-certificate detail fetch is wrapped in `withRateLimitRetry`,
 *   consistent with the listing call.
 */
export const azureKeyVaultPkiSyncFactory = ({ kmsService, appConnectionDAL }: TAzureKeyVaultPkiSyncFactoryDeps) => {
  /**
   * Lists every certificate in the target vault (following `nextLink`
   * pagination) and fetches full details for each enabled certificate.
   *
   * @returns `vaultCertificates` keyed by certificate name (PEM cert; private
   *   key is always "" — Azure never returns private keys) plus the names of
   *   disabled certificates, which the sync must leave untouched.
   */
  const $getAzureKeyVaultCertificates = async (accessToken: string, vaultBaseUrl: string, syncId = "unknown") => {
    const paginateAzureKeyVaultCertificates = async () => {
      let result: GetAzureKeyVaultCertificate[] = [];
      let currentUrl = `${vaultBaseUrl}/certificates?api-version=7.4`;
      while (currentUrl) {
        const urlToFetch = currentUrl; // Capture current URL to avoid loop function issue
        const res = await withRateLimitRetry(
          () =>
            request.get<{ value: GetAzureKeyVaultCertificate[]; nextLink: string }>(urlToFetch, {
              headers: {
                Authorization: `Bearer ${accessToken}`
              }
            }),
          { operation: "list-certificates", syncId }
        );
        result = result.concat(res.data.value);
        currentUrl = res.data.nextLink;
      }
      return result;
    };
    const getAzureKeyVaultCertificates = await paginateAzureKeyVaultCertificates();
    const enabledAzureKeyVaultCertificates = getAzureKeyVaultCertificates.filter((cert) => cert.attributes.enabled);
    // Disabled certificates are skipped entirely — never updated or removed
    const disabledAzureKeyVaultCertificateKeys = getAzureKeyVaultCertificates
      .filter(({ attributes }) => !attributes.enabled)
      .map((certificate) => extractCertificateNameFromId(certificate.id));
    // Fetch certificate details with bounded concurrency; each request is also
    // retried on Azure throttling responses, consistent with the listing call
    const certificateResults = await executeWithConcurrencyLimit(
      enabledAzureKeyVaultCertificates,
      async (getAzureKeyVaultCertificate) => {
        const azureKeyVaultCertificate = await withRateLimitRetry(
          () =>
            request.get<GetAzureKeyVaultCertificate>(`${getAzureKeyVaultCertificate.id}?api-version=7.4`, {
              headers: {
                Authorization: `Bearer ${accessToken}`
              }
            }),
          { operation: "get-certificate-details", syncId }
        );
        let certPem = "";
        if (azureKeyVaultCertificate.data.cer) {
          try {
            // Azure Key Vault stores the certificate as base64 DER; re-wrap at
            // 64 chars per line to produce a valid PEM body
            const base64Cert = azureKeyVaultCertificate.data.cer;
            const base64Lines = base64Cert.match(/.{1,64}/g);
            if (!base64Lines) {
              throw new Error("Failed to format base64 certificate data");
            }
            certPem = `-----BEGIN CERTIFICATE-----\n${base64Lines.join("\n")}\n-----END CERTIFICATE-----`;
          } catch (error) {
            logger.warn(
              {
                error: error instanceof Error ? error.message : String(error),
                certificateId: getAzureKeyVaultCertificate.id
              },
              "Failed to convert Azure Key Vault certificate to PEM format, skipping certificate"
            );
            certPem = ""; // Skip this certificate if we can't convert it properly
          }
        }
        return {
          ...azureKeyVaultCertificate.data,
          key: extractCertificateNameFromId(getAzureKeyVaultCertificate.id),
          cert: certPem,
          privateKey: "" // Private keys cannot be extracted from Azure Key Vault for security reasons
        };
      },
      { operation: "fetch-certificate-details", syncId }
    );
    const successfulCertificates = certificateResults
      .filter(
        (
          result
        ): result is PromiseFulfilledResult<
          GetAzureKeyVaultCertificate & {
            key: string;
            cert: string;
            privateKey: string;
          }
        > => result.status === "fulfilled"
      )
      .map((result) => result.value);
    // Log any failures (partial results are still returned)
    const failedFetches = certificateResults.filter((result) => result.status === "rejected");
    if (failedFetches.length > 0) {
      logger.warn(
        {
          syncId,
          failedCount: failedFetches.length,
          totalCount: enabledAzureKeyVaultCertificates.length
        },
        "Some certificate details could not be fetched from Azure Key Vault"
      );
    }
    const res: Record<string, { cert: string; privateKey: string }> = successfulCertificates.reduce(
      (obj, certificate) => ({
        ...obj,
        [certificate.key]: {
          cert: certificate.cert,
          privateKey: certificate.privateKey
        }
      }),
      {} as Record<string, { cert: string; privateKey: string }>
    );
    return {
      vaultCertificates: res,
      disabledAzureKeyVaultCertificateKeys
    };
  };
  /**
   * Pushes the certificates in `certificateMap` to the configured vault and
   * removes Infisical-managed certificates that are no longer active.
   *
   * @returns upload/removal/skip counts plus per-certificate failure details
   *   for UI feedback (undefined when everything succeeded).
   */
  const syncCertificates = async (pkiSync: TPkiSyncWithCredentials, certificateMap: TCertificateMap) => {
    const { accessToken } = await getAzureConnectionAccessToken(pkiSync.connection.id, appConnectionDAL, kmsService);
    // Cast destination config to Azure Key Vault config
    const destinationConfig = pkiSync.destinationConfig as TAzureKeyVaultPkiSyncConfig;
    const { vaultCertificates, disabledAzureKeyVaultCertificateKeys } = await $getAzureKeyVaultCertificates(
      accessToken,
      destinationConfig.vaultBaseUrl,
      pkiSync.id
    );
    const setCertificates: {
      key: string;
      cert: string;
      privateKey: string;
      certificateChain?: string;
    }[] = [];
    // Track which certificates should exist in Azure Key Vault
    const activeCertificateNames = Object.keys(certificateMap);
    // Queue uploads for certificates that are new or whose PEM differs from
    // what the vault currently holds
    Object.entries(certificateMap).forEach(([certName, { cert, privateKey, certificateChain }]) => {
      if (disabledAzureKeyVaultCertificateKeys.includes(certName)) {
        return;
      }
      const existingCert = vaultCertificates[certName];
      const shouldUpdateCert = !existingCert || existingCert.cert !== cert;
      if (shouldUpdateCert) {
        setCertificates.push({
          key: certName,
          cert,
          privateKey,
          certificateChain
        });
      }
    });
    // Identify expired/removed certificates that need to be cleaned up from Azure Key Vault.
    // Only remove certificates that were managed by Infisical (match naming schema).
    const certificatesToRemove = Object.keys(vaultCertificates).filter(
      (vaultCertName) =>
        isInfisicalManagedCertificate(vaultCertName, pkiSync) &&
        !activeCertificateNames.includes(vaultCertName) &&
        !disabledAzureKeyVaultCertificateKeys.includes(vaultCertName)
    );
    // Upload certificates to Azure Key Vault with rate limiting
    const uploadResults = await executeWithConcurrencyLimit(
      setCertificates,
      async ({ key, cert, privateKey, certificateChain }) => {
        try {
          // Azure's import endpoint expects private key, leaf certificate, and
          // chain concatenated into a single PEM payload (in that order)
          let combinedPem = "";
          if (privateKey) {
            combinedPem = privateKey.trim();
          }
          if (combinedPem) {
            combinedPem = `${combinedPem}\n${cert.trim()}`;
          } else {
            combinedPem = cert.trim();
          }
          if (certificateChain) {
            const trimmedChain = certificateChain.trim();
            if (trimmedChain) {
              combinedPem = `${combinedPem}\n${trimmedChain}`;
            }
          }
          // Convert to base64 for Azure Key Vault import
          const base64Cert = Buffer.from(combinedPem).toString("base64");
          // Parse certificate to extract X.509 properties and key properties
          const x509Props = parseCertificateX509Props(cert);
          const keyProps = parseCertificateKeyProps(cert);
          // Build key_props based on key type
          const keyPropsConfig = {
            exportable: true,
            reuse_key: false,
            ...keyProps
          };
          const importData = {
            value: base64Cert,
            policy: {
              key_props: keyPropsConfig,
              secret_props: {
                contentType: "application/x-pem-file"
              },
              x509_props: x509Props
            },
            attributes: {
              enabled: true,
              exportable: true
            }
          };
          const response = await request.post(
            `${destinationConfig.vaultBaseUrl}/certificates/${encodeURIComponent(key)}/import?api-version=7.4`,
            importData,
            {
              headers: {
                Authorization: `Bearer ${accessToken}`,
                "Content-Type": "application/json"
              }
            }
          );
          return { key, success: true, response: response.data as unknown };
        } catch (error) {
          if (error instanceof AxiosError) {
            const errorMessage =
              error.response?.data && typeof error.response.data === "object" && "error" in error.response.data
                ? (error.response.data as { error?: { message?: string } }).error?.message || error.message
                : error.message;
            // Check if the error is due to certificate in deleted but recoverable state
            const isDeletedButRecoverable =
              errorMessage.includes("deleted but recoverable state") || errorMessage.includes("name cannot be reused");
            if (isDeletedButRecoverable) {
              logger.warn(
                { certificateKey: key, syncId: pkiSync.id },
                "Certificate exists in deleted but recoverable state in Azure Key Vault - skipping upload"
              );
              return { key, success: false, skipped: true, reason: "Certificate in deleted but recoverable state" };
            }
            throw new PkiSyncError({
              message: `Failed to upload certificate ${key} to Azure Key Vault: ${errorMessage}`,
              cause: error,
              context: {
                certificateKey: key,
                statusCode: error.response?.status,
                responseData: error.response?.data
              }
            });
          }
          throw error;
        }
      },
      { operation: "upload-certificates", syncId: pkiSync.id }
    );
    // uploadResults is positionally aligned with setCertificates; keep the
    // original index with each rejection so failures are attributed to the
    // correct certificate name below
    const failedUploads = uploadResults
      .map((result, index) => ({ result, index }))
      .filter(({ result }) => result.status === "rejected");
    // Separate successful uploads from skipped certificates
    const successfulUploads = uploadResults.filter((result) => result.status === "fulfilled" && result.value.success);
    const skippedUploads = uploadResults.filter((result) => result.status === "fulfilled" && result.value.skipped);
    // Remove expired/removed certificates from Azure Key Vault
    let removedCertificates = 0;
    let failedRemovals = 0;
    let failedRemovalNames: string[] = [];
    if (certificatesToRemove.length > 0) {
      const removeResults = await executeWithConcurrencyLimit(
        certificatesToRemove,
        async (certName) => {
          try {
            await request.delete(
              `${destinationConfig.vaultBaseUrl}/certificates/${encodeURIComponent(certName)}?api-version=7.4`,
              {
                headers: {
                  Authorization: `Bearer ${accessToken}`
                }
              }
            );
            return { key: certName, success: true };
          } catch (error) {
            // If certificate doesn't exist (404), consider it as successfully removed
            if (error instanceof AxiosError && error.response?.status === 404) {
              return { key: certName, success: true, alreadyRemoved: true };
            }
            logger.error(
              { error, syncId: pkiSync.id, certificateName: certName },
              "Failed to remove expired/removed certificate from Azure Key Vault"
            );
            // Don't throw here - we want to continue with other operations
            return { key: certName, success: false, error: error as Error };
          }
        },
        { operation: "remove-certificates", syncId: pkiSync.id }
      );
      // removeResults is positionally aligned with certificatesToRemove, so the
      // exact failing names can be recovered by index
      failedRemovalNames = removeResults
        .map((result, index) => ({ result, index }))
        .filter(({ result }) => !(result.status === "fulfilled" && result.value.success))
        .map(({ index }) => certificatesToRemove[index]);
      failedRemovals = failedRemovalNames.length;
      removedCertificates = removeResults.length - failedRemovals;
      if (failedRemovals > 0) {
        logger.warn(
          {
            syncId: pkiSync.id,
            failedRemovals,
            successfulRemovals: removedCertificates
          },
          "Some expired/removed certificates could not be removed from Azure Key Vault"
        );
      }
    }
    // Collect detailed information for UI feedback
    const details: {
      failedUploads?: Array<{ name: string; error: string }>;
      failedRemovals?: Array<{ name: string; error: string }>;
      skippedCertificates?: Array<{ name: string; reason: string }>;
    } = {};
    // Collect skipped certificate details
    if (skippedUploads.length > 0) {
      details.skippedCertificates = skippedUploads.map((result) => {
        const certificateName = result.status === "fulfilled" ? result.value.key : "unknown";
        return {
          name: certificateName,
          reason: "Azure Key Vault constraints or certificate already up to date"
        };
      });
    }
    // Collect failed upload details, using each rejection's original position
    // in uploadResults to find its certificate in setCertificates
    if (failedUploads.length > 0) {
      details.failedUploads = failedUploads.map(({ result, index }) => {
        const certificateName = setCertificates[index]?.key || "unknown";
        const errorMessage =
          result.status === "rejected" ? (result.reason as Error)?.message || "Unknown error" : "Unknown error";
        return {
          name: certificateName,
          error: errorMessage
        };
      });
      logger.error(
        {
          syncId: pkiSync.id,
          failedUploads: details.failedUploads,
          failedCount: failedUploads.length
        },
        "Some certificates failed to upload to Azure Key Vault"
      );
    }
    // Collect failed removal details using the exact names captured above
    if (failedRemovals > 0) {
      details.failedRemovals = failedRemovalNames.map((certName) => ({
        name: certName,
        error: "Failed to remove from Azure Key Vault"
      }));
      logger.warn(
        {
          syncId: pkiSync.id,
          failedRemovals: details.failedRemovals,
          successfulRemovals: removedCertificates
        },
        "Some expired/removed certificates could not be removed from Azure Key Vault"
      );
    }
    return {
      uploaded: successfulUploads.length,
      removed: removedCertificates,
      failedRemovals,
      skipped: Object.keys(certificateMap).length - setCertificates.length,
      details: Object.keys(details).length > 0 ? details : undefined
    };
  };
  /**
   * Removes the given certificates from the vault. Only names that match the
   * Infisical naming schema/prefix are deleted; 404s count as already removed.
   * Throws a PkiSyncError summarizing failures if any deletion fails.
   */
  const removeCertificates = async (pkiSync: TPkiSyncWithCredentials, certificateNames: string[]) => {
    const { accessToken } = await getAzureConnectionAccessToken(pkiSync.connection.id, appConnectionDAL, kmsService);
    // Cast destination config to Azure Key Vault config
    const destinationConfig = pkiSync.destinationConfig as TAzureKeyVaultPkiSyncConfig;
    // Only remove certificates that are managed by Infisical (match naming schema)
    const infisicalManagedCertNames = certificateNames.filter((certName) =>
      isInfisicalManagedCertificate(certName, pkiSync)
    );
    const results = await executeWithConcurrencyLimit(
      infisicalManagedCertNames,
      async (certName) => {
        try {
          const response = await request.delete(
            `${destinationConfig.vaultBaseUrl}/certificates/${encodeURIComponent(certName)}?api-version=7.4`,
            {
              headers: {
                Authorization: `Bearer ${accessToken}`
              }
            }
          );
          return { key: certName, success: true, response: response.data as unknown };
        } catch (error) {
          if (error instanceof AxiosError) {
            // If certificate doesn't exist (404), consider it as successfully removed
            if (error.response?.status === 404) {
              return { key: certName, success: true, alreadyRemoved: true };
            }
            throw new PkiSyncError({
              message: `Failed to remove certificate ${certName} from Azure Key Vault`,
              cause: error,
              context: {
                certificateKey: certName,
                statusCode: error.response?.status,
                responseData: error.response?.data
              }
            });
          }
          throw error;
        }
      },
      { operation: "remove-specific-certificates", syncId: pkiSync.id }
    );
    const failedRemovals = results.filter((result) => result.status === "rejected");
    if (failedRemovals.length > 0) {
      const failedReasons = failedRemovals.map((failure) => {
        if (failure.status === "rejected") {
          return (failure.reason as Error)?.message || "Unknown error";
        }
        return "Unknown error";
      });
      throw new PkiSyncError({
        message: `Failed to remove ${failedRemovals.length} certificate(s) from Azure Key Vault`,
        context: {
          failedReasons,
          totalCertificates: infisicalManagedCertNames.length,
          failedCount: failedRemovals.length
        }
      });
    }
    return {
      removed: infisicalManagedCertNames.length - failedRemovals.length,
      failed: failedRemovals.length,
      skipped: certificateNames.length - infisicalManagedCertNames.length
    };
  };
  return {
    syncCertificates,
    removeCertificates
  };
};

View File

@@ -0,0 +1,74 @@
import RE2 from "re2";
import { z } from "zod";
import { AppConnection } from "@app/services/app-connection/app-connection-enums";
import { PkiSync } from "@app/services/pki-sync/pki-sync-enums";
import { PkiSyncSchema } from "@app/services/pki-sync/pki-sync-schemas";
import { AZURE_KEY_VAULT_CERTIFICATE_NAMING } from "./azure-key-vault-pki-sync-constants";
/** Destination config: the base URL of the target Azure Key Vault. */
export const AzureKeyVaultPkiSyncConfigSchema = z.object({
  vaultBaseUrl: z.string().url()
});
/**
 * Per-sync options. The optional certificate name schema is validated by
 * stripping the supported placeholders and checking that the remaining
 * literal text satisfies Azure Key Vault's certificate-name constraints.
 */
const AzureKeyVaultPkiSyncOptionsSchema = z.object({
  canImportCertificates: z.boolean().default(false),
  canRemoveCertificates: z.boolean().default(true),
  certificateNameSchema: z
    .string()
    .optional()
    .refine(
      (schema) => {
        if (!schema) return true;
        // Remove placeholder tokens so only the user-supplied literal
        // characters are validated against Azure's naming rules
        const testName = schema
          .replace(new RE2("\\{\\{certificateId\\}\\}", "g"), "")
          .replace(new RE2("\\{\\{environment\\}\\}", "g"), "");
        const hasForbiddenChars = AZURE_KEY_VAULT_CERTIFICATE_NAMING.FORBIDDEN_CHARACTERS.split("").some((char) =>
          testName.includes(char)
        );
        return AZURE_KEY_VAULT_CERTIFICATE_NAMING.NAME_PATTERN.test(testName) && !hasForbiddenChars;
      },
      {
        message:
          "Certificate name schema must result in names that contain only alphanumeric characters and hyphens (a-z, A-Z, 0-9, -) and be 1-127 characters long when compiled for Azure Key Vault"
      }
    )
});
/** Full Azure Key Vault PKI sync entity (base sync + destination specifics). */
export const AzureKeyVaultPkiSyncSchema = PkiSyncSchema.extend({
  destination: z.literal(PkiSync.AzureKeyVault),
  destinationConfig: AzureKeyVaultPkiSyncConfigSchema,
  syncOptions: AzureKeyVaultPkiSyncOptionsSchema
});
/** Request body for creating an Azure Key Vault PKI sync. */
export const CreateAzureKeyVaultPkiSyncSchema = z.object({
  name: z.string().trim().min(1).max(64),
  description: z.string().optional(),
  isAutoSyncEnabled: z.boolean().default(true),
  destinationConfig: AzureKeyVaultPkiSyncConfigSchema,
  // Empty object is valid — option defaults fill in the rest
  syncOptions: AzureKeyVaultPkiSyncOptionsSchema.optional().default({}),
  subscriberId: z.string().optional(),
  connectionId: z.string(),
  projectId: z.string().trim().min(1)
});
/** Request body for updating an Azure Key Vault PKI sync; all fields optional. */
export const UpdateAzureKeyVaultPkiSyncSchema = z.object({
  name: z.string().trim().min(1).max(64).optional(),
  description: z.string().optional(),
  isAutoSyncEnabled: z.boolean().optional(),
  destinationConfig: AzureKeyVaultPkiSyncConfigSchema.optional(),
  syncOptions: AzureKeyVaultPkiSyncOptionsSchema.optional(),
  subscriberId: z.string().optional(),
  connectionId: z.string().optional()
});
/** Static capability descriptor for the destination listing endpoint. */
export const AzureKeyVaultPkiSyncListItemSchema = z.object({
  name: z.literal("Azure Key Vault"),
  connection: z.literal(AppConnection.AzureKeyVault),
  destination: z.literal(PkiSync.AzureKeyVault),
  canImportCertificates: z.literal(false),
  // NOTE(review): options schema defaults canRemoveCertificates to true —
  // confirm this literal false is the intended advertised capability
  canRemoveCertificates: z.literal(false)
});

View File

@@ -0,0 +1,38 @@
import { z } from "zod";
import { TAzureKeyVaultConnection } from "@app/services/app-connection/azure-key-vault";
import {
AzureKeyVaultPkiSyncConfigSchema,
AzureKeyVaultPkiSyncSchema,
CreateAzureKeyVaultPkiSyncSchema,
UpdateAzureKeyVaultPkiSyncSchema
} from "./azure-key-vault-pki-sync-schemas";
/** Shape of a certificate item returned by the Azure Key Vault REST API. */
export type GetAzureKeyVaultCertificate = {
  // Full certificate identifier URL; the name is its last path segment
  id: string;
  value: string;
  attributes: {
    enabled: boolean;
    created: number;
    updated: number;
    recoveryLevel: string;
    tags?: Record<string, string>;
  };
  // Thumbprint (base64url SHA-1) — present on detail responses
  x5t?: string;
  contentType?: string;
  key?: string;
  // Certificate body as base64-encoded DER — present on detail responses
  cer?: string;
};
// Types inferred from the zod schemas, plus the sync joined with its connection.
export type TAzureKeyVaultPkiSyncConfig = z.infer<typeof AzureKeyVaultPkiSyncConfigSchema>;
export type TAzureKeyVaultPkiSync = z.infer<typeof AzureKeyVaultPkiSyncSchema>;
export type TAzureKeyVaultPkiSyncInput = z.infer<typeof CreateAzureKeyVaultPkiSyncSchema>;
export type TAzureKeyVaultPkiSyncUpdate = z.infer<typeof UpdateAzureKeyVaultPkiSyncSchema>;
/** Sync with its app connection attached (credentials decryptable by the caller). */
export type TAzureKeyVaultPkiSyncWithCredentials = TAzureKeyVaultPkiSync & {
  connection: TAzureKeyVaultConnection;
};

View File

@@ -0,0 +1,4 @@
export * from "./azure-key-vault-pki-sync-constants";
export * from "./azure-key-vault-pki-sync-fns";
export * from "./azure-key-vault-pki-sync-schemas";
export * from "./azure-key-vault-pki-sync-types";

View File

@@ -0,0 +1,94 @@
import { getConfig } from "@app/lib/config/env";
import { logger } from "@app/lib/logger";
import { QueueJobs, QueueName, TQueueServiceFactory } from "@app/queue";
import { TPkiSyncDALFactory } from "./pki-sync-dal";
import { TPkiSyncQueueFactory } from "./pki-sync-queue";
/** Dependencies for {@link pkiSyncCleanupQueueServiceFactory}. */
type TPkiSyncCleanupQueueServiceFactoryDep = {
  queueService: TQueueServiceFactory;
  pkiSyncDAL: Pick<TPkiSyncDALFactory, "findPkiSyncsWithExpiredCertificates">;
  pkiSyncQueue: Pick<TPkiSyncQueueFactory, "queuePkiSyncSyncCertificatesById">;
};

export type TPkiSyncCleanupQueueServiceFactory = ReturnType<typeof pkiSyncCleanupQueueServiceFactory>;
/**
 * Daily scheduled job that re-queues PKI syncs whose subscribers had
 * certificates expire the previous day, so expired certificates are cleaned
 * out of sync destinations promptly.
 */
export const pkiSyncCleanupQueueServiceFactory = ({
  queueService,
  pkiSyncDAL,
  pkiSyncQueue
}: TPkiSyncCleanupQueueServiceFactoryDep) => {
  const appCfg = getConfig();
  // Finds affected syncs and queues a certificate sync for each; per-sync
  // queue failures are logged and skipped so one failure doesn't block others.
  const syncExpiredCertificatesForPkiSyncs = async () => {
    try {
      const pkiSyncsWithExpiredCerts = await pkiSyncDAL.findPkiSyncsWithExpiredCertificates();
      if (pkiSyncsWithExpiredCerts.length === 0) {
        logger.info("No PKI syncs found with certificates that expired the previous day");
        return;
      }
      logger.info(
        `Found ${pkiSyncsWithExpiredCerts.length} PKI sync(s) with certificates that expired the previous day`
      );
      // Trigger sync for each PKI sync that has expired certificates
      for (const { id: syncId, subscriberId } of pkiSyncsWithExpiredCerts) {
        try {
          // eslint-disable-next-line no-await-in-loop
          await pkiSyncQueue.queuePkiSyncSyncCertificatesById({
            syncId
          });
          logger.info(
            `Successfully queued PKI sync ${syncId} for subscriber ${subscriberId} due to expired certificates`
          );
        } catch (error) {
          logger.error(error, `Failed to queue PKI sync ${syncId} for subscriber ${subscriberId}`);
        }
      }
    } catch (error) {
      logger.error(error, "Failed to sync expired certificates for PKI syncs");
      throw error;
    }
  };
  // Registers the worker and the daily (midnight UTC) schedule. Must only run
  // on the primary instance to avoid duplicate scheduling.
  const init = async () => {
    if (appCfg.isSecondaryInstance) {
      return;
    }
    // Remove any legacy repeatable job before registering the pg-backed one
    await queueService.stopRepeatableJob(
      QueueName.PkiSyncCleanup,
      QueueJobs.PkiSyncCleanup,
      { pattern: "0 0 * * *", utc: true },
      QueueName.PkiSyncCleanup // just a job id
    );
    await queueService.startPg<QueueName.PkiSyncCleanup>(
      QueueJobs.PkiSyncCleanup,
      async () => {
        try {
          logger.info(`${QueueName.PkiSyncCleanup}: queue task started`);
          await syncExpiredCertificatesForPkiSyncs();
          logger.info(`${QueueName.PkiSyncCleanup}: queue task completed`);
        } catch (error) {
          logger.error(error, `${QueueName.PkiSyncCleanup}: PKI sync cleanup failed`);
          throw error;
        }
      },
      {
        batchSize: 1,
        workerCount: 1,
        pollingIntervalSeconds: 120
      }
    );
    await queueService.schedulePg(QueueJobs.PkiSyncCleanup, "0 0 * * *", undefined, { tz: "UTC" });
  };
  return {
    init,
    syncExpiredCertificatesForPkiSyncs
  };
};

View File

@@ -0,0 +1,314 @@
import { Knex } from "knex";
import { TDbClient } from "@app/db";
import { TableName, TPkiSyncs } from "@app/db/schemas";
import { DatabaseError } from "@app/lib/errors";
import { buildFindFilter, ormify, prependTableNameToFindFilter, selectAllTableCols } from "@app/lib/knex";
import { PkiSync } from "./pki-sync-enums";
/** DAL return type, used as the injectable dependency type elsewhere. */
export type TPkiSyncDALFactory = ReturnType<typeof pkiSyncDALFactory>;

/** Filter shape accepted by the PKI sync query builders below. */
type PkiSyncFindFilter = Parameters<typeof buildFindFilter<TPkiSyncs>>[0];
/**
 * Base query joining PKI syncs with their app connection, selecting the
 * connection columns under "appConnection*" aliases so expandPkiSync can
 * rebuild a nested connection object. Uses the replica node unless a
 * transaction is supplied.
 */
const basePkiSyncQuery = ({ filter, db, tx }: { db: TDbClient; filter?: PkiSyncFindFilter; tx?: Knex }) => {
  const query = (tx || db.replicaNode())(TableName.PkiSync)
    .leftJoin(TableName.AppConnection, `${TableName.PkiSync}.connectionId`, `${TableName.AppConnection}.id`)
    .select(selectAllTableCols(TableName.PkiSync))
    .select(
      // app connection fields
      db.ref("name").withSchema(TableName.AppConnection).as("appConnectionName"),
      db.ref("app").withSchema(TableName.AppConnection).as("appConnectionApp"),
      db.ref("encryptedCredentials").withSchema(TableName.AppConnection).as("appConnectionEncryptedCredentials"),
      db.ref("orgId").withSchema(TableName.AppConnection).as("appConnectionOrgId"),
      db.ref("projectId").withSchema(TableName.AppConnection).as("appConnectionProjectId"),
      db.ref("method").withSchema(TableName.AppConnection).as("appConnectionMethod"),
      db.ref("description").withSchema(TableName.AppConnection).as("appConnectionDescription"),
      db.ref("version").withSchema(TableName.AppConnection).as("appConnectionVersion"),
      db.ref("gatewayId").withSchema(TableName.AppConnection).as("appConnectionGatewayId"),
      db.ref("createdAt").withSchema(TableName.AppConnection).as("appConnectionCreatedAt"),
      db.ref("updatedAt").withSchema(TableName.AppConnection).as("appConnectionUpdatedAt"),
      db
        .ref("isPlatformManagedCredentials")
        .withSchema(TableName.AppConnection)
        .as("appConnectionIsPlatformManagedCredentials")
    );
  if (filter) {
    // Filter keys are prefixed with the table name to disambiguate joined columns
    // eslint-disable-next-line @typescript-eslint/no-misused-promises
    void query.where(buildFindFilter(prependTableNameToFindFilter(TableName.PkiSync, filter)));
  }
  return query;
};
/**
 * Same as basePkiSyncQuery but additionally left-joins the PKI subscriber,
 * exposing its id and name under aliases for expandPkiSyncWithSubscriber.
 */
const basePkiSyncWithSubscriberQuery = ({
  filter,
  db,
  tx
}: {
  db: TDbClient;
  filter?: PkiSyncFindFilter;
  tx?: Knex;
}) => {
  const query = (tx || db.replicaNode())(TableName.PkiSync)
    .leftJoin(TableName.AppConnection, `${TableName.PkiSync}.connectionId`, `${TableName.AppConnection}.id`)
    .leftJoin(TableName.PkiSubscriber, `${TableName.PkiSync}.subscriberId`, `${TableName.PkiSubscriber}.id`)
    .select(selectAllTableCols(TableName.PkiSync))
    .select(
      // app connection fields
      db.ref("name").withSchema(TableName.AppConnection).as("appConnectionName"),
      db.ref("app").withSchema(TableName.AppConnection).as("appConnectionApp"),
      db.ref("encryptedCredentials").withSchema(TableName.AppConnection).as("appConnectionEncryptedCredentials"),
      db.ref("orgId").withSchema(TableName.AppConnection).as("appConnectionOrgId"),
      db.ref("projectId").withSchema(TableName.AppConnection).as("appConnectionProjectId"),
      db.ref("method").withSchema(TableName.AppConnection).as("appConnectionMethod"),
      db.ref("description").withSchema(TableName.AppConnection).as("appConnectionDescription"),
      db.ref("version").withSchema(TableName.AppConnection).as("appConnectionVersion"),
      db.ref("gatewayId").withSchema(TableName.AppConnection).as("appConnectionGatewayId"),
      db.ref("createdAt").withSchema(TableName.AppConnection).as("appConnectionCreatedAt"),
      db.ref("updatedAt").withSchema(TableName.AppConnection).as("appConnectionUpdatedAt"),
      db
        .ref("isPlatformManagedCredentials")
        .withSchema(TableName.AppConnection)
        .as("appConnectionIsPlatformManagedCredentials"),
      // pki subscriber fields
      db.ref("id").withSchema(TableName.PkiSubscriber).as("pkiSubscriberId"),
      db.ref("name").withSchema(TableName.PkiSubscriber).as("subscriberName")
    );
  if (filter) {
    // eslint-disable-next-line @typescript-eslint/no-misused-promises
    void query.where(buildFindFilter(prependTableNameToFindFilter(TableName.PkiSync, filter)));
  }
  return query;
};
/**
 * Reshapes a joined PKI-sync row: strips the flat "appConnection*" alias
 * columns and rebuilds them as a nested `connection` record. The aliased
 * name/app values are also kept at the top level (appConnectionName /
 * appConnectionApp) for backward compatibility.
 */
const expandPkiSync = (pkiSync: Awaited<ReturnType<typeof basePkiSyncQuery>>[number]) => {
  const {
    appConnectionName: connectionName,
    appConnectionApp: connectionApp,
    appConnectionEncryptedCredentials: encryptedCredentials,
    appConnectionOrgId: connectionOrgId,
    appConnectionProjectId: connectionProjectId,
    appConnectionMethod: connectionMethod,
    appConnectionDescription: connectionDescription,
    appConnectionVersion: connectionVersion,
    appConnectionGatewayId: connectionGatewayId,
    appConnectionCreatedAt: connectionCreatedAt,
    appConnectionUpdatedAt: connectionUpdatedAt,
    appConnectionIsPlatformManagedCredentials: isPlatformManagedCredentials,
    ...rest
  } = pkiSync;
  return {
    ...rest,
    destination: rest.destination as PkiSync,
    destinationConfig: rest.destinationConfig as Record<string, unknown>,
    syncOptions: rest.syncOptions as Record<string, unknown>,
    appConnectionName: connectionName,
    appConnectionApp: connectionApp,
    connection: {
      id: rest.connectionId,
      name: connectionName,
      app: connectionApp,
      encryptedCredentials,
      orgId: connectionOrgId,
      projectId: connectionProjectId,
      method: connectionMethod,
      description: connectionDescription,
      version: connectionVersion,
      gatewayId: connectionGatewayId,
      createdAt: connectionCreatedAt,
      updatedAt: connectionUpdatedAt,
      isPlatformManagedCredentials
    }
  };
};
/**
 * Like expandPkiSync, but also builds a `subscriber` record from the joined
 * PKI subscriber columns (null when the sync has no subscriber attached).
 */
const expandPkiSyncWithSubscriber = (pkiSync: Awaited<ReturnType<typeof basePkiSyncWithSubscriberQuery>>[number]) => {
  const {
    appConnectionName,
    appConnectionApp,
    appConnectionEncryptedCredentials,
    appConnectionOrgId,
    appConnectionProjectId,
    appConnectionMethod,
    appConnectionDescription,
    appConnectionVersion,
    appConnectionGatewayId,
    appConnectionCreatedAt,
    appConnectionUpdatedAt,
    appConnectionIsPlatformManagedCredentials,
    pkiSubscriberId,
    subscriberName,
    ...el
  } = pkiSync;
  return {
    ...el,
    destination: el.destination as PkiSync,
    destinationConfig: el.destinationConfig as Record<string, unknown>,
    syncOptions: el.syncOptions as Record<string, unknown>,
    appConnectionName,
    appConnectionApp,
    connection: {
      id: el.connectionId,
      name: appConnectionName,
      app: appConnectionApp,
      encryptedCredentials: appConnectionEncryptedCredentials,
      orgId: appConnectionOrgId,
      projectId: appConnectionProjectId,
      method: appConnectionMethod,
      description: appConnectionDescription,
      version: appConnectionVersion,
      gatewayId: appConnectionGatewayId,
      createdAt: appConnectionCreatedAt,
      updatedAt: appConnectionUpdatedAt,
      isPlatformManagedCredentials: appConnectionIsPlatformManagedCredentials
    },
    // Left join may produce null columns; only expose a subscriber when both are set
    subscriber: pkiSubscriberId && subscriberName ? { id: pkiSubscriberId, name: subscriberName } : null
  };
};
export const pkiSyncDALFactory = (db: TDbClient) => {
const pkiSyncOrm = ormify(db, TableName.PkiSync);
const findByProjectId = async (projectId: string, tx?: Knex) => {
try {
const pkiSyncs = await basePkiSyncQuery({ filter: { projectId }, db, tx });
return pkiSyncs.map(expandPkiSync);
} catch (error) {
throw new DatabaseError({ error, name: "Find By Project ID - PKI Sync" });
}
};
const findByProjectIdWithSubscribers = async (projectId: string, tx?: Knex) => {
try {
const pkiSyncs = await basePkiSyncWithSubscriberQuery({ filter: { projectId }, db, tx });
return pkiSyncs.map(expandPkiSyncWithSubscriber);
} catch (error) {
throw new DatabaseError({ error, name: "Find By Project ID With Subscribers - PKI Sync" });
}
};
const findBySubscriberId = async (subscriberId: string, tx?: Knex) => {
try {
const pkiSyncs = await basePkiSyncQuery({ filter: { subscriberId }, db, tx });
return pkiSyncs.map(expandPkiSync);
} catch (error) {
throw new DatabaseError({ error, name: "Find By Subscriber ID - PKI Sync" });
}
};
const findByIdAndProjectId = async (id: string, projectId: string, tx?: Knex) => {
try {
const pkiSync = await basePkiSyncQuery({ filter: { id, projectId }, db, tx }).first();
return pkiSync ? expandPkiSync(pkiSync) : undefined;
} catch (error) {
throw new DatabaseError({ error, name: "Find By ID and Project ID - PKI Sync" });
}
};
const findByNameAndProjectId = async (name: string, projectId: string, tx?: Knex) => {
try {
const pkiSync = await basePkiSyncQuery({ filter: { name, projectId }, db, tx }).first();
return pkiSync ? expandPkiSync(pkiSync) : undefined;
} catch (error) {
throw new DatabaseError({ error, name: "Find By Name and Project ID - PKI Sync" });
}
};
const findById = async (id: string, tx?: Knex) => {
try {
const pkiSync = await basePkiSyncQuery({ filter: { id }, db, tx }).first();
return pkiSync ? expandPkiSync(pkiSync) : undefined;
} catch (error) {
throw new DatabaseError({ error, name: "Find By ID - PKI Sync" });
}
};
const findOne = async (filter: Parameters<(typeof pkiSyncOrm)["findOne"]>[0], tx?: Knex) => {
try {
const pkiSync = await basePkiSyncQuery({ filter, db, tx }).first();
return pkiSync ? expandPkiSync(pkiSync) : undefined;
} catch (error) {
throw new DatabaseError({ error, name: "Find One - PKI Sync" });
}
};
const find = async (filter: Parameters<(typeof pkiSyncOrm)["find"]>[0], tx?: Knex) => {
try {
const pkiSyncs = await basePkiSyncQuery({ filter, db, tx });
return pkiSyncs.map(expandPkiSync);
} catch (error) {
throw new DatabaseError({ error, name: "Find - PKI Sync" });
}
};
const create = async (data: Parameters<(typeof pkiSyncOrm)["create"]>[0]) => {
const pkiSync = (await pkiSyncOrm.transaction(async (tx) => {
const sync = await pkiSyncOrm.create(data, tx);
return basePkiSyncQuery({ filter: { id: sync.id }, db, tx }).first();
}))!;
return expandPkiSync(pkiSync);
};
const updateById = async (syncId: string, data: Parameters<(typeof pkiSyncOrm)["updateById"]>[1]) => {
const pkiSync = (await pkiSyncOrm.transaction(async (tx) => {
const sync = await pkiSyncOrm.updateById(syncId, data, tx);
return basePkiSyncQuery({ filter: { id: sync.id }, db, tx }).first();
}))!;
return expandPkiSync(pkiSync);
};
/**
 * Returns (id, subscriberId) pairs for PKI syncs whose subscriber has at
 * least one certificate with notAfter in [yesterday 00:00, today 00:00)
 * server-local time — i.e. certificates that expired yesterday — so those
 * syncs can be re-run to clean up their destinations.
 */
const findPkiSyncsWithExpiredCertificates = async (): Promise<Array<{ id: string; subscriberId: string }>> => {
  try {
    // Window start: midnight at the beginning of yesterday (local time).
    const yesterday = new Date();
    yesterday.setDate(yesterday.getDate() - 1);
    yesterday.setHours(0, 0, 0, 0);
    // Window end (exclusive): midnight at the beginning of today.
    const today = new Date();
    today.setHours(0, 0, 0, 0);
    const pkiSyncs = (await db
      .replicaNode()(TableName.PkiSync)
      .select(`${TableName.PkiSync}.id`, `${TableName.PkiSync}.subscriberId`)
      .innerJoin(
        TableName.Certificate,
        `${TableName.PkiSync}.subscriberId`,
        `${TableName.Certificate}.pkiSubscriberId`
      )
      .where(`${TableName.Certificate}.notAfter`, ">=", yesterday)
      .where(`${TableName.Certificate}.notAfter`, "<", today)
      .whereNotNull(`${TableName.Certificate}.pkiSubscriberId`)
      .whereNotNull(`${TableName.PkiSync}.subscriberId`)
      // Group so a sync with several expired certificates appears once.
      .groupBy(`${TableName.PkiSync}.id`, `${TableName.PkiSync}.subscriberId`)) as Array<{
      id: string;
      subscriberId: string;
    }>;
    return pkiSyncs;
  } catch (error) {
    throw new DatabaseError({ error, name: "Find PKI syncs with expired certificates" });
  }
};
// Expose the base ORM methods, overriding the read/write paths above with
// versions that return expanded (joined) records.
return {
  ...pkiSyncOrm,
  findByProjectId,
  findByProjectIdWithSubscribers,
  findBySubscriberId,
  findByIdAndProjectId,
  findByNameAndProjectId,
  findById,
  findOne,
  find,
  create,
  updateById,
  findPkiSyncsWithExpiredCertificates
};
};

View File

@@ -0,0 +1,16 @@
// Supported PKI sync destination providers.
export enum PkiSync {
  AzureKeyVault = "azure-key-vault"
}

// Lifecycle states persisted on a sync record for each operation kind
// (sync / import / remove).
export enum PkiSyncStatus {
  Pending = "pending",
  Running = "running",
  Succeeded = "succeeded",
  Failed = "failed"
}

// The queueable PKI sync operations.
export enum PkiSyncAction {
  SyncCertificates = "sync-certificates",
  ImportCertificates = "import-certificates",
  RemoveCertificates = "remove-certificates"
}

View File

@@ -0,0 +1,25 @@
/**
 * Error thrown by PKI sync operations.
 *
 * Carries an optional underlying `cause`, free-form `context` for logging,
 * and a `shouldRetry` flag (default true) that the queue uses to decide
 * whether a failed job should be retried or marked terminally failed.
 */
export class PkiSyncError extends Error {
  public context?: Record<string, unknown>;
  public cause?: Error;
  public shouldRetry: boolean;

  constructor(options: { message: string; cause?: Error; context?: Record<string, unknown>; shouldRetry?: boolean }) {
    super(options.message);
    this.name = "PkiSyncError";
    this.cause = options.cause;
    this.context = options.context;
    // Retry by default unless the caller explicitly opts out.
    this.shouldRetry = options.shouldRetry ?? true;
  }
}

View File

@@ -0,0 +1,223 @@
import * as handlebars from "handlebars";
import { z, ZodSchema } from "zod";
import { TLicenseServiceFactory } from "@app/ee/services/license/license-service";
import { BadRequestError } from "@app/lib/errors";
import { TAppConnectionDALFactory } from "@app/services/app-connection/app-connection-dal";
import { TKmsServiceFactory } from "@app/services/kms/kms-service";
import { AZURE_KEY_VAULT_PKI_SYNC_LIST_OPTION } from "./azure-key-vault/azure-key-vault-pki-sync-constants";
import { azureKeyVaultPkiSyncFactory } from "./azure-key-vault/azure-key-vault-pki-sync-fns";
import { PkiSync } from "./pki-sync-enums";
import { TCertificateMap, TPkiSyncWithCredentials } from "./pki-sync-types";
// PKI sync destinations gated behind the enterprise plan (none currently).
const ENTERPRISE_PKI_SYNCS: PkiSync[] = [];

// Destination -> public listing metadata (display name, capabilities).
const PKI_SYNC_LIST_OPTIONS = {
  [PkiSync.AzureKeyVault]: AZURE_KEY_VAULT_PKI_SYNC_LIST_OPTION
};
/**
 * Throws a BadRequestError when the org's plan lacks enterprise certificate
 * syncs but the requested destination is enterprise-gated. No-op otherwise.
 */
export const enterprisePkiSyncCheck = async (
  licenseService: Pick<TLicenseServiceFactory, "getPlan">,
  orgId: string,
  pkiSyncDestination: PkiSync,
  errorMessage?: string
) => {
  const plan = await licenseService.getPlan(orgId);
  const isEnterpriseOnly = ENTERPRISE_PKI_SYNCS.includes(pkiSyncDestination);
  if (isEnterpriseOnly && !plan.enterpriseCertificateSyncs) {
    throw new BadRequestError({
      message: errorMessage || "Failed to create PKI sync due to plan restriction. Upgrade plan to create PKI sync."
    });
  }
};
// Returns every PKI sync provider option, alphabetized by display name.
export const listPkiSyncOptions = () =>
  Object.values(PKI_SYNC_LIST_OPTIONS).sort((a, b) => a.name.localeCompare(b.name));
/**
 * Returns the import/remove capability flags for a destination provider.
 * Throws BadRequestError for destinations without a registered option entry.
 */
export const getPkiSyncProviderCapabilities = (destination: PkiSync) => {
  const providerOption = PKI_SYNC_LIST_OPTIONS[destination];
  if (!providerOption) {
    throw new BadRequestError({ message: `Unsupported PKI sync destination: ${destination}` });
  }
  const { canImportCertificates, canRemoveCertificates } = providerOption;
  return { canImportCertificates, canRemoveCertificates };
};
// Type guard: true when `data` validates against the given zod schema.
export const matchesSchema = <T extends ZodSchema>(schema: T, data: unknown): data is z.infer<T> => {
  const result = schema.safeParse(data);
  return result.success;
};
/**
 * Normalizes an unknown thrown value into a human-readable message:
 * Error instances yield their message, strings pass through, anything
 * else yields a generic fallback.
 */
export const parsePkiSyncErrorMessage = (error: unknown): string => {
  if (typeof error === "string") return error;
  if (error instanceof Error) return error.message;
  return "An unknown error occurred during PKI sync operation";
};
/**
 * Renames every entry in the certificate map according to a handlebars name
 * schema (placeholders: certificateId, environment). Returns the input map
 * unchanged when no schema is provided.
 */
export const applyCertificateNameSchema = (
  certificateMap: TCertificateMap,
  environment: string,
  schema?: string
): TCertificateMap => {
  if (!schema) return certificateMap;
  const render = handlebars.compile(schema);
  const renamed: TCertificateMap = {};
  Object.entries(certificateMap).forEach(([certificateId, value]) => {
    renamed[render({ certificateId, environment })] = value;
  });
  return renamed;
};
/**
 * Inverse of applyCertificateNameSchema: given destination certificate names
 * produced by the schema, recovers the original certificateId keys by
 * stripping the schema's literal prefix and suffix. Entries that do not match
 * the schema are dropped. Returns the input map unchanged when no schema is
 * provided. Assumes the schema contains the {{certificateId}} placeholder
 * (enforced by PkiSyncOptionsSchema validation).
 */
export const stripCertificateNameSchema = (
  certificateMap: TCertificateMap,
  environment: string,
  schema?: string
): TCertificateMap => {
  if (!schema) return certificateMap;
  // Render all placeholders except certificateId, which is kept as a marker.
  const compiledSchemaPattern = handlebars.compile(schema)({
    certificateId: "{{certificateId}}",
    environment
  });
  const parts = compiledSchemaPattern.split("{{certificateId}}");
  const prefix = parts[0];
  const suffix = parts[parts.length - 1];
  const strippedMap: TCertificateMap = {};
  for (const [name, value] of Object.entries(certificateMap)) {
    // Guard against overlapping prefix/suffix matches: a name shorter than
    // prefix + suffix (e.g. name "a-b" with prefix "a-" and suffix "-b") can
    // satisfy both startsWith and endsWith while containing no id portion;
    // slicing it would fabricate an empty/garbage key.
    if (name.length < prefix.length + suffix.length) {
      // eslint-disable-next-line no-continue
      continue;
    }
    if (!name.startsWith(prefix) || !name.endsWith(suffix)) {
      // eslint-disable-next-line no-continue
      continue;
    }
    const strippedName = name.slice(prefix.length, name.length - suffix.length);
    strippedMap[strippedName] = value;
  }
  return strippedMap;
};
/**
 * Checks whether a destination certificate name could have been produced by
 * the given name schema. With no schema every name matches; with a schema
 * lacking the {{certificateId}} placeholder only an exact match passes;
 * otherwise the name must carry the schema's literal prefix and suffix.
 */
export const matchesCertificateNameSchema = (name: string, environment: string, schema?: string): boolean => {
  if (!schema) return true;
  const pattern = handlebars.compile(schema)({
    certificateId: "{{certificateId}}",
    environment
  });
  // No id placeholder means the schema renders to a fixed literal name.
  if (!pattern.includes("{{certificateId}}")) {
    return name === pattern;
  }
  const segments = pattern.split("{{certificateId}}");
  const prefix = segments[0];
  const suffix = segments[segments.length - 1];
  const prefixOk = prefix === "" || name.startsWith(prefix);
  const suffixOk = suffix === "" || name.endsWith(suffix);
  return prefixOk && suffixOk;
};
// Narrow check that a sync targets the Azure Key Vault destination.
const isAzureKeyVaultPkiSync = (pkiSync: TPkiSyncWithCredentials): boolean =>
  pkiSync.destination === PkiSync.AzureKeyVault;
/**
 * Destination-dispatch facade for PKI sync operations. Each method routes to
 * the provider implementation for the sync's destination and throws for
 * unsupported destinations.
 */
export const PkiSyncFns = {
  /**
   * Fetches certificates FROM the destination. No current provider supports
   * this (Azure Key Vault cannot export private keys), so this always throws.
   */
  getCertificates: async (
    pkiSync: TPkiSyncWithCredentials,
    // eslint-disable-next-line @typescript-eslint/no-unused-vars
    dependencies: {
      appConnectionDAL: Pick<TAppConnectionDALFactory, "findById" | "updateById">;
      kmsService: Pick<TKmsServiceFactory, "createCipherPairWithDataKey">;
    }
  ): Promise<TCertificateMap> => {
    if (pkiSync.destination === PkiSync.AzureKeyVault) {
      throw new Error(
        "Azure Key Vault does not support importing certificates into Infisical (private keys cannot be extracted)"
      );
    }
    throw new Error(`Unsupported PKI sync destination: ${String(pkiSync.destination)}`);
  },
  /**
   * Pushes the given certificate map to the destination and returns
   * upload/removal/skip counts plus per-item failure details.
   */
  syncCertificates: async (
    pkiSync: TPkiSyncWithCredentials,
    certificateMap: TCertificateMap,
    dependencies: {
      appConnectionDAL: Pick<TAppConnectionDALFactory, "findById" | "updateById">;
      kmsService: Pick<TKmsServiceFactory, "createCipherPairWithDataKey">;
    }
  ): Promise<{
    uploaded: number;
    removed?: number;
    failedRemovals?: number;
    skipped: number;
    details?: {
      failedUploads?: Array<{ name: string; error: string }>;
      failedRemovals?: Array<{ name: string; error: string }>;
      skippedCertificates?: Array<{ name: string; reason: string }>;
    };
  }> => {
    if (pkiSync.destination !== PkiSync.AzureKeyVault) {
      throw new Error(`Unsupported PKI sync destination: ${String(pkiSync.destination)}`);
    }
    if (!isAzureKeyVaultPkiSync(pkiSync)) {
      throw new Error("Invalid Azure Key Vault PKI sync configuration");
    }
    const provider = azureKeyVaultPkiSyncFactory(dependencies);
    return provider.syncCertificates(pkiSync, certificateMap);
  },
  /**
   * Removes the named certificates from the destination.
   */
  removeCertificates: async (
    pkiSync: TPkiSyncWithCredentials,
    certificateNames: string[],
    dependencies: {
      appConnectionDAL: Pick<TAppConnectionDALFactory, "findById" | "updateById">;
      kmsService: Pick<TKmsServiceFactory, "createCipherPairWithDataKey">;
    }
  ): Promise<void> => {
    if (pkiSync.destination !== PkiSync.AzureKeyVault) {
      throw new Error(`Unsupported PKI sync destination: ${String(pkiSync.destination)}`);
    }
    if (!isAzureKeyVaultPkiSync(pkiSync)) {
      throw new Error("Invalid Azure Key Vault PKI sync configuration");
    }
    const provider = azureKeyVaultPkiSyncFactory(dependencies);
    await provider.removeCertificates(pkiSync, certificateNames);
  }
};

View File

@@ -0,0 +1,11 @@
import { AppConnection } from "@app/services/app-connection/app-connection-enums";
import { PkiSync } from "./pki-sync-enums";
// Destination -> human-readable display name.
export const PKI_SYNC_NAME_MAP: Record<PkiSync, string> = {
  [PkiSync.AzureKeyVault]: "Azure Key Vault"
};

// Destination -> app connection type required to authenticate the sync.
export const PKI_SYNC_CONNECTION_MAP: Record<PkiSync, AppConnection> = {
  [PkiSync.AzureKeyVault]: AppConnection.AzureKeyVault
};

View File

@@ -0,0 +1,753 @@
/* eslint-disable no-await-in-loop */
import opentelemetry from "@opentelemetry/api";
import * as x509 from "@peculiar/x509";
import { AxiosError } from "axios";
import { Job } from "bullmq";
import handlebars from "handlebars";
import { EventType, TAuditLogServiceFactory } from "@app/ee/services/audit-log/audit-log-types";
import { TLicenseServiceFactory } from "@app/ee/services/license/license-service";
import { KeyStorePrefixes, TKeyStoreFactory } from "@app/keystore/keystore";
import { getConfig } from "@app/lib/config/env";
import { logger } from "@app/lib/logger";
import { QueueJobs, QueueName, TQueueServiceFactory } from "@app/queue";
import { decryptAppConnectionCredentials } from "@app/services/app-connection/app-connection-fns";
import { ActorType } from "@app/services/auth/auth-type";
import { TKmsServiceFactory } from "@app/services/kms/kms-service";
import { TProjectDALFactory } from "@app/services/project/project-dal";
import { getProjectKmsCertificateKeyId } from "@app/services/project/project-fns";
import { TAppConnectionDALFactory } from "../app-connection/app-connection-dal";
import { TCertificateBodyDALFactory } from "../certificate/certificate-body-dal";
import { TCertificateDALFactory } from "../certificate/certificate-dal";
import { getCertificateCredentials } from "../certificate/certificate-fns";
import { TCertificateSecretDALFactory } from "../certificate/certificate-secret-dal";
import { TCertificateAuthorityCertDALFactory } from "../certificate-authority/certificate-authority-cert-dal";
import { TCertificateAuthorityDALFactory } from "../certificate-authority/certificate-authority-dal";
import { getCaCertChain } from "../certificate-authority/certificate-authority-fns";
import { TPkiSyncDALFactory } from "./pki-sync-dal";
import { PkiSyncStatus } from "./pki-sync-enums";
import { PkiSyncError } from "./pki-sync-errors";
import { enterprisePkiSyncCheck, parsePkiSyncErrorMessage, PkiSyncFns } from "./pki-sync-fns";
import {
TCertificateMap,
TPkiSyncImportCertificatesDTO,
TPkiSyncRaw,
TPkiSyncRemoveCertificatesDTO,
TPkiSyncSyncCertificatesDTO,
TPkiSyncWithCredentials,
TQueuePkiSyncImportCertificatesByIdDTO,
TQueuePkiSyncRemoveCertificatesByIdDTO,
TQueuePkiSyncSyncCertificatesByIdDTO
} from "./pki-sync-types";
export type TPkiSyncQueueFactory = ReturnType<typeof pkiSyncQueueFactory>;

// Dependencies injected into the queue factory, narrowed to the methods used.
type TPkiSyncQueueFactoryDep = {
  queueService: Pick<TQueueServiceFactory, "queue" | "start">;
  kmsService: Pick<
    TKmsServiceFactory,
    "createCipherPairWithDataKey" | "decryptWithKmsKey" | "generateKmsKey" | "encryptWithKmsKey"
  >;
  appConnectionDAL: Pick<TAppConnectionDALFactory, "findById" | "update" | "updateById">;
  keyStore: Pick<TKeyStoreFactory, "acquireLock" | "setItemWithExpiry" | "getItem">;
  pkiSyncDAL: Pick<TPkiSyncDALFactory, "findById" | "find" | "updateById" | "deleteById" | "update">;
  auditLogService: Pick<TAuditLogServiceFactory, "createAuditLog">;
  projectDAL: TProjectDALFactory;
  licenseService: Pick<TLicenseServiceFactory, "getPlan">;
  certificateDAL: Pick<
    TCertificateDALFactory,
    "findLatestActiveCertForSubscriber" | "findAllActiveCertsForSubscriber" | "create"
  >;
  certificateBodyDAL: Pick<TCertificateBodyDALFactory, "findOne" | "create">;
  certificateSecretDAL: Pick<TCertificateSecretDALFactory, "findOne" | "create">;
  certificateAuthorityDAL: Pick<TCertificateAuthorityDALFactory, "findById">;
  certificateAuthorityCertDAL: Pick<TCertificateAuthorityCertDALFactory, "findById">;
};

// Union of the three PKI sync job payloads handled by this queue.
type PkiSyncActionJob = Job<
  TQueuePkiSyncSyncCertificatesByIdDTO | TQueuePkiSyncImportCertificatesByIdDTO | TQueuePkiSyncRemoveCertificatesByIdDTO
>;
// Random spread (ms) added to every queue delay to avoid thundering herds.
const JITTER_MS = 10 * 1000;
// Base delay (ms) between lock-contention requeues.
const REQUEUE_MS = 30 * 1000;
// Maximum number of lock-contention requeues before a job is failed.
const REQUEUE_LIMIT = 30;
// Maximum simultaneous sync jobs allowed per app connection.
const CONNECTION_CONCURRENCY_LIMIT = 3;

/**
 * Computes the queue delay for a job: pure jitter for first-time jobs, the
 * base requeue delay plus jitter once the job has failed to acquire its lock
 * at least once.
 */
const getRequeueDelay = (failureCount?: number) => {
  const jitter = Math.random() * JITTER_MS;
  return failureCount ? REQUEUE_MS + jitter : jitter;
};
/**
 * Builds the PKI sync queue: enqueue helpers plus the worker that processes
 * sync / import / remove certificate jobs.
 */
export const pkiSyncQueueFactory = ({
  queueService,
  kmsService,
  appConnectionDAL,
  keyStore,
  pkiSyncDAL,
  auditLogService,
  projectDAL,
  licenseService,
  certificateDAL,
  certificateBodyDAL,
  certificateSecretDAL,
  certificateAuthorityDAL,
  certificateAuthorityCertDAL
}: TPkiSyncQueueFactoryDep) => {
  const appCfg = getConfig();
  // OpenTelemetry error counters; recorded only when OTEL collection is
  // enabled (checked at each record site via appCfg).
  const integrationMeter = opentelemetry.metrics.getMeter("PkiSyncs");
  const syncCertificatesErrorHistogram = integrationMeter.createHistogram("pki_sync_sync_certificates_errors", {
    description: "PKI Sync - sync certificates errors",
    unit: "1"
  });
  const importCertificatesErrorHistogram = integrationMeter.createHistogram("pki_sync_import_certificates_errors", {
    description: "PKI Sync - import certificates errors",
    unit: "1"
  });
  const removeCertificatesErrorHistogram = integrationMeter.createHistogram("pki_sync_remove_certificates_errors", {
    description: "PKI Sync - remove certificates errors",
    unit: "1"
  });
/**
 * True when the keystore counter of in-flight sync jobs for this app
 * connection has reached CONNECTION_CONCURRENCY_LIMIT. A missing or
 * unparsable counter counts as "not limited".
 */
const $isConnectionConcurrencyLimitReached = async (connectionId: string) => {
  const rawCount = await keyStore.getItem(KeyStorePrefixes.AppConnectionConcurrentJobs(connectionId));
  if (!rawCount) return false;
  const count = Number.parseInt(rawCount, 10);
  return !Number.isNaN(count) && count >= CONNECTION_CONCURRENCY_LIMIT;
};
/**
 * Increments the per-connection in-flight job counter. An absent or
 * unparsable counter restarts at 1. The TTL spans the worst-case requeue
 * window so stale counters self-expire.
 */
const $incrementConnectionConcurrencyCount = async (connectionId: string) => {
  const key = KeyStorePrefixes.AppConnectionConcurrentJobs(connectionId);
  const rawCount = await keyStore.getItem(key);
  const parsed = Number.parseInt(rawCount || "0", 10);
  const nextCount = Number.isNaN(parsed) ? 1 : parsed + 1;
  // TTL in seconds
  await keyStore.setItemWithExpiry(key, (REQUEUE_MS * REQUEUE_LIMIT) / 1000, nextCount);
};
/**
 * Decrements the per-connection in-flight job counter, clamping at zero so a
 * missing/unparsable counter can never go negative.
 */
const $decrementConnectionConcurrencyCount = async (connectionId: string) => {
  const key = KeyStorePrefixes.AppConnectionConcurrentJobs(connectionId);
  const rawCount = await keyStore.getItem(key);
  const parsed = Number.parseInt(rawCount || "0", 10);
  const nextCount = Math.max(0, Number.isNaN(parsed) ? 0 : parsed - 1);
  // TTL in seconds
  await keyStore.setItemWithExpiry(key, (REQUEUE_MS * REQUEUE_LIMIT) / 1000, nextCount);
};
/**
 * Builds the map of certificates to push to the destination for this sync's
 * PKI subscriber: name -> { cert (PEM), privateKey, certificateChain }.
 * Certificates whose body cannot be found or decrypted are skipped with a
 * log; a missing private key or chain degrades gracefully to empty/undefined.
 * Throws a non-retryable PkiSyncError when the subscriber no longer exists.
 */
const $getInfisicalCertificates = async (
  pkiSync: TPkiSyncRaw | TPkiSyncWithCredentials
): Promise<TCertificateMap> => {
  const { projectId, subscriberId } = pkiSync;
  if (!subscriberId) {
    throw new PkiSyncError({
      message: "Invalid PKI Sync source configuration: subscriber no longer exists. Please update source subscriber.",
      shouldRetry: false
    });
  }
  const certificateMap: TCertificateMap = {};
  try {
    // Get all active certificates for the subscriber (not just the latest)
    const certificates = await certificateDAL.findAllActiveCertsForSubscriber({
      subscriberId
    });
    for (const certificate of certificates) {
      try {
        // Get the certificate body and decrypt the certificate data
        const certBody = await certificateBodyDAL.findOne({ certId: certificate.id });
        if (certBody) {
          const certificateManagerKeyId = await getProjectKmsCertificateKeyId({
            projectId: certificate.projectId,
            projectDAL,
            kmsService
          });
          const kmsDecryptor = await kmsService.decryptWithKmsKey({
            kmsId: certificateManagerKeyId
          });
          const decryptedCert = await kmsDecryptor({
            cipherTextBlob: certBody.encryptedCertificate
          });
          const certObj = new x509.X509Certificate(decryptedCert);
          const certificatePem = certObj.toString("pem");
          // Get private key using getCertificateCredentials - handle cases where private key doesn't exist
          let certPrivateKey: string | undefined;
          try {
            const credentials = await getCertificateCredentials({
              certId: certificate.id,
              projectId: certificate.projectId,
              certificateSecretDAL,
              projectDAL,
              kmsService
            });
            certPrivateKey = credentials.certPrivateKey;
          } catch (credError) {
            logger.warn(
              { certificateId: certificate.id, subscriberId, error: credError },
              "Certificate private key not found - certificate may be imported or key was not stored"
            );
            // Continue without private key - some providers may only need the certificate
            certPrivateKey = undefined;
          }
          // Prefer the stored (encrypted) chain; fall back to reconstructing
          // it from the issuing CA when only the CA reference exists.
          let certificateChain: string | undefined;
          try {
            if (certBody.encryptedCertificateChain) {
              const decryptedCertChain = await kmsDecryptor({
                cipherTextBlob: certBody.encryptedCertificateChain
              });
              certificateChain = decryptedCertChain.toString();
            } else if (certificate.caCertId) {
              const { caCert, caCertChain } = await getCaCertChain({
                caCertId: certificate.caCertId,
                certificateAuthorityDAL,
                certificateAuthorityCertDAL,
                projectDAL,
                kmsService
              });
              certificateChain = `${caCert}\n${caCertChain}`.trim();
            }
          } catch (chainError) {
            logger.warn(
              { certificateId: certificate.id, subscriberId, error: chainError },
              "Certificate chain not found or could not be decrypted - certificate may be imported or chain was not stored"
            );
            // Continue without certificate chain
            certificateChain = undefined;
          }
          // Destination name: apply the configured name schema when present,
          // otherwise use a deterministic "Infisical-<id>" fallback
          // (dashes removed from the UUID in both cases).
          let certificateName: string;
          const syncOptions = pkiSync.syncOptions as { certificateNameSchema?: string } | undefined;
          const certificateNameSchema = syncOptions?.certificateNameSchema;
          if (certificateNameSchema) {
            const environment = "global";
            certificateName = handlebars.compile(certificateNameSchema)({
              certificateId: certificate.id.replace(/-/g, ""),
              environment
            });
          } else {
            certificateName = `Infisical-${certificate.id.replace(/-/g, "")}`;
          }
          certificateMap[certificateName] = {
            cert: certificatePem,
            privateKey: certPrivateKey || "",
            certificateChain
          };
        } else {
          logger.warn({ certificateId: certificate.id, subscriberId }, "Certificate body not found for certificate");
        }
      } catch (error) {
        logger.error(
          { error, subscriberId, certificateId: certificate.id },
          "Failed to decrypt certificate for PKI sync"
        );
        // Continue with other certificates
      }
    }
  } catch (error) {
    logger.error(
      error,
      `Failed to fetch certificate for subscriber [subscriberId=${subscriberId}] [projectId=${projectId}]`
    );
    throw new PkiSyncError({
      message: `Failed to fetch certificate for PKI subscriber: ${error instanceof Error ? error.message : String(error)}`,
      shouldRetry: true
    });
  }
  return certificateMap;
};
/**
 * Enqueues a sync-certificates job with exponential-backoff retries.
 * Re-queued jobs (after lock contention) are delayed per getRequeueDelay.
 */
const queuePkiSyncSyncCertificatesById = async (payload: TQueuePkiSyncSyncCertificatesByIdDTO) => {
  const jobOptions = {
    // this is for delaying re-queued jobs if sync is locked
    delay: getRequeueDelay(payload.failedToAcquireLockCount),
    attempts: 5,
    backoff: { type: "exponential", delay: 3000 },
    removeOnComplete: true,
    removeOnFail: true
  };
  return queueService.queue(QueueName.PkiSync, QueueJobs.PkiSyncSyncCertificates, payload, jobOptions);
};
/**
 * Enqueues an import-certificates job with exponential-backoff retries.
 */
const queuePkiSyncImportCertificatesById = async (payload: TQueuePkiSyncImportCertificatesByIdDTO) => {
  const jobOptions = {
    attempts: 5,
    backoff: { type: "exponential", delay: 3000 },
    removeOnComplete: true,
    removeOnFail: true
  };
  return queueService.queue(QueueName.PkiSync, QueueJobs.PkiSyncImportCertificates, payload, jobOptions);
};
/**
 * Enqueues a remove-certificates job with exponential-backoff retries.
 */
const queuePkiSyncRemoveCertificatesById = async (payload: TQueuePkiSyncRemoveCertificatesByIdDTO) => {
  const jobOptions = {
    attempts: 5,
    backoff: { type: "exponential", delay: 3000 },
    removeOnComplete: true,
    removeOnFail: true
  };
  return queueService.queue(QueueName.PkiSync, QueueJobs.PkiSyncRemoveCertificates, payload, jobOptions);
};
// Placeholder: certificate import is not supported yet, so this always
// rejects with a not-implemented error.
const $importCertificates = (): Promise<TCertificateMap> =>
  Promise.reject(new Error("Certificate import functionality is not implemented"));
/**
 * Executes a sync-certificates job: decrypts the app connection credentials,
 * gathers the subscriber's active certificates and pushes them to the
 * destination. Always writes an audit log; updates the sync record's status
 * only on success or on the final failed attempt.
 */
const $handleSyncCertificatesJob = async (job: TPkiSyncSyncCertificatesDTO, pkiSync: TPkiSyncRaw) => {
  const {
    data: { syncId, auditLogInfo }
  } = job;
  // Re-check plan gating at execution time (plan may have changed since queueing).
  await enterprisePkiSyncCheck(
    licenseService,
    pkiSync.connection.orgId,
    pkiSync.destination,
    "Failed to sync certificates due to plan restriction. Upgrade plan to access enterprise PKI syncs."
  );
  await pkiSyncDAL.updateById(syncId, {
    syncStatus: PkiSyncStatus.Running
  });
  logger.info(
    `PkiSync Sync [syncId=${pkiSync.id}] [destination=${pkiSync.destination}] [projectId=${pkiSync.projectId}] [subscriberId=${pkiSync.subscriberId}] [connectionId=${pkiSync.connectionId}]`
  );
  let isSynced = false;
  let syncMessage: string | null = null;
  // On the final BullMQ attempt the failure status must be persisted rather
  // than left for a retry that will never come.
  let isFinalAttempt = job.attemptsStarted === job.opts.attempts;
  try {
    const {
      connection: { orgId, encryptedCredentials, projectId: appConnectionProjectId }
    } = pkiSync;
    const credentials = await decryptAppConnectionCredentials({
      orgId,
      encryptedCredentials,
      kmsService,
      projectId: appConnectionProjectId
    });
    const pkiSyncWithCredentials = {
      ...pkiSync,
      connection: {
        ...pkiSync.connection,
        credentials
      }
    } as TPkiSyncWithCredentials;
    const certificateMap = await $getInfisicalCertificates(pkiSync);
    const syncResult = await PkiSyncFns.syncCertificates(pkiSyncWithCredentials, certificateMap, {
      appConnectionDAL,
      kmsService
    });
    logger.info(
      {
        syncId: pkiSync.id,
        uploaded: syncResult.uploaded || 0,
        removed: syncResult.removed || 0,
        failedRemovals: syncResult.failedRemovals || 0,
        skipped: syncResult.skipped || 0
      },
      "PKI sync operation completed with certificate cleanup"
    );
    isSynced = true;
  } catch (err) {
    logger.error(
      err,
      `PkiSync Sync Error [syncId=${pkiSync.id}] [destination=${pkiSync.destination}] [projectId=${pkiSync.projectId}] [subscriberId=${pkiSync.subscriberId}] [connectionId=${pkiSync.connectionId}]`
    );
    if (appCfg.OTEL_TELEMETRY_COLLECTION_ENABLED) {
      syncCertificatesErrorHistogram.record(1, {
        version: 1,
        destination: pkiSync.destination,
        syncId: pkiSync.id,
        projectId: pkiSync.projectId,
        type: err instanceof AxiosError ? "AxiosError" : err?.constructor?.name || "UnknownError",
        status: err instanceof AxiosError ? err.response?.status : undefined,
        name: err instanceof Error ? err.name : undefined
      });
    }
    syncMessage = parsePkiSyncErrorMessage(err);
    // Non-retryable PkiSyncErrors are treated as final; anything else is
    // rethrown so BullMQ retries with backoff.
    if (err instanceof PkiSyncError && !err.shouldRetry) {
      isFinalAttempt = true;
    } else {
      throw err;
    }
  } finally {
    const ranAt = new Date();
    const syncStatus = isSynced ? PkiSyncStatus.Succeeded : PkiSyncStatus.Failed;
    // Audit every run; fall back to a platform actor when none was supplied.
    await auditLogService.createAuditLog({
      projectId: pkiSync.projectId,
      ...(auditLogInfo ?? {
        actor: {
          type: ActorType.PLATFORM,
          metadata: {}
        }
      }),
      event: {
        type: EventType.PKI_SYNC_SYNC_CERTIFICATES,
        metadata: {
          syncId: pkiSync.id,
          syncMessage,
          jobId: job.id!,
          jobRanAt: ranAt
        }
      }
    });
    if (isSynced || isFinalAttempt) {
      await pkiSyncDAL.updateById(pkiSync.id, {
        syncStatus,
        lastSyncJobId: job.id,
        lastSyncMessage: syncMessage,
        lastSyncedAt: isSynced ? ranAt : undefined
      });
    }
  }
};
/**
 * Executes an import-certificates job. Import is currently a stub
 * ($importCertificates always rejects), so in practice this records the
 * failure; the audit-log and status-update flow mirrors the sync handler.
 */
const $handleImportCertificatesJob = async (job: TPkiSyncImportCertificatesDTO, pkiSync: TPkiSyncRaw) => {
  const {
    data: { syncId, auditLogInfo }
  } = job;
  await pkiSyncDAL.updateById(syncId, {
    importStatus: PkiSyncStatus.Running
  });
  logger.info(
    `PkiSync Import [syncId=${pkiSync.id}] [destination=${pkiSync.destination}] [projectId=${pkiSync.projectId}] [subscriberId=${pkiSync.subscriberId}] [connectionId=${pkiSync.connectionId}]`
  );
  let isSuccess = false;
  let importMessage: string | null = null;
  // Final BullMQ attempt => persist failure status instead of awaiting a retry.
  let isFinalAttempt = job.attemptsStarted === job.opts.attempts;
  try {
    await $importCertificates();
    isSuccess = true;
  } catch (err) {
    logger.error(
      err,
      `PkiSync Import Error [syncId=${pkiSync.id}] [destination=${pkiSync.destination}] [projectId=${pkiSync.projectId}] [subscriberId=${pkiSync.subscriberId}] [connectionId=${pkiSync.connectionId}]`
    );
    if (appCfg.OTEL_TELEMETRY_COLLECTION_ENABLED) {
      importCertificatesErrorHistogram.record(1, {
        version: 1,
        destination: pkiSync.destination,
        syncId: pkiSync.id,
        projectId: pkiSync.projectId,
        type: err instanceof AxiosError ? "AxiosError" : err?.constructor?.name || "UnknownError",
        status: err instanceof AxiosError ? err.response?.status : undefined,
        name: err instanceof Error ? err.name : undefined
      });
    }
    importMessage = parsePkiSyncErrorMessage(err);
    // Non-retryable errors end here; retryable ones rethrow for BullMQ backoff.
    if (err instanceof PkiSyncError && !err.shouldRetry) {
      isFinalAttempt = true;
    } else {
      throw err;
    }
  } finally {
    const ranAt = new Date();
    const importStatus = isSuccess ? PkiSyncStatus.Succeeded : PkiSyncStatus.Failed;
    await auditLogService.createAuditLog({
      projectId: pkiSync.projectId,
      ...(auditLogInfo ?? {
        actor: {
          type: ActorType.PLATFORM,
          metadata: {}
        }
      }),
      event: {
        type: EventType.PKI_SYNC_IMPORT_CERTIFICATES,
        metadata: {
          syncId: pkiSync.id,
          importMessage,
          jobId: job.id!,
          jobRanAt: ranAt
        }
      }
    });
    if (isSuccess || isFinalAttempt) {
      await pkiSyncDAL.updateById(pkiSync.id, {
        importStatus,
        lastImportJobId: job.id,
        lastImportMessage: importMessage,
        lastImportedAt: isSuccess ? ranAt : undefined
      });
    }
  }
};
/**
 * Executes a remove-certificates job: deletes this sync's certificates from
 * the destination. When deleteSyncOnComplete is set and removal succeeds,
 * the sync record itself is deleted (used for "remove on sync deletion").
 */
const $handleRemoveCertificatesJob = async (job: TPkiSyncRemoveCertificatesDTO, pkiSync: TPkiSyncRaw) => {
  const {
    data: { syncId, auditLogInfo, deleteSyncOnComplete }
  } = job;
  // Re-check plan gating at execution time.
  await enterprisePkiSyncCheck(
    licenseService,
    pkiSync.connection.orgId,
    pkiSync.destination,
    "Failed to remove certificates due to plan restriction. Upgrade plan to access enterprise PKI syncs."
  );
  await pkiSyncDAL.updateById(syncId, {
    removeStatus: PkiSyncStatus.Running
  });
  logger.info(
    `PkiSync Remove [syncId=${pkiSync.id}] [destination=${pkiSync.destination}] [projectId=${pkiSync.projectId}] [subscriberId=${pkiSync.subscriberId}] [connectionId=${pkiSync.connectionId}]`
  );
  let isSuccess = false;
  let removeMessage: string | null = null;
  // Final BullMQ attempt => persist failure status instead of awaiting a retry.
  let isFinalAttempt = job.attemptsStarted === job.opts.attempts;
  try {
    const {
      connection: { orgId, encryptedCredentials, projectId: appConnectionProjectId }
    } = pkiSync;
    const credentials = await decryptAppConnectionCredentials({
      orgId,
      encryptedCredentials,
      kmsService,
      projectId: appConnectionProjectId
    });
    // The names to remove are derived from the same map the sync would push.
    const certificateMap = await $getInfisicalCertificates(pkiSync);
    await PkiSyncFns.removeCertificates(
      {
        ...pkiSync,
        connection: {
          ...pkiSync.connection,
          credentials
        }
      } as TPkiSyncWithCredentials,
      Object.keys(certificateMap),
      {
        appConnectionDAL,
        kmsService
      }
    );
    isSuccess = true;
  } catch (err) {
    logger.error(
      err,
      `PkiSync Remove Error [syncId=${pkiSync.id}] [destination=${pkiSync.destination}] [projectId=${pkiSync.projectId}] [subscriberId=${pkiSync.subscriberId}] [connectionId=${pkiSync.connectionId}]`
    );
    if (appCfg.OTEL_TELEMETRY_COLLECTION_ENABLED) {
      removeCertificatesErrorHistogram.record(1, {
        version: 1,
        destination: pkiSync.destination,
        syncId: pkiSync.id,
        projectId: pkiSync.projectId,
        type: err instanceof AxiosError ? "AxiosError" : err?.constructor?.name || "UnknownError",
        status: err instanceof AxiosError ? err.response?.status : undefined,
        name: err instanceof Error ? err.name : undefined
      });
    }
    removeMessage = parsePkiSyncErrorMessage(err);
    // Non-retryable errors end here; retryable ones rethrow for BullMQ backoff.
    if (err instanceof PkiSyncError && !err.shouldRetry) {
      isFinalAttempt = true;
    } else {
      throw err;
    }
  } finally {
    const ranAt = new Date();
    const removeStatus = isSuccess ? PkiSyncStatus.Succeeded : PkiSyncStatus.Failed;
    await auditLogService.createAuditLog({
      projectId: pkiSync.projectId,
      ...(auditLogInfo ?? {
        actor: {
          type: ActorType.PLATFORM,
          metadata: {}
        }
      }),
      event: {
        type: EventType.PKI_SYNC_REMOVE_CERTIFICATES,
        metadata: {
          syncId: pkiSync.id,
          removeMessage,
          jobId: job.id!,
          jobRanAt: ranAt
        }
      }
    });
    if (isSuccess || isFinalAttempt) {
      if (isSuccess && deleteSyncOnComplete) {
        await pkiSyncDAL.deleteById(pkiSync.id);
      } else {
        await pkiSyncDAL.updateById(pkiSync.id, {
          removeStatus,
          lastRemoveJobId: job.id,
          lastRemoveMessage: removeMessage,
          lastRemovedAt: isSuccess ? ranAt : undefined
        });
      }
    }
  }
};
/**
 * Invoked when a job could not obtain its sync lock (or a per-connection
 * concurrency slot). Sync jobs are re-queued up to REQUEUE_LIMIT times;
 * import/remove jobs and exhausted sync jobs are marked failed on the sync
 * record.
 */
const $handleAcquireLockFailure = async (job: PkiSyncActionJob) => {
  const { syncId } = job.data;
  const lockFailureMessage =
    "Failed to run job. This typically happens when a sync is already in progress. Please try again.";
  if (job.name === QueueJobs.PkiSyncSyncCertificates) {
    const { failedToAcquireLockCount = 0, ...rest } = job.data as TQueuePkiSyncSyncCertificatesByIdDTO;
    if (failedToAcquireLockCount < REQUEUE_LIMIT) {
      await queuePkiSyncSyncCertificatesById({ ...rest, failedToAcquireLockCount: failedToAcquireLockCount + 1 });
      return;
    }
    // Requeue budget exhausted: surface the failure on the sync record.
    await pkiSyncDAL.updateById(syncId, {
      syncStatus: PkiSyncStatus.Failed,
      lastSyncMessage: lockFailureMessage,
      lastSyncJobId: job.id
    });
    return;
  }
  if (job.name === QueueJobs.PkiSyncImportCertificates) {
    await pkiSyncDAL.updateById(syncId, {
      importStatus: PkiSyncStatus.Failed,
      lastImportMessage: lockFailureMessage,
      lastImportJobId: job.id
    });
    return;
  }
  if (job.name === QueueJobs.PkiSyncRemoveCertificates) {
    await pkiSyncDAL.updateById(syncId, {
      removeStatus: PkiSyncStatus.Failed,
      lastRemoveMessage: lockFailureMessage,
      lastRemoveJobId: job.id
    });
    return;
  }
  throw new Error(`Unhandled PKI Sync Job ${String(job.name)}`);
};
// Single worker for all PKI sync job types. Work is serialized per sync via
// a keystore lock, and sync jobs are additionally capped per app connection.
queueService.start(QueueName.PkiSync, async (job) => {
  const { syncId } = job.data;
  const pkiSync = await pkiSyncDAL.findById(syncId);
  if (!pkiSync) throw new Error(`Cannot find PKI sync with ID ${syncId}`);
  const { connectionId } = pkiSync;
  if (job.name === QueueJobs.PkiSyncSyncCertificates) {
    // Requeue (or eventually fail) instead of running when the connection's
    // concurrency slots are saturated.
    const isConcurrentLimitReached = await $isConnectionConcurrencyLimitReached(connectionId);
    if (isConcurrentLimitReached) {
      await $handleAcquireLockFailure(job as PkiSyncActionJob);
      return;
    }
  }
  let lock: Awaited<ReturnType<typeof keyStore.acquireLock>>;
  try {
    lock = await keyStore.acquireLock(
      [KeyStorePrefixes.PkiSyncLock(syncId)],
      // PKI syncs can take excessive amounts of time so we need to keep it locked
      5 * 60 * 1000
    );
  } catch (e) {
    await $handleAcquireLockFailure(job as PkiSyncActionJob);
    return;
  }
  try {
    switch (job.name) {
      case QueueJobs.PkiSyncSyncCertificates: {
        await $incrementConnectionConcurrencyCount(connectionId);
        await $handleSyncCertificatesJob(job as TPkiSyncSyncCertificatesDTO, pkiSync);
        break;
      }
      case QueueJobs.PkiSyncImportCertificates:
        await $handleImportCertificatesJob(job as TPkiSyncImportCertificatesDTO, pkiSync);
        break;
      case QueueJobs.PkiSyncRemoveCertificates:
        await $handleRemoveCertificatesJob(job as TPkiSyncRemoveCertificatesDTO, pkiSync);
        break;
      default:
        throw new Error(`Unhandled PKI Sync Job ${String(job.name)}`);
    }
  } finally {
    // Release the per-connection slot and the sync lock even when the
    // handler threw (rethrown errors trigger BullMQ retries).
    if (job.name === QueueJobs.PkiSyncSyncCertificates) {
      await $decrementConnectionConcurrencyCount(connectionId);
    }
    await lock.release();
  }
});
// Public enqueue API; job processing is wired up via queueService.start above.
return {
  queuePkiSyncSyncCertificatesById,
  queuePkiSyncImportCertificatesById,
  queuePkiSyncRemoveCertificatesById
};
};

View File

@@ -0,0 +1,61 @@
import RE2 from "re2";
import { z } from "zod";
import { PkiSync } from "./pki-sync-enums";
// Schema for PKI sync options configuration
export const PkiSyncOptionsSchema = z.object({
  canImportCertificates: z.boolean(),
  canRemoveCertificates: z.boolean().optional(),
  // Optional handlebars-style template controlling destination certificate names.
  certificateNameSchema: z
    .string()
    .optional()
    .refine(
      (val) => {
        if (!val) return true;
        // Escape each allowed placeholder so it can be embedded in a regex.
        const allowedPlaceholdersRegexPart = ["{{certificateId}}"]
          .map((p) => p.replace(new RE2(/[-/\\^$*+?.()|[\]{}]/g), "\\$&")) // Escape regex special characters
          .join("|");
        // Whole value must consist of alphanumerics, '_', '-', '/' and the
        // allowed placeholders only.
        const allowedContentRegex = new RE2(`^([a-zA-Z0-9_\\-/]|${allowedPlaceholdersRegexPart})*$`);
        const contentIsValid = allowedContentRegex.test(val);
        // Non-blank schemas must also contain the {{certificateId}} placeholder.
        // NOTE(review): this only checks *presence* (at least one occurrence),
        // while the error message below promises "exactly one" — confirm intent.
        if (val.trim()) {
          const certificateIdRegex = new RE2(/\{\{certificateId\}\}/);
          const certificateIdIsPresent = certificateIdRegex.test(val);
          return contentIsValid && certificateIdIsPresent;
        }
        return contentIsValid;
      },
      {
        message:
          "Certificate name schema must include exactly one {{certificateId}} placeholder. Only alphanumeric characters (a-z, A-Z, 0-9), dashes (-), underscores (_), and slashes (/) are allowed besides the placeholders."
      }
    )
});
// Schema for destination-specific configurations.
// NOTE(review): `config` is deliberately an open record here; stricter
// per-destination validation presumably lives in destination-specific schemas — confirm.
export const PkiSyncDestinationConfigSchema = z.object({
  destination: z.nativeEnum(PkiSync),
  config: z.record(z.unknown())
});
// Base PKI sync schema for API responses.
export const PkiSyncSchema = z.object({
  id: z.string().uuid(),
  name: z.string().max(255),
  description: z.string().nullable().optional(),
  destination: z.nativeEnum(PkiSync),
  // When enabled, subscriber certificate changes queue sync jobs automatically.
  isAutoSyncEnabled: z.boolean(),
  // Destination-specific settings; shape varies per `destination`.
  destinationConfig: z.record(z.unknown()),
  syncOptions: z.record(z.unknown()),
  projectId: z.string().uuid(),
  // Null when the sync is not attached to a PKI subscriber.
  subscriberId: z.string().uuid().nullable().optional(),
  connectionId: z.string().uuid(),
  createdAt: z.date(),
  updatedAt: z.date(),
  syncStatus: z.string().nullable().optional(),
  lastSyncedAt: z.date().nullable().optional()
});

View File

@@ -0,0 +1,447 @@
import { ForbiddenError, subject } from "@casl/ability";
import { ActionProjectType } from "@app/db/schemas";
import { TLicenseServiceFactory } from "@app/ee/services/license/license-service";
import { TPermissionServiceFactory } from "@app/ee/services/permission/permission-service-types";
import { ProjectPermissionPkiSyncActions, ProjectPermissionSub } from "@app/ee/services/permission/project-permission";
import { BadRequestError, DatabaseError, NotFoundError } from "@app/lib/errors";
import { OrgServiceActor } from "@app/lib/types";
import { AppConnection } from "@app/services/app-connection/app-connection-enums";
import { TAppConnectionServiceFactory } from "@app/services/app-connection/app-connection-service";
import { TPkiSubscriberDALFactory } from "@app/services/pki-subscriber/pki-subscriber-dal";
import { TPkiSyncDALFactory } from "./pki-sync-dal";
import { PkiSync, PkiSyncStatus } from "./pki-sync-enums";
import { enterprisePkiSyncCheck, getPkiSyncProviderCapabilities, listPkiSyncOptions } from "./pki-sync-fns";
import { PKI_SYNC_CONNECTION_MAP, PKI_SYNC_NAME_MAP } from "./pki-sync-maps";
import { TPkiSyncQueueFactory } from "./pki-sync-queue";
import {
TCreatePkiSyncDTO,
TDeletePkiSyncDTO,
TFindPkiSyncByIdDTO,
TListPkiSyncsByProjectId,
TPkiSync,
TTriggerPkiSyncImportCertificatesByIdDTO,
TTriggerPkiSyncRemoveCertificatesByIdDTO,
TTriggerPkiSyncSyncCertificatesByIdDTO,
TUpdatePkiSyncDTO
} from "./pki-sync-types";
/**
 * Resolves the app connection type that backs a given PKI sync destination.
 *
 * @throws BadRequestError when the destination has no registered app connection.
 */
const getDestinationAppType = (destination: PkiSync): AppConnection => {
  const mappedConnection = PKI_SYNC_CONNECTION_MAP[destination];

  if (mappedConnection) return mappedConnection;

  throw new BadRequestError({
    message: `Unsupported PKI sync destination: ${destination}`
  });
};
// Dependency contract for the PKI sync service. Only the narrow surfaces this
// factory actually calls are required (Pick<...>), keeping it easy to stub in tests.
type TPkiSyncServiceFactoryDep = {
  pkiSyncDAL: Pick<
    TPkiSyncDALFactory,
    "findById" | "findByProjectIdWithSubscribers" | "findByNameAndProjectId" | "create" | "updateById" | "deleteById"
  >;
  pkiSubscriberDAL: Pick<TPkiSubscriberDALFactory, "findById">;
  // Validates the actor may use a connection and that it fits the sync destination.
  appConnectionService: Pick<TAppConnectionServiceFactory, "connectAppConnectionById">;
  permissionService: Pick<TPermissionServiceFactory, "getProjectPermission">;
  // Used for enterprise gating of certain destinations.
  licenseService: Pick<TLicenseServiceFactory, "getPlan">;
  pkiSyncQueue: Pick<
    TPkiSyncQueueFactory,
    "queuePkiSyncSyncCertificatesById" | "queuePkiSyncImportCertificatesById" | "queuePkiSyncRemoveCertificatesById"
  >;
};

// Service type is derived from the factory's return value rather than declared by hand.
export type TPkiSyncServiceFactory = ReturnType<typeof pkiSyncServiceFactory>;
export const pkiSyncServiceFactory = ({
pkiSyncDAL,
pkiSubscriberDAL,
appConnectionService,
permissionService,
licenseService,
pkiSyncQueue
}: TPkiSyncServiceFactoryDep) => {
const createPkiSync = async (
{
name,
description,
destination,
isAutoSyncEnabled = true,
destinationConfig,
syncOptions = {},
subscriberId,
connectionId,
projectId
}: Omit<TCreatePkiSyncDTO, "auditLogInfo">,
actor: OrgServiceActor
): Promise<TPkiSync> => {
await enterprisePkiSyncCheck(licenseService, actor.orgId, destination);
const { permission } = await permissionService.getProjectPermission({
actor: actor.type,
actorId: actor.id,
actorAuthMethod: actor.authMethod,
actorOrgId: actor.orgId,
actionProjectType: ActionProjectType.CertificateManager,
projectId
});
let subscriber;
if (subscriberId) {
subscriber = await pkiSubscriberDAL.findById(subscriberId);
if (!subscriber || subscriber.projectId !== projectId) {
throw new NotFoundError({ message: "PKI subscriber not found" });
}
}
ForbiddenError.from(permission).throwUnlessCan(
ProjectPermissionPkiSyncActions.Create,
subscriber
? subject(ProjectPermissionSub.PkiSyncs, { subscriberName: subscriber.name })
: ProjectPermissionSub.PkiSyncs
);
// Get the destination app type based on PKI sync destination
const destinationApp = getDestinationAppType(destination);
// Validates permission to connect and app is valid for sync destination
await appConnectionService.connectAppConnectionById(destinationApp, connectionId, actor);
const providerCapabilities = getPkiSyncProviderCapabilities(destination);
const resolvedSyncOptions = {
...providerCapabilities,
...syncOptions
};
try {
const pkiSync = await pkiSyncDAL.create({
name,
description,
destination,
isAutoSyncEnabled,
destinationConfig,
syncOptions: resolvedSyncOptions,
subscriberId,
connectionId,
projectId,
...(isAutoSyncEnabled && { syncStatus: PkiSyncStatus.Pending })
});
if (pkiSync.isAutoSyncEnabled) {
await pkiSyncQueue.queuePkiSyncSyncCertificatesById({ syncId: pkiSync.id });
}
return pkiSync as TPkiSync;
} catch (err) {
if (err instanceof DatabaseError && (err.error as { code: string })?.code === "23505") {
throw new BadRequestError({
message: `A PKI Sync with the name "${name}" already exists for the project with ID "${projectId}"`
});
}
throw err;
}
};
const updatePkiSync = async (
{
id,
name,
description,
isAutoSyncEnabled,
destinationConfig,
syncOptions,
subscriberId,
connectionId
}: Omit<TUpdatePkiSyncDTO, "auditLogInfo" | "projectId">,
actor: OrgServiceActor
): Promise<TPkiSync> => {
const pkiSync = await pkiSyncDAL.findById(id);
if (!pkiSync) throw new NotFoundError({ message: "PKI sync not found" });
const { permission } = await permissionService.getProjectPermission({
actor: actor.type,
actorId: actor.id,
actorAuthMethod: actor.authMethod,
actorOrgId: actor.orgId,
actionProjectType: ActionProjectType.CertificateManager,
projectId: pkiSync.projectId
});
let currentSubscriber;
if (pkiSync.subscriberId) {
currentSubscriber = await pkiSubscriberDAL.findById(pkiSync.subscriberId);
}
ForbiddenError.from(permission).throwUnlessCan(
ProjectPermissionPkiSyncActions.Edit,
currentSubscriber
? subject(ProjectPermissionSub.PkiSyncs, { subscriberName: currentSubscriber.name })
: ProjectPermissionSub.PkiSyncs
);
if (name && name !== pkiSync.name) {
const existingPkiSync = await pkiSyncDAL.findByNameAndProjectId(name, pkiSync.projectId);
if (existingPkiSync) {
throw new BadRequestError({ message: "PKI sync with this name already exists" });
}
}
if (subscriberId) {
const subscriber = await pkiSubscriberDAL.findById(subscriberId);
if (!subscriber || subscriber.projectId !== pkiSync.projectId) {
throw new NotFoundError({ message: "PKI subscriber not found" });
}
}
if (connectionId && connectionId !== pkiSync.connectionId) {
const destinationApp = getDestinationAppType(pkiSync.destination);
await appConnectionService.connectAppConnectionById(destinationApp, connectionId, actor);
}
let resolvedSyncOptions = syncOptions;
if (syncOptions) {
const providerCapabilities = getPkiSyncProviderCapabilities(pkiSync.destination);
if (syncOptions.canImportCertificates && !providerCapabilities.canImportCertificates) {
throw new BadRequestError({
message: `Certificate import is not supported for ${PKI_SYNC_NAME_MAP[pkiSync.destination]} PKI sync destination`
});
}
if (syncOptions.canRemoveCertificates && !providerCapabilities.canRemoveCertificates) {
throw new BadRequestError({
message: `Certificate removal cannot be enabled for ${PKI_SYNC_NAME_MAP[pkiSync.destination]} PKI sync destination`
});
}
resolvedSyncOptions = {
...providerCapabilities,
...syncOptions
};
}
const updatedPkiSync = await pkiSyncDAL.updateById(id, {
name,
description,
isAutoSyncEnabled,
destinationConfig,
syncOptions: resolvedSyncOptions,
subscriberId,
connectionId
});
return updatedPkiSync as TPkiSync;
};
const deletePkiSync = async (
{ id }: Omit<TDeletePkiSyncDTO, "auditLogInfo" | "projectId">,
actor: OrgServiceActor
) => {
const pkiSync = await pkiSyncDAL.findById(id);
if (!pkiSync) throw new NotFoundError({ message: "PKI sync not found" });
const { permission } = await permissionService.getProjectPermission({
actor: actor.type,
actorId: actor.id,
actorAuthMethod: actor.authMethod,
actorOrgId: actor.orgId,
actionProjectType: ActionProjectType.CertificateManager,
projectId: pkiSync.projectId
});
let pkiSyncSubscriber;
if (pkiSync.subscriberId) {
pkiSyncSubscriber = await pkiSubscriberDAL.findById(pkiSync.subscriberId);
}
ForbiddenError.from(permission).throwUnlessCan(
ProjectPermissionPkiSyncActions.Delete,
pkiSyncSubscriber
? subject(ProjectPermissionSub.PkiSyncs, { subscriberName: pkiSyncSubscriber.name })
: ProjectPermissionSub.PkiSyncs
);
return pkiSyncDAL.deleteById(id);
};
const listPkiSyncsByProjectId = async (
{ projectId }: TListPkiSyncsByProjectId,
actor: OrgServiceActor
): Promise<TPkiSync[]> => {
const { permission } = await permissionService.getProjectPermission({
actor: actor.type,
actorId: actor.id,
actorAuthMethod: actor.authMethod,
actorOrgId: actor.orgId,
actionProjectType: ActionProjectType.CertificateManager,
projectId
});
ForbiddenError.from(permission).throwUnlessCan(ProjectPermissionPkiSyncActions.Read, ProjectPermissionSub.PkiSyncs);
const pkiSyncsWithSubscribers = await pkiSyncDAL.findByProjectIdWithSubscribers(projectId);
return pkiSyncsWithSubscribers as TPkiSync[];
};
const findPkiSyncById = async ({ id, projectId }: TFindPkiSyncByIdDTO, actor: OrgServiceActor) => {
const pkiSync = await pkiSyncDAL.findById(id);
if (!pkiSync || (projectId && pkiSync.projectId !== projectId)) {
throw new NotFoundError({
message: `Could not find PKI Sync with ID "${id}"`
});
}
const { permission } = await permissionService.getProjectPermission({
actor: actor.type,
actorId: actor.id,
actorAuthMethod: actor.authMethod,
actorOrgId: actor.orgId,
actionProjectType: ActionProjectType.CertificateManager,
projectId: pkiSync.projectId
});
let findSubscriber;
if (pkiSync.subscriberId) {
findSubscriber = await pkiSubscriberDAL.findById(pkiSync.subscriberId);
}
ForbiddenError.from(permission).throwUnlessCan(
ProjectPermissionPkiSyncActions.Read,
findSubscriber
? subject(ProjectPermissionSub.PkiSyncs, { subscriberName: findSubscriber.name })
: ProjectPermissionSub.PkiSyncs
);
const result = {
...pkiSync,
subscriber: findSubscriber ? { id: findSubscriber.id, name: findSubscriber.name } : null
} as TPkiSync;
return result;
};
const triggerPkiSyncSyncCertificatesById = async (
{ id }: Omit<TTriggerPkiSyncSyncCertificatesByIdDTO, "auditLogInfo" | "projectId">,
actor: OrgServiceActor
) => {
const pkiSync = await pkiSyncDAL.findById(id);
if (!pkiSync) throw new NotFoundError({ message: "PKI sync not found" });
const { permission } = await permissionService.getProjectPermission({
actor: actor.type,
actorId: actor.id,
actorAuthMethod: actor.authMethod,
actorOrgId: actor.orgId,
actionProjectType: ActionProjectType.CertificateManager,
projectId: pkiSync.projectId
});
let syncSubscriber;
if (pkiSync.subscriberId) {
syncSubscriber = await pkiSubscriberDAL.findById(pkiSync.subscriberId);
}
ForbiddenError.from(permission).throwUnlessCan(
ProjectPermissionPkiSyncActions.SyncCertificates,
syncSubscriber
? subject(ProjectPermissionSub.PkiSyncs, { subscriberName: syncSubscriber.name })
: ProjectPermissionSub.PkiSyncs
);
await pkiSyncQueue.queuePkiSyncSyncCertificatesById({ syncId: id });
return { message: "PKI sync job added to queue successfully" };
};
const triggerPkiSyncImportCertificatesById = async (
{ id }: Omit<TTriggerPkiSyncImportCertificatesByIdDTO, "auditLogInfo" | "projectId">,
actor: OrgServiceActor
) => {
const pkiSync = await pkiSyncDAL.findById(id);
if (!pkiSync) throw new NotFoundError({ message: "PKI sync not found" });
const { permission } = await permissionService.getProjectPermission({
actor: actor.type,
actorId: actor.id,
actorAuthMethod: actor.authMethod,
actorOrgId: actor.orgId,
actionProjectType: ActionProjectType.CertificateManager,
projectId: pkiSync.projectId
});
// Check if the PKI sync destination supports importing certificates
const syncOptions = listPkiSyncOptions().find((option) => option.destination === pkiSync.destination);
if (!syncOptions?.canImportCertificates) {
throw new BadRequestError({
message: `Certificate import is not supported for ${pkiSync.destination} PKI sync destination`
});
}
let importSubscriber;
if (pkiSync.subscriberId) {
importSubscriber = await pkiSubscriberDAL.findById(pkiSync.subscriberId);
}
ForbiddenError.from(permission).throwUnlessCan(
ProjectPermissionPkiSyncActions.ImportCertificates,
importSubscriber
? subject(ProjectPermissionSub.PkiSyncs, { subscriberName: importSubscriber.name })
: ProjectPermissionSub.PkiSyncs
);
await pkiSyncQueue.queuePkiSyncImportCertificatesById({ syncId: id });
return { message: "PKI sync import job added to queue successfully" };
};
const triggerPkiSyncRemoveCertificatesById = async (
{ id }: Omit<TTriggerPkiSyncRemoveCertificatesByIdDTO, "auditLogInfo" | "projectId">,
actor: OrgServiceActor
) => {
const pkiSync = await pkiSyncDAL.findById(id);
if (!pkiSync) throw new NotFoundError({ message: "PKI sync not found" });
const { permission } = await permissionService.getProjectPermission({
actor: actor.type,
actorId: actor.id,
actorAuthMethod: actor.authMethod,
actorOrgId: actor.orgId,
actionProjectType: ActionProjectType.CertificateManager,
projectId: pkiSync.projectId
});
let removeSubscriber;
if (pkiSync.subscriberId) {
removeSubscriber = await pkiSubscriberDAL.findById(pkiSync.subscriberId);
}
ForbiddenError.from(permission).throwUnlessCan(
ProjectPermissionPkiSyncActions.RemoveCertificates,
removeSubscriber
? subject(ProjectPermissionSub.PkiSyncs, { subscriberName: removeSubscriber.name })
: ProjectPermissionSub.PkiSyncs
);
await pkiSyncQueue.queuePkiSyncRemoveCertificatesById({ syncId: id });
return { message: "PKI sync remove job added to queue successfully" };
};
const getPkiSyncOptions = () => {
return listPkiSyncOptions();
};
return {
createPkiSync,
updatePkiSync,
deletePkiSync,
listPkiSyncsByProjectId,
findPkiSyncById,
triggerPkiSyncSyncCertificatesById,
triggerPkiSyncImportCertificatesById,
triggerPkiSyncRemoveCertificatesById,
getPkiSyncOptions
};
};

View File

@@ -0,0 +1,169 @@
import { Job } from "bullmq";
import { AuditLogInfo } from "@app/ee/services/audit-log/audit-log-types";
import { QueueJobs } from "@app/queue";
import { ResourceMetadataDTO } from "@app/services/resource-metadata/resource-metadata-schema";
import { TPkiSyncDALFactory } from "./pki-sync-dal";
import { PkiSync } from "./pki-sync-enums";
/**
 * Fully-hydrated PKI sync as returned by the service layer, including the
 * backing app connection record and (optionally) the attached PKI subscriber.
 */
export type TPkiSync = {
  id: string;
  name: string;
  description?: string;
  destination: PkiSync;
  // When true, subscriber certificate changes automatically queue sync jobs.
  isAutoSyncEnabled: boolean;
  // Destination-specific settings; shape depends on `destination`.
  destinationConfig: Record<string, unknown>;
  syncOptions: Record<string, unknown>;
  projectId: string;
  subscriberId?: string;
  connectionId: string;
  createdAt: Date;
  updatedAt: Date;
  // Status / job-id / message / timestamp quadruples for each of the three
  // job kinds: sync, import, remove.
  syncStatus?: string;
  lastSyncJobId?: string;
  lastSyncMessage?: string;
  lastSyncedAt?: Date;
  importStatus?: string;
  lastImportJobId?: string;
  lastImportMessage?: string;
  lastImportedAt?: Date;
  removeStatus?: string;
  lastRemoveJobId?: string;
  lastRemoveMessage?: string;
  lastRemovedAt?: Date;
  // Denormalized connection display fields.
  appConnectionName: string;
  appConnectionApp: string;
  // Full app connection record; credentials are still encrypted in this shape.
  connection: {
    id: string;
    name: string;
    app: string;
    encryptedCredentials: unknown;
    orgId: string;
    projectId?: string;
    method: string;
    description?: string;
    version: number;
    gatewayId?: string;
    createdAt: Date;
    updatedAt: Date;
    isPlatformManagedCredentials?: boolean;
  };
  // Minimal subscriber info; null when the sync is not attached to a subscriber.
  subscriber?: {
    id: string;
    name: string;
  } | null;
};
/**
 * Sync variant used at job-execution time, where the connection credentials
 * have been decrypted into plain form.
 */
export type TPkiSyncWithCredentials = TPkiSync & {
  connection: {
    id: string;
    name: string;
    app: string;
    credentials: Record<string, unknown>;
    orgId: string;
  };
};

// List-view variant carrying denormalized connection display fields.
export type TPkiSyncListItem = TPkiSync & {
  appConnectionName: string;
  appConnectionApp: string;
};

// Maps a certificate key to its material (cert, private key, optional chain).
// NOTE(review): presumably keyed by the rendered certificate name — confirm in sync fns.
export type TCertificateMap = Record<string, { cert: string; privateKey: string; certificateChain?: string }>;
/**
 * Input for creating a PKI sync. The service layer receives this with
 * `auditLogInfo` stripped (see Omit<> in pki-sync-service).
 */
export type TCreatePkiSyncDTO = {
  name: string;
  description?: string;
  destination: PkiSync;
  isAutoSyncEnabled?: boolean;
  destinationConfig: Record<string, unknown>;
  syncOptions?: Record<string, unknown>;
  subscriberId?: string;
  connectionId: string;
  projectId: string;
  auditLogInfo: AuditLogInfo;
  resourceMetadata?: ResourceMetadataDTO;
};

/** Input for updating a PKI sync; all mutable fields are optional. */
export type TUpdatePkiSyncDTO = {
  id: string;
  projectId?: string;
  name?: string;
  description?: string;
  isAutoSyncEnabled?: boolean;
  destinationConfig?: Record<string, unknown>;
  syncOptions?: Record<string, unknown>;
  subscriberId?: string;
  connectionId?: string;
  auditLogInfo: AuditLogInfo;
  resourceMetadata?: ResourceMetadataDTO;
};

export type TDeletePkiSyncDTO = {
  id: string;
  projectId?: string;
  auditLogInfo: AuditLogInfo;
};

export type TListPkiSyncsByProjectId = {
  projectId: string;
};

// `projectId`, when provided, scopes the lookup (service 404s on mismatch).
export type TFindPkiSyncByIdDTO = {
  id: string;
  projectId?: string;
};

// Trigger DTOs for the three manually-invocable job kinds.
export type TTriggerPkiSyncSyncCertificatesByIdDTO = {
  id: string;
  projectId?: string;
  auditLogInfo: AuditLogInfo;
};

export type TTriggerPkiSyncImportCertificatesByIdDTO = {
  id: string;
  projectId?: string;
  auditLogInfo: AuditLogInfo;
};

export type TTriggerPkiSyncRemoveCertificatesByIdDTO = {
  id: string;
  projectId?: string;
  auditLogInfo: AuditLogInfo;
};
// Raw DAL row shape for a PKI sync (the non-null result of findById).
export type TPkiSyncRaw = NonNullable<Awaited<ReturnType<TPkiSyncDALFactory["findById"]>>>;

/**
 * Queue payloads. `auditLogInfo` is optional because jobs may be queued by the
 * system (e.g. auto sync) rather than a user action.
 */
export type TQueuePkiSyncSyncCertificatesByIdDTO = {
  syncId: string;
  // NOTE(review): presumably incremented on lock-acquisition failure and used
  // to cap requeues — confirm against the queue handler.
  failedToAcquireLockCount?: number;
  auditLogInfo?: AuditLogInfo;
};

export type TQueuePkiSyncImportCertificatesByIdDTO = {
  syncId: string;
  auditLogInfo?: AuditLogInfo;
};

export type TQueuePkiSyncRemoveCertificatesByIdDTO = {
  syncId: string;
  auditLogInfo?: AuditLogInfo;
  // NOTE(review): looks like this deletes the sync record once removal
  // completes (delete flow) — confirm in the queue handler.
  deleteSyncOnComplete?: boolean;
};

// BullMQ Job wrappers pairing each payload with its named queue job.
export type TPkiSyncSyncCertificatesDTO = Job<
  TQueuePkiSyncSyncCertificatesByIdDTO,
  void,
  QueueJobs.PkiSyncSyncCertificates
>;

export type TPkiSyncImportCertificatesDTO = Job<
  TQueuePkiSyncImportCertificatesByIdDTO,
  void,
  QueueJobs.PkiSyncImportCertificates
>;

export type TPkiSyncRemoveCertificatesDTO = Job<
  TQueuePkiSyncRemoveCertificatesByIdDTO,
  void,
  QueueJobs.PkiSyncRemoveCertificates
>;

View File

@@ -0,0 +1,27 @@
import { logger } from "@app/lib/logger";
import { TPkiSyncDALFactory } from "./pki-sync-dal";
import { TPkiSyncQueueFactory } from "./pki-sync-queue";
/**
 * Queues certificate sync jobs for every auto-sync-enabled PKI sync attached
 * to a subscriber.
 *
 * Best-effort: failures are logged and never propagated to the caller, so a
 * broken downstream sync cannot fail the operation that triggered it.
 */
export const triggerAutoSyncForSubscriber = async (
  subscriberId: string,
  dependencies: {
    pkiSyncDAL: Pick<TPkiSyncDALFactory, "find">;
    pkiSyncQueue: Pick<TPkiSyncQueueFactory, "queuePkiSyncSyncCertificatesById">;
  }
) => {
  try {
    const pkiSyncs = await dependencies.pkiSyncDAL.find({
      subscriberId,
      isAutoSyncEnabled: true
    });

    // Queue each sync independently; allSettled ensures one failure does not
    // mask the others (Promise.all would only surface the first rejection).
    const results = await Promise.allSettled(
      pkiSyncs.map((pkiSync) => dependencies.pkiSyncQueue.queuePkiSyncSyncCertificatesById({ syncId: pkiSync.id }))
    );

    results.forEach((result, idx) => {
      if (result.status === "rejected") {
        logger.error(
          result.reason,
          `Failed to queue PKI sync ${pkiSyncs[idx]?.id} for subscriber ${subscriberId}`
        );
      }
    });
  } catch (error) {
    logger.error(error, `Failed to trigger auto sync for subscriber ${subscriberId}`);
  }
};

View File

@@ -446,9 +446,10 @@ export const secretV2BridgeDALFactory = ({ db, keyStore }: TSecretV2DalArg) => {
}
})
.where((bd) => {
void bd
.whereNull(`${TableName.SecretV2}.userId`)
.orWhere({ [`${TableName.SecretV2}.userId` as "userId"]: userId || null });
void bd.whereNull(`${TableName.SecretV2}.userId`);
// scott: removing this as we don't need to count overrides
// and there is currently a bug when you move secrets that doesn't move the override so this can skew count
// .orWhere({ [`${TableName.SecretV2}.userId` as "userId"]: userId || null });
})
.countDistinct(`${TableName.SecretV2}.key`);

View File

@@ -597,19 +597,27 @@ export const expandSecretReferencesFactory = ({
return secretCache[cacheKey][secretKey] || { value: "", tags: [] };
}
const folder = await folderDAL.findBySecretPath(projectId, environment, secretPath);
if (!folder) return { value: "", tags: [] };
const secrets = await secretDAL.findByFolderId({ folderId: folder.id });
try {
const folder = await folderDAL.findBySecretPath(projectId, environment, secretPath);
if (!folder) return { value: "", tags: [] };
const secrets = await secretDAL.findByFolderId({ folderId: folder.id });
const decryptedSecret = secrets.reduce<Record<string, { value: string; tags: string[] }>>((prev, secret) => {
// eslint-disable-next-line no-param-reassign
prev[secret.key] = { value: decryptSecret(secret.encryptedValue) || "", tags: secret.tags?.map((el) => el.slug) };
return prev;
}, {});
const decryptedSecret = secrets.reduce<Record<string, { value: string; tags: string[] }>>((prev, secret) => {
// eslint-disable-next-line no-param-reassign
prev[secret.key] = {
value: decryptSecret(secret.encryptedValue) || "",
tags: secret.tags?.map((el) => el.slug)
};
return prev;
}, {});
secretCache[cacheKey] = decryptedSecret;
secretCache[cacheKey] = decryptedSecret;
return secretCache[cacheKey][secretKey] || { value: "", tags: [] };
return secretCache[cacheKey][secretKey] || { value: "", tags: [] };
} catch (error) {
secretCache[cacheKey] = {};
return { value: "", tags: [] };
}
};
const recursivelyExpandSecret = async (dto: {
@@ -622,11 +630,16 @@ export const expandSecretReferencesFactory = ({
const stackTrace = { ...dto, key: "root", children: [] } as TSecretReferenceTraceNode;
if (!dto.value) return { expandedValue: "", stackTrace };
const stack = [{ ...dto, depth: 0, trace: stackTrace }];
// Track visited secrets to prevent circular references
const createSecretId = (env: string, secretPath: string, key: string) => `${env}:${secretPath}:${key}`;
const currentSecretId = createSecretId(dto.environment, dto.secretPath, dto.secretKey);
const stack = [{ ...dto, depth: 0, trace: stackTrace, visitedSecrets: new Set<string>([currentSecretId]) }];
let expandedValue = dto.value;
while (stack.length) {
const { value, secretPath, environment, depth, trace } = stack.pop()!;
const { value, secretPath, environment, depth, trace, visitedSecrets } = stack.pop()!;
// eslint-disable-next-line no-continue
if (depth > MAX_SECRET_REFERENCE_DEPTH) continue;
@@ -664,6 +677,7 @@ export const expandSecretReferencesFactory = ({
});
const cacheKey = getCacheUniqueKey(environment, secretPath);
if (!secretCache[cacheKey]) secretCache[cacheKey] = {};
secretCache[cacheKey][secretKey] = referredValue;
referencedSecretValue = referredValue.value;
@@ -683,6 +697,7 @@ export const expandSecretReferencesFactory = ({
});
const cacheKey = getCacheUniqueKey(secretReferenceEnvironment, secretReferencePath);
if (!secretCache[cacheKey]) secretCache[cacheKey] = {};
secretCache[cacheKey][secretReferenceKey] = referedValue;
referencedSecretValue = referedValue.value;
@@ -700,17 +715,27 @@ export const expandSecretReferencesFactory = ({
trace
};
const shouldExpandMore = INTERPOLATION_TEST_REGEX.test(referencedSecretValue);
// Check for circular reference
const referencedSecretId = createSecretId(
referencedSecretEnvironmentSlug,
referencedSecretPath,
referencedSecretKey
);
const isCircular = visitedSecrets.has(referencedSecretId);
const newVisitedSecrets = new Set([...visitedSecrets, referencedSecretId]);
const shouldExpandMore = INTERPOLATION_TEST_REGEX.test(referencedSecretValue) && !isCircular;
if (dto.shouldStackTrace) {
const stackTraceNode = { ...node, children: [], key: referencedSecretKey, trace: null };
trace?.children.push(stackTraceNode);
// if stack trace this would be child node
if (shouldExpandMore) {
stack.push({ ...node, trace: stackTraceNode });
stack.push({ ...node, trace: stackTraceNode, visitedSecrets: newVisitedSecrets });
}
} else if (shouldExpandMore) {
// if no stack trace is needed we just keep going with root node
stack.push(node);
stack.push({ ...node, visitedSecrets: newVisitedSecrets });
}
if (referencedSecretValue) {

View File

@@ -159,17 +159,14 @@ export const secretV2BridgeServiceFactory = ({
const uniqueReferenceEnvironmentSlugs = Array.from(new Set(references.map((el) => el.environment)));
const referencesEnvironments = await projectEnvDAL.findBySlugs(projectId, uniqueReferenceEnvironmentSlugs, tx);
if (referencesEnvironments.length !== uniqueReferenceEnvironmentSlugs.length)
throw new BadRequestError({
message: `Referenced environment not found. Missing ${diff(
uniqueReferenceEnvironmentSlugs,
referencesEnvironments.map((el) => el.slug)
).join(",")}`
});
// Filter out references to non-existent environments
const referencesEnvironmentGroupBySlug = groupBy(referencesEnvironments, (i) => i.slug);
const validEnvironmentReferences = references.filter((el) => referencesEnvironmentGroupBySlug[el.environment]);
if (validEnvironmentReferences.length === 0) return;
const referredFolders = await folderDAL.findByManySecretPath(
references.map((el) => ({
validEnvironmentReferences.map((el) => ({
secretPath: el.secretPath,
envId: referencesEnvironmentGroupBySlug[el.environment][0].id
})),
@@ -177,58 +174,71 @@ export const secretV2BridgeServiceFactory = ({
);
const referencesFolderGroupByPath = groupBy(referredFolders.filter(Boolean), (i) => `${i?.envId}-${i?.path}`);
// Find only references that have valid folders (don't throw for missing paths)
const validReferences = validEnvironmentReferences.filter((el) => {
const folderId =
referencesFolderGroupByPath[`${referencesEnvironmentGroupBySlug[el.environment][0].id}-${el.secretPath}`]?.[0]
?.id;
return folderId;
});
if (validReferences.length === 0) return;
const referredSecrets = await secretDAL.find(
{
$complex: {
operator: "or",
value: references.map((el) => {
const folderId =
referencesFolderGroupByPath[
`${referencesEnvironmentGroupBySlug[el.environment][0].id}-${el.secretPath}`
][0]?.id;
if (!folderId) throw new BadRequestError({ message: `Referenced path ${el.secretPath} doesn't exist` });
value: validReferences
.map((el) => {
const folderGroup =
referencesFolderGroupByPath[
`${referencesEnvironmentGroupBySlug[el.environment][0].id}-${el.secretPath}`
];
if (!folderGroup || !folderGroup[0]) return null;
return {
operator: "and",
value: [
{
operator: "eq",
field: "folderId",
value: folderId
},
{
operator: "eq",
field: `${TableName.SecretV2}.key` as "key",
value: el.secretKey
}
]
};
})
const folderId = folderGroup[0].id;
return {
operator: "and",
value: [
{
operator: "eq",
field: "folderId",
value: folderId
},
{
operator: "eq",
field: `${TableName.SecretV2}.key` as "key",
value: el.secretKey
}
]
};
})
.filter((query) => query !== null) as Array<{
operator: "and";
value: Array<{
operator: "eq";
field: "folderId" | "key";
value: string;
}>;
}>
}
},
{ tx }
);
if (
referredSecrets.length !==
new Set(references.map(({ secretKey, secretPath, environment }) => `${secretKey}.${secretPath}.${environment}`))
.size // only count unique references
)
throw new BadRequestError({
message: `Referenced secret(s) not found: ${diff(
references.map((el) => el.secretKey),
referredSecrets.map((el) => el.key)
).join(",")}`
});
const referredSecretsGroupBySecretKey = groupBy(referredSecrets, (i) => i.key);
references.forEach((el) => {
throwIfMissingSecretReadValueOrDescribePermission(permission, ProjectPermissionSecretActions.DescribeSecret, {
environment: el.environment,
secretPath: el.secretPath,
secretName: el.secretKey,
secretTags: referredSecretsGroupBySecretKey[el.secretKey][0]?.tags?.map((i) => i.slug)
});
// Only check permissions for secrets that actually exist
referredSecrets.forEach((secret) => {
const reference = validReferences.find((ref) => ref.secretKey === secret.key);
if (reference) {
throwIfMissingSecretReadValueOrDescribePermission(permission, ProjectPermissionSecretActions.DescribeSecret, {
environment: reference.environment,
secretPath: reference.secretPath,
secretName: reference.secretKey,
secretTags: secret.tags?.map((i) => i.slug)
});
}
});
return referredSecrets;
@@ -478,15 +488,16 @@ export const secretV2BridgeServiceFactory = ({
secret = sharedSecretToModify;
}
ForbiddenError.from(permission).throwUnlessCan(
ProjectPermissionSecretActions.Edit,
subject(ProjectPermissionSub.Secrets, {
environment,
secretPath,
secretName: inputSecret.secretName,
secretTags: secret.tags.map((el) => el.slug)
})
);
if (secret.type !== SecretType.Personal)
ForbiddenError.from(permission).throwUnlessCan(
ProjectPermissionSecretActions.Edit,
subject(ProjectPermissionSub.Secrets, {
environment,
secretPath,
secretName: inputSecret.secretName,
secretTags: secret.tags.map((el) => el.slug)
})
);
// validate tags
// fetch all tags and if not same count throw error meaning one was invalid tags
@@ -497,17 +508,18 @@ export const secretV2BridgeServiceFactory = ({
const tagsToCheck = inputSecret.tagIds ? newTags : secret.tags;
// now check with new ids
ForbiddenError.from(permission).throwUnlessCan(
ProjectPermissionSecretActions.Edit,
subject(ProjectPermissionSub.Secrets, {
environment,
secretPath,
secretName: inputSecret.secretName,
...(tagsToCheck.length && {
secretTags: tagsToCheck.map((el) => el.slug)
if (secret.type !== SecretType.Personal)
ForbiddenError.from(permission).throwUnlessCan(
ProjectPermissionSecretActions.Edit,
subject(ProjectPermissionSub.Secrets, {
environment,
secretPath,
secretName: inputSecret.secretName,
...(tagsToCheck.length && {
secretTags: tagsToCheck.map((el) => el.slug)
})
})
})
);
);
if (inputSecret.newSecretName) {
const doesNewNameSecretExist = await secretDAL.findOne({
@@ -546,6 +558,14 @@ export const secretV2BridgeServiceFactory = ({
);
}
if (secretValue) {
const { nestedReferences, localReferences } = getAllSecretReferences(secretValue);
const allSecretReferences = nestedReferences.concat(
localReferences.map((el) => ({ secretKey: el, secretPath, environment }))
);
await $validateSecretReferences(projectId, permission, allSecretReferences);
}
const { encryptor: secretManagerEncryptor } = await kmsService.createCipherPairWithDataKey({
type: KmsDataKey.SecretManager,
projectId
@@ -706,15 +726,17 @@ export const secretV2BridgeServiceFactory = ({
})
});
if (!secretToDelete) throw new NotFoundError({ message: "Secret not found" });
ForbiddenError.from(permission).throwUnlessCan(
ProjectPermissionSecretActions.Delete,
subject(ProjectPermissionSub.Secrets, {
environment,
secretPath,
secretName: secretToDelete.key,
secretTags: secretToDelete.tags?.map((el) => el.slug)
})
);
if (secretToDelete.type !== SecretType.Personal)
ForbiddenError.from(permission).throwUnlessCan(
ProjectPermissionSecretActions.Delete,
subject(ProjectPermissionSub.Secrets, {
environment,
secretPath,
secretName: secretToDelete.key,
secretTags: secretToDelete.tags?.map((el) => el.slug)
})
);
try {
const deletedSecret = await secretDAL.transaction(async (tx) => {
@@ -1658,7 +1680,7 @@ export const secretV2BridgeServiceFactory = ({
await scanSecretPolicyViolations(projectId, secretPath, inputSecrets, project.secretDetectionIgnoreValues || []);
// get all tags
const sanitizedTagIds = inputSecrets.flatMap(({ tagIds = [] }) => tagIds);
const sanitizedTagIds = [...new Set(inputSecrets.flatMap(({ tagIds = [] }) => tagIds))];
const tags = sanitizedTagIds.length ? await secretTagDAL.findManyTagsById(projectId, sanitizedTagIds) : [];
if (tags.length !== sanitizedTagIds.length)
throw new NotFoundError({ message: `Tag not found. Found ${tags.map((el) => el.slug).join(",")}` });
@@ -1906,7 +1928,7 @@ export const secretV2BridgeServiceFactory = ({
});
// get all tags
const sanitizedTagIds = secretsToUpdate.flatMap(({ tagIds = [] }) => tagIds);
const sanitizedTagIds = [...new Set(secretsToUpdate.flatMap(({ tagIds = [] }) => tagIds))];
const tags = sanitizedTagIds.length ? await secretTagDAL.findManyTagsById(projectId, sanitizedTagIds, tx) : [];
if (tags.length !== sanitizedTagIds.length) throw new NotFoundError({ message: "Tag not found" });
const tagsGroupByID = groupBy(tags, (i) => i.id);
@@ -2333,7 +2355,8 @@ export const secretV2BridgeServiceFactory = ({
actorAuthMethod,
limit = 20,
offset = 0,
secretId
secretId,
secretVersions: secretVersionsFilter
}: TGetSecretVersionsDTO) => {
const secret = await secretDAL.findById(secretId);
@@ -2370,6 +2393,7 @@ export const secretV2BridgeServiceFactory = ({
const secretVersions = await secretVersionDAL.findVersionsBySecretIdWithActors({
secretId,
projectId: folder.projectId,
secretVersions: secretVersionsFilter,
findOpt: {
offset,
limit,
@@ -2939,7 +2963,7 @@ export const secretV2BridgeServiceFactory = ({
secretKey: secretName
});
return { tree: stackTrace, value: expandedValue };
return { tree: stackTrace, value: expandedValue, secret };
};
const getAccessibleSecrets = async ({
@@ -3155,6 +3179,7 @@ export const secretV2BridgeServiceFactory = ({
getSecretById,
getAccessibleSecrets,
getSecretVersionsByIds,
findSecretIdsByFolderIdAndKeys
findSecretIdsByFolderIdAndKeys,
$validateSecretReferences
};
};

View File

@@ -159,6 +159,7 @@ export type TGetSecretVersionsDTO = Omit<TProjectPermission, "projectId"> & {
limit?: number;
offset?: number;
secretId: string;
secretVersions?: string[];
};
export type TSecretReference = { environment: string; secretPath: string; secretKey: string };

View File

@@ -2568,7 +2568,8 @@ export const secretServiceFactory = ({
actorAuthMethod,
limit = 20,
offset = 0,
secretId
secretId,
secretVersions: filterSecretVersions
}: TGetSecretVersionsDTO) => {
const secretVersionV2 = await secretV2BridgeService
.getSecretVersions({
@@ -2578,7 +2579,8 @@ export const secretServiceFactory = ({
actorAuthMethod,
limit,
offset,
secretId
secretId,
secretVersions: filterSecretVersions
})
.catch((err) => {
if ((err as Error).message === "BadRequest: Failed to find secret") {

View File

@@ -331,6 +331,7 @@ export type TGetSecretVersionsDTO = Omit<TProjectPermission, "projectId"> & {
limit?: number;
offset?: number;
secretId: string;
secretVersions?: string[];
};
export type TSecretReference = { environment: string; secretPath: string };

View File

@@ -0,0 +1,4 @@
---
title: "List PKI Syncs"
openapi: "GET /api/v1/pki/syncs"
---

View File

@@ -0,0 +1,4 @@
---
title: "Options"
openapi: "GET /api/v1/pki/syncs/options"
---

View File

@@ -0,0 +1,4 @@
---
title: "Create Azure Key Vault PKI Sync"
openapi: "POST /api/v1/pki/syncs/azure-key-vault"
---

View File

@@ -0,0 +1,4 @@
---
title: "Delete Azure Key Vault PKI Sync"
openapi: "DELETE /api/v1/pki/syncs/azure-key-vault/{pkiSyncId}"
---

View File

@@ -0,0 +1,4 @@
---
title: "Get Azure Key Vault PKI Sync by ID"
openapi: "GET /api/v1/pki/syncs/azure-key-vault/{pkiSyncId}"
---

View File

@@ -0,0 +1,4 @@
---
title: "List Azure Key Vault PKI Syncs"
openapi: "GET /api/v1/pki/syncs/azure-key-vault"
---

View File

@@ -0,0 +1,4 @@
---
title: "Remove Certificates from Azure Key Vault"
openapi: "POST /api/v1/pki/syncs/azure-key-vault/{pkiSyncId}/remove-certificates"
---

View File

@@ -0,0 +1,4 @@
---
title: "Sync Certificates to Azure Key Vault"
openapi: "POST /api/v1/pki/syncs/azure-key-vault/{pkiSyncId}/sync"
---

View File

@@ -0,0 +1,4 @@
---
title: "Update Azure Key Vault PKI Sync"
openapi: "PATCH /api/v1/pki/syncs/azure-key-vault/{pkiSyncId}"
---

View File

@@ -0,0 +1,4 @@
---
title: "Get PKI Sync by ID"
openapi: "GET /api/v1/pki/syncs/{pkiSyncId}"
---

View File

@@ -0,0 +1,4 @@
---
title: "List PKI Syncs"
openapi: "GET /api/v1/pki/syncs"
---

View File

@@ -0,0 +1,4 @@
---
title: "List PKI Sync Options"
openapi: "GET /api/v1/pki/syncs/options"
---

View File

@@ -9,7 +9,7 @@ description: "Run the Infisical gateway or manage its systemd service"
infisical gateway start --name=<name> --relay=<relay-name> --auth-method=<auth-method>
```
</Tab>
<Tab title="Install gateway service">
<Tab title="Start gateway as background daemon (Linux only)">
```bash
sudo infisical gateway systemd install --token=<token> --domain=<domain> --name=<name> --relay=<relay-name>
```
@@ -25,29 +25,29 @@ The gateway system uses SSH reverse tunnels over TCP, eliminating firewall compl
<Warning>
**Deprecation and Migration Notice:** The legacy `infisical gateway` command (v1) will be removed in a future release. Please migrate to `infisical gateway start` (Gateway v2).
If you are moving from Gateway v1 to Gateway v2, this is NOT a drop-in switch. Gateway v2 creates new gateway instances with new gateway IDs. You must update any existing resources that reference gateway IDs (for example: dynamic secret configs, app connections, or other gateway-bound resources) to point to the new Gateway v2 gateway ID. Until you update those references, traffic will continue to target the old v1 gateway.
If you are moving from Gateway v1 to Gateway v2, this is NOT a drop-in switch. Gateway v2 creates new gateway instances with new gateway IDs. You must update any existing resources that reference gateway IDs (for example: dynamic secret configs, app connections, or other gateway-bound resources) to point to the new Gateway v2 gateway resource. Until you update those references, traffic will continue to target the old v1 gateway.
</Warning>
## Subcommands & flags
<Accordion title="infisical gateway start" defaultOpen="true">
Run the Infisical gateway component within your VPC. The gateway establishes an SSH reverse tunnel to the specified relay server and provides secure access to private resources.
Run the Infisical gateway component within the network where your target resources are located. The gateway establishes an SSH reverse tunnel to the specified relay server and provides secure access to private resources within your network.
```bash
infisical gateway start --relay=<relay-name> --name=<name> --auth-method=<auth-method>
```
The gateway component:
Once started, the gateway component will:
- Establishes outbound SSH reverse tunnels to relay servers (no inbound firewall rules needed)
- Authenticates using SSH certificates issued by Infisical
- Automatically reconnects if the connection is lost
- Provides access to private resources within your network
- Establish outbound SSH reverse tunnels to relay servers (no inbound firewall rules needed)
- Authenticate using SSH certificates issued by Infisical
- Automatically reconnect if the connection is lost
- Provide access to private resources within your network
### Authentication
The Infisical CLI supports multiple authentication methods. Below are the available authentication methods, with their respective flags.
The gateway supports multiple authentication methods. Below are the available authentication methods, with their respective flags.
<AccordionGroup>
<Accordion title="Universal Auth">
@@ -361,12 +361,12 @@ sudo systemctl disable infisical-gateway # Disable auto-start on boot
</Accordion>
## Legacy Gateway Commands (Deprecated)
## Legacy Gateway Commands
<Accordion title="infisical gateway (deprecated)">
<Warning>
**This command is deprecated and will be removed in a future release.**
Please migrate to `infisical gateway start` for the new TCP-based SSH tunnel architecture.
**Migration required:** If you are currently using Gateway v1 (via `infisical gateway`), moving to Gateway v2 is not in-place. Gateway v2 provisions new gateway instances with new gateway IDs. Update any resources that reference a gateway ID (for example: dynamic secret configs, app connections, or other gateway-bound resources) to use the new Gateway v2 gateway ID. Until you update those references, traffic will continue to target the old v1 gateway.
@@ -593,7 +593,7 @@ The Infisical CLI supports multiple authentication methods. Below are the availa
<Accordion title="infisical gateway install (deprecated)">
<Warning>
**This command is deprecated and will be removed in a future release.**
Please migrate to `infisical gateway systemd install` for the new TCP-based SSH tunnel architecture with enhanced security and better performance.
**Migration required:** If you previously installed Gateway v1 via `infisical gateway install`, moving to Gateway v2 is not in-place. Gateway v2 provisions new gateway instances with new gateway IDs. Update any resources that reference a gateway ID (for example: dynamic secret configs, app connections, or other gateway-bound resources) to use the new Gateway v2 gateway ID. Until you update those references, traffic will continue to target the old v1 gateway.

View File

@@ -6,88 +6,70 @@ description: "Relay-related commands for Infisical"
<Tabs>
<Tab title="Start relay">
```bash
infisical relay start --type=<type> --host=<host> --name=<name> --auth-method=<auth-method>
infisical relay start --host=<host> --name=<name> --auth-method=<auth-method>
```
</Tab>
<Tab title="Start relay as background daemon (Linux only)">
```bash
# Install systemd service
sudo infisical relay systemd install --host=<host> --name=<name> --token=<token>
# Uninstall systemd service
sudo infisical relay systemd uninstall
```
</Tab>
</Tabs>
## Description
Relay-related commands for Infisical that provide identity-aware relay infrastructure for routing encrypted traffic:
- **Relay**: Identity-aware server that routes encrypted traffic (can be instance-wide or organization-specific)
The relay system uses SSH reverse tunnels over TCP, eliminating firewall complexity and providing excellent performance for enterprise environments.
Relay-related commands for Infisical that provide identity-aware relay infrastructure for routing encrypted traffic. Relays are organization-deployed servers that route encrypted traffic between Infisical and your gateways.
## Subcommands & flags
<Accordion title="infisical relay start" defaultOpen="true">
Run the Infisical relay component. The relay handles network traffic routing and can operate in different modes.
Run the Infisical relay component. The relay handles network traffic routing between Infisical and your gateways.
```bash
infisical relay start --type=<type> --host=<host> --name=<name> --auth-method=<auth-method>
infisical relay start --host=<host> --name=<name> --auth-method=<auth-method>
```
### Flags
<Accordion title="--type">
The type of relay to run. Must be either 'instance' or 'org'.
- **`instance`**: Shared relay server that can be used by all organizations on your Infisical instance. Set up by the instance administrator. Uses `INFISICAL_RELAY_AUTH_SECRET` environment variable for authentication, which must be configured by the instance admin.
- **`org`**: Dedicated relay server that individual organizations deploy and manage in their own infrastructure. Provides enhanced security, custom geographic placement, and compliance benefits. Uses standard Infisical authentication methods.
```bash
# Organization relay (customer-deployed)
infisical relay start --type=org --host=192.168.1.100 --name=my-org-relay
# Instance relay (configured by instance admin)
INFISICAL_RELAY_AUTH_SECRET=<secret> infisical relay start --type=instance --host=10.0.1.50 --name=shared-relay
```
</Accordion>
<Accordion title="--host">
The host (IP address or hostname) of the instance where the relay is deployed. This must be a static public IP or resolvable hostname that gateways can reach.
```bash
# Example with IP address
infisical relay start --host=203.0.113.100 --type=org --name=my-relay
infisical relay start --host=203.0.113.100 --name=my-relay
# Example with hostname
infisical relay start --host=relay.example.com --type=org --name=my-relay
infisical relay start --host=relay.example.com --name=my-relay
```
</Accordion>
<Accordion title="--name">
The name of the relay.
The name of the relay. This is an arbitrary identifier for your relay instance.
```bash
# Example
infisical relay start --name=my-relay --type=org --host=192.168.1.100
infisical relay start --name=my-relay --host=192.168.1.100
```
</Accordion>
### Authentication
**Organization Relays (`--type=org`):**
Deploy your own relay server in your infrastructure for enhanced security and reduced latency. Supports all standard Infisical authentication methods documented below.
**Instance Relays (`--type=instance`):**
Shared relay servers that serve all organizations on your Infisical instance. For Infisical Cloud, these are already running and ready to use. For self-hosted deployments, they're set up by the instance administrator. Authentication is handled via the `INFISICAL_RELAY_AUTH_SECRET` environment variable.
Relays support all standard Infisical authentication methods. Choose the authentication method that best fits your environment and set the corresponding flags when starting the relay.
```bash
# Organization relay with Universal Auth (customer-deployed)
infisical relay start --type=org --host=192.168.1.100 --name=my-org-relay --auth-method=universal-auth --client-id=<client-id> --client-secret=<client-secret>
# Instance relay (configured by instance admin)
INFISICAL_RELAY_AUTH_SECRET=<secret> infisical relay start --type=instance --host=10.0.1.50 --name=shared-relay
# Example with Universal Auth
infisical relay start --host=192.168.1.100 --name=my-relay --auth-method=universal-auth --client-id=<client-id> --client-secret=<client-secret>
```
### Authentication Methods
### Available Authentication Methods
The Infisical CLI supports multiple authentication methods for organization relays. Below are the available authentication methods, with their respective flags.
The Infisical CLI supports multiple authentication methods for relays. Below are the available authentication methods, with their respective flags.
<AccordionGroup>
<Accordion title="Universal Auth">
@@ -108,7 +90,7 @@ The Infisical CLI supports multiple authentication methods for organization rela
</ParamField>
```bash
infisical relay start --auth-method=universal-auth --client-id=<client-id> --client-secret=<client-secret> --type=org --host=<host> --name=<name>
infisical relay start --auth-method=universal-auth --client-id=<client-id> --client-secret=<client-secret> --host=<host> --name=<name>
```
</Accordion>
@@ -132,7 +114,7 @@ The Infisical CLI supports multiple authentication methods for organization rela
```bash
infisical relay start --auth-method=kubernetes --machine-identity-id=<machine-identity-id> --type=org --host=<host> --name=<name>
infisical relay start --auth-method=kubernetes --machine-identity-id=<machine-identity-id> --host=<host> --name=<name>
```
</Accordion>
@@ -153,7 +135,7 @@ The Infisical CLI supports multiple authentication methods for organization rela
```bash
infisical relay start --auth-method=azure --machine-identity-id=<machine-identity-id> --type=org --host=<host> --name=<name>
infisical relay start --auth-method=azure --machine-identity-id=<machine-identity-id> --host=<host> --name=<name>
```
</Accordion>
@@ -174,7 +156,7 @@ The Infisical CLI supports multiple authentication methods for organization rela
```bash
infisical relay start --auth-method=gcp-id-token --machine-identity-id=<machine-identity-id> --type=org --host=<host> --name=<name>
infisical relay start --auth-method=gcp-id-token --machine-identity-id=<machine-identity-id> --host=<host> --name=<name>
```
</Accordion>
@@ -196,7 +178,7 @@ The Infisical CLI supports multiple authentication methods for organization rela
</ParamField>
```bash
infisical relay start --auth-method=gcp-iam --machine-identity-id=<machine-identity-id> --service-account-key-file-path=<service-account-key-file-path> --type=org --host=<host> --name=<name>
infisical relay start --auth-method=gcp-iam --machine-identity-id=<machine-identity-id> --service-account-key-file-path=<service-account-key-file-path> --host=<host> --name=<name>
```
</Accordion>
@@ -215,7 +197,7 @@ The Infisical CLI supports multiple authentication methods for organization rela
</ParamField>
```bash
infisical relay start --auth-method=aws-iam --machine-identity-id=<machine-identity-id> --type=org --host=<host> --name=<name>
infisical relay start --auth-method=aws-iam --machine-identity-id=<machine-identity-id> --host=<host> --name=<name>
```
</Accordion>
@@ -237,7 +219,7 @@ The Infisical CLI supports multiple authentication methods for organization rela
</ParamField>
```bash
infisical relay start --auth-method=oidc-auth --machine-identity-id=<machine-identity-id> --jwt=<oidc-jwt> --type=org --host=<host> --name=<name>
infisical relay start --auth-method=oidc-auth --machine-identity-id=<machine-identity-id> --jwt=<oidc-jwt> --host=<host> --name=<name>
```
</Accordion>
@@ -261,7 +243,7 @@ The Infisical CLI supports multiple authentication methods for organization rela
```bash
infisical relay start --auth-method=jwt-auth --jwt=<jwt> --machine-identity-id=<machine-identity-id> --type=org --host=<host> --name=<name>
infisical relay start --auth-method=jwt-auth --jwt=<jwt> --machine-identity-id=<machine-identity-id> --host=<host> --name=<name>
```
</Accordion>
@@ -277,30 +259,132 @@ The Infisical CLI supports multiple authentication methods for organization rela
</ParamField>
```bash
infisical relay start --token=<token> --type=org --host=<host> --name=<name>
infisical relay start --token=<token> --host=<host> --name=<name>
```
</Accordion>
</AccordionGroup>
### Deployment Considerations
</Accordion>
**When to use Instance Relays (`--type=instance`):**
<Accordion title="infisical relay systemd" defaultOpen="false">
Manage systemd service for Infisical relay. This allows you to install and run the relay as a systemd service on Linux systems.
### Requirements
- **Operating System**: Linux only (systemd is not supported on other operating systems)
- **Privileges**: Root/sudo privileges required for both install and uninstall operations
- **Systemd**: The system must be running systemd as the init system
- You want to get started quickly without setting up your own relay infrastructure
- You're using Infisical Cloud and want to leverage the existing relay infrastructure
- You're on a self-hosted instance where the admin has already set up shared relays
- You don't need custom geographic placement of relay servers
- You don't have specific compliance requirements that require dedicated infrastructure
- You want to minimize operational overhead by using shared infrastructure
```bash
infisical relay systemd <subcommand>
```
**When to use Organization Relays (`--type=org`):**
### Subcommands
- You need lower latency by deploying relay servers closer to your resources
- You have security requirements that mandate running infrastructure in your own environment
- You have compliance requirements such as data sovereignty or air-gapped environments
- You need custom network policies or specific networking configurations
- You have high-scale performance requirements that shared infrastructure can't meet
- You want full control over your relay infrastructure and its configuration
<Accordion title="install">
Install and enable systemd service for the relay. Must be run with sudo on Linux systems.
```bash
sudo infisical relay systemd install --host=<host> --name=<name> --token=<token> [flags]
```
#### Flags
<Accordion title="--host">
The host (IP address or hostname) of the instance where the relay is deployed. This must be a static public IP or resolvable hostname that gateways can reach.
```bash
# Example with IP address
sudo infisical relay systemd install --host=203.0.113.100 --name=my-relay --token=<token>
# Example with hostname
sudo infisical relay systemd install --host=relay.example.com --name=my-relay --token=<token>
```
</Accordion>
<Accordion title="--name">
The name of the relay.
```bash
# Example
sudo infisical relay systemd install --name=my-relay --host=192.168.1.100 --token=<token>
```
</Accordion>
<Accordion title="--token">
Connect with Infisical using machine identity access token.
```bash
# Example
sudo infisical relay systemd install --token=<machine-identity-token> --host=<host> --name=<name>
```
</Accordion>
<Accordion title="--domain">
Domain of your self-hosted Infisical instance. Optional flag for specifying a custom domain.
```bash
# Example
sudo infisical relay systemd install --domain=http://localhost:8080 --token=<token> --host=<host> --name=<name>
```
</Accordion>
#### Examples
```bash
# Install relay with token authentication
sudo infisical relay systemd install --host=192.168.1.100 --name=my-relay --token=<machine-identity-token>
# Install with custom domain
sudo infisical relay systemd install --domain=http://localhost:8080 --token=<token> --host=<host> --name=<name>
```
#### Post-installation
After successful installation, the service will be enabled but not started. To start the service:
```bash
sudo systemctl start infisical-relay
```
To check the service status:
```bash
sudo systemctl status infisical-relay
```
To view service logs:
```bash
sudo journalctl -u infisical-relay -f
```
</Accordion>
<Accordion title="uninstall">
Uninstall and remove systemd service for the relay. Must be run with sudo on Linux systems.
```bash
sudo infisical relay systemd uninstall
```
#### Examples
```bash
# Uninstall the relay systemd service
sudo infisical relay systemd uninstall
```
#### What it does
- Stops the `infisical-relay` systemd service if it's running
- Disables the service from starting on boot
- Removes the systemd service file
- Cleans up the service configuration
</Accordion>
</Accordion>

View File

@@ -173,8 +173,9 @@
"group": "Gateway",
"pages": [
"documentation/platform/gateways/overview",
"documentation/platform/gateways/gateway-security",
"documentation/platform/gateways/networking",
"documentation/platform/gateways/gateway-deployment",
"documentation/platform/gateways/relay-deployment",
"documentation/platform/gateways/security",
{
"group": "Gateway (Deprecated)",
"pages": [
@@ -711,6 +712,18 @@
"documentation/platform/pki/pki-issuer",
"documentation/platform/pki/integration-guides/gloo-mesh"
]
},
{
"group": "Certificate Syncs",
"pages": [
"documentation/platform/pki/certificate-syncs/overview",
{
"group": "Syncs",
"pages": [
"documentation/platform/pki/certificate-syncs/azure-key-vault"
]
}
]
}
]
}
@@ -2475,6 +2488,26 @@
"api-reference/endpoints/pki-alerts/update",
"api-reference/endpoints/pki-alerts/delete"
]
},
{
"group": "Certificate Syncs",
"pages": [
"api-reference/endpoints/pki/syncs/list",
"api-reference/endpoints/pki/syncs/get-by-id",
"api-reference/endpoints/pki/syncs/options",
{
"group": "Azure Key Vault",
"pages": [
"api-reference/endpoints/pki/syncs/azure-key-vault/list",
"api-reference/endpoints/pki/syncs/azure-key-vault/get-by-id",
"api-reference/endpoints/pki/syncs/azure-key-vault/create",
"api-reference/endpoints/pki/syncs/azure-key-vault/update",
"api-reference/endpoints/pki/syncs/azure-key-vault/delete",
"api-reference/endpoints/pki/syncs/azure-key-vault/sync-certificates",
"api-reference/endpoints/pki/syncs/azure-key-vault/remove-certificates"
]
}
]
}
]
},

View File

@@ -0,0 +1,265 @@
---
title: "Gateway Deployment"
description: "Complete guide to deploying Infisical Gateways including network configuration and firewall requirements"
---
Infisical Gateways enable secure communication between your private resources and the Infisical platform without exposing inbound ports in your network.
This guide covers everything you need to deploy and configure Infisical Gateways.
## Deployment Steps
To successfully deploy an Infisical Gateway for use, follow these steps in order.
<Steps>
<Step title="Provision a Machine Identity">
Create a machine identity with the correct permissions to create and manage gateways. This identity is used by the gateway to authenticate with Infisical and should be provisioned in advance.
The gateway supports several [machine identity auth methods](/documentation/platform/identities/machine-identities), as listed below. Choose the one that best fits your environment and set the corresponding environment variables when deploying the gateway.
<AccordionGroup>
<Accordion title="Universal Auth">
Simple and secure authentication using client ID and client secret.
**Environment Variables:**
- `INFISICAL_AUTH_METHOD=universal-auth`
- `INFISICAL_UNIVERSAL_AUTH_CLIENT_ID=<client-id>`
- `INFISICAL_UNIVERSAL_AUTH_CLIENT_SECRET=<client-secret>`
</Accordion>
<Accordion title="Token Auth">
Direct authentication using a machine identity access token.
**Environment Variables:**
- `INFISICAL_TOKEN=<token>`
</Accordion>
<Accordion title="Native Kubernetes">
Authentication using Kubernetes service account tokens.
**Environment Variables:**
- `INFISICAL_AUTH_METHOD=kubernetes`
- `INFISICAL_MACHINE_IDENTITY_ID=<machine-identity-id>`
</Accordion>
<Accordion title="Native AWS IAM">
Authentication using AWS IAM roles.
**Environment Variables:**
- `INFISICAL_AUTH_METHOD=aws-iam`
- `INFISICAL_MACHINE_IDENTITY_ID=<machine-identity-id>`
</Accordion>
<Accordion title="Native GCP ID Token">
Authentication using GCP identity tokens.
**Environment Variables:**
- `INFISICAL_AUTH_METHOD=gcp-id-token`
- `INFISICAL_MACHINE_IDENTITY_ID=<machine-identity-id>`
</Accordion>
<Accordion title="GCP IAM">
Authentication using GCP service account keys.
**Environment Variables:**
- `INFISICAL_AUTH_METHOD=gcp-iam`
- `INFISICAL_MACHINE_IDENTITY_ID=<machine-identity-id>`
- `INFISICAL_GCP_SERVICE_ACCOUNT_KEY_FILE_PATH=<path-to-key-file>`
</Accordion>
<Accordion title="Native Azure">
Authentication using Azure managed identity.
**Environment Variables:**
- `INFISICAL_AUTH_METHOD=azure`
- `INFISICAL_MACHINE_IDENTITY_ID=<machine-identity-id>`
</Accordion>
<Accordion title="OIDC Auth">
Authentication using OIDC identity tokens.
**Environment Variables:**
- `INFISICAL_AUTH_METHOD=oidc-auth`
- `INFISICAL_MACHINE_IDENTITY_ID=<machine-identity-id>`
- `INFISICAL_JWT=<oidc-jwt>`
</Accordion>
<Accordion title="JWT Auth">
Authentication using JWT tokens.
**Environment Variables:**
- `INFISICAL_AUTH_METHOD=jwt-auth`
- `INFISICAL_MACHINE_IDENTITY_ID=<machine-identity-id>`
- `INFISICAL_JWT=<jwt>`
</Accordion>
</AccordionGroup>
</Step>
<Step title="Set Up a Relay Server">
Ensure a relay server is running and accessible before you deploy any gateways. You have two options:
- **Managed relay (Infisical Cloud, US/EU only):** Managed relays are only available for Infisical Cloud instances in the US and EU regions. If you are using Infisical Cloud in these regions, you can use the provided managed relay.
- **Self-hosted relay:** For all other cases, including all self-hosted and dedicated enterprise instances of Infisical, you must deploy your own relay server. You can also choose to deploy your own relay server when using Infisical Cloud if you require closer geographic proximity to your target resources for lower latency or to reduce network congestion. For setup instructions, see the <a href="/documentation/platform/gateways/relay-deployment">Relay Deployment Guide</a>.
</Step>
<Step title="Install the Infisical CLI">
Make sure the Infisical CLI is installed on the machine or environment where you plan to deploy the gateway. The CLI is required for gateway installation and management.
See the [CLI Installation Guide](/cli/overview) for instructions.
</Step>
<Step title="Configure Network & Firewall">
Ensure your network and firewall settings allow the gateway to connect to all required services. All connections are outbound only; no inbound ports need to be opened.
| Protocol | Destination | Port | Purpose |
| -------- | ------------------------------------ | ---- | ------------------------------------------ |
| TCP | Relay Server IP/Hostname | 2222 | SSH reverse tunnel establishment |
| TCP | Infisical instance host (US/EU, other) | 443 | API communication and certificate requests |
For managed relays, allow outbound traffic to the provided relay server IP/hostname. For self-hosted relays, allow outbound traffic to your own relay server address.
If you are in a corporate environment with strict egress filtering, ensure outbound TCP 2222 to relay servers and outbound HTTPS 443 to Infisical API endpoints are allowed.
</Step>
<Step title="Select a Deployment Method">
The Infisical CLI is used to install and start the gateway in your chosen environment. The CLI provides commands for both production and development scenarios, and supports a variety of options/flags to configure your deployment.
To view all available flags and equivalent environment variables for gateway deployment, see the [Gateway CLI Command Reference](/cli/commands/gateway).
<Tabs>
<Tab title="Linux Server (Production)">
For production deployments on Linux servers, install the Gateway as a systemd service so that it runs securely in the background and automatically restarts on failure or system reboot:
```bash
sudo infisical gateway systemd install --token <your-machine-identity-token> --domain <your-infisical-domain> --name <gateway-name> --relay <relay-name>
sudo systemctl start infisical-gateway
```
<Warning>
The systemd install command requires a Linux operating system with root/sudo
privileges.
</Warning>
</Tab>
<Tab title="Kubernetes (Production)">
For production deployments on Kubernetes clusters, install the Gateway using the Infisical Helm chart:
#### Install the latest Helm Chart repository
```bash
helm repo add infisical-helm-charts 'https://dl.cloudsmith.io/public/infisical/helm-charts/helm/charts/'
helm repo update
```
#### Create a Kubernetes Secret
The gateway supports all identity authentication methods through environment variables:
```bash
kubectl create secret generic infisical-gateway-environment \
--from-literal=INFISICAL_AUTH_METHOD=universal-auth \
--from-literal=INFISICAL_UNIVERSAL_AUTH_CLIENT_ID=<client-id> \
--from-literal=INFISICAL_UNIVERSAL_AUTH_CLIENT_SECRET=<client-secret> \
--from-literal=INFISICAL_RELAY_NAME=<relay-name> \
--from-literal=INFISICAL_GATEWAY_NAME=<gateway-name>
```
#### Install the Gateway
```bash
helm install infisical-gateway infisical-helm-charts/infisical-gateway
```
</Tab>
<Tab title="Development & Testing">
For development or testing environments:
```bash
infisical gateway start --token <token> --relay=<relay-name> --name=<gateway-name>
```
</Tab>
</Tabs>
</Step>
<Step title="Verify Your Gateway Deployment">
After deployment, verify your gateway is working:
1. **Check logs** for "Gateway started successfully" message indicating the gateway is running and connected to the relay
2. **Verify registration** in Infisical by visiting the Gateways section of your organization. The new gateway should appear with a recent heartbeat timestamp.
3. **Test connectivity** by creating a resource in Infisical that uses the gateway to access a private service. Verify the resource can successfully connect through the gateway.
</Step>
</Steps>
## Frequently Asked Questions
<Accordion title="Do I need to open any inbound ports on my firewall?">
No inbound ports need to be opened for gateways. The gateway only makes outbound connections:
- **Outbound SSH** to relay servers on port 2222
- **Outbound HTTPS** to Infisical API endpoints on port 443
- **SSH reverse tunnels** handle all communication - no return traffic configuration needed
This design maintains security by avoiding the need for inbound firewall rules that could expose your network to external threats.
</Accordion>
<Accordion title="How do I test network connectivity from the gateway?">
Test relay connectivity and outbound API access from the gateway:
1. Test SSH port to relay:
```bash
nc -zv <relay-ip> 2222
```
2. Test outbound API access (replace with your Infisical domain if different):
```bash
curl -I https://app.infisical.com
```
</Accordion>
<Accordion title="How do I troubleshoot relay connectivity issues?">
If the gateway cannot connect to the relay:
1. Verify the relay server is running and accessible
2. Check firewall rules allow outbound connections on port 2222
3. Confirm the relay name matches exactly
4. Test SSH port to relay:
```bash
nc -zv <relay-ip> 2222
```
</Accordion>
<Accordion title="How do I troubleshoot authentication failures?">
If you encounter authentication failures:
1. Verify machine identity credentials are correct
2. Check token expiration and renewal
3. Ensure authentication method is properly configured
</Accordion>
<Accordion title="Where can I find gateway logs?">
Check gateway logs for detailed error information:
- **systemd service:**
```bash
sudo journalctl -u infisical-gateway -f
```
- **Kubernetes:**
```bash
kubectl logs deployment/infisical-gateway
```
- **Local installation:** Logs appear in the terminal where you started the gateway
</Accordion>
<Accordion title="Where is the gateway configuration file stored?">
For systemd-based installations, the gateway's configuration file is stored at `/etc/infisical/gateway.conf`. You may reference or inspect this file for troubleshooting advanced configuration issues.
</Accordion>
<Accordion title="What happens if there is a network interruption?">
The gateway is designed to handle network interruptions gracefully:
- **Automatic reconnection**: The gateway will automatically attempt to reconnect to relay servers if the SSH connection is lost
- **Connection retry logic**: Built-in retry mechanisms handle temporary network outages without manual intervention
- **Persistent SSH tunnels**: SSH connections are automatically re-established when connectivity is restored
- **Certificate rotation**: The gateway handles certificate renewal automatically during reconnection
- **Graceful degradation**: The gateway logs connection issues and continues attempting to restore connectivity
No manual intervention is typically required during network interruptions.
</Accordion>

View File

@@ -1,178 +0,0 @@
---
title: "Networking"
description: "Network configuration and firewall requirements for Infisical Gateway"
---
The Infisical Gateway requires outbound network connectivity to establish secure SSH reverse tunnels with relay servers.
This page outlines the required ports, protocols, and firewall configurations needed for optimal gateway usage.
## Network Architecture
The gateway uses SSH reverse tunnels to establish secure connections with end-to-end encryption:
1. **Gateway** connects outbound to **Relay Servers** using SSH over TCP
2. **Infisical platform** establishes mTLS connections with gateways for application traffic
3. **Relay Servers** route the doubly-encrypted traffic (mTLS payload within SSH tunnels) between the platform and gateways
4. **Double encryption** ensures relay servers cannot access application data - only the platform and gateway can decrypt traffic
## Required Network Connectivity
### Outbound Connections (Required)
The gateway requires the following outbound connectivity:
| Protocol | Destination | Ports | Purpose |
| -------- | ------------------------------------ | ----- | ------------------------------------------ |
| TCP | Relay Servers | 2222 | SSH reverse tunnel establishment |
| TCP | app.infisical.com / eu.infisical.com | 443 | API communication and certificate requests |
### Relay Server Connectivity
**For Instance Relays (Infisical Cloud):** Your firewall must allow outbound connectivity to Infisical-managed relay servers.
**For Organization Relays:** Your firewall must allow outbound connectivity to your own relay server IP addresses or hostnames.
**For Self-hosted Instance Relays:** Your firewall must allow outbound connectivity to relay servers configured by your instance administrator.
<Tabs>
<Tab title="Instance Relays (Infisical Cloud)">
Infisical provides multiple managed relay servers with static IP addresses.
You can whitelist these IPs ahead of time based on which relay server you
choose to connect to. **Firewall requirements:** Allow outbound TCP
connections to the desired relay server IP on port 2222.
</Tab>
<Tab title="Organization Relays">
You control the relay server IP addresses or hostnames when deploying your
own organization relays. **Firewall requirements:** Allow outbound TCP
connections to your relay server IP or hostname on port 2222. For example,
if your relay is at `203.0.113.100` or `relay.example.com`, allow TCP to
`203.0.113.100:2222` or `relay.example.com:2222`.
</Tab>
<Tab title="Self-hosted Instance Relays">
Contact your instance administrator for the relay server IP addresses or
hostnames configured for your deployment. **Firewall requirements:** Allow
outbound TCP connections to instance relay servers on port 2222.
</Tab>
</Tabs>
## Protocol Details
### SSH over TCP
The gateway uses SSH reverse tunnels for primary communication:
- **Port 2222**: SSH connection to relay servers
- **Built-in features**: Automatic reconnection, certificate-based authentication, encrypted tunneling
- **Encryption**: SSH with certificate-based authentication and key exchange
## Firewall Configuration for SSH
The gateway uses standard SSH over TCP, making firewall configuration straightforward.
### TCP Connection Handling
SSH connections over TCP are stateful and handled seamlessly by all modern firewalls:
- **Established connections** are automatically tracked
- **Return traffic** is allowed for established outbound connections
- **No special configuration** needed for connection tracking
- **Standard SSH protocol** that enterprise firewalls handle well
### Simplified Firewall Rules
Since SSH uses TCP, you only need simple outbound rules:
1. **Allow outbound TCP** to relay servers (IP addresses or hostnames) on port 2222
2. **Allow outbound HTTPS** to Infisical API endpoints on port 443
3. **No inbound rules required** - all connections are outbound only
## Common Network Scenarios
### Corporate Firewalls
For corporate environments with strict egress filtering:
1. **Allow outbound TCP** to relay servers (IP addresses or hostnames) on port 2222
2. **Allow outbound HTTPS** to the Infisical API server on port 443
3. **No inbound rules required** - all connections are outbound only
4. **Standard TCP rules** - simple and straightforward configuration
### Cloud Environments (AWS/GCP/Azure)
Configure security groups to allow:
- **Outbound TCP** to relay servers (IP addresses or hostnames) on port 2222
- **Outbound HTTPS** to app.infisical.com/eu.infisical.com on port 443
- **No inbound rules required** - SSH reverse tunnels are outbound only
## Frequently Asked Questions
<Accordion title="What happens if there is a network interruption?">
The gateway is designed to handle network interruptions gracefully:
- **Automatic reconnection**: The gateway will automatically attempt to reconnect to relay servers if the SSH connection is lost
- **Connection retry logic**: Built-in retry mechanisms handle temporary network outages without manual intervention
- **Persistent SSH tunnels**: SSH connections are automatically re-established when connectivity is restored
- **Certificate rotation**: The gateway handles certificate renewal automatically during reconnection
- **Graceful degradation**: The gateway logs connection issues and continues attempting to restore connectivity
No manual intervention is typically required during network interruptions.
</Accordion>
<Accordion title="Why does the gateway use SSH over TCP?">
SSH over TCP provides several advantages for enterprise gateway communication:
- **Firewall-friendly**: TCP is stateful and handled seamlessly by all enterprise firewalls
- **Standard protocol**: SSH is a well-established protocol that network teams are familiar with
- **Certificate-based security**: Uses SSH certificates for strong authentication without shared secrets
- **Automatic tunneling**: SSH reverse tunnels handle all the complexity of secure communication
- **Enterprise compatibility**: Works reliably across all enterprise network configurations
TCP's reliability and firewall compatibility make it ideal for enterprise environments where network policies are strictly managed.
</Accordion>
<Accordion title="Do I need to open any inbound ports on my firewall?">
No inbound ports need to be opened. The gateway only makes outbound connections:
- **Outbound SSH** to relay servers on port 2222
- **Outbound HTTPS** to Infisical API endpoints on port 443
- **SSH reverse tunnels** handle all communication - no return traffic configuration needed
This design maintains security by avoiding the need for inbound firewall rules that could expose your network to external threats.
</Accordion>
<Accordion title="What if my firewall blocks SSH connections?">
If your firewall has strict outbound restrictions:
1. **Work with your network team** to allow outbound TCP connections on port 2222 to relay servers (IP addresses or hostnames)
2. **Allow standard SSH traffic** - most enterprises already have SSH policies in place
3. **Consider network policy exceptions** for the gateway host if needed
4. **Monitor firewall logs** to identify which specific rules are blocking traffic
</Accordion>
<Accordion title="How many relay servers does the gateway connect to?">
The gateway connects to **one relay server**:
- **Single SSH connection**: Each gateway establishes one SSH reverse tunnel to its assigned relay server
- **Named relay assignment**: Gateways connect to the specific relay server specified by `--relay`
- **Automatic reconnection**: If the relay connection is lost, the gateway automatically reconnects to the same relay
- **Certificate-based authentication**: Each connection uses SSH certificates issued by Infisical for secure authentication
</Accordion>
<Accordion title="Can the relay servers decrypt traffic going through them?">
No, relay servers cannot decrypt any traffic passing through them due to end-to-end encryption:
- **Client-to-Gateway mTLS (via TLS-pinned tunnel)**: Clients connect via a proxy that establishes a TLS-pinned tunnel to the gateway; mTLS between the client and gateway is negotiated inside this tunnel, encrypting all application traffic
- **SSH tunnel encryption**: The mTLS-encrypted traffic is then transmitted through SSH reverse tunnels to relay servers
- **Double encryption**: Traffic is encrypted twice - once by client mTLS and again by SSH tunnels
- **Relay only routes traffic**: The relay server only routes the doubly-encrypted traffic without access to either encryption layer
- **No data storage**: Relay servers do not store any traffic or sensitive information
- **Certificate isolation**: Each connection uses unique certificates, ensuring complete tenant isolation
The relay infrastructure is designed as a secure routing mechanism where only the client and gateway can decrypt the actual application traffic.
</Accordion>

View File

@@ -1,19 +1,14 @@
---
title: "Gateway"
title: "Gateway Overview"
sidebarTitle: "Overview"
description: "How to access private network resources from Infisical"
---
![Architecture Overview](../../../images/platform/gateways/gateway-highlevel-diagram.png)
The Infisical Gateway provides secure access to private resources within your network without needing direct inbound connections to your environment. This method keeps your resources fully protected from external access while enabling Infisical to securely interact with resources like databases.
**Architecture Components:**
- **Gateway**: Lightweight agent deployed within your VPCs that provides access to private resources
- **Relay**: Infrastructure that routes encrypted traffic (instance-wide or organization-specific)
Common use cases include generating dynamic credentials or rotating credentials for private databases.
The Infisical Gateway provides secure access to private resources within your network without needing direct inbound connections to your environment.
This is particularly useful when Infisical isn't hosted within the same network as the resources it needs to reach.
This method keeps your resources fully protected from external access while enabling Infisical to securely interact with resources like databases.
<Info>
Gateway is a paid feature available under the Enterprise Tier for Infisical
@@ -22,428 +17,62 @@ Common use cases include generating dynamic credentials or rotating credentials
license.
</Info>
## Core Components
The Gateway system consists of two primary components working together to enable secure network access:
<Tabs>
<Tab title="Gateway" icon="server">
A Gateway is a lightweight service that you deploy within your own network infrastructure to provide secure access to your private resources. Think of it as a secure bridge between Infisical and your internal systems.
Gateways must be deployed within the same network where your target resources are located, with direct network connectivity to the private resources you want Infisical to access.
For different networks, regions, or isolated environments, you'll need to deploy separate gateways.
**Core Functions:**
- **Network Placement**: Deployed within your VPCs, data centers, or on-premises infrastructure where your private resources live
- **Connection Model**: Only makes outbound connections to Infisical's relay servers, so no inbound firewall rules are needed
- **Security Method**: Uses SSH reverse tunnels with certificate-based authentication for maximum security
- **Resource Access**: Acts as a proxy to connect Infisical to your private databases, APIs, and other services
</Tab>
<Tab title="Relay Server" icon="route">
A Relay Server is the routing infrastructure that enables secure communication between the Infisical platform and your deployed gateways. It acts as an intermediary that never sees your actual data.
**Core Functions:**
- **Traffic Routing**: Routes encrypted traffic between the Infisical platform and your gateways without storing or inspecting the data
- **Network Isolation**: Enables secure communication without requiring direct network connections between Infisical and your private infrastructure
- **Authentication Management**: Validates SSH certificates and manages secure routing between authenticated gateways
**Deployment Options:**
To reduce operational overhead, Infisical Cloud (US/EU) provides managed relay infrastructure, though organizations can also deploy their own relays for reduced latency.
- **Infisical Managed**: Use pre-deployed relays in select regions, shared across all Infisical Cloud organizations. Each organization's traffic is isolated and encrypted.
- **Self-Deployed**: Deploy your own dedicated relay servers geographically close to your infrastructure for reduced latency.
</Tab>
</Tabs>
## How It Works
The Gateway system uses SSH reverse tunnels for secure, firewall-friendly connectivity:
1. **Gateway Registration**: The gateway establishes an outbound SSH reverse tunnel to a relay server using SSH certificates issued by Infisical
2. **Relay Routing**: The relay server routes encrypted traffic between the Infisical platform and gateways
3. **Resource Access**: The Infisical platform connects to your private resources through the established gateway connections
**Key Benefits:**
- **No inbound firewall rules needed** - all connections are outbound from your network
- **Firewall-friendly** - uses standard SSH over TCP
- **Certificate-based authentication** provides enhanced security
- **Automatic reconnection** if connections are lost
## Deployment
The Infisical Gateway is integrated into the Infisical CLI under the `gateway` command, making it simple to deploy and manage.
You can install the Gateway in all the same ways you install the Infisical CLI—whether via npm, Docker, or a binary.
For detailed installation instructions, refer to the Infisical [CLI Installation instructions](/cli/overview).
**Prerequisites:**
1. **Relay Server**: Before deploying gateways, you need a running relay server:
- **Infisical Cloud**: Instance relays are already available - no setup needed
- **Self-hosted**: Instance admin must set up shared instance relays, or organizations can deploy their own
2. **Machine Identity**: Configure a machine identity with appropriate permissions to create and manage gateways
Once authenticated, the Gateway establishes an SSH reverse tunnel to the specified relay server, allowing secure access to your private resources.
### Get started
<Steps>
<Step title="Create a Gateway Identity">
1. Navigate to **Organization Access Control** in your Infisical dashboard.
2. Create a dedicated machine identity for your Gateway.
3. **Best Practice:** Assign a unique identity to each Gateway for better security and management.
![Create Gateway Identity](../../../images/platform/gateways/create-identity-for-gateway.png)
</Step>
<Step title="Configure Authentication Method">
You'll need to choose an authentication method to initiate communication with Infisical. View the available machine identity authentication methods [here](/documentation/platform/identities/machine-identities).
</Step>
<Step title="Choose Your Relay Setup">
You have two options for relay infrastructure:
<Tabs>
<Tab title="Use Instance Relays (Easiest)">
**Infisical Cloud:** Instance relays are already running and available - **no setup required**. You can immediately proceed to deploy gateways using these shared relays.
**Self-hosted:** If your instance admin has set up shared instance relays, you can use them directly. If not, the instance admin can set them up:
```bash
# Instance admin sets up shared relay (one-time setup)
export INFISICAL_RELAY_AUTH_SECRET=<instance-relay-secret>
infisical relay start --type=instance --ip=<public-ip> --name=<relay-name>
```
</Tab>
<Tab title="Deploy Your Own Organization Relay">
**Available for all users:** Deploy your own dedicated relay infrastructure for enhanced control:
```bash
# Deploy organization-specific relay
infisical relay start --type=org --ip=<public-ip> --name=<relay-name> --auth-method=universal-auth --client-id=<client-id> --client-secret=<client-secret>
```
**When to choose this:**
- You need lower latency (deploy closer to your resources)
- Enhanced security requirements
- Compliance needs (data sovereignty, air-gapped environments)
- Custom network policies
</Tab>
</Tabs>
</Step>
<Step title="Deploy the Gateway">
Use the Infisical CLI to deploy the Gateway. You can run it directly or install it as a systemd service for production:
<Tabs>
<Tab title="Production (systemd)">
For production deployments on Linux, install the Gateway as a systemd service:
<Warning>
**Gateway v2:** The `infisical gateway systemd install` command deploys the new Gateway v2 component.
If you are migrating from Gateway v1 (legacy `infisical gateway install` command), this is not in-place. Gateway v2 provisions new gateway instances with new gateway IDs. Update any resources that reference a gateway ID (for example: dynamic secret configs, app connections, or other gateway-bound resources) to use the new Gateway v2 gateway ID.
</Warning>
```bash
sudo infisical gateway systemd install --token <your-machine-identity-token> --domain <your-infisical-domain> --name <gateway-name> --relay <relay-name>
sudo systemctl start infisical-gateway
```
This will install and start the Gateway as a secure systemd service that:
- Runs with restricted privileges:
- Runs as root user (required for secure token management)
- Restricted access to home directories
- Private temporary directory
- Automatically restarts on failure
- Starts on system boot
- Manages token and domain configuration securely in `/etc/infisical/gateway.conf`
<Warning>
The install command requires:
- Linux operating system
- Root/sudo privileges
- Systemd
</Warning>
</Tab>
<Tab title="Production (Helm)">
The Gateway can be installed via [Helm](https://helm.sh/). Helm is a package manager for Kubernetes that allows you to define, install, and upgrade Kubernetes applications.
For production deployments on Kubernetes, install the Gateway using the Infisical Helm chart:
### Install the latest Helm Chart repository
```bash
helm repo add infisical-helm-charts 'https://dl.cloudsmith.io/public/infisical/helm-charts/helm/charts/'
```
### Update the Helm Chart repository
```bash
helm repo update
```
### Create a Kubernetes Secret containing gateway environment variables
The gateway supports all identity authentication methods through the use of environment variables.
The environment variables must be set in the `infisical-gateway-environment` Kubernetes secret.
#### Supported authentication methods
<AccordionGroup>
<Accordion title="Universal Auth">
The Universal Auth method is a simple and secure way to authenticate with Infisical. It requires a client ID and a client secret to authenticate with Infisical.
<ParamField query="Environment Variables">
<Expandable title="properties">
<ParamField query="INFISICAL_UNIVERSAL_AUTH_CLIENT_ID" type="string" required>
Your machine identity client ID.
</ParamField>
<ParamField query="INFISICAL_UNIVERSAL_AUTH_CLIENT_SECRET" type="string" required>
Your machine identity client secret.
</ParamField>
<ParamField query="INFISICAL_AUTH_METHOD" type="string" required>
The authentication method to use. Must be `universal-auth` when using Universal Auth.
</ParamField>
</Expandable>
</ParamField>
```bash
kubectl create secret generic infisical-gateway-environment \
--from-literal=INFISICAL_AUTH_METHOD=universal-auth \
--from-literal=INFISICAL_UNIVERSAL_AUTH_CLIENT_ID=<client-id> \
--from-literal=INFISICAL_UNIVERSAL_AUTH_CLIENT_SECRET=<client-secret> \
--from-literal=INFISICAL_RELAY_NAME=<relay-name> \
--from-literal=INFISICAL_GATEWAY_NAME=<gateway-name>
```
</Accordion>
<Accordion title="Native Kubernetes">
The Native Kubernetes method is used to authenticate with Infisical when running in a Kubernetes environment. It requires a service account token to authenticate with Infisical.
<ParamField query="Environment Variables">
<Expandable title="properties">
<ParamField query="INFISICAL_MACHINE_IDENTITY_ID" type="string" required>
Your machine identity ID.
</ParamField>
<ParamField query="INFISICAL_KUBERNETES_SERVICE_ACCOUNT_TOKEN_PATH" type="string" optional>
Path to the Kubernetes service account token to use. Default: `/var/run/secrets/kubernetes.io/serviceaccount/token`.
</ParamField>
<ParamField query="INFISICAL_AUTH_METHOD" type="string" required>
The authentication method to use. Must be `kubernetes` when using Native Kubernetes.
</ParamField>
</Expandable>
</ParamField>
```bash
kubectl create secret generic infisical-gateway-environment --from-literal=INFISICAL_AUTH_METHOD=kubernetes --from-literal=INFISICAL_MACHINE_IDENTITY_ID=<machine-identity-id>
```
</Accordion>
<Accordion title="Native Azure">
The Native Azure method is used to authenticate with Infisical when running in an Azure environment.
<ParamField query="Environment Variables">
<Expandable title="properties">
<ParamField query="INFISICAL_MACHINE_IDENTITY_ID" type="string" required>
Your machine identity ID.
</ParamField>
<ParamField query="INFISICAL_AUTH_METHOD" type="string" required>
The authentication method to use. Must be `azure` when using Native Azure.
</ParamField>
</Expandable>
</ParamField>
```bash
kubectl create secret generic infisical-gateway-environment --from-literal=INFISICAL_AUTH_METHOD=azure --from-literal=INFISICAL_MACHINE_IDENTITY_ID=<machine-identity-id>
```
</Accordion>
<Accordion title="Native GCP ID Token">
The Native GCP ID Token method is used to authenticate with Infisical when running in a GCP environment.
<ParamField query="Environment Variables">
<Expandable title="properties">
<ParamField query="INFISICAL_MACHINE_IDENTITY_ID" type="string" required>
Your machine identity ID.
</ParamField>
<ParamField query="INFISICAL_AUTH_METHOD" type="string" required>
The authentication method to use. Must be `gcp-id-token` when using Native GCP ID Token.
</ParamField>
</Expandable>
</ParamField>
```bash
kubectl create secret generic infisical-gateway-environment --from-literal=INFISICAL_AUTH_METHOD=gcp-id-token --from-literal=INFISICAL_MACHINE_IDENTITY_ID=<machine-identity-id>
```
</Accordion>
<Accordion title="GCP IAM">
The GCP IAM method is used to authenticate with Infisical with a GCP service account key.
<ParamField query="Environment Variables">
<Expandable title="properties">
<ParamField query="INFISICAL_MACHINE_IDENTITY_ID" type="string" required>
Your machine identity ID.
</ParamField>
<ParamField query="INFISICAL_GCP_SERVICE_ACCOUNT_KEY_FILE_PATH" type="string" required>
Path to your GCP service account key file _(Must be in JSON format!)_
</ParamField>
<ParamField query="INFISICAL_AUTH_METHOD" type="string" required>
The authentication method to use. Must be `gcp-iam` when using GCP IAM.
</ParamField>
</Expandable>
</ParamField>
```bash
kubectl create secret generic infisical-gateway-environment --from-literal=INFISICAL_AUTH_METHOD=gcp-iam --from-literal=INFISICAL_MACHINE_IDENTITY_ID=<machine-identity-id> --from-literal=INFISICAL_GCP_SERVICE_ACCOUNT_KEY_FILE_PATH=<service-account-key-file-path>
```
</Accordion>
<Accordion title="Native AWS IAM">
The AWS IAM method is used to authenticate with Infisical with an AWS IAM role while running in an AWS environment like EC2, Lambda, etc.
<ParamField query="Environment Variables">
<Expandable title="properties">
<ParamField query="INFISICAL_MACHINE_IDENTITY_ID" type="string" required>
Your machine identity ID.
</ParamField>
<ParamField query="INFISICAL_AUTH_METHOD" type="string" required>
The authentication method to use. Must be `aws-iam` when using Native AWS IAM.
</ParamField>
</Expandable>
</ParamField>
```bash
kubectl create secret generic infisical-gateway-environment --from-literal=INFISICAL_AUTH_METHOD=aws-iam --from-literal=INFISICAL_MACHINE_IDENTITY_ID=<machine-identity-id>
```
</Accordion>
<Accordion title="OIDC Auth">
The OIDC Auth method is used to authenticate with Infisical via identity tokens with OIDC.
<ParamField query="Environment Variables">
<Expandable title="properties">
<ParamField query="INFISICAL_MACHINE_IDENTITY_ID" type="string" required>
Your machine identity ID.
</ParamField>
<ParamField query="INFISICAL_JWT" type="string" required>
The OIDC JWT from the identity provider.
</ParamField>
<ParamField query="INFISICAL_AUTH_METHOD" type="string" required>
The authentication method to use. Must be `oidc-auth` when using OIDC Auth.
</ParamField>
</Expandable>
</ParamField>
```bash
kubectl create secret generic infisical-gateway-environment --from-literal=INFISICAL_AUTH_METHOD=oidc-auth --from-literal=INFISICAL_MACHINE_IDENTITY_ID=<machine-identity-id> --from-literal=INFISICAL_JWT=<oidc-jwt>
```
</Accordion>
<Accordion title="JWT Auth">
The JWT Auth method is used to authenticate with Infisical via a JWT token.
<ParamField query="Environment Variables">
<Expandable title="properties">
<ParamField query="INFISICAL_JWT" type="string" required>
The JWT token to use for authentication.
</ParamField>
<ParamField query="INFISICAL_MACHINE_IDENTITY_ID" type="string" required>
Your machine identity ID.
</ParamField>
<ParamField query="INFISICAL_AUTH_METHOD" type="string" required>
The authentication method to use. Must be `jwt-auth` when using JWT Auth.
</ParamField>
</Expandable>
</ParamField>
```bash
kubectl create secret generic infisical-gateway-environment --from-literal=INFISICAL_AUTH_METHOD=jwt-auth --from-literal=INFISICAL_JWT=<jwt> --from-literal=INFISICAL_MACHINE_IDENTITY_ID=<machine-identity-id>
```
</Accordion>
<Accordion title="Token Auth">
You can use the `INFISICAL_TOKEN` environment variable to authenticate with Infisical with a raw machine identity access token.
<ParamField query="Environment Variables">
<Expandable title="properties">
<ParamField query="INFISICAL_TOKEN" type="string" required>
The machine identity access token to use for authentication.
</ParamField>
</Expandable>
</ParamField>
```bash
kubectl create secret generic infisical-gateway-environment --from-literal=INFISICAL_TOKEN=<token>
```
</Accordion>
</AccordionGroup>
#### Required environment variables
In addition to the authentication method above, you **must** include these required variables:
<AccordionGroup>
<Accordion title="INFISICAL_RELAY_NAME">
The name of the relay server that this gateway should connect to.
</Accordion>
<Accordion title="INFISICAL_GATEWAY_NAME">
The name of this gateway instance.
</Accordion>
</AccordionGroup>
**Complete example with required variables:**
```bash
kubectl create secret generic infisical-gateway-environment \
--from-literal=INFISICAL_AUTH_METHOD=universal-auth \
--from-literal=INFISICAL_UNIVERSAL_AUTH_CLIENT_ID=<client-id> \
--from-literal=INFISICAL_UNIVERSAL_AUTH_CLIENT_SECRET=<client-secret> \
--from-literal=INFISICAL_RELAY_NAME=<relay-name> \
--from-literal=INFISICAL_GATEWAY_NAME=<gateway-name>
```
#### Other environment variables
<AccordionGroup>
<Accordion title="INFISICAL_API_URL">
The API URL to use for the gateway. By default, `INFISICAL_API_URL` is set to `https://app.infisical.com`.
</Accordion>
</AccordionGroup>
### Install the Infisical Gateway Helm Chart
<Warning>
**Version mapping:** Helm chart versions `>= 1.0.0` contain the new Gateway v2 component. Helm chart versions `<= 0.0.5` contain the legacy Gateway v1 component.
If you are moving from Gateway v1 (chart `<= 0.0.5`) to Gateway v2 (chart `>= 1.0.0`), this is not in-place. Gateway v2 provisions new gateway instances with new gateway IDs. Update any resources that reference a gateway ID (for example: dynamic secret configs, app connections, or other gateway-bound resources) to use the new Gateway v2 gateway ID.
</Warning>
```bash
helm install infisical-gateway infisical-helm-charts/infisical-gateway
```
### Check the gateway logs
After installing the gateway, you can check the logs to ensure it's running as expected.
```bash
kubectl logs deployment/infisical-gateway
```
You should see the following output which indicates the gateway is running as expected.
```bash
$ kubectl logs deployment/infisical-gateway
12:43AM INF Starting gateway
12:43AM INF Starting gateway certificate renewal goroutine
12:43AM INF Successfully registered gateway and received certificates
12:43AM INF Connecting to relay server infisical-start on 152.42.218.156:2222...
12:43AM INF Relay connection established for gateway
12:43AM INF Received incoming connection, starting TLS handshake
12:43AM INF TLS handshake completed successfully
12:43AM INF Negotiated ALPN protocol: infisical-ping
12:43AM INF Starting ping handler
12:43AM INF Ping handler completed
12:43AM INF Gateway is reachable by Infisical
```
</Tab>
<Tab title="Local Installation (testing)">
For development or testing, you can run the Gateway directly. Log in with your machine identity and start the Gateway in one command:
```bash
infisical gateway start --token $(infisical login --method=universal-auth --client-id=<> --client-secret=<> --plain) --relay=<relay-name> --name=<gateway-name>
```
Alternatively, if you already have the token, use it directly with the `--token` flag:
```bash
infisical gateway start --token <your-machine-identity-token> --relay=<relay-name> --name=<gateway-name>
```
Or set it as an environment variable:
```bash
export INFISICAL_TOKEN=<your-machine-identity-token>
infisical gateway start --relay=<relay-name> --name=<gateway-name>
```
</Tab>
</Tabs>
For detailed information about the gateway commands and their options, see the [gateway command documentation](/cli/commands/gateway).
<Note>
**Requirements:**
- Ensure the deployed Gateway has network access to the private resources you intend to connect with Infisical
- The gateway must be able to reach the relay server (outbound connection only)
- Replace `<relay-name>` with the name of your relay server and `<gateway-name>` with a unique name for this gateway
</Note>
</Step>
<Step title="Verify Gateway Deployment">
To confirm your Gateway is working, check the deployment status by looking for the message **"Gateway started successfully"** in the Gateway logs. This indicates the Gateway is running properly. Next, verify its registration by opening your Infisical dashboard, navigating to **Organization Access Control**, and selecting the **Gateways** tab. Your newly deployed Gateway should appear in the list.
![Gateway List](../../../images/platform/gateways/gateway-list.png)
</Step>
</Steps>
2. **Persistent Connection**: The gateway maintains an open TCP connection with the relay server, creating a secure channel for incoming requests
3. **Request Routing**: When Infisical needs to access your resources, requests are routed through the relay server to the already-established gateway connection
4. **Resource Access**: The gateway receives the routed requests and connects to your private resources on behalf of Infisical
## Getting Started
Ready to set up your gateway? Follow the guides below.
<Columns cols={2}>
<Card title="Gateway Deployment" href="/documentation/platform/gateways/gateway-deployment">
Deploy and configure your gateway within your network infrastructure.
</Card>
<Card title="Relay Deployment" href="/documentation/platform/gateways/relay-deployment">
Set up relay servers if using self-deployed infrastructure.
</Card>
</Columns>
<Columns cols={1}>
<Card title="Security Architecture" href="/documentation/platform/gateways/security">
Learn about the security model and implementation best practices.
</Card>
</Columns>

View File

@@ -0,0 +1,243 @@
---
title: "Relay Deployment"
description: "How to deploy Infisical Relay Servers"
---
Infisical Relay is a secure routing layer that allows Infisical to connect to your private network resources, such as databases or internal APIs, without exposing them to the public internet.
The relay acts as an intermediary, forwarding encrypted traffic between Infisical and your deployed gateways. This ensures that your sensitive data remains protected and never leaves your network unencrypted.
With this architecture, you can achieve secure, firewall-friendly access across network boundaries, making it possible for Infisical to interact with resources even in highly restricted environments.
Before diving in, it's important to determine whether you actually need to deploy your own relay server or if you can use Infisical's managed infrastructure.
## Do You Need to Deploy a Relay?
Not all users need to deploy their own relay servers. Infisical provides managed relay infrastructure in US/EU regions for Infisical Cloud users, which requires no setup or maintenance. You only need to deploy a relay if you:
- Are self-hosting Infisical
- Have a dedicated enterprise instance of Infisical (managed by Infisical)
- Require closer geographic proximity to target resources than managed relays provide for lower latency and reduced network congestion when accessing resources through the relay
- Need full control over relay infrastructure and traffic routing
If you are using Infisical Cloud and do not have specific requirements, you can use the managed relays provided by Infisical and skip the rest of this guide.
## Deployment Steps
To successfully deploy an Infisical Relay for use, follow these steps in order.
<Steps>
<Step title="Provision a Machine Identity">
Create a machine identity with the correct permissions to create and manage relays. This identity is used by the relay to authenticate with Infisical and should be provisioned in advance.
The relay supports several [machine identity auth methods](/documentation/platform/identities/machine-identities) for authentication, as listed below. Choose the one that best fits your environment and set the corresponding environment variables when deploying the relay.
<AccordionGroup>
<Accordion title="Universal Auth">
Simple and secure authentication using client ID and client secret.
**Environment Variables:**
- `INFISICAL_AUTH_METHOD=universal-auth`
- `INFISICAL_UNIVERSAL_AUTH_CLIENT_ID=<client-id>`
- `INFISICAL_UNIVERSAL_AUTH_CLIENT_SECRET=<client-secret>`
</Accordion>
<Accordion title="Token Auth">
Direct authentication using a machine identity access token.
**Environment Variables:**
- `INFISICAL_TOKEN=<token>`
</Accordion>
<Accordion title="Native Kubernetes">
Authentication using Kubernetes service account tokens.
**Environment Variables:**
- `INFISICAL_AUTH_METHOD=kubernetes`
- `INFISICAL_MACHINE_IDENTITY_ID=<machine-identity-id>`
</Accordion>
<Accordion title="Native AWS IAM">
Authentication using AWS IAM roles.
**Environment Variables:**
- `INFISICAL_AUTH_METHOD=aws-iam`
- `INFISICAL_MACHINE_IDENTITY_ID=<machine-identity-id>`
</Accordion>
<Accordion title="Native GCP ID Token">
Authentication using GCP identity tokens.
**Environment Variables:**
- `INFISICAL_AUTH_METHOD=gcp-id-token`
- `INFISICAL_MACHINE_IDENTITY_ID=<machine-identity-id>`
</Accordion>
<Accordion title="GCP IAM">
Authentication using GCP service account keys.
**Environment Variables:**
- `INFISICAL_AUTH_METHOD=gcp-iam`
- `INFISICAL_MACHINE_IDENTITY_ID=<machine-identity-id>`
- `INFISICAL_GCP_SERVICE_ACCOUNT_KEY_FILE_PATH=<path-to-key-file>`
</Accordion>
<Accordion title="Native Azure">
Authentication using Azure managed identity.
**Environment Variables:**
- `INFISICAL_AUTH_METHOD=azure`
- `INFISICAL_MACHINE_IDENTITY_ID=<machine-identity-id>`
</Accordion>
<Accordion title="OIDC Auth">
Authentication using OIDC identity tokens.
**Environment Variables:**
- `INFISICAL_AUTH_METHOD=oidc-auth`
- `INFISICAL_MACHINE_IDENTITY_ID=<machine-identity-id>`
- `INFISICAL_JWT=<oidc-jwt>`
</Accordion>
<Accordion title="JWT Auth">
Authentication using JWT tokens.
**Environment Variables:**
- `INFISICAL_AUTH_METHOD=jwt-auth`
- `INFISICAL_MACHINE_IDENTITY_ID=<machine-identity-id>`
- `INFISICAL_JWT=<jwt>`
</Accordion>
</AccordionGroup>
</Step>
<Step title="Install the Infisical CLI">
Install the Infisical CLI on the server where you plan to deploy the relay. The CLI is required for relay installation and management.
See the [CLI Installation Guide](/cli/overview) for instructions.
This server must have a static IP address or DNS name to be identifiable by the Infisical platform.
</Step>
<Step title="Configure Network & Firewall">
Ensure your network and firewall settings allow the server to accept inbound connections and make outbound connections:
**Inbound Connection Rules:**
| Protocol | Source | Port | Purpose |
| -------- | ------------------ | ---- | -------------------------------- |
| TCP | Gateways | 2222 | SSH reverse tunnel establishment |
| TCP | Infisical instance host (US/EU, other) | 8443 | Platform-to-relay communication |
**Outbound Connection Rules:**
| Protocol | Destination | Port | Purpose |
| -------- | ------------------------------------ | ---- | ------------------------------------------ |
| TCP | Infisical instance host (US/EU, other) | 443 | API communication and certificate requests |
</Step>
<Step title="Select a Deployment Method">
The Infisical CLI is used to install and start the relay in your chosen environment. The CLI provides commands for both production and development scenarios, and supports a variety of options/flags to configure your deployment.
To view all available flags and equivalent environment variables for relay deployment, see the [Relay CLI Command Reference](/cli/commands/relay).
<Tabs>
<Tab title="Linux Server">
For production deployments on Linux servers, install the Relay as a systemd service. This installation method only supports [Token Auth](/documentation/platform/identities/token-auth) at the moment.
Once you have a [Token Auth](/documentation/platform/identities/token-auth) token, set the following environment variables for relay authentication:
```bash
export INFISICAL_TOKEN=<your-machine-identity-token>
```
<Warning>
The systemd install command requires a Linux operating system with root/sudo privileges.
</Warning>
```bash
sudo infisical relay systemd install \
--token <your-machine-identity-token> \
--name <relay-name> \
--domain <your-infisical-domain> \
--host <static-ip-or-dns-of-the-server>
# Start the relay service
sudo systemctl start infisical-relay
sudo systemctl enable infisical-relay
```
</Tab>
<Tab title="Other Environments">
For non-Linux systems or when you need more control over the relay process:
```bash
infisical relay start \
--type=<type> \
--host=<host> \
--name=<name> \
--auth-method=<auth-method>
```
This method supports all [machine identity auth methods](/documentation/platform/identities/machine-identities) and runs in the foreground. It is suitable for production use on non-Linux systems as well as for development environments.
Set the appropriate environment variables for your chosen auth method as described in Step 1 before running the relay start command.
</Tab>
</Tabs>
</Step>
</Steps>
## Frequently Asked Questions
<Accordion title="Can the relay servers decrypt traffic going through them?">
No, relay servers cannot decrypt any traffic passing through them due to end-to-end encryption:
- **Client-to-Gateway mTLS (via TLS-pinned tunnel)**: Clients connect via a proxy that establishes a TLS-pinned tunnel to the gateway; mTLS between the client and gateway is negotiated inside this tunnel, encrypting all application traffic
- **SSH tunnel encryption**: The mTLS-encrypted traffic is then transmitted through SSH reverse tunnels to relay servers
- **Double encryption**: Traffic is encrypted twice - once by client mTLS and again by SSH tunnels
- **Relay only routes traffic**: The relay server only routes the doubly-encrypted traffic without access to either encryption layer
The relay infrastructure is designed as a secure routing mechanism where only the client and gateway can decrypt the actual application traffic.
</Accordion>
<Accordion title="What are the benefits of deploying my own relay?">
Deploying your own relay provides several advantages:
- **Dedicated resources**: Full control over relay infrastructure and performance
- **Lower latency**: Deploy closer to your gateways for optimal performance
- **Compliance**: Meet specific data routing and compliance requirements
- **Custom network policies**: Implement organization-specific network configurations
- **Geographic proximity**: Reduce network congestion and improve response times when accessing resources
- **High availability**: Deploy multiple relays for redundancy and load distribution
Organization-deployed relays give you complete control over your secure communication infrastructure.
</Accordion>
<Accordion title="How do I troubleshoot connectivity issues?">
For detailed troubleshooting:
**Platform cannot connect to relay:**
- Check firewall rules allow inbound TCP with TLS on port 8443
- Test connectivity: `openssl s_client -connect <relay-ip>:8443`
**Test network connectivity:**
```bash
# Test outbound API access from relay. Replace URL with your Infisical instance if self-hosted
curl -I https://app.infisical.com
# Test TCP with TLS port from platform
openssl s_client -connect <relay-ip>:8443
```
</Accordion>
<Accordion title="What happens if my relay server goes down?">
Relay server outages affect gateway connectivity:
- **Gateway reconnection**: Gateways will automatically attempt to reconnect when the relay comes back online
- **Service interruption**: While the relay is down, the Infisical platform cannot reach gateways through that relay. As a result, any secrets or resources accessed via those gateways will be temporarily unavailable until connectivity is restored.
- **Multiple relays**: Deploy multiple relay servers for redundancy and high availability
- **Automatic restart**: Use systemd or container orchestration to automatically restart failed relay services
For production environments, consider deploying multiple relay servers to avoid single points of failure.
</Accordion>

View File

@@ -1,13 +1,9 @@
---
title: "Gateway Security Architecture"
sidebarTitle: "Architecture"
description: "Understand the security model and tenant isolation of Infisical's Gateway"
title: "Security Architecture"
description: "Security model, tenant isolation, and best practices for Infisical Gateways and Relays"
---
# Gateway Security Architecture
The Infisical Gateway enables secure access to private resources using SSH reverse tunnels, certificate-based authentication, and a comprehensive PKI (Public Key Infrastructure) system. The architecture provides end-to-end encryption and complete tenant isolation through multiple certificate authorities.
This document explains the internal security architecture and how tenant isolation is maintained.
## Security Model Overview
@@ -82,16 +78,16 @@ The platform establishes secure direct connections with gateways through a **TLS
2. **Connection Flow**:
```
Platform ←→ [SSH Reverse Tunnel] ←→ Gateway
Platform ←→ [TCP with TLS] ←→ Relay ←→ [SSH Reverse Tunnel] ←→ Gateway
```
- Gateway maintains persistent outbound SSH tunnel to relay server
- Platform connects directly to gateway through this tunnel
- TLS handshake occurs over the SSH tunnel, establishing mTLS connection
- Application traffic flows through the TLS-pinned tunnel
- Platform connects to relay server using TCP with TLS
- Relay routes encrypted traffic between platform and gateway
- TLS handshake occurs between platform and gateway through the relay
- Application traffic flows through the TLS-pinned tunnel via relay routing
3. **Security Benefits**:
- **No inbound connections**: Gateway never needs to accept incoming connections
- **Certificate-based authentication**: Uses Organization Gateway certificates for mutual TLS
- **Double encryption**: TLS traffic within SSH tunnel provides layered security
@@ -132,7 +128,6 @@ The architecture provides tenant isolation through multiple certificate authorit
- Ephemeral certificate validation ensures time-bound access
2. **Network Isolation**:
- Each organization's traffic flows through isolated certificate-authenticated channels
- Relay servers route traffic based on certificate validation without content access
- Gateway validates all incoming connections against Organization Gateway Client CA

Some files were not shown because too many files have changed in this diff Show More