diff --git a/.github/workflows/nightly-tag-generation.yml b/.github/workflows/nightly-tag-generation.yml index 0a4eb3d974..b2704f96c9 100644 --- a/.github/workflows/nightly-tag-generation.yml +++ b/.github/workflows/nightly-tag-generation.yml @@ -36,11 +36,23 @@ jobs: echo "Latest production tag: $LATEST_STABLE_TAG" + # Extract version numbers and increment minor version + VERSION_NUMBERS=$(echo "$LATEST_STABLE_TAG" | sed 's/^v//') + MAJOR=$(echo "$VERSION_NUMBERS" | cut -d'.' -f1) + MINOR=$(echo "$VERSION_NUMBERS" | cut -d'.' -f2) + PATCH=$(echo "$VERSION_NUMBERS" | cut -d'.' -f3) + + # Increment minor version, reset patch to 0 + NEXT_MINOR=$((MINOR + 1)) + NEXT_VERSION="v${MAJOR}.${NEXT_MINOR}.0" + + echo "Next version for nightly: $NEXT_VERSION" + # Get current date in YYYYMMDD format DATE=$(date +%Y%m%d) - # Base nightly tag name - BASE_TAG="${LATEST_STABLE_TAG}-nightly-${DATE}" + # Base nightly tag name using next version + BASE_TAG="${NEXT_VERSION}-nightly-${DATE}" # Check if this exact tag already exists if git tag --list | grep -q "^${BASE_TAG}$"; then @@ -65,7 +77,6 @@ jobs: echo "Generated nightly tag: $NIGHTLY_TAG" echo "NIGHTLY_TAG=$NIGHTLY_TAG" >> $GITHUB_ENV - echo "LATEST_PRODUCTION_TAG=$LATEST_STABLE_TAG" >> $GITHUB_ENV git tag "$NIGHTLY_TAG" git push origin "$NIGHTLY_TAG" diff --git a/backend/e2e-test/mocks/keystore.ts b/backend/e2e-test/mocks/keystore.ts index 91b64ff0df..0cebe6ef39 100644 --- a/backend/e2e-test/mocks/keystore.ts +++ b/backend/e2e-test/mocks/keystore.ts @@ -56,6 +56,15 @@ export const mockKeyStore = (): TKeyStoreFactory => { incrementBy: async () => { return 1; }, + pgGetIntItem: async (key) => { + const value = store[key]; + if (typeof value === "number") { + return Number(value); + } + }, + pgIncrementBy: async () => { + return 1; + }, getItems: async (keys) => { const values = keys.map((key) => { const value = store[key]; diff --git a/backend/e2e-test/vitest-environment-knex.ts b/backend/e2e-test/vitest-environment-knex.ts 
index ff5f42286d..085b8fe305 100644 --- a/backend/e2e-test/vitest-environment-knex.ts +++ b/backend/e2e-test/vitest-environment-knex.ts @@ -15,6 +15,7 @@ import { mockSmtpServer } from "./mocks/smtp"; import { initDbConnection } from "@app/db"; import { queueServiceFactory } from "@app/queue"; import { keyStoreFactory } from "@app/keystore/keystore"; +import { keyValueStoreDALFactory } from "@app/keystore/key-value-store-dal"; import { initializeHsmModule } from "@app/ee/services/hsm/hsm-fns"; import { buildRedisFromConfig } from "@app/lib/config/redis"; import { superAdminDALFactory } from "@app/services/super-admin/super-admin-dal"; @@ -62,7 +63,8 @@ export default { const smtp = mockSmtpServer(); const queue = queueServiceFactory(envCfg, { dbConnectionUrl: envCfg.DB_CONNECTION_URI }); - const keyStore = keyStoreFactory(envCfg); + const keyValueStoreDAL = keyValueStoreDALFactory(db); + const keyStore = keyStoreFactory(envCfg, keyValueStoreDAL); await queue.initialize(); diff --git a/backend/src/@types/fastify.d.ts b/backend/src/@types/fastify.d.ts index b0a5fbb452..8ca4288b55 100644 --- a/backend/src/@types/fastify.d.ts +++ b/backend/src/@types/fastify.d.ts @@ -16,6 +16,7 @@ import { TEventBusService } from "@app/ee/services/event/event-bus-service"; import { TServerSentEventsService } from "@app/ee/services/event/event-sse-service"; import { TExternalKmsServiceFactory } from "@app/ee/services/external-kms/external-kms-service"; import { TGatewayServiceFactory } from "@app/ee/services/gateway/gateway-service"; +import { TGatewayV2ServiceFactory } from "@app/ee/services/gateway-v2/gateway-v2-service"; import { TGithubOrgSyncServiceFactory } from "@app/ee/services/github-org-sync/github-org-sync-service"; import { TGroupServiceFactory } from "@app/ee/services/group/group-service"; import { TIdentityAuthTemplateServiceFactory } from "@app/ee/services/identity-auth-template"; @@ -32,6 +33,7 @@ import { TPitServiceFactory } from "@app/ee/services/pit/pit-service"; 
import { TProjectTemplateServiceFactory } from "@app/ee/services/project-template/project-template-types"; import { TProjectUserAdditionalPrivilegeServiceFactory } from "@app/ee/services/project-user-additional-privilege/project-user-additional-privilege-types"; import { RateLimitConfiguration, TRateLimitServiceFactory } from "@app/ee/services/rate-limit/rate-limit-types"; +import { TRelayServiceFactory } from "@app/ee/services/relay/relay-service"; import { TSamlConfigServiceFactory } from "@app/ee/services/saml-config/saml-config-types"; import { TScimServiceFactory } from "@app/ee/services/scim/scim-types"; import { TSecretApprovalPolicyServiceFactory } from "@app/ee/services/secret-approval-policy/secret-approval-policy-service"; @@ -296,6 +298,8 @@ declare module "fastify" { secretRotationV2: TSecretRotationV2ServiceFactory; microsoftTeams: TMicrosoftTeamsServiceFactory; assumePrivileges: TAssumePrivilegeServiceFactory; + relay: TRelayServiceFactory; + gatewayV2: TGatewayV2ServiceFactory; githubOrgSync: TGithubOrgSyncServiceFactory; folderCommit: TFolderCommitServiceFactory; pit: TPitServiceFactory; diff --git a/backend/src/@types/knex.d.ts b/backend/src/@types/knex.d.ts index 525ad1619f..75a358341f 100644 --- a/backend/src/@types/knex.d.ts +++ b/backend/src/@types/knex.d.ts @@ -101,6 +101,9 @@ import { TGateways, TGatewaysInsert, TGatewaysUpdate, + TGatewaysV2, + TGatewaysV2Insert, + TGatewaysV2Update, TGitAppInstallSessions, TGitAppInstallSessionsInsert, TGitAppInstallSessionsUpdate, @@ -179,6 +182,9 @@ import { TIncidentContacts, TIncidentContactsInsert, TIncidentContactsUpdate, + TInstanceRelayConfig, + TInstanceRelayConfigInsert, + TInstanceRelayConfigUpdate, TIntegrationAuths, TIntegrationAuthsInsert, TIntegrationAuthsUpdate, @@ -191,6 +197,9 @@ import { TInternalKms, TInternalKmsInsert, TInternalKmsUpdate, + TKeyValueStore, + TKeyValueStoreInsert, + TKeyValueStoreUpdate, TKmipClientCertificates, TKmipClientCertificatesInsert, 
TKmipClientCertificatesUpdate, @@ -230,9 +239,15 @@ import { TOrgGatewayConfig, TOrgGatewayConfigInsert, TOrgGatewayConfigUpdate, + TOrgGatewayConfigV2, + TOrgGatewayConfigV2Insert, + TOrgGatewayConfigV2Update, TOrgMemberships, TOrgMembershipsInsert, TOrgMembershipsUpdate, + TOrgRelayConfig, + TOrgRelayConfigInsert, + TOrgRelayConfigUpdate, TOrgRoles, TOrgRolesInsert, TOrgRolesUpdate, @@ -290,6 +305,9 @@ import { TRateLimit, TRateLimitInsert, TRateLimitUpdate, + TRelays, + TRelaysInsert, + TRelaysUpdate, TResourceMetadata, TResourceMetadataInsert, TResourceMetadataUpdate, @@ -1238,6 +1256,17 @@ declare module "knex/types/tables" { TSecretScanningResourcesInsert, TSecretScanningResourcesUpdate >; + [TableName.InstanceRelayConfig]: KnexOriginal.CompositeTableType< + TInstanceRelayConfig, + TInstanceRelayConfigInsert, + TInstanceRelayConfigUpdate + >; + [TableName.OrgRelayConfig]: KnexOriginal.CompositeTableType< + TOrgRelayConfig, + TOrgRelayConfigInsert, + TOrgRelayConfigUpdate + >; + [TableName.Relay]: KnexOriginal.CompositeTableType; [TableName.SecretScanningScan]: KnexOriginal.CompositeTableType< TSecretScanningScans, TSecretScanningScansInsert, @@ -1259,10 +1288,21 @@ declare module "knex/types/tables" { TRemindersRecipientsInsert, TRemindersRecipientsUpdate >; + [TableName.OrgGatewayConfigV2]: KnexOriginal.CompositeTableType< + TOrgGatewayConfigV2, + TOrgGatewayConfigV2Insert, + TOrgGatewayConfigV2Update + >; + [TableName.GatewayV2]: KnexOriginal.CompositeTableType; [TableName.UserNotifications]: KnexOriginal.CompositeTableType< TUserNotifications, TUserNotificationsInsert, TUserNotificationsUpdate >; + [TableName.KeyValueStore]: KnexOriginal.CompositeTableType< + TKeyValueStore, + TKeyValueStoreInsert, + TKeyValueStoreUpdate + >; } } diff --git a/backend/src/db/migrations/20250825131627_add-gateway-v2-pki-and-ssh-configs.ts b/backend/src/db/migrations/20250825131627_add-gateway-v2-pki-and-ssh-configs.ts new file mode 100644 index 0000000000..812c4f48f2 --- 
/dev/null +++ b/backend/src/db/migrations/20250825131627_add-gateway-v2-pki-and-ssh-configs.ts @@ -0,0 +1,150 @@ +import { Knex } from "knex"; + +import { TableName } from "../schemas"; +import { createOnUpdateTrigger, dropOnUpdateTrigger } from "../utils"; + +export async function up(knex: Knex): Promise { + if (!(await knex.schema.hasTable(TableName.InstanceRelayConfig))) { + await knex.schema.createTable(TableName.InstanceRelayConfig, (t) => { + t.uuid("id", { primaryKey: true }).defaultTo(knex.fn.uuid()); + t.timestamps(true, true, true); + + // Root CA for relay PKI + t.binary("encryptedRootRelayPkiCaPrivateKey").notNullable(); + t.binary("encryptedRootRelayPkiCaCertificate").notNullable(); + + // Instance CA for relay PKI + t.binary("encryptedInstanceRelayPkiCaPrivateKey").notNullable(); + t.binary("encryptedInstanceRelayPkiCaCertificate").notNullable(); + t.binary("encryptedInstanceRelayPkiCaCertificateChain").notNullable(); + + // Instance client/server intermediates for relay PKI + t.binary("encryptedInstanceRelayPkiClientCaPrivateKey").notNullable(); + t.binary("encryptedInstanceRelayPkiClientCaCertificate").notNullable(); + t.binary("encryptedInstanceRelayPkiClientCaCertificateChain").notNullable(); + t.binary("encryptedInstanceRelayPkiServerCaPrivateKey").notNullable(); + t.binary("encryptedInstanceRelayPkiServerCaCertificate").notNullable(); + t.binary("encryptedInstanceRelayPkiServerCaCertificateChain").notNullable(); + + // Org Parent CAs for relay + t.binary("encryptedOrgRelayPkiCaPrivateKey").notNullable(); + t.binary("encryptedOrgRelayPkiCaCertificate").notNullable(); + t.binary("encryptedOrgRelayPkiCaCertificateChain").notNullable(); + + // Instance SSH CAs for relay + t.binary("encryptedInstanceRelaySshClientCaPrivateKey").notNullable(); + t.binary("encryptedInstanceRelaySshClientCaPublicKey").notNullable(); + t.binary("encryptedInstanceRelaySshServerCaPrivateKey").notNullable(); + 
t.binary("encryptedInstanceRelaySshServerCaPublicKey").notNullable(); + }); + + await createOnUpdateTrigger(knex, TableName.InstanceRelayConfig); + } + + // Org-level relay configuration (one-to-one with organization) + if (!(await knex.schema.hasTable(TableName.OrgRelayConfig))) { + await knex.schema.createTable(TableName.OrgRelayConfig, (t) => { + t.uuid("id", { primaryKey: true }).defaultTo(knex.fn.uuid()); + t.timestamps(true, true, true); + + t.uuid("orgId").notNullable().unique(); + t.foreign("orgId").references("id").inTable(TableName.Organization).onDelete("CASCADE"); + + // Org-scoped relay PKI (client + server) + t.binary("encryptedRelayPkiClientCaPrivateKey").notNullable(); + t.binary("encryptedRelayPkiClientCaCertificate").notNullable(); + t.binary("encryptedRelayPkiClientCaCertificateChain").notNullable(); + t.binary("encryptedRelayPkiServerCaPrivateKey").notNullable(); + t.binary("encryptedRelayPkiServerCaCertificate").notNullable(); + t.binary("encryptedRelayPkiServerCaCertificateChain").notNullable(); + + // Org-scoped relay SSH (client + server) + t.binary("encryptedRelaySshClientCaPrivateKey").notNullable(); + t.binary("encryptedRelaySshClientCaPublicKey").notNullable(); + t.binary("encryptedRelaySshServerCaPrivateKey").notNullable(); + t.binary("encryptedRelaySshServerCaPublicKey").notNullable(); + }); + + await createOnUpdateTrigger(knex, TableName.OrgRelayConfig); + } + + if (!(await knex.schema.hasTable(TableName.OrgGatewayConfigV2))) { + await knex.schema.createTable(TableName.OrgGatewayConfigV2, (t) => { + t.uuid("id", { primaryKey: true }).defaultTo(knex.fn.uuid()); + t.uuid("orgId").notNullable().unique(); + t.foreign("orgId").references("id").inTable(TableName.Organization).onDelete("CASCADE"); + t.timestamps(true, true, true); + t.binary("encryptedRootGatewayCaPrivateKey").notNullable(); + t.binary("encryptedRootGatewayCaCertificate").notNullable(); + t.binary("encryptedGatewayServerCaPrivateKey").notNullable(); + 
t.binary("encryptedGatewayServerCaCertificate").notNullable(); + t.binary("encryptedGatewayServerCaCertificateChain").notNullable(); + t.binary("encryptedGatewayClientCaPrivateKey").notNullable(); + t.binary("encryptedGatewayClientCaCertificate").notNullable(); + t.binary("encryptedGatewayClientCaCertificateChain").notNullable(); + }); + + await createOnUpdateTrigger(knex, TableName.OrgGatewayConfigV2); + } + + if (!(await knex.schema.hasTable(TableName.Relay))) { + await knex.schema.createTable(TableName.Relay, (t) => { + t.uuid("id", { primaryKey: true }).defaultTo(knex.fn.uuid()); + t.timestamps(true, true, true); + + t.uuid("orgId"); + t.foreign("orgId").references("id").inTable(TableName.Organization).onDelete("CASCADE"); + + t.uuid("identityId"); + t.foreign("identityId").references("id").inTable(TableName.Identity).onDelete("CASCADE"); + + t.string("name").notNullable(); + t.string("host").notNullable(); + + t.unique(["orgId", "name"]); + }); + + await createOnUpdateTrigger(knex, TableName.Relay); + } + + if (!(await knex.schema.hasTable(TableName.GatewayV2))) { + await knex.schema.createTable(TableName.GatewayV2, (t) => { + t.uuid("id", { primaryKey: true }).defaultTo(knex.fn.uuid()); + t.timestamps(true, true, true); + + t.uuid("orgId").notNullable(); + t.foreign("orgId").references("id").inTable(TableName.Organization).onDelete("CASCADE"); + + t.uuid("identityId").notNullable().unique(); + t.foreign("identityId").references("id").inTable(TableName.Identity).onDelete("CASCADE"); + + t.uuid("relayId"); + t.foreign("relayId").references("id").inTable(TableName.Relay).onDelete("SET NULL"); + + t.string("name").notNullable(); + + t.unique(["orgId", "name"]); + + t.dateTime("heartbeat"); + }); + + await createOnUpdateTrigger(knex, TableName.GatewayV2); + } +} + +export async function down(knex: Knex): Promise { + await dropOnUpdateTrigger(knex, TableName.OrgRelayConfig); + await knex.schema.dropTableIfExists(TableName.OrgRelayConfig); + + await 
dropOnUpdateTrigger(knex, TableName.InstanceRelayConfig); + await knex.schema.dropTableIfExists(TableName.InstanceRelayConfig); + + await dropOnUpdateTrigger(knex, TableName.OrgGatewayConfigV2); + await knex.schema.dropTableIfExists(TableName.OrgGatewayConfigV2); + + await dropOnUpdateTrigger(knex, TableName.GatewayV2); + await knex.schema.dropTableIfExists(TableName.GatewayV2); + + await dropOnUpdateTrigger(knex, TableName.Relay); + await knex.schema.dropTableIfExists(TableName.Relay); +} diff --git a/backend/src/db/migrations/20250901091637_add-gateway-v2-id-columns.ts b/backend/src/db/migrations/20250901091637_add-gateway-v2-id-columns.ts new file mode 100644 index 0000000000..cb46b1261c --- /dev/null +++ b/backend/src/db/migrations/20250901091637_add-gateway-v2-id-columns.ts @@ -0,0 +1,33 @@ +import { Knex } from "knex"; + +import { TableName } from "../schemas"; + +export async function up(knex: Knex): Promise { + if (!(await knex.schema.hasColumn(TableName.DynamicSecret, "gatewayV2Id"))) { + await knex.schema.alterTable(TableName.DynamicSecret, (table) => { + table.uuid("gatewayV2Id"); + table.foreign("gatewayV2Id").references("id").inTable(TableName.GatewayV2).onDelete("SET NULL"); + }); + } + + if (!(await knex.schema.hasColumn(TableName.IdentityKubernetesAuth, "gatewayV2Id"))) { + await knex.schema.alterTable(TableName.IdentityKubernetesAuth, (table) => { + table.uuid("gatewayV2Id"); + table.foreign("gatewayV2Id").references("id").inTable(TableName.GatewayV2).onDelete("SET NULL"); + }); + } +} + +export async function down(knex: Knex): Promise { + if (await knex.schema.hasColumn(TableName.DynamicSecret, "gatewayV2Id")) { + await knex.schema.alterTable(TableName.DynamicSecret, (table) => { + table.dropColumn("gatewayV2Id"); + }); + } + + if (await knex.schema.hasColumn(TableName.IdentityKubernetesAuth, "gatewayV2Id")) { + await knex.schema.alterTable(TableName.IdentityKubernetesAuth, (table) => { + table.dropColumn("gatewayV2Id"); + }); + } +} diff --git 
a/backend/src/db/migrations/20250908193226_sql-cache_int.ts b/backend/src/db/migrations/20250908193226_sql-cache_int.ts new file mode 100644 index 0000000000..0e15c10d16 --- /dev/null +++ b/backend/src/db/migrations/20250908193226_sql-cache_int.ts @@ -0,0 +1,18 @@ +import { Knex } from "knex"; + +import { TableName } from "../schemas"; + +export async function up(knex: Knex): Promise { + if (!(await knex.schema.hasTable(TableName.KeyValueStore))) { + await knex.schema.createTable(TableName.KeyValueStore, (t) => { + t.text("key").primary(); + t.bigint("integerValue"); + t.datetime("expiresAt"); + t.timestamps(true, true, true); + }); + } +} + +export async function down(knex: Knex): Promise { + await knex.schema.dropTableIfExists(TableName.KeyValueStore); +} diff --git a/backend/src/db/migrations/20250911133926_add-auth-token-payload-column.ts b/backend/src/db/migrations/20250911133926_add-auth-token-payload-column.ts new file mode 100644 index 0000000000..4a1e3f3520 --- /dev/null +++ b/backend/src/db/migrations/20250911133926_add-auth-token-payload-column.ts @@ -0,0 +1,23 @@ +import { Knex } from "knex"; + +import { TableName } from "../schemas"; + +export async function up(knex: Knex): Promise { + const hasPayloadCol = await knex.schema.hasColumn(TableName.AuthTokens, "payload"); + + if (!hasPayloadCol) { + await knex.schema.alterTable(TableName.AuthTokens, (t) => { + t.text("payload").nullable(); + }); + } +} + +export async function down(knex: Knex): Promise { + const hasPayloadCol = await knex.schema.hasColumn(TableName.AuthTokens, "payload"); + + if (hasPayloadCol) { + await knex.schema.alterTable(TableName.AuthTokens, (t) => { + t.dropColumn("payload"); + }); + } +} diff --git a/backend/src/db/schemas/auth-tokens.ts b/backend/src/db/schemas/auth-tokens.ts index 0d3e932192..396c06f130 100644 --- a/backend/src/db/schemas/auth-tokens.ts +++ b/backend/src/db/schemas/auth-tokens.ts @@ -18,7 +18,8 @@ export const AuthTokensSchema = z.object({ updatedAt: z.date(), 
userId: z.string().uuid().nullable().optional(), orgId: z.string().uuid().nullable().optional(), - aliasId: z.string().nullable().optional() + aliasId: z.string().nullable().optional(), + payload: z.string().nullable().optional() }); export type TAuthTokens = z.infer; diff --git a/backend/src/db/schemas/dynamic-secrets.ts b/backend/src/db/schemas/dynamic-secrets.ts index 637d0c6321..526239f1c0 100644 --- a/backend/src/db/schemas/dynamic-secrets.ts +++ b/backend/src/db/schemas/dynamic-secrets.ts @@ -29,7 +29,8 @@ export const DynamicSecretsSchema = z.object({ encryptedInput: zodBuffer, projectGatewayId: z.string().uuid().nullable().optional(), gatewayId: z.string().uuid().nullable().optional(), - usernameTemplate: z.string().nullable().optional() + usernameTemplate: z.string().nullable().optional(), + gatewayV2Id: z.string().uuid().nullable().optional() }); export type TDynamicSecrets = z.infer; diff --git a/backend/src/db/schemas/gateways-v2.ts b/backend/src/db/schemas/gateways-v2.ts new file mode 100644 index 0000000000..6aff8a1683 --- /dev/null +++ b/backend/src/db/schemas/gateways-v2.ts @@ -0,0 +1,23 @@ +// Code generated by automation script, DO NOT EDIT. +// Automated by pulling database and generating zod schema +// To update. Just run npm run generate:schema +// Written by akhilmhdh. 
+ +import { z } from "zod"; + +import { TImmutableDBKeys } from "./models"; + +export const GatewaysV2Schema = z.object({ + id: z.string().uuid(), + createdAt: z.date(), + updatedAt: z.date(), + orgId: z.string().uuid(), + identityId: z.string().uuid(), + relayId: z.string().uuid().nullable().optional(), + name: z.string(), + heartbeat: z.date().nullable().optional() +}); + +export type TGatewaysV2 = z.infer; +export type TGatewaysV2Insert = Omit, TImmutableDBKeys>; +export type TGatewaysV2Update = Partial, TImmutableDBKeys>>; diff --git a/backend/src/db/schemas/identity-kubernetes-auths.ts b/backend/src/db/schemas/identity-kubernetes-auths.ts index deb78bf8ae..4789ef3659 100644 --- a/backend/src/db/schemas/identity-kubernetes-auths.ts +++ b/backend/src/db/schemas/identity-kubernetes-auths.ts @@ -32,7 +32,8 @@ export const IdentityKubernetesAuthsSchema = z.object({ encryptedKubernetesCaCertificate: zodBuffer.nullable().optional(), gatewayId: z.string().uuid().nullable().optional(), accessTokenPeriod: z.coerce.number().default(0), - tokenReviewMode: z.string().default("api") + tokenReviewMode: z.string().default("api"), + gatewayV2Id: z.string().uuid().nullable().optional() }); export type TIdentityKubernetesAuths = z.infer; diff --git a/backend/src/db/schemas/index.ts b/backend/src/db/schemas/index.ts index 1642c35552..f09e3c2635 100644 --- a/backend/src/db/schemas/index.ts +++ b/backend/src/db/schemas/index.ts @@ -31,6 +31,7 @@ export * from "./folder-commits"; export * from "./folder-tree-checkpoint-resources"; export * from "./folder-tree-checkpoints"; export * from "./gateways"; +export * from "./gateways-v2"; export * from "./git-app-install-sessions"; export * from "./git-app-org"; export * from "./github-org-sync-configs"; @@ -57,10 +58,12 @@ export * from "./identity-token-auths"; export * from "./identity-ua-client-secrets"; export * from "./identity-universal-auths"; export * from "./incident-contacts"; +export * from "./instance-relay-config"; export * 
from "./integration-auths"; export * from "./integrations"; export * from "./internal-certificate-authorities"; export * from "./internal-kms"; +export * from "./key-value-store"; export * from "./kmip-client-certificates"; export * from "./kmip-clients"; export * from "./kmip-org-configs"; @@ -75,7 +78,9 @@ export * from "./models"; export * from "./oidc-configs"; export * from "./org-bots"; export * from "./org-gateway-config"; +export * from "./org-gateway-config-v2"; export * from "./org-memberships"; +export * from "./org-relay-config"; export * from "./org-roles"; export * from "./organizations"; export * from "./pki-alerts"; @@ -96,6 +101,7 @@ export * from "./project-user-additional-privilege"; export * from "./project-user-membership-roles"; export * from "./projects"; export * from "./rate-limit"; +export * from "./relays"; export * from "./resource-metadata"; export * from "./saml-configs"; export * from "./scim-tokens"; diff --git a/backend/src/db/schemas/instance-relay-config.ts b/backend/src/db/schemas/instance-relay-config.ts new file mode 100644 index 0000000000..8b18ef0f55 --- /dev/null +++ b/backend/src/db/schemas/instance-relay-config.ts @@ -0,0 +1,38 @@ +// Code generated by automation script, DO NOT EDIT. +// Automated by pulling database and generating zod schema +// To update. Just run npm run generate:schema +// Written by akhilmhdh. 
+ +import { z } from "zod"; + +import { zodBuffer } from "@app/lib/zod"; + +import { TImmutableDBKeys } from "./models"; + +export const InstanceRelayConfigSchema = z.object({ + id: z.string().uuid(), + createdAt: z.date(), + updatedAt: z.date(), + encryptedRootRelayPkiCaPrivateKey: zodBuffer, + encryptedRootRelayPkiCaCertificate: zodBuffer, + encryptedInstanceRelayPkiCaPrivateKey: zodBuffer, + encryptedInstanceRelayPkiCaCertificate: zodBuffer, + encryptedInstanceRelayPkiCaCertificateChain: zodBuffer, + encryptedInstanceRelayPkiClientCaPrivateKey: zodBuffer, + encryptedInstanceRelayPkiClientCaCertificate: zodBuffer, + encryptedInstanceRelayPkiClientCaCertificateChain: zodBuffer, + encryptedInstanceRelayPkiServerCaPrivateKey: zodBuffer, + encryptedInstanceRelayPkiServerCaCertificate: zodBuffer, + encryptedInstanceRelayPkiServerCaCertificateChain: zodBuffer, + encryptedOrgRelayPkiCaPrivateKey: zodBuffer, + encryptedOrgRelayPkiCaCertificate: zodBuffer, + encryptedOrgRelayPkiCaCertificateChain: zodBuffer, + encryptedInstanceRelaySshClientCaPrivateKey: zodBuffer, + encryptedInstanceRelaySshClientCaPublicKey: zodBuffer, + encryptedInstanceRelaySshServerCaPrivateKey: zodBuffer, + encryptedInstanceRelaySshServerCaPublicKey: zodBuffer +}); + +export type TInstanceRelayConfig = z.infer; +export type TInstanceRelayConfigInsert = Omit, TImmutableDBKeys>; +export type TInstanceRelayConfigUpdate = Partial, TImmutableDBKeys>>; diff --git a/backend/src/db/schemas/key-value-store.ts b/backend/src/db/schemas/key-value-store.ts new file mode 100644 index 0000000000..448c78f24d --- /dev/null +++ b/backend/src/db/schemas/key-value-store.ts @@ -0,0 +1,20 @@ +// Code generated by automation script, DO NOT EDIT. +// Automated by pulling database and generating zod schema +// To update. Just run npm run generate:schema +// Written by akhilmhdh. 
+ +import { z } from "zod"; + +import { TImmutableDBKeys } from "./models"; + +export const KeyValueStoreSchema = z.object({ + key: z.string(), + integerValue: z.coerce.number().nullable().optional(), + expiresAt: z.date().nullable().optional(), + createdAt: z.date(), + updatedAt: z.date() +}); + +export type TKeyValueStore = z.infer; +export type TKeyValueStoreInsert = Omit, TImmutableDBKeys>; +export type TKeyValueStoreUpdate = Partial, TImmutableDBKeys>>; diff --git a/backend/src/db/schemas/models.ts b/backend/src/db/schemas/models.ts index 3e3e81fd0d..a4585972f9 100644 --- a/backend/src/db/schemas/models.ts +++ b/backend/src/db/schemas/models.ts @@ -179,7 +179,16 @@ export enum TableName { SecretScanningConfig = "secret_scanning_configs", // reminders Reminder = "reminders", - ReminderRecipient = "reminders_recipients" + ReminderRecipient = "reminders_recipients", + + // gateway v2 + InstanceRelayConfig = "instance_relay_config", + OrgRelayConfig = "org_relay_config", + OrgGatewayConfigV2 = "org_gateway_config_v2", + Relay = "relays", + GatewayV2 = "gateways_v2", + + KeyValueStore = "key_value_store" } export type TImmutableDBKeys = "id" | "createdAt" | "updatedAt" | "commitId"; diff --git a/backend/src/db/schemas/org-gateway-config-v2.ts b/backend/src/db/schemas/org-gateway-config-v2.ts new file mode 100644 index 0000000000..fab9a3182f --- /dev/null +++ b/backend/src/db/schemas/org-gateway-config-v2.ts @@ -0,0 +1,29 @@ +// Code generated by automation script, DO NOT EDIT. +// Automated by pulling database and generating zod schema +// To update. Just run npm run generate:schema +// Written by akhilmhdh. 
+ +import { z } from "zod"; + +import { zodBuffer } from "@app/lib/zod"; + +import { TImmutableDBKeys } from "./models"; + +export const OrgGatewayConfigV2Schema = z.object({ + id: z.string().uuid(), + orgId: z.string().uuid(), + createdAt: z.date(), + updatedAt: z.date(), + encryptedRootGatewayCaPrivateKey: zodBuffer, + encryptedRootGatewayCaCertificate: zodBuffer, + encryptedGatewayServerCaPrivateKey: zodBuffer, + encryptedGatewayServerCaCertificate: zodBuffer, + encryptedGatewayServerCaCertificateChain: zodBuffer, + encryptedGatewayClientCaPrivateKey: zodBuffer, + encryptedGatewayClientCaCertificate: zodBuffer, + encryptedGatewayClientCaCertificateChain: zodBuffer +}); + +export type TOrgGatewayConfigV2 = z.infer; +export type TOrgGatewayConfigV2Insert = Omit, TImmutableDBKeys>; +export type TOrgGatewayConfigV2Update = Partial, TImmutableDBKeys>>; diff --git a/backend/src/db/schemas/org-relay-config.ts b/backend/src/db/schemas/org-relay-config.ts new file mode 100644 index 0000000000..1752da76ac --- /dev/null +++ b/backend/src/db/schemas/org-relay-config.ts @@ -0,0 +1,31 @@ +// Code generated by automation script, DO NOT EDIT. +// Automated by pulling database and generating zod schema +// To update. Just run npm run generate:schema +// Written by akhilmhdh. 
+ +import { z } from "zod"; + +import { zodBuffer } from "@app/lib/zod"; + +import { TImmutableDBKeys } from "./models"; + +export const OrgRelayConfigSchema = z.object({ + id: z.string().uuid(), + createdAt: z.date(), + updatedAt: z.date(), + orgId: z.string().uuid(), + encryptedRelayPkiClientCaPrivateKey: zodBuffer, + encryptedRelayPkiClientCaCertificate: zodBuffer, + encryptedRelayPkiClientCaCertificateChain: zodBuffer, + encryptedRelayPkiServerCaPrivateKey: zodBuffer, + encryptedRelayPkiServerCaCertificate: zodBuffer, + encryptedRelayPkiServerCaCertificateChain: zodBuffer, + encryptedRelaySshClientCaPrivateKey: zodBuffer, + encryptedRelaySshClientCaPublicKey: zodBuffer, + encryptedRelaySshServerCaPrivateKey: zodBuffer, + encryptedRelaySshServerCaPublicKey: zodBuffer +}); + +export type TOrgRelayConfig = z.infer; +export type TOrgRelayConfigInsert = Omit, TImmutableDBKeys>; +export type TOrgRelayConfigUpdate = Partial, TImmutableDBKeys>>; diff --git a/backend/src/db/schemas/relays.ts b/backend/src/db/schemas/relays.ts new file mode 100644 index 0000000000..4bb615e969 --- /dev/null +++ b/backend/src/db/schemas/relays.ts @@ -0,0 +1,22 @@ +// Code generated by automation script, DO NOT EDIT. +// Automated by pulling database and generating zod schema +// To update. Just run npm run generate:schema +// Written by akhilmhdh. 
+ +import { z } from "zod"; + +import { TImmutableDBKeys } from "./models"; + +export const RelaysSchema = z.object({ + id: z.string().uuid(), + createdAt: z.date(), + updatedAt: z.date(), + orgId: z.string().uuid().nullable().optional(), + identityId: z.string().uuid().nullable().optional(), + name: z.string(), + host: z.string() +}); + +export type TRelays = z.infer; +export type TRelaysInsert = Omit, TImmutableDBKeys>; +export type TRelaysUpdate = Partial, TImmutableDBKeys>>; diff --git a/backend/src/ee/routes/v1/audit-log-stream-routers/audit-log-stream-router.ts b/backend/src/ee/routes/v1/audit-log-stream-routers/audit-log-stream-router.ts index de68352f6e..48eed14c9e 100644 --- a/backend/src/ee/routes/v1/audit-log-stream-routers/audit-log-stream-router.ts +++ b/backend/src/ee/routes/v1/audit-log-stream-routers/audit-log-stream-router.ts @@ -4,6 +4,10 @@ import { AzureProviderListItemSchema, SanitizedAzureProviderSchema } from "@app/ee/services/audit-log-stream/azure/azure-provider-schemas"; +import { + CriblProviderListItemSchema, + SanitizedCriblProviderSchema +} from "@app/ee/services/audit-log-stream/cribl/cribl-provider-schemas"; import { CustomProviderListItemSchema, SanitizedCustomProviderSchema @@ -24,14 +28,16 @@ const SanitizedAuditLogStreamSchema = z.union([ SanitizedCustomProviderSchema, SanitizedDatadogProviderSchema, SanitizedSplunkProviderSchema, - SanitizedAzureProviderSchema + SanitizedAzureProviderSchema, + SanitizedCriblProviderSchema ]); const ProviderOptionsSchema = z.discriminatedUnion("provider", [ CustomProviderListItemSchema, DatadogProviderListItemSchema, SplunkProviderListItemSchema, - AzureProviderListItemSchema + AzureProviderListItemSchema, + CriblProviderListItemSchema ]); export const registerAuditLogStreamRouter = async (server: FastifyZodProvider) => { diff --git a/backend/src/ee/routes/v1/audit-log-stream-routers/index.ts b/backend/src/ee/routes/v1/audit-log-stream-routers/index.ts index 1884ce3ead..ad338c8011 100644 --- 
a/backend/src/ee/routes/v1/audit-log-stream-routers/index.ts +++ b/backend/src/ee/routes/v1/audit-log-stream-routers/index.ts @@ -4,6 +4,11 @@ import { SanitizedAzureProviderSchema, UpdateAzureProviderLogStreamSchema } from "@app/ee/services/audit-log-stream/azure/azure-provider-schemas"; +import { + CreateCriblProviderLogStreamSchema, + SanitizedCriblProviderSchema, + UpdateCriblProviderLogStreamSchema +} from "@app/ee/services/audit-log-stream/cribl/cribl-provider-schemas"; import { CreateCustomProviderLogStreamSchema, SanitizedCustomProviderSchema, @@ -61,5 +66,14 @@ export const AUDIT_LOG_STREAM_REGISTER_ROUTER_MAP: Record { + registerAuditLogStreamEndpoints({ + server, + provider: LogProvider.Cribl, + sanitizedResponseSchema: SanitizedCriblProviderSchema, + createSchema: CreateCriblProviderLogStreamSchema, + updateSchema: UpdateCriblProviderLogStreamSchema + }); } }; diff --git a/backend/src/ee/routes/v1/dynamic-secret-router.ts b/backend/src/ee/routes/v1/dynamic-secret-router.ts index b916bab67e..b1b3cea8e2 100644 --- a/backend/src/ee/routes/v1/dynamic-secret-router.ts +++ b/backend/src/ee/routes/v1/dynamic-secret-router.ts @@ -84,7 +84,9 @@ export const registerDynamicSecretRouter = async (server: FastifyZodProvider) => }), response: { 200: z.object({ - dynamicSecret: SanitizedDynamicSecretSchema + dynamicSecret: SanitizedDynamicSecretSchema.extend({ + inputs: z.unknown() + }) }) } }, @@ -151,7 +153,9 @@ export const registerDynamicSecretRouter = async (server: FastifyZodProvider) => }), response: { 200: z.object({ - dynamicSecret: SanitizedDynamicSecretSchema + dynamicSecret: SanitizedDynamicSecretSchema.extend({ + inputs: z.unknown() + }) }) } }, diff --git a/backend/src/ee/routes/v1/index.ts b/backend/src/ee/routes/v1/index.ts index 8bf0660136..5a43de381c 100644 --- a/backend/src/ee/routes/v1/index.ts +++ b/backend/src/ee/routes/v1/index.ts @@ -24,6 +24,7 @@ import { registerPITRouter } from "./pit-router"; import { registerProjectRoleRouter } from 
"./project-role-router"; import { registerProjectRouter } from "./project-router"; import { registerRateLimitRouter } from "./rate-limit-router"; +import { registerRelayRouter } from "./relay-router"; import { registerSamlRouter } from "./saml-router"; import { registerScimRouter } from "./scim-router"; import { registerSecretApprovalPolicyRouter } from "./secret-approval-policy-router"; @@ -79,6 +80,7 @@ export const registerV1EERoutes = async (server: FastifyZodProvider) => { ); await server.register(registerGatewayRouter, { prefix: "/gateways" }); + await server.register(registerRelayRouter, { prefix: "/relays" }); await server.register(registerGithubOrgSyncRouter, { prefix: "/github-org-sync-config" }); await server.register( diff --git a/backend/src/ee/routes/v1/license-router.ts b/backend/src/ee/routes/v1/license-router.ts index 0a59fa7b55..17923975d6 100644 --- a/backend/src/ee/routes/v1/license-router.ts +++ b/backend/src/ee/routes/v1/license-router.ts @@ -43,6 +43,12 @@ export const registerLicenseRouter = async (server: FastifyZodProvider) => { }, schema: { params: z.object({ organizationId: z.string().trim() }), + querystring: z.object({ + refreshCache: z + .enum(["true", "false"]) + .default("false") + .transform((value) => value === "true") + }), response: { 200: z.object({ plan: z.any() }) } @@ -54,7 +60,8 @@ export const registerLicenseRouter = async (server: FastifyZodProvider) => { actor: req.permission.type, actorOrgId: req.permission.orgId, actorAuthMethod: req.permission.authMethod, - orgId: req.params.organizationId + orgId: req.params.organizationId, + refreshCache: req.query.refreshCache }); return { plan }; } diff --git a/backend/src/ee/routes/v1/relay-router.ts b/backend/src/ee/routes/v1/relay-router.ts new file mode 100644 index 0000000000..e204800882 --- /dev/null +++ b/backend/src/ee/routes/v1/relay-router.ts @@ -0,0 +1,103 @@ +import { z } from "zod"; + +import { getConfig } from "@app/lib/config/env"; +import { crypto } from 
"@app/lib/crypto/cryptography"; +import { BadRequestError, UnauthorizedError } from "@app/lib/errors"; +import { writeLimit } from "@app/server/config/rateLimiter"; +import { slugSchema } from "@app/server/lib/schemas"; +import { verifyAuth } from "@app/server/plugins/auth/verify-auth"; +import { AuthMode } from "@app/services/auth/auth-type"; + +export const registerRelayRouter = async (server: FastifyZodProvider) => { + const appCfg = getConfig(); + + server.route({ + method: "POST", + url: "/register-instance-relay", + config: { + rateLimit: writeLimit + }, + schema: { + body: z.object({ + host: z.string(), + name: slugSchema({ min: 1, max: 32, field: "name" }) + }), + response: { + 200: z.object({ + pki: z.object({ + serverCertificate: z.string(), + serverPrivateKey: z.string(), + clientCertificateChain: z.string() + }), + ssh: z.object({ + serverCertificate: z.string(), + serverPrivateKey: z.string(), + clientCAPublicKey: z.string() + }) + }) + } + }, + onRequest: (req, _, next) => { + const authHeader = req.headers.authorization; + + if (appCfg.RELAY_AUTH_SECRET && authHeader) { + const expectedHeader = `Bearer ${appCfg.RELAY_AUTH_SECRET}`; + if ( + authHeader.length === expectedHeader.length && + crypto.nativeCrypto.timingSafeEqual(Buffer.from(authHeader), Buffer.from(expectedHeader)) + ) { + return next(); + } + } + + throw new UnauthorizedError({ + message: "Invalid relay auth secret" + }); + }, + handler: async (req) => { + return server.services.relay.registerRelay({ + ...req.body + }); + } + }); + + server.route({ + method: "POST", + url: "/register-org-relay", + config: { + rateLimit: writeLimit + }, + schema: { + body: z.object({ + host: z.string(), + name: slugSchema({ min: 1, max: 32, field: "name" }) + }), + response: { + 200: z.object({ + pki: z.object({ + serverCertificate: z.string(), + serverPrivateKey: z.string(), + clientCertificateChain: z.string() + }), + ssh: z.object({ + serverCertificate: z.string(), + serverPrivateKey: z.string(), + 
clientCAPublicKey: z.string() + }) + }) + } + }, + onRequest: verifyAuth([AuthMode.IDENTITY_ACCESS_TOKEN]), + handler: async (req) => { + throw new BadRequestError({ + message: "Org relay registration is not yet supported" + }); + + return server.services.relay.registerRelay({ + ...req.body, + identityId: req.permission.id, + orgId: req.permission.orgId + }); + } + }); +}; diff --git a/backend/src/ee/routes/v2/gateway-router.ts b/backend/src/ee/routes/v2/gateway-router.ts new file mode 100644 index 0000000000..a7e656a649 --- /dev/null +++ b/backend/src/ee/routes/v2/gateway-router.ts @@ -0,0 +1,133 @@ +import z from "zod"; + +import { GatewaysV2Schema } from "@app/db/schemas"; +import { readLimit, writeLimit } from "@app/server/config/rateLimiter"; +import { slugSchema } from "@app/server/lib/schemas"; +import { verifyAuth } from "@app/server/plugins/auth/verify-auth"; +import { AuthMode } from "@app/services/auth/auth-type"; + +const SanitizedGatewayV2Schema = GatewaysV2Schema.pick({ + id: true, + identityId: true, + name: true, + createdAt: true, + updatedAt: true, + heartbeat: true +}); + +export const registerGatewayV2Router = async (server: FastifyZodProvider) => { + server.route({ + method: "POST", + url: "/", + schema: { + body: z.object({ + relayName: slugSchema({ min: 1, max: 32, field: "relayName" }), + name: slugSchema({ min: 1, max: 32, field: "name" }) + }), + response: { + 200: z.object({ + gatewayId: z.string(), + relayHost: z.string(), + pki: z.object({ + serverCertificate: z.string(), + serverPrivateKey: z.string(), + clientCertificateChain: z.string() + }), + ssh: z.object({ + clientCertificate: z.string(), + clientPrivateKey: z.string(), + serverCAPublicKey: z.string() + }) + }) + } + }, + config: { + rateLimit: writeLimit + }, + onRequest: verifyAuth([AuthMode.IDENTITY_ACCESS_TOKEN]), + handler: async (req) => { + const gateway = await server.services.gatewayV2.registerGateway({ + orgId: req.permission.orgId, + relayName: req.body.relayName, + 
actorId: req.permission.id, + actorAuthMethod: req.permission.authMethod, + name: req.body.name + }); + + return gateway; + } + }); + + server.route({ + method: "POST", + url: "/heartbeat", + config: { + rateLimit: writeLimit + }, + schema: { + response: { + 200: z.object({ + message: z.string() + }) + } + }, + onRequest: verifyAuth([AuthMode.IDENTITY_ACCESS_TOKEN]), + handler: async (req) => { + await server.services.gatewayV2.heartbeat({ + orgPermission: req.permission + }); + + return { message: "Successfully triggered heartbeat" }; + } + }); + + server.route({ + method: "GET", + url: "/", + schema: { + response: { + 200: SanitizedGatewayV2Schema.extend({ + identity: z.object({ + name: z.string(), + id: z.string() + }) + }).array() + } + }, + config: { + rateLimit: readLimit + }, + onRequest: verifyAuth([AuthMode.JWT, AuthMode.IDENTITY_ACCESS_TOKEN]), + handler: async (req) => { + const gateways = await server.services.gatewayV2.listGateways({ + orgPermission: req.permission + }); + + return gateways; + } + }); + + server.route({ + method: "DELETE", + url: "/:id", + config: { + rateLimit: writeLimit + }, + schema: { + params: z.object({ + id: z.string() + }), + response: { + 200: SanitizedGatewayV2Schema + } + }, + onRequest: verifyAuth([AuthMode.IDENTITY_ACCESS_TOKEN, AuthMode.JWT]), + handler: async (req) => { + const gateway = await server.services.gatewayV2.deleteGatewayById({ + orgPermission: req.permission, + id: req.params.id + }); + return gateway; + } + }); +}; diff --git a/backend/src/ee/routes/v2/index.ts b/backend/src/ee/routes/v2/index.ts index e364f49494..e082773dd6 100644 --- a/backend/src/ee/routes/v2/index.ts +++ b/backend/src/ee/routes/v2/index.ts @@ -7,6 +7,7 @@ import { SECRET_SCANNING_REGISTER_ROUTER_MAP } from "@app/ee/routes/v2/secret-scanning-v2-routers"; +import { registerGatewayV2Router } from "./gateway-router"; import { registerIdentityProjectAdditionalPrivilegeRouter } from "./identity-project-additional-privilege-router"; import { 
registerProjectRoleRouter } from "./project-role-router"; @@ -23,6 +24,8 @@ export const registerV2EERoutes = async (server: FastifyZodProvider) => { prefix: "/identity-project-additional-privilege" }); + await server.register(registerGatewayV2Router, { prefix: "/gateways" }); + await server.register( async (secretRotationV2Router) => { // register generic secret rotation endpoints diff --git a/backend/src/ee/services/audit-log-stream/audit-log-stream-enums.ts b/backend/src/ee/services/audit-log-stream/audit-log-stream-enums.ts index b23a528a7e..ebef18574c 100644 --- a/backend/src/ee/services/audit-log-stream/audit-log-stream-enums.ts +++ b/backend/src/ee/services/audit-log-stream/audit-log-stream-enums.ts @@ -1,6 +1,7 @@ export enum LogProvider { Azure = "azure", + Cribl = "cribl", + Custom = "custom", Datadog = "datadog", - Splunk = "splunk", - Custom = "custom" + Splunk = "splunk" } diff --git a/backend/src/ee/services/audit-log-stream/audit-log-stream-factory.ts b/backend/src/ee/services/audit-log-stream/audit-log-stream-factory.ts index 4dea122d8c..8dde0e0795 100644 --- a/backend/src/ee/services/audit-log-stream/audit-log-stream-factory.ts +++ b/backend/src/ee/services/audit-log-stream/audit-log-stream-factory.ts @@ -1,6 +1,7 @@ import { LogProvider } from "./audit-log-stream-enums"; import { TAuditLogStreamCredentials, TLogStreamFactory } from "./audit-log-stream-types"; import { AzureProviderFactory } from "./azure/azure-provider-factory"; +import { CriblProviderFactory } from "./cribl/cribl-provider-factory"; import { CustomProviderFactory } from "./custom/custom-provider-factory"; import { DatadogProviderFactory } from "./datadog/datadog-provider-factory"; import { SplunkProviderFactory } from "./splunk/splunk-provider-factory"; @@ -11,5 +12,6 @@ export const LOG_STREAM_FACTORY_MAP: Record { getDatadogProviderListItem(), getSplunkProviderListItem(), getCustomProviderListItem(), - getAzureProviderListItem() + getAzureProviderListItem(), + 
getCriblProviderListItem() ].sort((a, b) => a.name.localeCompare(b.name)); }; diff --git a/backend/src/ee/services/audit-log-stream/audit-log-stream-types.ts b/backend/src/ee/services/audit-log-stream/audit-log-stream-types.ts index d3f2e0a9eb..5983e50bf9 100644 --- a/backend/src/ee/services/audit-log-stream/audit-log-stream-types.ts +++ b/backend/src/ee/services/audit-log-stream/audit-log-stream-types.ts @@ -2,17 +2,19 @@ import { TAuditLogs } from "@app/db/schemas"; import { LogProvider } from "./audit-log-stream-enums"; import { TAzureProvider, TAzureProviderCredentials } from "./azure/azure-provider-types"; +import { TCriblProvider, TCriblProviderCredentials } from "./cribl/cribl-provider-types"; import { TCustomProvider, TCustomProviderCredentials } from "./custom/custom-provider-types"; import { TDatadogProvider, TDatadogProviderCredentials } from "./datadog/datadog-provider-types"; import { TSplunkProvider, TSplunkProviderCredentials } from "./splunk/splunk-provider-types"; -export type TAuditLogStream = TDatadogProvider | TSplunkProvider | TCustomProvider | TAzureProvider; +export type TAuditLogStream = TDatadogProvider | TSplunkProvider | TCustomProvider | TAzureProvider | TCriblProvider; export type TAuditLogStreamCredentials = | TDatadogProviderCredentials | TSplunkProviderCredentials | TCustomProviderCredentials - | TAzureProviderCredentials; + | TAzureProviderCredentials + | TCriblProviderCredentials; export type TCreateAuditLogStreamDTO = { provider: LogProvider; diff --git a/backend/src/ee/services/audit-log-stream/cribl/cribl-provider-factory.ts b/backend/src/ee/services/audit-log-stream/cribl/cribl-provider-factory.ts new file mode 100644 index 0000000000..2e4eef93ba --- /dev/null +++ b/backend/src/ee/services/audit-log-stream/cribl/cribl-provider-factory.ts @@ -0,0 +1,58 @@ +import { RawAxiosRequestHeaders } from "axios"; + +import { request } from "@app/lib/config/request"; +import { BadRequestError } from "@app/lib/errors"; +import { 
blockLocalAndPrivateIpAddresses } from "@app/lib/validator"; + +import { AUDIT_LOG_STREAM_TIMEOUT } from "../../audit-log/audit-log-queue"; +import { TLogStreamFactoryStreamLog, TLogStreamFactoryValidateCredentials } from "../audit-log-stream-types"; +import { TCriblProviderCredentials } from "./cribl-provider-types"; + +export const CriblProviderFactory = () => { + const validateCredentials: TLogStreamFactoryValidateCredentials = async ({ + credentials + }) => { + const { url, token } = credentials; + + await blockLocalAndPrivateIpAddresses(url); + + const streamHeaders: RawAxiosRequestHeaders = { + "Content-Type": "application/json", + Authorization: `Bearer ${token}` + }; + + await request + .post(url, JSON.stringify({ ping: "ok" }), { + headers: streamHeaders, + timeout: AUDIT_LOG_STREAM_TIMEOUT, + signal: AbortSignal.timeout(AUDIT_LOG_STREAM_TIMEOUT) + }) + .catch((err) => { + throw new BadRequestError({ message: `Failed to connect with Cribl: ${(err as Error)?.message}` }); + }); + + return credentials; + }; + + const streamLog: TLogStreamFactoryStreamLog = async ({ credentials, auditLog }) => { + const { url, token } = credentials; + + await blockLocalAndPrivateIpAddresses(url); + + const streamHeaders: RawAxiosRequestHeaders = { + "Content-Type": "application/json", + Authorization: `Bearer ${token}` + }; + + await request.post(url, JSON.stringify(auditLog), { + headers: streamHeaders, + timeout: AUDIT_LOG_STREAM_TIMEOUT, + signal: AbortSignal.timeout(AUDIT_LOG_STREAM_TIMEOUT) + }); + }; + + return { + validateCredentials, + streamLog + }; +}; diff --git a/backend/src/ee/services/audit-log-stream/cribl/cribl-provider-fns.ts b/backend/src/ee/services/audit-log-stream/cribl/cribl-provider-fns.ts new file mode 100644 index 0000000000..f8b82509a5 --- /dev/null +++ b/backend/src/ee/services/audit-log-stream/cribl/cribl-provider-fns.ts @@ -0,0 +1,8 @@ +import { LogProvider } from "../audit-log-stream-enums"; + +export const getCriblProviderListItem = () => { + 
return { + name: "Cribl" as const, + provider: LogProvider.Cribl as const + }; +}; diff --git a/backend/src/ee/services/audit-log-stream/cribl/cribl-provider-schemas.ts b/backend/src/ee/services/audit-log-stream/cribl/cribl-provider-schemas.ts new file mode 100644 index 0000000000..8c2a51f326 --- /dev/null +++ b/backend/src/ee/services/audit-log-stream/cribl/cribl-provider-schemas.ts @@ -0,0 +1,34 @@ +import { z } from "zod"; + +import { LogProvider } from "../audit-log-stream-enums"; + +import { BaseProviderSchema } from "../audit-log-stream-schemas"; + +export const CriblProviderCredentialsSchema = z.object({ + url: z.string().url().trim().min(1).max(255), + token: z.string().trim().min(21).max(255) +}); + +const BaseCriblProviderSchema = BaseProviderSchema.extend({ provider: z.literal(LogProvider.Cribl) }); + +export const CriblProviderSchema = BaseCriblProviderSchema.extend({ + credentials: CriblProviderCredentialsSchema +}); + +export const SanitizedCriblProviderSchema = BaseCriblProviderSchema.extend({ + credentials: CriblProviderCredentialsSchema.pick({ + url: true + }) +}); + +export const CriblProviderListItemSchema = z.object({ + name: z.literal("Cribl"), + provider: z.literal(LogProvider.Cribl) +}); + +export const CreateCriblProviderLogStreamSchema = z.object({ + credentials: CriblProviderCredentialsSchema +}); + +export const UpdateCriblProviderLogStreamSchema = z.object({ + credentials: CriblProviderCredentialsSchema +}); diff --git a/backend/src/ee/services/audit-log-stream/cribl/cribl-provider-types.ts b/backend/src/ee/services/audit-log-stream/cribl/cribl-provider-types.ts new file mode 100644 index 0000000000..6577f1bff2 --- /dev/null +++ b/backend/src/ee/services/audit-log-stream/cribl/cribl-provider-types.ts @@ -0,0 +1,7 @@ +import { z } from "zod"; + +import { CriblProviderCredentialsSchema, CriblProviderSchema } from "./cribl-provider-schemas"; + +export type TCriblProvider = z.infer<typeof CriblProviderSchema>; + +export type TCriblProviderCredentials = z.infer<typeof CriblProviderCredentialsSchema>; diff
--git a/backend/src/ee/services/dynamic-secret-lease/dynamic-secret-lease-dal.ts b/backend/src/ee/services/dynamic-secret-lease/dynamic-secret-lease-dal.ts index 525de9efd9..974e200612 100644 --- a/backend/src/ee/services/dynamic-secret-lease/dynamic-secret-lease-dal.ts +++ b/backend/src/ee/services/dynamic-secret-lease/dynamic-secret-lease-dal.ts @@ -46,7 +46,10 @@ export const dynamicSecretLeaseDALFactory = (db: TDbClient) => { const countLeasesForDynamicSecret = async (dynamicSecretId: string, tx?: Knex) => { try { - const doc = await (tx || db)(TableName.DynamicSecretLease).count("*").where({ dynamicSecretId }).first(); + const doc = await (tx || db.replicaNode())(TableName.DynamicSecretLease) + .count("*") + .where({ dynamicSecretId }) + .first(); return parseInt(doc || "0", 10); } catch (error) { throw new DatabaseError({ error, name: "DynamicSecretCountLeases" }); @@ -55,7 +58,7 @@ export const dynamicSecretLeaseDALFactory = (db: TDbClient) => { const findById = async (id: string, tx?: Knex) => { try { - const doc = await (tx || db)(TableName.DynamicSecretLease) + const doc = await (tx || db.replicaNode())(TableName.DynamicSecretLease) .where({ [`${TableName.DynamicSecretLease}.id` as "id"]: id }) .first() .join( diff --git a/backend/src/ee/services/dynamic-secret/dynamic-secret-service.ts b/backend/src/ee/services/dynamic-secret/dynamic-secret-service.ts index 73dcbe6e36..659e07bca3 100644 --- a/backend/src/ee/services/dynamic-secret/dynamic-secret-service.ts +++ b/backend/src/ee/services/dynamic-secret/dynamic-secret-service.ts @@ -19,6 +19,7 @@ import { TSecretFolderDALFactory } from "@app/services/secret-folder/secret-fold import { TDynamicSecretLeaseDALFactory } from "../dynamic-secret-lease/dynamic-secret-lease-dal"; import { TDynamicSecretLeaseQueueServiceFactory } from "../dynamic-secret-lease/dynamic-secret-lease-queue"; import { TGatewayDALFactory } from "../gateway/gateway-dal"; +import { TGatewayV2DALFactory } from "../gateway-v2/gateway-v2-dal"; 
import { OrgPermissionGatewayActions, OrgPermissionSubjects } from "../permission/org-permission"; import { TDynamicSecretDALFactory } from "./dynamic-secret-dal"; import { DynamicSecretStatus, TDynamicSecretServiceFactory } from "./dynamic-secret-types"; @@ -39,6 +40,7 @@ type TDynamicSecretServiceFactoryDep = { permissionService: Pick; kmsService: Pick; gatewayDAL: Pick; + gatewayV2DAL: Pick; resourceMetadataDAL: Pick; }; @@ -53,6 +55,7 @@ export const dynamicSecretServiceFactory = ({ projectDAL, kmsService, gatewayDAL, + gatewayV2DAL, resourceMetadataDAL }: TDynamicSecretServiceFactoryDep): TDynamicSecretServiceFactory => { const create: TDynamicSecretServiceFactory["create"] = async ({ @@ -70,6 +73,7 @@ export const dynamicSecretServiceFactory = ({ metadata, usernameTemplate }) => { + let isGatewayV1 = true; const project = await projectDAL.findProjectBySlug(projectSlug, actorOrgId); if (!project) throw new NotFoundError({ message: `Project with slug '${projectSlug}' not found` }); @@ -118,17 +122,22 @@ export const dynamicSecretServiceFactory = ({ const gatewayId = inputs.gatewayId as string; const [gateway] = await gatewayDAL.find({ id: gatewayId, orgId: actorOrgId }); + const [gatewayv2] = await gatewayV2DAL.find({ id: gatewayId, orgId: actorOrgId }); - if (!gateway) { + if (!gateway && !gatewayv2) { throw new NotFoundError({ message: `Gateway with ID ${gatewayId} not found` }); } + if (!gateway) { + isGatewayV1 = false; + } + const { permission: orgPermission } = await permissionService.getOrgPermission( actor, actorId, - gateway.orgId, + gateway?.orgId ?? gatewayv2?.orgId, actorAuthMethod, actorOrgId ); @@ -138,7 +147,7 @@ export const dynamicSecretServiceFactory = ({ OrgPermissionSubjects.Gateway ); - selectedGatewayId = gateway.id; + selectedGatewayId = gateway?.id ?? 
gatewayv2?.id; } const isConnected = await selectedProvider.validateConnection(provider.inputs, { projectId }); @@ -159,7 +168,8 @@ export const dynamicSecretServiceFactory = ({ defaultTTL, folderId: folder.id, name, - gatewayId: selectedGatewayId, + gatewayId: isGatewayV1 ? selectedGatewayId : undefined, + gatewayV2Id: isGatewayV1 ? undefined : selectedGatewayId, usernameTemplate }, tx @@ -180,7 +190,7 @@ export const dynamicSecretServiceFactory = ({ return cfg; }); - return dynamicSecretCfg; + return { ...dynamicSecretCfg, inputs }; }; const updateByName: TDynamicSecretServiceFactory["updateByName"] = async ({ @@ -270,20 +280,27 @@ export const dynamicSecretServiceFactory = ({ const updatedInput = await selectedProvider.validateProviderInputs(newInput, { projectId }); let selectedGatewayId: string | null = null; + let isGatewayV1 = true; if (updatedInput && typeof updatedInput === "object" && "gatewayId" in updatedInput && updatedInput?.gatewayId) { const gatewayId = updatedInput.gatewayId as string; const [gateway] = await gatewayDAL.find({ id: gatewayId, orgId: actorOrgId }); - if (!gateway) { + const [gatewayv2] = await gatewayV2DAL.find({ id: gatewayId, orgId: actorOrgId }); + + if (!gateway && !gatewayv2) { throw new NotFoundError({ message: `Gateway with ID ${gatewayId} not found` }); } + if (!gateway) { + isGatewayV1 = false; + } + const { permission: orgPermission } = await permissionService.getOrgPermission( actor, actorId, - gateway.orgId, + actorOrgId, actorAuthMethod, actorOrgId ); @@ -293,7 +310,7 @@ export const dynamicSecretServiceFactory = ({ OrgPermissionSubjects.Gateway ); - selectedGatewayId = gateway.id; + selectedGatewayId = gateway?.id ?? gatewayv2?.id; } const isConnected = await selectedProvider.validateConnection(newInput, { projectId }); @@ -309,7 +326,8 @@ export const dynamicSecretServiceFactory = ({ defaultTTL, name: newName ?? name, status: null, - gatewayId: selectedGatewayId, + gatewayId: isGatewayV1 ? 
selectedGatewayId : null, + gatewayV2Id: isGatewayV1 ? null : selectedGatewayId, usernameTemplate }, tx @@ -337,7 +355,7 @@ export const dynamicSecretServiceFactory = ({ return cfg; }); - return updatedDynamicCfg; + return { ...updatedDynamicCfg, inputs: updatedInput }; }; const deleteByName: TDynamicSecretServiceFactory["deleteByName"] = async ({ diff --git a/backend/src/ee/services/dynamic-secret/providers/index.ts b/backend/src/ee/services/dynamic-secret/providers/index.ts index 184b9fc895..3ec0f795e3 100644 --- a/backend/src/ee/services/dynamic-secret/providers/index.ts +++ b/backend/src/ee/services/dynamic-secret/providers/index.ts @@ -1,6 +1,7 @@ import { SnowflakeProvider } from "@app/ee/services/dynamic-secret/providers/snowflake"; import { TGatewayServiceFactory } from "../../gateway/gateway-service"; +import { TGatewayV2ServiceFactory } from "../../gateway-v2/gateway-v2-service"; import { AwsElastiCacheDatabaseProvider } from "./aws-elasticache"; import { AwsIamProvider } from "./aws-iam"; import { AzureEntraIDProvider } from "./azure-entra-id"; @@ -24,12 +25,14 @@ import { VerticaProvider } from "./vertica"; type TBuildDynamicSecretProviderDTO = { gatewayService: Pick; + gatewayV2Service: Pick; }; export const buildDynamicSecretProviders = ({ - gatewayService + gatewayService, + gatewayV2Service }: TBuildDynamicSecretProviderDTO): Record => ({ - [DynamicSecretProviders.SqlDatabase]: SqlDatabaseProvider({ gatewayService }), + [DynamicSecretProviders.SqlDatabase]: SqlDatabaseProvider({ gatewayService, gatewayV2Service }), [DynamicSecretProviders.Cassandra]: CassandraProvider(), [DynamicSecretProviders.AwsIam]: AwsIamProvider(), [DynamicSecretProviders.Redis]: RedisDatabaseProvider(), @@ -44,7 +47,7 @@ export const buildDynamicSecretProviders = ({ [DynamicSecretProviders.Snowflake]: SnowflakeProvider(), [DynamicSecretProviders.Totp]: TotpProvider(), [DynamicSecretProviders.SapAse]: SapAseProvider(), - [DynamicSecretProviders.Kubernetes]: 
KubernetesProvider({ gatewayService }), + [DynamicSecretProviders.Kubernetes]: KubernetesProvider({ gatewayService, gatewayV2Service }), [DynamicSecretProviders.Vertica]: VerticaProvider({ gatewayService }), [DynamicSecretProviders.GcpIam]: GcpIamProvider(), [DynamicSecretProviders.Github]: GithubProvider(), diff --git a/backend/src/ee/services/dynamic-secret/providers/kubernetes.ts b/backend/src/ee/services/dynamic-secret/providers/kubernetes.ts index 3d69c3282e..3c924458d8 100644 --- a/backend/src/ee/services/dynamic-secret/providers/kubernetes.ts +++ b/backend/src/ee/services/dynamic-secret/providers/kubernetes.ts @@ -5,12 +5,14 @@ import https from "https"; import { BadRequestError } from "@app/lib/errors"; import { sanitizeString } from "@app/lib/fn"; import { GatewayHttpProxyActions, GatewayProxyProtocol, withGatewayProxy } from "@app/lib/gateway"; +import { withGatewayV2Proxy } from "@app/lib/gateway-v2/gateway-v2"; import { alphaNumericNanoId } from "@app/lib/nanoid"; import { blockLocalAndPrivateIpAddresses } from "@app/lib/validator"; import { TKubernetesTokenRequest } from "@app/services/identity-kubernetes-auth/identity-kubernetes-auth-types"; import { TDynamicSecretKubernetesLeaseConfig } from "../../dynamic-secret-lease/dynamic-secret-lease-types"; import { TGatewayServiceFactory } from "../../gateway/gateway-service"; +import { TGatewayV2ServiceFactory } from "../../gateway-v2/gateway-v2-service"; import { DynamicSecretKubernetesSchema, KubernetesAuthMethod, @@ -26,6 +28,7 @@ const GATEWAY_AUTH_DEFAULT_URL = "https://kubernetes.default.svc.cluster.local"; type TKubernetesProviderDTO = { gatewayService: Pick; + gatewayV2Service: Pick; }; const generateUsername = (usernameTemplate?: string | null) => { @@ -38,7 +41,10 @@ const generateUsername = (usernameTemplate?: string | null) => { }); }; -export const KubernetesProvider = ({ gatewayService }: TKubernetesProviderDTO): TDynamicProviderFns => { +export const KubernetesProvider = ({ + gatewayService, + 
gatewayV2Service +}: TKubernetesProviderDTO): TDynamicProviderFns => { const validateProviderInputs = async (inputs: unknown) => { const providerInputs = await DynamicSecretKubernetesSchema.parseAsync(inputs); if (!providerInputs.gatewayId && providerInputs.url) { @@ -58,6 +64,32 @@ export const KubernetesProvider = ({ gatewayService }: TKubernetesProviderDTO): }, gatewayCallback: (host: string, port: number, httpsAgent?: https.Agent) => Promise ): Promise => { + const gatewayV2ConnectionDetails = await gatewayV2Service.getPlatformConnectionDetailsByGatewayId({ + gatewayId: inputs.gatewayId, + targetHost: inputs.targetHost, + targetPort: inputs.targetPort + }); + if (gatewayV2ConnectionDetails) { + const callbackResult = await withGatewayV2Proxy( + async (port) => { + return gatewayCallback( + inputs.reviewTokenThroughGateway ? "http://localhost" : "https://localhost", + port, + inputs.httpsAgent + ); + }, + { + relayHost: gatewayV2ConnectionDetails.relayHost, + gateway: gatewayV2ConnectionDetails.gateway, + relay: gatewayV2ConnectionDetails.relay, + protocol: inputs.reviewTokenThroughGateway ? GatewayProxyProtocol.Http : GatewayProxyProtocol.Tcp, + httpsAgent: inputs.httpsAgent + } + ); + + return callbackResult; + } + const relayDetails = await gatewayService.fnGetGatewayClientTlsByGatewayId(inputs.gatewayId); const [relayHost, relayPort] = relayDetails.relayAddress.split(":"); @@ -353,8 +385,18 @@ export const KubernetesProvider = ({ gatewayService }: TKubernetesProviderDTO): return true; } catch (error) { let errorMessage = error instanceof Error ? 
error.message : "Unknown error"; - if (axios.isAxiosError(error) && (error.response?.data as { message: string })?.message) { - errorMessage = (error.response?.data as { message: string }).message; + if (axios.isAxiosError(error)) { + if (error.response) { + let { message } = error?.response?.data as unknown as { message?: string }; + + if (!message && typeof error.response.data === "string") { + message = error.response.data; + } + + if (message) { + errorMessage = message; + } + } } const sanitizedErrorMessage = sanitizeString({ @@ -603,8 +645,18 @@ export const KubernetesProvider = ({ gatewayService }: TKubernetesProviderDTO): }; } catch (error) { let errorMessage = error instanceof Error ? error.message : "Unknown error"; - if (axios.isAxiosError(error) && (error.response?.data as { message: string })?.message) { - errorMessage = (error.response?.data as { message: string }).message; + if (axios.isAxiosError(error)) { + if (error.response) { + let { message } = error?.response?.data as unknown as { message?: string }; + + if (!message && typeof error.response.data === "string") { + message = error.response.data; + } + + if (message) { + errorMessage = message; + } + } } const sanitizedErrorMessage = sanitizeString({ @@ -740,8 +792,18 @@ export const KubernetesProvider = ({ gatewayService }: TKubernetesProviderDTO): } } catch (error) { let errorMessage = error instanceof Error ? 
error.message : "Unknown error"; - if (axios.isAxiosError(error) && (error.response?.data as { message: string })?.message) { - errorMessage = (error.response?.data as { message: string }).message; + if (axios.isAxiosError(error)) { + if (error.response) { + let { message } = error?.response?.data as unknown as { message?: string }; + + if (!message && typeof error.response.data === "string") { + message = error.response.data; + } + + if (message) { + errorMessage = message; + } + } } const sanitizedErrorMessage = sanitizeString({ diff --git a/backend/src/ee/services/dynamic-secret/providers/models.ts b/backend/src/ee/services/dynamic-secret/providers/models.ts index ae1bcfc252..f076bf8836 100644 --- a/backend/src/ee/services/dynamic-secret/providers/models.ts +++ b/backend/src/ee/services/dynamic-secret/providers/models.ts @@ -165,6 +165,7 @@ export const DynamicSecretSqlDBSchema = z.object({ revocationStatement: z.string().trim(), renewStatement: z.string().trim().optional(), ca: z.string().optional(), + sslEnabled: z.boolean().optional(), gatewayId: z.string().nullable().optional() }); @@ -275,11 +276,11 @@ export const DynamicSecretMongoAtlasSchema = z.object({ export const DynamicSecretMongoDBSchema = z.object({ host: z.string().min(1).trim().toLowerCase(), - port: z.number().optional(), + port: z.number().optional().nullable(), username: z.string().min(1).trim(), password: z.string().min(1).trim(), database: z.string().min(1).trim(), - ca: z.string().min(1).optional(), + ca: z.string().trim().optional().nullable(), roles: z .string() .array() diff --git a/backend/src/ee/services/dynamic-secret/providers/mongo-db.ts b/backend/src/ee/services/dynamic-secret/providers/mongo-db.ts index db1d30dfac..8154f3e133 100644 --- a/backend/src/ee/services/dynamic-secret/providers/mongo-db.ts +++ b/backend/src/ee/services/dynamic-secret/providers/mongo-db.ts @@ -44,7 +44,7 @@ export const MongoDBProvider = (): TDynamicProviderFns => { password: providerInputs.password }, 
directConnection: !isSrv, - ca: providerInputs.ca + ca: providerInputs.ca || undefined }); return client; }; diff --git a/backend/src/ee/services/dynamic-secret/providers/sql-database.ts b/backend/src/ee/services/dynamic-secret/providers/sql-database.ts index 57ce710b14..a9011b9936 100644 --- a/backend/src/ee/services/dynamic-secret/providers/sql-database.ts +++ b/backend/src/ee/services/dynamic-secret/providers/sql-database.ts @@ -1,15 +1,18 @@ import handlebars from "handlebars"; import knex from "knex"; +import RE2 from "re2"; import { z } from "zod"; import { crypto } from "@app/lib/crypto/cryptography"; import { BadRequestError } from "@app/lib/errors"; import { sanitizeString } from "@app/lib/fn"; import { GatewayProxyProtocol, withGatewayProxy } from "@app/lib/gateway"; +import { withGatewayV2Proxy } from "@app/lib/gateway-v2/gateway-v2"; import { alphaNumericNanoId } from "@app/lib/nanoid"; import { validateHandlebarTemplate } from "@app/lib/template/validate-handlebars"; import { TGatewayServiceFactory } from "../../gateway/gateway-service"; +import { TGatewayV2ServiceFactory } from "../../gateway-v2/gateway-v2-service"; import { verifyHostInputValidity } from "../dynamic-secret-fns"; import { DynamicSecretSqlDBSchema, PasswordRequirements, SqlProviders, TDynamicProviderFns } from "./models"; import { compileUsernameTemplate } from "./templateUtils"; @@ -128,9 +131,13 @@ const generateUsername = (provider: SqlProviders, usernameTemplate?: string | nu type TSqlDatabaseProviderDTO = { gatewayService: Pick; + gatewayV2Service: Pick; }; -export const SqlDatabaseProvider = ({ gatewayService }: TSqlDatabaseProviderDTO): TDynamicProviderFns => { +export const SqlDatabaseProvider = ({ + gatewayService, + gatewayV2Service +}: TSqlDatabaseProviderDTO): TDynamicProviderFns => { const validateProviderInputs = async (inputs: unknown) => { const providerInputs = await DynamicSecretSqlDBSchema.parseAsync(inputs); @@ -150,19 +157,40 @@ export const SqlDatabaseProvider = 
({ gatewayService }: TSqlDatabaseProviderDTO) return { ...providerInputs, hostIp }; }; - const $getClient = async (providerInputs: z.infer & { hostIp: string }) => { + const $getClient = async ( + providerInputs: z.infer & { hostIp: string; originalHost: string } + ) => { const ssl = providerInputs.ca ? { rejectUnauthorized: false, ca: providerInputs.ca, servername: providerInputs.host } : undefined; + const isMsSQLClient = providerInputs.client === SqlProviders.MsSQL; + /* + We route through the gateway by setting connection.host = "localhost". + Azure SQL identifies the logical server from the TDS login name when the host + isn’t the Azure FQDN. Therefore, when using the gateway, ensure username is + "user@" so Azure opens the correct logical server. + Direct connections to the Azure FQDN usually don’t require this suffix. + */ + const isAzureSql = isMsSQLClient && new RE2(/\.database\.windows\.net$/i).test(providerInputs.originalHost); + const azureServerLabel = + isAzureSql && providerInputs.gatewayId ? providerInputs.originalHost?.split(".")[0] : undefined; + const effectiveUser = + isAzureSql && !providerInputs.username.includes("@") && azureServerLabel + ? `${providerInputs.username}@${azureServerLabel}` + : providerInputs.username; + const db = knex({ client: providerInputs.client, connection: { database: providerInputs.database, port: providerInputs.port, - host: providerInputs.client === SqlProviders.Postgres ? providerInputs.hostIp : providerInputs.host, - user: providerInputs.username, + host: + providerInputs.client === SqlProviders.Postgres && !providerInputs.gatewayId + ? providerInputs.hostIp + : providerInputs.host, + user: effectiveUser, password: providerInputs.password, ssl, // @ts-expect-error this is because of knexjs type signature issue. 
This is directly passed to driver @@ -170,6 +198,7 @@ export const SqlDatabaseProvider = ({ gatewayService }: TSqlDatabaseProviderDTO) // https://github.com/tediousjs/tedious/blob/ebb023ed90969a7ec0e4b036533ad52739d921f7/test/config.ci.ts#L19 options: isMsSQLClient ? { + ...(providerInputs.sslEnabled !== undefined ? { encrypt: providerInputs.sslEnabled } : {}), trustServerCertificate: !providerInputs.ca, cryptoCredentialsDetails: providerInputs.ca ? { ca: providerInputs.ca } : {} } @@ -185,6 +214,26 @@ export const SqlDatabaseProvider = ({ gatewayService }: TSqlDatabaseProviderDTO) providerInputs: z.infer, gatewayCallback: (host: string, port: number) => Promise ) => { + const gatewayV2ConnectionDetails = await gatewayV2Service.getPlatformConnectionDetailsByGatewayId({ + gatewayId: providerInputs.gatewayId as string, + targetHost: providerInputs.host, + targetPort: providerInputs.port + }); + + if (gatewayV2ConnectionDetails) { + return withGatewayV2Proxy( + async (port) => { + await gatewayCallback("localhost", port); + }, + { + relayHost: gatewayV2ConnectionDetails.relayHost, + gateway: gatewayV2ConnectionDetails.gateway, + relay: gatewayV2ConnectionDetails.relay, + protocol: GatewayProxyProtocol.Tcp + } + ); + } + const relayDetails = await gatewayService.fnGetGatewayClientTlsByGatewayId(providerInputs.gatewayId as string); const [relayHost, relayPort] = relayDetails.relayAddress.split(":"); await withGatewayProxy( @@ -212,7 +261,13 @@ export const SqlDatabaseProvider = ({ gatewayService }: TSqlDatabaseProviderDTO) const providerInputs = await validateProviderInputs(inputs); let isConnected = false; const gatewayCallback = async (host = providerInputs.host, port = providerInputs.port) => { - const db = await $getClient({ ...providerInputs, port, host, hostIp: providerInputs.hostIp }); + const db = await $getClient({ + ...providerInputs, + port, + host, + hostIp: providerInputs.hostIp, + originalHost: providerInputs.host + }); // oracle needs from keyword const 
testStatement = providerInputs.client === SqlProviders.Oracle ? "SELECT 1 FROM DUAL" : "SELECT 1"; @@ -253,7 +308,12 @@ export const SqlDatabaseProvider = ({ gatewayService }: TSqlDatabaseProviderDTO) const password = generatePassword(providerInputs.client, providerInputs.passwordRequirements); const gatewayCallback = async (host = providerInputs.host, port = providerInputs.port) => { - const db = await $getClient({ ...providerInputs, port, host }); + const db = await $getClient({ + ...providerInputs, + port, + host, + originalHost: providerInputs.host + }); try { const expiration = new Date(expireAt).toISOString(); @@ -296,7 +356,12 @@ export const SqlDatabaseProvider = ({ gatewayService }: TSqlDatabaseProviderDTO) const username = entityId; const { database } = providerInputs; const gatewayCallback = async (host = providerInputs.host, port = providerInputs.port) => { - const db = await $getClient({ ...providerInputs, port, host }); + const db = await $getClient({ + ...providerInputs, + port, + host, + originalHost: providerInputs.host + }); try { const revokeStatement = handlebars.compile(providerInputs.revocationStatement)({ username, database }); const queries = revokeStatement.toString().split(";").filter(Boolean); @@ -331,7 +396,12 @@ export const SqlDatabaseProvider = ({ gatewayService }: TSqlDatabaseProviderDTO) if (!providerInputs.renewStatement) return { entityId }; const gatewayCallback = async (host = providerInputs.host, port = providerInputs.port) => { - const db = await $getClient({ ...providerInputs, port, host }); + const db = await $getClient({ + ...providerInputs, + port, + host, + originalHost: providerInputs.host + }); const expiration = new Date(expireAt).toISOString(); const { database } = providerInputs; diff --git a/backend/src/ee/services/gateway-v2/gateway-v2-constants.ts b/backend/src/ee/services/gateway-v2/gateway-v2-constants.ts new file mode 100644 index 0000000000..e67d4e8902 --- /dev/null +++ 
b/backend/src/ee/services/gateway-v2/gateway-v2-constants.ts @@ -0,0 +1,2 @@ +export const GATEWAY_ROUTING_INFO_OID = "1.3.6.1.4.1.12345.100.1"; +export const GATEWAY_ACTOR_OID = "1.3.6.1.4.1.12345.100.2"; diff --git a/backend/src/ee/services/gateway-v2/gateway-v2-dal.ts b/backend/src/ee/services/gateway-v2/gateway-v2-dal.ts new file mode 100644 index 0000000000..da9d3c1ef6 --- /dev/null +++ b/backend/src/ee/services/gateway-v2/gateway-v2-dal.ts @@ -0,0 +1,60 @@ +import { Knex } from "knex"; + +import { TDbClient } from "@app/db"; +import { GatewaysV2Schema, TableName, TGatewaysV2 } from "@app/db/schemas"; +import { DatabaseError } from "@app/lib/errors"; +import { buildFindFilter, ormify, selectAllTableCols, TFindFilter, TFindOpt } from "@app/lib/knex"; + +export type TGatewayV2DALFactory = ReturnType; + +export const gatewayV2DalFactory = (db: TDbClient) => { + const orm = ormify(db, TableName.GatewayV2); + + const find = async (filter: TFindFilter, { offset, limit, sort, tx }: TFindOpt = {}) => { + try { + const query = (tx || db.replicaNode())(TableName.GatewayV2) + // eslint-disable-next-line @typescript-eslint/no-misused-promises + .where(buildFindFilter(filter, TableName.GatewayV2)) + .join(TableName.Identity, `${TableName.Identity}.id`, `${TableName.GatewayV2}.identityId`) + .join( + TableName.IdentityOrgMembership, + `${TableName.IdentityOrgMembership}.identityId`, + `${TableName.GatewayV2}.identityId` + ) + .select(selectAllTableCols(TableName.GatewayV2)) + .select(db.ref("name").withSchema(TableName.Identity).as("identityName")); + + if (limit) void query.limit(limit); + if (offset) void query.offset(offset); + if (sort) { + void query.orderBy(sort.map(([column, order, nulls]) => ({ column: column as string, order, nulls }))); + } + + const docs = await query; + + return docs.map((el) => ({ + ...GatewaysV2Schema.parse(el), + identity: { id: el.identityId, name: el.identityName } + })); + } catch (error) { + throw new DatabaseError({ error, name: 
`${TableName.GatewayV2}: Find` }); + } + }; + + const findById = async (id: string, tx?: Knex) => { + try { + const doc = await (tx || db.replicaNode())(TableName.GatewayV2) + .join(TableName.Organization, `${TableName.GatewayV2}.orgId`, `${TableName.Organization}.id`) + .where(`${TableName.GatewayV2}.id`, id) + .select(selectAllTableCols(TableName.GatewayV2)) + .select(db.ref("name").withSchema(TableName.Organization).as("orgName")) + .first(); + + return doc; + } catch (error) { + throw new DatabaseError({ error, name: `${TableName.GatewayV2}: Find by id` }); + } + }; + + return { ...orm, find, findById }; +}; diff --git a/backend/src/ee/services/gateway-v2/gateway-v2-service.ts b/backend/src/ee/services/gateway-v2/gateway-v2-service.ts new file mode 100644 index 0000000000..317e5da6d2 --- /dev/null +++ b/backend/src/ee/services/gateway-v2/gateway-v2-service.ts @@ -0,0 +1,656 @@ +import net from "node:net"; + +import { ForbiddenError } from "@casl/ability"; +import * as x509 from "@peculiar/x509"; + +import { TRelays } from "@app/db/schemas"; +import { PgSqlLock } from "@app/keystore/keystore"; +import { crypto } from "@app/lib/crypto"; +import { DatabaseErrorCode } from "@app/lib/error-codes"; +import { BadRequestError, DatabaseError, NotFoundError } from "@app/lib/errors"; +import { GatewayProxyProtocol } from "@app/lib/gateway/types"; +import { withGatewayV2Proxy } from "@app/lib/gateway-v2/gateway-v2"; +import { OrgServiceActor } from "@app/lib/types"; +import { ActorAuthMethod, ActorType } from "@app/services/auth/auth-type"; +import { constructPemChainFromCerts } from "@app/services/certificate/certificate-fns"; +import { CertExtendedKeyUsage, CertKeyAlgorithm, CertKeyUsage } from "@app/services/certificate/certificate-types"; +import { + createSerialNumber, + keyAlgorithmToAlgCfg +} from "@app/services/certificate-authority/certificate-authority-fns"; +import { TKmsServiceFactory } from "@app/services/kms/kms-service"; +import { KmsDataKey } from 
"@app/services/kms/kms-types"; + +import { TLicenseServiceFactory } from "../license/license-service"; +import { OrgPermissionGatewayActions, OrgPermissionSubjects } from "../permission/org-permission"; +import { TPermissionServiceFactory } from "../permission/permission-service-types"; +import { TRelayDALFactory } from "../relay/relay-dal"; +import { TRelayServiceFactory } from "../relay/relay-service"; +import { GATEWAY_ACTOR_OID, GATEWAY_ROUTING_INFO_OID } from "./gateway-v2-constants"; +import { TGatewayV2DALFactory } from "./gateway-v2-dal"; +import { TOrgGatewayConfigV2DALFactory } from "./org-gateway-config-v2-dal"; + +type TGatewayV2ServiceFactoryDep = { + orgGatewayConfigV2DAL: Pick; + licenseService: Pick; + kmsService: TKmsServiceFactory; + relayService: TRelayServiceFactory; + gatewayV2DAL: TGatewayV2DALFactory; + relayDAL: TRelayDALFactory; + permissionService: TPermissionServiceFactory; +}; + +export type TGatewayV2ServiceFactory = ReturnType; + +export const gatewayV2ServiceFactory = ({ + orgGatewayConfigV2DAL, + licenseService, + kmsService, + relayService, + gatewayV2DAL, + relayDAL, + permissionService +}: TGatewayV2ServiceFactoryDep) => { + const $validateIdentityAccessToGateway = async (orgId: string, actorId: string, actorAuthMethod: ActorAuthMethod) => { + const orgLicensePlan = await licenseService.getPlan(orgId); + if (!orgLicensePlan.gateway) { + throw new BadRequestError({ + message: + "Gateway operation failed due to organization plan restrictions. Please upgrade your instance to Infisical's Enterprise plan." 
+ }); + } + + const { permission } = await permissionService.getOrgPermission( + ActorType.IDENTITY, + actorId, + orgId, + actorAuthMethod, + orgId + ); + + ForbiddenError.from(permission).throwUnlessCan( + OrgPermissionGatewayActions.CreateGateways, + OrgPermissionSubjects.Gateway + ); + }; + + const $getOrgCAs = async (orgId: string) => { + const { encryptor: orgKmsEncryptor, decryptor: orgKmsDecryptor } = await kmsService.createCipherPairWithDataKey({ + type: KmsDataKey.Organization, + orgId + }); + + const orgCAs = await orgGatewayConfigV2DAL.transaction(async (tx) => { + const orgGatewayConfigV2 = await orgGatewayConfigV2DAL.findOne({ orgId }); + if (orgGatewayConfigV2) return orgGatewayConfigV2; + + await tx.raw("SELECT pg_advisory_xact_lock(?)", [PgSqlLock.OrgGatewayV2Init(orgId)]); + + // generate root CA + const rootCaKeyAlgorithm = CertKeyAlgorithm.RSA_2048; + const alg = keyAlgorithmToAlgCfg(rootCaKeyAlgorithm); + const rootCaKeys = await crypto.nativeCrypto.subtle.generateKey(alg, true, ["sign", "verify"]); + + const rootCaSerialNumber = createSerialNumber(); + const rootCaSkObj = crypto.nativeCrypto.KeyObject.from(rootCaKeys.privateKey); + const rootCaIssuedAt = new Date(); + const rootCaExpiration = new Date(new Date().setFullYear(2045)); + + const rootCaCert = await x509.X509CertificateGenerator.createSelfSigned({ + name: `O=${orgId},CN=Infisical Gateway Root CA`, + serialNumber: rootCaSerialNumber, + notBefore: rootCaIssuedAt, + notAfter: rootCaExpiration, + signingAlgorithm: alg, + keys: rootCaKeys, + extensions: [ + // eslint-disable-next-line no-bitwise + new x509.KeyUsagesExtension(x509.KeyUsageFlags.keyCertSign | x509.KeyUsageFlags.cRLSign, true), + await x509.SubjectKeyIdentifierExtension.create(rootCaKeys.publicKey) + ] + }); + + // generate server CA + const serverCaSerialNumber = createSerialNumber(); + const serverCaIssuedAt = new Date(); + const serverCaExpiration = new Date(new Date().setFullYear(2045)); + const serverCaKeys = await 
crypto.nativeCrypto.subtle.generateKey(alg, true, ["sign", "verify"]); + const serverCaSkObj = crypto.nativeCrypto.KeyObject.from(serverCaKeys.privateKey); + const serverCaCert = await x509.X509CertificateGenerator.create({ + serialNumber: serverCaSerialNumber, + subject: `O=${orgId},CN=Infisical Gateway Server CA`, + issuer: rootCaCert.subject, + notBefore: serverCaIssuedAt, + notAfter: serverCaExpiration, + signingKey: rootCaKeys.privateKey, + publicKey: serverCaKeys.publicKey, + signingAlgorithm: alg, + extensions: [ + new x509.KeyUsagesExtension( + // eslint-disable-next-line no-bitwise + x509.KeyUsageFlags.keyCertSign | + x509.KeyUsageFlags.cRLSign | + x509.KeyUsageFlags.digitalSignature | + x509.KeyUsageFlags.keyEncipherment, + true + ), + new x509.BasicConstraintsExtension(true, 0, true), + await x509.AuthorityKeyIdentifierExtension.create(rootCaCert, false), + await x509.SubjectKeyIdentifierExtension.create(serverCaKeys.publicKey) + ] + }); + + // generate client CA + const clientCaSerialNumber = createSerialNumber(); + const clientCaIssuedAt = new Date(); + const clientCaExpiration = new Date(new Date().setFullYear(2045)); + const clientCaKeys = await crypto.nativeCrypto.subtle.generateKey(alg, true, ["sign", "verify"]); + const clientCaSkObj = crypto.nativeCrypto.KeyObject.from(clientCaKeys.privateKey); + const clientCaCert = await x509.X509CertificateGenerator.create({ + serialNumber: clientCaSerialNumber, + subject: `O=${orgId},CN=Infisical Gateway Client CA`, + issuer: rootCaCert.subject, + notBefore: clientCaIssuedAt, + notAfter: clientCaExpiration, + signingKey: rootCaKeys.privateKey, + publicKey: clientCaKeys.publicKey, + signingAlgorithm: alg, + extensions: [ + new x509.KeyUsagesExtension( + // eslint-disable-next-line no-bitwise + x509.KeyUsageFlags.keyCertSign | + x509.KeyUsageFlags.cRLSign | + x509.KeyUsageFlags.digitalSignature | + x509.KeyUsageFlags.keyEncipherment, + true + ), + new x509.BasicConstraintsExtension(true, 0, true), + await 
x509.AuthorityKeyIdentifierExtension.create(rootCaCert, false), + await x509.SubjectKeyIdentifierExtension.create(clientCaKeys.publicKey) + ] + }); + + const encryptedRootGatewayCaPrivateKey = orgKmsEncryptor({ + plainText: Buffer.from( + rootCaSkObj.export({ + type: "pkcs8", + format: "der" + }) + ) + }).cipherTextBlob; + const encryptedRootGatewayCaCertificate = orgKmsEncryptor({ + plainText: Buffer.from(rootCaCert.rawData) + }).cipherTextBlob; + + const encryptedGatewayServerCaPrivateKey = orgKmsEncryptor({ + plainText: Buffer.from(serverCaSkObj.export({ type: "pkcs8", format: "der" })) + }).cipherTextBlob; + const encryptedGatewayServerCaCertificate = orgKmsEncryptor({ + plainText: Buffer.from(serverCaCert.rawData) + }).cipherTextBlob; + const encryptedGatewayServerCaCertificateChain = orgKmsEncryptor({ + plainText: Buffer.from(constructPemChainFromCerts([rootCaCert])) + }).cipherTextBlob; + + const encryptedGatewayClientCaPrivateKey = orgKmsEncryptor({ + plainText: Buffer.from(clientCaSkObj.export({ type: "pkcs8", format: "der" })) + }).cipherTextBlob; + const encryptedGatewayClientCaCertificate = orgKmsEncryptor({ + plainText: Buffer.from(clientCaCert.rawData) + }).cipherTextBlob; + const encryptedGatewayClientCaCertificateChain = orgKmsEncryptor({ + plainText: Buffer.from(constructPemChainFromCerts([rootCaCert])) + }).cipherTextBlob; + + return orgGatewayConfigV2DAL.create({ + orgId, + encryptedRootGatewayCaPrivateKey, + encryptedRootGatewayCaCertificate, + encryptedGatewayServerCaPrivateKey, + encryptedGatewayServerCaCertificate, + encryptedGatewayServerCaCertificateChain, + encryptedGatewayClientCaPrivateKey, + encryptedGatewayClientCaCertificate, + encryptedGatewayClientCaCertificateChain + }); + }); + + const rootGatewayCaPrivateKey = orgKmsDecryptor({ cipherTextBlob: orgCAs.encryptedRootGatewayCaPrivateKey }); + const rootGatewayCaCertificate = orgKmsDecryptor({ cipherTextBlob: orgCAs.encryptedRootGatewayCaCertificate }); + + const 
gatewayServerCaPrivateKey = orgKmsDecryptor({ cipherTextBlob: orgCAs.encryptedGatewayServerCaPrivateKey }); + const gatewayServerCaCertificate = orgKmsDecryptor({ cipherTextBlob: orgCAs.encryptedGatewayServerCaCertificate }); + const gatewayServerCaCertificateChain = orgKmsDecryptor({ + cipherTextBlob: orgCAs.encryptedGatewayServerCaCertificateChain + }); + + const gatewayClientCaPrivateKey = orgKmsDecryptor({ cipherTextBlob: orgCAs.encryptedGatewayClientCaPrivateKey }); + const gatewayClientCaCertificate = orgKmsDecryptor({ + cipherTextBlob: orgCAs.encryptedGatewayClientCaCertificate + }); + const gatewayClientCaCertificateChain = orgKmsDecryptor({ + cipherTextBlob: orgCAs.encryptedGatewayClientCaCertificateChain + }); + + return { + rootGatewayCaPrivateKey, + rootGatewayCaCertificate, + gatewayServerCaPrivateKey, + gatewayServerCaCertificate, + gatewayServerCaCertificateChain, + gatewayClientCaPrivateKey, + gatewayClientCaCertificate, + gatewayClientCaCertificateChain + }; + }; + + const listGateways = async ({ orgPermission }: { orgPermission: OrgServiceActor }) => { + const { permission } = await permissionService.getOrgPermission( + orgPermission.type, + orgPermission.id, + orgPermission.orgId, + orgPermission.authMethod, + orgPermission.orgId + ); + + ForbiddenError.from(permission).throwUnlessCan( + OrgPermissionGatewayActions.ListGateways, + OrgPermissionSubjects.Gateway + ); + + const gateways = await gatewayV2DAL.find({ + orgId: orgPermission.orgId + }); + + return gateways; + }; + + const getPlatformConnectionDetailsByGatewayId = async ({ + gatewayId, + targetHost, + targetPort + }: { + gatewayId: string; + targetHost: string; + targetPort: number; + }) => { + const gateway = await gatewayV2DAL.findById(gatewayId); + if (!gateway) { + return; + } + + const orgGatewayConfig = await orgGatewayConfigV2DAL.findOne({ orgId: gateway.orgId }); + if (!orgGatewayConfig) { + throw new NotFoundError({ message: `Gateway Config for org ${gateway.orgId} not found.` 
}); + } + + if (!gateway.relayId) { + throw new BadRequestError({ + message: "Gateway is not associated with a relay" + }); + } + + const orgLicensePlan = await licenseService.getPlan(orgGatewayConfig.orgId); + if (!orgLicensePlan.gateway) { + throw new BadRequestError({ + message: "Please upgrade your instance to Infisical's Enterprise plan to use gateways." + }); + } + + const { decryptor: orgKmsDecryptor } = await kmsService.createCipherPairWithDataKey({ + type: KmsDataKey.Organization, + orgId: orgGatewayConfig.orgId + }); + + const alg = keyAlgorithmToAlgCfg(CertKeyAlgorithm.RSA_2048); + + const rootGatewayCaCert = new x509.X509Certificate( + orgKmsDecryptor({ + cipherTextBlob: orgGatewayConfig.encryptedRootGatewayCaCertificate + }) + ); + + const gatewayClientCaCert = new x509.X509Certificate( + orgKmsDecryptor({ + cipherTextBlob: orgGatewayConfig.encryptedGatewayClientCaCertificate + }) + ); + + const gatewayServerCaCert = new x509.X509Certificate( + orgKmsDecryptor({ + cipherTextBlob: orgGatewayConfig.encryptedGatewayServerCaCertificate + }) + ); + + const gatewayClientCaPrivateKey = orgKmsDecryptor({ + cipherTextBlob: orgGatewayConfig.encryptedGatewayClientCaPrivateKey + }); + + const gatewayClientCaSkObj = crypto.nativeCrypto.createPrivateKey({ + key: gatewayClientCaPrivateKey, + format: "der", + type: "pkcs8" + }); + + const importedGatewayClientCaPrivateKey = await crypto.nativeCrypto.subtle.importKey( + "pkcs8", + gatewayClientCaSkObj.export({ format: "der", type: "pkcs8" }), + alg, + true, + ["sign"] + ); + + const clientCertIssuedAt = new Date(); + const clientCertExpiration = new Date(new Date().getTime() + 5 * 60 * 1000); + const clientKeys = await crypto.nativeCrypto.subtle.generateKey(alg, true, ["sign", "verify"]); + const clientCertSerialNumber = createSerialNumber(); + + const routingInfo = { + targetHost, + targetPort + }; + + const routingExtension = new x509.Extension( + GATEWAY_ROUTING_INFO_OID, + false, + 
Buffer.from(JSON.stringify(routingInfo)) + ); + + const actorExtension = new x509.Extension( + GATEWAY_ACTOR_OID, + false, + Buffer.from(JSON.stringify({ type: ActorType.PLATFORM })) + ); + + const clientCert = await x509.X509CertificateGenerator.create({ + serialNumber: clientCertSerialNumber, + subject: `O=${orgGatewayConfig.orgId},OU=gateway-client,CN=${ActorType.PLATFORM}:${gatewayId}`, + issuer: gatewayClientCaCert.subject, + notAfter: clientCertExpiration, + notBefore: clientCertIssuedAt, + signingKey: importedGatewayClientCaPrivateKey, + publicKey: clientKeys.publicKey, + signingAlgorithm: alg, + extensions: [ + new x509.BasicConstraintsExtension(false), + await x509.AuthorityKeyIdentifierExtension.create(gatewayClientCaCert, false), + await x509.SubjectKeyIdentifierExtension.create(clientKeys.publicKey), + new x509.CertificatePolicyExtension(["2.5.29.32.0"]), // anyPolicy + new x509.KeyUsagesExtension( + // eslint-disable-next-line no-bitwise + x509.KeyUsageFlags[CertKeyUsage.DIGITAL_SIGNATURE] | + x509.KeyUsageFlags[CertKeyUsage.KEY_ENCIPHERMENT] | + x509.KeyUsageFlags[CertKeyUsage.KEY_AGREEMENT], + true + ), + new x509.ExtendedKeyUsageExtension([x509.ExtendedKeyUsage[CertExtendedKeyUsage.CLIENT_AUTH]], true), + routingExtension, + actorExtension + ] + }); + + const gatewayClientCertPrivateKey = crypto.nativeCrypto.KeyObject.from(clientKeys.privateKey); + + const relayCredentials = await relayService.getCredentialsForClient({ + relayId: gateway.relayId, + orgId: gateway.orgId, + orgName: gateway.orgName, + gatewayId + }); + + return { + relayHost: relayCredentials.relayHost, + gateway: { + clientCertificate: clientCert.toString("pem"), + clientPrivateKey: gatewayClientCertPrivateKey.export({ format: "pem", type: "pkcs8" }).toString(), + serverCertificateChain: constructPemChainFromCerts([gatewayServerCaCert, rootGatewayCaCert]) + }, + relay: { + clientCertificate: relayCredentials.clientCertificate, + clientPrivateKey: relayCredentials.clientPrivateKey, + 
serverCertificateChain: relayCredentials.serverCertificateChain + } + }; + }; + + const registerGateway = async ({ + orgId, + actorId, + actorAuthMethod, + relayName, + name + }: { + orgId: string; + actorId: string; + actorAuthMethod: ActorAuthMethod; + relayName: string; + name: string; + }) => { + await $validateIdentityAccessToGateway(orgId, actorId, actorAuthMethod); + const orgCAs = await $getOrgCAs(orgId); + + let relay: TRelays = await relayDAL.findOne({ orgId, name: relayName }); + if (!relay) { + relay = await relayDAL.findOne({ name: relayName, orgId: null }); + } + + if (!relay) { + throw new NotFoundError({ message: `Relay ${relayName} not found` }); + } + + try { + const [gateway] = await gatewayV2DAL.upsert( + [ + { + orgId, + name, + identityId: actorId, + relayId: relay.id + } + ], + ["identityId"] + ); + + const alg = keyAlgorithmToAlgCfg(CertKeyAlgorithm.RSA_2048); + const gatewayServerCaCert = new x509.X509Certificate(orgCAs.gatewayServerCaCertificate); + const rootGatewayCaCert = new x509.X509Certificate(orgCAs.rootGatewayCaCertificate); + const gatewayClientCaCert = new x509.X509Certificate(orgCAs.gatewayClientCaCertificate); + + const gatewayServerCaSkObj = crypto.nativeCrypto.createPrivateKey({ + key: orgCAs.gatewayServerCaPrivateKey, + format: "der", + type: "pkcs8" + }); + const gatewayServerCaPrivateKey = await crypto.nativeCrypto.subtle.importKey( + "pkcs8", + gatewayServerCaSkObj.export({ format: "der", type: "pkcs8" }), + alg, + true, + ["sign"] + ); + + const gatewayServerKeys = await crypto.nativeCrypto.subtle.generateKey(alg, true, ["sign", "verify"]); + const gatewayServerCertIssuedAt = new Date(); + const gatewayServerCertExpireAt = new Date(new Date().setDate(new Date().getDate() + 1)); + const gatewayServerCertPrivateKey = crypto.nativeCrypto.KeyObject.from(gatewayServerKeys.privateKey); + + const gatewayServerCertExtensions: x509.Extension[] = [ + new x509.BasicConstraintsExtension(false), + await 
x509.AuthorityKeyIdentifierExtension.create(gatewayServerCaCert, false), + await x509.SubjectKeyIdentifierExtension.create(gatewayServerKeys.publicKey), + new x509.CertificatePolicyExtension(["2.5.29.32.0"]), // anyPolicy + new x509.KeyUsagesExtension( + // eslint-disable-next-line no-bitwise + x509.KeyUsageFlags[CertKeyUsage.DIGITAL_SIGNATURE] | x509.KeyUsageFlags[CertKeyUsage.KEY_ENCIPHERMENT], + true + ), + new x509.ExtendedKeyUsageExtension([x509.ExtendedKeyUsage[CertExtendedKeyUsage.SERVER_AUTH]], true), + new x509.SubjectAlternativeNameExtension([ + { type: "dns", value: "localhost" }, + { type: "ip", value: "127.0.0.1" }, + { type: "ip", value: "::1" } + ]) + ]; + + const gatewayServerSerialNumber = createSerialNumber(); + const gatewayServerCertificate = await x509.X509CertificateGenerator.create({ + serialNumber: gatewayServerSerialNumber, + subject: `O=${orgId},CN=Gateway`, + issuer: gatewayServerCaCert.subject, + notBefore: gatewayServerCertIssuedAt, + notAfter: gatewayServerCertExpireAt, + signingKey: gatewayServerCaPrivateKey, + publicKey: gatewayServerKeys.publicKey, + signingAlgorithm: alg, + extensions: gatewayServerCertExtensions + }); + + const relayCredentials = await relayService.getCredentialsForGateway({ + relayName, + orgId, + gatewayId: gateway.id + }); + + return { + gatewayId: gateway.id, + relayHost: relayCredentials.relayHost, + pki: { + serverCertificate: gatewayServerCertificate.toString("pem"), + serverPrivateKey: gatewayServerCertPrivateKey.export({ format: "pem", type: "pkcs8" }).toString(), + clientCertificateChain: constructPemChainFromCerts([gatewayClientCaCert, rootGatewayCaCert]) + }, + ssh: { + clientCertificate: relayCredentials.clientSshCert, + clientPrivateKey: relayCredentials.clientSshPrivateKey, + serverCAPublicKey: relayCredentials.serverCAPublicKey + } + }; + } catch (err) { + if (err instanceof DatabaseError && (err.error as { code: string })?.code === DatabaseErrorCode.UniqueViolation) { + throw new BadRequestError({ 
message: `Gateway with name "${name}" already exists` }); + } + + throw err; + } + }; + + const heartbeat = async ({ orgPermission }: { orgPermission: OrgServiceActor }) => { + await $validateIdentityAccessToGateway(orgPermission.orgId, orgPermission.id, orgPermission.authMethod); + + const gateway = await gatewayV2DAL.findOne({ + orgId: orgPermission.orgId, + identityId: orgPermission.id + }); + + if (!gateway) { + throw new NotFoundError({ message: `Gateway for identity ${orgPermission.id} not found.` }); + } + + const gatewayV2ConnectionDetails = await getPlatformConnectionDetailsByGatewayId({ + gatewayId: gateway.id, + targetHost: "health-check", + targetPort: 443 + }); + + if (!gatewayV2ConnectionDetails) { + throw new NotFoundError({ message: `Gateway connection details for gateway ${gateway.id} not found.` }); + } + + const isGatewayReachable = await withGatewayV2Proxy( + async (port) => { + return new Promise((resolve, reject) => { + const socket = new net.Socket(); + let responseReceived = false; + let isResolved = false; + + // Set socket timeout + socket.setTimeout(10000); + + const cleanup = () => { + if (!socket.destroyed) { + socket.destroy(); + } + }; + + socket.on("data", (data: Buffer) => { + const response = data.toString().trim(); + if (response === "PONG" && !isResolved) { + isResolved = true; + responseReceived = true; + cleanup(); + resolve(true); + } + }); + + socket.on("error", (err: Error) => { + if (!isResolved) { + isResolved = true; + cleanup(); + reject(new Error(`TCP connection error: ${err.message}`)); + } + }); + + socket.on("timeout", () => { + if (!isResolved) { + isResolved = true; + cleanup(); + reject(new Error("TCP connection timeout")); + } + }); + + socket.on("close", () => { + if (!isResolved && !responseReceived) { + isResolved = true; + cleanup(); + reject(new Error("Connection closed without receiving PONG")); + } + }); + + socket.connect(port, "localhost"); + }); + }, + { + protocol: GatewayProxyProtocol.Ping, + 
relayHost: gatewayV2ConnectionDetails.relayHost, + gateway: gatewayV2ConnectionDetails.gateway, + relay: gatewayV2ConnectionDetails.relay + } + ); + + if (!isGatewayReachable) { + throw new BadRequestError({ message: `Gateway ${gateway.id} is not reachable` }); + } + + await gatewayV2DAL.updateById(gateway.id, { heartbeat: new Date() }); + }; + + const deleteGatewayById = async ({ orgPermission, id }: { orgPermission: OrgServiceActor; id: string }) => { + const gateway = await gatewayV2DAL.findOne({ id, orgId: orgPermission.orgId }); + if (!gateway) { + throw new NotFoundError({ message: `Gateway ${id} not found` }); + } + + const { permission } = await permissionService.getOrgPermission( + orgPermission.type, + orgPermission.id, + gateway.orgId, + orgPermission.authMethod, + orgPermission.orgId + ); + + ForbiddenError.from(permission).throwUnlessCan( + OrgPermissionGatewayActions.DeleteGateways, + OrgPermissionSubjects.Gateway + ); + + return gatewayV2DAL.deleteById(gateway.id); + }; + + return { + listGateways, + registerGateway, + getPlatformConnectionDetailsByGatewayId, + deleteGatewayById, + heartbeat + }; +}; diff --git a/backend/src/ee/services/gateway-v2/org-gateway-config-v2-dal.ts b/backend/src/ee/services/gateway-v2/org-gateway-config-v2-dal.ts new file mode 100644 index 0000000000..8f16d798a6 --- /dev/null +++ b/backend/src/ee/services/gateway-v2/org-gateway-config-v2-dal.ts @@ -0,0 +1,11 @@ +import { TDbClient } from "@app/db"; +import { TableName } from "@app/db/schemas"; +import { ormify } from "@app/lib/knex"; + +export type TOrgGatewayConfigV2DALFactory = ReturnType; + +export const orgGatewayConfigV2DalFactory = (db: TDbClient) => { + const orm = ormify(db, TableName.OrgGatewayConfigV2); + + return orm; +}; diff --git a/backend/src/ee/services/gateway/gateway-dal.ts b/backend/src/ee/services/gateway/gateway-dal.ts index 31b4b727b9..c21ff31c01 100644 --- a/backend/src/ee/services/gateway/gateway-dal.ts +++ 
b/backend/src/ee/services/gateway/gateway-dal.ts @@ -13,7 +13,7 @@ export const gatewayDALFactory = (db: TDbClient) => { { offset, limit, sort, tx }: TFindOpt = {} ) => { try { - const query = (tx || db)(TableName.Gateway) + const query = (tx || db.replicaNode())(TableName.Gateway) // eslint-disable-next-line @typescript-eslint/no-misused-promises .where(buildFindFilter(filter, TableName.Gateway, ["orgId"])) .join(TableName.Identity, `${TableName.Identity}.id`, `${TableName.Gateway}.identityId`) diff --git a/backend/src/ee/services/group/user-group-membership-dal.ts b/backend/src/ee/services/group/user-group-membership-dal.ts index 5ee97e4573..374459b0c9 100644 --- a/backend/src/ee/services/group/user-group-membership-dal.ts +++ b/backend/src/ee/services/group/user-group-membership-dal.ts @@ -23,7 +23,7 @@ export const userGroupMembershipDALFactory = (db: TDbClient) => { .whereIn(`${TableName.ProjectMembership}.projectId`, projectIds) .pluck(`${TableName.ProjectMembership}.projectId`); - const userGroupMemberships: string[] = await (tx || db)(TableName.UserGroupMembership) + const userGroupMemberships: string[] = await (tx || db.replicaNode())(TableName.UserGroupMembership) .where(`${TableName.UserGroupMembership}.userId`, userId) .whereNot(`${TableName.UserGroupMembership}.groupId`, groupId) .join( @@ -79,7 +79,7 @@ export const userGroupMembershipDALFactory = (db: TDbClient) => { .pluck(`${TableName.GroupProjectMembership}.groupId`); // main query - const members = await (tx || db)(TableName.UserGroupMembership) + const members = await (tx || db.replicaNode())(TableName.UserGroupMembership) .where(`${TableName.UserGroupMembership}.groupId`, groupId) .where(`${TableName.UserGroupMembership}.isPending`, false) .join(TableName.Users, `${TableName.UserGroupMembership}.userId`, `${TableName.Users}.id`) diff --git a/backend/src/ee/services/ldap-config/ldap-config-service.ts b/backend/src/ee/services/ldap-config/ldap-config-service.ts index a72d507607..8643ecdac4 100644 
--- a/backend/src/ee/services/ldap-config/ldap-config-service.ts +++ b/backend/src/ee/services/ldap-config/ldap-config-service.ts @@ -5,6 +5,7 @@ import { OrgMembershipStatus, TableName, TLdapConfigsUpdate, TUsers } from "@app import { TGroupDALFactory } from "@app/ee/services/group/group-dal"; import { addUsersToGroupByUserIds, removeUsersFromGroupByUserIds } from "@app/ee/services/group/group-fns"; import { TUserGroupMembershipDALFactory } from "@app/ee/services/group/user-group-membership-dal"; +import { throwOnPlanSeatLimitReached } from "@app/ee/services/license/license-fns"; import { getConfig } from "@app/lib/config/env"; import { crypto } from "@app/lib/crypto"; import { BadRequestError, ForbiddenRequestError, NotFoundError } from "@app/lib/errors"; @@ -127,6 +128,20 @@ export const ldapConfigServiceFactory = ({ message: "Failed to create LDAP configuration due to plan restriction. Upgrade plan to create LDAP configuration." }); + + const org = await orgDAL.findOrgById(orgId); + + if (!org) { + throw new NotFoundError({ message: `Could not find organization with ID "${orgId}"` }); + } + + if (org.googleSsoAuthEnforced && isActive) { + throw new BadRequestError({ + message: + "You cannot enable LDAP SSO while Google OAuth is enforced. Disable Google OAuth enforcement to enable LDAP SSO." + }); + } + const { encryptor } = await kmsService.createCipherPairWithDataKey({ type: KmsDataKey.Organization, orgId @@ -233,6 +248,19 @@ export const ldapConfigServiceFactory = ({ "Failed to update LDAP configuration due to plan restriction. Upgrade plan to update LDAP configuration." }); + const org = await orgDAL.findOrgById(orgId); + + if (!org) { + throw new NotFoundError({ message: `Could not find organization with ID "${orgId}"` }); + } + + if (org.googleSsoAuthEnforced && isActive) { + throw new BadRequestError({ + message: + "You cannot enable LDAP SSO while Google OAuth is enforced. Disable Google OAuth enforcement to enable LDAP SSO." 
+ }); + } + const updateQuery: TLdapConfigsUpdate = { isActive, url, @@ -390,14 +418,6 @@ export const ldapConfigServiceFactory = ({ } }); } else { - const plan = await licenseService.getPlan(orgId); - if (plan?.slug !== "enterprise" && plan?.identityLimit && plan.identitiesUsed >= plan.identityLimit) { - // limit imposed on number of identities allowed / number of identities used exceeds the number of identities allowed - throw new BadRequestError({ - message: "Failed to create new member via LDAP due to member limit reached. Upgrade plan to add more members." - }); - } - userAlias = await userDAL.transaction(async (tx) => { let newUser: TUsers | undefined; newUser = await userDAL.findOne( @@ -446,6 +466,8 @@ export const ldapConfigServiceFactory = ({ ); if (!orgMembership) { + await throwOnPlanSeatLimitReached(licenseService, orgId, UserAliasType.LDAP); + const { role, roleId } = await getDefaultOrgMembershipRole(organization.defaultMembershipRole); await orgMembershipDAL.create( diff --git a/backend/src/ee/services/license/license-dal.ts b/backend/src/ee/services/license/license-dal.ts index 88a2dadf66..cfea2573d1 100644 --- a/backend/src/ee/services/license/license-dal.ts +++ b/backend/src/ee/services/license/license-dal.ts @@ -28,7 +28,7 @@ export const licenseDALFactory = (db: TDbClient) => { const countOrgUsersAndIdentities = async (orgId: string | null, tx?: Knex) => { try { // count org users - const userDoc = await (tx || db)(TableName.OrgMembership) + const userDoc = await (tx || db.replicaNode())(TableName.OrgMembership) .where({ status: OrgMembershipStatus.Accepted }) .andWhere((bd) => { if (orgId) { @@ -42,7 +42,7 @@ export const licenseDALFactory = (db: TDbClient) => { const userCount = Number(userDoc?.[0].count); // count org identities - const identityDoc = await (tx || db)(TableName.IdentityOrgMembership) + const identityDoc = await (tx || db.replicaNode())(TableName.IdentityOrgMembership) .where((bd) => { if (orgId) { void bd.where({ orgId }); 
diff --git a/backend/src/ee/services/license/license-fns.ts b/backend/src/ee/services/license/license-fns.ts index 8d2d6fdbe8..a302e956a1 100644 --- a/backend/src/ee/services/license/license-fns.ts +++ b/backend/src/ee/services/license/license-fns.ts @@ -1,8 +1,11 @@ import axios, { AxiosError } from "axios"; +import { TLicenseServiceFactory } from "@app/ee/services/license/license-service"; import { getConfig } from "@app/lib/config/env"; import { request } from "@app/lib/config/request"; +import { BadRequestError } from "@app/lib/errors"; import { logger } from "@app/lib/logger"; +import { UserAliasType } from "@app/services/user-alias/user-alias-types"; import { TFeatureSet } from "./license-types"; @@ -133,3 +136,18 @@ export const setupLicenseRequestWithStore = ( return { request: licenseReq, refreshLicense }; }; + +export const throwOnPlanSeatLimitReached = async ( + licenseService: Pick, + orgId: string, + type?: UserAliasType +) => { + const plan = await licenseService.getPlan(orgId); + + if (plan?.slug !== "enterprise" && plan?.identityLimit && plan.identitiesUsed >= plan.identityLimit) { + // limit imposed on number of identities allowed / number of identities used exceeds the number of identities allowed + throw new BadRequestError({ + message: `Failed to create new member${type ? ` via ${type.toUpperCase()}` : ""} due to member limit reached. 
Upgrade plan to add more members.` + }); + } +}; diff --git a/backend/src/ee/services/license/license-service.ts b/backend/src/ee/services/license/license-service.ts index a3327b264f..a5353b7e5b 100644 --- a/backend/src/ee/services/license/license-service.ts +++ b/backend/src/ee/services/license/license-service.ts @@ -99,6 +99,17 @@ export const licenseServiceFactory = ({ const workspacesUsed = await projectDAL.countOfOrgProjects(null); currentPlan.workspacesUsed = workspacesUsed; + const usedIdentitySeats = await licenseDAL.countOrgUsersAndIdentities(null); + if (usedIdentitySeats !== currentPlan.identitiesUsed) { + const usedSeats = await licenseDAL.countOfOrgMembers(null); + await licenseServerOnPremApi.request.patch(`/api/license/v1/license`, { + usedSeats, + usedIdentitySeats + }); + currentPlan.identitiesUsed = usedIdentitySeats; + currentPlan.membersUsed = usedSeats; + } + onPremFeatures = currentPlan; logger.info("Successfully synchronized license key features"); } catch (error) { @@ -226,10 +237,13 @@ export const licenseServiceFactory = ({ }; const refreshPlan = async (orgId: string) => { + await keyStore.deleteItem(FEATURE_CACHE_KEY(orgId)); if (instanceType === InstanceType.Cloud) { - await keyStore.deleteItem(FEATURE_CACHE_KEY(orgId)); await getPlan(orgId); } + if (instanceType === InstanceType.EnterpriseOnPrem) { + await syncLicenseKeyOnPremFeatures(true); + } }; const generateOrgCustomerId = async (orgName: string, email?: string | null) => { @@ -296,8 +310,19 @@ export const licenseServiceFactory = ({ return data; }; - const getOrgPlan = async ({ orgId, actor, actorId, actorOrgId, actorAuthMethod, projectId }: TOrgPlanDTO) => { + const getOrgPlan = async ({ + orgId, + actor, + actorId, + actorOrgId, + actorAuthMethod, + projectId, + refreshCache + }: TOrgPlanDTO) => { await permissionService.getOrgPermission(actor, actorId, orgId, actorAuthMethod, actorOrgId); + if (refreshCache) { + await refreshPlan(orgId); + } const plan = await getPlan(orgId, 
projectId); return plan; }; @@ -417,6 +442,62 @@ export const licenseServiceFactory = ({ }; }; + const calculateUsageValue = ( + rowName: string, + field: string, + projectCount: number, + totalIdentities: number + ): string => { + if (rowName === BillingPlanRows.WorkspaceLimit.name || field === BillingPlanRows.WorkspaceLimit.field) { + return projectCount.toString(); + } + if (rowName === BillingPlanRows.IdentityLimit.name || field === BillingPlanRows.IdentityLimit.field) { + return totalIdentities.toString(); + } + return "-"; + }; + + const fetchPlanTableFromServer = async (customerId: string | null | undefined) => { + if (!customerId) { + throw new NotFoundError({ message: "Organization customer ID is required for plan table retrieval" }); + } + + const baseUrl = `/api/license-server/v1/customers/${customerId}`; + + if (instanceType === InstanceType.Cloud) { + const { data } = await licenseServerCloudApi.request.get<{ + head: { name: string }[]; + rows: { name: string; allowed: boolean }[]; + }>(`${baseUrl}/cloud-plan/table`); + return data; + } + + if (instanceType === InstanceType.EnterpriseOnPrem) { + const { data } = await licenseServerOnPremApi.request.get<{ + head: { name: string }[]; + rows: { name: string; allowed: boolean }[]; + }>(`${baseUrl}/on-prem-plan/table`); + return data; + } + + throw new Error(`Unsupported instance type for server-based plan table: ${instanceType}`); + }; + + const getUsageMetrics = async (orgId: string) => { + const [orgMembersUsed, identityUsed, projectCount] = await Promise.all([ + orgDAL.countAllOrgMembers(orgId), + identityOrgMembershipDAL.countAllOrgIdentities({ orgId }), + projectDAL.countOfOrgProjects(orgId) + ]); + + return { + orgMembersUsed, + identityUsed, + projectCount, + totalIdentities: identityUsed + orgMembersUsed + }; + }; + // returns org current plan feature table const getOrgPlanTable = async ({ orgId, actor, actorId, actorAuthMethod, actorOrgId }: TGetOrgBillInfoDTO) => { const { permission } = await 
permissionService.getOrgPermission(actor, actorId, orgId, actorAuthMethod, actorOrgId); @@ -429,55 +510,25 @@ export const licenseServiceFactory = ({ }); } - const orgMembersUsed = await orgDAL.countAllOrgMembers(orgId); - const identityUsed = await identityOrgMembershipDAL.countAllOrgIdentities({ orgId }); - const projects = await projectDAL.find({ orgId }); - const projectCount = projects.length; + const { projectCount, totalIdentities } = await getUsageMetrics(orgId); - if (instanceType === InstanceType.Cloud) { - const { data } = await licenseServerCloudApi.request.get<{ - head: { name: string }[]; - rows: { name: string; allowed: boolean }[]; - }>(`/api/license-server/v1/customers/${organization.customerId}/cloud-plan/table`); + if (instanceType === InstanceType.Cloud || instanceType === InstanceType.EnterpriseOnPrem) { + const tableResponse = await fetchPlanTableFromServer(organization.customerId); - const formattedData = { - head: data.head, - rows: data.rows.map((el) => { - let used = "-"; - - if (el.name === BillingPlanRows.WorkspaceLimit.name) { - used = projectCount.toString(); - } else if (el.name === BillingPlanRows.IdentityLimit.name) { - used = (identityUsed + orgMembersUsed).toString(); - } - - return { - ...el, - used - }; - }) + return { + head: tableResponse.head, + rows: tableResponse.rows.map((row) => ({ + ...row, + used: calculateUsageValue(row.name, "", projectCount, totalIdentities) + })) }; - return formattedData; } - const mappedRows = await Promise.all( - Object.values(BillingPlanRows).map(async ({ name, field }: { name: string; field: string }) => { - const allowed = onPremFeatures[field as keyof TFeatureSet]; - let used = "-"; - - if (field === BillingPlanRows.WorkspaceLimit.field) { - used = projectCount.toString(); - } else if (field === BillingPlanRows.IdentityLimit.field) { - used = (identityUsed + orgMembersUsed).toString(); - } - - return { - name, - allowed, - used - }; - }) - ); + const mappedRows = 
Object.values(BillingPlanRows).map(({ name, field }) => ({ + name, + allowed: onPremFeatures[field as keyof TFeatureSet] || false, + used: calculateUsageValue(name, field, projectCount, totalIdentities) + })); return { head: Object.values(BillingPlanTableHead), diff --git a/backend/src/ee/services/license/license-types.ts b/backend/src/ee/services/license/license-types.ts index 345e266380..2ccd3ac8fc 100644 --- a/backend/src/ee/services/license/license-types.ts +++ b/backend/src/ee/services/license/license-types.ts @@ -87,6 +87,7 @@ export type TOrgPlansTableDTO = { export type TOrgPlanDTO = { projectId?: string; + refreshCache?: boolean; } & TOrgPermission; export type TStartOrgTrialDTO = { diff --git a/backend/src/ee/services/oidc/oidc-config-service.ts b/backend/src/ee/services/oidc/oidc-config-service.ts index 8f479b12c4..445ace2b5e 100644 --- a/backend/src/ee/services/oidc/oidc-config-service.ts +++ b/backend/src/ee/services/oidc/oidc-config-service.ts @@ -8,6 +8,7 @@ import { EventType, TAuditLogServiceFactory } from "@app/ee/services/audit-log/a import { TGroupDALFactory } from "@app/ee/services/group/group-dal"; import { addUsersToGroupByUserIds, removeUsersFromGroupByUserIds } from "@app/ee/services/group/group-fns"; import { TUserGroupMembershipDALFactory } from "@app/ee/services/group/user-group-membership-dal"; +import { throwOnPlanSeatLimitReached } from "@app/ee/services/license/license-fns"; import { TLicenseServiceFactory } from "@app/ee/services/license/license-service"; import { OrgPermissionActions, OrgPermissionSubjects } from "@app/ee/services/permission/org-permission"; import { TPermissionServiceFactory } from "@app/ee/services/permission/permission-service-types"; @@ -294,6 +295,8 @@ export const oidcConfigServiceFactory = ({ ); if (!orgMembership) { + await throwOnPlanSeatLimitReached(licenseService, orgId, UserAliasType.OIDC); + const { role, roleId } = await getDefaultOrgMembershipRole(organization.defaultMembershipRole); await 
orgMembershipDAL.create( @@ -499,6 +502,13 @@ export const oidcConfigServiceFactory = ({ ); ForbiddenError.from(permission).throwUnlessCan(OrgPermissionActions.Edit, OrgPermissionSubjects.Sso); + if (org.googleSsoAuthEnforced && isActive) { + throw new BadRequestError({ + message: + "You cannot enable OIDC SSO while Google OAuth is enforced. Disable Google OAuth enforcement to enable OIDC SSO." + }); + } + const { encryptor } = await kmsService.createCipherPairWithDataKey({ type: KmsDataKey.Organization, orgId: org.id @@ -586,6 +596,13 @@ export const oidcConfigServiceFactory = ({ ); ForbiddenError.from(permission).throwUnlessCan(OrgPermissionActions.Create, OrgPermissionSubjects.Sso); + if (org.googleSsoAuthEnforced && isActive) { + throw new BadRequestError({ + message: + "You cannot enable OIDC SSO while Google OAuth is enforced. Disable Google OAuth enforcement to enable OIDC SSO." + }); + } + const { encryptor } = await kmsService.createCipherPairWithDataKey({ type: KmsDataKey.Organization, orgId: org.id diff --git a/backend/src/ee/services/relay/instance-relay-config-dal.ts b/backend/src/ee/services/relay/instance-relay-config-dal.ts new file mode 100644 index 0000000000..6db3b93e76 --- /dev/null +++ b/backend/src/ee/services/relay/instance-relay-config-dal.ts @@ -0,0 +1,11 @@ +import { TDbClient } from "@app/db"; +import { TableName } from "@app/db/schemas"; +import { ormify } from "@app/lib/knex"; + +export type TInstanceRelayConfigDALFactory = ReturnType; + +export const instanceRelayConfigDalFactory = (db: TDbClient) => { + const orm = ormify(db, TableName.InstanceRelayConfig); + + return orm; +}; diff --git a/backend/src/ee/services/relay/org-relay-config-dal.ts b/backend/src/ee/services/relay/org-relay-config-dal.ts new file mode 100644 index 0000000000..7da35b9dc1 --- /dev/null +++ b/backend/src/ee/services/relay/org-relay-config-dal.ts @@ -0,0 +1,11 @@ +import { TDbClient } from "@app/db"; +import { TableName } from "@app/db/schemas"; +import { ormify 
} from "@app/lib/knex"; + +export type TOrgRelayConfigDALFactory = ReturnType; + +export const orgRelayConfigDalFactory = (db: TDbClient) => { + const orm = ormify(db, TableName.OrgRelayConfig); + + return orm; +}; diff --git a/backend/src/ee/services/relay/relay-dal.ts b/backend/src/ee/services/relay/relay-dal.ts new file mode 100644 index 0000000000..9107e0807c --- /dev/null +++ b/backend/src/ee/services/relay/relay-dal.ts @@ -0,0 +1,11 @@ +import { TDbClient } from "@app/db"; +import { TableName } from "@app/db/schemas"; +import { ormify } from "@app/lib/knex"; + +export type TRelayDALFactory = ReturnType; + +export const relayDalFactory = (db: TDbClient) => { + const orm = ormify(db, TableName.Relay); + + return orm; +}; diff --git a/backend/src/ee/services/relay/relay-service.ts b/backend/src/ee/services/relay/relay-service.ts new file mode 100644 index 0000000000..92401faafd --- /dev/null +++ b/backend/src/ee/services/relay/relay-service.ts @@ -0,0 +1,1003 @@ +import * as x509 from "@peculiar/x509"; + +import { TRelays } from "@app/db/schemas"; +import { PgSqlLock } from "@app/keystore/keystore"; +import { crypto } from "@app/lib/crypto"; +import { BadRequestError, NotFoundError } from "@app/lib/errors"; +import { constructPemChainFromCerts, prependCertToPemChain } from "@app/services/certificate/certificate-fns"; +import { CertExtendedKeyUsage, CertKeyAlgorithm, CertKeyUsage } from "@app/services/certificate/certificate-types"; +import { + createSerialNumber, + keyAlgorithmToAlgCfg +} from "@app/services/certificate-authority/certificate-authority-fns"; +import { TKmsServiceFactory } from "@app/services/kms/kms-service"; +import { KmsDataKey } from "@app/services/kms/kms-types"; + +import { verifyHostInputValidity } from "../dynamic-secret/dynamic-secret-fns"; +import { createSshCert, createSshKeyPair } from "../ssh/ssh-certificate-authority-fns"; +import { SshCertType } from "../ssh/ssh-certificate-authority-types"; +import { SshCertKeyAlgorithm } from 
"../ssh-certificate/ssh-certificate-types"; +import { TInstanceRelayConfigDALFactory } from "./instance-relay-config-dal"; +import { TOrgRelayConfigDALFactory } from "./org-relay-config-dal"; +import { TRelayDALFactory } from "./relay-dal"; + +export type TRelayServiceFactory = ReturnType; + +const INSTANCE_RELAY_CONFIG_UUID = "00000000-0000-0000-0000-000000000000"; + +export const relayServiceFactory = ({ + instanceRelayConfigDAL, + orgRelayConfigDAL, + relayDAL, + kmsService +}: { + instanceRelayConfigDAL: TInstanceRelayConfigDALFactory; + orgRelayConfigDAL: TOrgRelayConfigDALFactory; + relayDAL: TRelayDALFactory; + kmsService: TKmsServiceFactory; +}) => { + const $getInstanceCAs = async () => { + const instanceConfig = await instanceRelayConfigDAL.transaction(async (tx) => { + const existingInstanceRelayConfig = await instanceRelayConfigDAL.findById(INSTANCE_RELAY_CONFIG_UUID); + if (existingInstanceRelayConfig) return existingInstanceRelayConfig; + + await tx.raw("SELECT pg_advisory_xact_lock(?)", [PgSqlLock.InstanceRelayConfigInit()]); + + const alg = keyAlgorithmToAlgCfg(CertKeyAlgorithm.RSA_2048); + const rootCaKeys = await crypto.nativeCrypto.subtle.generateKey(alg, true, ["sign", "verify"]); + + // generate root CA + const rootCaSerialNumber = createSerialNumber(); + const rootCaSkObj = crypto.nativeCrypto.KeyObject.from(rootCaKeys.privateKey); + const rootCaIssuedAt = new Date(); + const rootCaExpiration = new Date(new Date().setFullYear(2045)); + const rootCaCert = await x509.X509CertificateGenerator.createSelfSigned({ + name: `O=Infisical,CN=Infisical Instance Root Relay CA`, + serialNumber: rootCaSerialNumber, + notBefore: rootCaIssuedAt, + notAfter: rootCaExpiration, + signingAlgorithm: alg, + keys: rootCaKeys, + extensions: [ + // eslint-disable-next-line no-bitwise + new x509.KeyUsagesExtension(x509.KeyUsageFlags.keyCertSign | x509.KeyUsageFlags.cRLSign, true), + await x509.SubjectKeyIdentifierExtension.create(rootCaKeys.publicKey) + ] + }); + + // 
generate org relay CA + const orgRelayCaSerialNumber = createSerialNumber(); + const orgRelayCaIssuedAt = new Date(); + const orgRelayCaExpiration = new Date(new Date().setFullYear(2045)); + const orgRelayCaKeys = await crypto.nativeCrypto.subtle.generateKey(alg, true, ["sign", "verify"]); + const orgRelayCaSkObj = crypto.nativeCrypto.KeyObject.from(orgRelayCaKeys.privateKey); + const orgRelayCaCert = await x509.X509CertificateGenerator.create({ + serialNumber: orgRelayCaSerialNumber, + subject: `O=Infisical,CN=Infisical Organization Relay CA`, + issuer: rootCaCert.subject, + notBefore: orgRelayCaIssuedAt, + notAfter: orgRelayCaExpiration, + signingKey: rootCaKeys.privateKey, + publicKey: orgRelayCaKeys.publicKey, + signingAlgorithm: alg, + extensions: [ + new x509.KeyUsagesExtension( + // eslint-disable-next-line no-bitwise + x509.KeyUsageFlags.keyCertSign | + x509.KeyUsageFlags.cRLSign | + x509.KeyUsageFlags.digitalSignature | + x509.KeyUsageFlags.keyEncipherment, + true + ), + new x509.BasicConstraintsExtension(true, 2, true), + await x509.AuthorityKeyIdentifierExtension.create(rootCaCert, false), + await x509.SubjectKeyIdentifierExtension.create(orgRelayCaKeys.publicKey) + ] + }); + const orgRelayCaChain = constructPemChainFromCerts([rootCaCert]); + + // generate instance relay CA + const instanceRelayCaSerialNumber = createSerialNumber(); + const instanceRelayCaIssuedAt = new Date(); + const instanceRelayCaExpiration = new Date(new Date().setFullYear(2045)); + const instanceRelayCaKeys = await crypto.nativeCrypto.subtle.generateKey(alg, true, ["sign", "verify"]); + const instanceRelayCaSkObj = crypto.nativeCrypto.KeyObject.from(instanceRelayCaKeys.privateKey); + const instanceRelayCaCert = await x509.X509CertificateGenerator.create({ + serialNumber: instanceRelayCaSerialNumber, + subject: `O=Infisical,CN=Infisical Instance Relay CA`, + issuer: rootCaCert.subject, + notBefore: instanceRelayCaIssuedAt, + notAfter: instanceRelayCaExpiration, + signingKey: 
rootCaKeys.privateKey, + publicKey: instanceRelayCaKeys.publicKey, + signingAlgorithm: alg, + extensions: [ + new x509.KeyUsagesExtension( + // eslint-disable-next-line no-bitwise + x509.KeyUsageFlags.keyCertSign | + x509.KeyUsageFlags.cRLSign | + x509.KeyUsageFlags.digitalSignature | + x509.KeyUsageFlags.keyEncipherment, + true + ), + new x509.BasicConstraintsExtension(true, 1, true), + await x509.AuthorityKeyIdentifierExtension.create(rootCaCert, false), + await x509.SubjectKeyIdentifierExtension.create(instanceRelayCaKeys.publicKey) + ] + }); + const instanceRelayCaChain = constructPemChainFromCerts([rootCaCert]); + + // generate instance relay client CA + const instanceRelayClientCaSerialNumber = createSerialNumber(); + const instanceRelayClientCaIssuedAt = new Date(); + const instanceRelayClientCaExpiration = new Date(new Date().setFullYear(2045)); + const instanceRelayClientCaKeys = await crypto.nativeCrypto.subtle.generateKey(alg, true, ["sign", "verify"]); + const instanceRelayClientCaSkObj = crypto.nativeCrypto.KeyObject.from(instanceRelayClientCaKeys.privateKey); + const instanceRelayClientCaCert = await x509.X509CertificateGenerator.create({ + serialNumber: instanceRelayClientCaSerialNumber, + subject: `O=Infisical,CN=Infisical Instance Relay Client CA`, + issuer: instanceRelayCaCert.subject, + notBefore: instanceRelayClientCaIssuedAt, + notAfter: instanceRelayClientCaExpiration, + signingKey: instanceRelayCaKeys.privateKey, + publicKey: instanceRelayClientCaKeys.publicKey, + signingAlgorithm: alg, + extensions: [ + new x509.KeyUsagesExtension( + // eslint-disable-next-line no-bitwise + x509.KeyUsageFlags.keyCertSign | + x509.KeyUsageFlags.cRLSign | + x509.KeyUsageFlags.digitalSignature | + x509.KeyUsageFlags.keyEncipherment, + true + ), + new x509.BasicConstraintsExtension(true, 0, true), + await x509.AuthorityKeyIdentifierExtension.create(instanceRelayCaCert, false), + await x509.SubjectKeyIdentifierExtension.create(instanceRelayClientCaKeys.publicKey) 
+ ] + }); + const instanceRelayClientCaChain = constructPemChainFromCerts([instanceRelayCaCert, rootCaCert]); + + // generate instance relay server CA + const instanceRelayServerCaSerialNumber = createSerialNumber(); + const instanceRelayServerCaIssuedAt = new Date(); + const instanceRelayServerCaExpiration = new Date(new Date().setFullYear(2045)); + const instanceRelayServerCaKeys = await crypto.nativeCrypto.subtle.generateKey(alg, true, ["sign", "verify"]); + const instanceRelayServerCaSkObj = crypto.nativeCrypto.KeyObject.from(instanceRelayServerCaKeys.privateKey); + const instanceRelayServerCaCert = await x509.X509CertificateGenerator.create({ + serialNumber: instanceRelayServerCaSerialNumber, + subject: `O=Infisical,CN=Infisical Instance Relay Server CA`, + issuer: instanceRelayCaCert.subject, + notBefore: instanceRelayServerCaIssuedAt, + notAfter: instanceRelayServerCaExpiration, + signingKey: instanceRelayCaKeys.privateKey, + publicKey: instanceRelayServerCaKeys.publicKey, + signingAlgorithm: alg, + extensions: [ + new x509.KeyUsagesExtension( + // eslint-disable-next-line no-bitwise + x509.KeyUsageFlags.keyCertSign | + x509.KeyUsageFlags.cRLSign | + x509.KeyUsageFlags.digitalSignature | + x509.KeyUsageFlags.keyEncipherment, + true + ), + new x509.BasicConstraintsExtension(true, 0, true), + await x509.AuthorityKeyIdentifierExtension.create(instanceRelayCaCert, false), + await x509.SubjectKeyIdentifierExtension.create(instanceRelayServerCaKeys.publicKey) + ] + }); + const instanceRelayServerCaChain = constructPemChainFromCerts([instanceRelayCaCert, rootCaCert]); + + const instanceSshServerCaKeyPair = await createSshKeyPair(SshCertKeyAlgorithm.RSA_2048); + const instanceSshClientCaKeyPair = await createSshKeyPair(SshCertKeyAlgorithm.RSA_2048); + + const encryptWithRoot = kmsService.encryptWithRootKey(); + + // root relay CA + const encryptedRootRelayPkiCaPrivateKey = encryptWithRoot( + Buffer.from( + rootCaSkObj.export({ + type: "pkcs8", + format: "der" + }) + 
) + ); + const encryptedRootRelayPkiCaCertificate = encryptWithRoot(Buffer.from(rootCaCert.rawData)); + + // org relay CA + const encryptedOrgRelayPkiCaPrivateKey = encryptWithRoot( + Buffer.from( + orgRelayCaSkObj.export({ + type: "pkcs8", + format: "der" + }) + ) + ); + const encryptedOrgRelayPkiCaCertificate = encryptWithRoot(Buffer.from(orgRelayCaCert.rawData)); + const encryptedOrgRelayPkiCaCertificateChain = encryptWithRoot(Buffer.from(orgRelayCaChain)); + + // instance relay CA + const encryptedInstanceRelayPkiCaPrivateKey = encryptWithRoot( + Buffer.from( + instanceRelayCaSkObj.export({ + type: "pkcs8", + format: "der" + }) + ) + ); + const encryptedInstanceRelayPkiCaCertificate = encryptWithRoot(Buffer.from(instanceRelayCaCert.rawData)); + const encryptedInstanceRelayPkiCaCertificateChain = encryptWithRoot(Buffer.from(instanceRelayCaChain)); + + // instance relay client CA + const encryptedInstanceRelayPkiClientCaPrivateKey = encryptWithRoot( + Buffer.from( + instanceRelayClientCaSkObj.export({ + type: "pkcs8", + format: "der" + }) + ) + ); + const encryptedInstanceRelayPkiClientCaCertificate = encryptWithRoot( + Buffer.from(instanceRelayClientCaCert.rawData) + ); + const encryptedInstanceRelayPkiClientCaCertificateChain = encryptWithRoot( + Buffer.from(instanceRelayClientCaChain) + ); + + // instance relay server CA + const encryptedInstanceRelayPkiServerCaPrivateKey = encryptWithRoot( + Buffer.from( + instanceRelayServerCaSkObj.export({ + type: "pkcs8", + format: "der" + }) + ) + ); + const encryptedInstanceRelayPkiServerCaCertificate = encryptWithRoot( + Buffer.from(instanceRelayServerCaCert.rawData) + ); + const encryptedInstanceRelayPkiServerCaCertificateChain = encryptWithRoot( + Buffer.from(instanceRelayServerCaChain) + ); + + const encryptedInstanceRelaySshClientCaPublicKey = encryptWithRoot( + Buffer.from(instanceSshClientCaKeyPair.publicKey) + ); + const encryptedInstanceRelaySshClientCaPrivateKey = encryptWithRoot( + 
Buffer.from(instanceSshClientCaKeyPair.privateKey) + ); + + const encryptedInstanceRelaySshServerCaPublicKey = encryptWithRoot( + Buffer.from(instanceSshServerCaKeyPair.publicKey) + ); + const encryptedInstanceRelaySshServerCaPrivateKey = encryptWithRoot( + Buffer.from(instanceSshServerCaKeyPair.privateKey) + ); + + return instanceRelayConfigDAL.create({ + // @ts-expect-error id is kept as fixed for idempotence and to avoid race condition + id: INSTANCE_RELAY_CONFIG_UUID, + encryptedRootRelayPkiCaPrivateKey, + encryptedRootRelayPkiCaCertificate, + encryptedInstanceRelayPkiCaPrivateKey, + encryptedInstanceRelayPkiCaCertificate, + encryptedInstanceRelayPkiCaCertificateChain, + encryptedInstanceRelayPkiClientCaPrivateKey, + encryptedInstanceRelayPkiClientCaCertificate, + encryptedInstanceRelayPkiClientCaCertificateChain, + encryptedInstanceRelayPkiServerCaPrivateKey, + encryptedInstanceRelayPkiServerCaCertificate, + encryptedInstanceRelayPkiServerCaCertificateChain, + encryptedOrgRelayPkiCaPrivateKey, + encryptedOrgRelayPkiCaCertificate, + encryptedOrgRelayPkiCaCertificateChain, + encryptedInstanceRelaySshClientCaPublicKey, + encryptedInstanceRelaySshClientCaPrivateKey, + encryptedInstanceRelaySshServerCaPublicKey, + encryptedInstanceRelaySshServerCaPrivateKey + }); + }); + + // decrypt the instance config + const decryptWithRoot = kmsService.decryptWithRootKey(); + + // decrypt root relay CA + const rootRelayPkiCaPrivateKey = decryptWithRoot(instanceConfig.encryptedRootRelayPkiCaPrivateKey); + const rootRelayPkiCaCertificate = decryptWithRoot(instanceConfig.encryptedRootRelayPkiCaCertificate); + + // decrypt org relay CA + const orgRelayPkiCaPrivateKey = decryptWithRoot(instanceConfig.encryptedOrgRelayPkiCaPrivateKey); + const orgRelayPkiCaCertificate = decryptWithRoot(instanceConfig.encryptedOrgRelayPkiCaCertificate); + const orgRelayPkiCaCertificateChain = decryptWithRoot(instanceConfig.encryptedOrgRelayPkiCaCertificateChain); + + // decrypt instance relay CA + 
const instanceRelayPkiCaPrivateKey = decryptWithRoot(instanceConfig.encryptedInstanceRelayPkiCaPrivateKey); + const instanceRelayPkiCaCertificate = decryptWithRoot(instanceConfig.encryptedInstanceRelayPkiCaCertificate); + const instanceRelayPkiCaCertificateChain = decryptWithRoot( + instanceConfig.encryptedInstanceRelayPkiCaCertificateChain + ); + + // decrypt instance relay client CA + const instanceRelayPkiClientCaPrivateKey = decryptWithRoot( + instanceConfig.encryptedInstanceRelayPkiClientCaPrivateKey + ); + const instanceRelayPkiClientCaCertificate = decryptWithRoot( + instanceConfig.encryptedInstanceRelayPkiClientCaCertificate + ); + const instanceRelayPkiClientCaCertificateChain = decryptWithRoot( + instanceConfig.encryptedInstanceRelayPkiClientCaCertificateChain + ); + + // decrypt instance relay server CA + const instanceRelayPkiServerCaPrivateKey = decryptWithRoot( + instanceConfig.encryptedInstanceRelayPkiServerCaPrivateKey + ); + const instanceRelayPkiServerCaCertificate = decryptWithRoot( + instanceConfig.encryptedInstanceRelayPkiServerCaCertificate + ); + const instanceRelayPkiServerCaCertificateChain = decryptWithRoot( + instanceConfig.encryptedInstanceRelayPkiServerCaCertificateChain + ); + + // decrypt SSH keys + const instanceRelaySshClientCaPublicKey = decryptWithRoot( + instanceConfig.encryptedInstanceRelaySshClientCaPublicKey + ); + const instanceRelaySshClientCaPrivateKey = decryptWithRoot( + instanceConfig.encryptedInstanceRelaySshClientCaPrivateKey + ); + const instanceRelaySshServerCaPublicKey = decryptWithRoot( + instanceConfig.encryptedInstanceRelaySshServerCaPublicKey + ); + const instanceRelaySshServerCaPrivateKey = decryptWithRoot( + instanceConfig.encryptedInstanceRelaySshServerCaPrivateKey + ); + + return { + rootRelayPkiCaPrivateKey, + rootRelayPkiCaCertificate, + orgRelayPkiCaPrivateKey, + orgRelayPkiCaCertificate, + orgRelayPkiCaCertificateChain, + instanceRelayPkiCaPrivateKey, + instanceRelayPkiCaCertificate, + 
instanceRelayPkiCaCertificateChain, + instanceRelayPkiClientCaPrivateKey, + instanceRelayPkiClientCaCertificate, + instanceRelayPkiClientCaCertificateChain, + instanceRelayPkiServerCaPrivateKey, + instanceRelayPkiServerCaCertificate, + instanceRelayPkiServerCaCertificateChain, + instanceRelaySshClientCaPublicKey, + instanceRelaySshClientCaPrivateKey, + instanceRelaySshServerCaPublicKey, + instanceRelaySshServerCaPrivateKey + }; + }; + + const $getOrgCAs = async (orgId: string) => { + const instanceCAs = await $getInstanceCAs(); + const { encryptor: orgKmsEncryptor, decryptor: orgKmsDecryptor } = await kmsService.createCipherPairWithDataKey({ + type: KmsDataKey.Organization, + orgId + }); + + const orgRelayConfig = await orgRelayConfigDAL.transaction(async (tx) => { + const existingOrgRelayConfig = await orgRelayConfigDAL.findOne( + { + orgId + }, + tx + ); + + if (existingOrgRelayConfig) { + return existingOrgRelayConfig; + } + + await tx.raw("SELECT pg_advisory_xact_lock(?)", [PgSqlLock.OrgRelayConfigInit(orgId)]); + + const alg = keyAlgorithmToAlgCfg(CertKeyAlgorithm.RSA_2048); + const orgRelayCaCert = new x509.X509Certificate(instanceCAs.orgRelayPkiCaCertificate); + const rootRelayCaCert = new x509.X509Certificate(instanceCAs.rootRelayPkiCaCertificate); + const orgRelayCaSkObj = crypto.nativeCrypto.createPrivateKey({ + key: instanceCAs.orgRelayPkiCaPrivateKey, + format: "der", + type: "pkcs8" + }); + const orgRelayCaPrivateKey = await crypto.nativeCrypto.subtle.importKey( + "pkcs8", + orgRelayCaSkObj.export({ format: "der", type: "pkcs8" }), + alg, + true, + ["sign"] + ); + + // generate org relay client CA + const orgRelayClientCaSerialNumber = createSerialNumber(); + const orgRelayClientCaIssuedAt = new Date(); + const orgRelayClientCaExpiration = new Date(new Date().setFullYear(2045)); + const orgRelayClientCaKeys = await crypto.nativeCrypto.subtle.generateKey(alg, true, ["sign", "verify"]); + const orgRelayClientCaSkObj = 
crypto.nativeCrypto.KeyObject.from(orgRelayClientCaKeys.privateKey); + const orgRelayClientCaCert = await x509.X509CertificateGenerator.create({ + serialNumber: orgRelayClientCaSerialNumber, + subject: `O=${orgId},CN=Infisical Org Relay Client CA`, + issuer: orgRelayCaCert.subject, + notBefore: orgRelayClientCaIssuedAt, + notAfter: orgRelayClientCaExpiration, + signingKey: orgRelayCaPrivateKey, + publicKey: orgRelayClientCaKeys.publicKey, + signingAlgorithm: alg, + extensions: [ + new x509.KeyUsagesExtension( + // eslint-disable-next-line no-bitwise + x509.KeyUsageFlags.keyCertSign | + x509.KeyUsageFlags.cRLSign | + x509.KeyUsageFlags.digitalSignature | + x509.KeyUsageFlags.keyEncipherment, + true + ), + new x509.BasicConstraintsExtension(true, 0, true), + await x509.AuthorityKeyIdentifierExtension.create(orgRelayCaCert, false), + await x509.SubjectKeyIdentifierExtension.create(orgRelayClientCaKeys.publicKey) + ] + }); + const orgRelayClientCaChain = constructPemChainFromCerts([orgRelayCaCert, rootRelayCaCert]); + + // generate org SSH CA + const orgSshServerCaKeyPair = await createSshKeyPair(SshCertKeyAlgorithm.RSA_2048); + const orgSshClientCaKeyPair = await createSshKeyPair(SshCertKeyAlgorithm.RSA_2048); + + // generate org relay server CA + const orgRelayServerCaSerialNumber = createSerialNumber(); + const orgRelayServerCaIssuedAt = new Date(); + const orgRelayServerCaExpiration = new Date(new Date().setFullYear(2045)); + const orgRelayServerCaKeys = await crypto.nativeCrypto.subtle.generateKey(alg, true, ["sign", "verify"]); + const orgRelayServerCaSkObj = crypto.nativeCrypto.KeyObject.from(orgRelayServerCaKeys.privateKey); + const orgRelayServerCaCert = await x509.X509CertificateGenerator.create({ + serialNumber: orgRelayServerCaSerialNumber, + subject: `O=${orgId},CN=Infisical Org Relay Server CA`, + issuer: orgRelayCaCert.subject, + notBefore: orgRelayServerCaIssuedAt, + notAfter: orgRelayServerCaExpiration, + signingKey: orgRelayCaPrivateKey, + publicKey: 
orgRelayServerCaKeys.publicKey, + signingAlgorithm: alg, + extensions: [ + new x509.KeyUsagesExtension( + // eslint-disable-next-line no-bitwise + x509.KeyUsageFlags.keyCertSign | + x509.KeyUsageFlags.cRLSign | + x509.KeyUsageFlags.digitalSignature | + x509.KeyUsageFlags.keyEncipherment, + true + ), + new x509.BasicConstraintsExtension(true, 0, true), + await x509.AuthorityKeyIdentifierExtension.create(orgRelayCaCert, false), + await x509.SubjectKeyIdentifierExtension.create(orgRelayServerCaKeys.publicKey) + ] + }); + const orgRelayServerCaChain = constructPemChainFromCerts([orgRelayCaCert, rootRelayCaCert]); + + const encryptedRelayPkiClientCaPrivateKey = orgKmsEncryptor({ + plainText: Buffer.from( + orgRelayClientCaSkObj.export({ + type: "pkcs8", + format: "der" + }) + ) + }).cipherTextBlob; + const encryptedRelayPkiClientCaCertificate = orgKmsEncryptor({ + plainText: Buffer.from(orgRelayClientCaCert.rawData) + }).cipherTextBlob; + + const encryptedRelayPkiClientCaCertificateChain = orgKmsEncryptor({ + plainText: Buffer.from(orgRelayClientCaChain) + }).cipherTextBlob; + + const encryptedRelayPkiServerCaPrivateKey = orgKmsEncryptor({ + plainText: Buffer.from( + orgRelayServerCaSkObj.export({ + type: "pkcs8", + format: "der" + }) + ) + }).cipherTextBlob; + const encryptedRelayPkiServerCaCertificate = orgKmsEncryptor({ + plainText: Buffer.from(orgRelayServerCaCert.rawData) + }).cipherTextBlob; + const encryptedRelayPkiServerCaCertificateChain = orgKmsEncryptor({ + plainText: Buffer.from(orgRelayServerCaChain) + }).cipherTextBlob; + + const encryptedRelaySshClientCaPublicKey = orgKmsEncryptor({ + plainText: Buffer.from(orgSshClientCaKeyPair.publicKey) + }).cipherTextBlob; + const encryptedRelaySshClientCaPrivateKey = orgKmsEncryptor({ + plainText: Buffer.from(orgSshClientCaKeyPair.privateKey) + }).cipherTextBlob; + + const encryptedRelaySshServerCaPublicKey = orgKmsEncryptor({ + plainText: Buffer.from(orgSshServerCaKeyPair.publicKey) + }).cipherTextBlob; + const 
encryptedRelaySshServerCaPrivateKey = orgKmsEncryptor({ + plainText: Buffer.from(orgSshServerCaKeyPair.privateKey) + }).cipherTextBlob; + + return orgRelayConfigDAL.create({ + orgId, + encryptedRelayPkiClientCaPrivateKey, + encryptedRelayPkiClientCaCertificate, + encryptedRelayPkiClientCaCertificateChain, + encryptedRelayPkiServerCaPrivateKey, + encryptedRelayPkiServerCaCertificate, + encryptedRelayPkiServerCaCertificateChain, + encryptedRelaySshClientCaPublicKey, + encryptedRelaySshClientCaPrivateKey, + encryptedRelaySshServerCaPublicKey, + encryptedRelaySshServerCaPrivateKey + }); + }); + + const relayPkiClientCaPrivateKey = orgKmsDecryptor({ + cipherTextBlob: orgRelayConfig.encryptedRelayPkiClientCaPrivateKey + }); + const relayPkiClientCaCertificate = orgKmsDecryptor({ + cipherTextBlob: orgRelayConfig.encryptedRelayPkiClientCaCertificate + }); + const relayPkiClientCaCertificateChain = orgKmsDecryptor({ + cipherTextBlob: orgRelayConfig.encryptedRelayPkiClientCaCertificateChain + }); + + const relayPkiServerCaPrivateKey = orgKmsDecryptor({ + cipherTextBlob: orgRelayConfig.encryptedRelayPkiServerCaPrivateKey + }); + const relayPkiServerCaCertificate = orgKmsDecryptor({ + cipherTextBlob: orgRelayConfig.encryptedRelayPkiServerCaCertificate + }); + const relayPkiServerCaCertificateChain = orgKmsDecryptor({ + cipherTextBlob: orgRelayConfig.encryptedRelayPkiServerCaCertificateChain + }); + + const relaySshClientCaPublicKey = orgKmsDecryptor({ + cipherTextBlob: orgRelayConfig.encryptedRelaySshClientCaPublicKey + }); + const relaySshClientCaPrivateKey = orgKmsDecryptor({ + cipherTextBlob: orgRelayConfig.encryptedRelaySshClientCaPrivateKey + }); + + const relaySshServerCaPublicKey = orgKmsDecryptor({ + cipherTextBlob: orgRelayConfig.encryptedRelaySshServerCaPublicKey + }); + const relaySshServerCaPrivateKey = orgKmsDecryptor({ + cipherTextBlob: orgRelayConfig.encryptedRelaySshServerCaPrivateKey + }); + + return { + relayPkiClientCaPrivateKey, + 
relayPkiClientCaCertificate, + relayPkiClientCaCertificateChain, + relayPkiServerCaPrivateKey, + relayPkiServerCaCertificate, + relayPkiServerCaCertificateChain, + relaySshClientCaPublicKey, + relaySshClientCaPrivateKey, + relaySshServerCaPublicKey, + relaySshServerCaPrivateKey + }; + }; + + const $generateRelayServerCredentials = async ({ + host, + orgId, + relayPkiServerCaCertificate, + relayPkiServerCaPrivateKey, + relayPkiClientCaCertificate, + relayPkiClientCaCertificateChain, + relaySshClientCaPublicKey, + relaySshServerCaPrivateKey + }: { + host: string; + relayPkiServerCaCertificate: Buffer; + relayPkiServerCaPrivateKey: Buffer; + relayPkiClientCaCertificateChain: Buffer; + relayPkiClientCaCertificate: Buffer; + relaySshServerCaPrivateKey: Buffer; + relaySshClientCaPublicKey: Buffer; + orgId?: string; + }) => { + const alg = keyAlgorithmToAlgCfg(CertKeyAlgorithm.RSA_2048); + const relayServerCaCert = new x509.X509Certificate(relayPkiServerCaCertificate); + const relayClientCaCert = new x509.X509Certificate(relayPkiClientCaCertificate); + const relayServerCaSkObj = crypto.nativeCrypto.createPrivateKey({ + key: relayPkiServerCaPrivateKey, + format: "der", + type: "pkcs8" + }); + + const relayServerCaPrivateKey = await crypto.nativeCrypto.subtle.importKey( + "pkcs8", + relayServerCaSkObj.export({ format: "der", type: "pkcs8" }), + alg, + true, + ["sign"] + ); + + const relayServerKeys = await crypto.nativeCrypto.subtle.generateKey(alg, true, ["sign", "verify"]); + const relayServerCertIssuedAt = new Date(); + const relayServerCertExpireAt = new Date(new Date().setDate(new Date().getDate() + 1)); + const relayServerCertPrivateKey = crypto.nativeCrypto.KeyObject.from(relayServerKeys.privateKey); + + const relayServerCertExtensions: x509.Extension[] = [ + new x509.BasicConstraintsExtension(false), + await x509.AuthorityKeyIdentifierExtension.create(relayServerCaCert, false), + await x509.SubjectKeyIdentifierExtension.create(relayServerKeys.publicKey), + new 
x509.CertificatePolicyExtension(["2.5.29.32.0"]), // anyPolicy + new x509.KeyUsagesExtension( + // eslint-disable-next-line no-bitwise + x509.KeyUsageFlags[CertKeyUsage.DIGITAL_SIGNATURE] | x509.KeyUsageFlags[CertKeyUsage.KEY_ENCIPHERMENT], + true + ), + new x509.ExtendedKeyUsageExtension([x509.ExtendedKeyUsage[CertExtendedKeyUsage.SERVER_AUTH]], true), + // san + new x509.SubjectAlternativeNameExtension([{ type: "ip", value: host }], false) + ]; + + const relayServerSerialNumber = createSerialNumber(); + const relayServerCertificate = await x509.X509CertificateGenerator.create({ + serialNumber: relayServerSerialNumber, + subject: `CN=${host},O=${orgId ?? "Infisical"},OU=Relay`, + issuer: relayServerCaCert.subject, + notBefore: relayServerCertIssuedAt, + notAfter: relayServerCertExpireAt, + signingKey: relayServerCaPrivateKey, + publicKey: relayServerKeys.publicKey, + signingAlgorithm: alg, + extensions: relayServerCertExtensions + }); + + // generate relay server SSH certificate + const keyAlgorithm = SshCertKeyAlgorithm.RSA_2048; + const { publicKey: relayServerSshPublicKey, privateKey: relayServerSshPrivateKey } = + await createSshKeyPair(keyAlgorithm); + + const relayServerSshCert = await createSshCert({ + caPrivateKey: relaySshServerCaPrivateKey.toString("utf8"), + clientPublicKey: relayServerSshPublicKey, + keyId: "relay-server", + principals: [`${host}:2222`], + certType: SshCertType.HOST, + requestedTtl: "30d" + }); + + return { + pki: { + serverCertificate: relayServerCertificate.toString("pem"), + serverPrivateKey: relayServerCertPrivateKey.export({ format: "pem", type: "pkcs8" }).toString(), + clientCertificateChain: prependCertToPemChain( + relayClientCaCert, + relayPkiClientCaCertificateChain.toString("utf8") + ) + }, + ssh: { + serverCertificate: relayServerSshCert.signedPublicKey, + serverPrivateKey: relayServerSshPrivateKey, + clientCAPublicKey: relaySshClientCaPublicKey.toString("utf8") + } + }; + }; + + const $generateRelayClientCredentials = 
async ({ + gatewayId, + orgId, + orgName, + relayPkiClientCaCertificate, + relayPkiClientCaPrivateKey, + relayPkiServerCaCertificate, + relayPkiServerCaCertificateChain + }: { + gatewayId: string; + orgId: string; + orgName: string; + relayPkiClientCaCertificate: Buffer; + relayPkiClientCaPrivateKey: Buffer; + relayPkiServerCaCertificate: Buffer; + relayPkiServerCaCertificateChain: Buffer; + }) => { + const alg = keyAlgorithmToAlgCfg(CertKeyAlgorithm.RSA_2048); + const relayClientCaCert = new x509.X509Certificate(relayPkiClientCaCertificate); + const relayServerCaCert = new x509.X509Certificate(relayPkiServerCaCertificate); + const relayClientCaSkObj = crypto.nativeCrypto.createPrivateKey({ + key: relayPkiClientCaPrivateKey, + format: "der", + type: "pkcs8" + }); + + const importedRelayClientCaPrivateKey = await crypto.nativeCrypto.subtle.importKey( + "pkcs8", + relayClientCaSkObj.export({ format: "der", type: "pkcs8" }), + alg, + true, + ["sign"] + ); + + const clientCertIssuedAt = new Date(); + const clientCertExpiration = new Date(new Date().getTime() + 5 * 60 * 1000); + const clientKeys = await crypto.nativeCrypto.subtle.generateKey(alg, true, ["sign", "verify"]); + const clientCertPrivateKey = crypto.nativeCrypto.KeyObject.from(clientKeys.privateKey); + const clientCertSerialNumber = createSerialNumber(); + + // Build standard extensions + const extensions: x509.Extension[] = [ + new x509.BasicConstraintsExtension(false), + await x509.AuthorityKeyIdentifierExtension.create(relayClientCaCert, false), + await x509.SubjectKeyIdentifierExtension.create(clientKeys.publicKey), + new x509.CertificatePolicyExtension(["2.5.29.32.0"]), // anyPolicy + new x509.KeyUsagesExtension( + // eslint-disable-next-line no-bitwise + x509.KeyUsageFlags[CertKeyUsage.DIGITAL_SIGNATURE] | + x509.KeyUsageFlags[CertKeyUsage.KEY_ENCIPHERMENT] | + x509.KeyUsageFlags[CertKeyUsage.KEY_AGREEMENT], + true + ), + new 
x509.ExtendedKeyUsageExtension([x509.ExtendedKeyUsage[CertExtendedKeyUsage.CLIENT_AUTH]], true) + ]; + + const clientCert = await x509.X509CertificateGenerator.create({ + serialNumber: clientCertSerialNumber, + subject: `O=${orgName}-${orgId},OU=relay-client,CN=${gatewayId}`, + issuer: relayClientCaCert.subject, + notAfter: clientCertExpiration, + notBefore: clientCertIssuedAt, + signingKey: importedRelayClientCaPrivateKey, + publicKey: clientKeys.publicKey, + signingAlgorithm: alg, + extensions + }); + + return { + clientCertificate: clientCert.toString("pem"), + clientPrivateKey: clientCertPrivateKey.export({ format: "pem", type: "pkcs8" }).toString(), + serverCertificateChain: prependCertToPemChain( + relayServerCaCert, + relayPkiServerCaCertificateChain.toString("utf8") + ) + }; + }; + + const getCredentialsForGateway = async ({ + relayName, + orgId, + gatewayId + }: { + relayName: string; + orgId: string; + gatewayId: string; + }) => { + let relay: TRelays | null = await relayDAL.findOne({ + orgId, + name: relayName + }); + + if (!relay) { + relay = await relayDAL.findOne({ + name: relayName, + orgId: null + }); + } + + if (!relay) { + throw new NotFoundError({ + message: "Relay not found" + }); + } + + const keyAlgorithm = SshCertKeyAlgorithm.RSA_2048; + const { publicKey: relayClientSshPublicKey, privateKey: relayClientSshPrivateKey } = + await createSshKeyPair(keyAlgorithm); + + if (relay.orgId === null) { + const instanceCAs = await $getInstanceCAs(); + const relayClientSshCert = await createSshCert({ + caPrivateKey: instanceCAs.instanceRelaySshClientCaPrivateKey.toString("utf8"), + clientPublicKey: relayClientSshPublicKey, + keyId: `client-${relayName}`, + principals: [gatewayId], + certType: SshCertType.USER, + requestedTtl: "1d" + }); + + return { + relayHost: relay.host, + clientSshCert: relayClientSshCert.signedPublicKey, + clientSshPrivateKey: relayClientSshPrivateKey, + serverCAPublicKey: 
instanceCAs.instanceRelaySshServerCaPublicKey.toString("utf8") + }; + } + + const orgCAs = await $getOrgCAs(orgId); + const relayClientSshCert = await createSshCert({ + caPrivateKey: orgCAs.relaySshClientCaPrivateKey.toString("utf8"), + clientPublicKey: relayClientSshPublicKey, + keyId: `relay-client-${relay.id}`, + principals: [gatewayId], + certType: SshCertType.USER, + requestedTtl: "30d" + }); + + return { + relayHost: relay.host, + clientSshCert: relayClientSshCert.signedPublicKey, + clientSshPrivateKey: relayClientSshPrivateKey, + serverCAPublicKey: orgCAs.relaySshServerCaPublicKey.toString("utf8") + }; + }; + + const getCredentialsForClient = async ({ + relayId, + orgId, + orgName, + gatewayId + }: { + relayId: string; + orgId: string; + orgName: string; + gatewayId: string; + }) => { + const relay = await relayDAL.findOne({ + id: relayId + }); + + if (!relay) { + throw new NotFoundError({ + message: "Relay not found" + }); + } + + await verifyHostInputValidity(relay.host); + + if (relay.orgId === null) { + const instanceCAs = await $getInstanceCAs(); + const relayCertificateCredentials = await $generateRelayClientCredentials({ + gatewayId, + orgId, + orgName, + relayPkiClientCaCertificate: instanceCAs.instanceRelayPkiClientCaCertificate, + relayPkiClientCaPrivateKey: instanceCAs.instanceRelayPkiClientCaPrivateKey, + relayPkiServerCaCertificate: instanceCAs.instanceRelayPkiServerCaCertificate, + relayPkiServerCaCertificateChain: instanceCAs.instanceRelayPkiServerCaCertificateChain + }); + + return { + ...relayCertificateCredentials, + relayHost: relay.host + }; + } + + const orgCAs = await $getOrgCAs(orgId); + const relayCertificateCredentials = await $generateRelayClientCredentials({ + gatewayId, + orgId, + orgName, + relayPkiClientCaCertificate: orgCAs.relayPkiClientCaCertificate, + relayPkiClientCaPrivateKey: orgCAs.relayPkiClientCaPrivateKey, + relayPkiServerCaCertificate: orgCAs.relayPkiServerCaCertificate, + relayPkiServerCaCertificateChain: 
orgCAs.relayPkiServerCaCertificateChain + }); + + return { + ...relayCertificateCredentials, + relayHost: relay.host + }; + }; + + const registerRelay = async ({ + host, + name, + identityId, + orgId + }: { + host: string; + name: string; + identityId?: string; + orgId?: string; + }) => { + let relay: TRelays; + const isOrgRelay = identityId && orgId; + + await verifyHostInputValidity(host); + + if (isOrgRelay) { + relay = await relayDAL.transaction(async (tx) => { + const existingRelay = await relayDAL.findOne( + { + identityId, + orgId + }, + tx + ); + + if (existingRelay && (existingRelay.host !== host || existingRelay.name !== name)) { + return relayDAL.updateById(existingRelay.id, { host, name }, tx); + } + + if (!existingRelay) { + return relayDAL.create( + { + host, + name, + identityId, + orgId + }, + tx + ); + } + + return existingRelay; + }); + } else { + relay = await relayDAL.transaction(async (tx) => { + const existingRelay = await relayDAL.findOne( + { + name, + orgId: null + }, + tx + ); + + if (existingRelay && existingRelay.host !== host) { + return relayDAL.updateById(existingRelay.id, { host }, tx); + } + + if (!existingRelay) { + return relayDAL.create( + { + host, + name + }, + tx + ); + } + + return existingRelay; + }); + } + + if (relay.orgId === null) { + const instanceCAs = await $getInstanceCAs(); + return $generateRelayServerCredentials({ + host, + relayPkiServerCaCertificate: instanceCAs.instanceRelayPkiServerCaCertificate, + relayPkiServerCaPrivateKey: instanceCAs.instanceRelayPkiServerCaPrivateKey, + relayPkiClientCaCertificate: instanceCAs.instanceRelayPkiClientCaCertificate, + relayPkiClientCaCertificateChain: instanceCAs.instanceRelayPkiClientCaCertificateChain, + relaySshServerCaPrivateKey: instanceCAs.instanceRelaySshServerCaPrivateKey, + relaySshClientCaPublicKey: instanceCAs.instanceRelaySshClientCaPublicKey + }); + } + + if (relay.orgId) { + const orgCAs = await $getOrgCAs(relay.orgId); + return 
$generateRelayServerCredentials({ + host, + orgId: relay.orgId, + relayPkiServerCaCertificate: orgCAs.relayPkiServerCaCertificate, + relayPkiServerCaPrivateKey: orgCAs.relayPkiServerCaPrivateKey, + relayPkiClientCaCertificate: orgCAs.relayPkiClientCaCertificate, + relayPkiClientCaCertificateChain: orgCAs.relayPkiClientCaCertificateChain, + relaySshServerCaPrivateKey: orgCAs.relaySshServerCaPrivateKey, + relaySshClientCaPublicKey: orgCAs.relaySshClientCaPublicKey + }); + } + + throw new BadRequestError({ + message: "Unhandled relay type" + }); + }; + + return { + registerRelay, + getCredentialsForGateway, + getCredentialsForClient + }; +}; diff --git a/backend/src/ee/services/saml-config/saml-config-service.ts b/backend/src/ee/services/saml-config/saml-config-service.ts index 6b8bbe3042..31aaa70aa1 100644 --- a/backend/src/ee/services/saml-config/saml-config-service.ts +++ b/backend/src/ee/services/saml-config/saml-config-service.ts @@ -1,6 +1,7 @@ import { ForbiddenError } from "@casl/ability"; import { OrgMembershipStatus, TableName, TSamlConfigs, TSamlConfigsUpdate, TUsers } from "@app/db/schemas"; +import { throwOnPlanSeatLimitReached } from "@app/ee/services/license/license-fns"; import { getConfig } from "@app/lib/config/env"; import { crypto } from "@app/lib/crypto"; import { BadRequestError, ForbiddenRequestError, NotFoundError } from "@app/lib/errors"; @@ -82,6 +83,19 @@ export const samlConfigServiceFactory = ({ "Failed to create SAML SSO configuration due to plan restriction. Upgrade plan to create SSO configuration." }); + const org = await orgDAL.findOrgById(orgId); + + if (!org) { + throw new NotFoundError({ message: `Could not find organization with ID "${orgId}"` }); + } + + if (org.googleSsoAuthEnforced && isActive) { + throw new BadRequestError({ + message: + "You cannot enable SAML SSO while Google OAuth is enforced. Disable Google OAuth enforcement to enable SAML SSO." 
+ }); + } + const { encryptor } = await kmsService.createCipherPairWithDataKey({ type: KmsDataKey.Organization, orgId @@ -120,6 +134,19 @@ export const samlConfigServiceFactory = ({ "Failed to update SAML SSO configuration due to plan restriction. Upgrade plan to update SSO configuration." }); + const org = await orgDAL.findOrgById(orgId); + + if (!org) { + throw new NotFoundError({ message: `Could not find organization with ID "${orgId}"` }); + } + + if (org.googleSsoAuthEnforced && isActive) { + throw new BadRequestError({ + message: + "Cannot enable SAML SSO while Google OAuth is enforced. Disable Google OAuth enforcement to enable SAML SSO." + }); + } + const updateQuery: TSamlConfigsUpdate = { authProvider, isActive, lastUsed: null }; const { encryptor } = await kmsService.createCipherPairWithDataKey({ type: KmsDataKey.Organization, @@ -310,14 +337,6 @@ export const samlConfigServiceFactory = ({ return foundUser; }); } else { - const plan = await licenseService.getPlan(orgId); - if (plan?.slug !== "enterprise" && plan?.identityLimit && plan.identitiesUsed >= plan.identityLimit) { - // limit imposed on number of identities allowed / number of identities used exceeds the number of identities allowed - throw new BadRequestError({ - message: "Failed to create new member via SAML due to member limit reached. Upgrade plan to add more members." 
- }); - } - user = await userDAL.transaction(async (tx) => { let newUser: TUsers | undefined; newUser = await userDAL.findOne( @@ -365,6 +384,8 @@ export const samlConfigServiceFactory = ({ ); if (!orgMembership) { + await throwOnPlanSeatLimitReached(licenseService, orgId, UserAliasType.SAML); + const { role, roleId } = await getDefaultOrgMembershipRole(organization.defaultMembershipRole); await orgMembershipDAL.create( diff --git a/backend/src/ee/services/secret-approval-request/secret-approval-request-dal.ts b/backend/src/ee/services/secret-approval-request/secret-approval-request-dal.ts index fe4ca94e1e..01caef2238 100644 --- a/backend/src/ee/services/secret-approval-request/secret-approval-request-dal.ts +++ b/backend/src/ee/services/secret-approval-request/secret-approval-request-dal.ts @@ -345,7 +345,7 @@ export const secretApprovalRequestDALFactory = (db: TDbClient) => { const findProjectRequestCount = async (projectId: string, userId: string, policyId?: string, tx?: Knex) => { try { - const docs = await (tx || db) + const docs = await (tx || db.replicaNode()) .with( "temp", (tx || db.replicaNode())(TableName.SecretApprovalRequest) @@ -494,7 +494,7 @@ export const secretApprovalRequestDALFactory = (db: TDbClient) => { .distinctOn(`${TableName.SecretApprovalRequest}.id`) .as("inner"); - const query = (tx || db) + const query = (tx || db.replicaNode()) .select("*") .select(db.raw("count(*) OVER() as total_count")) .from(innerQuery) diff --git a/backend/src/ee/services/secret-approval-request/secret-approval-request-secret-dal.ts b/backend/src/ee/services/secret-approval-request/secret-approval-request-secret-dal.ts index c1b18e43d3..17182cddf5 100644 --- a/backend/src/ee/services/secret-approval-request/secret-approval-request-secret-dal.ts +++ b/backend/src/ee/services/secret-approval-request/secret-approval-request-secret-dal.ts @@ -377,7 +377,7 @@ export const secretApprovalRequestSecretDALFactory = (db: TDbClient) => { // special query for migration to v2 
secret const findByProjectId = async (projectId: string, tx?: Knex) => { try { - const docs = await (tx || db)(TableName.SecretApprovalRequestSecret) + const docs = await (tx || db.replicaNode())(TableName.SecretApprovalRequestSecret) .join( TableName.SecretApprovalRequest, `${TableName.SecretApprovalRequest}.id`, diff --git a/backend/src/ee/services/secret-approval-request/secret-approval-request-service.ts b/backend/src/ee/services/secret-approval-request/secret-approval-request-service.ts index d485f7ea06..17b7d83472 100644 --- a/backend/src/ee/services/secret-approval-request/secret-approval-request-service.ts +++ b/backend/src/ee/services/secret-approval-request/secret-approval-request-service.ts @@ -787,6 +787,7 @@ export const secretApprovalRequestServiceFactory = ({ }, tx ); + await secretV2BridgeDAL.invalidateSecretCacheByProjectId(projectId, tx); return { secrets: { created: newSecrets, updated: updatedSecrets, deleted: deletedSecret }, approval: updatedSecretApproval @@ -976,6 +977,7 @@ export const secretApprovalRequestServiceFactory = ({ }, tx ); + await secretV2BridgeDAL.invalidateSecretCacheByProjectId(projectId, tx); return { secrets: { created: newSecrets, updated: updatedSecrets, deleted: deletedSecret }, approval: updatedSecretApproval @@ -983,7 +985,6 @@ export const secretApprovalRequestServiceFactory = ({ }); } - await secretV2BridgeDAL.invalidateSecretCacheByProjectId(projectId); await snapshotService.performSnapshot(folderId); const [folder] = await folderDAL.findSecretPathByFolderIds(projectId, [folderId]); if (!folder) { diff --git a/backend/src/ee/services/secret-replication/secret-replication-service.ts b/backend/src/ee/services/secret-replication/secret-replication-service.ts index db41d00f73..93147d9e40 100644 --- a/backend/src/ee/services/secret-replication/secret-replication-service.ts +++ b/backend/src/ee/services/secret-replication/secret-replication-service.ts @@ -509,9 +509,9 @@ export const secretReplicationServiceFactory = ({ 
tx ); } + await secretV2BridgeDAL.invalidateSecretCacheByProjectId(projectId, tx); }); - await secretV2BridgeDAL.invalidateSecretCacheByProjectId(projectId); await secretQueueService.syncSecrets({ projectId, orgId, diff --git a/backend/src/ee/services/secret-rotation-v2/secret-rotation-v2-service.ts b/backend/src/ee/services/secret-rotation-v2/secret-rotation-v2-service.ts index 65f60972f5..97a0f57006 100644 --- a/backend/src/ee/services/secret-rotation-v2/secret-rotation-v2-service.ts +++ b/backend/src/ee/services/secret-rotation-v2/secret-rotation-v2-service.ts @@ -82,6 +82,7 @@ import { import { TSecretVersionV2DALFactory } from "@app/services/secret-v2-bridge/secret-version-dal"; import { TSecretVersionV2TagDALFactory } from "@app/services/secret-v2-bridge/secret-version-tag-dal"; +import { TGatewayV2ServiceFactory } from "../gateway-v2/gateway-v2-service"; import { awsIamUserSecretRotationFactory } from "./aws-iam-user-secret/aws-iam-user-secret-rotation-fns"; import { oktaClientSecretRotationFactory } from "./okta-client-secret/okta-client-secret-rotation-fns"; import { TSecretRotationV2DALFactory } from "./secret-rotation-v2-dal"; @@ -110,6 +111,7 @@ export type TSecretRotationV2ServiceFactoryDep = { appConnectionDAL: Pick; folderCommitService: Pick; gatewayService: Pick; + gatewayV2Service: Pick; }; export type TSecretRotationV2ServiceFactory = ReturnType; @@ -153,7 +155,8 @@ export const secretRotationV2ServiceFactory = ({ queueService, folderCommitService, appConnectionDAL, - gatewayService + gatewayService, + gatewayV2Service }: TSecretRotationV2ServiceFactoryDep) => { const $queueSendSecretRotationStatusNotification = async (secretRotation: TSecretRotationV2Raw) => { const appCfg = getConfig(); @@ -467,7 +470,8 @@ export const secretRotationV2ServiceFactory = ({ } as TSecretRotationV2WithConnection, appConnectionDAL, kmsService, - gatewayService + gatewayService, + gatewayV2Service ); // even though we have a db constraint we want to check before any 
rotation of credentials is attempted @@ -831,7 +835,8 @@ export const secretRotationV2ServiceFactory = ({ } as TSecretRotationV2WithConnection, appConnectionDAL, kmsService, - gatewayService + gatewayService, + gatewayV2Service ); const generatedCredentials = await decryptSecretRotationCredentials({ @@ -915,7 +920,8 @@ export const secretRotationV2ServiceFactory = ({ } as TSecretRotationV2WithConnection, appConnectionDAL, kmsService, - gatewayService + gatewayService, + gatewayV2Service ); const updatedRotation = await rotationFactory.rotateCredentials( diff --git a/backend/src/ee/services/secret-rotation-v2/secret-rotation-v2-types.ts b/backend/src/ee/services/secret-rotation-v2/secret-rotation-v2-types.ts index ab348f1723..2af2ddc7ba 100644 --- a/backend/src/ee/services/secret-rotation-v2/secret-rotation-v2-types.ts +++ b/backend/src/ee/services/secret-rotation-v2/secret-rotation-v2-types.ts @@ -6,6 +6,7 @@ import { TAppConnectionDALFactory } from "@app/services/app-connection/app-conne import { TKmsServiceFactory } from "@app/services/kms/kms-service"; import { SecretsOrderBy } from "@app/services/secret/secret-types"; +import { TGatewayV2ServiceFactory } from "../gateway-v2/gateway-v2-service"; import { TAuth0ClientSecretRotation, TAuth0ClientSecretRotationGeneratedCredentials, @@ -253,7 +254,8 @@ export type TRotationFactory< secretRotation: T, appConnectionDAL: Pick, kmsService: Pick, - gatewayService: Pick + gatewayService: Pick, + gatewayV2Service: Pick ) => { issueCredentials: TRotationFactoryIssueCredentials; revokeCredentials: TRotationFactoryRevokeCredentials; diff --git a/backend/src/ee/services/secret-rotation-v2/shared/sql-credentials/sql-credentials-rotation-fns.ts b/backend/src/ee/services/secret-rotation-v2/shared/sql-credentials/sql-credentials-rotation-fns.ts index 1da1db3767..6673baab17 100644 --- a/backend/src/ee/services/secret-rotation-v2/shared/sql-credentials/sql-credentials-rotation-fns.ts +++ 
b/backend/src/ee/services/secret-rotation-v2/shared/sql-credentials/sql-credentials-rotation-fns.ts @@ -41,7 +41,7 @@ const ORACLE_PASSWORD_REQUIREMENTS = { export const sqlCredentialsRotationFactory: TRotationFactory< TSqlCredentialsRotationWithConnection, TSqlCredentialsRotationGeneratedCredentials -> = (secretRotation, _appConnectionDAL, _kmsService, gatewayService) => { +> = (secretRotation, _appConnectionDAL, _kmsService, gatewayService, gatewayV2Service) => { const { connection, parameters: { username1, username2 }, @@ -67,6 +67,7 @@ export const sqlCredentialsRotationFactory: TRotationFactory< credentials: finalCredentials }, gatewayService, + gatewayV2Service, (client) => operation(client) ); }; diff --git a/backend/src/ee/services/secret-rotation/secret-rotation-queue/secret-rotation-queue.ts b/backend/src/ee/services/secret-rotation/secret-rotation-queue/secret-rotation-queue.ts index 1d5c1cedf0..557e71e6c4 100644 --- a/backend/src/ee/services/secret-rotation/secret-rotation-queue/secret-rotation-queue.ts +++ b/backend/src/ee/services/secret-rotation/secret-rotation-queue/secret-rotation-queue.ts @@ -361,9 +361,8 @@ export const secretRotationQueueFactory = ({ }, tx ); + await secretV2BridgeDAL.invalidateSecretCacheByProjectId(secretRotation.projectId, tx); }); - - await secretV2BridgeDAL.invalidateSecretCacheByProjectId(secretRotation.projectId); } else { if (!botKey) throw new NotFoundError({ diff --git a/backend/src/ee/services/secret-snapshot/snapshot-dal.ts b/backend/src/ee/services/secret-snapshot/snapshot-dal.ts index c547d85c26..17f1fad057 100644 --- a/backend/src/ee/services/secret-snapshot/snapshot-dal.ts +++ b/backend/src/ee/services/secret-snapshot/snapshot-dal.ts @@ -265,7 +265,7 @@ export const snapshotDALFactory = (db: TDbClient) => { // then joins with respective secrets and folder const findRecursivelySnapshots = async (snapshotId: string, tx?: Knex) => { try { - const data = await (tx || db) + const data = await (tx || db.replicaNode()) 
.withRecursive("parent", (qb) => { void qb .from(TableName.Snapshot) @@ -419,7 +419,7 @@ export const snapshotDALFactory = (db: TDbClient) => { // then joins with respective secrets and folder const findRecursivelySnapshotsV2Bridge = async (snapshotId: string, tx?: Knex) => { try { - const data = await (tx || db) + const data = await (tx || db.replicaNode()) .withRecursive("parent", (qb) => { void qb .from(TableName.Snapshot) @@ -581,7 +581,11 @@ export const snapshotDALFactory = (db: TDbClient) => { const docs = await (tx || db.replicaNode())(TableName.Snapshot) .where(`${TableName.Snapshot}.folderId`, folderId) .join( - (tx || db)(TableName.Snapshot).groupBy("folderId").max("createdAt").select("folderId").as("latestVersion"), + (tx || db.replicaNode())(TableName.Snapshot) + .groupBy("folderId") + .max("createdAt") + .select("folderId") + .as("latestVersion"), (bd) => { bd.on(`${TableName.Snapshot}.folderId`, "latestVersion.folderId").andOn( `${TableName.Snapshot}.createdAt`, @@ -766,7 +770,7 @@ export const snapshotDALFactory = (db: TDbClient) => { ) .orderBy(`${TableName.Snapshot}.createdAt`, "desc") .where(`${TableName.Snapshot}.folderId`, folderId); - const data = await (tx || db) + const data = await (tx || db.replicaNode()) .with("w", query) .select("*") .from[number]>("w") diff --git a/backend/src/keystore/key-value-store-dal.ts b/backend/src/keystore/key-value-store-dal.ts new file mode 100644 index 0000000000..bccedf4ac1 --- /dev/null +++ b/backend/src/keystore/key-value-store-dal.ts @@ -0,0 +1,91 @@ +import { Knex } from "knex"; + +import { TDbClient } from "@app/db"; +import { TableName } from "@app/db/schemas"; +import { ormify, TOrmify } from "@app/lib/knex"; +import { logger } from "@app/lib/logger"; +import { QueueName } from "@app/queue"; + +export interface TKeyValueStoreDALFactory extends TOrmify { + incrementBy: (key: string, dto: { incr?: number; tx?: Knex; expiresAt?: Date }) => Promise; + findOneInt: (key: string, tx?: Knex) => Promise; + 
pruneExpiredKeys: () => Promise; +} + +const QUERY_TIMEOUT_MS = 10 * 60 * 1000; // 10 minutes +const CACHE_KEY_PRUNE_BATCH_SIZE = 10000; +const MAX_RETRY_ON_FAILURE = 3; + +export const keyValueStoreDALFactory = (db: TDbClient): TKeyValueStoreDALFactory => { + const keyValueStoreOrm = ormify(db, TableName.KeyValueStore); + + const incrementBy: TKeyValueStoreDALFactory["incrementBy"] = async (key, { incr = 1, tx, expiresAt }) => { + return (tx || db)(TableName.KeyValueStore) + .insert({ key, integerValue: 1, expiresAt }) + .onConflict("key") + .merge({ + integerValue: db.raw(`"${TableName.KeyValueStore}"."integerValue" + ?`, [incr]), + expiresAt + }) + .returning("integerValue") + .then((result) => Number(result[0]?.integerValue || 0)); + }; + + const findOneInt: TKeyValueStoreDALFactory["findOneInt"] = async (key, tx) => { + const doc = await (tx || db.replicaNode())(TableName.KeyValueStore) + .where({ key }) + .andWhere( + (builder) => + void builder + .whereNull("expiresAt") // no expiry + .orWhere("expiresAt", ">", db.fn.now()) // or not expired + ) + .first() + .select("integerValue"); + return Number(doc?.integerValue || 0); + }; + + const pruneExpiredKeys: TKeyValueStoreDALFactory["pruneExpiredKeys"] = async () => { + let deletedIds: { key: string }[] = []; + let numberOfRetryOnFailure = 0; + let isRetrying = false; + + logger.info(`${QueueName.DailyResourceCleanUp}: db key value store clean up started`); + do { + try { + // eslint-disable-next-line no-await-in-loop + deletedIds = await db.transaction(async (trx) => { + await trx.raw(`SET statement_timeout = ${QUERY_TIMEOUT_MS}`); + + const findExpiredKeysSubQuery = trx(TableName.KeyValueStore) + .where("expiresAt", "<", db.fn.now()) + .select("key") + .limit(CACHE_KEY_PRUNE_BATCH_SIZE); + + // eslint-disable-next-line no-await-in-loop + const results = await trx(TableName.KeyValueStore) + .whereIn("key", findExpiredKeysSubQuery) + .del() + .returning("key"); + + return results; + }); + + 
numberOfRetryOnFailure = 0; // reset + } catch (error) { + numberOfRetryOnFailure += 1; + deletedIds = []; + logger.error(error, "Failed to clean up db key value"); + } finally { + // eslint-disable-next-line no-await-in-loop + await new Promise((resolve) => { + setTimeout(resolve, 10); // time to breathe for db + }); + } + isRetrying = numberOfRetryOnFailure > 0; + } while (deletedIds.length > 0 || (isRetrying && numberOfRetryOnFailure < MAX_RETRY_ON_FAILURE)); + logger.info(`${QueueName.DailyResourceCleanUp}: db key value store clean up completed`); + }; + + return { ...keyValueStoreOrm, incrementBy, findOneInt, pruneExpiredKeys }; +}; diff --git a/backend/src/keystore/keystore.ts b/backend/src/keystore/keystore.ts index a6135e0ca9..8b72ce4640 100644 --- a/backend/src/keystore/keystore.ts +++ b/backend/src/keystore/keystore.ts @@ -1,11 +1,15 @@ import { Cluster, Redis } from "ioredis"; +import { Knex } from "knex"; import { buildRedisFromConfig, TRedisConfigKeys } from "@app/lib/config/redis"; import { pgAdvisoryLockHashText } from "@app/lib/crypto/hashtext"; import { applyJitter } from "@app/lib/dates"; import { delay as delayMs } from "@app/lib/delay"; +import { ms } from "@app/lib/ms"; import { ExecutionResult, Redlock, Settings } from "@app/lib/red-lock"; +import { TKeyValueStoreDALFactory } from "./key-value-store-dal"; + export const PgSqlLock = { BootUpMigration: 2023, SuperAdminInit: 2024, @@ -16,6 +20,9 @@ export const PgSqlLock = { CreateProject: (orgId: string) => pgAdvisoryLockHashText(`create-project:${orgId}`), CreateFolder: (envId: string, projectId: string) => pgAdvisoryLockHashText(`create-folder:${envId}-${projectId}`), SshInit: (projectId: string) => pgAdvisoryLockHashText(`ssh-bootstrap:${projectId}`), + InstanceRelayConfigInit: () => pgAdvisoryLockHashText("instance-relay-config-init"), + OrgGatewayV2Init: (orgId: string) => pgAdvisoryLockHashText(`org-gateway-v2-init:${orgId}`), + OrgRelayConfigInit: (orgId: string) => 
pgAdvisoryLockHashText(`org-relay-config-init:${orgId}`), IdentityLogin: (identityId: string, nonce: string) => pgAdvisoryLockHashText(`identity-login:${identityId}:${nonce}`) } as const; @@ -95,13 +102,17 @@ export type TKeyStoreFactory = { deleteItemsByKeyIn: (keys: string[]) => Promise; deleteItems: (arg: TDeleteItems) => Promise; incrementBy: (key: string, value: number) => Promise; + getKeysByPattern: (pattern: string, limit?: number) => Promise; + // pg + pgIncrementBy: (key: string, dto: { incr?: number; expiry?: string; tx?: Knex }) => Promise; + pgGetIntItem: (key: string, prefix?: string) => Promise; + // locks acquireLock( resources: string[], duration: number, settings?: Partial ): Promise<{ release: () => Promise }>; waitTillReady: ({ key, waitingCb, keyCheckCb, waitIteration, delay, jitter }: TWaitTillReady) => Promise; - getKeysByPattern: (pattern: string, limit?: number) => Promise; }; const pickPrimaryOrSecondaryRedis = (primary: Redis | Cluster, secondaries?: Array) => { @@ -114,7 +125,10 @@ interface TKeyStoreFactoryDTO extends TRedisConfigKeys { REDIS_READ_REPLICAS?: { host: string; port: number }[]; } -export const keyStoreFactory = (redisConfigKeys: TKeyStoreFactoryDTO): TKeyStoreFactory => { +export const keyStoreFactory = ( + redisConfigKeys: TKeyStoreFactoryDTO, + keyValueStoreDAL: TKeyValueStoreDALFactory +): TKeyStoreFactory => { const primaryRedis = buildRedisFromConfig(redisConfigKeys); const redisReadReplicas = redisConfigKeys.REDIS_READ_REPLICAS?.map((el) => { if (redisConfigKeys.REDIS_URL) { @@ -189,29 +203,6 @@ export const keyStoreFactory = (redisConfigKeys: TKeyStoreFactoryDTO): TKeyStore const setExpiry = async (key: string, expiryInSeconds: number) => primaryRedis.expire(key, expiryInSeconds); - const waitTillReady = async ({ - key, - waitingCb, - keyCheckCb, - waitIteration = 10, - delay = 1000, - jitter = 200 - }: TWaitTillReady) => { - let attempts = 0; - let isReady = keyCheckCb(await getItem(key)); - while (!isReady) { - if 
(attempts > waitIteration) return; - // eslint-disable-next-line - await new Promise((resolve) => { - waitingCb?.(); - setTimeout(resolve, Math.max(0, applyJitter(delay, jitter))); - }); - attempts += 1; - // eslint-disable-next-line - isReady = keyCheckCb(await getItem(key)); - } - }; - const getKeysByPattern = async (pattern: string, limit?: number) => { let cursor = "0"; const allKeys: string[] = []; @@ -236,6 +227,37 @@ export const keyStoreFactory = (redisConfigKeys: TKeyStoreFactoryDTO): TKeyStore return allKeys; }; + const pgIncrementBy: TKeyStoreFactory["pgIncrementBy"] = async (key, { incr = 1, tx, expiry }) => { + const expiresAt = expiry ? new Date(Date.now() + ms(expiry)) : undefined; + return keyValueStoreDAL.incrementBy(key, { incr, expiresAt, tx }); + }; + + const pgGetIntItem = async (key: string, prefix?: string) => + keyValueStoreDAL.findOneInt(prefix ? `${prefix}:${key}` : key); + + const waitTillReady = async ({ + key, + waitingCb, + keyCheckCb, + waitIteration = 10, + delay = 1000, + jitter = 200 + }: TWaitTillReady) => { + let attempts = 0; + let isReady = keyCheckCb(await getItem(key)); + while (!isReady) { + if (attempts > waitIteration) return; + // eslint-disable-next-line + await new Promise((resolve) => { + waitingCb?.(); + setTimeout(resolve, Math.max(0, applyJitter(delay, jitter))); + }); + attempts += 1; + // eslint-disable-next-line + isReady = keyCheckCb(await getItem(key)); + } + }; + return { setItem, getItem, @@ -250,6 +272,8 @@ export const keyStoreFactory = (redisConfigKeys: TKeyStoreFactoryDTO): TKeyStore waitTillReady, getKeysByPattern, deleteItemsByKeyIn, - getItems + getItems, + pgGetIntItem, + pgIncrementBy }; }; diff --git a/backend/src/keystore/memory.ts b/backend/src/keystore/memory.ts index cf9ba83bd8..2f9b77cede 100644 --- a/backend/src/keystore/memory.ts +++ b/backend/src/keystore/memory.ts @@ -53,6 +53,15 @@ export const inMemoryKeyStore = (): TKeyStoreFactory => { } return null; }, + pgGetIntItem: async (key) => { 
+ const value = store[key]; + if (typeof value === "number") { + return Number(value); + } + }, + pgIncrementBy: async () => { + return 1; + }, incrementBy: async () => { return 1; }, diff --git a/backend/src/lib/config/env.ts b/backend/src/lib/config/env.ts index c4170aac30..da0dd61a78 100644 --- a/backend/src/lib/config/env.ts +++ b/backend/src/lib/config/env.ts @@ -259,6 +259,8 @@ const envSchema = z GATEWAY_RELAY_REALM: zpStr(z.string().optional()), GATEWAY_RELAY_AUTH_SECRET: zpStr(z.string().optional()), + RELAY_AUTH_SECRET: zpStr(z.string().optional()), + DYNAMIC_SECRET_ALLOW_INTERNAL_IP: zodStrBool.default("false"), DYNAMIC_SECRET_AWS_ACCESS_KEY_ID: zpStr(z.string().optional()).default( process.env.INF_APP_CONNECTION_AWS_ACCESS_KEY_ID @@ -410,6 +412,7 @@ const envSchema = z Boolean(data.INF_APP_CONNECTION_GITHUB_RADAR_APP_CLIENT_ID) && Boolean(data.INF_APP_CONNECTION_GITHUB_RADAR_APP_CLIENT_SECRET) && Boolean(data.INF_APP_CONNECTION_GITHUB_RADAR_APP_WEBHOOK_SECRET), + isSecondaryInstance: Boolean(data.INFISICAL_PRIMARY_INSTANCE_URL), isHsmConfigured: Boolean(data.HSM_LIB_PATH) && Boolean(data.HSM_PIN) && Boolean(data.HSM_KEY_LABEL) && data.HSM_SLOT !== undefined, samlDefaultOrgSlug: data.DEFAULT_SAML_ORG_SLUG, diff --git a/backend/src/lib/crypto/cryptography/crypto.ts b/backend/src/lib/crypto/cryptography/crypto.ts index 05ea315cd5..45c7a19862 100644 --- a/backend/src/lib/crypto/cryptography/crypto.ts +++ b/backend/src/lib/crypto/cryptography/crypto.ts @@ -424,7 +424,8 @@ const cryptographyFactory = () => { constants: crypto.constants, X509Certificate: crypto.X509Certificate, KeyObject: crypto.KeyObject, - Hash: crypto.Hash + Hash: crypto.Hash, + timingSafeEqual: crypto.timingSafeEqual } }; }; diff --git a/backend/src/lib/gateway-v2/gateway-v2.ts b/backend/src/lib/gateway-v2/gateway-v2.ts new file mode 100644 index 0000000000..e6e873f113 --- /dev/null +++ b/backend/src/lib/gateway-v2/gateway-v2.ts @@ -0,0 +1,281 @@ +import net from "node:net"; +import tls 
from "node:tls"; + +import axios from "axios"; +import https from "https"; + +import { verifyHostInputValidity } from "@app/ee/services/dynamic-secret/dynamic-secret-fns"; +import { splitPemChain } from "@app/services/certificate/certificate-fns"; + +import { BadRequestError } from "../errors"; +import { GatewayProxyProtocol } from "../gateway/types"; +import { logger } from "../logger"; + +interface IGatewayRelayServer { + server: net.Server; + port: number; + cleanup: () => Promise; + getRelayError: () => string; +} + +const createRelayConnection = async ({ + relayHost, + clientCertificate, + clientPrivateKey, + serverCertificateChain +}: { + relayHost: string; + clientCertificate: string; + clientPrivateKey: string; + serverCertificateChain: string; +}): Promise => { + const [targetHost] = await verifyHostInputValidity(relayHost); + const [, portStr] = relayHost.split(":"); + const port = parseInt(portStr, 10) || 8443; + + const serverCAs = splitPemChain(serverCertificateChain); + const tlsOptions: tls.ConnectionOptions = { + host: targetHost, + servername: relayHost, + port, + cert: clientCertificate, + key: clientPrivateKey, + ca: serverCAs, + minVersion: "TLSv1.2", + rejectUnauthorized: true + }; + + return new Promise((resolve, reject) => { + try { + const socket = tls.connect(tlsOptions, () => { + logger.info("Relay TLS connection established successfully"); + resolve(socket); + }); + + socket.on("error", (err: Error) => { + reject(new Error(`TLS connection error: ${err.message}`)); + }); + + socket.on("close", (hadError: boolean) => { + if (hadError) { + logger.error("TLS connection closed with error"); + } + }); + + socket.on("timeout", () => { + logger.error(`TLS connection timeout after 30 seconds`); + socket.destroy(); + reject(new Error("TLS connection timeout")); + }); + + socket.setTimeout(30000); + } catch (error: unknown) { + reject(new Error(`Failed to create TLS connection: ${error instanceof Error ? 
error.message : String(error)}`)); + } + }); +}; + +const createGatewayConnection = async ( + relayConn: net.Socket, + gateway: { clientCertificate: string; clientPrivateKey: string; serverCertificateChain: string }, + protocol: GatewayProxyProtocol +): Promise => { + const protocolToAlpn = { + [GatewayProxyProtocol.Http]: "infisical-http-proxy", + [GatewayProxyProtocol.Tcp]: "infisical-tcp-proxy", + [GatewayProxyProtocol.Ping]: "infisical-ping" + }; + + const tlsOptions: tls.ConnectionOptions = { + socket: relayConn, + cert: gateway.clientCertificate, + key: gateway.clientPrivateKey, + ca: splitPemChain(gateway.serverCertificateChain), + minVersion: "TLSv1.2", + maxVersion: "TLSv1.3", + rejectUnauthorized: true, + ALPNProtocols: [protocolToAlpn[protocol]] + }; + + return new Promise((resolve, reject) => { + try { + const gatewaySocket = tls.connect(tlsOptions, () => { + if (!gatewaySocket.authorized) { + const error = gatewaySocket.authorizationError; + gatewaySocket.destroy(); + reject(new Error(`Gateway TLS authorization failed: ${error?.message}`)); + return; + } + + logger.info("Gateway mTLS connection established successfully"); + resolve(gatewaySocket); + }); + + gatewaySocket.on("error", (err: Error) => { + reject(new Error(`Failed to establish gateway mTLS: ${err.message}`)); + }); + + gatewaySocket.setTimeout(30000); + gatewaySocket.on("timeout", () => { + gatewaySocket.destroy(); + reject(new Error("Gateway connection timeout")); + }); + } catch (error: unknown) { + reject( + new Error(`Failed to create gateway TLS connection: ${error instanceof Error ? 
error.message : String(error)}`) + ); + } + }); +}; + +const setupRelayServer = async ({ + protocol, + relayHost, + gateway, + relay, + httpsAgent +}: { + protocol: GatewayProxyProtocol; + relayHost: string; + gateway: { clientCertificate: string; clientPrivateKey: string; serverCertificateChain: string }; + relay: { clientCertificate: string; clientPrivateKey: string; serverCertificateChain: string }; + httpsAgent?: https.Agent; +}): Promise => { + const relayErrorMsg: string[] = []; + + return new Promise((resolve, reject) => { + const server = net.createServer(); + + server.on("connection", (clientConn) => { + void (async () => { + try { + clientConn.setKeepAlive(true, 30000); + clientConn.setNoDelay(true); + + // Stage 1: Connect to relay with TLS + const relayConn = await createRelayConnection({ + relayHost, + clientCertificate: relay.clientCertificate, + clientPrivateKey: relay.clientPrivateKey, + serverCertificateChain: relay.serverCertificateChain + }); + + // Stage 2: Establish mTLS connection to gateway through the relay + const gatewayConn = await createGatewayConnection(relayConn, gateway, protocol); + + // Send protocol-specific configuration for HTTP requests + if (protocol === GatewayProxyProtocol.Http) { + if (httpsAgent) { + const agentOptions = httpsAgent.options; + if (agentOptions && agentOptions.ca) { + const caCert = Array.isArray(agentOptions.ca) ? 
agentOptions.ca.join("\n") : agentOptions.ca; + const caB64 = Buffer.from(caCert as string).toString("base64"); + const rejectUnauthorized = agentOptions.rejectUnauthorized !== false; + + const configCommand = `CONFIG ca=${caB64} verify=${rejectUnauthorized}\n`; + gatewayConn.write(Buffer.from(configCommand)); + } else { + // Send empty config to signal end of configuration + gatewayConn.write(Buffer.from("CONFIG\n")); + } + } else { + // Send empty config to signal end of configuration + gatewayConn.write(Buffer.from("CONFIG\n")); + } + } + + // Bidirectional data forwarding + clientConn.pipe(gatewayConn); + gatewayConn.pipe(clientConn); + + // Handle connection closure + clientConn.on("close", () => { + relayConn.destroy(); + gatewayConn.destroy(); + }); + + relayConn.on("close", () => { + clientConn.destroy(); + gatewayConn.destroy(); + }); + + gatewayConn.on("close", () => { + clientConn.destroy(); + relayConn.destroy(); + }); + } catch (err) { + const errorMsg = err instanceof Error ? err.message : String(err); + relayErrorMsg.push(errorMsg); + clientConn.destroy(); + } + })(); + }); + + server.on("error", (err) => { + reject(err); + }); + + server.listen(0, () => { + const address = server.address(); + if (!address || typeof address === "string") { + server.close(); + reject(new Error("Failed to get server port")); + return; + } + + resolve({ + server, + port: address.port, + cleanup: async () => { + try { + server.close(); + } catch (err) { + logger.debug("Error closing server:", err instanceof Error ? 
err.message : String(err)); + } + }, + getRelayError: () => relayErrorMsg.join(",") + }); + }); + }); +}; + +export const withGatewayV2Proxy = async ( + callback: (port: number) => Promise, + options: { + protocol: GatewayProxyProtocol; + relayHost: string; + gateway: { clientCertificate: string; clientPrivateKey: string; serverCertificateChain: string }; + relay: { clientCertificate: string; clientPrivateKey: string; serverCertificateChain: string }; + httpsAgent?: https.Agent; + } +): Promise => { + const { protocol, relayHost, gateway, relay, httpsAgent } = options; + + const { port, cleanup, getRelayError } = await setupRelayServer({ + protocol, + relayHost, + gateway, + relay, + httpsAgent + }); + + try { + // Execute the callback with the allocated port + return await callback(port); + } catch (err) { + const relayErrorMessage = getRelayError(); + if (relayErrorMessage) { + logger.error("Relay error:", relayErrorMessage); + } + logger.error("Gateway error:", err instanceof Error ? err.message : String(err)); + let errorMessage = relayErrorMessage || (err instanceof Error ? 
err.message : String(err)); + if (axios.isAxiosError(err) && (err.response?.data as { message?: string })?.message) { + errorMessage = (err.response?.data as { message: string }).message; + } + + throw new BadRequestError({ message: errorMessage }); + } finally { + // Ensure cleanup happens regardless of success or failure + await cleanup(); + } +}; diff --git a/backend/src/lib/gateway/types.ts b/backend/src/lib/gateway/types.ts index 8552fbf544..e9b8b71141 100644 --- a/backend/src/lib/gateway/types.ts +++ b/backend/src/lib/gateway/types.ts @@ -6,7 +6,8 @@ export type TGatewayTlsOptions = { ca: string; cert: string; key: string }; export enum GatewayProxyProtocol { Http = "http", - Tcp = "tcp" + Tcp = "tcp", + Ping = "ping" } export enum GatewayHttpProxyActions { diff --git a/backend/src/lib/knex/index.ts b/backend/src/lib/knex/index.ts index 090df561a3..499e7cb26d 100644 --- a/backend/src/lib/knex/index.ts +++ b/backend/src/lib/knex/index.ts @@ -250,12 +250,12 @@ export const ormify = ( .returning("*"); if ($incr) { Object.entries($incr).forEach(([incrementField, incrementValue]) => { - void query.increment(incrementField, incrementValue); + void query.increment(incrementField, incrementValue as number); }); } if ($decr) { Object.entries($decr).forEach(([incrementField, incrementValue]) => { - void query.decrement(incrementField, incrementValue); + void query.decrement(incrementField, incrementValue as number); }); } const [docs] = await query; @@ -273,12 +273,12 @@ export const ormify = ( // increment and decrement operation in update if ($incr) { Object.entries($incr).forEach(([incrementField, incrementValue]) => { - void query.increment(incrementField, incrementValue); + void query.increment(incrementField, incrementValue as number); }); } if ($decr) { Object.entries($decr).forEach(([incrementField, incrementValue]) => { - void query.increment(incrementField, incrementValue); + void query.decrement(incrementField, incrementValue as number); }); } return (await 
query) as Tables[Tname]["base"][]; diff --git a/backend/src/main.ts b/backend/src/main.ts index 8af47eb0b3..7be9f43ec5 100644 --- a/backend/src/main.ts +++ b/backend/src/main.ts @@ -5,6 +5,7 @@ import "./lib/telemetry/instrumentation"; import dotenv from "dotenv"; import { initializeHsmModule } from "@app/ee/services/hsm/hsm-fns"; +import { keyValueStoreDALFactory } from "@app/keystore/key-value-store-dal"; import { runMigrations } from "./auto-start-migrations"; import { initAuditLogDbConnection, initDbConnection } from "./db"; @@ -54,7 +55,8 @@ const run = async () => { await queue.initialize(); - const keyStore = keyStoreFactory(envConfig); + const keyValueStoreDAL = keyValueStoreDALFactory(db); + const keyStore = keyStoreFactory(envConfig, keyValueStoreDAL); const redis = buildRedisFromConfig(envConfig); const hsmModule = initializeHsmModule(envConfig); diff --git a/backend/src/server/plugins/auth/inject-identity.ts b/backend/src/server/plugins/auth/inject-identity.ts index 2f128dda11..1bff11879f 100644 --- a/backend/src/server/plugins/auth/inject-identity.ts +++ b/backend/src/server/plugins/auth/inject-identity.ts @@ -122,6 +122,11 @@ export const injectIdentity = fp( return; } + // Authentication is handled on a route-level + if (req.url === "/api/v1/relays/register-instance-relay") { + return; + } + // Authentication is handled on a route-level here. 
if (req.url.includes("/api/v1/workflow-integrations/microsoft-teams/message-endpoint")) { return; diff --git a/backend/src/server/routes/index.ts b/backend/src/server/routes/index.ts index 64c9b33892..eccad29565 100644 --- a/backend/src/server/routes/index.ts +++ b/backend/src/server/routes/index.ts @@ -38,6 +38,9 @@ import { externalKmsServiceFactory } from "@app/ee/services/external-kms/externa import { gatewayDALFactory } from "@app/ee/services/gateway/gateway-dal"; import { gatewayServiceFactory } from "@app/ee/services/gateway/gateway-service"; import { orgGatewayConfigDALFactory } from "@app/ee/services/gateway/org-gateway-config-dal"; +import { gatewayV2DalFactory } from "@app/ee/services/gateway-v2/gateway-v2-dal"; +import { gatewayV2ServiceFactory } from "@app/ee/services/gateway-v2/gateway-v2-service"; +import { orgGatewayConfigV2DalFactory } from "@app/ee/services/gateway-v2/org-gateway-config-v2-dal"; import { githubOrgSyncDALFactory } from "@app/ee/services/github-org-sync/github-org-sync-dal"; import { githubOrgSyncServiceFactory } from "@app/ee/services/github-org-sync/github-org-sync-service"; import { groupDALFactory } from "@app/ee/services/group/group-dal"; @@ -72,6 +75,10 @@ import { projectUserAdditionalPrivilegeDALFactory } from "@app/ee/services/proje import { projectUserAdditionalPrivilegeServiceFactory } from "@app/ee/services/project-user-additional-privilege/project-user-additional-privilege-service"; import { rateLimitDALFactory } from "@app/ee/services/rate-limit/rate-limit-dal"; import { rateLimitServiceFactory } from "@app/ee/services/rate-limit/rate-limit-service"; +import { instanceRelayConfigDalFactory } from "@app/ee/services/relay/instance-relay-config-dal"; +import { orgRelayConfigDalFactory } from "@app/ee/services/relay/org-relay-config-dal"; +import { relayDalFactory } from "@app/ee/services/relay/relay-dal"; +import { relayServiceFactory } from "@app/ee/services/relay/relay-service"; import { samlConfigDALFactory } from 
"@app/ee/services/saml-config/saml-config-dal"; import { samlConfigServiceFactory } from "@app/ee/services/saml-config/saml-config-service"; import { scimDALFactory } from "@app/ee/services/scim/scim-dal"; @@ -123,6 +130,7 @@ import { sshHostGroupMembershipDALFactory } from "@app/ee/services/ssh-host-grou import { sshHostGroupServiceFactory } from "@app/ee/services/ssh-host-group/ssh-host-group-service"; import { trustedIpDALFactory } from "@app/ee/services/trusted-ip/trusted-ip-dal"; import { trustedIpServiceFactory } from "@app/ee/services/trusted-ip/trusted-ip-service"; +import { keyValueStoreDALFactory } from "@app/keystore/key-value-store-dal"; import { TKeyStoreFactory } from "@app/keystore/keystore"; import { getConfig, TEnvConfig } from "@app/lib/config/env"; import { crypto } from "@app/lib/crypto/cryptography"; @@ -507,6 +515,7 @@ export const registerRoutes = async ( const microsoftTeamsIntegrationDAL = microsoftTeamsIntegrationDALFactory(db); const projectMicrosoftTeamsConfigDAL = projectMicrosoftTeamsConfigDALFactory(db); const secretScanningV2DAL = secretScanningV2DALFactory(db); + const keyValueStoreDAL = keyValueStoreDALFactory(db); const eventBusService = eventBusFactory(server.redis); const sseService = sseServiceFactory(eventBusService, server.redis); @@ -643,6 +652,7 @@ export const registerRoutes = async ( const folderTreeCheckpointDAL = folderTreeCheckpointDALFactory(db); const folderCommitDAL = folderCommitDALFactory(db); const folderTreeCheckpointResourcesDAL = folderTreeCheckpointResourcesDALFactory(db); + const folderCommitQueueService = folderCommitQueueServiceFactory({ queueService, folderTreeCheckpointDAL, @@ -739,6 +749,7 @@ export const registerRoutes = async ( const userService = userServiceFactory({ userDAL, + orgDAL, orgMembershipDAL, tokenService, permissionService, @@ -807,6 +818,7 @@ export const registerRoutes = async ( groupDAL, orgBotDAL, oidcConfigDAL, + ldapConfigDAL, loginService, projectBotService, reminderService @@ 
-965,6 +977,13 @@ export const registerRoutes = async ( const pkiSubscriberDAL = pkiSubscriberDALFactory(db); const pkiTemplatesDAL = pkiTemplatesDALFactory(db); + const instanceRelayConfigDAL = instanceRelayConfigDalFactory(db); + const orgRelayConfigDAL = orgRelayConfigDalFactory(db); + const relayDAL = relayDalFactory(db); + const gatewayV2DAL = gatewayV2DalFactory(db); + + const orgGatewayConfigV2DAL = orgGatewayConfigV2DalFactory(db); + const certificateService = certificateServiceFactory({ certificateDAL, certificateBodyDAL, @@ -1083,6 +1102,23 @@ export const registerRoutes = async ( keyStore }); + const relayService = relayServiceFactory({ + instanceRelayConfigDAL, + orgRelayConfigDAL, + relayDAL, + kmsService + }); + + const gatewayV2Service = gatewayV2ServiceFactory({ + kmsService, + licenseService, + relayService, + orgGatewayConfigV2DAL, + gatewayV2DAL, + relayDAL, + permissionService + }); + const secretSyncQueue = secretSyncQueueFactory({ queueService, secretSyncDAL, @@ -1107,7 +1143,8 @@ export const registerRoutes = async ( resourceMetadataDAL, appConnectionDAL, licenseService, - gatewayService + gatewayService, + gatewayV2Service }); const secretQueueService = secretQueueFactory({ @@ -1531,6 +1568,7 @@ export const registerRoutes = async ( permissionService, licenseService }); + const identityUaService = identityUaServiceFactory({ identityOrgMembershipDAL, permissionService, @@ -1548,6 +1586,8 @@ export const registerRoutes = async ( permissionService, licenseService, gatewayService, + gatewayV2Service, + gatewayV2DAL, gatewayDAL, kmsService }); @@ -1644,8 +1684,10 @@ export const registerRoutes = async ( }); const dynamicSecretProviders = buildDynamicSecretProviders({ - gatewayService + gatewayService, + gatewayV2Service }); + const dynamicSecretQueueService = dynamicSecretLeaseQueueServiceFactory({ queueService, dynamicSecretLeaseDAL, @@ -1665,6 +1707,7 @@ export const registerRoutes = async ( licenseService, kmsService, gatewayDAL, + 
gatewayV2DAL, resourceMetadataDAL }); @@ -1681,6 +1724,7 @@ export const registerRoutes = async ( userDAL, identityDAL }); + const dailyResourceCleanUp = dailyResourceCleanUpQueueServiceFactory({ auditLogDAL, queueService, @@ -1693,7 +1737,8 @@ export const registerRoutes = async ( identityUniversalAuthClientSecretDAL: identityUaClientSecretDAL, serviceTokenService, orgService, - userNotificationDAL + userNotificationDAL, + keyValueStoreDAL }); const dailyReminderQueueService = dailyReminderQueueServiceFactory({ @@ -1790,7 +1835,9 @@ export const registerRoutes = async ( kmsService, licenseService, gatewayService, - gatewayDAL + gatewayV2Service, + gatewayDAL, + gatewayV2DAL }); const secretSyncService = secretSyncServiceFactory({ @@ -1889,7 +1936,8 @@ export const registerRoutes = async ( secretQueueService, queueService, appConnectionDAL, - gatewayService + gatewayService, + gatewayV2Service }); const certificateAuthorityService = certificateAuthorityServiceFactory({ @@ -2115,6 +2163,8 @@ export const registerRoutes = async ( kmip: kmipService, kmipOperation: kmipOperationService, gateway: gatewayService, + relay: relayService, + gatewayV2: gatewayV2Service, secretRotationV2: secretRotationV2Service, microsoftTeams: microsoftTeamsService, assumePrivileges: assumePrivilegeService, diff --git a/backend/src/server/routes/v2/user-router.ts b/backend/src/server/routes/v2/user-router.ts index f416fe8bb1..397967fa22 100644 --- a/backend/src/server/routes/v2/user-router.ts +++ b/backend/src/server/routes/v2/user-router.ts @@ -129,6 +129,63 @@ export const registerUserRouter = async (server: FastifyZodProvider) => { } }); + server.route({ + method: "POST", + url: "/me/email-change/otp", + config: { + rateLimit: smtpRateLimit({ + keyGenerator: (req) => req.permission.id + }) + }, + schema: { + body: z.object({ + newEmail: z.string().email().trim() + }), + response: { + 200: z.object({ + success: z.boolean(), + message: z.string() + }) + } + }, + preHandler: 
verifyAuth([AuthMode.JWT], { requireOrg: false }), + handler: async (req) => { + const result = await server.services.user.requestEmailChangeOTP({ + userId: req.permission.id, + newEmail: req.body.newEmail + }); + return result; + } + }); + + server.route({ + method: "PATCH", + url: "/me/email", + config: { + rateLimit: writeLimit + }, + schema: { + body: z.object({ + newEmail: z.string().email().trim(), + otpCode: z.string().trim().length(6) + }), + response: { + 200: z.object({ + user: UsersSchema + }) + } + }, + preHandler: verifyAuth([AuthMode.JWT], { requireOrg: false }), + handler: async (req) => { + const user = await server.services.user.updateUserEmail({ + userId: req.permission.id, + newEmail: req.body.newEmail, + otpCode: req.body.otpCode + }); + return { user }; + } + }); + server.route({ method: "GET", url: "/me/organizations", diff --git a/backend/src/services/app-connection/app-connection-fns.ts b/backend/src/services/app-connection/app-connection-fns.ts index 94de51f1ed..5aacab1b1d 100644 --- a/backend/src/services/app-connection/app-connection-fns.ts +++ b/backend/src/services/app-connection/app-connection-fns.ts @@ -6,6 +6,7 @@ import { } from "@app/ee/services/app-connections/oci"; import { getOracleDBConnectionListItem, OracleDBConnectionMethod } from "@app/ee/services/app-connections/oracledb"; import { TGatewayServiceFactory } from "@app/ee/services/gateway/gateway-service"; +import { TGatewayV2ServiceFactory } from "@app/ee/services/gateway-v2/gateway-v2-service"; import { TLicenseServiceFactory } from "@app/ee/services/license/license-service"; import { crypto } from "@app/lib/crypto/cryptography"; import { BadRequestError } from "@app/lib/errors"; @@ -219,7 +220,8 @@ export const decryptAppConnectionCredentials = async ({ export const validateAppConnectionCredentials = async ( appConnection: TAppConnectionConfig, - gatewayService: Pick + gatewayService: Pick, + gatewayV2Service: Pick ): Promise => { const 
VALIDATE_APP_CONNECTION_CREDENTIALS_MAP: Record = { [AppConnection.AWS]: validateAwsConnectionCredentials as TAppConnectionCredentialsValidator, @@ -264,7 +266,7 @@ export const validateAppConnectionCredentials = async ( [AppConnection.Netlify]: validateNetlifyConnectionCredentials as TAppConnectionCredentialsValidator }; - return VALIDATE_APP_CONNECTION_CREDENTIALS_MAP[appConnection.app](appConnection, gatewayService); + return VALIDATE_APP_CONNECTION_CREDENTIALS_MAP[appConnection.app](appConnection, gatewayService, gatewayV2Service); }; export const getAppConnectionMethodName = (method: TAppConnection["method"]) => { diff --git a/backend/src/services/app-connection/app-connection-service.ts b/backend/src/services/app-connection/app-connection-service.ts index 869364853d..f5654d2fb9 100644 --- a/backend/src/services/app-connection/app-connection-service.ts +++ b/backend/src/services/app-connection/app-connection-service.ts @@ -5,6 +5,8 @@ import { ociConnectionService } from "@app/ee/services/app-connections/oci/oci-c import { ValidateOracleDBConnectionCredentialsSchema } from "@app/ee/services/app-connections/oracledb"; import { TGatewayDALFactory } from "@app/ee/services/gateway/gateway-dal"; import { TGatewayServiceFactory } from "@app/ee/services/gateway/gateway-service"; +import { TGatewayV2DALFactory } from "@app/ee/services/gateway-v2/gateway-v2-dal"; +import { TGatewayV2ServiceFactory } from "@app/ee/services/gateway-v2/gateway-v2-service"; import { TLicenseServiceFactory } from "@app/ee/services/license/license-service"; import { OrgPermissionAppConnectionActions, @@ -110,7 +112,9 @@ export type TAppConnectionServiceFactoryDep = { kmsService: Pick; licenseService: Pick; gatewayService: Pick; + gatewayV2Service: Pick; gatewayDAL: Pick; + gatewayV2DAL: Pick; }; export type TAppConnectionServiceFactory = ReturnType; @@ -162,7 +166,9 @@ export const appConnectionServiceFactory = ({ kmsService, licenseService, gatewayService, - gatewayDAL + gatewayV2Service, + 
gatewayDAL, + gatewayV2DAL }: TAppConnectionServiceFactoryDep) => { const listAppConnectionsByOrg = async (actor: OrgServiceActor, app?: AppConnection) => { const { permission } = await permissionService.getOrgPermission( @@ -266,7 +272,8 @@ export const appConnectionServiceFactory = ({ ); const [gateway] = await gatewayDAL.find({ id: gatewayId, orgId: actor.orgId }); - if (!gateway) { + const [gatewayV2] = await gatewayV2DAL.find({ id: gatewayId, orgId: actor.orgId }); + if (!gateway && !gatewayV2) { throw new NotFoundError({ message: `Gateway with ID ${gatewayId} not found for org` }); @@ -288,7 +295,8 @@ export const appConnectionServiceFactory = ({ orgId: actor.orgId, gatewayId } as TAppConnectionConfig, - gatewayService + gatewayService, + gatewayV2Service ); try { @@ -321,7 +329,8 @@ export const appConnectionServiceFactory = ({ gatewayId } as TAppConnectionConfig, (platformCredentials) => createConnection(platformCredentials), - gatewayService + gatewayService, + gatewayV2Service ); } else { connection = await createConnection(validatedCredentials); @@ -377,7 +386,8 @@ export const appConnectionServiceFactory = ({ if (gatewayId) { const [gateway] = await gatewayDAL.find({ id: gatewayId, orgId: actor.orgId }); - if (!gateway) { + const [gatewayV2] = await gatewayV2DAL.find({ id: gatewayId, orgId: actor.orgId }); + if (!gateway && !gatewayV2) { throw new NotFoundError({ message: `Gateway with ID ${gatewayId} not found for org` }); @@ -417,7 +427,8 @@ export const appConnectionServiceFactory = ({ method, gatewayId } as TAppConnectionConfig, - gatewayService + gatewayService, + gatewayV2Service ); if (!updatedCredentials) @@ -458,7 +469,8 @@ export const appConnectionServiceFactory = ({ gatewayId } as TAppConnectionConfig, (platformCredentials) => updateConnection(platformCredentials), - gatewayService + gatewayService, + gatewayV2Service ); } else { updatedConnection = await updateConnection(updatedCredentials); @@ -588,7 +600,7 @@ export const 
appConnectionServiceFactory = ({ deleteAppConnection, connectAppConnectionById, listAvailableAppConnectionsForUser, - github: githubConnectionService(connectAppConnectionById, gatewayService), + github: githubConnectionService(connectAppConnectionById, gatewayService, gatewayV2Service), githubRadar: githubRadarConnectionService(connectAppConnectionById), gcp: gcpConnectionService(connectAppConnectionById), databricks: databricksConnectionService(connectAppConnectionById, appConnectionDAL, kmsService), diff --git a/backend/src/services/app-connection/app-connection-types.ts b/backend/src/services/app-connection/app-connection-types.ts index 43a1416971..e4af79926f 100644 --- a/backend/src/services/app-connection/app-connection-types.ts +++ b/backend/src/services/app-connection/app-connection-types.ts @@ -10,6 +10,7 @@ import { TValidateOracleDBConnectionCredentialsSchema } from "@app/ee/services/app-connections/oracledb"; import { TGatewayServiceFactory } from "@app/ee/services/gateway/gateway-service"; +import { TGatewayV2ServiceFactory } from "@app/ee/services/gateway-v2/gateway-v2-service"; import { TAppConnectionDALFactory } from "@app/services/app-connection/app-connection-dal"; import { TSqlConnectionConfig } from "@app/services/app-connection/shared/sql/sql-connection-types"; import { SecretSync } from "@app/services/secret-sync/secret-sync-enums"; @@ -411,13 +412,15 @@ export type TListAwsConnectionIamUsers = { export type TAppConnectionCredentialsValidator = ( appConnection: TAppConnectionConfig, - gatewayService: Pick + gatewayService: Pick, + gatewayV2Service: Pick ) => Promise; export type TAppConnectionTransitionCredentialsToPlatform = ( appConnection: TAppConnectionConfig, callback: (credentials: TAppConnection["credentials"]) => Promise, - gatewayService: Pick + gatewayService: Pick, + gatewayV2Service: Pick ) => Promise; export type TAppConnectionBaseConfig = { diff --git a/backend/src/services/app-connection/github/github-connection-fns.ts 
b/backend/src/services/app-connection/github/github-connection-fns.ts index 05e6fdda34..5cbf5c60d6 100644 --- a/backend/src/services/app-connection/github/github-connection-fns.ts +++ b/backend/src/services/app-connection/github/github-connection-fns.ts @@ -4,11 +4,13 @@ import RE2 from "re2"; import { verifyHostInputValidity } from "@app/ee/services/dynamic-secret/dynamic-secret-fns"; import { TGatewayServiceFactory } from "@app/ee/services/gateway/gateway-service"; +import { TGatewayV2ServiceFactory } from "@app/ee/services/gateway-v2/gateway-v2-service"; import { getConfig } from "@app/lib/config/env"; import { request as httpRequest } from "@app/lib/config/request"; import { crypto } from "@app/lib/crypto"; import { BadRequestError, ForbiddenRequestError, InternalServerError } from "@app/lib/errors"; import { GatewayProxyProtocol, withGatewayProxy } from "@app/lib/gateway"; +import { withGatewayV2Proxy } from "@app/lib/gateway-v2/gateway-v2"; import { logger } from "@app/lib/logger"; import { blockLocalAndPrivateIpAddresses } from "@app/lib/validator"; import { getAppConnectionMethodName } from "@app/services/app-connection/app-connection-fns"; @@ -49,6 +51,7 @@ export const getGitHubInstanceApiUrl = async (config: { export const requestWithGitHubGateway = async ( appConnection: { gatewayId?: string | null }, gatewayService: Pick, + gatewayV2Service: Pick, requestConfig: AxiosRequestConfig ): Promise> => { const { gatewayId } = appConnection; @@ -63,6 +66,52 @@ export const requestWithGitHubGateway = async ( await blockLocalAndPrivateIpAddresses(url.toString()); const [targetHost] = await verifyHostInputValidity(url.host, true); + const gatewayConnectionDetails = await gatewayV2Service.getPlatformConnectionDetailsByGatewayId({ + gatewayId, + targetHost, + targetPort: 443 + }); + + if (gatewayConnectionDetails) { + return withGatewayV2Proxy( + async (proxyPort) => { + const httpsAgent = new https.Agent({ + servername: targetHost + }); + + url.protocol = 
"https:"; + url.host = `localhost:${proxyPort}`; + + const finalRequestConfig: AxiosRequestConfig = { + ...requestConfig, + url: url.toString(), + httpsAgent, + headers: { + ...requestConfig.headers, + Host: targetHost + } + }; + + try { + return await httpRequest.request(finalRequestConfig); + } catch (error) { + const axiosError = error as AxiosError; + logger.error( + { message: axiosError.message, data: axiosError.response?.data }, + "Error during GitHub gateway request:" + ); + throw error; + } + }, + { + protocol: GatewayProxyProtocol.Tcp, + relayHost: gatewayConnectionDetails.relayHost, + gateway: gatewayConnectionDetails.gateway, + relay: gatewayConnectionDetails.relay + } + ); + } + const relayDetails = await gatewayService.fnGetGatewayClientTlsByGatewayId(gatewayId); const [relayHost, relayPort] = relayDetails.relayAddress.split(":"); @@ -115,7 +164,8 @@ export const requestWithGitHubGateway = async ( export const getGitHubAppAuthToken = async ( appConnection: TGitHubConnection, - gatewayService: Pick + gatewayService: Pick, + gatewayV2Service: Pick ) => { const appCfg = getConfig(); const appId = appCfg.INF_APP_CONNECTION_GITHUB_APP_ID; @@ -151,6 +201,7 @@ export const getGitHubAppAuthToken = async ( const response = await requestWithGitHubGateway<{ token: string; expires_at: string }>( appConnection, gatewayService, + gatewayV2Service, { url: `https://${apiBaseUrl}/app/installations/${installationId}/access_tokens`, method: "POST", @@ -191,6 +242,7 @@ function extractNextPageUrl(linkHeader: string | undefined): string | null { export const makePaginatedGitHubRequest = async ( appConnection: TGitHubConnection, gatewayService: Pick, + gatewayV2Service: Pick, path: string, dataMapper?: (data: R) => T[] ): Promise => { @@ -199,7 +251,7 @@ export const makePaginatedGitHubRequest = async ( const token = method === GitHubConnectionMethod.OAuth ? 
credentials.accessToken - : await getGitHubAppAuthToken(appConnection, gatewayService); + : await getGitHubAppAuthToken(appConnection, gatewayService, gatewayV2Service); const baseUrl = `https://${await getGitHubInstanceApiUrl(appConnection)}${path}`; const initialUrlObj = new URL(baseUrl); @@ -209,15 +261,20 @@ export const makePaginatedGitHubRequest = async ( const maxIterations = 1000; // Make initial request to get link header - const firstResponse: AxiosResponse = await requestWithGitHubGateway(appConnection, gatewayService, { - url: initialUrlObj.toString(), - method: "GET", - headers: { - Accept: "application/vnd.github+json", - Authorization: `Bearer ${token}`, - "X-GitHub-Api-Version": "2022-11-28" + const firstResponse: AxiosResponse = await requestWithGitHubGateway( + appConnection, + gatewayService, + gatewayV2Service, + { + url: initialUrlObj.toString(), + method: "GET", + headers: { + Accept: "application/vnd.github+json", + Authorization: `Bearer ${token}`, + "X-GitHub-Api-Version": "2022-11-28" + } } - }); + ); const firstPageItems = dataMapper ? 
dataMapper(firstResponse.data) : (firstResponse.data as unknown as T[]); results = results.concat(firstPageItems); @@ -237,7 +294,7 @@ export const makePaginatedGitHubRequest = async ( pageUrlObj.searchParams.set("page", pageNum.toString()); pageRequests.push( - requestWithGitHubGateway(appConnection, gatewayService, { + requestWithGitHubGateway(appConnection, gatewayService, gatewayV2Service, { url: pageUrlObj.toString(), method: "GET", headers: { @@ -261,15 +318,20 @@ export const makePaginatedGitHubRequest = async ( while (url && i < maxIterations) { // eslint-disable-next-line no-await-in-loop - const response: AxiosResponse = await requestWithGitHubGateway(appConnection, gatewayService, { - url, - method: "GET", - headers: { - Accept: "application/vnd.github+json", - Authorization: `Bearer ${token}`, - "X-GitHub-Api-Version": "2022-11-28" + const response: AxiosResponse = await requestWithGitHubGateway( + appConnection, + gatewayService, + gatewayV2Service, + { + url, + method: "GET", + headers: { + Accept: "application/vnd.github+json", + Authorization: `Bearer ${token}`, + "X-GitHub-Api-Version": "2022-11-28" + } } - }); + ); const items = dataMapper ? 
dataMapper(response.data) : (response.data as unknown as T[]); results = results.concat(items); @@ -308,30 +370,39 @@ type GitHubEnvironment = { export const getGitHubRepositories = async ( appConnection: TGitHubConnection, - gatewayService: Pick + gatewayService: Pick, + gatewayV2Service: Pick ) => { if (appConnection.method === GitHubConnectionMethod.App) { return makePaginatedGitHubRequest( appConnection, gatewayService, + gatewayV2Service, "/installation/repositories", (data) => data.repositories ); } - const repos = await makePaginatedGitHubRequest(appConnection, gatewayService, "/user/repos"); + const repos = await makePaginatedGitHubRequest( + appConnection, + gatewayService, + gatewayV2Service, + "/user/repos" + ); + return repos.filter((repo) => repo.permissions?.admin); }; export const getGitHubOrganizations = async ( appConnection: TGitHubConnection, - gatewayService: Pick + gatewayService: Pick, + gatewayV2Service: Pick ) => { if (appConnection.method === GitHubConnectionMethod.App) { const installationRepositories = await makePaginatedGitHubRequest< GitHubRepository, { repositories: GitHubRepository[] } - >(appConnection, gatewayService, "/installation/repositories", (data) => data.repositories); + >(appConnection, gatewayService, gatewayV2Service, "/installation/repositories", (data) => data.repositories); const organizationMap: Record = {}; installationRepositories.forEach((repo) => { @@ -343,12 +414,13 @@ export const getGitHubOrganizations = async ( return Object.values(organizationMap); } - return makePaginatedGitHubRequest(appConnection, gatewayService, "/user/orgs"); + return makePaginatedGitHubRequest(appConnection, gatewayService, gatewayV2Service, "/user/orgs"); }; export const getGitHubEnvironments = async ( appConnection: TGitHubConnection, gatewayService: Pick, + gatewayV2Service: Pick, owner: string, repo: string ) => { @@ -356,6 +428,7 @@ export const getGitHubEnvironments = async ( return await makePaginatedGitHubRequest( appConnection, 
gatewayService, + gatewayV2Service, `/repos/${encodeURIComponent(owner)}/${encodeURIComponent(repo)}/environments`, (data) => data.environments ); @@ -383,7 +456,8 @@ export function isGithubErrorResponse(data: GithubTokenRespData): data is Github export const validateGitHubConnectionCredentials = async ( config: TGitHubConnectionConfig, - gatewayService: Pick + gatewayService: Pick, + gatewayV2Service: Pick ) => { const { credentials, method } = config; const { @@ -419,7 +493,7 @@ export const validateGitHubConnectionCredentials = async ( const host = credentials.host || "github.com"; try { - tokenResp = await requestWithGitHubGateway(config, gatewayService, { + tokenResp = await requestWithGitHubGateway(config, gatewayService, gatewayV2Service, { url: `https://${host}/login/oauth/access_token`, method: "POST", data: { @@ -471,7 +545,7 @@ export const validateGitHubConnectionCredentials = async ( id: number; }; }[]; - }>(config, gatewayService, { + }>(config, gatewayService, gatewayV2Service, { url: `https://${await getGitHubInstanceApiUrl(config)}/user/installations`, headers: { Accept: "application/json", diff --git a/backend/src/services/app-connection/github/github-connection-service.ts b/backend/src/services/app-connection/github/github-connection-service.ts index f1198ddfab..8292d94e0c 100644 --- a/backend/src/services/app-connection/github/github-connection-service.ts +++ b/backend/src/services/app-connection/github/github-connection-service.ts @@ -1,4 +1,5 @@ import { TGatewayServiceFactory } from "@app/ee/services/gateway/gateway-service"; +import { TGatewayV2ServiceFactory } from "@app/ee/services/gateway-v2/gateway-v2-service"; import { OrgServiceActor } from "@app/lib/types"; import { AppConnection } from "@app/services/app-connection/app-connection-enums"; import { @@ -22,12 +23,13 @@ type TListGitHubEnvironmentsDTO = { export const githubConnectionService = ( getAppConnection: TGetAppConnectionFunc, - gatewayService: Pick + gatewayService: Pick, + 
gatewayV2Service: Pick ) => { const listRepositories = async (connectionId: string, actor: OrgServiceActor) => { const appConnection = await getAppConnection(AppConnection.GitHub, connectionId, actor); - const repositories = await getGitHubRepositories(appConnection, gatewayService); + const repositories = await getGitHubRepositories(appConnection, gatewayService, gatewayV2Service); return repositories; }; @@ -35,7 +37,7 @@ export const githubConnectionService = ( const listOrganizations = async (connectionId: string, actor: OrgServiceActor) => { const appConnection = await getAppConnection(AppConnection.GitHub, connectionId, actor); - const organizations = await getGitHubOrganizations(appConnection, gatewayService); + const organizations = await getGitHubOrganizations(appConnection, gatewayService, gatewayV2Service); return organizations; }; @@ -46,7 +48,7 @@ export const githubConnectionService = ( ) => { const appConnection = await getAppConnection(AppConnection.GitHub, connectionId, actor); - const environments = await getGitHubEnvironments(appConnection, gatewayService, owner, repo); + const environments = await getGitHubEnvironments(appConnection, gatewayService, gatewayV2Service, owner, repo); return environments; }; diff --git a/backend/src/services/app-connection/hc-vault/hc-vault-connection-fns.ts b/backend/src/services/app-connection/hc-vault/hc-vault-connection-fns.ts index 46a59bcecb..3a79e2f8e0 100644 --- a/backend/src/services/app-connection/hc-vault/hc-vault-connection-fns.ts +++ b/backend/src/services/app-connection/hc-vault/hc-vault-connection-fns.ts @@ -3,6 +3,7 @@ import https from "https"; import { verifyHostInputValidity } from "@app/ee/services/dynamic-secret/dynamic-secret-fns"; import { TGatewayServiceFactory } from "@app/ee/services/gateway/gateway-service"; +import { TGatewayV2ServiceFactory } from "@app/ee/services/gateway-v2/gateway-v2-service"; import { request } from "@app/lib/config/request"; import { BadRequestError } from 
"@app/lib/errors"; import { removeTrailingSlash } from "@app/lib/fn"; @@ -144,7 +145,9 @@ export const getHCVaultAccessToken = async ( export const validateHCVaultConnectionCredentials = async ( connection: THCVaultConnection, - gatewayService: Pick + gatewayService: Pick, + // eslint-disable-next-line @typescript-eslint/no-unused-vars + _gatewayV2Service: Pick ) => { const instanceUrl = await getHCVaultInstanceUrl(connection); diff --git a/backend/src/services/app-connection/shared/sql/sql-connection-fns.ts b/backend/src/services/app-connection/shared/sql/sql-connection-fns.ts index 02bd0c77f2..ca50bae8a3 100644 --- a/backend/src/services/app-connection/shared/sql/sql-connection-fns.ts +++ b/backend/src/services/app-connection/shared/sql/sql-connection-fns.ts @@ -2,12 +2,14 @@ import knex, { Knex } from "knex"; import { verifyHostInputValidity } from "@app/ee/services/dynamic-secret/dynamic-secret-fns"; import { TGatewayServiceFactory } from "@app/ee/services/gateway/gateway-service"; +import { TGatewayV2ServiceFactory } from "@app/ee/services/gateway-v2/gateway-v2-service"; import { TSqlCredentialsRotationGeneratedCredentials, TSqlCredentialsRotationWithConnection } from "@app/ee/services/secret-rotation-v2/shared/sql-credentials/sql-credentials-rotation-types"; import { BadRequestError, DatabaseError } from "@app/lib/errors"; import { GatewayProxyProtocol, withGatewayProxy } from "@app/lib/gateway"; +import { withGatewayV2Proxy } from "@app/lib/gateway-v2/gateway-v2"; import { alphaNumericNanoId } from "@app/lib/nanoid"; import { AppConnection } from "@app/services/app-connection/app-connection-enums"; import { TAppConnectionRaw, TSqlConnection } from "@app/services/app-connection/app-connection-types"; @@ -104,12 +106,49 @@ export const getSqlConnectionClient = async (appConnection: Pick( config: TSqlConnectionConfig, gatewayService: Pick, + gatewayV2Service: Pick, operation: (client: Knex) => Promise ): Promise => { const { credentials, app, gatewayId } = 
config; - if (gatewayId && gatewayService) { + if (gatewayId && gatewayService && gatewayV2Service) { const [targetHost] = await verifyHostInputValidity(credentials.host, true); + const platformConnectionDetails = await gatewayV2Service.getPlatformConnectionDetailsByGatewayId({ + gatewayId, + targetHost, + targetPort: credentials.port + }); + + if (platformConnectionDetails) { + return withGatewayV2Proxy( + async (proxyPort) => { + const client = knex({ + client: SQL_CONNECTION_CLIENT_MAP[app], + connection: { + database: credentials.database, + port: proxyPort, + host: "localhost", + user: credentials.username, + password: credentials.password, + connectionTimeoutMillis: EXTERNAL_REQUEST_TIMEOUT, + ...getConnectionConfig({ app, credentials }) + } + }); + try { + return await operation(client); + } finally { + await client.destroy(); + } + }, + { + protocol: GatewayProxyProtocol.Tcp, + relayHost: platformConnectionDetails.relayHost, + gateway: platformConnectionDetails.gateway, + relay: platformConnectionDetails.relay + } + ); + } + const relayDetails = await gatewayService.fnGetGatewayClientTlsByGatewayId(gatewayId); const [relayHost, relayPort] = relayDetails.relayAddress.split(":"); @@ -161,10 +200,11 @@ export const executeWithPotentialGateway = async ( export const validateSqlConnectionCredentials = async ( config: TSqlConnectionConfig, - gatewayService: Pick + gatewayService: Pick, + gatewayV2Service: Pick ) => { try { - await executeWithPotentialGateway(config, gatewayService, async (client) => { + await executeWithPotentialGateway(config, gatewayService, gatewayV2Service, async (client) => { await client.raw(config.app === AppConnection.OracleDB ? 
`SELECT 1 FROM DUAL` : `Select 1`); }); return config.credentials; @@ -191,14 +231,15 @@ export const SQL_CONNECTION_ALTER_LOGIN_STATEMENT: Record< export const transferSqlConnectionCredentialsToPlatform = async ( config: TSqlConnectionConfig, callback: (credentials: TSqlConnectionConfig["credentials"]) => Promise, - gatewayService: Pick + gatewayService: Pick, + gatewayV2Service: Pick ) => { const { credentials, app } = config; const newPassword = alphaNumericNanoId(32); try { - return await executeWithPotentialGateway(config, gatewayService, (client) => { + return await executeWithPotentialGateway(config, gatewayService, gatewayV2Service, (client) => { return client.transaction(async (tx) => { await tx.raw( ...SQL_CONNECTION_ALTER_LOGIN_STATEMENT[app]({ username: credentials.username, password: newPassword }) diff --git a/backend/src/services/auth-token/auth-token-service.ts b/backend/src/services/auth-token/auth-token-service.ts index c309b39985..613aa0766a 100644 --- a/backend/src/services/auth-token/auth-token-service.ts +++ b/backend/src/services/auth-token/auth-token-service.ts @@ -36,6 +36,12 @@ export const getTokenConfig = (tokenType: TokenType) => { const expiresAt = new Date(new Date().getTime() + 86400000); return { token, triesLeft, expiresAt }; } + case TokenType.TOKEN_EMAIL_CHANGE_OTP: { + const token = String(crypto.randomInt(10 ** 5, 10 ** 6 - 1)); + const triesLeft = 1; + const expiresAt = new Date(new Date().getTime() + 600000); + return { token, triesLeft, expiresAt }; + } case TokenType.TOKEN_EMAIL_MFA: { // generate random 6-digit code const token = String(crypto.randomInt(10 ** 5, 10 ** 6 - 1)); @@ -75,7 +81,7 @@ export const getTokenConfig = (tokenType: TokenType) => { }; export const tokenServiceFactory = ({ tokenDAL, userDAL, orgMembershipDAL }: TAuthTokenServiceFactoryDep) => { - const createTokenForUser = async ({ type, userId, orgId, aliasId }: TCreateTokenForUserDTO) => { + const createTokenForUser = async ({ type, userId, orgId, 
aliasId, payload }: TCreateTokenForUserDTO) => { const { token, ...tkCfg } = getTokenConfig(type); const appCfg = getConfig(); const tokenHash = await crypto.hashing().createHash(token, appCfg.SALT_ROUNDS); @@ -89,7 +95,8 @@ export const tokenServiceFactory = ({ tokenDAL, userDAL, orgMembershipDAL }: TAu userId, orgId, triesLeft: tkCfg?.triesLeft, - aliasId + aliasId, + payload }, tx ); diff --git a/backend/src/services/auth-token/auth-token-types.ts b/backend/src/services/auth-token/auth-token-types.ts index 7deb719a90..3255fbbbc6 100644 --- a/backend/src/services/auth-token/auth-token-types.ts +++ b/backend/src/services/auth-token/auth-token-types.ts @@ -3,6 +3,7 @@ import { ProjectMembershipRole } from "@app/db/schemas"; export enum TokenType { TOKEN_EMAIL_CONFIRMATION = "emailConfirmation", TOKEN_EMAIL_VERIFICATION = "emailVerification", // unverified -> verified + TOKEN_EMAIL_CHANGE_OTP = "emailChangeOtp", TOKEN_EMAIL_MFA = "emailMfa", TOKEN_EMAIL_ORG_INVITATION = "organizationInvitation", TOKEN_EMAIL_PASSWORD_RESET = "passwordReset", @@ -15,6 +16,7 @@ export type TCreateTokenForUserDTO = { userId: string; orgId?: string; aliasId?: string; + payload?: string; }; export type TCreateOrgInviteTokenDTO = { diff --git a/backend/src/services/certificate/certificate-fns.ts b/backend/src/services/certificate/certificate-fns.ts index eee220ce9e..b2bc4df41a 100644 --- a/backend/src/services/certificate/certificate-fns.ts +++ b/backend/src/services/certificate/certificate-fns.ts @@ -52,6 +52,9 @@ export const constructPemChainFromCerts = (certificates: x509.X509Certificate[]) .join("\n") .trim(); +export const prependCertToPemChain = (cert: x509.X509Certificate, pemChain: string) => + `${cert.toString("pem")}\n${pemChain}`; + export const splitPemChain = (pemText: string) => { const re2Pattern = new RE2("-----BEGIN CERTIFICATE-----[^-]+-----END CERTIFICATE-----", "g"); diff --git a/backend/src/services/folder-commit/folder-commit-service.test.ts 
b/backend/src/services/folder-commit/folder-commit-service.test.ts index 28d6038299..0a73d6e0cf 100644 --- a/backend/src/services/folder-commit/folder-commit-service.test.ts +++ b/backend/src/services/folder-commit/folder-commit-service.test.ts @@ -661,7 +661,7 @@ describe("folderCommitServiceFactory", () => { // Assert expect(mockFolderCommitDAL.create).toHaveBeenCalled(); - expect(mockSecretV2BridgeDAL.invalidateSecretCacheByProjectId).toHaveBeenCalledWith(projectId); + expect(mockSecretV2BridgeDAL.invalidateSecretCacheByProjectId).toHaveBeenCalledWith(projectId, {}); // Check that we got the right counts expect(result.totalChanges).toEqual(2); diff --git a/backend/src/services/folder-commit/folder-commit-service.ts b/backend/src/services/folder-commit/folder-commit-service.ts index 470edbbba9..e4c151ff1e 100644 --- a/backend/src/services/folder-commit/folder-commit-service.ts +++ b/backend/src/services/folder-commit/folder-commit-service.ts @@ -1386,7 +1386,7 @@ export const folderCommitServiceFactory = ({ ); // Invalidate cache to reflect the changes - await secretV2BridgeDAL.invalidateSecretCacheByProjectId(projectId); + await secretV2BridgeDAL.invalidateSecretCacheByProjectId(projectId, tx); return { secretChangesCount: secretChanges.length, diff --git a/backend/src/services/identity-kubernetes-auth/identity-kubernetes-auth-service.ts b/backend/src/services/identity-kubernetes-auth/identity-kubernetes-auth-service.ts index 9584b122a2..bc231c6d6d 100644 --- a/backend/src/services/identity-kubernetes-auth/identity-kubernetes-auth-service.ts +++ b/backend/src/services/identity-kubernetes-auth/identity-kubernetes-auth-service.ts @@ -6,6 +6,8 @@ import RE2 from "re2"; import { IdentityAuthMethod, TIdentityKubernetesAuthsUpdate } from "@app/db/schemas"; import { TGatewayDALFactory } from "@app/ee/services/gateway/gateway-dal"; import { TGatewayServiceFactory } from "@app/ee/services/gateway/gateway-service"; +import { TGatewayV2DALFactory } from 
"@app/ee/services/gateway-v2/gateway-v2-dal"; +import { TGatewayV2ServiceFactory } from "@app/ee/services/gateway-v2/gateway-v2-service"; import { TLicenseServiceFactory } from "@app/ee/services/license/license-service"; import { OrgPermissionGatewayActions, @@ -21,6 +23,7 @@ import { getConfig } from "@app/lib/config/env"; import { crypto } from "@app/lib/crypto"; import { BadRequestError, NotFoundError, PermissionBoundaryError, UnauthorizedError } from "@app/lib/errors"; import { GatewayHttpProxyActions, GatewayProxyProtocol, withGatewayProxy } from "@app/lib/gateway"; +import { withGatewayV2Proxy } from "@app/lib/gateway-v2/gateway-v2"; import { extractIPDetails, isValidIpOrCidr } from "@app/lib/ip"; import { logger } from "@app/lib/logger"; @@ -54,11 +57,15 @@ type TIdentityKubernetesAuthServiceFactoryDep = { licenseService: Pick; kmsService: Pick; gatewayService: TGatewayServiceFactory; + gatewayV2Service: TGatewayV2ServiceFactory; gatewayDAL: Pick; + gatewayV2DAL: Pick; }; export type TIdentityKubernetesAuthServiceFactory = ReturnType; +const GATEWAY_AUTH_DEFAULT_HOST = "https://kubernetes.default.svc.cluster.local"; + export const identityKubernetesAuthServiceFactory = ({ identityKubernetesAuthDAL, identityOrgMembershipDAL, @@ -66,7 +73,9 @@ export const identityKubernetesAuthServiceFactory = ({ permissionService, licenseService, gatewayService, + gatewayV2Service, gatewayDAL, + gatewayV2DAL, kmsService }: TIdentityKubernetesAuthServiceFactoryDep) => { const $gatewayProxyWrapper = async ( @@ -79,6 +88,42 @@ export const identityKubernetesAuthServiceFactory = ({ }, gatewayCallback: (host: string, port: number, httpsAgent?: https.Agent) => Promise ): Promise => { + const gatewayV2ConnectionDetails = await gatewayV2Service.getPlatformConnectionDetailsByGatewayId({ + gatewayId: inputs.gatewayId, + targetHost: inputs.targetHost ?? GATEWAY_AUTH_DEFAULT_HOST, + targetPort: inputs.targetPort ?? 
443 + }); + + if (gatewayV2ConnectionDetails) { + let httpsAgent: https.Agent | undefined; + if (!inputs.reviewTokenThroughGateway) { + httpsAgent = new https.Agent({ + ca: inputs.caCert, + rejectUnauthorized: Boolean(inputs.caCert) + }); + } + + const callbackResult = await withGatewayV2Proxy( + async (port) => { + const res = await gatewayCallback( + inputs.reviewTokenThroughGateway ? "http://localhost" : "https://localhost", + port, + httpsAgent + ); + return res; + }, + { + protocol: inputs.reviewTokenThroughGateway ? GatewayProxyProtocol.Http : GatewayProxyProtocol.Tcp, + relayHost: gatewayV2ConnectionDetails.relayHost, + gateway: gatewayV2ConnectionDetails.gateway, + relay: gatewayV2ConnectionDetails.relay, + httpsAgent + } + ); + + return callbackResult; + } + const relayDetails = await gatewayService.fnGetGatewayClientTlsByGatewayId(inputs.gatewayId); const [relayHost, relayPort] = relayDetails.relayAddress.split(":"); @@ -277,7 +322,7 @@ export const identityKubernetesAuthServiceFactory = ({ let data: TCreateTokenReviewResponse | undefined; if (identityKubernetesAuth.tokenReviewMode === IdentityKubernetesAuthTokenReviewMode.Gateway) { - if (!identityKubernetesAuth.gatewayId) { + if (!identityKubernetesAuth.gatewayId && !identityKubernetesAuth.gatewayV2Id) { throw new BadRequestError({ message: "Gateway ID is required when token review mode is set to Gateway" }); @@ -285,7 +330,7 @@ export const identityKubernetesAuthServiceFactory = ({ data = await $gatewayProxyWrapper( { - gatewayId: identityKubernetesAuth.gatewayId, + gatewayId: (identityKubernetesAuth.gatewayV2Id ?? identityKubernetesAuth.gatewayId) as string, reviewTokenThroughGateway: true }, tokenReviewCallbackThroughGateway @@ -304,17 +349,18 @@ export const identityKubernetesAuthServiceFactory = ({ const [k8sHost, k8sPort] = kubernetesHost.split(":"); - data = identityKubernetesAuth.gatewayId - ? 
await $gatewayProxyWrapper( - { - gatewayId: identityKubernetesAuth.gatewayId, - targetHost: k8sHost, - targetPort: k8sPort ? Number(k8sPort) : 443, - reviewTokenThroughGateway: false - }, - tokenReviewCallbackRaw - ) - : await tokenReviewCallbackRaw(); + data = + identityKubernetesAuth.gatewayId || identityKubernetesAuth.gatewayV2Id + ? await $gatewayProxyWrapper( + { + gatewayId: (identityKubernetesAuth.gatewayV2Id ?? identityKubernetesAuth.gatewayId) as string, + targetHost: k8sHost, + targetPort: k8sPort ? Number(k8sPort) : 443, + reviewTokenThroughGateway: false + }, + tokenReviewCallbackRaw + ) + : await tokenReviewCallbackRaw(); } else { throw new BadRequestError({ message: `Invalid token review mode: ${identityKubernetesAuth.tokenReviewMode}` @@ -490,14 +536,20 @@ export const identityKubernetesAuthServiceFactory = ({ return extractIPDetails(accessTokenTrustedIp.ipAddress); }); + let isGatewayV1 = true; if (gatewayId) { const [gateway] = await gatewayDAL.find({ id: gatewayId, orgId: identityMembershipOrg.orgId }); - if (!gateway) { + const [gatewayV2] = await gatewayV2DAL.find({ id: gatewayId, orgId: identityMembershipOrg.orgId }); + if (!gateway && !gatewayV2) { throw new NotFoundError({ message: `Gateway with ID ${gatewayId} not found` }); } + if (!gateway) { + isGatewayV1 = false; + } + const { permission: orgPermission } = await permissionService.getOrgPermission( actor, actorId, @@ -528,7 +580,8 @@ export const identityKubernetesAuthServiceFactory = ({ accessTokenMaxTTL, accessTokenTTL, accessTokenNumUsesLimit, - gatewayId, + gatewayId: isGatewayV1 ? gatewayId : null, + gatewayV2Id: isGatewayV1 ? null : gatewayId, accessTokenTrustedIps: JSON.stringify(reformattedAccessTokenTrustedIps), encryptedKubernetesTokenReviewerJwt: tokenReviewerJwt ? 
encryptor({ plainText: Buffer.from(tokenReviewerJwt) }).cipherTextBlob @@ -608,14 +661,21 @@ export const identityKubernetesAuthServiceFactory = ({ return extractIPDetails(accessTokenTrustedIp.ipAddress); }); + let isGatewayV1 = true; if (gatewayId) { const [gateway] = await gatewayDAL.find({ id: gatewayId, orgId: identityMembershipOrg.orgId }); - if (!gateway) { + const [gatewayV2] = await gatewayV2DAL.find({ id: gatewayId, orgId: identityMembershipOrg.orgId }); + + if (!gateway && !gatewayV2) { throw new NotFoundError({ message: `Gateway with ID ${gatewayId} not found` }); } + if (!gateway) { + isGatewayV1 = false; + } + const { permission: orgPermission } = await permissionService.getOrgPermission( actor, actorId, @@ -629,13 +689,18 @@ export const identityKubernetesAuthServiceFactory = ({ ); } + const shouldUpdateGatewayId = Boolean(gatewayId); + const gatewayIdValue = isGatewayV1 ? gatewayId : null; + const gatewayV2IdValue = isGatewayV1 ? null : gatewayId; + const updateQuery: TIdentityKubernetesAuthsUpdate = { kubernetesHost, tokenReviewMode, allowedNamespaces, allowedNames, allowedAudience, - gatewayId, + gatewayId: shouldUpdateGatewayId ? gatewayIdValue : undefined, + gatewayV2Id: shouldUpdateGatewayId ? gatewayV2IdValue : undefined, accessTokenMaxTTL, accessTokenTTL, accessTokenNumUsesLimit, @@ -730,7 +795,13 @@ export const identityKubernetesAuthServiceFactory = ({ }).toString(); } - return { ...identityKubernetesAuth, caCert, tokenReviewerJwt, orgId: identityMembershipOrg.orgId }; + return { + ...identityKubernetesAuth, + caCert, + tokenReviewerJwt, + orgId: identityMembershipOrg.orgId, + gatewayId: identityKubernetesAuth.gatewayId ?? 
identityKubernetesAuth.gatewayV2Id + }; }; const revokeIdentityKubernetesAuth = async ({ diff --git a/backend/src/services/identity-ua/identity-ua-service.ts b/backend/src/services/identity-ua/identity-ua-service.ts index 5977476830..8aec163713 100644 --- a/backend/src/services/identity-ua/identity-ua-service.ts +++ b/backend/src/services/identity-ua/identity-ua-service.ts @@ -84,18 +84,20 @@ export const identityUaServiceFactory = ({ const LOCKOUT_KEY = `lockout:identity:${identityUa.identityId}:${IdentityAuthMethod.UNIVERSAL_AUTH}:${clientId}`; - let lock: Awaited>; - try { - lock = await keyStore.acquireLock([KeyStorePrefixes.IdentityLockoutLock(LOCKOUT_KEY)], 500, { - retryCount: 3, - retryDelay: 300, - retryJitter: 100 - }); - } catch (e) { - logger.info( - `identity login failed to acquire lock [identityId=${identityUa.identityId}] [authMethod=${IdentityAuthMethod.UNIVERSAL_AUTH}]` - ); - throw new RateLimitError({ message: "Rate limit exceeded" }); + let lock: Awaited> | undefined; + if (identityUa.lockoutEnabled) { + try { + lock = await keyStore.acquireLock([KeyStorePrefixes.IdentityLockoutLock(LOCKOUT_KEY)], 500, { + retryCount: 3, + retryDelay: 300, + retryJitter: 100 + }); + } catch (e) { + logger.info( + `identity login failed to acquire lock [identityId=${identityUa.identityId}] [authMethod=${IdentityAuthMethod.UNIVERSAL_AUTH}]` + ); + throw new RateLimitError({ message: "Failed to acquire lock: rate limit exceeded" }); + } } try { @@ -257,7 +259,7 @@ export const identityUaServiceFactory = ({ ...accessTokenTTLParams }; } finally { - await lock.release(); + if (lock) await lock.release(); } }; diff --git a/backend/src/services/identity/identity-dal.ts b/backend/src/services/identity/identity-dal.ts index 3634124930..7bc7976004 100644 --- a/backend/src/services/identity/identity-dal.ts +++ b/backend/src/services/identity/identity-dal.ts @@ -25,7 +25,7 @@ export const identityDALFactory = (db: TDbClient) => { } as const; const tableName = 
authMethodToTableName[authMethod]; if (!tableName) return; - const data = await db(tableName).where({ identityId }).first(); + const data = await db.replicaNode()(tableName).where({ identityId }).first(); if (!data) return; return data.accessTokenTrustedIps; }; diff --git a/backend/src/services/integration-auth/integration-auth-dal.ts b/backend/src/services/integration-auth/integration-auth-dal.ts index 7a56afcbb3..d3ccf610b2 100644 --- a/backend/src/services/integration-auth/integration-auth-dal.ts +++ b/backend/src/services/integration-auth/integration-auth-dal.ts @@ -30,7 +30,7 @@ export const integrationAuthDALFactory = (db: TDbClient) => { const getByOrg = async (orgId: string, tx?: Knex) => { try { - const integrationAuths = await (tx || db)(TableName.IntegrationAuth) + const integrationAuths = await (tx || db.replicaNode())(TableName.IntegrationAuth) .join(TableName.Project, `${TableName.Project}.id`, `${TableName.IntegrationAuth}.projectId`) .join(TableName.Organization, `${TableName.Organization}.id`, `${TableName.Project}.orgId`) .where(`${TableName.Organization}.id`, "=", orgId) diff --git a/backend/src/services/kms/kms-root-config-dal.ts b/backend/src/services/kms/kms-root-config-dal.ts index 31826b79d3..1b9b9a2301 100644 --- a/backend/src/services/kms/kms-root-config-dal.ts +++ b/backend/src/services/kms/kms-root-config-dal.ts @@ -12,7 +12,7 @@ export const kmsRootConfigDALFactory = (db: TDbClient) => { const findById = async (id: string, tx?: Knex) => { try { - const result = await (tx || db)(TableName.KmsServerRootConfig) + const result = await (tx || db?.replicaNode?.() || db)(TableName.KmsServerRootConfig) .where({ id } as never) .first("*"); return result; diff --git a/backend/src/services/org/org-service.ts b/backend/src/services/org/org-service.ts index 978bc8daeb..ffcf4459ee 100644 --- a/backend/src/services/org/org-service.ts +++ b/backend/src/services/org/org-service.ts @@ -9,11 +9,14 @@ import { ProjectMembershipRole, ProjectVersion, 
TableName, + TOidcConfigs, TProjectMemberships, TProjectUserMembershipRolesInsert, + TSamlConfigs, TUsers } from "@app/db/schemas"; import { TGroupDALFactory } from "@app/ee/services/group/group-dal"; +import { TLdapConfigDALFactory } from "@app/ee/services/ldap-config/ldap-config-dal"; import { TLicenseServiceFactory } from "@app/ee/services/license/license-service"; import { TOidcConfigDALFactory } from "@app/ee/services/oidc/oidc-config-dal"; import { @@ -125,6 +128,7 @@ type TOrgServiceFactoryDep = { incidentContactDAL: TIncidentContactsDALFactory; samlConfigDAL: Pick; oidcConfigDAL: Pick; + ldapConfigDAL: Pick; smtpService: TSmtpService; tokenService: TAuthTokenServiceFactory; permissionService: TPermissionServiceFactory; @@ -165,6 +169,7 @@ export const orgServiceFactory = ({ projectRoleDAL, samlConfigDAL, oidcConfigDAL, + ldapConfigDAL, projectUserMembershipRoleDAL, identityMetadataDAL, projectBotService, @@ -446,16 +451,20 @@ export const orgServiceFactory = ({ }); } - if (authEnforced) { - const samlCfg = await samlConfigDAL.findOne({ + let samlCfg: TSamlConfigs | undefined; + let oidcCfg: TOidcConfigs | undefined; + if (authEnforced || googleSsoAuthEnforced) { + samlCfg = await samlConfigDAL.findOne({ orgId, isActive: true }); - const oidcCfg = await oidcConfigDAL.findOne({ + oidcCfg = await oidcConfigDAL.findOne({ orgId, isActive: true }); + } + if (authEnforced) { if (!samlCfg && !oidcCfg) throw new NotFoundError({ message: `SAML or OIDC configuration for organization with ID '${orgId}' not found` @@ -483,6 +492,32 @@ export const orgServiceFactory = ({ }); } + if (samlCfg) { + throw new BadRequestError({ + message: + "Cannot enable Google OAuth enforcement while SAML SSO is configured. Disable SAML SSO to enforce Google OAuth." + }); + } + + if (oidcCfg) { + throw new BadRequestError({ + message: + "Cannot enable Google OAuth enforcement while OIDC SSO is configured. Disable OIDC SSO to enforce Google OAuth." 
+ }); + } + + const ldapCfg = await ldapConfigDAL.findOne({ + orgId, + isActive: true + }); + + if (ldapCfg) { + throw new BadRequestError({ + message: + "Cannot enable Google OAuth enforcement while LDAP SSO is configured. Disable LDAP SSO to enforce Google OAuth." + }); + } + if (!currentOrg.googleSsoAuthLastUsed) { throw new BadRequestError({ message: diff --git a/backend/src/services/reminder/reminder-dal.ts b/backend/src/services/reminder/reminder-dal.ts index 897a75234d..4161552a95 100644 --- a/backend/src/services/reminder/reminder-dal.ts +++ b/backend/src/services/reminder/reminder-dal.ts @@ -39,7 +39,7 @@ export const reminderDALFactory = (db: TDbClient) => { const findSecretDailyReminders = async (tx?: Knex) => { const { startOfDay, endOfDay } = getTodayDateRange(); - const rawReminders = await (tx || db)(TableName.Reminder) + const rawReminders = await (tx || db.replicaNode())(TableName.Reminder) .whereBetween("nextReminderDate", [startOfDay, endOfDay]) .leftJoin(TableName.ReminderRecipient, `${TableName.Reminder}.id`, `${TableName.ReminderRecipient}.reminderId`) .leftJoin(TableName.Users, `${TableName.ReminderRecipient}.userId`, `${TableName.Users}.id`) @@ -90,7 +90,7 @@ export const reminderDALFactory = (db: TDbClient) => { const futureDate = new Date(startOfDay); futureDate.setDate(futureDate.getDate() + daysAhead); - const reminders = await (tx || db)(TableName.Reminder) + const reminders = await (tx || db.replicaNode())(TableName.Reminder) .where("nextReminderDate", ">=", startOfDay) .where("nextReminderDate", "<=", futureDate) .orderBy("nextReminderDate", "asc") @@ -101,7 +101,7 @@ export const reminderDALFactory = (db: TDbClient) => { }; const findSecretReminder = async (secretId: string, tx?: Knex) => { - const rawReminders = await (tx || db)(TableName.Reminder) + const rawReminders = await (tx || db.replicaNode())(TableName.Reminder) .where(`${TableName.Reminder}.secretId`, secretId) .leftJoin(TableName.ReminderRecipient, 
`${TableName.Reminder}.id`, `${TableName.ReminderRecipient}.reminderId`) .select(selectAllTableCols(TableName.Reminder)) @@ -125,7 +125,7 @@ export const reminderDALFactory = (db: TDbClient) => { }; const findSecretReminders = async (secretIds: string[], tx?: Knex) => { - const rawReminders = await (tx || db)(TableName.Reminder) + const rawReminders = await (tx || db.replicaNode())(TableName.Reminder) .whereIn(`${TableName.Reminder}.secretId`, secretIds) .leftJoin(TableName.ReminderRecipient, `${TableName.Reminder}.id`, `${TableName.ReminderRecipient}.reminderId`) .select(selectAllTableCols(TableName.Reminder)) diff --git a/backend/src/services/resource-cleanup/resource-cleanup-queue.ts b/backend/src/services/resource-cleanup/resource-cleanup-queue.ts index fbd9d3fd68..185ab5e940 100644 --- a/backend/src/services/resource-cleanup/resource-cleanup-queue.ts +++ b/backend/src/services/resource-cleanup/resource-cleanup-queue.ts @@ -1,5 +1,6 @@ import { TAuditLogDALFactory } from "@app/ee/services/audit-log/audit-log-dal"; import { TSnapshotDALFactory } from "@app/ee/services/secret-snapshot/snapshot-dal"; +import { TKeyValueStoreDALFactory } from "@app/keystore/key-value-store-dal"; import { getConfig } from "@app/lib/config/env"; import { logger } from "@app/lib/logger"; import { QueueJobs, QueueName, TQueueServiceFactory } from "@app/queue"; @@ -27,6 +28,7 @@ type TDailyResourceCleanUpQueueServiceFactoryDep = { queueService: TQueueServiceFactory; orgService: TOrgServiceFactory; userNotificationDAL: Pick; + keyValueStoreDAL: Pick; }; export type TDailyResourceCleanUpQueueServiceFactory = ReturnType; @@ -43,7 +45,8 @@ export const dailyResourceCleanUpQueueServiceFactory = ({ identityUniversalAuthClientSecretDAL, serviceTokenService, orgService, - userNotificationDAL + userNotificationDAL, + keyValueStoreDAL }: TDailyResourceCleanUpQueueServiceFactoryDep) => { const appCfg = getConfig(); @@ -52,6 +55,10 @@ export const dailyResourceCleanUpQueueServiceFactory = ({ } 
const init = async () => { + if (appCfg.isSecondaryInstance) { + return; + } + await queueService.stopRepeatableJob( QueueName.AuditLogPrune, QueueJobs.AuditLogPrune, @@ -82,6 +89,7 @@ export const dailyResourceCleanUpQueueServiceFactory = ({ await orgService.notifyInvitedUsers(); await auditLogDAL.pruneAuditLog(); await userNotificationDAL.pruneNotifications(); + await keyValueStoreDAL.pruneExpiredKeys(); logger.info(`${QueueName.DailyResourceCleanUp}: queue task completed`); } catch (error) { logger.error(error, `${QueueName.DailyResourceCleanUp}: resource cleanup failed`); diff --git a/backend/src/services/secret-folder/secret-folder-service.ts b/backend/src/services/secret-folder/secret-folder-service.ts index c5eb0adde1..8e0892bd16 100644 --- a/backend/src/services/secret-folder/secret-folder-service.ts +++ b/backend/src/services/secret-folder/secret-folder-service.ts @@ -47,7 +47,7 @@ type TSecretFolderServiceFactoryDep = { folderCommitService: Pick; projectDAL: Pick; secretApprovalPolicyService: Pick; - secretV2BridgeDAL: Pick; + secretV2BridgeDAL: Pick; }; export type TSecretFolderServiceFactory = ReturnType; @@ -398,6 +398,7 @@ export const secretFolderServiceFactory = ({ await Promise.all(result.map(async (res) => snapshotService.performSnapshot(res.newFolder.parentId as string))); + await secretV2BridgeDAL.invalidateSecretCacheByProjectId(projectId); return { projectId, newFolders: result.map((res) => res.newFolder), @@ -522,6 +523,7 @@ export const secretFolderServiceFactory = ({ } await snapshotService.performSnapshot(newFolder.parentId as string); + await secretV2BridgeDAL.invalidateSecretCacheByProjectId(projectId); return { folder: { ...newFolder, path: newFolderWithFullPath.path }, old: { ...folder, path: folderWithFullPath.path } @@ -724,6 +726,7 @@ export const secretFolderServiceFactory = ({ }); await snapshotService.performSnapshot(folder.parentId as string); + await secretV2BridgeDAL.invalidateSecretCacheByProjectId(projectId); return folder; 
}; diff --git a/backend/src/services/secret-folder/secret-folder-version-dal.ts b/backend/src/services/secret-folder/secret-folder-version-dal.ts index 46ff49692f..5504c6e0bd 100644 --- a/backend/src/services/secret-folder/secret-folder-version-dal.ts +++ b/backend/src/services/secret-folder/secret-folder-version-dal.ts @@ -45,7 +45,7 @@ export const secretFolderVersionDALFactory = (db: TDbClient) => { ) .whereIn(`${TableName.SecretFolderVersion}.folderId`, folderIds) .join( - (tx || db)(TableName.SecretFolderVersion) + (tx || db.replicaNode())(TableName.SecretFolderVersion) .groupBy("folderId") .max("version") .select("folderId") diff --git a/backend/src/services/secret-import/secret-import-dal.ts b/backend/src/services/secret-import/secret-import-dal.ts index db611dc6c6..2261c24184 100644 --- a/backend/src/services/secret-import/secret-import-dal.ts +++ b/backend/src/services/secret-import/secret-import-dal.ts @@ -15,7 +15,7 @@ export const secretImportDALFactory = (db: TDbClient) => { // we are using postion based sorting as its a small list // this will return the last value of the position in a folder with secret imports const findLastImportPosition = async (folderId: string, tx?: Knex) => { - const lastPos = await (tx || db)(TableName.SecretImport) + const lastPos = await (tx || db.replicaNode())(TableName.SecretImport) .where({ folderId }) .max("position", { as: "position" }) .first(); diff --git a/backend/src/services/secret-sharing/secret-sharing-dal.ts b/backend/src/services/secret-sharing/secret-sharing-dal.ts index 7cdccd4f89..08e0ad2575 100644 --- a/backend/src/services/secret-sharing/secret-sharing-dal.ts +++ b/backend/src/services/secret-sharing/secret-sharing-dal.ts @@ -119,7 +119,7 @@ export const secretSharingDALFactory = (db: TDbClient) => { const findActiveSharedSecrets = async (filters: Partial, tx?: Knex) => { try { const now = new Date(); - return await (tx || db)(TableName.SecretSharing) + return await (tx || 
db.replicaNode())(TableName.SecretSharing) .where(filters) .andWhere("expiresAt", ">", now) .andWhere("encryptedValue", "<>", "") diff --git a/backend/src/services/secret-sync/github/github-sync-fns.ts b/backend/src/services/secret-sync/github/github-sync-fns.ts index 2cae048aa4..4b174ca2a4 100644 --- a/backend/src/services/secret-sync/github/github-sync-fns.ts +++ b/backend/src/services/secret-sync/github/github-sync-fns.ts @@ -1,6 +1,7 @@ import sodium from "libsodium-wrappers"; import { TGatewayServiceFactory } from "@app/ee/services/gateway/gateway-service"; +import { TGatewayV2ServiceFactory } from "@app/ee/services/gateway-v2/gateway-v2-service"; import { getGitHubAppAuthToken, getGitHubInstanceApiUrl, @@ -20,7 +21,8 @@ import { TGitHubPublicKey, TGitHubSecret, TGitHubSecretPayload, TGitHubSyncWithC const getEncryptedSecrets = async ( secretSync: TGitHubSyncWithCredentials, - gatewayService: Pick + gatewayService: Pick, + gatewayV2Service: Pick ) => { const { destinationConfig, connection } = secretSync; @@ -44,6 +46,7 @@ const getEncryptedSecrets = async ( return makePaginatedGitHubRequest( connection, gatewayService, + gatewayV2Service, path, (data) => data.secrets ); @@ -52,6 +55,7 @@ const getEncryptedSecrets = async ( const getPublicKey = async ( secretSync: TGitHubSyncWithCredentials, gatewayService: Pick, + gatewayV2Service: Pick, token: string ) => { const { destinationConfig, connection } = secretSync; @@ -73,7 +77,7 @@ const getPublicKey = async ( } } - const response = await requestWithGitHubGateway(connection, gatewayService, { + const response = await requestWithGitHubGateway(connection, gatewayService, gatewayV2Service, { url: `https://${await getGitHubInstanceApiUrl(connection)}${path}`, method: "GET", headers: { @@ -89,6 +93,7 @@ const getPublicKey = async ( const deleteSecret = async ( secretSync: TGitHubSyncWithCredentials, gatewayService: Pick, + gatewayV2Service: Pick, token: string, encryptedSecret: TGitHubSecret ) => { @@ -111,7 +116,7 
@@ const deleteSecret = async ( } } - await requestWithGitHubGateway(connection, gatewayService, { + await requestWithGitHubGateway(connection, gatewayService, gatewayV2Service, { url: `https://${await getGitHubInstanceApiUrl(connection)}${path}`, method: "DELETE", headers: { @@ -125,6 +130,7 @@ const deleteSecret = async ( const putSecret = async ( secretSync: TGitHubSyncWithCredentials, gatewayService: Pick, + gatewayV2Service: Pick, token: string, payload: TGitHubSecretPayload ) => { @@ -157,7 +163,7 @@ const putSecret = async ( } } - await requestWithGitHubGateway(connection, gatewayService, { + await requestWithGitHubGateway(connection, gatewayService, gatewayV2Service, { url: `https://${await getGitHubInstanceApiUrl(connection)}${path}`, method: "PUT", headers: { @@ -173,7 +179,8 @@ export const GithubSyncFns = { syncSecrets: async ( secretSync: TGitHubSyncWithCredentials, ogSecretMap: TSecretMap, - gatewayService: Pick + gatewayService: Pick, + gatewayV2Service: Pick ) => { const secretMap = Object.fromEntries(Object.entries(ogSecretMap).map(([i, v]) => [i.toUpperCase(), v])); @@ -207,10 +214,10 @@ export const GithubSyncFns = { const token = connection.method === GitHubConnectionMethod.OAuth ? 
connection.credentials.accessToken - : await getGitHubAppAuthToken(connection, gatewayService); + : await getGitHubAppAuthToken(connection, gatewayService, gatewayV2Service); - const encryptedSecrets = await getEncryptedSecrets(secretSync, gatewayService); - const publicKey = await getPublicKey(secretSync, gatewayService, token); + const encryptedSecrets = await getEncryptedSecrets(secretSync, gatewayService, gatewayV2Service); + const publicKey = await getPublicKey(secretSync, gatewayService, gatewayV2Service, token); await sodium.ready; for await (const key of Object.keys(secretMap)) { @@ -225,7 +232,7 @@ export const GithubSyncFns = { const encryptedSecretValue = sodium.to_base64(encryptedBytes, sodium.base64_variants.ORIGINAL); try { - await putSecret(secretSync, gatewayService, token, { + await putSecret(secretSync, gatewayService, gatewayV2Service, token, { secret_name: key, encrypted_value: encryptedSecretValue, key_id: publicKey.key_id @@ -246,7 +253,7 @@ export const GithubSyncFns = { continue; if (!(encryptedSecret.name in secretMap)) { - await deleteSecret(secretSync, gatewayService, token, encryptedSecret); + await deleteSecret(secretSync, gatewayService, gatewayV2Service, token, encryptedSecret); } } }, @@ -256,7 +263,8 @@ export const GithubSyncFns = { removeSecrets: async ( secretSync: TGitHubSyncWithCredentials, ogSecretMap: TSecretMap, - gatewayService: Pick + gatewayService: Pick, + gatewayV2Service: Pick ) => { const secretMap = Object.fromEntries(Object.entries(ogSecretMap).map(([i, v]) => [i.toUpperCase(), v])); @@ -264,13 +272,13 @@ export const GithubSyncFns = { const token = connection.method === GitHubConnectionMethod.OAuth ? 
connection.credentials.accessToken - : await getGitHubAppAuthToken(connection, gatewayService); + : await getGitHubAppAuthToken(connection, gatewayService, gatewayV2Service); - const encryptedSecrets = await getEncryptedSecrets(secretSync, gatewayService); + const encryptedSecrets = await getEncryptedSecrets(secretSync, gatewayService, gatewayV2Service); for await (const encryptedSecret of encryptedSecrets) { if (encryptedSecret.name in secretMap) { - await deleteSecret(secretSync, gatewayService, token, encryptedSecret); + await deleteSecret(secretSync, gatewayService, gatewayV2Service, token, encryptedSecret); } } } diff --git a/backend/src/services/secret-sync/secret-sync-fns.ts b/backend/src/services/secret-sync/secret-sync-fns.ts index 5b083baf66..a6faebd1ec 100644 --- a/backend/src/services/secret-sync/secret-sync-fns.ts +++ b/backend/src/services/secret-sync/secret-sync-fns.ts @@ -2,6 +2,7 @@ import { AxiosError } from "axios"; import handlebars from "handlebars"; import { TGatewayServiceFactory } from "@app/ee/services/gateway/gateway-service"; +import { TGatewayV2ServiceFactory } from "@app/ee/services/gateway-v2/gateway-v2-service"; import { TLicenseServiceFactory } from "@app/ee/services/license/license-service"; import { OCI_VAULT_SYNC_LIST_OPTION, OCIVaultSyncFns } from "@app/ee/services/secret-sync/oci-vault"; import { BadRequestError } from "@app/lib/errors"; @@ -101,6 +102,7 @@ type TSyncSecretDeps = { appConnectionDAL: Pick; kmsService: Pick; gatewayService: Pick; + gatewayV2Service: Pick; }; // Add schema to secret keys @@ -195,7 +197,7 @@ export const SecretSyncFns = { syncSecrets: ( secretSync: TSecretSyncWithCredentials, secretMap: TSecretMap, - { kmsService, appConnectionDAL, gatewayService }: TSyncSecretDeps + { kmsService, appConnectionDAL, gatewayService, gatewayV2Service }: TSyncSecretDeps ): Promise => { const schemaSecretMap = addSchema(secretMap, secretSync.environment?.slug || "", secretSync.syncOptions.keySchema); @@ -205,7 +207,7 @@ 
export const SecretSyncFns = { case SecretSync.AWSSecretsManager: return AwsSecretsManagerSyncFns.syncSecrets(secretSync, schemaSecretMap); case SecretSync.GitHub: - return GithubSyncFns.syncSecrets(secretSync, schemaSecretMap, gatewayService); + return GithubSyncFns.syncSecrets(secretSync, schemaSecretMap, gatewayService, gatewayV2Service); case SecretSync.GCPSecretManager: return GcpSyncFns.syncSecrets(secretSync, schemaSecretMap); case SecretSync.AzureKeyVault: @@ -404,7 +406,7 @@ export const SecretSyncFns = { removeSecrets: ( secretSync: TSecretSyncWithCredentials, secretMap: TSecretMap, - { kmsService, appConnectionDAL, gatewayService }: TSyncSecretDeps + { kmsService, appConnectionDAL, gatewayService, gatewayV2Service }: TSyncSecretDeps ): Promise => { const schemaSecretMap = addSchema(secretMap, secretSync.environment?.slug || "", secretSync.syncOptions.keySchema); @@ -414,7 +416,7 @@ export const SecretSyncFns = { case SecretSync.AWSSecretsManager: return AwsSecretsManagerSyncFns.removeSecrets(secretSync, schemaSecretMap); case SecretSync.GitHub: - return GithubSyncFns.removeSecrets(secretSync, schemaSecretMap, gatewayService); + return GithubSyncFns.removeSecrets(secretSync, schemaSecretMap, gatewayService, gatewayV2Service); case SecretSync.GCPSecretManager: return GcpSyncFns.removeSecrets(secretSync, schemaSecretMap); case SecretSync.AzureKeyVault: diff --git a/backend/src/services/secret-sync/secret-sync-queue.ts b/backend/src/services/secret-sync/secret-sync-queue.ts index 915ff89bf9..f75d84eda5 100644 --- a/backend/src/services/secret-sync/secret-sync-queue.ts +++ b/backend/src/services/secret-sync/secret-sync-queue.ts @@ -5,6 +5,7 @@ import { Job } from "bullmq"; import { ProjectMembershipRole, SecretType } from "@app/db/schemas"; import { EventType, TAuditLogServiceFactory } from "@app/ee/services/audit-log/audit-log-types"; import { TGatewayServiceFactory } from "@app/ee/services/gateway/gateway-service"; +import { TGatewayV2ServiceFactory } from 
"@app/ee/services/gateway-v2/gateway-v2-service"; import { TLicenseServiceFactory } from "@app/ee/services/license/license-service"; import { KeyStorePrefixes, TKeyStoreFactory } from "@app/keystore/keystore"; import { getConfig } from "@app/lib/config/env"; @@ -98,6 +99,7 @@ type TSecretSyncQueueFactoryDep = { folderCommitService: Pick; licenseService: Pick; gatewayService: Pick; + gatewayV2Service: Pick; }; type SecretSyncActionJob = Job< @@ -139,7 +141,8 @@ export const secretSyncQueueFactory = ({ resourceMetadataDAL, folderCommitService, licenseService, - gatewayService + gatewayService, + gatewayV2Service }: TSecretSyncQueueFactoryDep) => { const appCfg = getConfig(); @@ -395,7 +398,8 @@ export const secretSyncQueueFactory = ({ const importedSecrets = await SecretSyncFns.getSecrets(secretSync, { appConnectionDAL, kmsService, - gatewayService + gatewayService, + gatewayV2Service }); if (!Object.keys(importedSecrets).length) return {}; @@ -520,7 +524,8 @@ export const secretSyncQueueFactory = ({ await SecretSyncFns.syncSecrets(secretSyncWithCredentials, secretMap, { appConnectionDAL, kmsService, - gatewayService + gatewayService, + gatewayV2Service }); isSynced = true; @@ -762,7 +767,8 @@ export const secretSyncQueueFactory = ({ { appConnectionDAL, kmsService, - gatewayService + gatewayService, + gatewayV2Service } ); diff --git a/backend/src/services/secret-v2-bridge/secret-v2-bridge-dal.ts b/backend/src/services/secret-v2-bridge/secret-v2-bridge-dal.ts index 8d9c6958ad..93afb3b558 100644 --- a/backend/src/services/secret-v2-bridge/secret-v2-bridge-dal.ts +++ b/backend/src/services/secret-v2-bridge/secret-v2-bridge-dal.ts @@ -50,20 +50,19 @@ interface TSecretV2DalArg { } export const SECRET_DAL_TTL = () => applyJitter(10 * 60, 2 * 60); -export const SECRET_DAL_VERSION_TTL = 15 * 60; +export const SECRET_DAL_VERSION_TTL = "15m"; export const MAX_SECRET_CACHE_BYTES = 25 * 1024 * 1024; export const secretV2BridgeDALFactory = ({ db, keyStore }: TSecretV2DalArg) => 
{ const secretOrm = ormify(db, TableName.SecretV2); - const invalidateSecretCacheByProjectId = async (projectId: string) => { + const invalidateSecretCacheByProjectId = async (projectId: string, tx?: Knex) => { const secretDalVersionKey = SecretServiceCacheKeys.getSecretDalVersion(projectId); - await keyStore.incrementBy(secretDalVersionKey, 1); - await keyStore.setExpiry(secretDalVersionKey, SECRET_DAL_VERSION_TTL); + await keyStore.pgIncrementBy(secretDalVersionKey, { incr: 1, tx, expiry: SECRET_DAL_VERSION_TTL }); }; const findOne = async (filter: Partial, tx?: Knex) => { try { - const docs = await (tx || db)(TableName.SecretV2) + const docs = await (tx || db.replicaNode())(TableName.SecretV2) // eslint-disable-next-line @typescript-eslint/no-misused-promises .where(buildFindFilter(filter, TableName.SecretV2)) .leftJoin( @@ -144,7 +143,7 @@ export const secretV2BridgeDALFactory = ({ db, keyStore }: TSecretV2DalArg) => { const find = async (filter: TFindFilter, opts: TFindOpt = {}) => { const { offset, limit, sort, tx } = opts; try { - const query = (tx || db)(TableName.SecretV2) + const query = (tx || db.replicaNode())(TableName.SecretV2) // eslint-disable-next-line @typescript-eslint/no-misused-promises .where(buildFindFilter(filter)) .leftJoin( @@ -888,13 +887,13 @@ export const secretV2BridgeDALFactory = ({ db, keyStore }: TSecretV2DalArg) => { const findSecretsWithReminderRecipients = async (ids: string[], limit: number, tx?: Knex) => { try { // Create a subquery to get limited secret IDs - const limitedSecretIds = (tx || db)(TableName.SecretV2) + const limitedSecretIds = (tx || db.replicaNode())(TableName.SecretV2) .whereIn(`${TableName.SecretV2}.id`, ids) .limit(limit) .select("id"); // Join with all recipients for the limited secrets - const docs = await (tx || db)(TableName.SecretV2) + const docs = await (tx || db.replicaNode())(TableName.SecretV2) .whereIn(`${TableName.SecretV2}.id`, limitedSecretIds) .leftJoin(TableName.Reminder, 
`${TableName.SecretV2}.id`, `${TableName.Reminder}.secretId`) .leftJoin(TableName.ReminderRecipient, `${TableName.Reminder}.id`, `${TableName.ReminderRecipient}.reminderId`) @@ -926,13 +925,13 @@ export const secretV2BridgeDALFactory = ({ db, keyStore }: TSecretV2DalArg) => { const findSecretsWithReminderRecipientsOld = async (ids: string[], limit: number, tx?: Knex) => { try { // Create a subquery to get limited secret IDs - const limitedSecretIds = (tx || db)(TableName.SecretV2) + const limitedSecretIds = (tx || db.replicaNode())(TableName.SecretV2) .whereIn(`${TableName.SecretV2}.id`, ids) .limit(limit) .select("id"); // Join with all recipients for the limited secrets - const docs = await (tx || db)(TableName.SecretV2) + const docs = await (tx || db.replicaNode())(TableName.SecretV2) .whereIn(`${TableName.SecretV2}.id`, limitedSecretIds) .leftJoin(TableName.Reminder, `${TableName.SecretV2}.id`, `${TableName.Reminder}.secretId`) .leftJoin( diff --git a/backend/src/services/secret-v2-bridge/secret-v2-bridge-service.ts b/backend/src/services/secret-v2-bridge/secret-v2-bridge-service.ts index 48ab078162..9db535fbe3 100644 --- a/backend/src/services/secret-v2-bridge/secret-v2-bridge-service.ts +++ b/backend/src/services/secret-v2-bridge/secret-v2-bridge-service.ts @@ -118,7 +118,7 @@ type TSecretV2BridgeServiceFactoryDep = { >; snapshotService: Pick; resourceMetadataDAL: Pick; - keyStore: Pick; + keyStore: Pick; reminderService: Pick; }; @@ -360,6 +360,7 @@ export const secretV2BridgeServiceFactory = ({ tx }); + await secretDAL.invalidateSecretCacheByProjectId(projectId, tx); return createdSecret; }); @@ -377,7 +378,6 @@ export const secretV2BridgeServiceFactory = ({ }); } - await secretDAL.invalidateSecretCacheByProjectId(projectId); if (inputSecret.type === SecretType.Shared) { await snapshotService.performSnapshot(folderId); await secretQueueService.syncSecrets({ @@ -566,8 +566,8 @@ export const secretV2BridgeServiceFactory = ({ await 
$validateSecretReferences(projectId, permission, allSecretReferences); } - const updatedSecret = await secretDAL.transaction(async (tx) => - fnSecretBulkUpdate({ + const updatedSecret = await secretDAL.transaction(async (tx) => { + const modifiedSecretsInDB = await fnSecretBulkUpdate({ folderId, orgId: actorOrgId, resourceMetadataDAL, @@ -598,8 +598,11 @@ export const secretV2BridgeServiceFactory = ({ actorId }, tx - }) - ); + }); + + await secretDAL.invalidateSecretCacheByProjectId(projectId, tx); + return modifiedSecretsInDB; + }); if (inputSecret.secretReminderRepeatDays) { await reminderService.createReminder({ actor, @@ -615,7 +618,6 @@ export const secretV2BridgeServiceFactory = ({ }); } - await secretDAL.invalidateSecretCacheByProjectId(projectId); if (inputSecret.type === SecretType.Shared) { await snapshotService.performSnapshot(folderId); await secretQueueService.syncSecrets({ @@ -715,8 +717,8 @@ export const secretV2BridgeServiceFactory = ({ ); try { - const deletedSecret = await secretDAL.transaction(async (tx) => - fnSecretBulkDelete({ + const deletedSecret = await secretDAL.transaction(async (tx) => { + const modifiedSecretsInDB = await fnSecretBulkDelete({ projectId, folderId, actorId, @@ -732,10 +734,11 @@ export const secretV2BridgeServiceFactory = ({ } ], tx - }) - ); + }); + await secretDAL.invalidateSecretCacheByProjectId(projectId, tx); + return modifiedSecretsInDB; + }); - await secretDAL.invalidateSecretCacheByProjectId(projectId); if (inputSecret.type === SecretType.Shared) { await snapshotService.performSnapshot(folderId); await secretQueueService.syncSecrets({ @@ -1027,7 +1030,7 @@ export const secretV2BridgeServiceFactory = ({ }); throwIfMissingSecretReadValueOrDescribePermission(permission, ProjectPermissionSecretActions.DescribeSecret); - const cachedSecretDalVersion = await keyStore.getItem(SecretServiceCacheKeys.getSecretDalVersion(projectId)); + const cachedSecretDalVersion = await 
keyStore.pgGetIntItem(SecretServiceCacheKeys.getSecretDalVersion(projectId)); const secretDalVersion = Number(cachedSecretDalVersion || 0); const cacheKey = SecretServiceCacheKeys.getSecretsOfServiceLayer(projectId, secretDalVersion, { ...dto, @@ -1692,7 +1695,7 @@ export const secretV2BridgeServiceFactory = ({ await kmsService.createCipherPairWithDataKey({ type: KmsDataKey.SecretManager, projectId }); const executeBulkInsert = async (tx: Knex) => { - return fnSecretBulkInsert({ + const modifiedSecretsInDB = await fnSecretBulkInsert({ inputSecrets: inputSecrets.map((el) => { const references = secretReferencesGroupByInputSecretKey[el.secretKey]?.nestedReferences; @@ -1728,13 +1731,14 @@ export const secretV2BridgeServiceFactory = ({ }, tx }); + await secretDAL.invalidateSecretCacheByProjectId(projectId, tx); + return modifiedSecretsInDB; }; const newSecrets = providedTx ? await executeBulkInsert(providedTx) : await secretDAL.transaction(executeBulkInsert); - await secretDAL.invalidateSecretCacheByProjectId(projectId); await snapshotService.performSnapshot(folderId); await secretQueueService.syncSecrets({ actor, @@ -2099,6 +2103,7 @@ export const secretV2BridgeServiceFactory = ({ } } + await secretDAL.invalidateSecretCacheByProjectId(projectId, tx); return updatedSecrets; }; @@ -2106,7 +2111,6 @@ export const secretV2BridgeServiceFactory = ({ ? await executeBulkUpdate(providedTx) : await secretDAL.transaction(executeBulkUpdate); - await secretDAL.invalidateSecretCacheByProjectId(projectId); await Promise.allSettled(folders.map((el) => (el?.id ? 
snapshotService.performSnapshot(el.id) : undefined))); await Promise.allSettled( folders.map((el) => @@ -2233,7 +2237,7 @@ export const secretV2BridgeServiceFactory = ({ }); const executeBulkDelete = async (tx: Knex) => { - return fnSecretBulkDelete({ + const modifiedSecretsInDB = await fnSecretBulkDelete({ secretDAL, secretQueueService, folderCommitService, @@ -2249,6 +2253,8 @@ export const secretV2BridgeServiceFactory = ({ commitChanges, tx }); + await secretDAL.invalidateSecretCacheByProjectId(projectId, tx); + return modifiedSecretsInDB; }; try { @@ -2256,7 +2262,6 @@ export const secretV2BridgeServiceFactory = ({ ? await executeBulkDelete(providedTx) : await secretDAL.transaction(executeBulkDelete); - await secretDAL.invalidateSecretCacheByProjectId(projectId); await snapshotService.performSnapshot(folderId); await secretQueueService.syncSecrets({ actor, diff --git a/backend/src/services/secret-v2-bridge/secret-version-dal.ts b/backend/src/services/secret-v2-bridge/secret-version-dal.ts index 9537b79e34..0282fa5372 100644 --- a/backend/src/services/secret-v2-bridge/secret-version-dal.ts +++ b/backend/src/services/secret-v2-bridge/secret-version-dal.ts @@ -72,7 +72,7 @@ export const secretVersionV2BridgeDALFactory = (db: TDbClient) => { .where(`${TableName.SecretVersionV2}.folderId`, folderId) .join(TableName.SecretV2, `${TableName.SecretV2}.id`, `${TableName.SecretVersionV2}.secretId`) .join( - (tx || db)(TableName.SecretVersionV2) + (tx || db.replicaNode())(TableName.SecretVersionV2) .where(`${TableName.SecretVersionV2}.folderId`, folderId) .groupBy("secretId") .max("version") @@ -121,7 +121,7 @@ export const secretVersionV2BridgeDALFactory = (db: TDbClient) => { .where("folderId", folderId) .whereIn(`${TableName.SecretVersionV2}.secretId`, secretIds) .join( - (tx || db)(TableName.SecretVersionV2) + (tx || db.replicaNode())(TableName.SecretVersionV2) .groupBy("secretId") .max("version") .select("secretId") @@ -189,7 +189,7 @@ export const 
secretVersionV2BridgeDALFactory = (db: TDbClient) => { }) => { try { const { offset, limit, sort = [["createdAt", "desc"]] } = findOpt; - const query = (tx || db)(TableName.SecretVersionV2) + const query = (tx || db.replicaNode())(TableName.SecretVersionV2) .leftJoin(TableName.Users, `${TableName.Users}.id`, `${TableName.SecretVersionV2}.userActorId`) .leftJoin( TableName.ProjectMembership, diff --git a/backend/src/services/super-admin/super-admin-dal.ts b/backend/src/services/super-admin/super-admin-dal.ts index d7d11a5d26..571583cc32 100644 --- a/backend/src/services/super-admin/super-admin-dal.ts +++ b/backend/src/services/super-admin/super-admin-dal.ts @@ -11,7 +11,7 @@ export const superAdminDALFactory = (db: TDbClient) => { const superAdminOrm = ormify(db, TableName.SuperAdmin); const findById = async (id: string, tx?: Knex) => { - const config = await (tx || db)(TableName.SuperAdmin) + const config = await (tx || db.replicaNode())(TableName.SuperAdmin) .where(`${TableName.SuperAdmin}.id`, id) .leftJoin(TableName.Organization, `${TableName.SuperAdmin}.defaultAuthOrgId`, `${TableName.Organization}.id`) .leftJoin(TableName.SamlConfig, (qb) => { diff --git a/backend/src/services/user/user-dal.ts b/backend/src/services/user/user-dal.ts index 0f623dff1c..4267d13ee1 100644 --- a/backend/src/services/user/user-dal.ts +++ b/backend/src/services/user/user-dal.ts @@ -19,12 +19,16 @@ export type TUserDALFactory = ReturnType; export const userDALFactory = (db: TDbClient) => { const userOrm = ormify(db, TableName.Users); const findUserByUsername = async (username: string, tx?: Knex) => - (tx || db)(TableName.Users).whereRaw('lower("username") = :username', { username: username.toLowerCase() }); + (tx || db.replicaNode())(TableName.Users).whereRaw('lower("username") = :username', { + username: username.toLowerCase() + }); const findUserByEmail = async (email: string, tx?: Knex) => - (tx || db)(TableName.Users).whereRaw('lower("email") = :email', { email: 
email.toLowerCase() }).where({ - isEmailVerified: true - }); + (tx || db.replicaNode())(TableName.Users) + .whereRaw('lower("email") = :email', { email: email.toLowerCase() }) + .where({ + isEmailVerified: true + }); const getUsersByFilter = async ({ limit, diff --git a/backend/src/services/user/user-service.ts b/backend/src/services/user/user-service.ts index b0d7be0fe6..f97e98fa34 100644 --- a/backend/src/services/user/user-service.ts +++ b/backend/src/services/user/user-service.ts @@ -1,4 +1,5 @@ import { ForbiddenError } from "@casl/ability"; +import { Knex } from "knex"; import { OrgPermissionActions, OrgPermissionSubjects } from "@app/ee/services/permission/org-permission"; import { TPermissionServiceFactory } from "@app/ee/services/permission/permission-service-types"; @@ -7,6 +8,7 @@ import { BadRequestError, ForbiddenRequestError, NotFoundError } from "@app/lib/ import { logger } from "@app/lib/logger"; import { TAuthTokenServiceFactory } from "@app/services/auth-token/auth-token-service"; import { TokenType } from "@app/services/auth-token/auth-token-types"; +import { TOrgDALFactory } from "@app/services/org/org-dal"; import { TOrgMembershipDALFactory } from "@app/services/org-membership/org-membership-dal"; import { SmtpTemplates, TSmtpService } from "@app/services/smtp/smtp-service"; @@ -15,7 +17,7 @@ import { TGroupProjectDALFactory } from "../group-project/group-project-dal"; import { TProjectMembershipDALFactory } from "../project-membership/project-membership-dal"; import { TUserAliasDALFactory } from "../user-alias/user-alias-dal"; import { TUserDALFactory } from "./user-dal"; -import { TListUserGroupsDTO, TUpdateUserMfaDTO } from "./user-types"; +import { TListUserGroupsDTO, TUpdateUserEmailDTO, TUpdateUserMfaDTO } from "./user-types"; type TUserServiceFactoryDep = { userDAL: Pick< @@ -34,18 +36,20 @@ type TUserServiceFactoryDep = { | "findAllMyAccounts" >; groupProjectDAL: Pick; + orgDAL: Pick; orgMembershipDAL: Pick; - tokenService: Pick; + 
tokenService: Pick; projectMembershipDAL: Pick; smtpService: Pick; permissionService: TPermissionServiceFactory; - userAliasDAL: Pick; + userAliasDAL: Pick; }; export type TUserServiceFactory = ReturnType; export const userServiceFactory = ({ userDAL, + orgDAL, orgMembershipDAL, projectMembershipDAL, groupProjectDAL, @@ -178,6 +182,135 @@ export const userServiceFactory = ({ return updatedUser; }; + const checkUserScimRestriction = async (userId: string, tx?: Knex) => { + const userOrgs = await orgMembershipDAL.find({ userId }, { tx }); + + if (userOrgs.length === 0) { + return false; + } + + const orgIds = userOrgs.map((membership) => membership.orgId); + const organizations = await orgDAL.find({ $in: { id: orgIds } }, { tx }); + + return organizations.some((org) => org.scimEnabled); + }; + + const requestEmailChangeOTP = async ({ userId, newEmail }: TUpdateUserEmailDTO) => { + const startTime = new Date(); + const changeEmailOTP = await userDAL.transaction(async (tx) => { + const user = await userDAL.findById(userId, tx); + if (!user) + throw new NotFoundError({ message: `User with ID '${userId}' not found`, name: "RequestEmailChangeOTP" }); + + if (user.authMethods?.includes(AuthMethod.LDAP)) { + throw new BadRequestError({ message: "Cannot update email for LDAP users", name: "RequestEmailChangeOTP" }); + } + + const hasScimRestriction = await checkUserScimRestriction(userId, tx); + if (hasScimRestriction) { + throw new BadRequestError({ + message: "Email changes are disabled because SCIM is enabled for one or more of your organizations", + name: "RequestEmailChangeOTP" + }); + } + + // Silently check if another user already has this email - don't send OTP if email is taken + const existingUsers = await userDAL.findUserByUsername(newEmail.toLowerCase(), tx); + const existingUser = existingUsers?.find((u) => u.id !== userId); + if (!existingUser) { + // Generate 6-digit OTP + const otpCode = await tokenService.createTokenForUser({ + type: 
TokenType.TOKEN_EMAIL_CHANGE_OTP, + userId, + payload: newEmail.toLowerCase() + }); + + // Send OTP to NEW email address + await smtpService.sendMail({ + template: SmtpTemplates.EmailVerification, + subjectLine: "Infisical email change verification", + recipients: [newEmail.toLowerCase()], + substitutions: { + code: otpCode + } + }); + } + + return { success: true, message: "Verification code sent to new email address" }; + }); + // Force this function to have a minimum execution time of 2 seconds to avoid possible information disclosure about existing users + const endTime = new Date(); + const timeDiff = endTime.getTime() - startTime.getTime(); + if (timeDiff < 2000) { + await new Promise((resolve) => { + setTimeout(resolve, 2000 - timeDiff); + }); + } + return changeEmailOTP; + }; + + const updateUserEmail = async ({ userId, newEmail, otpCode }: TUpdateUserEmailDTO & { otpCode: string }) => { + const changedUser = await userDAL.transaction(async (tx) => { + const user = await userDAL.findById(userId, tx); + if (!user) throw new NotFoundError({ message: `User with ID '${userId}' not found`, name: "UpdateUserEmail" }); + + if (user.authMethods?.includes(AuthMethod.LDAP)) { + throw new BadRequestError({ message: "Cannot update email for LDAP users", name: "UpdateUserEmail" }); + } + + const hasScimRestriction = await checkUserScimRestriction(userId, tx); + if (hasScimRestriction) { + throw new BadRequestError({ + message: "You are part of an organization that has SCIM enabled, and email changes are not allowed", + name: "UpdateUserEmail" + }); + } + + // Validate OTP and get the new email from token payload field + let tokenData; + try { + tokenData = await tokenService.validateTokenForUser({ + type: TokenType.TOKEN_EMAIL_CHANGE_OTP, + userId, + code: otpCode + }); + } catch (error) { + throw new BadRequestError({ message: "Invalid verification code", name: "UpdateUserEmail" }); + } + + // Verify the new email matches what was stored in payload + const
tokenNewEmail = tokenData?.payload; + if (!tokenNewEmail || tokenNewEmail !== newEmail.toLowerCase()) { + throw new BadRequestError({ message: "Invalid verification code", name: "UpdateUserEmail" }); + } + + // Final check if another user has this email + const existingUsers = await userDAL.findUserByUsername(newEmail.toLowerCase(), tx); + const existingUser = existingUsers?.find((u) => u.id !== userId); + if (existingUser) { + throw new BadRequestError({ message: "Email is no longer available", name: "UpdateUserEmail" }); + } + + // Delete all user aliases since the email is changing + await userAliasDAL.delete({ userId }, tx); + + const updatedUser = await userDAL.updateById( + userId, + { + email: newEmail.toLowerCase(), + username: newEmail.toLowerCase() + }, + tx + ); + + // Revoke all sessions to force re-login + await tokenService.revokeAllMySessions(userId); + + return updatedUser; + }); + return changedUser; + }; + const getAllMyAccounts = async (email: string, userId: string) => { const users = await userDAL.findAllMyAccounts(email); return users?.map((el) => ({ ...el, isMyAccount: el.id === userId })); @@ -313,6 +446,8 @@ export const userServiceFactory = ({ updateUserMfa, updateUserName, updateAuthMethods, + requestEmailChangeOTP, + updateUserEmail, deleteUser, getMe, createUserAction, diff --git a/backend/src/services/user/user-types.ts b/backend/src/services/user/user-types.ts index cef13f27a7..7a974a899b 100644 --- a/backend/src/services/user/user-types.ts +++ b/backend/src/services/user/user-types.ts @@ -16,3 +16,8 @@ export type TUpdateUserMfaDTO = { isMfaEnabled?: boolean; selectedMfaMethod?: MfaMethod; }; + +export type TUpdateUserEmailDTO = { + userId: string; + newEmail: string; +}; diff --git a/docs/cli/commands/gateway.mdx b/docs/cli/commands/gateway.mdx index a12493c58a..99d0e10865 100644 --- a/docs/cli/commands/gateway.mdx +++ b/docs/cli/commands/gateway.mdx @@ -4,34 +4,384 @@ description: "Run the Infisical gateway or manage its systemd 
service" --- - + ```bash - infisical gateway --token= + infisical gateway start --name= --relay= --auth-method= ``` - + ```bash - sudo infisical gateway install --token= --domain= + sudo infisical gateway systemd install --token= --domain= --name= --relay= ``` ## Description -Run the Infisical gateway in the foreground or manage its systemd service installation. The gateway allows secure communication between your self-hosted Infisical instance and client applications. +The Infisical gateway provides secure access to private resources using modern TCP-based SSH tunnel architecture with enhanced security and flexible deployment options. + +The gateway system uses SSH reverse tunnels over TCP, eliminating firewall complexity and providing excellent performance for enterprise environments. + + +**Deprecation and Migration Notice:** The legacy `infisical gateway` command (v1) will be removed in a future release. Please migrate to `infisical gateway start` (Gateway v2). + +If you are moving from Gateway v1 to Gateway v2, this is NOT a drop-in switch. Gateway v2 creates new gateway instances with new gateway IDs. You must update any existing resources that reference gateway IDs (for example: dynamic secret configs, app connections, or other gateway-bound resources) to point to the new Gateway v2 gateway ID. Until you update those references, traffic will continue to target the old v1 gateway. + + ## Subcommands & flags - - Run the Infisical gateway in the foreground. The gateway will connect to the relay service and maintain a persistent connection. + + Run the Infisical gateway component within your VPC. The gateway establishes an SSH reverse tunnel to the specified relay server and provides secure access to private resources. - ```bash - infisical gateway --domain= --auth-method= - ``` +```bash +infisical gateway start --relay= --name= --auth-method= +``` - ### Authentication +The gateway component: - The Infisical CLI supports multiple authentication methods. 
Below are the available authentication methods, with their respective flags. +- Establishes outbound SSH reverse tunnels to relay servers (no inbound firewall rules needed) +- Authenticates using SSH certificates issued by Infisical +- Automatically reconnects if the connection is lost +- Provides access to private resources within your network + +### Authentication + +The Infisical CLI supports multiple authentication methods. Below are the available authentication methods, with their respective flags. + + + + The Universal Auth method is a simple and secure way to authenticate with Infisical. It requires a client ID and a client secret to authenticate with Infisical. + + + + + Your machine identity client ID. + + + Your machine identity client secret. + + + The authentication method to use. Must be `universal-auth` when using Universal Auth. + + + + + ```bash + infisical gateway start --auth-method=universal-auth --client-id= --client-secret= --relay= --name= + ``` + + + + The Native Kubernetes method is used to authenticate with Infisical when running in a Kubernetes environment. It requires a service account token to authenticate with Infisical. + + + + + Your machine identity ID. + + + Path to the Kubernetes service account token to use. Default: `/var/run/secrets/kubernetes.io/serviceaccount/token`. + + + The authentication method to use. Must be `kubernetes` when using Native Kubernetes. + + + + + + + ```bash + infisical gateway start --auth-method=kubernetes --machine-identity-id= --relay= --name= + ``` + + + + The Native Azure method is used to authenticate with Infisical when running in an Azure environment. + + + + + Your machine identity ID. + + + The authentication method to use. Must be `azure` when using Native Azure. + + + + + + + ```bash + infisical gateway start --auth-method=azure --machine-identity-id= --relay= --name= + ``` + + + + The Native GCP ID Token method is used to authenticate with Infisical when running in a GCP environment. 
+ + + + + Your machine identity ID. + + + The authentication method to use. Must be `gcp-id-token` when using Native GCP ID Token. + + + + + + + ```bash + infisical gateway start --auth-method=gcp-id-token --machine-identity-id= --relay= --name= + ``` + + + + The GCP IAM method is used to authenticate with Infisical with a GCP service account key. + + + + + Your machine identity ID. + + + Path to your GCP service account key file _(Must be in JSON format!)_ + + + The authentication method to use. Must be `gcp-iam` when using GCP IAM. + + + + + ```bash + infisical gateway start --auth-method=gcp-iam --machine-identity-id= --service-account-key-file-path= --relay= --name= + ``` + + + + The AWS IAM method is used to authenticate with Infisical with an AWS IAM role while running in an AWS environment like EC2, Lambda, etc. + + + + + Your machine identity ID. + + + The authentication method to use. Must be `aws-iam` when using Native AWS IAM. + + + + + ```bash + infisical gateway start --auth-method=aws-iam --machine-identity-id= --relay= --name= + ``` + + + + The OIDC Auth method is used to authenticate with Infisical via identity tokens with OIDC. + + + + + Your machine identity ID. + + + The OIDC JWT from the identity provider. + + + The authentication method to use. Must be `oidc-auth` when using OIDC Auth. + + + + + ```bash + infisical gateway start --auth-method=oidc-auth --machine-identity-id= --jwt= --relay= --name= + ``` + + + + + The JWT Auth method is used to authenticate with Infisical via a JWT token. + + + + + The JWT token to use for authentication. + + + Your machine identity ID. + + + The authentication method to use. Must be `jwt-auth` when using JWT Auth. + + + + + + ```bash + infisical gateway start --auth-method=jwt-auth --jwt= --machine-identity-id= --relay= --name= + ``` + + + + You can use the `INFISICAL_TOKEN` environment variable to authenticate with Infisical with a raw machine identity access token. 
+ + + + + The machine identity access token to use for authentication. + + + + + ```bash + infisical gateway start --token= --relay= --name= + ``` + + + + +### Other Flags + + + The name of the relay that this gateway should connect to. The relay must be running and registered before starting the gateway. + + ```bash + # Example + infisical gateway start --relay=my-relay --name=my-gateway --token= + ``` + + **Note:** If using organization relays or self-hosted instance relays, you must first start a relay server using `infisical relay start` before connecting gateways to it. For Infisical Cloud users using instance relays, the relay infrastructure is already running and managed by Infisical. + + + + + The name of the gateway instance. + + ```bash + # Example + infisical gateway start --name=my-gateway --relay=my-relay --token= + ``` + + + + + Domain of your self-hosted Infisical instance. + + ```bash + # Example + infisical gateway start --domain=https://app.your-domain.com --relay= --name= + ``` + + + + + + Install and enable the gateway as a systemd service. This command must be run with sudo on Linux. + +```bash +sudo infisical gateway systemd install --token= --domain= --name= --relay= +``` + +### Requirements + +- Must be run on Linux +- Must be run with root/sudo privileges +- Requires systemd + +### Flags + + + The machine identity access token to authenticate with Infisical. + + ```bash + # Example + sudo infisical gateway systemd install --token= --name= --relay= + ``` + + You may also expose the token to the CLI by setting the environment variable `INFISICAL_TOKEN` before executing the install command. + + + + + Domain of your self-hosted Infisical instance. + + ```bash + # Example + sudo infisical gateway systemd install --domain=https://app.your-domain.com --name= --relay= + ``` + + + + + The name of the gateway instance. 
+ + ```bash + # Example + sudo infisical gateway systemd install --name=my-gateway --token= --relay= + ``` + + + + + The name of the relay that this gateway should connect to. + + ```bash + # Example + sudo infisical gateway systemd install --relay=my-relay --token= --name= + ``` + + + +### Service Details + +The systemd service is installed with secure defaults: + +- Service file: `/etc/systemd/system/infisical-gateway.service` +- Config file: `/etc/infisical/gateway.conf` +- Runs with restricted privileges: + - InaccessibleDirectories=/home + - PrivateTmp=yes + - Resource limits configured for stability +- Automatically restarts on failure +- Enabled to start on boot +- Maintains persistent SSH reverse tunnel connections to the specified relay +- Handles certificate rotation and connection recovery automatically + +After installation, manage the service with standard systemd commands: + +```bash +sudo systemctl start infisical-gateway # Start the service +sudo systemctl stop infisical-gateway # Stop the service +sudo systemctl status infisical-gateway # Check service status +sudo systemctl disable infisical-gateway # Disable auto-start on boot +``` + + + +## Legacy Gateway Commands (Deprecated) + + + + **This command is deprecated and will be removed in a future release.** + + Please migrate to `infisical gateway start` for the new TCP-based SSH tunnel architecture. + +**Migration required:** If you are currently using Gateway v1 (via `infisical gateway`), moving to Gateway v2 is not in-place. Gateway v2 provisions new gateway instances with new gateway IDs. Update any resources that reference a gateway ID (for example: dynamic secret configs, app connections, or other gateway-bound resources) to use the new Gateway v2 gateway ID. Until you update those references, traffic will continue to target the old v1 gateway. + + + +Run the legacy Infisical gateway in the foreground. The gateway will connect to the relay service and maintain a persistent connection. 
+ +```bash +infisical gateway --domain= --auth-method= +``` + +### Authentication + +The Infisical CLI supports multiple authentication methods. Below are the available authentication methods, with their respective flags. @@ -121,7 +471,6 @@ Run the Infisical gateway in the foreground or manage its systemd service instal infisical gateway --auth-method=gcp-id-token --machine-identity-id= ``` - The GCP IAM method is used to authenticate with Infisical with a GCP service account key. @@ -163,7 +512,6 @@ Run the Infisical gateway in the foreground or manage its systemd service instal infisical gateway --auth-method=aws-iam --machine-identity-id= ``` - The OIDC Auth method is used to authenticate with Infisical via identity tokens with OIDC. @@ -185,6 +533,7 @@ Run the Infisical gateway in the foreground or manage its systemd service instal ```bash infisical gateway --auth-method=oidc-auth --machine-identity-id= --jwt= ``` + @@ -208,6 +557,7 @@ Run the Infisical gateway in the foreground or manage its systemd service instal ```bash infisical gateway --auth-method=jwt-auth --jwt= --machine-identity-id= ``` + You can use the `INFISICAL_TOKEN` environment variable to authenticate with Infisical with a raw machine identity access token. @@ -227,7 +577,7 @@ Run the Infisical gateway in the foreground or manage its systemd service instal - ### Other Flags +### Other Flags Domain of your self-hosted Infisical instance. @@ -236,22 +586,33 @@ Run the Infisical gateway in the foreground or manage its systemd service instal # Example infisical gateway --domain=https://app.your-domain.com ``` + - - Install and enable the gateway as a systemd service. This command must be run with sudo on Linux. + + + **This command is deprecated and will be removed in a future release.** + + Please migrate to `infisical gateway systemd install` for the new TCP-based SSH tunnel architecture with enhanced security and better performance. 
- ```bash - sudo infisical gateway install --token= --domain= - ``` +**Migration required:** If you previously installed Gateway v1 via `infisical gateway install`, moving to Gateway v2 is not in-place. Gateway v2 provisions new gateway instances with new gateway IDs. Update any resources that reference a gateway ID (for example: dynamic secret configs, app connections, or other gateway-bound resources) to use the new Gateway v2 gateway ID. Until you update those references, traffic will continue to target the old v1 gateway. - ### Requirements - - Must be run on Linux - - Must be run with root/sudo privileges - - Requires systemd + - ### Flags +Install and enable the legacy gateway as a systemd service. This command must be run with sudo on Linux. + +```bash +sudo infisical gateway install --token= --domain= +``` + +### Requirements + +- Must be run on Linux +- Must be run with root/sudo privileges +- Requires systemd + +### Flags The machine identity access token to authenticate with Infisical. @@ -262,6 +623,7 @@ Run the Infisical gateway in the foreground or manage its systemd service instal ``` You may also expose the token to the CLI by setting the environment variable `INFISICAL_TOKEN` before executing the install command. 
+ @@ -271,24 +633,29 @@ Run the Infisical gateway in the foreground or manage its systemd service instal # Example sudo infisical gateway install --domain=https://app.your-domain.com ``` + - ### Service Details - The systemd service is installed with secure defaults: - - Service file: `/etc/systemd/system/infisical-gateway.service` - - Config file: `/etc/infisical/gateway.conf` - - Runs with restricted privileges: - - InaccessibleDirectories=/home - - PrivateTmp=yes - - Resource limits configured for stability - - Automatically restarts on failure - - Enabled to start on boot +### Service Details + +The systemd service is installed with secure defaults: + +- Service file: `/etc/systemd/system/infisical-gateway.service` +- Config file: `/etc/infisical/gateway.conf` +- Runs with restricted privileges: + - InaccessibleDirectories=/home + - PrivateTmp=yes + - Resource limits configured for stability +- Automatically restarts on failure +- Enabled to start on boot + +After installation, manage the service with standard systemd commands: + +```bash +sudo systemctl start infisical-gateway # Start the service +sudo systemctl stop infisical-gateway # Stop the service +sudo systemctl status infisical-gateway # Check service status +sudo systemctl disable infisical-gateway # Disable auto-start on boot +``` - After installation, manage the service with standard systemd commands: - ```bash - sudo systemctl start infisical-gateway # Start the service - sudo systemctl stop infisical-gateway # Stop the service - sudo systemctl status infisical-gateway # Check service status - sudo systemctl disable infisical-gateway # Disable auto-start on boot - ``` diff --git a/docs/cli/commands/relay.mdx b/docs/cli/commands/relay.mdx new file mode 100644 index 0000000000..46a061da30 --- /dev/null +++ b/docs/cli/commands/relay.mdx @@ -0,0 +1,306 @@ +--- +title: "infisical relay" +description: "Relay-related commands for Infisical" +--- + + + + ```bash + infisical relay start --type= --host= 
--name= --auth-method= + ``` + + + +## Description + +Relay-related commands for Infisical that provide identity-aware relay infrastructure for routing encrypted traffic: + +- **Relay**: Identity-aware server that routes encrypted traffic (can be instance-wide or organization-specific) + +The relay system uses SSH reverse tunnels over TCP, eliminating firewall complexity and providing excellent performance for enterprise environments. + +## Subcommands & flags + + + Run the Infisical relay component. The relay handles network traffic routing and can operate in different modes. + +```bash +infisical relay start --type= --host= --name= --auth-method= +``` + +### Flags + + + The type of relay to run. Must be either 'instance' or 'org'. + + - **`instance`**: Shared relay server that can be used by all organizations on your Infisical instance. Set up by the instance administrator. Uses `INFISICAL_RELAY_AUTH_SECRET` environment variable for authentication, which must be configured by the instance admin. + - **`org`**: Dedicated relay server that individual organizations deploy and manage in their own infrastructure. Provides enhanced security, custom geographic placement, and compliance benefits. Uses standard Infisical authentication methods. + + ```bash + # Organization relay (customer-deployed) + infisical relay start --type=org --host=192.168.1.100 --name=my-org-relay + + # Instance relay (configured by instance admin) + INFISICAL_RELAY_AUTH_SECRET= infisical relay start --type=instance --host=10.0.1.50 --name=shared-relay + ``` + + + + + The host (IP address or hostname) of the instance where the relay is deployed. This must be a static public IP or resolvable hostname that gateways can reach. + + ```bash + # Example with IP address + infisical relay start --host=203.0.113.100 --type=org --name=my-relay + + # Example with hostname + infisical relay start --host=relay.example.com --type=org --name=my-relay + ``` + + + + + The name of the relay. 
+ + ```bash + # Example + infisical relay start --name=my-relay --type=org --host=192.168.1.100 + ``` + + + +### Authentication + +**Organization Relays (`--type=org`):** +Deploy your own relay server in your infrastructure for enhanced security and reduced latency. Supports all standard Infisical authentication methods documented below. + +**Instance Relays (`--type=instance`):** +Shared relay servers that serve all organizations on your Infisical instance. For Infisical Cloud, these are already running and ready to use. For self-hosted deployments, they're set up by the instance administrator. Authentication is handled via the `INFISICAL_RELAY_AUTH_SECRET` environment variable. + +```bash +# Organization relay with Universal Auth (customer-deployed) +infisical relay start --type=org --host=192.168.1.100 --name=my-org-relay --auth-method=universal-auth --client-id= --client-secret= + +# Instance relay (configured by instance admin) +INFISICAL_RELAY_AUTH_SECRET= infisical relay start --type=instance --host=10.0.1.50 --name=shared-relay +``` + +### Authentication Methods + +The Infisical CLI supports multiple authentication methods for organization relays. Below are the available authentication methods, with their respective flags. + + + + The Universal Auth method is a simple and secure way to authenticate with Infisical. It requires a client ID and a client secret to authenticate with Infisical. + + + + + Your machine identity client ID. + + + Your machine identity client secret. + + + The authentication method to use. Must be `universal-auth` when using Universal Auth. + + + + + ```bash + infisical relay start --auth-method=universal-auth --client-id= --client-secret= --type=org --host= --name= + ``` + + + + The Native Kubernetes method is used to authenticate with Infisical when running in a Kubernetes environment. It requires a service account token to authenticate with Infisical. + + + + + Your machine identity ID. 
+ + + Path to the Kubernetes service account token to use. Default: `/var/run/secrets/kubernetes.io/serviceaccount/token`. + + + The authentication method to use. Must be `kubernetes` when using Native Kubernetes. + + + + + + + ```bash + infisical relay start --auth-method=kubernetes --machine-identity-id= --type=org --host= --name= + ``` + + + + The Native Azure method is used to authenticate with Infisical when running in an Azure environment. + + + + + Your machine identity ID. + + + The authentication method to use. Must be `azure` when using Native Azure. + + + + + + + ```bash + infisical relay start --auth-method=azure --machine-identity-id= --type=org --host= --name= + ``` + + + + The Native GCP ID Token method is used to authenticate with Infisical when running in a GCP environment. + + + + + Your machine identity ID. + + + The authentication method to use. Must be `gcp-id-token` when using Native GCP ID Token. + + + + + + + ```bash + infisical relay start --auth-method=gcp-id-token --machine-identity-id= --type=org --host= --name= + ``` + + + + The GCP IAM method is used to authenticate with Infisical with a GCP service account key. + + + + + Your machine identity ID. + + + Path to your GCP service account key file _(Must be in JSON format!)_ + + + The authentication method to use. Must be `gcp-iam` when using GCP IAM. + + + + + ```bash + infisical relay start --auth-method=gcp-iam --machine-identity-id= --service-account-key-file-path= --type=org --host= --name= + ``` + + + + The AWS IAM method is used to authenticate with Infisical with an AWS IAM role while running in an AWS environment like EC2, Lambda, etc. + + + + + Your machine identity ID. + + + The authentication method to use. Must be `aws-iam` when using Native AWS IAM. + + + + + ```bash + infisical relay start --auth-method=aws-iam --machine-identity-id= --type=org --host= --name= + ``` + + + + The OIDC Auth method is used to authenticate with Infisical via identity tokens with OIDC. 
+ + + + + Your machine identity ID. + + + The OIDC JWT from the identity provider. + + + The authentication method to use. Must be `oidc-auth` when using OIDC Auth. + + + + + ```bash + infisical relay start --auth-method=oidc-auth --machine-identity-id= --jwt= --type=org --host= --name= + ``` + + + + + The JWT Auth method is used to authenticate with Infisical via a JWT token. + + + + + The JWT token to use for authentication. + + + Your machine identity ID. + + + The authentication method to use. Must be `jwt-auth` when using JWT Auth. + + + + + + ```bash + infisical relay start --auth-method=jwt-auth --jwt= --machine-identity-id= --type=org --host= --name= + ``` + + + + You can use the `INFISICAL_TOKEN` environment variable to authenticate with Infisical with a raw machine identity access token. + + + + + The machine identity access token to use for authentication. + + + + + ```bash + infisical relay start --token= --type=org --host= --name= + ``` + + + + +### Deployment Considerations + +**When to use Instance Relays (`--type=instance`):** + +- You want to get started quickly without setting up your own relay infrastructure +- You're using Infisical Cloud and want to leverage the existing relay infrastructure +- You're on a self-hosted instance where the admin has already set up shared relays +- You don't need custom geographic placement of relay servers +- You don't have specific compliance requirements that require dedicated infrastructure +- You want to minimize operational overhead by using shared infrastructure + +**When to use Organization Relays (`--type=org`):** + +- You need lower latency by deploying relay servers closer to your resources +- You have security requirements that mandate running infrastructure in your own environment +- You have compliance requirements such as data sovereignty or air-gapped environments +- You need custom network policies or specific networking configurations +- You have high-scale performance requirements that shared 
infrastructure can't meet +- You want full control over your relay infrastructure and its configuration + + diff --git a/docs/docs.json b/docs/docs.json index 9ea4764870..285e11a608 100644 --- a/docs/docs.json +++ b/docs/docs.json @@ -174,7 +174,15 @@ "pages": [ "documentation/platform/gateways/overview", "documentation/platform/gateways/gateway-security", - "documentation/platform/gateways/networking" + "documentation/platform/gateways/networking", + { + "group": "Gateway (Deprecated)", + "pages": [ + "documentation/platform/gateways-deprecated/overview", + "documentation/platform/gateways-deprecated/gateway-security", + "documentation/platform/gateways-deprecated/networking" + ] + } ] } ] @@ -346,7 +354,10 @@ }, { "group": "Architecture", - "pages": ["internals/architecture/components", "internals/architecture/cloud"] + "pages": [ + "internals/architecture/components", + "internals/architecture/cloud" + ] }, "internals/security", "internals/service-tokens" @@ -564,7 +575,10 @@ "integrations/cloud/gcp-secret-manager", { "group": "Cloudflare", - "pages": ["integrations/cloud/cloudflare-pages", "integrations/cloud/cloudflare-workers"] + "pages": [ + "integrations/cloud/cloudflare-pages", + "integrations/cloud/cloudflare-workers" + ] }, "integrations/cloud/terraform-cloud", "integrations/cloud/databricks", @@ -659,7 +673,9 @@ "documentation/platform/secret-scanning/overview", { "group": "Concepts", - "pages": ["documentation/platform/secret-scanning/concepts/secret-scanning"] + "pages": [ + "documentation/platform/secret-scanning/concepts/secret-scanning" + ] } ] }, @@ -709,13 +725,18 @@ "documentation/platform/ssh/overview", { "group": "Concepts", - "pages": ["documentation/platform/ssh/concepts/ssh-certificates"] + "pages": [ + "documentation/platform/ssh/concepts/ssh-certificates" + ] } ] }, { "group": "Platform Reference", - "pages": ["documentation/platform/ssh/usage", "documentation/platform/ssh/host-groups"] + "pages": [ + "documentation/platform/ssh/usage", + 
"documentation/platform/ssh/host-groups" + ] } ] }, @@ -753,6 +774,7 @@ "cli/commands/dynamic-secrets", "cli/commands/ssh", "cli/commands/gateway", + "cli/commands/relay", "cli/commands/bootstrap", "cli/commands/export", "cli/commands/token", @@ -762,7 +784,11 @@ "cli/commands/reset", { "group": "infisical scan", - "pages": ["cli/commands/scan", "cli/commands/scan-git-changes", "cli/commands/scan-install"] + "pages": [ + "cli/commands/scan", + "cli/commands/scan-git-changes", + "cli/commands/scan-install" + ] } ] }, @@ -1096,7 +1122,9 @@ "pages": [ { "group": "Kubernetes", - "pages": ["api-reference/endpoints/dynamic-secrets/kubernetes/create-lease"] + "pages": [ + "api-reference/endpoints/dynamic-secrets/kubernetes/create-lease" + ] }, "api-reference/endpoints/dynamic-secrets/create", "api-reference/endpoints/dynamic-secrets/update", diff --git a/docs/documentation/platform/audit-log-streams/audit-log-streams.mdx b/docs/documentation/platform/audit-log-streams/audit-log-streams.mdx index 0a3a414583..ad030ec26a 100644 --- a/docs/documentation/platform/audit-log-streams/audit-log-streams.mdx +++ b/docs/documentation/platform/audit-log-streams/audit-log-streams.mdx @@ -182,6 +182,48 @@ Infisical Audit Log Streaming enables you to transmit your organization's audit + + Stream Infisical audit logs to Cribl Stream for centralized processing and routing. Infisical supports Cribl as a provider for seamless integration. + + + + In Cribl Stream, navigate to **Worker Groups** and select your Worker Group. Take note of the **Ingress Address** for later steps. + + ![cribl ingress address](/images/platform/audit-log-streams/cribl-ingress-address.png) + + Within your Worker Group, navigate to **Data > Sources > HTTP** and click **Add Source**. + + ![cribl add source](/images/platform/audit-log-streams/cribl-add-source.png) + + Configure the **Input ID**, **Port**, and **Cribl HTTP event API** path (e.g., `/infisical`). Then, generate an **Auth Token**. 
+ + You can optionally configure TLS in the **TLS Settings** tab and add a pipeline in the **Pre-Processing** tab. + + + Ensure that you're using a port that's open on your instance. + + + ![cribl general settings](/images/platform/audit-log-streams/cribl-general-settings.png) + + Once you've configured the Data Source, click **Save** and deploy your changes. + + + On Infisical, create a new audit log stream and select the **Cribl** provider option. + + Input the following credentials: + - **Cribl Stream URL**: Your HTTP source endpoint composed of `http://<ingress-address>:<port><api-path>/_bulk` + - **Cribl Stream Token**: The authentication token from Step 1 + + + If you configured TLS for your Data Source, use the `https://` protocol. + + + ![cribl details](/images/platform/audit-log-streams/cribl-details.png) + + Once you're finished, click **Create Log Stream**. + + + You can stream to Datadog using the **Datadog** provider log stream. diff --git a/docs/documentation/platform/auth-methods/email-password.mdx b/docs/documentation/platform/auth-methods/email-password.mdx index db23026b86..f5cb0e6f5f 100644 --- a/docs/documentation/platform/auth-methods/email-password.mdx +++ b/docs/documentation/platform/auth-methods/email-password.mdx @@ -7,8 +7,39 @@ description: "Learn how to authenticate into Infisical with email and password." It is currently possible to use the **Email and Password** auth method to authenticate into the Web Dashboard and Infisical CLI. +### Emergency Kit Every **Email and Password** is accompanied by an emergency kit given to users during signup. If the password is lost or forgotten, emergency kit is only way to retrieve the access to your account. It is possible to generate a new emergency kit with the following steps: 1. Open the `Personal Settings` menu. ![open personal settings](../../../images/auth-methods/access-personal-settings.png) 2. Scroll down to the `Emergency Kit` section. 3. Enter your current password and click `Save`.
+ +### Change Password +You can update your account password at any time: +1. Open the `Personal Settings` menu. +![open personal settings](../../../images/auth-methods/access-personal-settings.png) +2. Navigate to the `Authentication` tab. +![open authentication tab](../../../images/auth-methods/personal-settings-authentication-tab.png) +3. In the `Change Password` section, enter your current password and new password. +![change password section](../../../images/auth-methods/personal-settings-authentication-change-email-password.png) +4. Click `Save` to save your new password. + +### Change Email +You can update your account email address: +1. Open the `Personal Settings` menu. +2. Navigate to the `Authentication` tab. +3. In the `Change Email` section, enter your new email address. +![change email section](../../../images/auth-methods/personal-settings-authentication-change-email-password.png) +4. Click `Send Verification Code` to receive a 6-digit verification code at your new email address. +5. Check your new email inbox and enter the verification code. +![change email section](../../../images/auth-methods/personal-settings-authentication-change-email-confirmation.png) +6. Click `Confirm Email Change` to complete the process. +7. You will be logged out and need to sign in again with your new email address. + + +Changing your email will remove all connected external authentication methods and terminate all active sessions for security. + + + +Email changes are disabled if SCIM is enabled for any of your organizations. Contact your organization administrator if you need to change your email address in a SCIM-enabled environment.
+ \ No newline at end of file diff --git a/docs/documentation/platform/gateways-deprecated/gateway-security.mdx b/docs/documentation/platform/gateways-deprecated/gateway-security.mdx new file mode 100644 index 0000000000..93a7f662f8 --- /dev/null +++ b/docs/documentation/platform/gateways-deprecated/gateway-security.mdx @@ -0,0 +1,91 @@ +--- +title: "Gateway Security Architecture" +sidebarTitle: "Architecture" +description: "Understand the security model and tenant isolation of Infisical's Gateway" +--- + +# Gateway Security Architecture + +The Infisical Gateway enables Infisical Cloud to securely interact with private resources using mutual TLS authentication and a private PKI (Public Key Infrastructure) system to ensure secure, isolated communication between multiple tenants. +This document explains the internal security architecture and how tenant isolation is maintained. + +## Security Model Overview + +### Private PKI System +Each organization (tenant) in Infisical has its own private PKI system consisting of: + +1. **Root CA**: The ultimate trust anchor for the organization +2. **Intermediate CAs**: + - Client CA: Issues certificates for cloud components + - Gateway CA: Issues certificates for gateway instances + +This hierarchical structure ensures complete isolation between organizations as each has its own independent certificate chain. + +### Certificate Hierarchy +``` +Root CA (Organization Specific) +├── Client CA +│ └── Client Certificates (Cloud Components) +└── Gateway CA + └── Gateway Certificates (Gateway Instances) +``` + +## Communication Security + +### 1. Gateway Registration +When a gateway is first deployed: + +1. Establishes initial connection using machine identity token +2. Allocates a relay address for communication +3. Exchanges certificates through a secure handshake: + - Gateway receives a unique certificate signed by organization's Gateway CA along with certificate chain for verification + +### 2.
Mutual TLS Authentication +All communication between gateway and cloud uses mutual TLS (mTLS): + +- **Gateway Authentication**: + - Presents certificate signed by organization's Gateway CA + - Certificate contains unique identifiers (Organization ID, Gateway ID) + - Cloud validates complete certificate chain + +- **Cloud Authentication**: + - Presents certificate signed by organization's Client CA + - Certificate includes required organizational unit ("gateway-client") + - Gateway validates certificate chain back to organization's root CA + +### 3. Relay Communication +The relay system provides secure tunneling: + +1. **Connection Establishment**: + - Uses QUIC protocol over UDP for efficient, secure communication + - Provides built-in encryption, congestion control, and multiplexing + - Enables faster connection establishment and reduced latency + - Each organization's traffic is isolated using separate relay sessions + +2. **Traffic Isolation**: + - Each gateway gets unique relay credentials + - Traffic is end-to-end encrypted using QUIC's TLS 1.3 + - Organization's private keys never leave their environment + +## Tenant Isolation + +### Certificate-Based Isolation +- Each organization has unique root CA and intermediate CAs +- Certificates contain organization-specific identifiers +- Cross-tenant communication is cryptographically impossible + +### Gateway-Project Mapping +- Gateways are explicitly mapped to specific projects +- Access controls enforce organization boundaries +- Project-level permissions determine resource accessibility + +### Resource Access Control +1. **Project Verification**: + - Gateway verifies project membership + - Validates organization ownership + - Enforces project-level permissions + +2. 
**Resource Restrictions**: + - Gateways only accept connections to approved resources + - Each connection requires explicit project authorization + - Resources remain private to their assigned organization diff --git a/docs/documentation/platform/gateways/images/gateway-highlevel-diagram.png b/docs/documentation/platform/gateways-deprecated/images/gateway-highlevel-diagram.png similarity index 100% rename from docs/documentation/platform/gateways/images/gateway-highlevel-diagram.png rename to docs/documentation/platform/gateways-deprecated/images/gateway-highlevel-diagram.png diff --git a/docs/documentation/platform/gateways-deprecated/networking.mdx b/docs/documentation/platform/gateways-deprecated/networking.mdx new file mode 100644 index 0000000000..6acdc19937 --- /dev/null +++ b/docs/documentation/platform/gateways-deprecated/networking.mdx @@ -0,0 +1,168 @@ +--- +title: "Networking" +description: "Network configuration and firewall requirements for Infisical Gateway" +--- + +The Infisical Gateway requires outbound network connectivity to establish secure communication with Infisical's relay infrastructure. +This page outlines the required ports, protocols, and firewall configurations needed for optimal gateway usage. + +## Network Architecture + +The gateway uses a relay-based architecture to establish secure connections: + +1. **Gateway** connects outbound to **Relay Servers** using UDP/QUIC protocol +2. **Relay Servers** facilitate secure communication between Gateway and Infisical Cloud +3. 
All traffic is end-to-end encrypted using mutual TLS over QUIC + +## Required Network Connectivity + +### Outbound Connections (Required) + +The gateway requires the following outbound connectivity: + +| Protocol | Destination | Ports | Purpose | +|----------|-------------|-------|---------| +| UDP | Relay Servers | 49152-65535 | Allocated relay communication (TLS) | +| TCP | app.infisical.com / eu.infisical.com | 443 | API communication and relay allocation | + +### Relay Server IP Addresses + +Your firewall must allow outbound connectivity to the following Infisical relay servers on dynamically allocated ports. + + + + ``` + 54.235.197.91:49152-65535 + 18.215.196.229:49152-65535 + 3.222.120.233:49152-65535 + 34.196.115.157:49152-65535 + ``` + + + ``` + 3.125.237.40:49152-65535 + 52.28.157.98:49152-65535 + 3.125.176.90:49152-65535 + ``` + + + Please contact your Infisical account manager for dedicated relay server IP addresses. + + + + + These IP addresses are static and managed by Infisical. Any changes will be communicated with 60-day advance notice. + + +## Protocol Details + +### QUIC over UDP + +The gateway uses QUIC (Quick UDP Internet Connections) for primary communication: + +- **Port 5349**: STUN/TURN over TLS (secure relay communication) +- **Built-in features**: Connection migration, multiplexing, reduced latency +- **Encryption**: TLS 1.3 with certificate pinning + +## Understanding Firewall Behavior with UDP + +Unlike TCP connections, UDP is a stateless protocol, and depending on your organization's firewall configuration, you may need to adjust network rules accordingly. +When the gateway sends UDP packets to a relay server, the return responses need to be allowed back through the firewall. +Modern firewalls handle this through "connection tracking" (also called "stateful inspection"), but the behavior can vary depending on your firewall configuration. 
+ + +### Connection Tracking + +Modern firewalls automatically track UDP connections and allow return responses. This is the preferred configuration as it: +- Automatically handles return responses +- Reduces firewall rule complexity +- Avoids the need for manual IP whitelisting + +In the event that your firewall does not support connection tracking, you will need to whitelist the relay IPs to explicitly define return traffic manually. + +## Common Network Scenarios + +### Corporate Firewalls + +For corporate environments with strict egress filtering: + +1. **Whitelist relay IP addresses** (listed above) +2. **Allow UDP port 5349** outbound +3. **Configure connection tracking** for UDP return traffic +4. **Allow ephemeral port range** 49152-65535 for return traffic if connection tracking is disabled + +### Cloud Environments (AWS/GCP/Azure) + +Configure security groups to allow: +- **Outbound UDP** to relay IPs on port 5349 +- **Outbound HTTPS** to app.infisical.com/eu.infisical.com on port 443 +- **Inbound UDP** on ephemeral ports (if not using stateful rules) + +## Frequently Asked Questions + + +The gateway is designed to handle network interruptions gracefully: + +- **Automatic reconnection**: The gateway will automatically attempt to reconnect to relay servers every 5 seconds if the connection is lost +- **Connection retry logic**: Built-in retry mechanisms handle temporary network outages without manual intervention +- **Multiple relay servers**: If one relay server is unavailable, the gateway can connect to alternative relay servers +- **Persistent sessions**: Existing connections are maintained where possible during brief network interruptions +- **Graceful degradation**: The gateway logs connection issues and continues attempting to restore connectivity + +No manual intervention is typically required during network interruptions. 
+ + + +QUIC (Quick UDP Internet Connections) provides several advantages over traditional TCP for gateway communication: + +- **Faster connection establishment**: QUIC combines transport and security handshakes, reducing connection setup time +- **Built-in encryption**: TLS 1.3 is integrated into the protocol, ensuring all traffic is encrypted by default +- **Connection migration**: QUIC connections can survive IP address changes (useful for NAT rebinding) +- **Reduced head-of-line blocking**: Multiple data streams can be multiplexed without blocking each other +- **Better performance over unreliable networks**: Advanced congestion control and packet loss recovery +- **Lower latency**: Optimized for real-time communication between gateway and cloud services + +While TCP is stateful and easier for firewalls to track, QUIC's performance benefits outweigh the additional firewall configuration requirements. + + + +No inbound ports need to be opened. The gateway only makes outbound connections: + +- **Outbound UDP** to relay servers on ports 49152-65535 +- **Outbound HTTPS** to Infisical API endpoints +- **Return responses** are handled by connection tracking or explicit IP whitelisting + +This design maintains security by avoiding the need for inbound firewall rules that could expose your network to external threats. + + + +If your firewall has strict UDP restrictions: + +1. **Work with your network team** to allow outbound UDP to the specific relay IP addresses +2. **Use explicit IP whitelisting** if connection tracking is disabled +3. **Consider network policy exceptions** for the gateway host +4. **Monitor firewall logs** to identify which specific rules are blocking traffic + +The gateway requires UDP connectivity to function - TCP-only configurations are not supported. 
+ + + +The gateway connects to **one relay server at a time**: + +- **Single active connection**: Only one relay connection is established per gateway instance +- **Automatic failover**: If the current relay becomes unavailable, the gateway will connect to an alternative relay +- **Load distribution**: Different gateway instances may connect to different relay servers for load balancing +- **No manual selection**: The Infisical API automatically assigns the optimal relay server based on availability and proximity + +You should whitelist all relay IP addresses to ensure proper failover functionality. + + +No, relay servers cannot decrypt any traffic passing through them: + +- **End-to-end encryption**: All traffic between the gateway and Infisical Cloud is encrypted using mutual TLS with certificate pinning +- **Relay acts as a tunnel**: The relay server only forwards encrypted packets - it has no access to encryption keys +- **No data storage**: Relay servers do not store any traffic or network-identifiable information +- **Certificate isolation**: Each organization has its own private PKI system, ensuring complete tenant isolation + +The relay infrastructure is designed as a secure forwarding mechanism, similar to a VPN tunnel, where the relay provider cannot see the contents of the traffic flowing through it. 
+ \ No newline at end of file diff --git a/docs/documentation/platform/gateways-deprecated/overview.mdx b/docs/documentation/platform/gateways-deprecated/overview.mdx new file mode 100644 index 0000000000..f81809f7b7 --- /dev/null +++ b/docs/documentation/platform/gateways-deprecated/overview.mdx @@ -0,0 +1,352 @@ +--- +title: "Gateway" +sidebarTitle: "Overview" +description: "How to access private network resources from Infisical" +--- + +![Alt text](/documentation/platform/gateways-deprecated/images/gateway-highlevel-diagram.png) + +The Infisical Gateway provides secure access to private resources within your network without needing direct inbound connections to your environment. +This method keeps your resources fully protected from external access while enabling Infisical to securely interact with resources like databases. +Common use cases include generating dynamic credentials or rotating credentials for private databases. + + + Gateway is a paid feature available under the Enterprise Tier for Infisical + Cloud users. Self-hosted Infisical users can contact + [sales@infisical.com](mailto:sales@infisical.com) to purchase an enterprise + license. + + +## How It Works + +The Gateway serves as a secure intermediary that facilitates direct communication between the Infisical server and your private network. +It’s a lightweight daemon packaged within the Infisical CLI, making it easy to deploy and manage. Once set up, the Gateway establishes a connection with a relay server, ensuring that all communication between Infisical and your Gateway is fully end-to-end encrypted. +This setup guarantees that only the platform and your Gateway can decrypt the transmitted information, keeping communication with your resources secure, private and isolated. + +## Deployment + +The Infisical Gateway is seamlessly integrated into the Infisical CLI under the `gateway` command, making it simple to deploy and manage. 
+You can install the Gateway in all the same ways you install the Infisical CLI—whether via npm, Docker, or a binary. +For detailed installation instructions, refer to the Infisical [CLI Installation instructions](/cli/overview). + +To function, the Gateway must authenticate with Infisical. This requires a machine identity configured with the appropriate permissions to create and manage a Gateway. +Once authenticated, the Gateway establishes a secure connection with Infisical to allow your private resources to be reachable. + +### Get started + + + + 1. Navigate to **Organization Access Control** in your Infisical dashboard. + 2. Create a dedicated machine identity for your Gateway. + 3. **Best Practice:** Assign a unique identity to each Gateway for better security and management. + ![Create Gateway Identity](../../../images/platform/gateways/create-identity-for-gateway.png) + + + + You'll need to choose an authentication method to initiate communication with Infisical. View the available machine identity authentication methods [here](/documentation/platform/identities/machine-identities). + + + + Use the Infisical CLI to deploy the Gateway. You can run it directly or install it as a systemd service for production: + + + + For production deployments on Linux, install the Gateway as a systemd service: + ```bash + sudo infisical gateway install --token --domain + sudo systemctl start infisical-gateway + ``` + This will install and start the Gateway as a secure systemd service that: + - Runs with restricted privileges: + - Runs as root user (required for secure token management) + - Restricted access to home directories + - Private temporary directory + - Automatically restarts on failure + - Starts on system boot + - Manages token and domain configuration securely in `/etc/infisical/gateway.conf` + + + The install command requires: + - Linux operating system + - Root/sudo privileges + - Systemd + + + + + + The Gateway can be installed via [Helm](https://helm.sh/). 
Helm is a package manager for Kubernetes that allows you to define, install, and upgrade Kubernetes applications. + + For production deployments on Kubernetes, install the Gateway using the Infisical Helm chart: + + ### Install the latest Helm Chart repository + ```bash + helm repo add infisical-helm-charts 'https://dl.cloudsmith.io/public/infisical/helm-charts/helm/charts/' + ``` + + ### Update the Helm Chart repository + ```bash + helm repo update + ``` + + ### Create a Kubernetes Secret containing gateway environment variables + + The gateway supports all identity authentication methods through the use of environment variables. + The environment variables must be set in the `infisical-gateway-environment` Kubernetes secret. + + + #### Supported authentication methods + + + + The Universal Auth method is a simple and secure way to authenticate with Infisical. It requires a client ID and a client secret to authenticate with Infisical. + + + + + Your machine identity client ID. + + + Your machine identity client secret. + + + The authentication method to use. Must be `universal-auth` when using Universal Auth. + + + + + ```bash + kubectl create secret generic infisical-gateway-environment --from-literal=INFISICAL_AUTH_METHOD=universal-auth --from-literal=INFISICAL_UNIVERSAL_AUTH_CLIENT_ID= --from-literal=INFISICAL_UNIVERSAL_AUTH_CLIENT_SECRET= + ``` + + + + The Native Kubernetes method is used to authenticate with Infisical when running in a Kubernetes environment. It requires a service account token to authenticate with Infisical. + + + + + Your machine identity ID. + + + Path to the Kubernetes service account token to use. Default: `/var/run/secrets/kubernetes.io/serviceaccount/token`. + + + The authentication method to use. Must be `kubernetes` when using Native Kubernetes. 
+ + + + + + ```bash + kubectl create secret generic infisical-gateway-environment --from-literal=INFISICAL_AUTH_METHOD=kubernetes --from-literal=INFISICAL_MACHINE_IDENTITY_ID= + ``` + + + + The Native Azure method is used to authenticate with Infisical when running in an Azure environment. + + + + + Your machine identity ID. + + + The authentication method to use. Must be `azure` when using Native Azure. + + + + + ```bash + kubectl create secret generic infisical-gateway-environment --from-literal=INFISICAL_AUTH_METHOD=azure --from-literal=INFISICAL_MACHINE_IDENTITY_ID= + ``` + + + The Native GCP ID Token method is used to authenticate with Infisical when running in a GCP environment. + + + + + Your machine identity ID. + + + The authentication method to use. Must be `gcp-id-token` when using Native GCP ID Token. + + + + + ```bash + kubectl create secret generic infisical-gateway-environment --from-literal=INFISICAL_AUTH_METHOD=gcp-id-token --from-literal=INFISICAL_MACHINE_IDENTITY_ID= + ``` + + + + The GCP IAM method is used to authenticate with Infisical with a GCP service account key. + + + + + Your machine identity ID. + + + Path to your GCP service account key file _(Must be in JSON format!)_ + + + The authentication method to use. Must be `gcp-iam` when using GCP IAM. + + + + + ```bash + kubectl create secret generic infisical-gateway-environment --from-literal=INFISICAL_AUTH_METHOD=gcp-iam --from-literal=INFISICAL_MACHINE_IDENTITY_ID= --from-literal=INFISICAL_GCP_SERVICE_ACCOUNT_KEY_FILE_PATH= + ``` + + + + + The AWS IAM method is used to authenticate with Infisical with an AWS IAM role while running in an AWS environment like EC2, Lambda, etc. + + + + + Your machine identity ID. + + + The authentication method to use. Must be `aws-iam` when using Native AWS IAM. 
+ + + + + ```bash + kubectl create secret generic infisical-gateway-environment --from-literal=INFISICAL_AUTH_METHOD=aws-iam --from-literal=INFISICAL_MACHINE_IDENTITY_ID= + ``` + + + + The OIDC Auth method is used to authenticate with Infisical via identity tokens with OIDC. + + + + + Your machine identity ID. + + + The OIDC JWT from the identity provider. + + + The authentication method to use. Must be `oidc-auth` when using OIDC Auth. + + + + + ```bash + kubectl create secret generic infisical-gateway-environment --from-literal=INFISICAL_AUTH_METHOD=oidc-auth --from-literal=INFISICAL_MACHINE_IDENTITY_ID= --from-literal=INFISICAL_JWT= + ``` + + + + The JWT Auth method is used to authenticate with Infisical via a JWT token. + + + + + The JWT token to use for authentication. + + + Your machine identity ID. + + + The authentication method to use. Must be `jwt-auth` when using JWT Auth. + + + + + ```bash + kubectl create secret generic infisical-gateway-environment --from-literal=INFISICAL_AUTH_METHOD=jwt-auth --from-literal=INFISICAL_JWT= --from-literal=INFISICAL_MACHINE_IDENTITY_ID= + ``` + + + You can use the `INFISICAL_TOKEN` environment variable to authenticate with Infisical with a raw machine identity access token. + + + + + The machine identity access token to use for authentication. + + + + + ```bash + kubectl create secret generic infisical-gateway-environment --from-literal=INFISICAL_TOKEN= + ``` + + + + + #### Other environment variables + + + + The API URL to use for the gateway. By default, `INFISICAL_API_URL` is set to `https://app.infisical.com`. + + + + + ### Install the Infisical Gateway Helm Chart + ```bash + helm install infisical-gateway infisical-helm-charts/infisical-gateway + ``` + + ### Check the gateway logs + After installing the gateway, you can check the logs to ensure it's running as expected. 
+ + ```bash + kubectl logs deployment/infisical-gateway + ``` + + You should see the following output which indicates the gateway is running as expected. + ```bash + $ kubectl logs deployment/infisical-gateway + INF Provided relay port 5349. Using TLS + INF Connected with relay + INF 10.0.101.112:56735 + INF Starting relay connection health check + INF Gateway started successfully + INF New connection from: 10.0.1.8:34051 + INF Gateway is reachable by Infisical + ``` + + + + + For development or testing, you can run the Gateway directly. Log in with your machine identity and start the Gateway in one command: + ```bash + infisical gateway --token $(infisical login --method=universal-auth --client-id=<> --client-secret=<> --plain) + ``` + + Alternatively, if you already have the token, use it directly with the `--token` flag: + ```bash + infisical gateway --token + ``` + + Or set it as an environment variable: + ```bash + export INFISICAL_TOKEN= + infisical gateway + ``` + + + + For detailed information about the gateway command and its options, see the [gateway command documentation](/cli/commands/gateway). + + + Ensure the deployed Gateway has network access to the private resources you intend to connect with Infisical. + + + + + + To confirm your Gateway is working, check the deployment status by looking for the message **"Gateway started successfully"** in the Gateway logs. This indicates the Gateway is running properly. Next, verify its registration by opening your Infisical dashboard, navigating to **Organization Access Control**, and selecting the **Gateways** tab. Your newly deployed Gateway should appear in the list. 
+ ![Gateway List](../../../images/platform/gateways/gateway-list.png) + + diff --git a/docs/documentation/platform/gateways/gateway-security.mdx b/docs/documentation/platform/gateways/gateway-security.mdx index 93a7f662f8..6962c627e4 100644 --- a/docs/documentation/platform/gateways/gateway-security.mdx +++ b/docs/documentation/platform/gateways/gateway-security.mdx @@ -6,86 +6,133 @@ description: "Understand the security model and tenant isolation of Infisical's # Gateway Security Architecture -The Infisical Gateway enables Infisical Cloud to securely interact with private resources using mutual TLS authentication and private PKI (Public Key Infrastructure) system to ensure secure, isolated communication between multiple tenants. +The Infisical Gateway enables secure access to private resources using SSH reverse tunnels, certificate-based authentication, and a comprehensive PKI (Public Key Infrastructure) system. The architecture provides end-to-end encryption and complete tenant isolation through multiple certificate authorities. This document explains the internal security architecture and how tenant isolation is maintained. ## Security Model Overview -### Private PKI System -Each organization (tenant) in Infisical has its own private PKI system consisting of: +### Certificate Architecture -1. **Root CA**: The ultimate trust anchor for the organization -2. **Intermediate CAs**: - - Client CA: Issues certificates for cloud components - - Gateway CA: Issues certificates for gateway instances +The gateway system uses multiple certificate authorities depending on deployment configuration: -This hierarchical structure ensures complete isolation between organizations as each has its own independent certificate chain. 
+**For Organizations Using Infisical-Managed Relays:** + +- **Instance relay SSH Client CA & Server CA** - Gateway ↔ Infisical Relay Server authentication +- **Instance relay PKI Client CA & Server CA** - Platform ↔ Infisical Relay Server authentication +- **Organization Gateway Client CA & Server CA** - Platform ↔ Gateway authentication + +**For Organizations Using Customer-Deployed Relays:** + +- **Organization relay SSH Client CA & Server CA** - Gateway ↔ Customer Relay Server authentication +- **Organization relay PKI Client CA & Server CA** - Platform ↔ Customer Relay Server authentication +- **Organization Gateway Client CA & Server CA** - Platform ↔ Gateway authentication ### Certificate Hierarchy + ``` -Root CA (Organization Specific) -├── Client CA -│ └── Client Certificates (Cloud Components) -└── Gateway CA - └── Gateway Certificates (Gateway Instances) +Instance Level (Shared Relays): +├── Instance Relay SSH CA (Gateway ↔ Relay) +├── Instance Relay PKI CA (Platform ↔ Relay) + +Organization Level: +├── Organization Relay SSH CA (Gateway ↔ Org Relay) +├── Organization Relay PKI CA (Platform ↔ Org Relay) +└── Organization Gateway CA (Platform ↔ Gateway) ``` ## Communication Security ### 1. Gateway Registration + When a gateway is first deployed: -1. Establishes initial connection using machine identity token -2. Allocates a relay address for communication -3. Exchanges certificates through a secure handshake: - - Gateway receives a unique certificate signed by organization's Gateway CA along with certificate chain for verification +1. Authenticates with Infisical using machine identity token +2. Receives SSH certificates for relay server authentication +3. Establishes SSH reverse tunnel to assigned relay server +4. 
Certificate issuance varies by relay configuration: + - **Infisical-managed relay**: Receives Instance relay SSH client certificate + Instance relay SSH Server CA + - **Customer-deployed relay**: Receives Organization relay SSH client certificate + Organization relay SSH Server CA -### 2. Mutual TLS Authentication -All communication between gateway and cloud uses mutual TLS (mTLS): +### 2. SSH Tunnel Authentication + +Gateway ↔ Relay Server communication uses SSH certificate authentication: - **Gateway Authentication**: - - Presents certificate signed by organization's Gateway CA - - Certificate contains unique identifiers (Organization ID, Gateway ID) - - Cloud validates complete certificate chain -- **Cloud Authentication**: - - Presents certificate signed by organization's Client CA - - Certificate includes required organizational unit ("gateway-client") - - Gateway validates certificate chain back to organization's root CA + - Presents SSH client certificate (Instance or Organization relay SSH Client CA) + - Certificate contains gateway identification and permissions + - Relay server validates certificate against appropriate SSH Client CA -### 3. Relay Communication -The relay system provides secure tunneling: +- **Relay Server Authentication**: + - Presents SSH server certificate (Instance or Organization relay SSH Server CA) + - Gateway validates certificate against appropriate SSH Server CA + - Ensures gateway connects to legitimate relay infrastructure -1. **Connection Establishment**: - - Uses QUIC protocol over UDP for efficient, secure communication - - Provides built-in encryption, congestion control, and multiplexing - - Enables faster connection establishment and reduced latency - - Each organization's traffic is isolated using separate relay sessions +### 3. Platform-to-Gateway Direct Connection -2. 
**Traffic Isolation**: - - Each gateway gets unique relay credentials - - Traffic is end-to-end encrypted using QUIC's TLS 1.3 - - Organization's private keys never leave their environment +The platform establishes secure direct connections with gateways through a **TLS-pinned tunnel** mechanism: + +1. **TLS-Pinned Tunnel Establishment**: + + - Gateway initiates outbound connection to platform through SSH reverse tunnel + - Platform establishes direct mTLS connection with gateway using Organization Gateway certificates + - TLS certificate pinning ensures the connection is bound to the specific gateway identity + - No inbound connections required - all communication flows through the outbound tunnel + +2. **Connection Flow**: + + ``` + Platform ←→ [SSH Reverse Tunnel] ←→ Gateway + ``` + + - Gateway maintains persistent outbound SSH tunnel to relay server + - Platform connects directly to gateway through this tunnel + - TLS handshake occurs over the SSH tunnel, establishing mTLS connection + - Application traffic flows through the TLS-pinned tunnel + +3. 
**Security Benefits**: + + - **No inbound connections**: Gateway never needs to accept incoming connections + - **Certificate-based authentication**: Uses Organization Gateway certificates for mutual TLS + - **Double encryption**: TLS traffic within SSH tunnel provides layered security + - **Relay server isolation**: Relay cannot decrypt either TLS or application data + - **Tenant isolation**: Each organization's traffic flows through separate authenticated channels ## Tenant Isolation -### Certificate-Based Isolation -- Each organization has unique root CA and intermediate CAs -- Certificates contain organization-specific identifiers -- Cross-tenant communication is cryptographically impossible +### Multi-Layer Certificate Isolation -### Gateway-Project Mapping -- Gateways are explicitly mapped to specific projects -- Access controls enforce organization boundaries -- Project-level permissions determine resource accessibility +The architecture provides tenant isolation through multiple certificate authority layers: + +- **Instance-level CAs**: Shared relay infrastructure uses instance-level certificates +- **Organization-level CAs**: Each organization has unique certificate authorities +- **Relay deployment flexibility**: Organizations can choose shared or dedicated relay infrastructure +- **Cryptographic separation**: Cross-tenant communication is cryptographically impossible + +### Authentication Flows by Deployment Type + +**Infisical-Managed Relay Deployments:** + +- Gateway authenticates with relay using Instance relay SSH certificates +- Platform authenticates with relay using Instance relay PKI certificates +- Platform authenticates with gateway using Organization Gateway certificates + +**Customer-Deployed Relay Deployments:** + +- Gateway authenticates with relay using Organization relay SSH certificates +- Platform authenticates with relay using Organization relay PKI certificates +- Platform authenticates with gateway using Organization Gateway 
certificates ### Resource Access Control -1. **Project Verification**: - - Gateway verifies project membership - - Validates organization ownership - - Enforces project-level permissions -2. **Resource Restrictions**: - - Gateways only accept connections to approved resources - - Each connection requires explicit project authorization - - Resources remain private to their assigned organization +1. **Certificate Validation**: + + - All connections require valid certificates from appropriate CAs + - Embedded certificate details control access permissions + - Ephemeral certificate validation ensures time-bound access + +2. **Network Isolation**: + + - Each organization's traffic flows through isolated certificate-authenticated channels + - Relay servers route traffic based on certificate validation without content access + - Gateway validates all incoming connections against Organization Gateway Client CA diff --git a/docs/documentation/platform/gateways/networking.mdx b/docs/documentation/platform/gateways/networking.mdx index 6acdc19937..2b068a5350 100644 --- a/docs/documentation/platform/gateways/networking.mdx +++ b/docs/documentation/platform/gateways/networking.mdx @@ -3,16 +3,17 @@ title: "Networking" description: "Network configuration and firewall requirements for Infisical Gateway" --- -The Infisical Gateway requires outbound network connectivity to establish secure communication with Infisical's relay infrastructure. +The Infisical Gateway requires outbound network connectivity to establish secure SSH reverse tunnels with relay servers. This page outlines the required ports, protocols, and firewall configurations needed for optimal gateway usage. ## Network Architecture -The gateway uses a relay-based architecture to establish secure connections: +The gateway uses SSH reverse tunnels to establish secure connections with end-to-end encryption: -1. **Gateway** connects outbound to **Relay Servers** using UDP/QUIC protocol -2. 
**Relay Servers** facilitate secure communication between Gateway and Infisical Cloud -3. All traffic is end-to-end encrypted using mutual TLS over QUIC +1. **Gateway** connects outbound to **Relay Servers** using SSH over TCP +2. **Infisical platform** establishes mTLS connections with gateways for application traffic +3. **Relay Servers** route the doubly-encrypted traffic (mTLS payload within SSH tunnels) between the platform and gateways +4. **Double encryption** ensures relay servers cannot access application data - only the platform and gateway can decrypt traffic ## Required Network Connectivity @@ -20,65 +21,70 @@ The gateway uses a relay-based architecture to establish secure connections: The gateway requires the following outbound connectivity: -| Protocol | Destination | Ports | Purpose | -|----------|-------------|-------|---------| -| UDP | Relay Servers | 49152-65535 | Allocated relay communication (TLS) | -| TCP | app.infisical.com / eu.infisical.com | 443 | API communication and relay allocation | +| Protocol | Destination | Ports | Purpose | +| -------- | ------------------------------------ | ----- | ------------------------------------------ | +| TCP | Relay Servers | 2222 | SSH reverse tunnel establishment | +| TCP | app.infisical.com / eu.infisical.com | 443 | API communication and certificate requests | -### Relay Server IP Addresses +### Relay Server Connectivity -Your firewall must allow outbound connectivity to the following Infisical relay servers on dynamically allocated ports. +**For Instance Relays (Infisical Cloud):** Your firewall must allow outbound connectivity to Infisical-managed relay servers. + +**For Organization Relays:** Your firewall must allow outbound connectivity to your own relay server IP addresses or hostnames. + +**For Self-hosted Instance Relays:** Your firewall must allow outbound connectivity to relay servers configured by your instance administrator. 
- - ``` - 54.235.197.91:49152-65535 - 18.215.196.229:49152-65535 - 3.222.120.233:49152-65535 - 34.196.115.157:49152-65535 - ``` + + Infisical provides multiple managed relay servers with static IP addresses. + You can whitelist these IPs ahead of time based on which relay server you + choose to connect to. **Firewall requirements:** Allow outbound TCP + connections to the desired relay server IP on port 2222. - - ``` - 3.125.237.40:49152-65535 - 52.28.157.98:49152-65535 - 3.125.176.90:49152-65535 - ``` + + You control the relay server IP addresses or hostnames when deploying your + own organization relays. **Firewall requirements:** Allow outbound TCP + connections to your relay server IP or hostname on port 2222. For example, + if your relay is at `203.0.113.100` or `relay.example.com`, allow TCP to + `203.0.113.100:2222` or `relay.example.com:2222`. - - Please contact your Infisical account manager for dedicated relay server IP addresses. + + Contact your instance administrator for the relay server IP addresses or + hostnames configured for your deployment. **Firewall requirements:** Allow + outbound TCP connections to instance relay servers on port 2222. - - These IP addresses are static and managed by Infisical. Any changes will be communicated with 60-day advance notice. 
- - ## Protocol Details -### QUIC over UDP +### SSH over TCP -The gateway uses QUIC (Quick UDP Internet Connections) for primary communication: +The gateway uses SSH reverse tunnels for primary communication: -- **Port 5349**: STUN/TURN over TLS (secure relay communication) -- **Built-in features**: Connection migration, multiplexing, reduced latency -- **Encryption**: TLS 1.3 with certificate pinning +- **Port 2222**: SSH connection to relay servers +- **Built-in features**: Automatic reconnection, certificate-based authentication, encrypted tunneling +- **Encryption**: SSH with certificate-based authentication and key exchange -## Understanding Firewall Behavior with UDP +## Firewall Configuration for SSH -Unlike TCP connections, UDP is a stateless protocol, and depending on your organization's firewall configuration, you may need to adjust network rules accordingly. -When the gateway sends UDP packets to a relay server, the return responses need to be allowed back through the firewall. -Modern firewalls handle this through "connection tracking" (also called "stateful inspection"), but the behavior can vary depending on your firewall configuration. +The gateway uses standard SSH over TCP, making firewall configuration straightforward. +### TCP Connection Handling -### Connection Tracking +SSH connections over TCP are stateful and handled seamlessly by all modern firewalls: -Modern firewalls automatically track UDP connections and allow return responses. 
This is the preferred configuration as it: -- Automatically handles return responses -- Reduces firewall rule complexity -- Avoids the need for manual IP whitelisting +- **Established connections** are automatically tracked +- **Return traffic** is allowed for established outbound connections +- **No special configuration** needed for connection tracking +- **Standard SSH protocol** that enterprise firewalls handle well -In the event that your firewall does not support connection tracking, you will need to whitelist the relay IPs to explicitly define return traffic manually. +### Simplified Firewall Rules + +Since SSH uses TCP, you only need simple outbound rules: + +1. **Allow outbound TCP** to relay servers (IP addresses or hostnames) on port 2222 +2. **Allow outbound HTTPS** to Infisical API endpoints on port 443 +3. **No inbound rules required** - all connections are outbound only ## Common Network Scenarios @@ -86,83 +92,87 @@ In the event that your firewall does not support connection tracking, you will n For corporate environments with strict egress filtering: -1. **Whitelist relay IP addresses** (listed above) -2. **Allow UDP port 5349** outbound -3. **Configure connection tracking** for UDP return traffic -4. **Allow ephemeral port range** 49152-65535 for return traffic if connection tracking is disabled +1. **Allow outbound TCP** to relay servers (IP addresses or hostnames) on port 2222 +2. **Allow outbound HTTPS** to the Infisical API server on port 443 +3. **No inbound rules required** - all connections are outbound only +4. 
**Standard TCP rules** - simple and straightforward configuration ### Cloud Environments (AWS/GCP/Azure) Configure security groups to allow: -- **Outbound UDP** to relay IPs on port 5349 + +- **Outbound TCP** to relay servers (IP addresses or hostnames) on port 2222 - **Outbound HTTPS** to app.infisical.com/eu.infisical.com on port 443 -- **Inbound UDP** on ephemeral ports (if not using stateful rules) +- **No inbound rules required** - SSH reverse tunnels are outbound only ## Frequently Asked Questions The gateway is designed to handle network interruptions gracefully: -- **Automatic reconnection**: The gateway will automatically attempt to reconnect to relay servers every 5 seconds if the connection is lost +- **Automatic reconnection**: The gateway will automatically attempt to reconnect to relay servers if the SSH connection is lost - **Connection retry logic**: Built-in retry mechanisms handle temporary network outages without manual intervention -- **Multiple relay servers**: If one relay server is unavailable, the gateway can connect to alternative relay servers -- **Persistent sessions**: Existing connections are maintained where possible during brief network interruptions +- **Persistent SSH tunnels**: SSH connections are automatically re-established when connectivity is restored +- **Certificate rotation**: The gateway handles certificate renewal automatically during reconnection - **Graceful degradation**: The gateway logs connection issues and continues attempting to restore connectivity No manual intervention is typically required during network interruptions. 
+ - -QUIC (Quick UDP Internet Connections) provides several advantages over traditional TCP for gateway communication: + +SSH over TCP provides several advantages for enterprise gateway communication: -- **Faster connection establishment**: QUIC combines transport and security handshakes, reducing connection setup time -- **Built-in encryption**: TLS 1.3 is integrated into the protocol, ensuring all traffic is encrypted by default -- **Connection migration**: QUIC connections can survive IP address changes (useful for NAT rebinding) -- **Reduced head-of-line blocking**: Multiple data streams can be multiplexed without blocking each other -- **Better performance over unreliable networks**: Advanced congestion control and packet loss recovery -- **Lower latency**: Optimized for real-time communication between gateway and cloud services +- **Firewall-friendly**: TCP is stateful and handled seamlessly by all enterprise firewalls +- **Standard protocol**: SSH is a well-established protocol that network teams are familiar with +- **Certificate-based security**: Uses SSH certificates for strong authentication without shared secrets +- **Automatic tunneling**: SSH reverse tunnels handle all the complexity of secure communication +- **Enterprise compatibility**: Works reliably across all enterprise network configurations + +TCP's reliability and firewall compatibility make it ideal for enterprise environments where network policies are strictly managed. -While TCP is stateful and easier for firewalls to track, QUIC's performance benefits outweigh the additional firewall configuration requirements. No inbound ports need to be opened. 
The gateway only makes outbound connections: -- **Outbound UDP** to relay servers on ports 49152-65535 -- **Outbound HTTPS** to Infisical API endpoints -- **Return responses** are handled by connection tracking or explicit IP whitelisting +- **Outbound SSH** to relay servers on port 2222 +- **Outbound HTTPS** to Infisical API endpoints on port 443 +- **SSH reverse tunnels** handle all communication - no return traffic configuration needed This design maintains security by avoiding the need for inbound firewall rules that could expose your network to external threats. + - -If your firewall has strict UDP restrictions: + +If your firewall has strict outbound restrictions: -1. **Work with your network team** to allow outbound UDP to the specific relay IP addresses -2. **Use explicit IP whitelisting** if connection tracking is disabled -3. **Consider network policy exceptions** for the gateway host +1. **Work with your network team** to allow outbound TCP connections on port 2222 to relay servers (IP addresses or hostnames) +2. **Allow standard SSH traffic** - most enterprises already have SSH policies in place +3. **Consider network policy exceptions** for the gateway host if needed 4. **Monitor firewall logs** to identify which specific rules are blocking traffic -The gateway requires UDP connectivity to function - TCP-only configurations are not supported. 
-The gateway connects to **one relay server at a time**: +The gateway connects to **one relay server**: -- **Single active connection**: Only one relay connection is established per gateway instance -- **Automatic failover**: If the current relay becomes unavailable, the gateway will connect to an alternative relay -- **Load distribution**: Different gateway instances may connect to different relay servers for load balancing -- **No manual selection**: The Infisical API automatically assigns the optimal relay server based on availability and proximity +- **Single SSH connection**: Each gateway establishes one SSH reverse tunnel to its assigned relay server +- **Named relay assignment**: Gateways connect to the specific relay server specified by `--relay` +- **Automatic reconnection**: If the relay connection is lost, the gateway automatically reconnects to the same relay +- **Certificate-based authentication**: Each connection uses SSH certificates issued by Infisical for secure authentication -You should whitelist all relay IP addresses to ensure proper failover functionality. 
-No, relay servers cannot decrypt any traffic passing through them: +No, relay servers cannot decrypt any traffic passing through them due to end-to-end encryption: -- **End-to-end encryption**: All traffic between the gateway and Infisical Cloud is encrypted using mutual TLS with certificate pinning -- **Relay acts as a tunnel**: The relay server only forwards encrypted packets - it has no access to encryption keys -- **No data storage**: Relay servers do not store any traffic or network-identifiable information -- **Certificate isolation**: Each organization has its own private PKI system, ensuring complete tenant isolation +- **Client-to-Gateway mTLS (via TLS-pinned tunnel)**: Clients connect via a proxy that establishes a TLS-pinned tunnel to the gateway; mTLS between the client and gateway is negotiated inside this tunnel, encrypting all application traffic +- **SSH tunnel encryption**: The mTLS-encrypted traffic is then transmitted through SSH reverse tunnels to relay servers +- **Double encryption**: Traffic is encrypted twice - once by client mTLS and again by SSH tunnels +- **Relay only routes traffic**: The relay server only routes the doubly-encrypted traffic without access to either encryption layer +- **No data storage**: Relay servers do not store any traffic or sensitive information +- **Certificate isolation**: Each connection uses unique certificates, ensuring complete tenant isolation -The relay infrastructure is designed as a secure forwarding mechanism, similar to a VPN tunnel, where the relay provider cannot see the contents of the traffic flowing through it. - \ No newline at end of file +The relay infrastructure is designed as a secure routing mechanism where only the client and gateway can decrypt the actual application traffic. 
+ + diff --git a/docs/documentation/platform/gateways/overview.mdx b/docs/documentation/platform/gateways/overview.mdx index 127e544b74..b8ea0102af 100644 --- a/docs/documentation/platform/gateways/overview.mdx +++ b/docs/documentation/platform/gateways/overview.mdx @@ -4,33 +4,53 @@ sidebarTitle: "Overview" description: "How to access private network resources from Infisical" --- -![Alt text](/documentation/platform/gateways/images/gateway-highlevel-diagram.png) +![Architecture Overview](../../../images/platform/gateways/gateway-highlevel-diagram.png) + +The Infisical Gateway provides secure access to private resources within your network without needing direct inbound connections to your environment. This method keeps your resources fully protected from external access while enabling Infisical to securely interact with resources like databases. + +**Architecture Components:** + +- **Gateway**: Lightweight agent deployed within your VPCs that provides access to private resources +- **Relay**: Infrastructure that routes encrypted traffic (instance-wide or organization-specific) -The Infisical Gateway provides secure access to private resources within your network without needing direct inbound connections to your environment. -This method keeps your resources fully protected from external access while enabling Infisical to securely interact with resources like databases. Common use cases include generating dynamic credentials or rotating credentials for private databases. - **Note:** Gateway is a paid feature. - **Infisical Cloud users:** Gateway is - available under the **Enterprise Tier**. - **Self-Hosted Infisical:** Please - contact [sales@infisical.com](mailto:sales@infisical.com) to purchase an - enterprise license. + Gateway is a paid feature available under the Enterprise Tier for Infisical + Cloud users. Self-hosted Infisical users can contact + [sales@infisical.com](mailto:sales@infisical.com) to purchase an enterprise + license. 
## How It Works -The Gateway serves as a secure intermediary that facilitates direct communication between the Infisical server and your private network. -It’s a lightweight daemon packaged within the Infisical CLI, making it easy to deploy and manage. Once set up, the Gateway establishes a connection with a relay server, ensuring that all communication between Infisical and your Gateway is fully end-to-end encrypted. -This setup guarantees that only the platform and your Gateway can decrypt the transmitted information, keeping communication with your resources secure, private and isolated. +The Gateway system uses SSH reverse tunnels for secure, firewall-friendly connectivity: + +1. **Gateway Registration**: The gateway establishes an outbound SSH reverse tunnel to a relay server using SSH certificates issued by Infisical +2. **Relay Routing**: The relay server routes encrypted traffic between the Infisical platform and gateways +3. **Resource Access**: The Infisical platform connects to your private resources through the established gateway connections + +**Key Benefits:** + +- **No inbound firewall rules needed** - all connections are outbound from your network +- **Firewall-friendly** - uses standard SSH over TCP +- **Certificate-based authentication** provides enhanced security +- **Automatic reconnection** if connections are lost ## Deployment -The Infisical Gateway is seamlessly integrated into the Infisical CLI under the `gateway` command, making it simple to deploy and manage. +The Infisical Gateway is integrated into the Infisical CLI under the `gateway` command, making it simple to deploy and manage. You can install the Gateway in all the same ways you install the Infisical CLI—whether via npm, Docker, or a binary. For detailed installation instructions, refer to the Infisical [CLI Installation instructions](/cli/overview). -To function, the Gateway must authenticate with Infisical. 
This requires a machine identity configured with the appropriate permissions to create and manage a Gateway. -Once authenticated, the Gateway establishes a secure connection with Infisical to allow your private resources to be reachable. +**Prerequisites:** + +1. **Relay Server**: Before deploying gateways, you need a running relay server: + - **Infisical Cloud**: Instance relays are already available - no setup needed + - **Self-hosted**: Instance admin must set up shared instance relays, or organizations can deploy their own +2. **Machine Identity**: Configure a machine identity with appropriate permissions to create and manage gateways + +Once authenticated, the Gateway establishes an SSH reverse tunnel to the specified relay server, allowing secure access to your private resources. ### Get started @@ -46,14 +66,51 @@ Once authenticated, the Gateway establishes a secure connection with Infisical t You'll need to choose an authentication method to initiate communication with Infisical. View the available machine identity authentication methods [here](/documentation/platform/identities/machine-identities). + + You have two options for relay infrastructure: + + + + **Infisical Cloud:** Instance relays are already running and available - **no setup required**. You can immediately proceed to deploy gateways using these shared relays. + + **Self-hosted:** If your instance admin has set up shared instance relays, you can use them directly. 
If not, the instance admin can set them up: + ```bash + # Instance admin sets up shared relay (one-time setup) + export INFISICAL_RELAY_AUTH_SECRET= + infisical relay start --type=instance --ip= --name= + ``` + + + **Available for all users:** Deploy your own dedicated relay infrastructure for enhanced control: + ```bash + # Deploy organization-specific relay + infisical relay start --type=org --ip= --name= --auth-method=universal-auth --client-id= --client-secret= + ``` + + **When to choose this:** + - You need lower latency (deploy closer to your resources) + - Enhanced security requirements + - Compliance needs (data sovereignty, air-gapped environments) + - Custom network policies + + + + Use the Infisical CLI to deploy the Gateway. You can run it directly or install it as a systemd service for production: For production deployments on Linux, install the Gateway as a systemd service: + + + **Gateway v2:** The `infisical gateway systemd install` command deploys the new Gateway v2 component. + + If you are migrating from Gateway v1 (legacy `infisical gateway install` command), this is not in-place. Gateway v2 provisions new gateway instances with new gateway IDs. Update any resources that reference a gateway ID (for example: dynamic secret configs, app connections, or other gateway-bound resources) to use the new Gateway v2 gateway ID. 
+ + ```bash - sudo infisical gateway install --token --domain + sudo infisical gateway systemd install --token --domain --name --relay sudo systemctl start infisical-gateway ``` This will install and start the Gateway as a secure systemd service that: @@ -81,7 +138,7 @@ Once authenticated, the Gateway establishes a secure connection with Infisical t ### Install the latest Helm Chart repository ```bash - helm repo add infisical-helm-charts 'https://dl.cloudsmith.io/public/infisical/helm-charts/helm/charts/' + helm repo add infisical-helm-charts 'https://dl.cloudsmith.io/public/infisical/helm-charts/helm/charts/' ``` ### Update the Helm Chart repository @@ -116,7 +173,12 @@ Once authenticated, the Gateway establishes a secure connection with Infisical t ```bash - kubectl create secret generic infisical-gateway-environment --from-literal=INFISICAL_AUTH_METHOD=universal-auth --from-literal=INFISICAL_UNIVERSAL_AUTH_CLIENT_ID= --from-literal=INFISICAL_UNIVERSAL_AUTH_CLIENT_SECRET= + kubectl create secret generic infisical-gateway-environment \ + --from-literal=INFISICAL_AUTH_METHOD=universal-auth \ + --from-literal=INFISICAL_UNIVERSAL_AUTH_CLIENT_ID= \ + --from-literal=INFISICAL_UNIVERSAL_AUTH_CLIENT_SECRET= \ + --from-literal=INFISICAL_RELAY_NAME= \ + --from-literal=INFISICAL_GATEWAY_NAME= ``` @@ -283,6 +345,29 @@ Once authenticated, the Gateway establishes a secure connection with Infisical t + #### Required environment variables + + In addition to the authentication method above, you **must** include these required variables: + + + + The name of the relay server that this gateway should connect to. + + + The name of this gateway instance. 
+ + + + **Complete example with required variables:** + ```bash + kubectl create secret generic infisical-gateway-environment \ + --from-literal=INFISICAL_AUTH_METHOD=universal-auth \ + --from-literal=INFISICAL_UNIVERSAL_AUTH_CLIENT_ID= \ + --from-literal=INFISICAL_UNIVERSAL_AUTH_CLIENT_SECRET= \ + --from-literal=INFISICAL_RELAY_NAME= \ + --from-literal=INFISICAL_GATEWAY_NAME= + ``` + #### Other environment variables @@ -291,8 +376,13 @@ Once authenticated, the Gateway establishes a secure connection with Infisical t - ### Install the Infisical Gateway Helm Chart + + **Version mapping:** Helm chart versions `>= 1.0.0` contain the new Gateway v2 component. Helm chart versions `<= 0.0.5` contain the legacy Gateway v1 component. + + If you are moving from Gateway v1 (chart `<= 0.0.5`) to Gateway v2 (chart `>= 1.0.0`), this is not in-place. Gateway v2 provisions new gateway instances with new gateway IDs. Update any resources that reference a gateway ID (for example: dynamic secret configs, app connections, or other gateway-bound resources) to use the new Gateway v2 gateway ID. + + ```bash helm install infisical-gateway infisical-helm-charts/infisical-gateway ``` @@ -306,14 +396,18 @@ Once authenticated, the Gateway establishes a secure connection with Infisical t You should see the following output which indicates the gateway is running as expected. ```bash - $ kubectl logs deployment/infisical-gateway - INF Provided relay port 5349. Using TLS - INF Connected with relay - INF 10.0.101.112:56735 - INF Starting relay connection health check - INF Gateway started successfully - INF New connection from: 10.0.1.8:34051 - INF Gateway is reachable by Infisical + $ kubectl logs deployment/infisical-gateway + 12:43AM INF Starting gateway + 12:43AM INF Starting gateway certificate renewal goroutine + 12:43AM INF Successfully registered gateway and received certificates + 12:43AM INF Connecting to relay server infisical-start on 152.42.218.156:2222... 
+ 12:43AM INF Relay connection established for gateway + 12:43AM INF Received incoming connection, starting TLS handshake + 12:43AM INF TLS handshake completed successfully + 12:43AM INF Negotiated ALPN protocol: infisical-ping + 12:43AM INF Starting ping handler + 12:43AM INF Ping handler completed + 12:43AM INF Gateway is reachable by Infisical ``` @@ -321,27 +415,31 @@ Once authenticated, the Gateway establishes a secure connection with Infisical t For development or testing, you can run the Gateway directly. Log in with your machine identity and start the Gateway in one command: ```bash - infisical gateway --token $(infisical login --method=universal-auth --client-id=<> --client-secret=<> --plain) + infisical gateway start --token $(infisical login --method=universal-auth --client-id=<> --client-secret=<> --plain) --relay= --name= ``` Alternatively, if you already have the token, use it directly with the `--token` flag: ```bash - infisical gateway --token + infisical gateway start --token --relay= --name= ``` Or set it as an environment variable: ```bash export INFISICAL_TOKEN= - infisical gateway + infisical gateway start --relay= --name= ``` - For detailed information about the gateway command and its options, see the [gateway command documentation](/cli/commands/gateway). + For detailed information about the gateway commands and their options, see the [gateway command documentation](/cli/commands/gateway). - Ensure the deployed Gateway has network access to the private resources you intend to connect with Infisical. 
+ **Requirements:** + - Ensure the deployed Gateway has network access to the private resources you intend to connect with Infisical + - The gateway must be able to reach the relay server (outbound connection only) + - Replace `` with the name of your relay server and `` with a unique name for this gateway + diff --git a/docs/images/auth-methods/access-personal-settings.png b/docs/images/auth-methods/access-personal-settings.png index a5e1989c1e..31f6f96c77 100644 Binary files a/docs/images/auth-methods/access-personal-settings.png and b/docs/images/auth-methods/access-personal-settings.png differ diff --git a/docs/images/auth-methods/personal-settings-authentication-change-email-confirmation.png b/docs/images/auth-methods/personal-settings-authentication-change-email-confirmation.png new file mode 100644 index 0000000000..1a5986c3cd Binary files /dev/null and b/docs/images/auth-methods/personal-settings-authentication-change-email-confirmation.png differ diff --git a/docs/images/auth-methods/personal-settings-authentication-change-email-password.png b/docs/images/auth-methods/personal-settings-authentication-change-email-password.png new file mode 100644 index 0000000000..78945e2864 Binary files /dev/null and b/docs/images/auth-methods/personal-settings-authentication-change-email-password.png differ diff --git a/docs/images/auth-methods/personal-settings-authentication-tab.png b/docs/images/auth-methods/personal-settings-authentication-tab.png new file mode 100644 index 0000000000..5429e97cda Binary files /dev/null and b/docs/images/auth-methods/personal-settings-authentication-tab.png differ diff --git a/docs/images/platform/audit-log-streams/cribl-add-source.png b/docs/images/platform/audit-log-streams/cribl-add-source.png new file mode 100644 index 0000000000..95546a4569 Binary files /dev/null and b/docs/images/platform/audit-log-streams/cribl-add-source.png differ diff --git a/docs/images/platform/audit-log-streams/cribl-details.png 
b/docs/images/platform/audit-log-streams/cribl-details.png new file mode 100644 index 0000000000..4aa3cfdf53 Binary files /dev/null and b/docs/images/platform/audit-log-streams/cribl-details.png differ diff --git a/docs/images/platform/audit-log-streams/cribl-general-settings.png b/docs/images/platform/audit-log-streams/cribl-general-settings.png new file mode 100644 index 0000000000..8f3d20c0c8 Binary files /dev/null and b/docs/images/platform/audit-log-streams/cribl-general-settings.png differ diff --git a/docs/images/platform/audit-log-streams/cribl-ingress-address.png b/docs/images/platform/audit-log-streams/cribl-ingress-address.png new file mode 100644 index 0000000000..cb1842d679 Binary files /dev/null and b/docs/images/platform/audit-log-streams/cribl-ingress-address.png differ diff --git a/docs/images/platform/gateways/gateway-highlevel-diagram.png b/docs/images/platform/gateways/gateway-highlevel-diagram.png new file mode 100644 index 0000000000..5f942bcf0c Binary files /dev/null and b/docs/images/platform/gateways/gateway-highlevel-diagram.png differ diff --git a/docs/integrations/platforms/kubernetes/infisical-dynamic-secret-crd.mdx b/docs/integrations/platforms/kubernetes/infisical-dynamic-secret-crd.mdx index 76dd24cf79..848da40552 100644 --- a/docs/integrations/platforms/kubernetes/infisical-dynamic-secret-crd.mdx +++ b/docs/integrations/platforms/kubernetes/infisical-dynamic-secret-crd.mdx @@ -223,7 +223,9 @@ spec: spec: dynamicSecret: secretName: - projectId: + # Use either projectId OR projectSlug, not both + projectId: # Either projectId or projectSlug is required + # projectSlug: environmentSlug: secretsPath: ``` @@ -238,8 +240,21 @@ spec: The project ID of where the dynamic secret is stored in Infisical. + + + Please note that you can only use either `projectId` or `projectSlug` in the `dynamicSecret` field. + + + The project slug of where the dynamic secret is stored in Infisical. 
+ + + Please note that you can only use either `projectId` or `projectSlug` in the `dynamicSecret` field. + + + + {" "} diff --git a/docs/integrations/platforms/kubernetes/infisical-push-secret-crd.mdx b/docs/integrations/platforms/kubernetes/infisical-push-secret-crd.mdx index d5a9c1ef44..0affe7841f 100644 --- a/docs/integrations/platforms/kubernetes/infisical-push-secret-crd.mdx +++ b/docs/integrations/platforms/kubernetes/infisical-push-secret-crd.mdx @@ -44,7 +44,8 @@ Before applying the InfisicalPushSecret CRD, you need to create a Kubernetes sec deletionPolicy: Delete # If set to delete, the secret(s) inside Infisical managed by the operator, will be deleted if the InfisicalPushSecret CRD is deleted. destination: - projectId: + projectId: # Either projectId or projectSlug is required + projectSlug: environmentSlug: secretsPath: @@ -203,6 +204,18 @@ After applying the InfisicalPushSecret CRD, you should notice that the secrets y The project ID where you want to create the secrets in Infisical. + + + Please note that you can only use either `projectId` or `projectSlug` in the `destination` field. + + + + + The project slug where you want to create the secrets in Infisical. + + + Please note that you can only use either `projectId` or `projectSlug` in the `destination` field. + diff --git a/docs/integrations/platforms/kubernetes/infisical-secret-crd.mdx b/docs/integrations/platforms/kubernetes/infisical-secret-crd.mdx index d0e403b797..d72d589570 100644 --- a/docs/integrations/platforms/kubernetes/infisical-secret-crd.mdx +++ b/docs/integrations/platforms/kubernetes/infisical-secret-crd.mdx @@ -142,7 +142,10 @@ spec: authentication: universalAuth: secretsScope: + # either projectSlug or projectId is required projectSlug: # <-- project slug + projectId: # <-- project id + envSlug: # "dev", "staging", "prod", etc.. 
secretsPath: "" # Root is "/" credentialsRef: @@ -496,9 +499,11 @@ spec: Make sure to also populate the `secretsScope` field with the project slug - _`projectSlug`_, environment slug _`envSlug`_, and secrets path + _`projectSlug`_, or project ID _`projectId`_, environment slug _`envSlug`_, and secrets path _`secretsPath`_ that you want to fetch secrets from. Please see the example below. + + Please note that you can only use either `projectSlug` or `projectId` in the `secretsScope` field. ## Example @@ -545,9 +550,11 @@ spec: Make sure to also populate the `secretsScope` field with the project slug - _`projectSlug`_, environment slug _`envSlug`_, and secrets path + _`projectSlug`_, or project ID _`projectId`_, environment slug _`envSlug`_, and secrets path _`secretsPath`_ that you want to fetch secrets from. Please see the example below. + + Please note that you can only use either `projectSlug` or `projectId` in the `secretsScope` field. ## Example @@ -588,9 +595,11 @@ spec: Make sure to also populate the `secretsScope` field with the project slug - _`projectSlug`_, environment slug _`envSlug`_, and secrets path + _`projectSlug`_, or project ID _`projectId`_, environment slug _`envSlug`_, and secrets path _`secretsPath`_ that you want to fetch secrets from. Please see the example below. + + Please note that you can only use either `projectSlug` or `projectId` in the `secretsScope` field. ## Example @@ -631,9 +640,11 @@ spec: Make sure to also populate the `secretsScope` field with the project slug - _`projectSlug`_, environment slug _`envSlug`_, and secrets path + _`projectSlug`_, or project ID _`projectId`_, environment slug _`envSlug`_, and secrets path _`secretsPath`_ that you want to fetch secrets from. Please see the example below. + + Please note that you can only use either `projectSlug` or `projectId` in the `secretsScope` field. 
## Example @@ -675,9 +686,11 @@ spec: Make sure to also populate the `secretsScope` field with the project slug - _`projectSlug`_, environment slug _`envSlug`_, and secrets path + _`projectSlug`_, or project ID _`projectId`_, environment slug _`envSlug`_, and secrets path _`secretsPath`_ that you want to fetch secrets from. Please see the example below. + + Please note that you can only use either `projectSlug` or `projectId` in the `secretsScope` field. ## Example @@ -730,9 +743,11 @@ spec: Make sure to also populate the `secretsScope` field with the project slug - _`projectSlug`_, environment slug _`envSlug`_, and secrets path + _`projectSlug`_, or project ID _`projectId`_, environment slug _`envSlug`_, and secrets path _`secretsPath`_ that you want to fetch secrets from. Please see the example below. + + Please note that you can only use either `projectSlug` or `projectId` in the `secretsScope` field. ## Example diff --git a/docs/self-hosting/guides/releases.mdx b/docs/self-hosting/guides/releases.mdx index d85da1a78e..3cc22ea41d 100644 --- a/docs/self-hosting/guides/releases.mdx +++ b/docs/self-hosting/guides/releases.mdx @@ -19,8 +19,9 @@ Infisical provides two distinct release channels with different update frequenci - **Update Frequency**: Daily builds during weekdays (Monday-Friday) - - **Version Tags**: `vX.Y.Z-nightly-YYYYMMDD` (e.g., `v0.146.0-nightly-20250423`) - - **Multiple Daily Builds**: If multiple nightly builds are created on the same day, they are numbered incrementally: `vX.Y.Z-nightly-YYYYMMDD.1`, `vX.Y.Z-nightly-YYYYMMDD.2`, etc. 
+ - **Versioning Strategy**: Nightly releases provide daily patches and features while making its way towards the next stable release + - **Version Format**: `vX.Y.0-nightly-YYYYMMDD` where X.Y represents the next minor version increment from the latest stable release + - **Multiple Daily Builds**: If multiple nightly builds are created on the same day, they are numbered incrementally: `vX.Y.0-nightly-YYYYMMDD.1`, `vX.Y.0-nightly-YYYYMMDD.2`, etc. - **Stability**: Latest features with standard CI/CD testing - **Release Process**: Built from main branch after all automated tests pass - **Intended Audience**: Development environments & early adopters @@ -31,6 +32,7 @@ Infisical provides two distinct release channels with different update frequenci **Characteristics:** - Access to latest features immediately + - Pre-release versions of upcoming stable releases - Faster security patch delivery - Higher update frequency (daily) diff --git a/frontend/public/images/integrations/Cribl.png b/frontend/public/images/integrations/Cribl.png new file mode 100644 index 0000000000..9e82823687 Binary files /dev/null and b/frontend/public/images/integrations/Cribl.png differ diff --git a/frontend/src/components/permissions/OrgPermissionCan.tsx b/frontend/src/components/permissions/OrgPermissionCan.tsx index d2e698e88d..8e0bf08adb 100644 --- a/frontend/src/components/permissions/OrgPermissionCan.tsx +++ b/frontend/src/components/permissions/OrgPermissionCan.tsx @@ -1,6 +1,7 @@ import { FunctionComponent, ReactNode } from "react"; import { BoundCanProps, Can } from "@casl/react"; +import { TooltipProps } from "@app/components/v2/Tooltip/Tooltip"; import { TOrgPermission, useOrgPermission } from "@app/context/OrgPermissionContext"; import { AccessRestrictedBanner, Tooltip } from "../v2"; @@ -20,6 +21,7 @@ type Props = { renderTooltip?: boolean; allowedLabel?: string; renderGuardBanner?: boolean; + tooltipProps?: Omit; } & BoundCanProps; export const OrgPermissionCan: FunctionComponent 
= ({ @@ -29,6 +31,7 @@ export const OrgPermissionCan: FunctionComponent = ({ renderTooltip, allowedLabel, renderGuardBanner, + tooltipProps, ...props }) => { const { permission } = useOrgPermission(); @@ -43,11 +46,19 @@ export const OrgPermissionCan: FunctionComponent = ({ : children; if (!isAllowed && passThrough) { - return {finalChild}; + return ( + + {finalChild} + + ); } if (isAllowed && renderTooltip && allowedLabel) { - return {finalChild}; + return ( + + {finalChild} + + ); } if (!isAllowed && renderGuardBanner) { diff --git a/frontend/src/components/v2/SecretInput/SecretInput.tsx b/frontend/src/components/v2/SecretInput/SecretInput.tsx index 747af73d52..c320bb0af8 100644 --- a/frontend/src/components/v2/SecretInput/SecretInput.tsx +++ b/frontend/src/components/v2/SecretInput/SecretInput.tsx @@ -34,7 +34,9 @@ const syntaxHighlight = (content?: string | null, isVisible?: boolean, isImport? // akhilmhdh: Dont remove this br. I am still clueless how this works but weirdly enough // when break is added a line break works properly - return formattedContent.concat(
); + return formattedContent.concat( +
+ ); }; type Props = TextareaHTMLAttributes & { diff --git a/frontend/src/context/SubscriptionContext/SubscriptionContext.tsx b/frontend/src/context/SubscriptionContext/SubscriptionContext.tsx index de52e5f183..95da6bb015 100644 --- a/frontend/src/context/SubscriptionContext/SubscriptionContext.tsx +++ b/frontend/src/context/SubscriptionContext/SubscriptionContext.tsx @@ -3,7 +3,7 @@ import { useRouteContext } from "@tanstack/react-router"; import { fetchOrgSubscription, subscriptionQueryKeys } from "@app/hooks/api/subscriptions/queries"; -export const useSubscription = () => { +export const useSubscription = (refreshCache?: boolean) => { const organizationId = useRouteContext({ from: "/_authenticate/_inject-org-details", select: (el) => el.organizationId @@ -11,7 +11,7 @@ export const useSubscription = () => { const { data: subscription } = useSuspenseQuery({ queryKey: subscriptionQueryKeys.getOrgSubsription(organizationId), - queryFn: () => fetchOrgSubscription(organizationId), + queryFn: () => fetchOrgSubscription(organizationId, refreshCache), staleTime: Infinity }); diff --git a/frontend/src/helpers/auditLogStreams.ts b/frontend/src/helpers/auditLogStreams.ts index b426ada1ca..faa132dd29 100644 --- a/frontend/src/helpers/auditLogStreams.ts +++ b/frontend/src/helpers/auditLogStreams.ts @@ -9,6 +9,7 @@ export const AUDIT_LOG_STREAM_PROVIDER_MAP: Record< { name: string; image?: string; icon?: IconDefinition; size?: number } > = { [LogProvider.Azure]: { name: "Azure", image: "Microsoft Azure.png", size: 60 }, + [LogProvider.Cribl]: { name: "Cribl", image: "Cribl.png", size: 60 }, [LogProvider.Custom]: { name: "Custom", icon: faCode }, [LogProvider.Datadog]: { name: "Datadog", image: "Datadog.png" }, [LogProvider.Splunk]: { name: "Splunk", image: "Splunk.png", size: 65 } @@ -21,6 +22,7 @@ export function getProviderUrl( switch (logStream.provider) { case LogProvider.Custom: case LogProvider.Datadog: + case LogProvider.Cribl: return logStream.credentials.url; case 
LogProvider.Splunk: return `https://${logStream.credentials.hostname}:8088/services/collector/event`; diff --git a/frontend/src/hooks/api/auditLogStreams/enums.ts b/frontend/src/hooks/api/auditLogStreams/enums.ts index b23a528a7e..ebef18574c 100644 --- a/frontend/src/hooks/api/auditLogStreams/enums.ts +++ b/frontend/src/hooks/api/auditLogStreams/enums.ts @@ -1,6 +1,7 @@ export enum LogProvider { Azure = "azure", + Cribl = "cribl", + Custom = "custom", Datadog = "datadog", - Splunk = "splunk", - Custom = "custom" + Splunk = "splunk" } diff --git a/frontend/src/hooks/api/auditLogStreams/types/index.ts b/frontend/src/hooks/api/auditLogStreams/types/index.ts index d007785cde..f780510c2d 100644 --- a/frontend/src/hooks/api/auditLogStreams/types/index.ts +++ b/frontend/src/hooks/api/auditLogStreams/types/index.ts @@ -1,5 +1,6 @@ import { LogProvider } from "../enums"; import { TAzureProviderLogStream } from "./providers/azure-provider"; +import { TCriblProviderLogStream } from "./providers/cribl-provider"; import { TCustomProviderLogStream } from "./providers/custom-provider"; import { TDatadogProviderLogStream } from "./providers/datadog-provider"; import { TSplunkProviderLogStream } from "./providers/splunk-provider"; @@ -8,10 +9,12 @@ export type TAuditLogStream = | TCustomProviderLogStream | TDatadogProviderLogStream | TSplunkProviderLogStream - | TAzureProviderLogStream; + | TAzureProviderLogStream + | TCriblProviderLogStream; export type TAuditLogStreamProviderMap = { [LogProvider.Azure]: TAzureProviderLogStream; + [LogProvider.Cribl]: TCriblProviderLogStream; [LogProvider.Custom]: TCustomProviderLogStream; [LogProvider.Datadog]: TDatadogProviderLogStream; [LogProvider.Splunk]: TSplunkProviderLogStream; diff --git a/frontend/src/hooks/api/auditLogStreams/types/providers/cribl-provider.ts b/frontend/src/hooks/api/auditLogStreams/types/providers/cribl-provider.ts new file mode 100644 index 0000000000..aeda0390a9 --- /dev/null +++ 
b/frontend/src/hooks/api/auditLogStreams/types/providers/cribl-provider.ts @@ -0,0 +1,10 @@ +import { LogProvider } from "../../enums"; +import { TRootProviderLogStream } from "./root-provider"; + +export type TCriblProviderLogStream = TRootProviderLogStream & { + provider: LogProvider.Cribl; + credentials: { + url: string; + token: string; + }; +}; diff --git a/frontend/src/hooks/api/gateways-v2/index.tsx b/frontend/src/hooks/api/gateways-v2/index.tsx new file mode 100644 index 0000000000..f8dd99d037 --- /dev/null +++ b/frontend/src/hooks/api/gateways-v2/index.tsx @@ -0,0 +1 @@ +export * from "./mutations"; diff --git a/frontend/src/hooks/api/gateways-v2/mutations.tsx b/frontend/src/hooks/api/gateways-v2/mutations.tsx new file mode 100644 index 0000000000..c3bf8bd1bb --- /dev/null +++ b/frontend/src/hooks/api/gateways-v2/mutations.tsx @@ -0,0 +1,17 @@ +import { useMutation, useQueryClient } from "@tanstack/react-query"; + +import { apiRequest } from "@app/config/request"; + +import { gatewaysQueryKeys } from "../gateways/queries"; + +export const useDeleteGatewayV2ById = () => { + const queryClient = useQueryClient(); + return useMutation({ + mutationFn: (id: string) => { + return apiRequest.delete(`/api/v2/gateways/${id}`); + }, + onSuccess: () => { + queryClient.invalidateQueries(gatewaysQueryKeys.list()); + } + }); +}; diff --git a/frontend/src/hooks/api/gateways-v2/types.ts b/frontend/src/hooks/api/gateways-v2/types.ts new file mode 100644 index 0000000000..69bc217023 --- /dev/null +++ b/frontend/src/hooks/api/gateways-v2/types.ts @@ -0,0 +1,12 @@ +export type TGatewayV2 = { + id: string; + identityId: string; + name: string; + createdAt: string; + updatedAt: string; + heartbeat: string; + identity: { + name: string; + id: string; + }; +}; diff --git a/frontend/src/hooks/api/gateways/queries.tsx b/frontend/src/hooks/api/gateways/queries.tsx index bb05b17a46..ef4dafb75b 100644 --- a/frontend/src/hooks/api/gateways/queries.tsx +++ 
b/frontend/src/hooks/api/gateways/queries.tsx @@ -2,6 +2,7 @@ import { queryOptions } from "@tanstack/react-query"; import { apiRequest } from "@app/config/request"; +import { TGatewayV2 } from "../gateways-v2/types"; import { TGateway } from "./types"; export const gatewaysQueryKeys = { @@ -11,8 +12,21 @@ export const gatewaysQueryKeys = { queryOptions({ queryKey: gatewaysQueryKeys.listKey(), queryFn: async () => { - const { data } = await apiRequest.get<{ gateways: TGateway[] }>("/api/v1/gateways"); - return data.gateways; + const [{ data }, { data: dataV2 }] = await Promise.all([ + apiRequest.get<{ gateways: TGateway[] }>("/api/v1/gateways"), + apiRequest.get("/api/v2/gateways") + ]); + + return [ + ...data.gateways.map((g) => ({ + ...g, + isV1: true + })), + ...dataV2.map((g) => ({ + ...g, + isV1: false + })) + ]; } }) }; diff --git a/frontend/src/hooks/api/subscriptions/queries.tsx b/frontend/src/hooks/api/subscriptions/queries.tsx index 99b1f44868..f545565eba 100644 --- a/frontend/src/hooks/api/subscriptions/queries.tsx +++ b/frontend/src/hooks/api/subscriptions/queries.tsx @@ -10,9 +10,9 @@ export const subscriptionQueryKeys = { getOrgSubsription: (orgID: string) => ["plan", { orgID }] as const }; -export const fetchOrgSubscription = async (orgID: string) => { +export const fetchOrgSubscription = async (orgID: string, refreshCache: boolean = false) => { const { data } = await apiRequest.get<{ plan: SubscriptionPlan }>( - `/api/v1/organizations/${orgID}/plan` + `/api/v1/organizations/${orgID}/plan${refreshCache ? 
"?refreshCache=true" : ""}` ); return data.plan; diff --git a/frontend/src/hooks/api/subscriptions/types.ts b/frontend/src/hooks/api/subscriptions/types.ts index 338599fe04..0c3733cc80 100644 --- a/frontend/src/hooks/api/subscriptions/types.ts +++ b/frontend/src/hooks/api/subscriptions/types.ts @@ -54,5 +54,7 @@ export type SubscriptionPlan = { secretScanning: boolean; enterpriseSecretSyncs: boolean; enterpriseAppConnections: boolean; + cardDeclined?: boolean; + cardDeclinedReason?: string; machineIdentityAuthTemplates: boolean; }; diff --git a/frontend/src/hooks/api/users/index.tsx b/frontend/src/hooks/api/users/index.tsx index d20a15bc81..36bccdb8aa 100644 --- a/frontend/src/hooks/api/users/index.tsx +++ b/frontend/src/hooks/api/users/index.tsx @@ -1,8 +1,10 @@ export { useAddUserToWsNonE2EE, useRemoveMyDuplicateAccounts, + useRequestEmailChangeOTP, useRevokeMySessionById, useSendEmailVerificationCode, + useUpdateUserEmail, useVerifyEmailVerificationCode } from "./mutation"; export { diff --git a/frontend/src/hooks/api/users/mutation.tsx b/frontend/src/hooks/api/users/mutation.tsx index b108a98c4e..1f3fdf3ad4 100644 --- a/frontend/src/hooks/api/users/mutation.tsx +++ b/frontend/src/hooks/api/users/mutation.tsx @@ -152,3 +152,30 @@ export const useRemoveMyDuplicateAccounts = () => { } }); }; + +export const useRequestEmailChangeOTP = () => { + return useMutation({ + mutationFn: async ({ newEmail }: { newEmail: string }) => { + const { data } = await apiRequest.post("/api/v2/users/me/email-change/otp", { + newEmail + }); + return data; + } + }); +}; + +export const useUpdateUserEmail = () => { + const queryClient = useQueryClient(); + return useMutation({ + mutationFn: async ({ newEmail, otpCode }: { newEmail: string; otpCode: string }) => { + const { data } = await apiRequest.patch("/api/v2/users/me/email", { + newEmail, + otpCode + }); + return data; + }, + onSuccess: () => { + queryClient.invalidateQueries({ queryKey: userKeys.getUser }); + } + }); +}; diff 
--git a/frontend/src/layouts/OrganizationLayout/components/NavBar/Navbar.tsx b/frontend/src/layouts/OrganizationLayout/components/NavBar/Navbar.tsx index 63da8b5338..e9bb6a902a 100644 --- a/frontend/src/layouts/OrganizationLayout/components/NavBar/Navbar.tsx +++ b/frontend/src/layouts/OrganizationLayout/components/NavBar/Navbar.tsx @@ -1,4 +1,4 @@ -import { useState } from "react"; +import { useEffect, useState } from "react"; import { faGithub, faSlack } from "@fortawesome/free-brands-svg-icons"; import { faCircleQuestion, faUserCircle } from "@fortawesome/free-regular-svg-icons"; import { @@ -8,6 +8,7 @@ import { faCaretDown, faCheck, faEnvelope, + faExclamationTriangle, faInfo, faInfoCircle, faServer, @@ -111,6 +112,14 @@ export const Navbar = () => { const { subscription } = useSubscription(); const { currentOrg } = useOrganization(); const [showAdminsModal, setShowAdminsModal] = useState(false); + const [showCardDeclinedModal, setShowCardDeclinedModal] = useState(false); + + useEffect(() => { + if (subscription?.cardDeclined && !sessionStorage.getItem("paymentFailed")) { + sessionStorage.setItem("paymentFailed", "true"); + setShowCardDeclinedModal(true); + } + }, [subscription]); const { data: orgs } = useGetOrganizations(); const navigate = useNavigate(); @@ -222,6 +231,19 @@ export const Navbar = () => {
{getPlan(subscription)}
+ {subscription.cardDeclined && ( + +
+ +
+
+ )} @@ -428,6 +450,49 @@ export const Navbar = () => { + + + + Your payment could not be processed. + + } + > +
+
+
+

+ We were unable to process your last payment + {subscription.cardDeclinedReason ? `: ${subscription.cardDeclinedReason}` : ""}. + Please update your payment information to continue using premium features. +

+
+
+
+ + + + +
+
+
+
+
+
diff --git a/frontend/src/pages/organization/BillingPage/components/BillingCloudTab/PreviewSection.tsx b/frontend/src/pages/organization/BillingPage/components/BillingCloudTab/PreviewSection.tsx index eaa90450e3..6ab0429ee7 100644 --- a/frontend/src/pages/organization/BillingPage/components/BillingCloudTab/PreviewSection.tsx +++ b/frontend/src/pages/organization/BillingPage/components/BillingCloudTab/PreviewSection.tsx @@ -1,5 +1,7 @@ +import { useEffect } from "react"; import { faArrowUpRightFromSquare } from "@fortawesome/free-solid-svg-icons"; import { FontAwesomeIcon } from "@fortawesome/react-fontawesome"; +import { useQueryClient } from "@tanstack/react-query"; import { OrgPermissionCan } from "@app/components/permissions"; import { Button } from "@app/components/v2"; @@ -15,13 +17,15 @@ import { useGetOrgPlanBillingInfo, useGetOrgTrialUrl } from "@app/hooks/api"; +import { subscriptionQueryKeys } from "@app/hooks/api/subscriptions/queries"; import { usePopUp } from "@app/hooks/usePopUp"; import { ManagePlansModal } from "./ManagePlansModal"; export const PreviewSection = () => { const { currentOrg } = useOrganization(); - const { subscription } = useSubscription(); + const { subscription } = useSubscription(true); + const queryClient = useQueryClient(); const { data, isPending } = useGetOrgPlanBillingInfo(currentOrg?.id ?? ""); const getOrgTrialUrl = useGetOrgTrialUrl(); const createCustomerPortalSession = useCreateCustomerPortalSession(); @@ -37,6 +41,12 @@ export const PreviewSection = () => { return formattedTotal; }; + useEffect(() => { + queryClient.invalidateQueries({ + queryKey: subscriptionQueryKeys.getOrgSubsription(currentOrg?.id ?? 
"") + }); + }, []); + const formatDate = (date: number) => { const createdDate = new Date(date * 1000); const day: number = createdDate.getDate(); diff --git a/frontend/src/pages/organization/Gateways/GatewayListPage/GatewayListPage.tsx b/frontend/src/pages/organization/Gateways/GatewayListPage/GatewayListPage.tsx index 1552f85486..729654d76c 100644 --- a/frontend/src/pages/organization/Gateways/GatewayListPage/GatewayListPage.tsx +++ b/frontend/src/pages/organization/Gateways/GatewayListPage/GatewayListPage.tsx @@ -14,7 +14,7 @@ import { } from "@fortawesome/free-solid-svg-icons"; import { FontAwesomeIcon } from "@fortawesome/react-fontawesome"; import { useQuery } from "@tanstack/react-query"; -import { format, formatRelative } from "date-fns"; +import { formatRelative } from "date-fns"; import { createNotification } from "@app/components/notifications"; import { OrgPermissionCan } from "@app/components/permissions"; @@ -48,13 +48,14 @@ import { import { withPermission } from "@app/hoc"; import { usePopUp } from "@app/hooks"; import { gatewaysQueryKeys, useDeleteGatewayById } from "@app/hooks/api/gateways"; +import { useDeleteGatewayV2ById } from "@app/hooks/api/gateways-v2"; import { EditGatewayDetailsModal } from "./components/EditGatewayDetailsModal"; export const GatewayListPage = withPermission( () => { const [search, setSearch] = useState(""); - const { data: gateways, isPending: isGatewayLoading } = useQuery(gatewaysQueryKeys.list()); + const { data: gateways, isPending: isGatewaysLoading } = useQuery(gatewaysQueryKeys.list()); const { popUp, handlePopUpOpen, handlePopUpToggle } = usePopUp([ "deleteGateway", @@ -62,16 +63,20 @@ export const GatewayListPage = withPermission( ] as const); const deleteGatewayById = useDeleteGatewayById(); + const deleteGatewayV2ById = useDeleteGatewayV2ById(); const handleDeleteGateway = async () => { - await deleteGatewayById.mutateAsync((popUp.deleteGateway.data as { id: string }).id, { - onSuccess: () => { - 
handlePopUpToggle("deleteGateway"); - createNotification({ - type: "success", - text: "Successfully delete gateway" - }); - } + const data = popUp.deleteGateway.data as { id: string; isV1: boolean }; + if (data.isV1) { + await deleteGatewayById.mutateAsync(data.id); + } else { + await deleteGatewayV2ById.mutateAsync(data.id); + } + + handlePopUpToggle("deleteGateway"); + createNotification({ + type: "success", + text: "Successfully deleted gateway" }); }; @@ -127,7 +132,6 @@ export const GatewayListPage = withPermission( Name - Cert Issued At Identity Health Check @@ -143,13 +147,19 @@ export const GatewayListPage = withPermission( - {isGatewayLoading && ( + {isGatewaysLoading && ( )} {filteredGateway?.map((el) => ( - {el.name} - {format(new Date(el.issuedAt), "yyyy-MM-dd hh:mm:ss aaa")} + +
+ {el.name} + + Gateway v{el.isV1 ? "1" : "2"} + +
+ {el.identity.name} {el.heartbeat @@ -176,20 +186,22 @@ export const GatewayListPage = withPermission( > Copy ID - - {(isAllowed: boolean) => ( - } - onClick={() => handlePopUpOpen("editDetails", el)} - > - Edit Details - - )} - + {el.isV1 && ( + + {(isAllowed: boolean) => ( + } + onClick={() => handlePopUpOpen("editDetails", el)} + > + Edit Details + + )} + + )} - {!isGatewayLoading && !filteredGateway?.length && ( + {!isGatewaysLoading && !filteredGateway?.length && ( ); }, - { - action: OrgPermissionAppConnectionActions.Read, - subject: OrgPermissionSubjects.AppConnections - } + { action: OrgGatewayPermissionActions.ListGateways, subject: OrgPermissionSubjects.Gateway } ); diff --git a/frontend/src/pages/organization/SettingsPage/components/AuditLogStreamTab/AuditLogStreamForm/AuditLogStreamForm.tsx b/frontend/src/pages/organization/SettingsPage/components/AuditLogStreamTab/AuditLogStreamForm/AuditLogStreamForm.tsx index 7ba2f2eaab..f59ae23aa5 100644 --- a/frontend/src/pages/organization/SettingsPage/components/AuditLogStreamTab/AuditLogStreamForm/AuditLogStreamForm.tsx +++ b/frontend/src/pages/organization/SettingsPage/components/AuditLogStreamTab/AuditLogStreamForm/AuditLogStreamForm.tsx @@ -7,6 +7,7 @@ import { DiscriminativePick } from "@app/types"; import { AuditLogStreamHeader } from "../components/AuditLogStreamHeader"; import { AzureProviderAuditLogStreamForm } from "./AzureProviderAuditLogStreamForm"; +import { CriblProviderAuditLogStreamForm } from "./CriblProviderAuditLogStreamForm"; import { CustomProviderAuditLogStreamForm } from "./CustomProviderAuditLogStreamForm"; import { DatadogProviderAuditLogStreamForm } from "./DatadogProviderAuditLogStreamForm"; import { SplunkProviderAuditLogStreamForm } from "./SplunkProviderAuditLogStreamForm"; @@ -47,6 +48,8 @@ const CreateForm = ({ provider, onComplete }: CreateFormProps) => { switch (provider) { case LogProvider.Azure: return ; + case LogProvider.Cribl: + return ; case LogProvider.Custom: return ; 
case LogProvider.Datadog: @@ -90,6 +93,10 @@ const UpdateForm = ({ auditLogStream, onComplete }: UpdateFormProps) => { return ( ); + case LogProvider.Cribl: + return ( + + ); case LogProvider.Custom: return ( diff --git a/frontend/src/pages/organization/SettingsPage/components/AuditLogStreamTab/AuditLogStreamForm/CriblProviderAuditLogStreamForm.tsx b/frontend/src/pages/organization/SettingsPage/components/AuditLogStreamTab/AuditLogStreamForm/CriblProviderAuditLogStreamForm.tsx new file mode 100644 index 0000000000..b839ddac8d --- /dev/null +++ b/frontend/src/pages/organization/SettingsPage/components/AuditLogStreamTab/AuditLogStreamForm/CriblProviderAuditLogStreamForm.tsx @@ -0,0 +1,108 @@ +import { Controller, FormProvider, useForm } from "react-hook-form"; +import { zodResolver } from "@hookform/resolvers/zod"; +import { z } from "zod"; + +import { Button, FormControl, Input, ModalClose, SecretInput } from "@app/components/v2"; +import { LogProvider } from "@app/hooks/api/auditLogStreams/enums"; +import { TCriblProviderLogStream } from "@app/hooks/api/auditLogStreams/types/providers/cribl-provider"; + +type Props = { + auditLogStream?: TCriblProviderLogStream; + onSubmit: (formData: FormData) => void; +}; + +const formSchema = z.object({ + provider: z.literal(LogProvider.Cribl), + credentials: z.object({ + url: z.string().url().trim().min(1).max(255), + token: z.string().trim().min(21).max(255) + }) +}); + +type FormData = z.infer; + +export const CriblProviderAuditLogStreamForm = ({ auditLogStream, onSubmit }: Props) => { + const isUpdate = Boolean(auditLogStream); + + const form = useForm({ + resolver: zodResolver(formSchema), + defaultValues: auditLogStream ?? { + provider: LogProvider.Cribl + } + }); + + const { + handleSubmit, + control, + formState: { isSubmitting, isDirty } + } = form; + + return ( + +
+ ( + + To derive your Stream URL: Obtain your Cribl hostname (e.g. cribl.example.com), + Infisical HTTP data source port (e.g. 20000), and HTTP event API path (e.g. + /infisical). +
+
+ If your Infisical Data Source has TLS enabled, then use the https protocol. + + } + > + +
+ )} + /> + ( + + onChange(e.target.value)} + /> + + )} + /> +
+ + + + +
+ +
+ ); +}; diff --git a/frontend/src/pages/organization/SettingsPage/components/OrgSsoTab/OrgGeneralAuthSection.tsx b/frontend/src/pages/organization/SettingsPage/components/OrgSsoTab/OrgGeneralAuthSection.tsx index b9ca117483..c3d8cea98b 100644 --- a/frontend/src/pages/organization/SettingsPage/components/OrgSsoTab/OrgGeneralAuthSection.tsx +++ b/frontend/src/pages/organization/SettingsPage/components/OrgSsoTab/OrgGeneralAuthSection.tsx @@ -24,11 +24,17 @@ enum EnforceAuthType { export const OrgGeneralAuthSection = ({ isSamlConfigured, isOidcConfigured, - isGoogleConfigured + isGoogleConfigured, + isSamlActive, + isOidcActive, + isLdapActive }: { isSamlConfigured: boolean; isOidcConfigured: boolean; isGoogleConfigured: boolean; + isSamlActive: boolean; + isOidcActive: boolean; + isLdapActive: boolean; }) => { const { currentOrg } = useOrganization(); const { subscription } = useSubscription(); @@ -126,6 +132,15 @@ export const OrgGeneralAuthSection = ({ } }; + const isGoogleOAuthEnforced = currentOrg.googleSsoAuthEnforced; + + const getActiveSsoLabel = () => { + if (isSamlActive) return "SAML"; + if (isOidcActive) return "OIDC"; + if (isLdapActive) return "LDAP"; + return ""; + }; + return (
@@ -135,7 +150,7 @@ export const OrgGeneralAuthSection = ({

-
+
Enforce SAML SSO @@ -160,7 +175,7 @@ export const OrgGeneralAuthSection = ({

-
+
Enforce OIDC SSO @@ -188,26 +203,47 @@ export const OrgGeneralAuthSection = ({
- Enforce Google SSO + Enforce Google OAuth
- + {(isAllowed) => ( - - handleEnforceOrgAuthToggle(value, EnforceAuthType.GOOGLE) - } - isChecked={currentOrg?.googleSsoAuthEnforced ?? false} - isDisabled={!isAllowed || currentOrg?.authEnforced} - /> +
+ + handleEnforceOrgAuthToggle(value, EnforceAuthType.GOOGLE) + } + isChecked={currentOrg?.googleSsoAuthEnforced ?? false} + isDisabled={ + !isAllowed || + currentOrg?.authEnforced || + isOidcActive || + isSamlActive || + isLdapActive + } + /> +
)}

- Enforce users to authenticate via Google OAuth SSO to access this organization. + Enforce users to authenticate via Google OAuth to access this organization.
When this is enabled your organization members will only be able to login with Google - SSO (not Google SAML). + OAuth (not Google SAML).

@@ -267,8 +303,8 @@ export const OrgGeneralAuthSection = ({

- Allow organization admins to bypass SAML enforcement when SSO is unavailable, - misconfigured, or inaccessible. + Allow organization admins to bypass SSO login enforcement when your SSO provider is + unavailable, misconfigured, or inaccessible.

diff --git a/frontend/src/pages/organization/SettingsPage/components/OrgSsoTab/OrgLDAPSection.tsx b/frontend/src/pages/organization/SettingsPage/components/OrgSsoTab/OrgLDAPSection.tsx index e66987baee..a17483fdd9 100644 --- a/frontend/src/pages/organization/SettingsPage/components/OrgSsoTab/OrgLDAPSection.tsx +++ b/frontend/src/pages/organization/SettingsPage/components/OrgSsoTab/OrgLDAPSection.tsx @@ -94,6 +94,8 @@ export const OrgLDAPSection = (): JSX.Element => { handlePopUpOpen("ldapGroupMap"); }; + const isGoogleOAuthEnabled = currentOrg.googleSsoAuthEnforced; + return (
@@ -116,16 +118,31 @@ export const OrgLDAPSection = (): JSX.Element => {

Enable LDAP

- + {(isAllowed) => ( - handleLDAPToggle(value)} - isChecked={data ? data.isActive : false} - isDisabled={!isAllowed} - > - Enable - +
+ handleLDAPToggle(value)} + isChecked={data ? data.isActive : false} + isDisabled={!isAllowed || isGoogleOAuthEnabled} + > + Enable + +
)}
diff --git a/frontend/src/pages/organization/SettingsPage/components/OrgSsoTab/OrgOIDCSection.tsx b/frontend/src/pages/organization/SettingsPage/components/OrgSsoTab/OrgOIDCSection.tsx index ce10c39074..3ba1a8275e 100644 --- a/frontend/src/pages/organization/SettingsPage/components/OrgSsoTab/OrgOIDCSection.tsx +++ b/frontend/src/pages/organization/SettingsPage/components/OrgSsoTab/OrgOIDCSection.tsx @@ -83,6 +83,8 @@ export const OrgOIDCSection = (): JSX.Element => { } }; + const isGoogleOAuthEnabled = currentOrg.googleSsoAuthEnforced; + return (
@@ -106,14 +108,29 @@ export const OrgOIDCSection = (): JSX.Element => {

Enable OIDC

{!isPending && ( - + {(isAllowed) => ( - handleOIDCToggle(value)} - isChecked={data ? data.isActive : false} - isDisabled={!isAllowed} - /> +
+ handleOIDCToggle(value)} + isChecked={data ? data.isActive : false} + isDisabled={!isAllowed || isGoogleOAuthEnabled} + /> +
)}
)} diff --git a/frontend/src/pages/organization/SettingsPage/components/OrgSsoTab/OrgSSOSection.tsx b/frontend/src/pages/organization/SettingsPage/components/OrgSsoTab/OrgSSOSection.tsx index 33843f50fe..53e6cece7f 100644 --- a/frontend/src/pages/organization/SettingsPage/components/OrgSsoTab/OrgSSOSection.tsx +++ b/frontend/src/pages/organization/SettingsPage/components/OrgSsoTab/OrgSSOSection.tsx @@ -78,6 +78,8 @@ export const OrgSSOSection = (): JSX.Element => { } }; + const isGoogleOAuthEnabled = currentOrg.googleSsoAuthEnforced; + return (
@@ -99,14 +101,29 @@ export const OrgSSOSection = (): JSX.Element => {

Enable SAML

{!isPending && ( - + {(isAllowed) => ( - handleSamlSSOToggle(value)} - isChecked={data ? data.isActive : false} - isDisabled={!isAllowed} - /> +
+ handleSamlSSOToggle(value)} + isChecked={data ? data.isActive : false} + isDisabled={!isAllowed || isGoogleOAuthEnabled} + /> +
)}
)} diff --git a/frontend/src/pages/organization/SettingsPage/components/OrgSsoTab/OrgSsoTab.tsx b/frontend/src/pages/organization/SettingsPage/components/OrgSsoTab/OrgSsoTab.tsx index 9964fdf4d3..e4e3a9c23b 100644 --- a/frontend/src/pages/organization/SettingsPage/components/OrgSsoTab/OrgSsoTab.tsx +++ b/frontend/src/pages/organization/SettingsPage/components/OrgSsoTab/OrgSsoTab.tsx @@ -184,6 +184,9 @@ export const OrgSsoTab = withPermission( isSamlConfigured={isSamlConfigured} isOidcConfigured={isOidcConfigured} isGoogleConfigured={isGoogleConfigured} + isSamlActive={Boolean(samlConfig?.isActive)} + isOidcActive={Boolean(oidcConfig?.isActive)} + isLdapActive={Boolean(ldapConfig?.isActive)} /> )} diff --git a/frontend/src/pages/secret-manager/SecretDashboardPage/components/ActionBar/CreateDynamicSecretForm/SqlDatabaseInputForm.tsx b/frontend/src/pages/secret-manager/SecretDashboardPage/components/ActionBar/CreateDynamicSecretForm/SqlDatabaseInputForm.tsx index 8bedbbaeb6..050a51754f 100644 --- a/frontend/src/pages/secret-manager/SecretDashboardPage/components/ActionBar/CreateDynamicSecretForm/SqlDatabaseInputForm.tsx +++ b/frontend/src/pages/secret-manager/SecretDashboardPage/components/ActionBar/CreateDynamicSecretForm/SqlDatabaseInputForm.tsx @@ -19,6 +19,7 @@ import { SecretInput, Select, SelectItem, + Switch, TextArea, Tooltip } from "@app/components/v2"; @@ -66,6 +67,7 @@ const formSchema = z.object({ creationStatement: z.string().min(1), revocationStatement: z.string().min(1), renewStatement: z.string().optional(), + sslEnabled: z.boolean().optional(), ca: z.string().optional(), gatewayId: z.string().optional() }), @@ -200,6 +202,7 @@ export const SqlDatabaseInputForm = ({ const createDynamicSecret = useCreateDynamicSecret(); const { data: gateways, isPending: isGatewaysLoading } = useQuery(gatewaysQueryKeys.list()); + const selectedClient = watch("provider.client"); const handleCreateDynamicSecret = async ({ name, @@ -458,13 +461,34 @@ export const 
SqlDatabaseInputForm = ({ />
+ {selectedClient === SqlProviders.MsSQL && ( +
+ ( + + + Encrypt Connection (SSL) + + + )} + /> +
+ )} ( diff --git a/frontend/src/pages/secret-manager/SecretDashboardPage/components/DynamicSecretListView/EditDynamicSecretForm/EditDynamicSecretSqlProviderForm.tsx b/frontend/src/pages/secret-manager/SecretDashboardPage/components/DynamicSecretListView/EditDynamicSecretForm/EditDynamicSecretSqlProviderForm.tsx index 2cbad1e196..1eda17bc87 100644 --- a/frontend/src/pages/secret-manager/SecretDashboardPage/components/DynamicSecretListView/EditDynamicSecretForm/EditDynamicSecretSqlProviderForm.tsx +++ b/frontend/src/pages/secret-manager/SecretDashboardPage/components/DynamicSecretListView/EditDynamicSecretForm/EditDynamicSecretSqlProviderForm.tsx @@ -18,6 +18,7 @@ import { SecretInput, Select, SelectItem, + Switch, TextArea, Tooltip } from "@app/components/v2"; @@ -63,6 +64,7 @@ const formSchema = z.object({ creationStatement: z.string().min(1), revocationStatement: z.string().min(1), renewStatement: z.string().optional(), + sslEnabled: z.boolean().optional(), ca: z.string().optional(), gatewayId: z.string().optional().nullable() }) @@ -151,6 +153,7 @@ export const EditDynamicSecretSqlProviderForm = ({ }); const { data: gateways, isPending: isGatewaysLoading } = useQuery(gatewaysQueryKeys.list()); + const selectedClient = watch("inputs.client"); const updateDynamicSecret = useUpdateDynamicSecret(); const selectedGatewayId = watch("inputs.gatewayId"); @@ -407,13 +410,34 @@ export const EditDynamicSecretSqlProviderForm = ({ />
+ {selectedClient === SqlProviders.MsSQL && ( +
+ ( + + + Encrypt Connection (SSL) + + + )} + /> +
+ )} ( diff --git a/frontend/src/pages/user/PersonalSettingsPage/components/ChangeEmailSection/ChangeEmailSection.tsx b/frontend/src/pages/user/PersonalSettingsPage/components/ChangeEmailSection/ChangeEmailSection.tsx new file mode 100644 index 0000000000..723ed74980 --- /dev/null +++ b/frontend/src/pages/user/PersonalSettingsPage/components/ChangeEmailSection/ChangeEmailSection.tsx @@ -0,0 +1,245 @@ +import { useState } from "react"; +import ReactCodeInput from "react-code-input"; +import { Controller, useForm, useWatch } from "react-hook-form"; +import { zodResolver } from "@hookform/resolvers/zod"; +import { useNavigate } from "@tanstack/react-router"; +import { z } from "zod"; + +import { createNotification } from "@app/components/notifications"; +import { Button, FormControl, Input, Modal, ModalContent } from "@app/components/v2"; +import { useUser } from "@app/context"; +import { useRequestEmailChangeOTP, useUpdateUserEmail } from "@app/hooks/api/users"; +import { clearSession } from "@app/hooks/api/users/queries"; + +const emailSchema = z + .object({ + newEmail: z.string().email("Please enter a valid email") + }) + .required(); + +export type EmailFormData = z.infer; + +const otpInputProps = { + inputStyle: { + fontFamily: "monospace", + margin: "4px", + MozAppearance: "textfield" as const, + width: "45px", + borderRadius: "6px", + fontSize: "18px", + height: "45px", + padding: "0", + paddingLeft: "0", + paddingRight: "0", + backgroundColor: "#262626", + color: "white", + border: "1px solid #404040", + textAlign: "center" as const, + outlineColor: "#8ca542", + borderColor: "#404040" + } +}; + +export const ChangeEmailSection = () => { + const navigate = useNavigate(); + const { user } = useUser(); + const [isOTPModalOpen, setIsOTPModalOpen] = useState(false); + const [pendingEmail, setPendingEmail] = useState(""); + + const emailForm = useForm({ + defaultValues: { newEmail: "" }, + resolver: zodResolver(emailSchema) + }); + + const { mutateAsync: 
requestEmailChangeOTP, isPending: isRequestingOTP } = + useRequestEmailChangeOTP(); + const { mutateAsync: updateUserEmail, isPending: isUpdatingEmail } = useUpdateUserEmail(); + + // Watch the email field to enable/disable the button + const watchedEmail = useWatch({ + control: emailForm.control, + name: "newEmail", + defaultValue: "" + }); + + // Helper function to check if email is valid + const isEmailValid = (email: string): boolean => { + try { + emailSchema.parse({ newEmail: email }); + return true; + } catch { + return false; + } + }; + + const handleEmailSubmit = async ({ newEmail }: EmailFormData) => { + if (newEmail.toLowerCase() === user?.email?.toLowerCase()) { + createNotification({ + text: "New email must be different from current email", + type: "error" + }); + return; + } + + try { + await requestEmailChangeOTP({ newEmail }); + setPendingEmail(newEmail); + setIsOTPModalOpen(true); + + createNotification({ + text: "Verification code sent to your new email address. Check your inbox!", + type: "success" + }); + } catch (err: any) { + console.error(err); + const errorMessage = err?.response?.data?.message || "Failed to send verification code"; + createNotification({ + text: errorMessage, + type: "error" + }); + } + }; + + const [typedOTP, setTypedOTP] = useState(""); + + const handleOTPSubmit = async () => { + if (typedOTP.length !== 6) { + createNotification({ + text: "Please enter the complete 6-digit verification code", + type: "error" + }); + return; + } + + try { + await updateUserEmail({ newEmail: pendingEmail, otpCode: typedOTP }); + + createNotification({ + text: "Email updated successfully. 
You will be redirected to login.", + type: "success" + }); + + // Reset forms and close modal + emailForm.reset(); + setIsOTPModalOpen(false); + setPendingEmail(""); + setTypedOTP(""); + + // Clear frontend session/token to ensure proper logout + clearSession(true); + + // Redirect to login after a short delay + setTimeout(() => { + navigate({ to: "/login" }); + }, 2000); + } catch (err: any) { + console.error(err); + + const errorMessage = err?.response?.data?.message || "Invalid verification code"; + if (errorMessage.includes("Invalid verification code")) { + // Reset to email step so user must request new OTP + setIsOTPModalOpen(false); + setPendingEmail(""); + setTypedOTP(""); + emailForm.reset(); + + createNotification({ + text: "Invalid verification code. Please request a new one.", + type: "error" + }); + } else { + createNotification({ + text: errorMessage, + type: "error" + }); + } + } + }; + + const handleOTPModalClose = () => { + setIsOTPModalOpen(false); + setPendingEmail(""); + setTypedOTP(""); + }; + + return ( + <> +
+

Change email

+ +
+
+ ( + + + + )} + /> +
+ +

+ We'll send a 6-digit verification code to your new email address.

+
+
+ + { + if (!isOpen) handleOTPModalClose(); + }} + > + +
+
+ +
+
+ + +
+
+
+
+ + ); +}; diff --git a/frontend/src/pages/user/PersonalSettingsPage/components/ChangeEmailSection/index.tsx b/frontend/src/pages/user/PersonalSettingsPage/components/ChangeEmailSection/index.tsx new file mode 100644 index 0000000000..5a8804bdec --- /dev/null +++ b/frontend/src/pages/user/PersonalSettingsPage/components/ChangeEmailSection/index.tsx @@ -0,0 +1 @@ +export { ChangeEmailSection } from "./ChangeEmailSection"; diff --git a/frontend/src/pages/user/PersonalSettingsPage/components/PersonalAuthTab/PersonalAuthTab.tsx b/frontend/src/pages/user/PersonalSettingsPage/components/PersonalAuthTab/PersonalAuthTab.tsx index b87d87dbb2..2230f1a004 100644 --- a/frontend/src/pages/user/PersonalSettingsPage/components/PersonalAuthTab/PersonalAuthTab.tsx +++ b/frontend/src/pages/user/PersonalSettingsPage/components/PersonalAuthTab/PersonalAuthTab.tsx @@ -2,6 +2,7 @@ import { useGetUser } from "@app/hooks/api"; import { AuthMethod } from "@app/hooks/api/users/types"; import { AuthMethodSection } from "../AuthMethodSection"; +import { ChangeEmailSection } from "../ChangeEmailSection"; import { ChangePasswordSection } from "../ChangePasswordSection"; import { MFASection } from "../SecuritySection"; @@ -16,6 +17,7 @@ export const PersonalAuthTab = () => { )} + {user && !user.authMethods.includes(AuthMethod.LDAP) && }
); }; diff --git a/helm-charts/infisical-gateway/Chart.yaml b/helm-charts/infisical-gateway/Chart.yaml index 17c0a3785a..2dc9ef7967 100644 --- a/helm-charts/infisical-gateway/Chart.yaml +++ b/helm-charts/infisical-gateway/Chart.yaml @@ -15,10 +15,10 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.0.5 +version: 1.0.0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. -appVersion: "0.0.5" +appVersion: "1.0.0" diff --git a/helm-charts/infisical-gateway/templates/deployment.yaml b/helm-charts/infisical-gateway/templates/deployment.yaml index a6fac0e7c7..d31a9c9e94 100644 --- a/helm-charts/infisical-gateway/templates/deployment.yaml +++ b/helm-charts/infisical-gateway/templates/deployment.yaml @@ -39,6 +39,7 @@ spec: imagePullPolicy: {{ .Values.image.pullPolicy }} args: - gateway + - start envFrom: - secretRef: name: {{ .Values.secret.name }} diff --git a/helm-charts/infisical-gateway/values.yaml b/helm-charts/infisical-gateway/values.yaml index 2e293d1e16..67874055f4 100644 --- a/helm-charts/infisical-gateway/values.yaml +++ b/helm-charts/infisical-gateway/values.yaml @@ -1,6 +1,6 @@ image: pullPolicy: IfNotPresent - tag: "0.41.84" + tag: "0.42.0" secret: # The secret that contains the environment variables to be used by the gateway, such as INFISICAL_API_URL and TOKEN