diff --git a/.env.example b/.env.example new file mode 100644 index 0000000000..bdb3e536d0 --- /dev/null +++ b/.env.example @@ -0,0 +1,65 @@ +# Keys +# Required key for platform encryption/decryption ops +# THIS IS A SAMPLE ENCRYPTION KEY AND SHOULD NEVER BE USED FOR PRODUCTION +ENCRYPTION_KEY=6c1fe4e407b8911c104518103505b218 + +# JWT +# Required secrets to sign JWT tokens +# THIS IS A SAMPLE AUTH_SECRET KEY AND SHOULD NEVER BE USED FOR PRODUCTION +AUTH_SECRET=5lrMXKKWCVocS/uerPsl7V+TX/aaUaI7iDkgl3tSmLE= + +# Postgres creds +POSTGRES_PASSWORD=infisical +POSTGRES_USER=infisical +POSTGRES_DB=infisical + +# Required +DB_CONNECTION_URI=postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@db:5432/${POSTGRES_DB} + +# Redis +REDIS_URL=redis://redis:6379 + +# Website URL +# Required +SITE_URL=http://localhost:8080 + +# Mail/SMTP +SMTP_HOST= +SMTP_PORT= +SMTP_NAME= +SMTP_USERNAME= +SMTP_PASSWORD= + +# Integration +# Optional only if integration is used +CLIENT_ID_HEROKU= +CLIENT_ID_VERCEL= +CLIENT_ID_NETLIFY= +CLIENT_ID_GITHUB= +CLIENT_ID_GITLAB= +CLIENT_ID_BITBUCKET= +CLIENT_SECRET_HEROKU= +CLIENT_SECRET_VERCEL= +CLIENT_SECRET_NETLIFY= +CLIENT_SECRET_GITHUB= +CLIENT_SECRET_GITLAB= +CLIENT_SECRET_BITBUCKET= +CLIENT_SLUG_VERCEL= + +# Sentry (optional) for monitoring errors +SENTRY_DSN= + +# Infisical Cloud-specific configs +# Ignore - Not applicable for self-hosted version +POSTHOG_HOST= +POSTHOG_PROJECT_API_KEY= + +# SSO-specific variables +CLIENT_ID_GOOGLE_LOGIN= +CLIENT_SECRET_GOOGLE_LOGIN= + +CLIENT_ID_GITHUB_LOGIN= +CLIENT_SECRET_GITHUB_LOGIN= + +CLIENT_ID_GITLAB_LOGIN= +CLIENT_SECRET_GITLAB_LOGIN= diff --git a/backend/src/@types/fastify.d.ts b/backend/src/@types/fastify.d.ts index a4c3eea7bc..b3e9d99521 100644 --- a/backend/src/@types/fastify.d.ts +++ b/backend/src/@types/fastify.d.ts @@ -3,6 +3,7 @@ import "fastify"; import { TUsers } from "@app/db/schemas"; import { TAuditLogServiceFactory } from "@app/ee/services/audit-log/audit-log-service"; import { TCreateAuditLogDTO } from "@app/ee/services/audit-log/audit-log-types"; +import { TAuditLogStreamServiceFactory } from "@app/ee/services/audit-log-stream/audit-log-stream-service"; import { TDynamicSecretServiceFactory } from "@app/ee/services/dynamic-secret/dynamic-secret-service"; import { TDynamicSecretLeaseServiceFactory } from "@app/ee/services/dynamic-secret-lease/dynamic-secret-lease-service"; import { TGroupServiceFactory } from "@app/ee/services/group/group-service"; @@ -120,6 +121,7 @@ declare module "fastify" { scim: TScimServiceFactory; ldap: TLdapConfigServiceFactory; auditLog: TAuditLogServiceFactory; + auditLogStream: TAuditLogStreamServiceFactory; secretScanning: TSecretScanningServiceFactory; license: TLicenseServiceFactory; trustedIp: TTrustedIpServiceFactory; diff --git a/backend/src/@types/knex.d.ts b/backend/src/@types/knex.d.ts index 8845c1d016..a7d76e9449 100644 --- a/backend/src/@types/knex.d.ts +++ b/backend/src/@types/knex.d.ts @@ -7,6 +7,9 @@ import { TApiKeysUpdate, TAuditLogs, TAuditLogsInsert, + TAuditLogStreams, + TAuditLogStreamsInsert, + TAuditLogStreamsUpdate, TAuditLogsUpdate, TAuthTokens, TAuthTokenSessions, @@ -404,6 +407,11 @@ declare module "knex/types/tables" { [TableName.LdapGroupMap]: Knex.CompositeTableType; [TableName.OrgBot]: Knex.CompositeTableType; [TableName.AuditLog]: Knex.CompositeTableType; + [TableName.AuditLogStream]: Knex.CompositeTableType< + TAuditLogStreams, + TAuditLogStreamsInsert, + TAuditLogStreamsUpdate + >; [TableName.GitAppInstallSession]: Knex.CompositeTableType< 
TGitAppInstallSessions, TGitAppInstallSessionsInsert, diff --git a/backend/src/db/migrations/20240503101144_audit-log-stream.ts b/backend/src/db/migrations/20240503101144_audit-log-stream.ts new file mode 100644 index 0000000000..210ee1bfa5 --- /dev/null +++ b/backend/src/db/migrations/20240503101144_audit-log-stream.ts @@ -0,0 +1,28 @@ +import { Knex } from "knex"; + +import { TableName } from "../schemas"; +import { createOnUpdateTrigger, dropOnUpdateTrigger } from "../utils"; + +export async function up(knex: Knex): Promise { + if (!(await knex.schema.hasTable(TableName.AuditLogStream))) { + await knex.schema.createTable(TableName.AuditLogStream, (t) => { + t.uuid("id", { primaryKey: true }).defaultTo(knex.fn.uuid()); + t.string("url").notNullable(); + t.text("encryptedHeadersCiphertext"); + t.text("encryptedHeadersIV"); + t.text("encryptedHeadersTag"); + t.string("encryptedHeadersAlgorithm"); + t.string("encryptedHeadersKeyEncoding"); + t.uuid("orgId").notNullable(); + t.foreign("orgId").references("id").inTable(TableName.Organization).onDelete("CASCADE"); + t.timestamps(true, true, true); + }); + } + + await createOnUpdateTrigger(knex, TableName.AuditLogStream); +} + +export async function down(knex: Knex): Promise { + await dropOnUpdateTrigger(knex, TableName.AuditLogStream); + await knex.schema.dropTableIfExists(TableName.AuditLogStream); +} diff --git a/backend/src/db/schemas/audit-log-streams.ts b/backend/src/db/schemas/audit-log-streams.ts new file mode 100644 index 0000000000..901dd8d272 --- /dev/null +++ b/backend/src/db/schemas/audit-log-streams.ts @@ -0,0 +1,25 @@ +// Code generated by automation script, DO NOT EDIT. +// Automated by pulling database and generating zod schema +// To update. Just run npm run generate:schema +// Written by akhilmhdh. 
+ +import { z } from "zod"; + +import { TImmutableDBKeys } from "./models"; + +export const AuditLogStreamsSchema = z.object({ + id: z.string().uuid(), + url: z.string(), + encryptedHeadersCiphertext: z.string().nullable().optional(), + encryptedHeadersIV: z.string().nullable().optional(), + encryptedHeadersTag: z.string().nullable().optional(), + encryptedHeadersAlgorithm: z.string().nullable().optional(), + encryptedHeadersKeyEncoding: z.string().nullable().optional(), + orgId: z.string().uuid(), + createdAt: z.date(), + updatedAt: z.date() +}); + +export type TAuditLogStreams = z.infer; +export type TAuditLogStreamsInsert = Omit, TImmutableDBKeys>; +export type TAuditLogStreamsUpdate = Partial, TImmutableDBKeys>>; diff --git a/backend/src/db/schemas/index.ts b/backend/src/db/schemas/index.ts index 30d6208b8d..0eb9b19868 100644 --- a/backend/src/db/schemas/index.ts +++ b/backend/src/db/schemas/index.ts @@ -1,4 +1,5 @@ export * from "./api-keys"; +export * from "./audit-log-streams"; export * from "./audit-logs"; export * from "./auth-token-sessions"; export * from "./auth-tokens"; diff --git a/backend/src/db/schemas/models.ts b/backend/src/db/schemas/models.ts index ea70dccdb1..3baa7f40d2 100644 --- a/backend/src/db/schemas/models.ts +++ b/backend/src/db/schemas/models.ts @@ -62,6 +62,7 @@ export enum TableName { LdapConfig = "ldap_configs", LdapGroupMap = "ldap_group_maps", AuditLog = "audit_logs", + AuditLogStream = "audit_log_streams", GitAppInstallSession = "git_app_install_sessions", GitAppOrg = "git_app_org", SecretScanningGitRisk = "secret_scanning_git_risks", diff --git a/backend/src/ee/routes/v1/audit-log-stream-router.ts b/backend/src/ee/routes/v1/audit-log-stream-router.ts new file mode 100644 index 0000000000..17bd9e64b9 --- /dev/null +++ b/backend/src/ee/routes/v1/audit-log-stream-router.ts @@ -0,0 +1,215 @@ +import { z } from "zod"; + +import { AUDIT_LOG_STREAMS } from "@app/lib/api-docs"; +import { readLimit } from "@app/server/config/rateLimiter"; +import { verifyAuth } from "@app/server/plugins/auth/verify-auth"; +import { SanitizedAuditLogStreamSchema } from "@app/server/routes/sanitizedSchemas"; +import { AuthMode } from "@app/services/auth/auth-type"; + +export const registerAuditLogStreamRouter = async (server: FastifyZodProvider) => { + server.route({ + method: "POST", + url: "/", + config: { + rateLimit: readLimit + }, + schema: { + description: "Create an Audit Log Stream.", + security: [ + { + bearerAuth: [] + } + ], + body: z.object({ + url: z.string().min(1).describe(AUDIT_LOG_STREAMS.CREATE.url), + headers: z + .object({ + key: z.string().min(1).trim().describe(AUDIT_LOG_STREAMS.CREATE.headers.key), + value: z.string().min(1).trim().describe(AUDIT_LOG_STREAMS.CREATE.headers.value) + }) + .describe(AUDIT_LOG_STREAMS.CREATE.headers.desc) + .array() + .optional() + }), + response: { + 200: z.object({ + auditLogStream: SanitizedAuditLogStreamSchema + }) + } + }, + onRequest: verifyAuth([AuthMode.JWT, AuthMode.IDENTITY_ACCESS_TOKEN]), + handler: async (req) => { + const auditLogStream = await server.services.auditLogStream.create({ + actorId: req.permission.id, + actor: req.permission.type, + actorOrgId: req.permission.orgId, + actorAuthMethod: req.permission.authMethod, + url: req.body.url, + headers: req.body.headers + }); + + return { auditLogStream }; + } + }); + + server.route({ + method: "PATCH", + url: "/:id", + config: { + rateLimit: readLimit + }, + schema: { + description: "Update an Audit Log Stream by ID.", + security: [ + { + bearerAuth: [] + } + ], + 
params: z.object({ + id: z.string().describe(AUDIT_LOG_STREAMS.UPDATE.id) + }), + body: z.object({ + url: z.string().optional().describe(AUDIT_LOG_STREAMS.UPDATE.url), + headers: z + .object({ + key: z.string().min(1).trim().describe(AUDIT_LOG_STREAMS.UPDATE.headers.key), + value: z.string().min(1).trim().describe(AUDIT_LOG_STREAMS.UPDATE.headers.value) + }) + .describe(AUDIT_LOG_STREAMS.UPDATE.headers.desc) + .array() + .optional() + }), + response: { + 200: z.object({ + auditLogStream: SanitizedAuditLogStreamSchema + }) + } + }, + onRequest: verifyAuth([AuthMode.JWT, AuthMode.IDENTITY_ACCESS_TOKEN]), + handler: async (req) => { + const auditLogStream = await server.services.auditLogStream.updateById({ + actorId: req.permission.id, + actor: req.permission.type, + actorOrgId: req.permission.orgId, + actorAuthMethod: req.permission.authMethod, + id: req.params.id, + url: req.body.url, + headers: req.body.headers + }); + + return { auditLogStream }; + } + }); + + server.route({ + method: "DELETE", + url: "/:id", + config: { + rateLimit: readLimit + }, + schema: { + description: "Delete an Audit Log Stream by ID.", + security: [ + { + bearerAuth: [] + } + ], + params: z.object({ + id: z.string().describe(AUDIT_LOG_STREAMS.DELETE.id) + }), + response: { + 200: z.object({ + auditLogStream: SanitizedAuditLogStreamSchema + }) + } + }, + onRequest: verifyAuth([AuthMode.JWT, AuthMode.IDENTITY_ACCESS_TOKEN]), + handler: async (req) => { + const auditLogStream = await server.services.auditLogStream.deleteById({ + actorId: req.permission.id, + actor: req.permission.type, + actorOrgId: req.permission.orgId, + actorAuthMethod: req.permission.authMethod, + id: req.params.id + }); + + return { auditLogStream }; + } + }); + + server.route({ + method: "GET", + url: "/:id", + config: { + rateLimit: readLimit + }, + schema: { + description: "Get an Audit Log Stream by ID.", + security: [ + { + bearerAuth: [] + } + ], + params: z.object({ + id: z.string().describe(AUDIT_LOG_STREAMS.GET_BY_ID.id) + }), + response: { + 200: z.object({ + auditLogStream: SanitizedAuditLogStreamSchema.extend({ + headers: z + .object({ + key: z.string(), + value: z.string() + }) + .array() + .optional() + }) + }) + } + }, + onRequest: verifyAuth([AuthMode.JWT, AuthMode.IDENTITY_ACCESS_TOKEN]), + handler: async (req) => { + const auditLogStream = await server.services.auditLogStream.getById({ + actorId: req.permission.id, + actor: req.permission.type, + actorOrgId: req.permission.orgId, + actorAuthMethod: req.permission.authMethod, + id: req.params.id + }); + + return { auditLogStream }; + } + }); + + server.route({ + method: "GET", + url: "/", + config: { + rateLimit: readLimit + }, + schema: { + description: "List Audit Log Streams.", + security: [ + { + bearerAuth: [] + } + ], + response: { + 200: z.object({ + auditLogStreams: SanitizedAuditLogStreamSchema.array() + }) + } + }, + onRequest: verifyAuth([AuthMode.JWT, AuthMode.IDENTITY_ACCESS_TOKEN]), + handler: async (req) => { + const auditLogStreams = await server.services.auditLogStream.list({ + actorId: req.permission.id, + actor: req.permission.type, + actorOrgId: req.permission.orgId, + actorAuthMethod: req.permission.authMethod + }); + + return { auditLogStreams }; + } + }); +}; diff --git a/backend/src/ee/routes/v1/index.ts b/backend/src/ee/routes/v1/index.ts index 6860098fd9..cf325b2e36 100644 --- a/backend/src/ee/routes/v1/index.ts +++ b/backend/src/ee/routes/v1/index.ts @@ -1,3 +1,4 @@ +import { registerAuditLogStreamRouter } from "./audit-log-stream-router"; import { 
registerDynamicSecretLeaseRouter } from "./dynamic-secret-lease-router"; import { registerDynamicSecretRouter } from "./dynamic-secret-router"; import { registerGroupRouter } from "./group-router"; @@ -55,6 +56,7 @@ export const registerV1EERoutes = async (server: FastifyZodProvider) => { await server.register(registerSecretRotationRouter, { prefix: "/secret-rotations" }); await server.register(registerSecretVersionRouter, { prefix: "/secret" }); await server.register(registerGroupRouter, { prefix: "/groups" }); + await server.register(registerAuditLogStreamRouter, { prefix: "/audit-log-streams" }); await server.register( async (privilegeRouter) => { await privilegeRouter.register(registerUserAdditionalPrivilegeRouter, { prefix: "/users" }); diff --git a/backend/src/ee/services/audit-log-stream/audit-log-stream-dal.ts b/backend/src/ee/services/audit-log-stream/audit-log-stream-dal.ts new file mode 100644 index 0000000000..436821ae9d --- /dev/null +++ b/backend/src/ee/services/audit-log-stream/audit-log-stream-dal.ts @@ -0,0 +1,11 @@ +import { TDbClient } from "@app/db"; +import { TableName } from "@app/db/schemas"; +import { ormify } from "@app/lib/knex"; + +export type TAuditLogStreamDALFactory = ReturnType; + +export const auditLogStreamDALFactory = (db: TDbClient) => { + const orm = ormify(db, TableName.AuditLogStream); + + return orm; +}; diff --git a/backend/src/ee/services/audit-log-stream/audit-log-stream-service.ts b/backend/src/ee/services/audit-log-stream/audit-log-stream-service.ts new file mode 100644 index 0000000000..0e313b59bb --- /dev/null +++ b/backend/src/ee/services/audit-log-stream/audit-log-stream-service.ts @@ -0,0 +1,233 @@ +import { ForbiddenError } from "@casl/ability"; +import { RawAxiosRequestHeaders } from "axios"; + +import { SecretKeyEncoding } from "@app/db/schemas"; +import { request } from "@app/lib/config/request"; +import { infisicalSymmetricDecrypt, infisicalSymmetricEncypt } from "@app/lib/crypto/encryption"; +import { BadRequestError } from "@app/lib/errors"; +import { validateLocalIps } from "@app/lib/validator"; + +import { AUDIT_LOG_STREAM_TIMEOUT } from "../audit-log/audit-log-queue"; +import { TLicenseServiceFactory } from "../license/license-service"; +import { OrgPermissionActions, OrgPermissionSubjects } from "../permission/org-permission"; +import { TPermissionServiceFactory } from "../permission/permission-service"; +import { TAuditLogStreamDALFactory } from "./audit-log-stream-dal"; +import { + LogStreamHeaders, + TCreateAuditLogStreamDTO, + TDeleteAuditLogStreamDTO, + TGetDetailsAuditLogStreamDTO, + TListAuditLogStreamDTO, + TUpdateAuditLogStreamDTO +} from "./audit-log-stream-types"; + +type TAuditLogStreamServiceFactoryDep = { + auditLogStreamDAL: TAuditLogStreamDALFactory; + permissionService: Pick; + licenseService: Pick; +}; + +export type TAuditLogStreamServiceFactory = ReturnType; + +export const auditLogStreamServiceFactory = ({ + auditLogStreamDAL, + permissionService, + licenseService +}: TAuditLogStreamServiceFactoryDep) => { + const create = async ({ + url, + actor, + headers = [], + actorId, + actorOrgId, + actorAuthMethod + }: TCreateAuditLogStreamDTO) => { + if (!actorOrgId) throw new BadRequestError({ message: "Missing org id from token" }); + + const plan = await licenseService.getPlan(actorOrgId); + if (!plan.auditLogStreams) + throw new BadRequestError({ + message: "Failed to create audit log streams due to plan restriction. Upgrade plan to create group." 
+ }); + + const { permission } = await permissionService.getOrgPermission( + actor, + actorId, + actorOrgId, + actorAuthMethod, + actorOrgId + ); + ForbiddenError.from(permission).throwUnlessCan(OrgPermissionActions.Create, OrgPermissionSubjects.Settings); + + validateLocalIps(url); + + const totalStreams = await auditLogStreamDAL.find({ orgId: actorOrgId }); + if (totalStreams.length >= plan.auditLogStreamLimit) { + throw new BadRequestError({ + message: + "Failed to create audit log streams due to plan limit reached. Kindly contact Infisical to add more streams." + }); + } + + // testing connection first + const streamHeaders: RawAxiosRequestHeaders = { "Content-Type": "application/json" }; + if (headers.length) + headers.forEach(({ key, value }) => { + streamHeaders[key] = value; + }); + await request + .post( + url, + { ping: "ok" }, + { + headers: streamHeaders, + // request timeout + timeout: AUDIT_LOG_STREAM_TIMEOUT, + // connection timeout + signal: AbortSignal.timeout(AUDIT_LOG_STREAM_TIMEOUT) + } + ) + .catch((err) => { + throw new Error(`Failed to connect with the source ${(err as Error)?.message}`); + }); + const encryptedHeaders = headers ? infisicalSymmetricEncypt(JSON.stringify(headers)) : undefined; + const logStream = await auditLogStreamDAL.create({ + orgId: actorOrgId, + url, + ...(encryptedHeaders + ? { + encryptedHeadersCiphertext: encryptedHeaders.ciphertext, + encryptedHeadersIV: encryptedHeaders.iv, + encryptedHeadersTag: encryptedHeaders.tag, + encryptedHeadersAlgorithm: encryptedHeaders.algorithm, + encryptedHeadersKeyEncoding: encryptedHeaders.encoding + } + : {}) + }); + return logStream; + }; + + const updateById = async ({ + id, + url, + actor, + headers = [], + actorId, + actorOrgId, + actorAuthMethod + }: TUpdateAuditLogStreamDTO) => { + if (!actorOrgId) throw new BadRequestError({ message: "Missing org id from token" }); + + const plan = await licenseService.getPlan(actorOrgId); + if (!plan.auditLogStreams) + throw new BadRequestError({ + message: "Failed to update audit log streams due to plan restriction. Upgrade plan to create group." + }); + + const logStream = await auditLogStreamDAL.findById(id); + if (!logStream) throw new BadRequestError({ message: "Audit log stream not found" }); + + const { orgId } = logStream; + const { permission } = await permissionService.getOrgPermission(actor, actorId, orgId, actorAuthMethod, actorOrgId); + ForbiddenError.from(permission).throwUnlessCan(OrgPermissionActions.Edit, OrgPermissionSubjects.Settings); + + if (url) validateLocalIps(url); + + // testing connection first + const streamHeaders: RawAxiosRequestHeaders = { "Content-Type": "application/json" }; + if (headers.length) + headers.forEach(({ key, value }) => { + streamHeaders[key] = value; + }); + + await request + .post( + url || logStream.url, + { ping: "ok" }, + { + headers: streamHeaders, + // request timeout + timeout: AUDIT_LOG_STREAM_TIMEOUT, + // connection timeout + signal: AbortSignal.timeout(AUDIT_LOG_STREAM_TIMEOUT) + } + ) + .catch((err) => { + throw new Error(`Failed to connect with the source ${(err as Error)?.message}`); + }); + + const encryptedHeaders = headers ? infisicalSymmetricEncypt(JSON.stringify(headers)) : undefined; + const updatedLogStream = await auditLogStreamDAL.updateById(id, { + url, + ...(encryptedHeaders + ? 
{ + encryptedHeadersCiphertext: encryptedHeaders.ciphertext, + encryptedHeadersIV: encryptedHeaders.iv, + encryptedHeadersTag: encryptedHeaders.tag, + encryptedHeadersAlgorithm: encryptedHeaders.algorithm, + encryptedHeadersKeyEncoding: encryptedHeaders.encoding + } + : {}) + }); + return updatedLogStream; + }; + + const deleteById = async ({ id, actor, actorId, actorOrgId, actorAuthMethod }: TDeleteAuditLogStreamDTO) => { + if (!actorOrgId) throw new BadRequestError({ message: "Missing org id from token" }); + + const logStream = await auditLogStreamDAL.findById(id); + if (!logStream) throw new BadRequestError({ message: "Audit log stream not found" }); + + const { orgId } = logStream; + const { permission } = await permissionService.getOrgPermission(actor, actorId, orgId, actorAuthMethod, actorOrgId); + ForbiddenError.from(permission).throwUnlessCan(OrgPermissionActions.Delete, OrgPermissionSubjects.Settings); + + const deletedLogStream = await auditLogStreamDAL.deleteById(id); + return deletedLogStream; + }; + + const getById = async ({ id, actor, actorId, actorOrgId, actorAuthMethod }: TGetDetailsAuditLogStreamDTO) => { + const logStream = await auditLogStreamDAL.findById(id); + if (!logStream) throw new BadRequestError({ message: "Audit log stream not found" }); + + const { orgId } = logStream; + const { permission } = await permissionService.getOrgPermission(actor, actorId, orgId, actorAuthMethod, actorOrgId); + ForbiddenError.from(permission).throwUnlessCan(OrgPermissionActions.Read, OrgPermissionSubjects.Settings); + + const headers = + logStream?.encryptedHeadersCiphertext && logStream?.encryptedHeadersIV && logStream?.encryptedHeadersTag + ? (JSON.parse( + infisicalSymmetricDecrypt({ + tag: logStream.encryptedHeadersTag, + iv: logStream.encryptedHeadersIV, + ciphertext: logStream.encryptedHeadersCiphertext, + keyEncoding: logStream.encryptedHeadersKeyEncoding as SecretKeyEncoding + }) + ) as LogStreamHeaders[]) + : undefined; + + return { ...logStream, headers }; + }; + + const list = async ({ actor, actorId, actorOrgId, actorAuthMethod }: TListAuditLogStreamDTO) => { + const { permission } = await permissionService.getOrgPermission( + actor, + actorId, + actorOrgId, + actorAuthMethod, + actorOrgId + ); + ForbiddenError.from(permission).throwUnlessCan(OrgPermissionActions.Read, OrgPermissionSubjects.Settings); + + const logStreams = await auditLogStreamDAL.find({ orgId: actorOrgId }); + return logStreams; + }; + + return { + create, + updateById, + deleteById, + getById, + list + }; +}; diff --git a/backend/src/ee/services/audit-log-stream/audit-log-stream-types.ts b/backend/src/ee/services/audit-log-stream/audit-log-stream-types.ts new file mode 100644 index 0000000000..3c22251d79 --- /dev/null +++ b/backend/src/ee/services/audit-log-stream/audit-log-stream-types.ts @@ -0,0 +1,27 @@ +import { TOrgPermission } from "@app/lib/types"; + +export type LogStreamHeaders = { + key: string; + value: string; +}; + +export type TCreateAuditLogStreamDTO = Omit & { + url: string; + headers?: LogStreamHeaders[]; +}; + +export type TUpdateAuditLogStreamDTO = Omit & { + id: string; + url?: string; + headers?: LogStreamHeaders[]; +}; + +export type TDeleteAuditLogStreamDTO = Omit & { + id: string; +}; + +export type TListAuditLogStreamDTO = Omit; + +export type TGetDetailsAuditLogStreamDTO = Omit & { + id: string; +}; diff --git a/backend/src/ee/services/audit-log/audit-log-queue.ts b/backend/src/ee/services/audit-log/audit-log-queue.ts index afffd463da..6c563b5738 100644 --- 
a/backend/src/ee/services/audit-log/audit-log-queue.ts +++ b/backend/src/ee/services/audit-log/audit-log-queue.ts @@ -1,13 +1,21 @@ +import { RawAxiosRequestHeaders } from "axios"; + +import { SecretKeyEncoding } from "@app/db/schemas"; +import { request } from "@app/lib/config/request"; +import { infisicalSymmetricDecrypt } from "@app/lib/crypto/encryption"; import { logger } from "@app/lib/logger"; import { QueueJobs, QueueName, TQueueServiceFactory } from "@app/queue"; import { TProjectDALFactory } from "@app/services/project/project-dal"; +import { TAuditLogStreamDALFactory } from "../audit-log-stream/audit-log-stream-dal"; +import { LogStreamHeaders } from "../audit-log-stream/audit-log-stream-types"; import { TLicenseServiceFactory } from "../license/license-service"; import { TAuditLogDALFactory } from "./audit-log-dal"; import { TCreateAuditLogDTO } from "./audit-log-types"; type TAuditLogQueueServiceFactoryDep = { auditLogDAL: TAuditLogDALFactory; + auditLogStreamDAL: Pick; queueService: TQueueServiceFactory; projectDAL: Pick; licenseService: Pick; @@ -15,11 +23,15 @@ type TAuditLogQueueServiceFactoryDep = { export type TAuditLogQueueServiceFactory = ReturnType; +// keep this timeout 5s it must be fast because else the queue will take time to finish +// audit log is a crowded queue thus needs to be fast +export const AUDIT_LOG_STREAM_TIMEOUT = 5 * 1000; export const auditLogQueueServiceFactory = ({ auditLogDAL, queueService, projectDAL, - licenseService + licenseService, + auditLogStreamDAL }: TAuditLogQueueServiceFactoryDep) => { const pushToLog = async (data: TCreateAuditLogDTO) => { await queueService.queue(QueueName.AuditLog, QueueJobs.AuditLog, data, { @@ -47,7 +59,7 @@ export const auditLogQueueServiceFactory = ({ // skip inserting if audit log retention is 0 meaning its not supported if (ttl === 0) return; - await auditLogDAL.create({ + const auditLog = await auditLogDAL.create({ actor: actor.type, actorMetadata: actor.metadata, userAgent, @@ -59,6 +71,46 @@ export const auditLogQueueServiceFactory = ({ eventMetadata: event.metadata, userAgentType }); + + const logStreams = orgId ? await auditLogStreamDAL.find({ orgId }) : []; + await Promise.allSettled( + logStreams.map( + async ({ + url, + encryptedHeadersTag, + encryptedHeadersIV, + encryptedHeadersKeyEncoding, + encryptedHeadersCiphertext + }) => { + const streamHeaders = + encryptedHeadersIV && encryptedHeadersCiphertext && encryptedHeadersTag + ? 
(JSON.parse( + infisicalSymmetricDecrypt({ + keyEncoding: encryptedHeadersKeyEncoding as SecretKeyEncoding, + iv: encryptedHeadersIV, + tag: encryptedHeadersTag, + ciphertext: encryptedHeadersCiphertext + }) + ) as LogStreamHeaders[]) + : []; + + const headers: RawAxiosRequestHeaders = { "Content-Type": "application/json" }; + + if (streamHeaders.length) + streamHeaders.forEach(({ key, value }) => { + headers[key] = value; + }); + + return request.post(url, auditLog, { + headers, + // request timeout + timeout: AUDIT_LOG_STREAM_TIMEOUT, + // connection timeout + signal: AbortSignal.timeout(AUDIT_LOG_STREAM_TIMEOUT) + }); + } + ) + ); }); queueService.start(QueueName.AuditLogPrune, async () => { diff --git a/backend/src/ee/services/license/licence-fns.ts b/backend/src/ee/services/license/licence-fns.ts index 8a4de57f1e..189a3c4e06 100644 --- a/backend/src/ee/services/license/licence-fns.ts +++ b/backend/src/ee/services/license/licence-fns.ts @@ -24,6 +24,8 @@ export const getDefaultOnPremFeatures = (): TFeatureSet => ({ customAlerts: false, auditLogs: false, auditLogsRetentionDays: 0, + auditLogStreams: false, + auditLogStreamLimit: 3, samlSSO: false, scim: false, ldap: false, diff --git a/backend/src/ee/services/license/license-types.ts b/backend/src/ee/services/license/license-types.ts index 1cea39a834..a2379ddaa5 100644 --- a/backend/src/ee/services/license/license-types.ts +++ b/backend/src/ee/services/license/license-types.ts @@ -40,6 +40,8 @@ export type TFeatureSet = { customAlerts: false; auditLogs: false; auditLogsRetentionDays: 0; + auditLogStreams: false; + auditLogStreamLimit: 3; samlSSO: false; scim: false; ldap: false; diff --git a/backend/src/lib/api-docs/constants.ts b/backend/src/lib/api-docs/constants.ts index 04b7509cad..efcb03bd32 100644 --- a/backend/src/lib/api-docs/constants.ts +++ b/backend/src/lib/api-docs/constants.ts @@ -272,6 +272,7 @@ export const SECRETS = { export const RAW_SECRETS = { LIST: { + expand: "Whether or not to expand secret references.", recursive: "Whether or not to fetch all secrets from the specified base path, and all of its subdirectories. Note, the max depth is 20 deep.", workspaceId: "The ID of the project to list secrets from.", @@ -614,3 +615,29 @@ export const INTEGRATION = { integrationId: "The ID of the integration object." } }; + +export const AUDIT_LOG_STREAMS = { + CREATE: { + url: "The HTTP URL to push logs to.", + headers: { + desc: "The HTTP headers attached to requests sent to the external provider.", + key: "The HTTP header key name.", + value: "The HTTP header value." + } + }, + UPDATE: { + id: "The ID of the audit log stream to update.", + url: "The HTTP URL to push logs to.", + headers: { + desc: "The HTTP headers attached to requests sent to the external provider.", + key: "The HTTP header key name.", + value: "The HTTP header value." + } + }, + DELETE: { + id: "The ID of the audit log stream to delete." + }, + GET_BY_ID: { + id: "The ID of the audit log stream to get details for."
+ } +}; diff --git a/backend/src/lib/config/env.ts b/backend/src/lib/config/env.ts index 4d3d55ffd2..f9bbb1b46c 100644 --- a/backend/src/lib/config/env.ts +++ b/backend/src/lib/config/env.ts @@ -119,6 +119,7 @@ const envSchema = z }) .transform((data) => ({ ...data, + isCloud: Boolean(data.LICENSE_SERVER_KEY), isSmtpConfigured: Boolean(data.SMTP_HOST), isRedisConfigured: Boolean(data.REDIS_URL), isDevelopmentMode: data.NODE_ENV === "development", diff --git a/backend/src/lib/types/index.ts b/backend/src/lib/types/index.ts index b3b46e7398..2c41f4d238 100644 --- a/backend/src/lib/types/index.ts +++ b/backend/src/lib/types/index.ts @@ -17,7 +17,7 @@ export type TOrgPermission = { actorId: string; orgId: string; actorAuthMethod: ActorAuthMethod; - actorOrgId: string | undefined; + actorOrgId: string; }; export type TProjectPermission = { diff --git a/backend/src/lib/validator/index.ts b/backend/src/lib/validator/index.ts index 6bc4156807..6a70d85713 100644 --- a/backend/src/lib/validator/index.ts +++ b/backend/src/lib/validator/index.ts @@ -1 +1,2 @@ export { isDisposableEmail } from "./validate-email"; +export { validateLocalIps } from "./validate-url"; diff --git a/backend/src/lib/validator/validate-url.ts b/backend/src/lib/validator/validate-url.ts new file mode 100644 index 0000000000..9a953be1ae --- /dev/null +++ b/backend/src/lib/validator/validate-url.ts @@ -0,0 +1,18 @@ +import { getConfig } from "../config/env"; +import { BadRequestError } from "../errors"; + +export const validateLocalIps = (url: string) => { + const validUrl = new URL(url); + const appCfg = getConfig(); + // on cloud local ips are not allowed + if ( + appCfg.isCloud && + (validUrl.host === "host.docker.internal" || + validUrl.host.match(/^10\.\d+\.\d+\.\d+/) || + validUrl.host.match(/^192\.168\.\d+\.\d+/)) + ) + throw new BadRequestError({ message: "Local IPs not allowed as URL" }); + + if (validUrl.host === "localhost" || validUrl.host === "127.0.0.1") + throw new BadRequestError({ message: "Localhost not allowed" }); +}; diff --git a/backend/src/server/routes/index.ts b/backend/src/server/routes/index.ts index c75346d1e1..fd095389a4 100644 --- a/backend/src/server/routes/index.ts +++ b/backend/src/server/routes/index.ts @@ -5,6 +5,8 @@ import { registerV1EERoutes } from "@app/ee/routes/v1"; import { auditLogDALFactory } from "@app/ee/services/audit-log/audit-log-dal"; import { auditLogQueueServiceFactory } from "@app/ee/services/audit-log/audit-log-queue"; import { auditLogServiceFactory } from "@app/ee/services/audit-log/audit-log-service"; +import { auditLogStreamDALFactory } from "@app/ee/services/audit-log-stream/audit-log-stream-dal"; +import { auditLogStreamServiceFactory } from "@app/ee/services/audit-log-stream/audit-log-stream-service"; import { dynamicSecretDALFactory } from "@app/ee/services/dynamic-secret/dynamic-secret-dal"; import { dynamicSecretServiceFactory } from "@app/ee/services/dynamic-secret/dynamic-secret-service"; import { buildDynamicSecretProviders } from "@app/ee/services/dynamic-secret/providers"; @@ -195,6 +197,7 @@ export const registerRoutes = async ( const identityUaClientSecretDAL = identityUaClientSecretDALFactory(db); const auditLogDAL = auditLogDALFactory(db); + const auditLogStreamDAL = auditLogStreamDALFactory(db); const trustedIpDAL = trustedIpDALFactory(db); const telemetryDAL = telemetryDALFactory(db); @@ -245,9 +248,15 @@ export const registerRoutes = async ( auditLogDAL, queueService, projectDAL, - licenseService + licenseService, + auditLogStreamDAL }); const 
auditLogService = auditLogServiceFactory({ auditLogDAL, permissionService, auditLogQueue }); + const auditLogStreamService = auditLogStreamServiceFactory({ + licenseService, + permissionService, + auditLogStreamDAL + }); const sapService = secretApprovalPolicyServiceFactory({ projectMembershipDAL, projectEnvDAL, @@ -730,6 +739,7 @@ export const registerRoutes = async ( saml: samlService, ldap: ldapService, auditLog: auditLogService, + auditLogStream: auditLogStreamService, secretScanning: secretScanningService, license: licenseService, trustedIp: trustedIpService, diff --git a/backend/src/server/routes/sanitizedSchemas.ts b/backend/src/server/routes/sanitizedSchemas.ts index eaae4149cf..a0b792789c 100644 --- a/backend/src/server/routes/sanitizedSchemas.ts +++ b/backend/src/server/routes/sanitizedSchemas.ts @@ -69,3 +69,10 @@ export const SanitizedDynamicSecretSchema = DynamicSecretsSchema.omit({ keyEncoding: true, algorithm: true }); + +export const SanitizedAuditLogStreamSchema = z.object({ + id: z.string(), + url: z.string(), + createdAt: z.date(), + updatedAt: z.date() +}); diff --git a/backend/src/server/routes/v2/organization-router.ts b/backend/src/server/routes/v2/organization-router.ts index e8204222e0..07074eba32 100644 --- a/backend/src/server/routes/v2/organization-router.ts +++ b/backend/src/server/routes/v2/organization-router.ts @@ -76,6 +76,7 @@ export const registerOrgRouter = async (server: FastifyZodProvider) => { .object({ id: z.string(), name: z.string(), + slug: z.string(), organization: z.string(), environments: z .object({ diff --git a/backend/src/server/routes/v3/secret-router.ts b/backend/src/server/routes/v3/secret-router.ts index 955aa01be6..cae51f8583 100644 --- a/backend/src/server/routes/v3/secret-router.ts +++ b/backend/src/server/routes/v3/secret-router.ts @@ -166,6 +166,11 @@ export const registerSecretRouter = async (server: FastifyZodProvider) => { workspaceSlug: z.string().trim().optional().describe(RAW_SECRETS.LIST.workspaceSlug), environment: z.string().trim().optional().describe(RAW_SECRETS.LIST.environment), secretPath: z.string().trim().default("/").transform(removeTrailingSlash).describe(RAW_SECRETS.LIST.secretPath), + expandSecretReferences: z + .enum(["true", "false"]) + .default("false") + .transform((value) => value === "true") + .describe(RAW_SECRETS.LIST.expand), recursive: z .enum(["true", "false"]) .default("false") @@ -233,6 +238,7 @@ export const registerSecretRouter = async (server: FastifyZodProvider) => { actor: req.permission.type, actorOrgId: req.permission.orgId, environment, + expandSecretReferences: req.query.expandSecretReferences, actorAuthMethod: req.permission.authMethod, projectId: workspaceId, path: secretPath, diff --git a/backend/src/services/auth/auth-signup-service.ts b/backend/src/services/auth/auth-signup-service.ts index 31ca5552fa..5aea537869 100644 --- a/backend/src/services/auth/auth-signup-service.ts +++ b/backend/src/services/auth/auth-signup-service.ts @@ -82,7 +82,7 @@ export const authSignupServiceFactory = ({ await smtpService.sendMail({ template: SmtpTemplates.SignupEmailVerification, subjectLine: "Infisical confirmation code", - recipients: [email], + recipients: [user.email as string], substitutions: { code: token } diff --git a/backend/src/services/secret/secret-service.ts b/backend/src/services/secret/secret-service.ts index 5c7b9bef9a..3f647d8a9b 100644 --- a/backend/src/services/secret/secret-service.ts +++ b/backend/src/services/secret/secret-service.ts @@ -27,6 +27,7 @@ import { 
fnSecretBlindIndexCheck, fnSecretBulkInsert, fnSecretBulkUpdate, + interpolateSecrets, recursivelyGetSecretPaths } from "./secret-fns"; import { TSecretQueueFactory } from "./secret-queue"; @@ -885,6 +886,7 @@ export const secretServiceFactory = ({ actorAuthMethod, environment, includeImports, + expandSecretReferences, recursive }: TGetSecretsRawDTO) => { const botKey = await projectBotService.getBotKey(projectId); @@ -902,17 +904,66 @@ export const secretServiceFactory = ({ recursive }); - return { - secrets: secrets.map((el) => decryptSecretRaw(el, botKey)), - imports: (imports || [])?.map(({ secrets: importedSecrets, ...el }) => ({ - ...el, - secrets: importedSecrets.map((sec) => - decryptSecretRaw( - { ...sec, environment: el.environment, workspace: projectId, secretPath: el.secretPath }, - botKey - ) + const decryptedSecrets = secrets.map((el) => decryptSecretRaw(el, botKey)); + const decryptedImports = (imports || [])?.map(({ secrets: importedSecrets, ...el }) => ({ + ...el, + secrets: importedSecrets.map((sec) => + decryptSecretRaw( + { ...sec, environment: el.environment, workspace: projectId, secretPath: el.secretPath }, + botKey ) - })) + ) + })); + + if (expandSecretReferences) { + const expandSecrets = interpolateSecrets({ + folderDAL, + projectId, + secretDAL, + secretEncKey: botKey + }); + + const batchSecretsExpand = async ( + secretBatch: { + secretKey: string; + secretValue: string; + secretComment?: string; + }[] + ) => { + const secretRecord: Record< + string, + { + value: string; + comment?: string; + skipMultilineEncoding?: boolean; + } + > = {}; + + secretBatch.forEach((decryptedSecret) => { + secretRecord[decryptedSecret.secretKey] = { + value: decryptedSecret.secretValue, + comment: decryptedSecret.secretComment + }; + }); + + await expandSecrets(secretRecord); + + secretBatch.forEach((decryptedSecret, index) => { + // eslint-disable-next-line no-param-reassign + secretBatch[index].secretValue = secretRecord[decryptedSecret.secretKey].value; + }); + }; + + // expand secrets + await batchSecretsExpand(decryptedSecrets); + + // expand imports by batch + await Promise.all(decryptedImports.map((decryptedImport) => batchSecretsExpand(decryptedImport.secrets))); + } + + return { + secrets: decryptedSecrets, + imports: decryptedImports }; }; diff --git a/backend/src/services/secret/secret-types.ts b/backend/src/services/secret/secret-types.ts index c2a0d5cf64..df0af5b5d8 100644 --- a/backend/src/services/secret/secret-types.ts +++ b/backend/src/services/secret/secret-types.ts @@ -138,6 +138,7 @@ export type TDeleteBulkSecretDTO = { } & TProjectPermission; export type TGetSecretsRawDTO = { + expandSecretReferences?: boolean; path: string; environment: string; includeImports?: boolean; diff --git a/company/documentation/getting-started/introduction.mdx b/company/documentation/getting-started/introduction.mdx new file mode 100644 index 0000000000..0f414c62a9 --- /dev/null +++ b/company/documentation/getting-started/introduction.mdx @@ -0,0 +1,97 @@ +--- +title: "What is Infisical?" +sidebarTitle: "What is Infisical?" +description: "An Introduction to the Infisical secret management platform." +--- + +Infisical is an [open-source](https://github.com/infisical/infisical) secret management platform for developers. +It provides capabilities for storing, managing, and syncing application configuration and secrets like API keys, database +credentials, and certificates across infrastructure. 
In addition, Infisical prevents secret leaks to git and enables secure +sharing of secrets among engineers. + +Start managing secrets securely with [Infisical Cloud](https://app.infisical.com) or learn how to [host Infisical](/self-hosting/overview) yourself. + + + + Get started with Infisical Cloud in just a few minutes. + + + Self-host Infisical on your own infrastructure. + + + +## Why Infisical? + +Infisical helps developers achieve secure centralized secret management and provides all the tools to easily manage secrets in various environments and infrastructure components. In particular, here are some of the most common points that developers mention after adopting Infisical: +- Streamlined **local development** processes (switching .env files to [Infisical CLI](/cli/commands/run) and removing secrets from developer machines). +- **Best-in-class developer experience** with an easy-to-use [Web Dashboard](/documentation/platform/project). +- Simple secret management inside **[CI/CD pipelines](/integrations/cicd/githubactions)** and staging environments. +- Secure and compliant secret management practices in **[production environments](/sdks/overview)**. +- **Facilitated workflows** around [secret change management](/documentation/platform/pr-workflows), [access requests](/documentation/platform/access-controls/access-requests), [temporary access provisioning](/documentation/platform/access-controls/temporary-access), and more. +- **Improved security posture** thanks to [secret scanning](/cli/scanning-overview), [granular access control policies](/documentation/platform/access-controls/overview), [automated secret rotation](https://infisical.com/docs/documentation/platform/secret-rotation/overview), and [dynamic secrets](/documentation/platform/dynamic-secrets/overview) capabilities. + +## How does Infisical work? + +To make secret management effortless and secure, Infisical follows a certain structure for enabling secret management workflows as defined below. + +**Identities** in Infisical are users or machines that have a certain set of roles and permissions assigned to them. Such identities are able to manage secrets in various **Clients** throughout the entire infrastructure. To do that, identities have to verify themselves through one of the available **Authentication Methods**. + +As a result, the 3 main concepts that are important to understand are: +- **[Identities](/documentation/platform/identities/overview)**: users or machines with a set of permissions assigned to them. +- **[Clients](/integrations/platforms/kubernetes)**: Infisical-developed tools for managing secrets in various infrastructure components (e.g., [Kubernetes Operator](/integrations/platforms/kubernetes), [Infisical Agent](/integrations/platforms/infisical-agent), [CLI](/cli/usage), [SDKs](/sdks/overview), [API](/api-reference/overview/introduction), [Web Dashboard](/documentation/platform/organization)). +- **[Authentication Methods](/documentation/platform/identities/universal-auth)**: ways for Identities to authenticate inside different clients (e.g., SAML SSO for Web Dashboard, Universal Auth for Infisical Agent, etc.). + +## How to get started with Infisical? + +Depending on your use case, it might be helpful to look into some of the resources and guides provided below. + + + + Inject secrets into any application process/environment. + + + Fetch secrets with any programming language on demand. + + + Inject secrets into Docker containers. + + + Fetch and save secrets as native Kubernetes secrets.
+ + + Fetch secrets via HTTP request. + + + Explore integrations for GitHub, Vercel, AWS, and more. + + diff --git a/company/favicon.png b/company/favicon.png new file mode 100644 index 0000000000..45c9b868e6 Binary files /dev/null and b/company/favicon.png differ diff --git a/company/logo/dark.svg b/company/logo/dark.svg new file mode 100644 index 0000000000..f88594746c --- /dev/null +++ b/company/logo/dark.svg @@ -0,0 +1,5 @@ + + + + + diff --git a/company/logo/light.svg b/company/logo/light.svg new file mode 100644 index 0000000000..16fc09e5e7 --- /dev/null +++ b/company/logo/light.svg @@ -0,0 +1,5 @@ + + + + + diff --git a/company/mint.json b/company/mint.json new file mode 100644 index 0000000000..d867ab7a54 --- /dev/null +++ b/company/mint.json @@ -0,0 +1,80 @@ +{ + "name": "Infisical", + "openapi": "https://app.infisical.com/api/docs/json", + "logo": { + "dark": "/logo/dark.svg", + "light": "/logo/light.svg", + "href": "https://infisical.com" + }, + "favicon": "/favicon.png", + "colors": { + "primary": "#26272b", + "light": "#97b31d", + "dark": "#A1B659", + "ultraLight": "#E7F256", + "ultraDark": "#8D9F4C", + "background": { + "light": "#ffffff", + "dark": "#0D1117" + }, + "anchors": { + "from": "#000000", + "to": "#707174" + } + }, + "modeToggle": { + "default": "light", + "isHidden": true + }, + "feedback": { + "suggestEdit": true, + "raiseIssue": true, + "thumbsRating": true + }, + "api": { + "baseUrl": ["https://app.infisical.com", "http://localhost:8080"] + }, + "topbarLinks": [ + { + "name": "Log In", + "url": "https://app.infisical.com/login" + } + ], + "topbarCtaButton": { + "name": "Start for Free", + "url": "https://app.infisical.com/signup" + }, + "tabs": [ + { + "name": "Integrations", + "url": "integrations" + }, + { + "name": "CLI", + "url": "cli" + }, + { + "name": "API Reference", + "url": "api-reference" + }, + { + "name": "SDKs", + "url": "sdks" + }, + { + "name": "Changelog", + "url": "changelog" + } + ], + "navigation": [ + { + "group": "Getting Started", + "pages": [ + "documentation/getting-started/introduction" + ] + } + ], + "integrations": { + "intercom": "hsg644ru" + } +} diff --git a/company/style.css b/company/style.css new file mode 100644 index 0000000000..b76d064500 --- /dev/null +++ b/company/style.css @@ -0,0 +1,142 @@ +#navbar .max-w-8xl { + max-width: 100%; + border-bottom: 1px solid #ebebeb; + background-color: #fcfcfc; +} + +.max-w-8xl { + /* background-color: #f5f5f5; */ +} + +#sidebar { + left: 0; + padding-left: 48px; + padding-right: 30px; + border-right: 1px; + border-color: #cdd64b; + background-color: #fcfcfc; + border-right: 1px solid #ebebeb; +} + +#sidebar .relative .sticky { + opacity: 0; +} + +#sidebar li > div.mt-2 { + border-radius: 0; + padding: 5px; +} + +#sidebar li > a.mt-2 { + border-radius: 0; + padding: 5px; +} + +#sidebar li > a.leading-6 { + border-radius: 0; + padding: 0px; +} + +/* #sidebar ul > div.mt-12 { + padding-top: 30px; + position: relative; +} + +#sidebar ul > div.mt-12 h5 { + position: absolute; + left: -12px; + top: -0px; +} */ + +#header { + border-left: 1px solid #26272b; + padding-left: 16px; + padding-right: 16px; + background-color: #f5f5f5; + padding-bottom: 10px; + padding-top: 10px; +} + +#content-area .mt-8 .block{ + border-radius: 0; + border-width: 1px; + border-color: #ebebeb; +} + +#content-area .mt-8 .rounded-xl{ + border-radius: 0; +} + +#content-area .mt-8 .rounded-lg{ + border-radius: 0; +} + +#content-area .mt-6 .rounded-xl{ + border-radius: 0; +} + +#content-area .mt-6 .rounded-lg{ + 
border-radius: 0; +} + +#content-area .mt-6 .rounded-md{ + border-radius: 0; +} + +#content-area .mt-8 .rounded-md{ + border-radius: 0; +} + +#content-area div.my-4{ + border-radius: 0; + border-width: 1px; +} + +#content-area div.flex-1 { + /* text-transform: uppercase; */ + opacity: 0.8; + font-weight: 400; +} + +#content-area button { + border-radius: 0; +} + +#content-area a { + border-radius: 0; +} + +#content-area .not-prose { + border-radius: 0; +} + +/* .eyebrow { + text-transform: uppercase; + font-weight: 400; + color: red; +} */ + +#content-container { + /* background-color: #f5f5f5; */ + margin-top: 2rem; +} + +#topbar-cta-button .group .absolute { + background-color: black; + border-radius: 0px; +} + +/* #topbar-cta-button .group .absolute:hover { + background-color: white; + border-radius: 0px; +} */ + +#topbar-cta-button .group .flex { + margin-top: 5px; + margin-bottom: 5px; + font-size: medium; +} + +.flex-1 .flex .items-center { + /* background-color: #f5f5f5; */ +} \ No newline at end of file diff --git a/docker-swarm/haproxy.cfg b/docker-swarm/haproxy.cfg index 3717fedacc..984943c25a 100644 --- a/docker-swarm/haproxy.cfg +++ b/docker-swarm/haproxy.cfg @@ -24,16 +24,16 @@ resolvers hostdns timeout retry 1s hold valid 5s -frontend master +frontend postgres_master bind *:5433 - default_backend master_backend + default_backend postgres_master_backend -frontend replicas +frontend postgres_replicas bind *:5434 - default_backend replica_backend + default_backend postgres_replica_backend -backend master_backend +backend postgres_master_backend option httpchk GET /master http-check expect status 200 default-server inter 3s fall 3 rise 2 on-marked-down shutdown-sessions @@ -41,7 +41,7 @@ backend master_backend server postgres-2 postgres-2:5432 check port 8008 resolvers hostdns server postgres-3 postgres-3:5432 check port 8008 resolvers hostdns -backend replica_backend +backend postgres_replica_backend option httpchk GET /replica http-check expect status 200 default-server inter 3s fall 3 rise 2 on-marked-down shutdown-sessions @@ -50,11 +50,11 @@ backend replica_backend server postgres-3 postgres-3:5432 check port 8008 resolvers hostdns -frontend redis_frontend +frontend redis_master_frontend bind *:6379 - default_backend redis_backend + default_backend redis_master_backend -backend redis_backend +backend redis_master_backend option tcp-check tcp-check send AUTH\ 123456\r\n tcp-check expect string +OK diff --git a/docker-swarm/stack.yaml b/docker-swarm/stack.yaml index 703b076057..4087c7836f 100644 --- a/docker-swarm/stack.yaml +++ b/docker-swarm/stack.yaml @@ -5,8 +5,8 @@ services: image: haproxy:latest ports: - '7001:7000' - - '5002:5433' - - '5003:5434' + - '5002:5433' # Postgres master + - '5003:5434' # Postgres read - '6379:6379' - '8080:8080' networks: @@ -15,22 +15,18 @@ services: - source: haproxy-config target: /usr/local/etc/haproxy/haproxy.cfg deploy: - placement: - constraints: - - node.labels.name == node1 + mode: global infisical: container_name: infisical-backend - image: infisical/infisical:latest-postgres + image: infisical/infisical:v0.60.1-postgres env_file: .env - ports: - - 80:8080 - environment: - - NODE_ENV=production networks: - infisical secrets: - env_file + deploy: + replicas: 5 etcd1: image: ghcr.io/zalando/spilo-16:3.2-p2 diff --git a/docs/documentation/platform/audit-log-streams.mdx b/docs/documentation/platform/audit-log-streams.mdx new file mode 100644 index 0000000000..2a69780bc6 --- /dev/null +++ 
b/docs/documentation/platform/audit-log-streams.mdx @@ -0,0 +1,82 @@ +--- +title: "Audit Log Streams" +description: "Learn how to stream Infisical Audit Logs to external logging providers." +--- + + + Audit log streaming is a paid feature. + + If you're using Infisical Cloud, then it is available under the **Enterprise Tier**. If you're self-hosting Infisical, + then you should contact team@infisical.com to purchase an enterprise license to use it. + + +Infisical Audit Log Streaming enables you to transmit your organization's Audit Logs to external logging providers for monitoring and analysis. + +The logs are formatted in JSON, requiring your logging provider to support JSON-based log parsing. + + +## Overview + + + + + ![stream create](../../images/platform/audit-log-streams/stream-create.png) + + + ![stream inputs](../../images/platform/audit-log-streams/stream-inputs.png) + + Provide the following values: + + The HTTPS endpoint URL of the logging provider that collects the JSON stream. + + + The HTTP headers used by the logging provider for identification and authentication. + + + + +![stream list](../../images/platform/audit-log-streams/stream-list.png) +Your Audit Logs are now ready to be streamed. + +## Example Providers + +### Better Stack + + + + ![better stack connect source](../../images/platform/audit-log-streams/betterstack-create-source.png) + + + + ![better stack connect](../../images/platform/audit-log-streams/betterstack-source-details.png) + + 1. Copy the **endpoint** from Better Stack to the **Endpoint URL** field. + 2. Create a new header with key **Authorization** and set the value to **Bearer \**. + + + +### Datadog + + + + ![api key create](../../images/platform/audit-log-streams/datadog-api-sidebar.png) + + + ![api key form](../../images/platform/audit-log-streams/data-create-api-key.png) + ![api key form](../../images/platform/audit-log-streams/data-dog-api-key.png) + + + ![datadog url](../../images/platform/audit-log-streams/datadog-logging-endpoint.png) + + 1. Navigate to the [Datadog Send Logs API documentation](https://docs.datadoghq.com/api/latest/logs/?code-lang=curl&site=us5#send-logs). + 2. Pick your Datadog account region. + 3. Obtain your Datadog logging endpoint URL. + + + ![datadog api key details](../../images/platform/audit-log-streams/datadog-source-details.png) + + 1. Copy the **logging endpoint** from Datadog to the **Endpoint URL** field. + 2. Copy the **API Key** from the previous step. + 3. Create a new header with key **DD-API-KEY** and set the value to the **API Key**.
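For reference, the stream creation endpoint added in this PR accepts a JSON body of `{ url, headers }` and, before saving the stream, POSTs a `{ "ping": "ok" }` test payload to the URL with your headers attached (see `audit-log-stream-service.ts`). The sketch below is a minimal, hypothetical client call, assuming the EE v1 router is mounted at `/api/v1/audit-log-streams` on Infisical Cloud and that `INFISICAL_TOKEN` holds a valid bearer token; the collector URL and header value are placeholders.

```typescript
// Minimal sketch (not part of this PR): create an audit log stream over the new API.
// Assumptions: default /api/v1 mount, Node 18+ global fetch, INFISICAL_TOKEN set.
type LogStreamHeader = { key: string; value: string };

async function createAuditLogStream(url: string, headers: LogStreamHeader[]) {
  const res = await fetch("https://app.infisical.com/api/v1/audit-log-streams", {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      Authorization: `Bearer ${process.env.INFISICAL_TOKEN}`
    },
    // Body shape matches the zod schema in audit-log-stream-router.ts
    body: JSON.stringify({ url, headers })
  });
  if (!res.ok) throw new Error(`Failed to create stream: ${res.status}`);
  // Response shape: { auditLogStream: { id, url, createdAt, updatedAt } }
  return (await res.json()) as { auditLogStream: { id: string; url: string } };
}

// Example: point the stream at a hypothetical collector with an auth header.
void createAuditLogStream("https://logs.example.com/ingest", [
  { key: "Authorization", value: "Bearer <collector-token>" }
]);
```

Note that `validateLocalIps` rejects `localhost`/`127.0.0.1` outright, and on Infisical Cloud it also rejects `host.docker.internal` and private `10.x`/`192.168.x` addresses, so the collector must be publicly reachable.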
+ + diff --git a/docs/images/platform/audit-log-streams/betterstack-create-source.png b/docs/images/platform/audit-log-streams/betterstack-create-source.png new file mode 100644 index 0000000000..bee4513ea5 Binary files /dev/null and b/docs/images/platform/audit-log-streams/betterstack-create-source.png differ diff --git a/docs/images/platform/audit-log-streams/betterstack-source-details.png b/docs/images/platform/audit-log-streams/betterstack-source-details.png new file mode 100644 index 0000000000..d67980ae8a Binary files /dev/null and b/docs/images/platform/audit-log-streams/betterstack-source-details.png differ diff --git a/docs/images/platform/audit-log-streams/data-create-api-key.png b/docs/images/platform/audit-log-streams/data-create-api-key.png new file mode 100644 index 0000000000..d25a2c64eb Binary files /dev/null and b/docs/images/platform/audit-log-streams/data-create-api-key.png differ diff --git a/docs/images/platform/audit-log-streams/data-dog-api-key.png b/docs/images/platform/audit-log-streams/data-dog-api-key.png new file mode 100644 index 0000000000..8e49e89e79 Binary files /dev/null and b/docs/images/platform/audit-log-streams/data-dog-api-key.png differ diff --git a/docs/images/platform/audit-log-streams/datadog-api-sidebar.png b/docs/images/platform/audit-log-streams/datadog-api-sidebar.png new file mode 100644 index 0000000000..d95cb9b2de Binary files /dev/null and b/docs/images/platform/audit-log-streams/datadog-api-sidebar.png differ diff --git a/docs/images/platform/audit-log-streams/datadog-logging-endpoint.png b/docs/images/platform/audit-log-streams/datadog-logging-endpoint.png new file mode 100644 index 0000000000..7960b11457 Binary files /dev/null and b/docs/images/platform/audit-log-streams/datadog-logging-endpoint.png differ diff --git a/docs/images/platform/audit-log-streams/datadog-source-details.png b/docs/images/platform/audit-log-streams/datadog-source-details.png new file mode 100644 index 0000000000..5ae25b0b3a Binary files /dev/null and b/docs/images/platform/audit-log-streams/datadog-source-details.png differ diff --git a/docs/images/platform/audit-log-streams/stream-create.png b/docs/images/platform/audit-log-streams/stream-create.png new file mode 100644 index 0000000000..949278e3d1 Binary files /dev/null and b/docs/images/platform/audit-log-streams/stream-create.png differ diff --git a/docs/images/platform/audit-log-streams/stream-inputs.png b/docs/images/platform/audit-log-streams/stream-inputs.png new file mode 100644 index 0000000000..6b9d7c57ba Binary files /dev/null and b/docs/images/platform/audit-log-streams/stream-inputs.png differ diff --git a/docs/images/platform/audit-log-streams/stream-list.png b/docs/images/platform/audit-log-streams/stream-list.png new file mode 100644 index 0000000000..c5cc5598bd Binary files /dev/null and b/docs/images/platform/audit-log-streams/stream-list.png differ diff --git a/docs/images/self-hosting/deployment-options/docker-swarm/ha-proxy-ha.png b/docs/images/self-hosting/deployment-options/docker-swarm/ha-proxy-ha.png new file mode 100644 index 0000000000..bfd2bb5200 Binary files /dev/null and b/docs/images/self-hosting/deployment-options/docker-swarm/ha-proxy-ha.png differ diff --git a/docs/mint.json b/docs/mint.json index 55c5bff923..0bb1ae9bff 100644 --- a/docs/mint.json +++ b/docs/mint.json @@ -143,7 +143,8 @@ "documentation/platform/dynamic-secrets/aws-iam" ] }, - "documentation/platform/groups" + "documentation/platform/groups", + "documentation/platform/audit-log-streams" ] }, { @@ -190,7 +191,6 
@@ "group": "Self-host Infisical", "pages": [ "self-hosting/overview", - "self-hosting/configuration/requirements", { "group": "Installation methods", "pages": [ @@ -201,6 +201,7 @@ ] }, "self-hosting/configuration/envars", + "self-hosting/configuration/requirements", { "group": "Guides", "pages": [ @@ -479,6 +480,9 @@ "api-reference/endpoints/secrets/read", "api-reference/endpoints/secrets/update", "api-reference/endpoints/secrets/delete", + "api-reference/endpoints/secrets/create-many", + "api-reference/endpoints/secrets/update-many", + "api-reference/endpoints/secrets/delete-many", "api-reference/endpoints/secrets/attach-tags", "api-reference/endpoints/secrets/detach-tags" ] diff --git a/docs/self-hosting/configuration/requirements.mdx b/docs/self-hosting/configuration/requirements.mdx index 2e31ac8537..c0e9cab019 100644 --- a/docs/self-hosting/configuration/requirements.mdx +++ b/docs/self-hosting/configuration/requirements.mdx @@ -1,5 +1,5 @@ --- -title: "Requirements" +title: "Hardware requirements" description: "Find out the minimal requirements for operating Infisical." --- diff --git a/docs/self-hosting/deployment-options/docker-compose.mdx b/docs/self-hosting/deployment-options/docker-compose.mdx index 5adfe6378a..d618792551 100644 --- a/docs/self-hosting/deployment-options/docker-compose.mdx +++ b/docs/self-hosting/deployment-options/docker-compose.mdx @@ -2,8 +2,7 @@ title: "Docker Compose" description: "Read how to run Infisical with Docker Compose template." --- -Install Infisical using Docker compose. This self hosting method contains all of the required components needed -to run a functional instance of Infisical. +This self hosting guide will walk you though the steps to self host Infisical using Docker compose. ## Prerequisites - [Docker](https://docs.docker.com/engine/install/) @@ -80,4 +79,4 @@ docker-compose -f docker-compose.prod.yml up Your Infisical instance should now be running on port `80`. To access your instance, visit `http://localhost:80`. -![self host sign up](/images/self-hosting/applicable-to-all/selfhost-signup.png) \ No newline at end of file +![self host sign up](/images/self-hosting/applicable-to-all/selfhost-signup.png) diff --git a/docs/self-hosting/deployment-options/docker-swarm.mdx b/docs/self-hosting/deployment-options/docker-swarm.mdx index 2d1e041ddc..c63aff23b1 100644 --- a/docs/self-hosting/deployment-options/docker-swarm.mdx +++ b/docs/self-hosting/deployment-options/docker-swarm.mdx @@ -63,426 +63,9 @@ For the sake of simplicity, the example in this guide only contains one manager It's important to note that while the cluster can tolerate the failure of one node in a three-node setup, it's recommended to have a minimum of three nodes to ensure high availability. With two nodes, the failure of a single node can result in a loss of quorum and potential downtime. 
-## Docker Deployment Stack +## Docker Deployment Stack Overview - - - ```yaml infisical-stack.yaml -version: "3" - -services: - haproxy: - image: haproxy:latest - ports: - - '7001:7000' - - '5002:5433' - - '5003:5434' - - '6379:6379' - - '8080:8080' - networks: - - infisical - configs: - - source: haproxy-config - target: /usr/local/etc/haproxy/haproxy.cfg - deploy: - placement: - constraints: - - node.labels.name == node1 - - infisical: - container_name: infisical-backend - image: infisical/infisical:v0.60.0-postgres - env_file: .env - ports: - - 80:8080 - environment: - - NODE_ENV=production - networks: - - infisical - secrets: - - env_file - - etcd1: - image: ghcr.io/zalando/spilo-16:3.2-p2 - networks: - - infisical - environment: - ETCD_UNSUPPORTED_ARCH: arm64 - container_name: demo-etcd1 - deploy: - placement: - constraints: - - node.labels.name == node1 - hostname: etcd1 - command: | - etcd --name etcd1 - --listen-client-urls http://0.0.0.0:2379 - --listen-peer-urls=http://0.0.0.0:2380 - --advertise-client-urls http://etcd1:2379 - --initial-cluster=etcd1=http://etcd1:2380,etcd2=http://etcd2:2380,etcd3=http://etcd3:2380 - --initial-advertise-peer-urls=http://etcd1:2380 - --initial-cluster-state=new - - etcd2: - image: ghcr.io/zalando/spilo-16:3.2-p2 - networks: - - infisical - environment: - ETCD_UNSUPPORTED_ARCH: arm64 - container_name: demo-etcd2 - hostname: etcd2 - deploy: - placement: - constraints: - - node.labels.name == node2 - command: | - etcd --name etcd2 - --listen-client-urls http://0.0.0.0:2379 - --listen-peer-urls=http://0.0.0.0:2380 - --advertise-client-urls http://etcd2:2379 - --initial-cluster=etcd1=http://etcd1:2380,etcd2=http://etcd2:2380,etcd3=http://etcd3:2380 - --initial-advertise-peer-urls=http://etcd2:2380 - --initial-cluster-state=new - - etcd3: - image: ghcr.io/zalando/spilo-16:3.2-p2 - networks: - - infisical - environment: - ETCD_UNSUPPORTED_ARCH: arm64 - container_name: demo-etcd3 - hostname: etcd3 - deploy: - placement: - constraints: - - node.labels.name == node3 - command: | - etcd --name etcd3 - --listen-client-urls http://0.0.0.0:2379 - --listen-peer-urls=http://0.0.0.0:2380 - --advertise-client-urls http://etcd3:2379 - --initial-cluster=etcd1=http://etcd1:2380,etcd2=http://etcd2:2380,etcd3=http://etcd3:2380 - --initial-advertise-peer-urls=http://etcd3:2380 - --initial-cluster-state=new - - spolo1: - image: ghcr.io/zalando/spilo-16:3.2-p2 - container_name: postgres-1 - networks: - - infisical - hostname: postgres-1 - environment: - ETCD_HOSTS: etcd1:2379,etcd2:2379,etcd3:2379 - PGPASSWORD_SUPERUSER: "postgres" - PGUSER_SUPERUSER: "postgres" - SCOPE: infisical - volumes: - - postgres_data1:/home/postgres/pgdata - deploy: - placement: - constraints: - - node.labels.name == node1 - - spolo2: - image: ghcr.io/zalando/spilo-16:3.2-p2 - container_name: postgres-2 - networks: - - infisical - hostname: postgres-2 - environment: - ETCD_HOSTS: etcd1:2379,etcd2:2379,etcd3:2379 - PGPASSWORD_SUPERUSER: "postgres" - PGUSER_SUPERUSER: "postgres" - SCOPE: infisical - volumes: - - postgres_data2:/home/postgres/pgdata - deploy: - placement: - constraints: - - node.labels.name == node2 - - spolo3: - image: ghcr.io/zalando/spilo-16:3.2-p2 - container_name: postgres-3 - networks: - - infisical - hostname: postgres-3 - environment: - ETCD_HOSTS: etcd1:2379,etcd2:2379,etcd3:2379 - PGPASSWORD_SUPERUSER: "postgres" - PGUSER_SUPERUSER: "postgres" - SCOPE: infisical - volumes: - - postgres_data3:/home/postgres/pgdata - deploy: - placement: - constraints: - - node.labels.name 
== node3 - - - redis_replica0: - image: bitnami/redis:6.2.10 - environment: - - REDIS_REPLICATION_MODE=master - - REDIS_PASSWORD=123456 - networks: - - infisical - deploy: - placement: - constraints: - - node.labels.name == node1 - - redis_replica1: - image: bitnami/redis:6.2.10 - environment: - - REDIS_REPLICATION_MODE=slave - - REDIS_MASTER_HOST=redis_replica0 - - REDIS_MASTER_PORT_NUMBER=6379 - - REDIS_MASTER_PASSWORD=123456 - - REDIS_PASSWORD=123456 - networks: - - infisical - deploy: - placement: - constraints: - - node.labels.name == node2 - - redis_replica2: - image: bitnami/redis:6.2.10 - environment: - - REDIS_REPLICATION_MODE=slave - - REDIS_MASTER_HOST=redis_replica0 - - REDIS_MASTER_PORT_NUMBER=6379 - - REDIS_MASTER_PASSWORD=123456 - - REDIS_PASSWORD=123456 - networks: - - infisical - deploy: - placement: - constraints: - - node.labels.name == node3 - - redis_sentinel1: - image: bitnami/redis-sentinel:6.2.10 - environment: - - REDIS_SENTINEL_QUORUM=2 - - REDIS_SENTINEL_DOWN_AFTER_MILLISECONDS=5000 - - REDIS_SENTINEL_FAILOVER_TIMEOUT=60000 - - REDIS_SENTINEL_PORT_NUMBER=26379 - - REDIS_MASTER_HOST=redis_replica1 - - REDIS_MASTER_PORT_NUMBER=6379 - - REDIS_MASTER_PASSWORD=123456 - networks: - - infisical - deploy: - placement: - constraints: - - node.labels.name == node1 - - redis_sentinel2: - image: bitnami/redis-sentinel:6.2.10 - environment: - - REDIS_SENTINEL_QUORUM=2 - - REDIS_SENTINEL_DOWN_AFTER_MILLISECONDS=5000 - - REDIS_SENTINEL_FAILOVER_TIMEOUT=60000 - - REDIS_SENTINEL_PORT_NUMBER=26379 - - REDIS_MASTER_HOST=redis_replica1 - - REDIS_MASTER_PORT_NUMBER=6379 - - REDIS_MASTER_PASSWORD=123456 - networks: - - infisical - deploy: - placement: - constraints: - - node.labels.name == node2 - - redis_sentinel3: - image: bitnami/redis-sentinel:6.2.10 - environment: - - REDIS_SENTINEL_QUORUM=2 - - REDIS_SENTINEL_DOWN_AFTER_MILLISECONDS=5000 - - REDIS_SENTINEL_FAILOVER_TIMEOUT=60000 - - REDIS_SENTINEL_PORT_NUMBER=26379 - - REDIS_MASTER_HOST=redis_replica1 - - REDIS_MASTER_PORT_NUMBER=6379 - - REDIS_MASTER_PASSWORD=123456 - networks: - - infisical - deploy: - placement: - constraints: - - node.labels.name == node3 - -networks: - infisical: - - -volumes: - postgres_data1: - postgres_data2: - postgres_data3: - postgres_data4: - redis0: - redis1: - redis2: - -configs: - haproxy-config: - file: ./haproxy.cfg - -secrets: - env_file: - file: .env - ``` - - - ```text haproxy.cfg -global - maxconn 10000 - log stdout format raw local0 - -defaults - log global - mode tcp - retries 3 - timeout client 30m - timeout connect 10s - timeout server 30m - timeout check 5s - -listen stats - mode http - bind *:7000 - stats enable - stats uri / - -resolvers hostdns - nameserver dns 127.0.0.11:53 - resolve_retries 3 - timeout resolve 1s - timeout retry 1s - hold valid 5s - -frontend master - bind *:5433 - default_backend master_backend - -frontend replicas - bind *:5434 - default_backend replica_backend - - -backend master_backend - option httpchk GET /master - http-check expect status 200 - default-server inter 3s fall 3 rise 2 on-marked-down shutdown-sessions - server postgres-1 postgres-1:5432 check port 8008 resolvers hostdns - server postgres-2 postgres-2:5432 check port 8008 resolvers hostdns - server postgres-3 postgres-3:5432 check port 8008 resolvers hostdns - -backend replica_backend - option httpchk GET /replica - http-check expect status 200 - default-server inter 3s fall 3 rise 2 on-marked-down shutdown-sessions - server postgres-1 postgres-1:5432 check port 8008 resolvers hostdns - server 
postgres-2 postgres-2:5432 check port 8008 resolvers hostdns - server postgres-3 postgres-3:5432 check port 8008 resolvers hostdns - - -frontend redis_frontend - bind *:6379 - default_backend redis_backend - -backend redis_backend - option tcp-check - tcp-check send AUTH\ 123456\r\n - tcp-check expect string +OK - tcp-check send PING\r\n - tcp-check expect string +PONG - tcp-check send info\ replication\r\n - tcp-check expect string role:master - tcp-check send QUIT\r\n - tcp-check expect string +OK - server redis_master redis_replica0:6379 check inter 1s - server redis_replica1 redis_replica1:6379 check inter 1s - server redis_replica2 redis_replica2:6379 check inter 1s - -frontend infisical_frontend - bind *:8080 - default_backend infisical_backend - -backend infisical_backend - option httpchk GET /api/status - http-check expect status 200 - server infisical infisical:8080 check inter 1s - ``` - - - ```env .env - # Keys -# Required key for platform encryption/decryption ops -# THIS IS A SAMPLE ENCRYPTION KEY AND SHOULD NEVER BE USED FOR PRODUCTION -ENCRYPTION_KEY=6c1fe4e407b8911c104518103505b218 - -# JWT -# Required secrets to sign JWT tokens -# THIS IS A SAMPLE AUTH_SECRET KEY AND SHOULD NEVER BE USED FOR PRODUCTION -AUTH_SECRET=5lrMXKKWCVocS/uerPsl7V+TX/aaUaI7iDkgl3tSmLE= - -DB_CONNECTION_URI=postgres://infisical:infisical@haproxy:5433/infisical?sslmode=no-verify -# Redis -REDIS_URL=redis://:123456@haproxy:6379 - - -# Website URL -# Required -SITE_URL=http://localhost:8080 - -# Mail/SMTP -SMTP_HOST= -SMTP_PORT= -SMTP_NAME= -SMTP_USERNAME= -SMTP_PASSWORD= - -# Integration -# Optional only if integration is used -CLIENT_ID_HEROKU= -CLIENT_ID_VERCEL= -CLIENT_ID_NETLIFY= -CLIENT_ID_GITHUB= -CLIENT_ID_GITLAB= -CLIENT_ID_BITBUCKET= -CLIENT_SECRET_HEROKU= -CLIENT_SECRET_VERCEL= -CLIENT_SECRET_NETLIFY= -CLIENT_SECRET_GITHUB= -CLIENT_SECRET_GITLAB= -CLIENT_SECRET_BITBUCKET= -CLIENT_SLUG_VERCEL= - -# Sentry (optional) for monitoring errors -SENTRY_DSN= - -# Infisical Cloud-specific configs -# Ignore - Not applicable for self-hosted version -POSTHOG_HOST= -POSTHOG_PROJECT_API_KEY= - -# SSO-specific variables -CLIENT_ID_GOOGLE_LOGIN= -CLIENT_SECRET_GOOGLE_LOGIN= - -CLIENT_ID_GITHUB_LOGIN= -CLIENT_SECRET_GITHUB_LOGIN= - -CLIENT_ID_GITLAB_LOGIN= -CLIENT_SECRET_GITLAB_LOGIN= - ``` - - - -The provided Docker stack YAML file defines the services and their configurations for deploying Infisical with high availability. The main components of this stack are as follows. +The [Docker stack file](https://github.com/Infisical/infisical/tree/main/docker-swarm) used in this guide defines the services and their configurations for deploying Infisical in a highly available manner. The main components of this stack are as follows. 1. **HAProxy**: The HAProxy service is configured to expose ports for accessing PostgreSQL (5433 for the master, 5434 for replicas), Redis master (6379), and the Infisical backend (8080). It uses a config file (`haproxy.cfg`) to define the load balancing and health check rules. @@ -496,42 +79,34 @@ The provided Docker stack YAML file defines the services and their configuration 6. **Redis Sentinel**: Three Redis Sentinel instances (redis_sentinel1, redis_sentinel2, redis_sentinel3) are deployed, one on each node, to monitor and manage the Redis instances. They are connected to the `infisical` network. 
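Once the stack is deployed (see the deployment instructions below), a quick way to sanity-check the HAProxy routing described above is to connect through the published PostgreSQL primary port and confirm it reaches a writable instance. This is only a sketch: it assumes `psql` is available on the node, and it uses the default `postgres`/`postgres` credentials and the `5002 -> 5433` port mapping defined in the example stack file.

```
# Port 5002 on a swarm node is published by HAProxy and maps to its 5433 frontend (PostgreSQL primary)
psql "postgres://postgres:postgres@localhost:5002/postgres" -c "SELECT pg_is_in_recovery();"
# Expected output: f  (false, meaning the connection reached the primary rather than a replica)
```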
-## HAProxy Configuration +## Deployment instructions -The HAProxy configuration file (`haproxy.cfg`) defines the load balancing and health check rules for the PostgreSQL and Redis instances. - -1. **Stats**: This section enables the HAProxy statistics dashboard, accessible at port 7000. - -2. **Resolvers**: This section defines the DNS resolver for service discovery, using the Docker embedded DNS server. - -3. **Frontend**: There are separate frontend sections for the PostgreSQL master (port 5433), PostgreSQL replicas (port 5434), Redis (port 6379), and the Infisical backend (port 8080). Each frontend binds to the respective port and defines the default backend. - -4. **Backend**: The backend sections define the servers and health check rules for each service. - - For PostgreSQL, there are separate backends for the master and replicas. The health check is performed using an HTTP request to the `/master` or `/replica` endpoint, expecting a 200 status code. - - For Redis, the backend uses a TCP health check with authentication and expects the role to be `master` for the Redis master instance. - - For the Infisical backend, the health check is performed using an HTTP request to the `/api/status` endpoint, expecting a 200 status code. - -## Setting Up Docker Nodes - -1. Initialize Docker Swarm on one of the VMs by running the following command: - - ``` - docker swarm init --advertise-addr + + + ``` + docker swarm init ``` Replace `` with the IP address of the VM that will serve as the manager node. Remember to copy the join token returned by this init command. + + + For the sake of simplicity, we only use one manager node in this example deployment. However, in production settings, we recommend you have at least 3 manager nodes. + + -2. On the other VMs, join the Docker Swarm by running the command provided by the manager node: - - ``` + + ``` docker swarm join --token :2377 ``` Replace `` with the token provided by the manager node during initialization. -3. Label the nodes with `node.labels.name` to specify their roles. For example: + - ``` + + Labels on nodes will help us control where stateful components such as Postgres and Redis are deployed. To label nodes, follow the steps below. + ``` docker node update --label-add name=node1 docker node update --label-add name=node2 docker node update --label-add name=node3 @@ -540,32 +115,102 @@ The HAProxy configuration file (`haproxy.cfg`) defines the load balancing and he Replace ``, ``, and `` with the respective node IDs. To view the list of nodes and their ids, run the following on the manager node `docker node ls`. -## Deploying the Docker Stack + -1. Copy the provided Docker stack YAML file and the HAProxy configuration file to the manager node. + + Copy the Docker stack YAML file, HAProxy configuration file, and example `.env` file to the manager node. Ensure that all 3 files are placed in the same directory. + - [Docker stack file](https://github.com/Infisical/infisical/blob/main/docker-swarm/stack.yaml) (rename to infisical-stack.yaml) + - [HA configuration file](https://github.com/Infisical/infisical/blob/main/docker-swarm/haproxy.cfg) (rename to haproxy.cfg) + - [Example .env file](https://github.com/Infisical/infisical/blob/main/docker-swarm/.env-example) (rename to .env) + -2. Deploy the stack using the following command: + ``` docker stack deploy -c infisical-stack.yaml infisical ``` + - This command deploys the stack with the specified configuration. -3.
Run the [schema migration](/self-hosting/configuration/schema-migrations) to initialize the database. -To connect to the Postgres database, use the following default credentials: username: `postgres` and password: `postgres`. + + ```plain + $ docker service ls + ID NAME MODE REPLICAS IMAGE PORTS + 4kzq3ub8qgn9 infisical_etcd1 replicated 1/1 ghcr.io/zalando/spilo-16:3.2-p2 + tqx9t82bn8d9 infisical_etcd2 replicated 1/1 ghcr.io/zalando/spilo-16:3.2-p2 + t8vbkrasy8fz infisical_etcd3 replicated 1/1 ghcr.io/zalando/spilo-16:3.2-p2 + 77iei42fcf6q infisical_haproxy global 4/4 haproxy:latest *:5002-5003->5433-5434/tcp, *:6379->6379/tcp, *:7001->7000/tcp, *:8080->8080/tcp + jaewzqy8md56 infisical_infisical replicated 5/5 infisical/infisical:v0.60.1-postgres + 58w4zablfbtb infisical_redis_replica0 replicated 1/1 bitnami/redis:6.2.10 + w4yag2whq0un infisical_redis_replica1 replicated 1/1 bitnami/redis:6.2.10 + w03mriy0jave infisical_redis_replica2 replicated 1/1 bitnami/redis:6.2.10 + ppo6rk47hc9t infisical_redis_sentinel1 replicated 1/1 bitnami/redis-sentinel:6.2.10 + ub29vd0lnq7f infisical_redis_sentinel2 replicated 1/1 bitnami/redis-sentinel:6.2.10 + szg3yky7yji2 infisical_redis_sentinel3 replicated 1/1 bitnami/redis-sentinel:6.2.10 + eqtocpf5tiy0 infisical_spolo1 replicated 1/1 ghcr.io/zalando/spilo-16:3.2-p2 + 3lznscvk7k5t infisical_spolo2 replicated 1/1 ghcr.io/zalando/spilo-16:3.2-p2 + v04ml7rz2j5q infisical_spolo3 replicated 1/1 ghcr.io/zalando/spilo-16:3.2-p2 + ``` -## Scaling and Resilience + + You'll notice that service `infisical_infisical` will not be in running state. + This is expected as the database does not yet have the desired schemas. + Once the database schema migrations have been successfully applied, this issue should be resolved. + + -To further scale and make the system more resilient, you can add more nodes to the Docker Swarm and update the stack configuration accordingly: + + Run the schema migration to initialize the database. Follow the [guide here](/self-hosting/configuration/schema-migrations) to learn how. -1. Add new VMs and join them to the Docker Swarm as worker nodes. + To connect to the Postgres database, use the following default credentials defined in the Docker swarm: username: `postgres`, password: `postgres` and database: `postgres`. + -2. Update the Docker stack YAML file to include the new nodes in the `deploy` section of the relevant services, specifying the appropriate `node.labels.name` constraints. + + ![HA Proxy stats](/images/self-hosting/deployment-options/docker-swarm/ha-proxy-ha.png) + To view the health of services in your Infisical cluster, visit port `:7001` of any node in your Docker swarm. + This port will expose the HA Proxy stats. -3. Update the HAProxy configuration file (`haproxy.cfg`) to include the new nodes in the backend sections for PostgreSQL and Redis. + Run the following command to view the IPs of the nodes in your docker swarm. -4. Redeploy the updated stack using the `docker stack deploy` command. + ```plain + $ docker node ls + ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS ENGINE VERSION + 0jnegl4gpo235l66nglcwc07t localhost Ready Active 26.0.2 + no1a7zwj88057k73m196ulkq6 * localhost Ready Active Leader 26.0.2 + wcb2x27w3tq7ht4v1h7ke49qk localhost Ready Active 26.0.2 + zov5q7uop7wpxc2ndz712v9oa localhost Ready Active 26.0.2 + ``` -Note that the database containers (PostgreSQL) are stateful and cannot be simply replicated. Instead, one database instance is deployed per node to ensure data consistency and avoid conflicts. 
+ + The stats page may take 1-2 minutes to become accessible. + + -Once all services are running as expected, you may visit the IP address of the node where the HA Proxy was deployed. This should take you to the Infisical installation wizard. \ No newline at end of file + + ![self host sign up](/images/self-hosting/applicable-to-all/selfhost-signup.png) + Once all expected services are up and running, visit `:8080` of any node in the swarm. This will take you to the Infisical configuration page. + + + + +## FAQ + + To further scale and make the system more resilient, you can add more nodes to the Docker Swarm and update the stack configuration accordingly: + + 1. Add new VMs and join them to the Docker Swarm as worker nodes. + + 2. Update the Docker stack YAML file to include the new nodes in the `deploy` section of the relevant services, specifying the appropriate `node.labels.name` constraints. + + 3. Update the HAProxy configuration file (`haproxy.cfg`) to include the new nodes in the backend sections for PostgreSQL and Redis. + + 4. Redeploy the updated stack using the `docker stack deploy` command. + + Note that the database containers (PostgreSQL) are stateful and cannot be simply replicated. Instead, one database instance is deployed per node to ensure data consistency and avoid conflicts. + + + + +Native tooling for scheduled backups of Postgres and Redis is currently in development. +In the meantime, we recommend using a variety of open-source tools available for this purpose. +For Postgres, [Spilo](https://github.com/zalando/spilo) provides built-in support for scheduled data dumps. +You can explore other third party tools for managing db backups, one such tool is [docker-db-backup](https://github.com/tiredofit/docker-db-backup). + diff --git a/frontend/src/components/v2/InfisicalSecretInput/InfisicalSecretInput.tsx b/frontend/src/components/v2/InfisicalSecretInput/InfisicalSecretInput.tsx index e45ca1dc0c..d6f2ab212b 100644 --- a/frontend/src/components/v2/InfisicalSecretInput/InfisicalSecretInput.tsx +++ b/frontend/src/components/v2/InfisicalSecretInput/InfisicalSecretInput.tsx @@ -38,12 +38,7 @@ type ReferenceItem = { export const InfisicalSecretInput = ({ value: propValue, - isVisible, containerClassName, - onBlur, - isDisabled, - isImport, - isReadOnly, secretPath: propSecretPath, environment: propEnvironment, onChange, @@ -276,10 +271,7 @@ export const InfisicalSecretInput = ({ handleSuggestionSelect(); } - if ( - (["ArrowDown", "ArrowUp"].includes(e.key) && isPopupOpen) || - (e.key === "Enter" && highlightedIndex >= 0) - ) { + if (["ArrowDown", "ArrowUp", "Enter"].includes(e.key) && isPopupOpen) { e.preventDefault(); } }; diff --git a/frontend/src/hooks/api/auditLogStreams/index.tsx b/frontend/src/hooks/api/auditLogStreams/index.tsx new file mode 100644 index 0000000000..72b1fba1af --- /dev/null +++ b/frontend/src/hooks/api/auditLogStreams/index.tsx @@ -0,0 +1,6 @@ +export { + useCreateAuditLogStream, + useDeleteAuditLogStream, + useUpdateAuditLogStream +} from "./mutations"; +export { useGetAuditLogStreamDetails, useGetAuditLogStreams } from "./queries"; diff --git a/frontend/src/hooks/api/auditLogStreams/mutations.tsx b/frontend/src/hooks/api/auditLogStreams/mutations.tsx new file mode 100644 index 0000000000..2d99f57c49 --- /dev/null +++ b/frontend/src/hooks/api/auditLogStreams/mutations.tsx @@ -0,0 +1,61 @@ +import { useMutation, useQueryClient } from "@tanstack/react-query"; + +import { apiRequest } from "@app/config/request"; + +import { auditLogStreamKeys } from 
"./queries"; +import { + TAuditLogStream, + TCreateAuditLogStreamDTO, + TDeleteAuditLogStreamDTO, + TUpdateAuditLogStreamDTO +} from "./types"; + +export const useCreateAuditLogStream = () => { + const queryClient = useQueryClient(); + + return useMutation<{ auditLogStream: TAuditLogStream }, {}, TCreateAuditLogStreamDTO>({ + mutationFn: async (dto) => { + const { data } = await apiRequest.post<{ auditLogStream: TAuditLogStream }>( + "/api/v1/audit-log-streams", + dto + ); + return data; + }, + onSuccess: (_, { orgId }) => { + queryClient.invalidateQueries(auditLogStreamKeys.list(orgId)); + } + }); +}; + +export const useUpdateAuditLogStream = () => { + const queryClient = useQueryClient(); + + return useMutation<{ auditLogStream: TAuditLogStream }, {}, TUpdateAuditLogStreamDTO>({ + mutationFn: async (dto) => { + const { data } = await apiRequest.patch<{ auditLogStream: TAuditLogStream }>( + `/api/v1/audit-log-streams/${dto.id}`, + dto + ); + return data; + }, + onSuccess: (_, { orgId }) => { + queryClient.invalidateQueries(auditLogStreamKeys.list(orgId)); + } + }); +}; + +export const useDeleteAuditLogStream = () => { + const queryClient = useQueryClient(); + + return useMutation<{ auditLogStream: TAuditLogStream }, {}, TDeleteAuditLogStreamDTO>({ + mutationFn: async (dto) => { + const { data } = await apiRequest.delete<{ auditLogStream: TAuditLogStream }>( + `/api/v1/audit-log-streams/${dto.id}` + ); + return data; + }, + onSuccess: (_, { orgId }) => { + queryClient.invalidateQueries(auditLogStreamKeys.list(orgId)); + } + }); +}; diff --git a/frontend/src/hooks/api/auditLogStreams/queries.tsx b/frontend/src/hooks/api/auditLogStreams/queries.tsx new file mode 100644 index 0000000000..f86ca0dce4 --- /dev/null +++ b/frontend/src/hooks/api/auditLogStreams/queries.tsx @@ -0,0 +1,40 @@ +import { useQuery } from "@tanstack/react-query"; + +import { apiRequest } from "@app/config/request"; + +import { TAuditLogStream } from "./types"; + +export const auditLogStreamKeys = { + list: (orgId: string) => ["audit-log-stream", { orgId }], + getById: (id: string) => ["audit-log-stream-details", { id }] +}; + +const fetchAuditLogStreams = async () => { + const { data } = await apiRequest.get<{ auditLogStreams: TAuditLogStream[] }>( + "/api/v1/audit-log-streams" + ); + + return data.auditLogStreams; +}; + +export const useGetAuditLogStreams = (orgId: string) => + useQuery({ + queryKey: auditLogStreamKeys.list(orgId), + queryFn: () => fetchAuditLogStreams(), + enabled: Boolean(orgId) + }); + +const fetchAuditLogStreamDetails = async (id: string) => { + const { data } = await apiRequest.get<{ auditLogStream: TAuditLogStream }>( + `/api/v1/audit-log-streams/${id}` + ); + + return data.auditLogStream; +}; + +export const useGetAuditLogStreamDetails = (id: string) => + useQuery({ + queryKey: auditLogStreamKeys.getById(id), + queryFn: () => fetchAuditLogStreamDetails(id), + enabled: Boolean(id) + }); diff --git a/frontend/src/hooks/api/auditLogStreams/types.ts b/frontend/src/hooks/api/auditLogStreams/types.ts new file mode 100644 index 0000000000..8e21a32090 --- /dev/null +++ b/frontend/src/hooks/api/auditLogStreams/types.ts @@ -0,0 +1,28 @@ +export type LogStreamHeaders = { + key: string; + value: string; +}; + +export type TAuditLogStream = { + id: string; + url: string; + headers?: LogStreamHeaders[]; +}; + +export type TCreateAuditLogStreamDTO = { + url: string; + headers?: LogStreamHeaders[]; + orgId: string; +}; + +export type TUpdateAuditLogStreamDTO = { + id: string; + url?: string; + headers?: 
LogStreamHeaders[]; + orgId: string; +}; + +export type TDeleteAuditLogStreamDTO = { + id: string; + orgId: string; +}; diff --git a/frontend/src/hooks/api/index.tsx b/frontend/src/hooks/api/index.tsx index b2df27f2da..574da5a319 100644 --- a/frontend/src/hooks/api/index.tsx +++ b/frontend/src/hooks/api/index.tsx @@ -1,6 +1,7 @@ export * from "./admin"; export * from "./apiKeys"; export * from "./auditLogs"; +export * from "./auditLogStreams"; export * from "./auth"; export * from "./bots"; export * from "./dynamicSecret"; diff --git a/frontend/src/hooks/api/subscriptions/types.ts b/frontend/src/hooks/api/subscriptions/types.ts index 46facedd30..45414292da 100644 --- a/frontend/src/hooks/api/subscriptions/types.ts +++ b/frontend/src/hooks/api/subscriptions/types.ts @@ -5,6 +5,8 @@ export type SubscriptionPlan = { auditLogs: boolean; dynamicSecret: boolean; auditLogsRetentionDays: number; + auditLogStreamLimit: number; + auditLogStreams: boolean; customAlerts: boolean; customRateLimits: boolean; pitRecovery: boolean; diff --git a/frontend/src/hooks/api/types.ts b/frontend/src/hooks/api/types.ts index d6b43d6749..49949d88e1 100644 --- a/frontend/src/hooks/api/types.ts +++ b/frontend/src/hooks/api/types.ts @@ -1,5 +1,6 @@ import { ZodIssue } from "zod"; +export type { TAuditLogStream } from "./auditLogStreams/types"; export type { GetAuthTokenAPI } from "./auth/types"; export type { IncidentContact } from "./incidentContacts/types"; export type { IntegrationAuth } from "./integrationAuth/types"; @@ -48,13 +49,13 @@ export enum ApiErrorTypes { export type TApiErrors = | { - error: ApiErrorTypes.ValidationError; - message: ZodIssue[]; - statusCode: 403; - } + error: ApiErrorTypes.ValidationError; + message: ZodIssue[]; + statusCode: 403; + } | { error: ApiErrorTypes.ForbiddenError; message: string; statusCode: 401 } | { - statusCode: 400; - message: string; - error: ApiErrorTypes.BadRequestError; - }; + statusCode: 400; + message: string; + error: ApiErrorTypes.BadRequestError; + }; diff --git a/frontend/src/pages/org/[id]/overview/index.tsx b/frontend/src/pages/org/[id]/overview/index.tsx index b6cc4ee465..ab9fa790d6 100644 --- a/frontend/src/pages/org/[id]/overview/index.tsx +++ b/frontend/src/pages/org/[id]/overview/index.tsx @@ -594,7 +594,7 @@ const OrganizationPage = withPermission( )}
- {window.location.origin.includes("https://app.infisical.com") || window.location.origin.includes("http://localhost:8080") && ( + {(window.location.origin.includes("https://app.infisical.com") || window.location.origin.includes("http://localhost:8080")) && (
- Scheduled maintenance on April 13th 2024 {" "} + Scheduled maintenance on May 11th 2024 {" "}
- Infisical will undergo scheduled maintenance for approximately 1 hour on Saturday, April 13th, 11am EST. During these hours, read - operations will continue to function normally but no resources will be editable. + Infisical will undergo scheduled maintenance for approximately 2 hours on Saturday, May 11th, 11am EST. During these hours, read + operations to Infisical will continue to function normally but no resources will be editable. No action is required on your end — your applications will continue to fetch secrets.
diff --git a/frontend/src/views/Project/MembersPage/components/ServiceTokenTab/ServiceTokenTab.tsx b/frontend/src/views/Project/MembersPage/components/ServiceTokenTab/ServiceTokenTab.tsx index 3ee0613281..068ece4277 100644 --- a/frontend/src/views/Project/MembersPage/components/ServiceTokenTab/ServiceTokenTab.tsx +++ b/frontend/src/views/Project/MembersPage/components/ServiceTokenTab/ServiceTokenTab.tsx @@ -1,3 +1,5 @@ +import { faWarning } from "@fortawesome/free-solid-svg-icons"; +import { FontAwesomeIcon } from "@fortawesome/react-fontawesome"; import { motion } from "framer-motion"; import { ServiceTokenSection } from "./components"; @@ -11,7 +13,37 @@ export const ServiceTokenTab = () => { animate={{ opacity: 1, translateX: 0 }} exit={{ opacity: 0, translateX: 30 }} > - +
+
+ +
+ Deprecation Notice +

+ Service Tokens are being deprecated in favor of Machine Identities. +
+ They will be removed in the future in accordance with the deprecation notice and + timeline stated{" "} + + here + + . +
+ + Learn more about Machine Identities + +

+
+
+ +
); }; diff --git a/frontend/src/views/Settings/OrgSettingsPage/components/AuditLogStreamTab/AuditLogStreamForm.tsx b/frontend/src/views/Settings/OrgSettingsPage/components/AuditLogStreamTab/AuditLogStreamForm.tsx new file mode 100644 index 0000000000..18bfa6340f --- /dev/null +++ b/frontend/src/views/Settings/OrgSettingsPage/components/AuditLogStreamTab/AuditLogStreamForm.tsx @@ -0,0 +1,206 @@ +import { Controller, useFieldArray, useForm } from "react-hook-form"; +import { faPlus, faTrash } from "@fortawesome/free-solid-svg-icons"; +import { FontAwesomeIcon } from "@fortawesome/react-fontawesome"; +import { z } from "zod"; + +import { createNotification } from "@app/components/notifications"; +import { Button, FormControl, FormLabel, IconButton, Input, Spinner } from "@app/components/v2"; +import { useOrganization } from "@app/context"; +import { + useCreateAuditLogStream, + useGetAuditLogStreamDetails, + useUpdateAuditLogStream +} from "@app/hooks/api"; + +type Props = { + id?: string; + onClose: () => void; +}; + +const formSchema = z.object({ + url: z.string().url().min(1), + headers: z + .object({ + key: z.string(), + value: z.string() + }) + .array() + .optional() +}); +type TForm = z.infer; + +export const AuditLogStreamForm = ({ id = "", onClose }: Props) => { + const isEdit = Boolean(id); + const { currentOrg } = useOrganization(); + const orgId = currentOrg?.id || ""; + + const auditLogStream = useGetAuditLogStreamDetails(id); + const createAuditLogStream = useCreateAuditLogStream(); + const updateAuditLogStream = useUpdateAuditLogStream(); + + const { + handleSubmit, + control, + setValue, + getValues, + formState: { isSubmitting } + } = useForm({ + values: auditLogStream?.data, + defaultValues: { + headers: [{ key: "", value: "" }] + } + }); + + const headerFields = useFieldArray({ + control, + name: "headers" + }); + + const handleAuditLogStreamEdit = async ({ headers, url }: TForm) => { + if (!id) return; + try { + await updateAuditLogStream.mutateAsync({ + id, + orgId, + headers, + url + }); + createNotification({ + type: "success", + text: "Successfully updated stream" + }); + onClose(); + } catch (err) { + console.log(err); + createNotification({ + type: "error", + text: "Failed to update stream" + }); + } + }; + + const handleFormSubmit = async ({ headers = [], url }: TForm) => { + if (isSubmitting) return; + const sanitizedHeaders = headers.filter(({ key, value }) => Boolean(key) && Boolean(value)); + const streamHeaders = sanitizedHeaders.length ? sanitizedHeaders : undefined; + if (isEdit) { + await handleAuditLogStreamEdit({ headers: streamHeaders, url }); + return; + } + try { + await createAuditLogStream.mutateAsync({ + orgId, + headers: streamHeaders, + url + }); + createNotification({ + type: "success", + text: "Successfully created stream" + }); + onClose(); + } catch (err) { + console.log(err); + createNotification({ + type: "error", + text: "Failed to create stream" + }); + } + }; + + if (isEdit && auditLogStream.isLoading) { + return ( +
+ +
+ ); + } + + return ( +
+
+ ( + + + + )} + /> + + {headerFields.fields.map(({ id: headerFieldId }, i) => ( +
+ ( + + + + )} + /> + ( + + + + )} + /> + { + const header = getValues("headers"); + if (header && header?.length > 1) { + headerFields.remove(i); + } else { + setValue("headers", [{ key: "", value: "" }]); + } + }} + > + + +
+ ))} +
+ +
+
+
+ + +
+
+ ); +}; diff --git a/frontend/src/views/Settings/OrgSettingsPage/components/AuditLogStreamTab/AuditLogStreamTab.tsx b/frontend/src/views/Settings/OrgSettingsPage/components/AuditLogStreamTab/AuditLogStreamTab.tsx new file mode 100644 index 0000000000..7590d40022 --- /dev/null +++ b/frontend/src/views/Settings/OrgSettingsPage/components/AuditLogStreamTab/AuditLogStreamTab.tsx @@ -0,0 +1,197 @@ +import { faPlug, faPlus } from "@fortawesome/free-solid-svg-icons"; +import { FontAwesomeIcon } from "@fortawesome/react-fontawesome"; + +import { createNotification } from "@app/components/notifications"; +import { OrgPermissionCan } from "@app/components/permissions"; +import { + Button, + DeleteActionModal, + EmptyState, + Modal, + ModalContent, + Table, + TableContainer, + TableSkeleton, + TBody, + Td, + THead, + Tr, + UpgradePlanModal +} from "@app/components/v2"; +import { + OrgPermissionActions, + OrgPermissionSubjects, + useOrganization, + useSubscription +} from "@app/context"; +import { withPermission } from "@app/hoc"; +import { usePopUp } from "@app/hooks"; +import { useDeleteAuditLogStream, useGetAuditLogStreams } from "@app/hooks/api"; + +import { AuditLogStreamForm } from "./AuditLogStreamForm"; + +export const AuditLogStreamsTab = withPermission( + () => { + const { currentOrg } = useOrganization(); + const orgId = currentOrg?.id || ""; + const { popUp, handlePopUpOpen, handlePopUpToggle, handlePopUpClose } = usePopUp([ + "auditLogStreamForm", + "deleteAuditLogStream", + "upgradePlan" + ] as const); + const { subscription } = useSubscription(); + + const { data: auditLogStreams, isLoading: isAuditLogStreamsLoading } = + useGetAuditLogStreams(orgId); + + // mutation + const { mutateAsync: deleteAuditLogStream } = useDeleteAuditLogStream(); + + const handleAuditLogStreamDelete = async () => { + try { + const auditLogStreamId = popUp?.deleteAuditLogStream?.data as string; + await deleteAuditLogStream({ + id: auditLogStreamId, + orgId + }); + handlePopUpClose("deleteAuditLogStream"); + createNotification({ + type: "success", + text: "Successfully deleted stream" + }); + } catch (err) { + console.log(err); + createNotification({ + type: "error", + text: "Failed to delete stream" + }); + } + }; + + return ( +
+
+

Audit Log Streams

+ + {(isAllowed) => ( + + )} + +
+

+ Send audit logs from Infisical to external logging providers via HTTP +

+
+ + + + + + + + + + {isAuditLogStreamsLoading && ( + + )} + {!isAuditLogStreamsLoading && auditLogStreams && auditLogStreams?.length === 0 && ( + + + + )} + {!isAuditLogStreamsLoading && + auditLogStreams?.map(({ id, url }) => ( + + + + + ))} + +
URLAction
+ +
+ {url} + +
+ + {(isAllowed) => ( + + )} + + + {(isAllowed) => ( + + )} + +
+
+
+
+ { + handlePopUpToggle("auditLogStreamForm", isModalOpen); + }} + > + + handlePopUpToggle("auditLogStreamForm")} + /> + + + handlePopUpToggle("upgradePlan", isOpen)} + text="You can add audit log streams if you switch to Infisical's Enterprise plan." + /> + handlePopUpToggle("deleteAuditLogStream", isOpen)} + onClose={() => handlePopUpClose("deleteAuditLogStream")} + onDeleteApproved={handleAuditLogStreamDelete} + /> +
+ ); + }, + { action: OrgPermissionActions.Read, subject: OrgPermissionSubjects.Settings } +); diff --git a/frontend/src/views/Settings/OrgSettingsPage/components/AuditLogStreamTab/index.tsx b/frontend/src/views/Settings/OrgSettingsPage/components/AuditLogStreamTab/index.tsx new file mode 100644 index 0000000000..ecce21f3b6 --- /dev/null +++ b/frontend/src/views/Settings/OrgSettingsPage/components/AuditLogStreamTab/index.tsx @@ -0,0 +1 @@ +export { AuditLogStreamsTab } from "./AuditLogStreamTab"; diff --git a/frontend/src/views/Settings/OrgSettingsPage/components/OrgTabGroup/OrgTabGroup.tsx b/frontend/src/views/Settings/OrgSettingsPage/components/OrgTabGroup/OrgTabGroup.tsx index de6bf04539..fdf541c546 100644 --- a/frontend/src/views/Settings/OrgSettingsPage/components/OrgTabGroup/OrgTabGroup.tsx +++ b/frontend/src/views/Settings/OrgSettingsPage/components/OrgTabGroup/OrgTabGroup.tsx @@ -1,12 +1,14 @@ import { Fragment } from "react"; import { Tab } from "@headlessui/react"; +import { AuditLogStreamsTab } from "../AuditLogStreamTab"; import { OrgAuthTab } from "../OrgAuthTab"; import { OrgGeneralTab } from "../OrgGeneralTab"; const tabs = [ { name: "General", key: "tab-org-general" }, - { name: "Security", key: "tab-org-security" } + { name: "Security", key: "tab-org-security" }, + { name: "Audit Log Streams", key: "tag-audit-log-streams" } ]; export const OrgTabGroup = () => { return ( @@ -17,9 +19,8 @@ export const OrgTabGroup = () => { {({ selected }) => ( @@ -34,6 +35,9 @@ export const OrgTabGroup = () => { + + + ); diff --git a/frontend/src/views/Settings/PersonalSettingsPage/PersonalTabGroup/PersonalTabGroup.tsx b/frontend/src/views/Settings/PersonalSettingsPage/PersonalTabGroup/PersonalTabGroup.tsx index c1e282c4ef..2461729758 100644 --- a/frontend/src/views/Settings/PersonalSettingsPage/PersonalTabGroup/PersonalTabGroup.tsx +++ b/frontend/src/views/Settings/PersonalSettingsPage/PersonalTabGroup/PersonalTabGroup.tsx @@ -1,5 +1,4 @@ import { Fragment } from "react"; -import Link from "next/link"; import { faWarning } from "@fortawesome/free-solid-svg-icons"; import { FontAwesomeIcon } from "@fortawesome/react-fontawesome"; import { Tab } from "@headlessui/react"; @@ -47,18 +46,29 @@ export const PersonalTabGroup = () => {
Deprecation Notice

- API Keys are deprecated and will be removed in the future. -
Please use Machine Identity authentication for your applications and - services. -

- - + API Keys are being deprecated in favor of Machine Identities. +
+ They will be removed in the future in accordance with the deprecation notice and + timeline stated{" "} +
+ here + + . +
+ Learn more about Machine Identities - +

-
diff --git a/frontend/src/views/Settings/ProjectSettingsPage/ProjectSettingsPage.tsx b/frontend/src/views/Settings/ProjectSettingsPage/ProjectSettingsPage.tsx index da87da755a..1d453b8721 100644 --- a/frontend/src/views/Settings/ProjectSettingsPage/ProjectSettingsPage.tsx +++ b/frontend/src/views/Settings/ProjectSettingsPage/ProjectSettingsPage.tsx @@ -7,7 +7,7 @@ import { WebhooksTab } from "./components/WebhooksTab"; const tabs = [ { name: "General", key: "tab-project-general" }, - { name: "Webhooks", key: "tab-project-webhooks" } + { name: "Webhooks", key: "tab-project-webhooks" }, ]; export const ProjectSettingsPage = () => { @@ -25,9 +25,8 @@ export const ProjectSettingsPage = () => { {({ selected }) => (