From 0502dc60a6f38bf5ec6011b812000c7ced77f608 Mon Sep 17 00:00:00 2001
From: =
Date: Sun, 7 Sep 2025 22:45:41 +0530
Subject: [PATCH] feat: add support for Redis read replicas and update docs

---
 backend/src/keystore/keystore.ts           | 56 +++++++++++++++++-----
 backend/src/lib/config/env.ts              | 22 +++++++++
 backend/src/lib/config/redis.ts            | 11 ++++-
 docs/self-hosting/configuration/envars.mdx | 20 ++++++++
 4 files changed, 95 insertions(+), 14 deletions(-)

diff --git a/backend/src/keystore/keystore.ts b/backend/src/keystore/keystore.ts
index 5acfdc65ef..0854c2c717 100644
--- a/backend/src/keystore/keystore.ts
+++ b/backend/src/keystore/keystore.ts
@@ -3,6 +3,7 @@ import { pgAdvisoryLockHashText } from "@app/lib/crypto/hashtext";
 import { applyJitter } from "@app/lib/dates";
 import { delay as delayMs } from "@app/lib/delay";
 import { ExecutionResult, Redlock, Settings } from "@app/lib/red-lock";
+import { Redis, Cluster } from "ioredis";
 
 export const PgSqlLock = {
   BootUpMigration: 2023,
@@ -102,30 +103,53 @@ export type TKeyStoreFactory = {
   getKeysByPattern: (pattern: string, limit?: number) => Promise<string[]>;
 };
 
+const pickPrimaryOrSecondaryRedis = (primary: Redis | Cluster, secondaries?: Array<Redis | Cluster>) => {
+  if (!secondaries || !secondaries.length) return primary;
+  const selectedReplica = secondaries[Math.floor(Math.random() * secondaries.length)];
+  return selectedReplica;
+};
+
 export const keyStoreFactory = (redisConfigKeys: TRedisConfigKeys): TKeyStoreFactory => {
-  const redis = buildRedisFromConfig(redisConfigKeys);
-  const redisLock = new Redlock([redis], { retryCount: 2, retryDelay: 200 });
+  const primaryRedis = buildRedisFromConfig(redisConfigKeys);
+  const redisReadReplicas = redisConfigKeys.REDIS_READ_REPLICAS?.map((el) => {
+    if (redisConfigKeys.REDIS_URL) {
+      const primaryNode = new URL(redisConfigKeys?.REDIS_URL);
+      primaryNode.hostname = el.host;
+      primaryNode.port = String(el.port);
+      return buildRedisFromConfig({ ...redisConfigKeys, REDIS_URL: primaryNode.toString() });
+    }
+
+    if (redisConfigKeys.REDIS_SENTINEL_HOSTS) {
+      return buildRedisFromConfig({ ...redisConfigKeys, REDIS_SENTINEL_HOSTS: [el] });
+    }
+
+    return buildRedisFromConfig({ ...redisConfigKeys, REDIS_CLUSTER_HOSTS: [el] });
+  });
+  const redisLock = new Redlock([primaryRedis], { retryCount: 2, retryDelay: 200 });
 
   const setItem = async (key: string, value: string | number | Buffer, prefix?: string) =>
-    redis.set(prefix ? `${prefix}:${key}` : key, value);
+    primaryRedis.set(prefix ? `${prefix}:${key}` : key, value);
 
-  const getItem = async (key: string, prefix?: string) => redis.get(prefix ? `${prefix}:${key}` : key);
+  const getItem = async (key: string, prefix?: string) =>
+    pickPrimaryOrSecondaryRedis(primaryRedis, redisReadReplicas).get(prefix ? `${prefix}:${key}` : key);
 
   const getItems = async (keys: string[], prefix?: string) =>
-    redis.mget(keys.map((key) => (prefix ? `${prefix}:${key}` : key)));
+    pickPrimaryOrSecondaryRedis(primaryRedis, redisReadReplicas).mget(
+      keys.map((key) => (prefix ? `${prefix}:${key}` : key))
+    );
 
   const setItemWithExpiry = async (
     key: string,
     expiryInSeconds: number | string,
     value: string | number | Buffer,
     prefix?: string
-  ) => redis.set(prefix ? `${prefix}:${key}` : key, value, "EX", expiryInSeconds);
+  ) => primaryRedis.set(prefix ? `${prefix}:${key}` : key, value, "EX", expiryInSeconds);
 
-  const deleteItem = async (key: string) => redis.del(key);
+  const deleteItem = async (key: string) => primaryRedis.del(key);
 
   const deleteItemsByKeyIn = async (keys: string[]) => {
     if (keys.length === 0) return 0;
-    return redis.del(keys);
+    return primaryRedis.del(keys);
   };
 
   const deleteItems = async ({ pattern, batchSize = 500, delay = 1500, jitter = 200 }: TDeleteItems) => {
@@ -135,12 +159,12 @@ export const keyStoreFactory = (redisConfigKeys: TRedisConfigKeys): TKeyStoreFac
     do {
       // Await in loop is needed so that Redis is not overwhelmed
       // eslint-disable-next-line no-await-in-loop
-      const [nextCursor, keys] = await redis.scan(cursor, "MATCH", pattern, "COUNT", 1000); // Count should be 1000 - 5000 for prod loads
+      const [nextCursor, keys] = await primaryRedis.scan(cursor, "MATCH", pattern, "COUNT", 1000); // Count should be 1000 - 5000 for prod loads
       cursor = nextCursor;
 
       for (let i = 0; i < keys.length; i += batchSize) {
         const batch = keys.slice(i, i + batchSize);
-        const pipeline = redis.pipeline();
+        const pipeline = primaryRedis.pipeline();
         for (const key of batch) {
           pipeline.unlink(key);
         }
@@ -156,9 +180,9 @@ export const keyStoreFactory = (redisConfigKeys: TRedisConfigKeys): TKeyStoreFac
     return totalDeleted;
   };
 
-  const incrementBy = async (key: string, value: number) => redis.incrby(key, value);
+  const incrementBy = async (key: string, value: number) => primaryRedis.incrby(key, value);
 
-  const setExpiry = async (key: string, expiryInSeconds: number) => redis.expire(key, expiryInSeconds);
+  const setExpiry = async (key: string, expiryInSeconds: number) => primaryRedis.expire(key, expiryInSeconds);
 
   const waitTillReady = async ({
     key,
@@ -189,7 +213,13 @@ export const keyStoreFactory = (redisConfigKeys: TRedisConfigKeys): TKeyStoreFac
 
     do {
       // eslint-disable-next-line no-await-in-loop
-      const [nextCursor, keys] = await redis.scan(cursor, "MATCH", pattern, "COUNT", 1000);
+      const [nextCursor, keys] = await pickPrimaryOrSecondaryRedis(primaryRedis, redisReadReplicas).scan(
+        cursor,
+        "MATCH",
+        pattern,
+        "COUNT",
+        1000
+      );
       cursor = nextCursor;
 
       allKeys.push(...keys);
diff --git a/backend/src/lib/config/env.ts b/backend/src/lib/config/env.ts
index 922fdbcdff..c4170aac30 100644
--- a/backend/src/lib/config/env.ts
+++ b/backend/src/lib/config/env.ts
@@ -57,6 +57,22 @@ const envSchema = z
       .optional()
       .describe("Comma-separated list of Redis Cluster host:port pairs. Eg: 192.168.65.254:6379,192.168.65.254:6380")
   ),
+  REDIS_READ_REPLICAS: zpStr(
+    z
+      .string()
+      .optional()
+      .describe(
+        "Comma-separated list of Redis read replica host:port pairs. Eg: 192.168.65.254:6379,192.168.65.254:6380"
+      )
+  ),
+  REDIS_CLUSTER_ENABLE_TLS: z
+    .enum(["true", "false"])
+    .default("false")
+    .transform((el) => el === "true"),
+  REDIS_CLUSTER_AWS_ELASTICACHE_DNS_LOOKUP_MODE: z
+    .enum(["true", "false"])
+    .default("false")
+    .transform((el) => el === "true"),
   HOST: zpStr(z.string().default("localhost")),
   DB_CONNECTION_URI: zpStr(z.string().describe("Postgres database connection string")).default(
     `postgresql://${process.env.DB_USER}:${process.env.DB_PASSWORD}@${process.env.DB_HOST}:${process.env.DB_PORT}/${process.env.DB_NAME}`
@@ -377,6 +393,12 @@ const envSchema = z
         const [host, port] = el.trim().split(":");
         return { host: host.trim(), port: Number(port.trim()) };
       }),
+    REDIS_READ_REPLICAS: data.REDIS_READ_REPLICAS?.trim()
+      ?.split(",")
+      .map((el) => {
+        const [host, port] = el.trim().split(":");
+        return { host: host.trim(), port: Number(port.trim()) };
+      }),
     isSecretScanningConfigured:
       Boolean(data.SECRET_SCANNING_GIT_APP_ID) &&
       Boolean(data.SECRET_SCANNING_PRIVATE_KEY) &&
diff --git a/backend/src/lib/config/redis.ts b/backend/src/lib/config/redis.ts
index 620f0f9362..8e375ab62d 100644
--- a/backend/src/lib/config/redis.ts
+++ b/backend/src/lib/config/redis.ts
@@ -6,12 +6,17 @@ export type TRedisConfigKeys = Partial<{
   REDIS_PASSWORD: string;
   REDIS_CLUSTER_HOSTS: { host: string; port: number }[];
 
+  REDIS_CLUSTER_ENABLE_TLS: boolean;
+  // ref: https://github.com/redis/ioredis?tab=readme-ov-file#special-note-aws-elasticache-clusters-with-tls
+  REDIS_CLUSTER_AWS_ELASTICACHE_DNS_LOOKUP_MODE: boolean;
   REDIS_SENTINEL_HOSTS: { host: string; port: number }[];
   REDIS_SENTINEL_MASTER_NAME: string;
   REDIS_SENTINEL_ENABLE_TLS: boolean;
   REDIS_SENTINEL_USERNAME: string;
   REDIS_SENTINEL_PASSWORD: string;
+
+  REDIS_READ_REPLICAS: { host: string; port: number }[];
 }>;
 
 export const buildRedisFromConfig = (cfg: TRedisConfigKeys) => {
@@ -19,9 +24,13 @@ export const buildRedisFromConfig = (cfg: TRedisConfigKeys) => {
 
   if (cfg.REDIS_CLUSTER_HOSTS) {
     return new Redis.Cluster(cfg.REDIS_CLUSTER_HOSTS, {
+      dnsLookup: cfg.REDIS_CLUSTER_AWS_ELASTICACHE_DNS_LOOKUP_MODE
+        ? (address, callback) => callback(null, address)
+        : undefined,
       redisOptions: {
         username: cfg.REDIS_USERNAME,
-        password: cfg.REDIS_PASSWORD
+        password: cfg.REDIS_PASSWORD,
+        tls: cfg?.REDIS_CLUSTER_ENABLE_TLS ? {} : undefined
       }
     });
   }
diff --git a/docs/self-hosting/configuration/envars.mdx b/docs/self-hosting/configuration/envars.mdx
index 6f3f019333..e966d44605 100644
--- a/docs/self-hosting/configuration/envars.mdx
+++ b/docs/self-hosting/configuration/envars.mdx
@@ -190,6 +190,12 @@ Redis is used for caching and background tasks. You can use either a standalone
     Comma-separated list of Redis Cluster host:port pairs. ```
     192.168.65.254:26379,192.168.65.254:26380 ```
   </ParamField>
+  <ParamField query="REDIS_CLUSTER_ENABLE_TLS" type="bool" default="false" optional>
+    Enable TLS encryption for Redis Cluster connections.
+  </ParamField>
+  <ParamField query="REDIS_CLUSTER_AWS_ELASTICACHE_DNS_LOOKUP_MODE" type="bool" default="false" optional>
+    Enable this if you are using an AWS ElastiCache cluster with in-transit encryption. For more information, refer to [the ioredis documentation](https://github.com/redis/ioredis?tab=readme-ov-file#special-note-aws-elasticache-clusters-with-tls).
+  </ParamField>
   <ParamField query="REDIS_USERNAME" type="string" optional>
     Authentication username for Redis Node
   </ParamField>
@@ -197,6 +203,20 @@ Redis is used for caching and background tasks. You can use either a standalone
   <ParamField query="REDIS_PASSWORD" type="string" optional>
     Authentication password for Redis Node
   </ParamField>
+
+  <ParamField query="REDIS_READ_REPLICAS" type="string" optional>
+    Comma-separated list of Redis read replica host:port pairs. ```
+    192.168.65.254:26379,192.168.65.254:26380 ```
+
+    All other connection parameters are inherited from the primary instance.
+  </ParamField>
 
 ## Email Service
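
Below is a minimal usage sketch, not part of the patch itself: it illustrates how, once `REDIS_READ_REPLICAS` is set, reads go through a randomly selected replica while writes stay on the primary connection. The import path, endpoints, and credentials are illustrative assumptions for the example.

```ts
import { keyStoreFactory } from "@app/keystore/keystore";

// Illustrative config only: replace with your actual primary and replica endpoints.
const keyStore = keyStoreFactory({
  REDIS_URL: "redis://default:example-password@192.168.65.254:6379",
  // Equivalent of REDIS_READ_REPLICAS="192.168.65.250:6379,192.168.65.251:6379"
  // after the env parsing added in env.ts above.
  REDIS_READ_REPLICAS: [
    { host: "192.168.65.250", port: 6379 },
    { host: "192.168.65.251", port: 6379 }
  ]
});

const demo = async () => {
  // Writes always hit the primary connection.
  await keyStore.setItemWithExpiry("session:123", 60, "value");

  // Reads are served by a randomly picked replica, falling back to the
  // primary when no replicas are configured.
  const value = await keyStore.getItem("session:123");
  console.log(value);
};

void demo();
```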