Remove soft-delete (deleted_at) and denormalized row_count from user tables; hard-delete tables/rows and compute row counts on demand via COUNT queries (adds migration 0141)

This commit is contained in:
Lakee Sivaraya
2026-01-15 10:46:11 -08:00
parent 793c888808
commit c3afbaebce
11 changed files with 10170 additions and 187 deletions

View File

@@ -146,15 +146,14 @@ export async function GET(request: NextRequest, { params }: TableRouteParams) {
/**
* DELETE /api/table/[tableId]?workspaceId=xxx
*
* Soft deletes a table.
* Deletes a table.
*
* @param request - The incoming HTTP request
* @param context - Route context containing tableId param
* @returns JSON response confirming deletion or error
*
* @remarks
* This performs a soft delete, marking the table as deleted.
* Rows remain in the database but become inaccessible.
* This performs a hard delete, removing the table and its rows.
* The operation requires write access to the table.
*/
export async function DELETE(request: NextRequest, { params }: TableRouteParams) {

View File

@@ -1,7 +1,7 @@
import { db } from '@sim/db'
import { userTableDefinitions, userTableRows } from '@sim/db/schema'
import { userTableRows } from '@sim/db/schema'
import { createLogger } from '@sim/logger'
import { and, eq, sql } from 'drizzle-orm'
import { and, eq } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { checkHybridAuth } from '@/lib/auth/hybrid'
@@ -322,35 +322,17 @@ export async function DELETE(request: NextRequest, { params }: RowRouteParams) {
}
}
// Delete row in a transaction to ensure atomicity
const deletedRow = await db.transaction(async (trx) => {
// Delete row
const [deleted] = await trx
.delete(userTableRows)
.where(
and(
eq(userTableRows.id, rowId),
eq(userTableRows.tableId, tableId),
eq(userTableRows.workspaceId, actualWorkspaceId)
)
// Delete row
const [deletedRow] = await db
.delete(userTableRows)
.where(
and(
eq(userTableRows.id, rowId),
eq(userTableRows.tableId, tableId),
eq(userTableRows.workspaceId, actualWorkspaceId)
)
.returning()
if (!deleted) {
return null
}
// Update row count
await trx
.update(userTableDefinitions)
.set({
rowCount: sql`${userTableDefinitions.rowCount} - 1`,
updatedAt: new Date(),
})
.where(eq(userTableDefinitions.id, tableId))
return deleted
})
)
.returning()
if (!deletedRow) {
return NextResponse.json({ error: 'Row not found' }, { status: 404 })

View File

@@ -1,5 +1,5 @@
import { db } from '@sim/db'
import { userTableDefinitions, userTableRows } from '@sim/db/schema'
import { userTableRows } from '@sim/db/schema'
import { createLogger } from '@sim/logger'
import { and, eq, sql } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
@@ -170,7 +170,7 @@ async function handleBatchInsert(
})
if (!validation.valid) return validation.response
// Insert all rows in a transaction to ensure atomicity
// Insert all rows
const now = new Date()
const rowsToInsert = validated.rows.map((data) => ({
id: `row_${crypto.randomUUID().replace(/-/g, '')}`,
@@ -182,21 +182,7 @@ async function handleBatchInsert(
createdBy: userId,
}))
const insertedRows = await db.transaction(async (trx) => {
// Insert all rows
const inserted = await trx.insert(userTableRows).values(rowsToInsert).returning()
// Update row count
await trx
.update(userTableDefinitions)
.set({
rowCount: sql`${userTableDefinitions.rowCount} + ${validated.rows.length}`,
updatedAt: now,
})
.where(eq(userTableDefinitions.id, tableId))
return inserted
})
const insertedRows = await db.insert(userTableRows).values(rowsToInsert).returning()
logger.info(`[${requestId}] Batch inserted ${insertedRows.length} rows into table ${tableId}`)
@@ -313,36 +299,22 @@ export async function POST(request: NextRequest, { params }: TableRowsRouteParam
)
}
// Insert row in a transaction to ensure atomicity
// Insert row
const rowId = `row_${crypto.randomUUID().replace(/-/g, '')}`
const now = new Date()
const [row] = await db.transaction(async (trx) => {
// Insert row
const insertedRow = await trx
.insert(userTableRows)
.values({
id: rowId,
tableId,
workspaceId,
data: validated.data,
createdAt: now,
updatedAt: now,
createdBy: authResult.userId,
})
.returning()
// Update row count
await trx
.update(userTableDefinitions)
.set({
rowCount: sql`${userTableDefinitions.rowCount} + 1`,
updatedAt: now,
})
.where(eq(userTableDefinitions.id, tableId))
return insertedRow
})
const [row] = await db
.insert(userTableRows)
.values({
id: rowId,
tableId,
workspaceId,
data: validated.data,
createdAt: now,
updatedAt: now,
createdBy: authResult.userId,
})
.returning()
logger.info(`[${requestId}] Inserted row ${rowId} into table ${tableId}`)
@@ -857,15 +829,6 @@ export async function DELETE(request: NextRequest, { params }: TableRowsRoutePar
`[${requestId}] Deleted batch ${Math.floor(i / TABLE_LIMITS.DELETE_BATCH_SIZE) + 1} (${totalDeleted}/${rowIds.length} rows)`
)
}
// Update row count
await trx
.update(userTableDefinitions)
.set({
rowCount: sql`${userTableDefinitions.rowCount} - ${matchingRows.length}`,
updatedAt: new Date(),
})
.where(eq(userTableDefinitions.id, tableId))
})
logger.info(`[${requestId}] Deleted ${matchingRows.length} rows from table ${tableId}`)

View File

@@ -1,5 +1,5 @@
import { db } from '@sim/db'
import { userTableDefinitions, userTableRows } from '@sim/db/schema'
import { userTableRows } from '@sim/db/schema'
import { createLogger } from '@sim/logger'
import { and, eq, sql } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
@@ -211,15 +211,6 @@ export async function POST(request: NextRequest, { params }: UpsertRouteParams)
})
.returning()
// Update row count for insert
await trx
.update(userTableDefinitions)
.set({
rowCount: sql`${userTableDefinitions.rowCount} + 1`,
updatedAt: now,
})
.where(eq(userTableDefinitions.id, tableId))
return {
row: insertedRow,
operation: 'insert' as const,

View File

@@ -1,17 +1,26 @@
import { db } from '@sim/db'
import { userTableDefinitions } from '@sim/db/schema'
import { userTableDefinitions, userTableRows } from '@sim/db/schema'
import { createLogger } from '@sim/logger'
import { and, eq, isNull } from 'drizzle-orm'
import { count, eq } from 'drizzle-orm'
import { NextResponse } from 'next/server'
import type { ColumnDefinition, TableSchema } from '@/lib/table'
import { getUserEntityPermissions } from '@/lib/workspaces/permissions/utils'
const logger = createLogger('TableUtils')
/**
 * Returns the current number of rows stored for the given table.
 *
 * Counts user_table_rows directly via a COUNT(*) query rather than reading a
 * cached value (the denormalized rowCount column was removed from
 * userTableDefinitions in this change, so the count is computed on demand).
 *
 * @param tableId - The table whose rows are counted
 * @returns Promise resolving to the row count (0 when no rows match)
 */
async function getTableRowCount(tableId: string): Promise<number> {
const [result] = await db
.select({ count: count() })
.from(userTableRows)
.where(eq(userTableRows.tableId, tableId))
// Drizzle may surface COUNT as a string or number depending on the driver;
// Number() normalizes, and ?? 0 covers the (unexpected) empty result set.
return Number(result?.count ?? 0)
}
/**
* Represents the core data structure for a user-defined table as stored in the database.
*
* This extends the base TableDefinition with DB-specific fields like createdBy and deletedAt.
* This extends the base TableDefinition with DB-specific fields like createdBy.
*/
export interface TableData {
/** Unique identifier for the table */
@@ -30,8 +39,6 @@ export interface TableData {
maxRows: number
/** Current number of rows in the table */
rowCount: number
/** Timestamp when the table was soft-deleted, if applicable */
deletedAt?: Date | null
/** Timestamp when the table was created */
createdAt: Date
/** Timestamp when the table was last updated */
@@ -97,7 +104,7 @@ async function checkTableAccessInternal(
workspaceId: userTableDefinitions.workspaceId,
})
.from(userTableDefinitions)
.where(and(eq(userTableDefinitions.id, tableId), isNull(userTableDefinitions.deletedAt)))
.where(eq(userTableDefinitions.id, tableId))
.limit(1)
if (table.length === 0) {
@@ -273,7 +280,7 @@ export async function checkAccessWithFullTable(
const [tableData] = await db
.select()
.from(userTableDefinitions)
.where(and(eq(userTableDefinitions.id, tableId), isNull(userTableDefinitions.deletedAt)))
.where(eq(userTableDefinitions.id, tableId))
.limit(1)
if (!tableData) {
@@ -281,7 +288,8 @@ export async function checkAccessWithFullTable(
return NextResponse.json({ error: 'Table not found' }, { status: 404 })
}
const table = tableData as unknown as TableData
const rowCount = await getTableRowCount(tableId)
const table = { ...tableData, rowCount } as unknown as TableData
// Case 1: User created the table directly (always has full access)
if (table.createdBy === userId) {
@@ -319,10 +327,10 @@ export async function checkAccessWithFullTable(
}
/**
* Fetches a table by ID with soft-delete awareness.
* Fetches a table by ID.
*
* @param tableId - The unique identifier of the table to fetch
* @returns Promise resolving to table data or null if not found/deleted
* @returns Promise resolving to table data or null if not found
*
* @example
* ```typescript
@@ -336,14 +344,15 @@ export async function getTableById(tableId: string): Promise<TableData | null> {
const [table] = await db
.select()
.from(userTableDefinitions)
.where(and(eq(userTableDefinitions.id, tableId), isNull(userTableDefinitions.deletedAt)))
.where(eq(userTableDefinitions.id, tableId))
.limit(1)
if (!table) {
return null
}
return table as unknown as TableData
const rowCount = await getTableRowCount(tableId)
return { ...table, rowCount } as unknown as TableData
}
/**
@@ -371,7 +380,7 @@ export async function verifyTableWorkspace(tableId: string, workspaceId: string)
const table = await db
.select({ workspaceId: userTableDefinitions.workspaceId })
.from(userTableDefinitions)
.where(and(eq(userTableDefinitions.id, tableId), isNull(userTableDefinitions.deletedAt)))
.where(eq(userTableDefinitions.id, tableId))
.limit(1)
if (table.length === 0) {

View File

@@ -10,7 +10,7 @@
import { db } from '@sim/db'
import { userTableDefinitions, userTableRows } from '@sim/db/schema'
import { createLogger } from '@sim/logger'
import { and, count, eq, isNull, sql } from 'drizzle-orm'
import { and, count, eq, sql } from 'drizzle-orm'
import { TABLE_LIMITS } from './constants'
import { buildFilterClause, buildSortClause } from './query-builder'
import type {
@@ -39,6 +39,15 @@ import {
const logger = createLogger('TableService')
/**
 * Counts the rows currently stored for a table with a direct COUNT(*) query
 * on user_table_rows.
 *
 * NOTE(review): this helper is duplicated in the table utils module of this
 * same commit — consider extracting a single shared implementation.
 *
 * @param tableId - The table whose rows are counted
 * @returns Promise resolving to the row count (0 when no rows match)
 */
async function getTableRowCount(tableId: string): Promise<number> {
const [result] = await db
.select({ count: count() })
.from(userTableRows)
.where(eq(userTableRows.tableId, tableId))
// Number() normalizes driver-dependent COUNT representations; ?? 0 guards
// against an empty result set.
return Number(result?.count ?? 0)
}
/**
* Gets a table by ID with full details.
*
@@ -49,18 +58,19 @@ export async function getTableById(tableId: string): Promise<TableDefinition | n
const results = await db
.select()
.from(userTableDefinitions)
.where(and(eq(userTableDefinitions.id, tableId), isNull(userTableDefinitions.deletedAt)))
.where(eq(userTableDefinitions.id, tableId))
.limit(1)
if (results.length === 0) return null
const table = results[0]
const rowCount = await getTableRowCount(tableId)
return {
id: table.id,
name: table.name,
description: table.description,
schema: table.schema as TableSchema,
rowCount: table.rowCount,
rowCount,
maxRows: table.maxRows,
workspaceId: table.workspaceId,
createdAt: table.createdAt,
@@ -78,17 +88,26 @@ export async function listTables(workspaceId: string): Promise<TableDefinition[]
const tables = await db
.select()
.from(userTableDefinitions)
.where(
and(eq(userTableDefinitions.workspaceId, workspaceId), isNull(userTableDefinitions.deletedAt))
)
.where(eq(userTableDefinitions.workspaceId, workspaceId))
.orderBy(userTableDefinitions.createdAt)
const rowCounts = await db
.select({
tableId: userTableRows.tableId,
rowCount: count(),
})
.from(userTableRows)
.where(eq(userTableRows.workspaceId, workspaceId))
.groupBy(userTableRows.tableId)
const rowCountByTable = new Map(rowCounts.map((row) => [row.tableId, Number(row.rowCount ?? 0)]))
return tables.map((t) => ({
id: t.id,
name: t.name,
description: t.description,
schema: t.schema as TableSchema,
rowCount: t.rowCount,
rowCount: rowCountByTable.get(t.id) ?? 0,
maxRows: t.maxRows,
workspaceId: t.workspaceId,
createdAt: t.createdAt,
@@ -124,12 +143,7 @@ export async function createTable(
const existingCount = await db
.select({ count: count() })
.from(userTableDefinitions)
.where(
and(
eq(userTableDefinitions.workspaceId, data.workspaceId),
isNull(userTableDefinitions.deletedAt)
)
)
.where(eq(userTableDefinitions.workspaceId, data.workspaceId))
if (existingCount[0].count >= TABLE_LIMITS.MAX_TABLES_PER_WORKSPACE) {
throw new Error(
@@ -144,8 +158,7 @@ export async function createTable(
.where(
and(
eq(userTableDefinitions.workspaceId, data.workspaceId),
eq(userTableDefinitions.name, data.name),
isNull(userTableDefinitions.deletedAt)
eq(userTableDefinitions.name, data.name)
)
)
.limit(1)
@@ -164,7 +177,6 @@ export async function createTable(
schema: data.schema,
workspaceId: data.workspaceId,
createdBy: data.userId,
rowCount: 0,
maxRows: TABLE_LIMITS.MAX_ROWS_PER_TABLE,
createdAt: now,
updatedAt: now,
@@ -179,7 +191,7 @@ export async function createTable(
name: newTable.name,
description: newTable.description,
schema: newTable.schema as TableSchema,
rowCount: newTable.rowCount,
rowCount: 0,
maxRows: newTable.maxRows,
workspaceId: newTable.workspaceId,
createdAt: newTable.createdAt,
@@ -188,24 +200,16 @@ export async function createTable(
}
/**
* Deletes a table (soft delete).
*
* Note: Rows are not soft-deleted as they don't have a deletedAt column.
* They remain in the database but are orphaned (not accessible via normal queries
* since the parent table is soft-deleted).
* Deletes a table (hard delete).
*
* @param tableId - Table ID to delete
* @param requestId - Request ID for logging
*/
export async function deleteTable(tableId: string, requestId: string): Promise<void> {
const now = new Date()
// Soft delete the table only
// Rows don't have deletedAt - they're effectively orphaned when table is soft-deleted
await db
.update(userTableDefinitions)
.set({ deletedAt: now, updatedAt: now })
.where(eq(userTableDefinitions.id, tableId))
await db.transaction(async (trx) => {
await trx.delete(userTableRows).where(eq(userTableRows.tableId, tableId))
await trx.delete(userTableDefinitions).where(eq(userTableDefinitions.id, tableId))
})
logger.info(`[${requestId}] Deleted table ${tableId}`)
}
@@ -224,8 +228,10 @@ export async function insertRow(
table: TableDefinition,
requestId: string
): Promise<TableRow> {
const rowCount = await getTableRowCount(data.tableId)
// Check capacity
if (table.rowCount >= table.maxRows) {
if (rowCount >= table.maxRows) {
throw new Error(`Table has reached maximum row limit (${table.maxRows})`)
}
@@ -271,16 +277,7 @@ export async function insertRow(
updatedAt: now,
}
await db.transaction(async (trx) => {
await trx.insert(userTableRows).values(newRow)
await trx
.update(userTableDefinitions)
.set({
rowCount: sql`${userTableDefinitions.rowCount} + 1`,
updatedAt: now,
})
.where(eq(userTableDefinitions.id, data.tableId))
})
await db.insert(userTableRows).values(newRow)
logger.info(`[${requestId}] Inserted row ${rowId} into table ${data.tableId}`)
@@ -306,11 +303,13 @@ export async function batchInsertRows(
table: TableDefinition,
requestId: string
): Promise<TableRow[]> {
const currentRowCount = await getTableRowCount(data.tableId)
// Check capacity
const remainingCapacity = table.maxRows - table.rowCount
const remainingCapacity = table.maxRows - currentRowCount
if (remainingCapacity < data.rows.length) {
throw new Error(
`Insufficient capacity. Can only insert ${remainingCapacity} more rows (table has ${table.rowCount}/${table.maxRows} rows)`
`Insufficient capacity. Can only insert ${remainingCapacity} more rows (table has ${currentRowCount}/${table.maxRows} rows)`
)
}
@@ -359,16 +358,7 @@ export async function batchInsertRows(
updatedAt: now,
}))
await db.transaction(async (trx) => {
await trx.insert(userTableRows).values(rowsToInsert)
await trx
.update(userTableDefinitions)
.set({
rowCount: sql`${userTableDefinitions.rowCount} + ${data.rows.length}`,
updatedAt: now,
})
.where(eq(userTableDefinitions.id, data.tableId))
})
await db.insert(userTableRows).values(rowsToInsert)
logger.info(`[${requestId}] Batch inserted ${data.rows.length} rows into table ${data.tableId}`)
@@ -584,19 +574,7 @@ export async function deleteRow(
throw new Error('Row not found')
}
const now = new Date()
await db.transaction(async (trx) => {
await trx.delete(userTableRows).where(eq(userTableRows.id, rowId))
await trx
.update(userTableDefinitions)
.set({
rowCount: sql`${userTableDefinitions.rowCount} - 1`,
updatedAt: now,
})
.where(eq(userTableDefinitions.id, tableId))
})
await db.delete(userTableRows).where(eq(userTableRows.id, rowId))
logger.info(`[${requestId}] Deleted row ${rowId} from table ${tableId}`)
}
@@ -744,15 +722,6 @@ export async function deleteRowsByFilter(
)
)
}
// Update row count
await trx
.update(userTableDefinitions)
.set({
rowCount: sql`${userTableDefinitions.rowCount} - ${matchingRows.length}`,
updatedAt: new Date(),
})
.where(eq(userTableDefinitions.id, data.tableId))
})
logger.info(`[${requestId}] Deleted ${matchingRows.length} rows from table ${data.tableId}`)

View File

@@ -0,0 +1,47 @@
-- Migration 0141: remove soft-delete (deleted_at) and the denormalized
-- row_count column from user table definitions; row counts are now computed
-- on demand from user_table_rows.
--
-- WARNING(review): this migration is destructive. DROP TABLE discards ALL
-- existing user_table_rows and user_table_definitions data instead of using
-- ALTER TABLE ... DROP COLUMN. Confirm this is acceptable (e.g. feature not
-- yet released) before running against a production database.
DROP TABLE IF EXISTS "user_table_rows";
--> statement-breakpoint
DROP TABLE IF EXISTS "user_table_definitions";
--> statement-breakpoint
-- Recreated without row_count and deleted_at (compare schema diff in this commit).
CREATE TABLE "user_table_definitions" (
"id" text PRIMARY KEY NOT NULL,
"workspace_id" text NOT NULL,
"name" text NOT NULL,
"description" text,
"schema" jsonb NOT NULL,
"max_rows" integer DEFAULT 10000 NOT NULL,
"created_by" text NOT NULL,
"created_at" timestamp DEFAULT now() NOT NULL,
"updated_at" timestamp DEFAULT now() NOT NULL
);
--> statement-breakpoint
CREATE TABLE "user_table_rows" (
"id" text PRIMARY KEY NOT NULL,
"table_id" text NOT NULL,
"workspace_id" text NOT NULL,
"data" jsonb NOT NULL,
"created_at" timestamp DEFAULT now() NOT NULL,
"updated_at" timestamp DEFAULT now() NOT NULL,
"created_by" text
);
--> statement-breakpoint
ALTER TABLE "user_table_definitions" ADD CONSTRAINT "user_table_definitions_workspace_id_workspace_id_fk" FOREIGN KEY ("workspace_id") REFERENCES "public"."workspace"("id") ON DELETE cascade ON UPDATE no action;
--> statement-breakpoint
ALTER TABLE "user_table_definitions" ADD CONSTRAINT "user_table_definitions_created_by_user_id_fk" FOREIGN KEY ("created_by") REFERENCES "public"."user"("id") ON DELETE cascade ON UPDATE no action;
--> statement-breakpoint
-- Rows cascade-delete with their parent table definition.
ALTER TABLE "user_table_rows" ADD CONSTRAINT "user_table_rows_table_id_user_table_definitions_id_fk" FOREIGN KEY ("table_id") REFERENCES "public"."user_table_definitions"("id") ON DELETE cascade ON UPDATE no action;
--> statement-breakpoint
ALTER TABLE "user_table_rows" ADD CONSTRAINT "user_table_rows_workspace_id_workspace_id_fk" FOREIGN KEY ("workspace_id") REFERENCES "public"."workspace"("id") ON DELETE cascade ON UPDATE no action;
--> statement-breakpoint
-- created_by is nullable, so deleting a user orphans (rather than deletes) their rows.
ALTER TABLE "user_table_rows" ADD CONSTRAINT "user_table_rows_created_by_user_id_fk" FOREIGN KEY ("created_by") REFERENCES "public"."user"("id") ON DELETE set null ON UPDATE no action;
--> statement-breakpoint
CREATE INDEX "user_table_def_workspace_id_idx" ON "user_table_definitions" USING btree ("workspace_id");
--> statement-breakpoint
CREATE UNIQUE INDEX "user_table_def_workspace_name_unique" ON "user_table_definitions" USING btree ("workspace_id","name");
--> statement-breakpoint
-- table_id index supports the on-demand COUNT(*) row-count queries introduced in this commit.
CREATE INDEX "user_table_rows_table_id_idx" ON "user_table_rows" USING btree ("table_id");
--> statement-breakpoint
CREATE INDEX "user_table_rows_workspace_id_idx" ON "user_table_rows" USING btree ("workspace_id");
--> statement-breakpoint
-- GIN index enables efficient containment/filter queries over the jsonb row data.
CREATE INDEX "user_table_rows_data_gin_idx" ON "user_table_rows" USING gin ("data");
--> statement-breakpoint
CREATE INDEX "user_table_rows_workspace_table_idx" ON "user_table_rows" USING btree ("workspace_id","table_id");

File diff suppressed because it is too large Load Diff

View File

@@ -981,6 +981,13 @@
"when": 1768267681365,
"tag": "0140_awesome_killer_shrike",
"breakpoints": true
},
{
"idx": 141,
"version": "7",
"when": 1768502155819,
"tag": "0141_steady_moondragon",
"breakpoints": true
}
]
}

View File

@@ -2126,13 +2126,11 @@ export const userTableDefinitions = pgTable(
*/
schema: jsonb('schema').notNull(),
maxRows: integer('max_rows').notNull().default(10000),
rowCount: integer('row_count').notNull().default(0),
createdBy: text('created_by')
.notNull()
.references(() => user.id, { onDelete: 'cascade' }),
createdAt: timestamp('created_at').notNull().defaultNow(),
updatedAt: timestamp('updated_at').notNull().defaultNow(),
deletedAt: timestamp('deleted_at'),
},
(table) => ({
workspaceIdIdx: index('user_table_def_workspace_id_idx').on(table.workspaceId),
@@ -2140,7 +2138,6 @@ export const userTableDefinitions = pgTable(
table.workspaceId,
table.name
),
deletedAtIdx: index('user_table_def_deleted_at_idx').on(table.deletedAt),
})
)

View File

@@ -23,6 +23,7 @@ export function createMockSqlOperators() {
gte: vi.fn((a, b) => ({ type: 'gte', left: a, right: b })),
lt: vi.fn((a, b) => ({ type: 'lt', left: a, right: b })),
lte: vi.fn((a, b) => ({ type: 'lte', left: a, right: b })),
count: vi.fn((column) => ({ type: 'count', column })),
and: vi.fn((...conditions) => ({ type: 'and', conditions })),
or: vi.fn((...conditions) => ({ type: 'or', conditions })),
not: vi.fn((condition) => ({ type: 'not', condition })),