fix(models): memory fixes, provider code typing, cost calculation cleanup (#2515)

* improvement(memory): should not be block scoped

* cleanup provider code

* update other providers

* cleanup fallback code

* remove flaky test

* fix memory

* move streaming fix to right level

* cleanup streaming server

* make memories workspace scoped

* update docs

* fix dedup logic

* fix streaming parsing issue where onStream was called multiple times for the same block (see the sketch after this list)

* fix(providers): support parallel agent tool calls, consolidate utils

* address greptile comments

* remove all comments

* fixed OpenRouter response format handling; fixed Groq & Cerebras response formats

* removed duplicate type
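
The onStream fix above is not visible in the diffs below (the provider diff is suppressed), so here is a purely illustrative sketch of the bug class: if streamed chunks are appended to a flat list, repeated onStream calls for the same block produce duplicate entries, while keying the accumulator by block id does not. All names here are invented, not taken from the codebase:

// Hypothetical sketch: accumulate streamed text per block id so repeated
// onStream calls for the same block extend one buffer instead of creating
// duplicate blocks. `StreamChunk`, `blockId`, and `content` are invented names.
type StreamChunk = { blockId: string; content: string }

const buffers = new Map<string, string>()

function onStream(chunk: StreamChunk): void {
  const previous = buffers.get(chunk.blockId) ?? ''
  buffers.set(chunk.blockId, previous + chunk.content)
}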

---------

Co-authored-by: waleed <walif6@gmail.com>
Vikhyath Mondreti
2025-12-22 15:59:53 -08:00
committed by GitHub
parent 086982c7a3
commit 8c2c49eb14
65 changed files with 12201 additions and 4626 deletions


@@ -0,0 +1,32 @@
-- Step 1: Add workspace_id as nullable first
ALTER TABLE "memory" ADD COLUMN "workspace_id" text;
-- Step 2: Backfill workspace_id from workflow's workspace_id
UPDATE memory m
SET workspace_id = w.workspace_id
FROM workflow w
WHERE m.workflow_id = w.id
AND w.workspace_id IS NOT NULL;
-- Step 3: Delete rows where workspace_id couldn't be resolved
DELETE FROM memory WHERE workspace_id IS NULL;
-- Step 4: Now make workspace_id NOT NULL
ALTER TABLE "memory" ALTER COLUMN "workspace_id" SET NOT NULL;
-- Step 5: Drop old constraint and indexes
ALTER TABLE "memory" DROP CONSTRAINT IF EXISTS "memory_workflow_id_workflow_id_fk";
--> statement-breakpoint
DROP INDEX IF EXISTS "memory_workflow_idx";
--> statement-breakpoint
DROP INDEX IF EXISTS "memory_workflow_key_idx";
-- Step 6: Add new foreign key and indexes
ALTER TABLE "memory" ADD CONSTRAINT "memory_workspace_id_workspace_id_fk" FOREIGN KEY ("workspace_id") REFERENCES "public"."workspace"("id") ON DELETE cascade ON UPDATE no action;
--> statement-breakpoint
CREATE INDEX "memory_workspace_idx" ON "memory" USING btree ("workspace_id");
--> statement-breakpoint
CREATE UNIQUE INDEX "memory_workspace_key_idx" ON "memory" USING btree ("workspace_id","key");
-- Step 7: Drop old column
ALTER TABLE "memory" DROP COLUMN IF EXISTS "workflow_id";

File diff suppressed because it is too large
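
The suppressed diff presumably holds the provider changes, so the "support parallel agent tool calls" item from the commit message cannot be shown directly. A hedged sketch of the general technique, with all names invented:

interface ToolCall {
  id: string
  name: string
  arguments: Record<string, unknown>
}

// Run every tool call from a single model response concurrently instead of
// awaiting each one in sequence; Promise.all preserves result order.
async function executeToolCalls(
  calls: ToolCall[],
  execute: (call: ToolCall) => Promise<unknown>
): Promise<unknown[]> {
  return Promise.all(calls.map((call) => execute(call)))
}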


@@ -904,6 +904,13 @@
     "when": 1766275541149,
     "tag": "0129_stormy_nightmare",
     "breakpoints": true
+  },
+  {
+    "idx": 130,
+    "version": "7",
+    "when": 1766433914366,
+    "tag": "0130_bored_master_chief",
+    "breakpoints": true
   }
 ]
 }


@@ -962,24 +962,21 @@ export const memory = pgTable(
   'memory',
   {
     id: text('id').primaryKey(),
-    workflowId: text('workflow_id').references(() => workflow.id, { onDelete: 'cascade' }),
-    key: text('key').notNull(), // Conversation ID provided by user with format: conversationId:blockId
-    data: jsonb('data').notNull(), // Stores agent messages as array of {role, content} objects
+    workspaceId: text('workspace_id')
+      .notNull()
+      .references(() => workspace.id, { onDelete: 'cascade' }),
+    key: text('key').notNull(),
+    data: jsonb('data').notNull(),
     createdAt: timestamp('created_at').notNull().defaultNow(),
     updatedAt: timestamp('updated_at').notNull().defaultNow(),
     deletedAt: timestamp('deleted_at'),
   },
   (table) => {
     return {
-      // Add index on key for faster lookups
       keyIdx: index('memory_key_idx').on(table.key),
-      // Add index on workflowId for faster filtering
-      workflowIdx: index('memory_workflow_idx').on(table.workflowId),
-      // Compound unique index to ensure keys are unique per workflow
-      uniqueKeyPerWorkflowIdx: uniqueIndex('memory_workflow_key_idx').on(
-        table.workflowId,
+      workspaceIdx: index('memory_workspace_idx').on(table.workspaceId),
+      uniqueKeyPerWorkspaceIdx: uniqueIndex('memory_workspace_key_idx').on(
+        table.workspaceId,
         table.key
       ),
     }
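
The new unique index on ("workspace_id","key") is what makes workspace-scoped upserts safe. A minimal sketch using Drizzle's onConflictDoUpdate, assuming the table above is exported as `memory` and that the `db` and schema import paths exist (both are assumptions, not part of this commit):

import { db } from '@/db' // assumed import path
import { memory } from '@/db/schema' // assumes the pgTable above is exported as `memory`

// Upsert keyed on (workspace_id, key); the memory_workspace_key_idx unique
// index lets ON CONFLICT target that column pair atomically.
const messages = [{ role: 'user', content: 'hello' }] // illustrative data
await db
  .insert(memory)
  .values({
    id: crypto.randomUUID(),
    workspaceId: 'ws_123', // illustrative ids
    key: 'conversation-42',
    data: messages,
  })
  .onConflictDoUpdate({
    target: [memory.workspaceId, memory.key],
    set: { data: messages, updatedAt: new Date() },
  })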