feat(plugins): expose llm input/output hook payloads (openclaw#16724) thanks @SecondThread

Verified:
- pnpm build
- pnpm check
- pnpm test:macmini

Co-authored-by: SecondThread <18317476+SecondThread@users.noreply.github.com>
Co-authored-by: Tak Hoffman <781889+Takhoffman@users.noreply.github.com>
This commit is contained in:
David Harmeyer
2026-02-15 14:01:00 -08:00
committed by GitHub
parent 3c6cff5758
commit 7c822d039b
8 changed files with 222 additions and 3 deletions

View File

@@ -6,6 +6,7 @@ Docs: https://docs.openclaw.ai
### Changes
- Plugins: expose `llm_input` and `llm_output` hook payloads so extensions can observe prompt/input context and model output usage details. (#16724) Thanks @SecondThread.
- Subagents: nested sub-agents (sub-sub-agents) with configurable depth. Set `agents.defaults.subagents.maxSpawnDepth: 2` to allow sub-agents to spawn their own children. Includes `maxChildrenPerAgent` limit (default 5), depth-aware tool policy, and proper announce chain routing. (#14447) Thanks @tyler6204.
- Discord: components v2 UI + embeds passthrough + exec approval UX refinements (CV2 containers, button layout, Discord-forwarding skip). Thanks @thewilloftheshadow.
- Slack/Discord/Telegram: add per-channel ack reaction overrides (account/channel-level) to support platform-specific emoji formats. (#17092) Thanks @zerone0x.

View File

@@ -954,6 +954,32 @@ export async function runEmbeddedAttempt(
);
}
if (hookRunner?.hasHooks("llm_input")) {
hookRunner
.runLlmInput(
{
runId: params.runId,
sessionId: params.sessionId,
provider: params.provider,
model: params.modelId,
systemPrompt: systemPromptText,
prompt: effectivePrompt,
historyMessages: activeSession.messages,
imagesCount: imageResult.images.length,
},
{
agentId: hookAgentId,
sessionKey: params.sessionKey,
sessionId: params.sessionId,
workspaceDir: params.workspaceDir,
messageProvider: params.messageProvider ?? undefined,
},
)
.catch((err) => {
log.warn(`llm_input hook failed: ${String(err)}`);
});
}
// Only pass images option if there are actually images to pass
// This avoids potential issues with models that don't expect the images parameter
if (imageResult.images.length > 0) {
@@ -1103,6 +1129,31 @@ export async function runEmbeddedAttempt(
)
.map((entry) => ({ toolName: entry.toolName, meta: entry.meta }));
if (hookRunner?.hasHooks("llm_output")) {
hookRunner
.runLlmOutput(
{
runId: params.runId,
sessionId: params.sessionId,
provider: params.provider,
model: params.modelId,
assistantTexts,
lastAssistant,
usage: getUsageTotals(),
},
{
agentId: hookAgentId,
sessionKey: params.sessionKey,
sessionId: params.sessionId,
workspaceDir: params.workspaceDir,
messageProvider: params.messageProvider ?? undefined,
},
)
.catch((err) => {
log.warn(`llm_output hook failed: ${String(err)}`);
});
}
return {
aborted,
timedOut,

View File

@@ -450,6 +450,7 @@ export async function runReplyAgent(params: {
promptTokens,
total: totalTokens,
},
lastCallUsage: runResult.meta.agentMeta?.lastCallUsage,
context: {
limit: contextTokensUsed,
used: totalTokens,

View File

@@ -97,15 +97,18 @@ export async function resolveControlUiDistIndexPath(
for (let i = 0; i < 8; i++) {
const pkgJsonPath = path.join(dir, "package.json");
const indexPath = path.join(dir, "dist", "control-ui", "index.html");
if (fs.existsSync(pkgJsonPath) && fs.existsSync(indexPath)) {
if (fs.existsSync(pkgJsonPath)) {
try {
const raw = fs.readFileSync(pkgJsonPath, "utf-8");
const parsed = JSON.parse(raw) as { name?: unknown };
if (parsed.name === "openclaw") {
return indexPath;
return fs.existsSync(indexPath) ? indexPath : null;
}
// Stop at the first package boundary to avoid resolving through unrelated ancestors.
return null;
} catch {
// Invalid package.json, continue searching
// Invalid package.json at package boundary; abort fallback resolution.
return null;
}
}
const parent = path.dirname(dir);

View File

@@ -22,6 +22,13 @@ export type DiagnosticUsageEvent = DiagnosticBaseEvent & {
promptTokens?: number;
total?: number;
};
lastCallUsage?: {
input?: number;
output?: number;
cacheRead?: number;
cacheWrite?: number;
total?: number;
};
context?: {
limit?: number;
used?: number;

View File

@@ -14,6 +14,8 @@ import type {
PluginHookBeforeAgentStartEvent,
PluginHookBeforeAgentStartResult,
PluginHookBeforeCompactionEvent,
PluginHookLlmInputEvent,
PluginHookLlmOutputEvent,
PluginHookBeforeResetEvent,
PluginHookBeforeToolCallEvent,
PluginHookBeforeToolCallResult,
@@ -41,6 +43,8 @@ export type {
PluginHookAgentContext,
PluginHookBeforeAgentStartEvent,
PluginHookBeforeAgentStartResult,
PluginHookLlmInputEvent,
PluginHookLlmOutputEvent,
PluginHookAgentEndEvent,
PluginHookBeforeCompactionEvent,
PluginHookBeforeResetEvent,
@@ -212,6 +216,24 @@ export function createHookRunner(registry: PluginRegistry, options: HookRunnerOp
return runVoidHook("agent_end", event, ctx);
}
/**
 * Run the `llm_input` hook.
 * Allows plugins to observe the exact input payload sent to the LLM.
 * Runs in parallel (fire-and-forget); callers catch rejections rather than await.
 * @param event Snapshot of the prompt, system prompt, history and model metadata for this call.
 * @param ctx Agent context (agent id, session, workspace) the call runs under.
 * @returns Resolves when all registered handlers have settled; result is ignored by callers.
 */
async function runLlmInput(event: PluginHookLlmInputEvent, ctx: PluginHookAgentContext) {
return runVoidHook("llm_input", event, ctx);
}
/**
 * Run the `llm_output` hook.
 * Allows plugins to observe the exact output payload returned by the LLM.
 * Runs in parallel (fire-and-forget); callers catch rejections rather than await.
 * @param event Assistant texts, last assistant message and usage totals for this call.
 * @param ctx Agent context (agent id, session, workspace) the call ran under.
 * @returns Resolves when all registered handlers have settled; result is ignored by callers.
 */
async function runLlmOutput(event: PluginHookLlmOutputEvent, ctx: PluginHookAgentContext) {
return runVoidHook("llm_output", event, ctx);
}
/**
* Run before_compaction hook.
*/
@@ -458,6 +480,8 @@ export function createHookRunner(registry: PluginRegistry, options: HookRunnerOp
return {
// Agent hooks
runBeforeAgentStart,
runLlmInput,
runLlmOutput,
runAgentEnd,
runBeforeCompaction,
runAfterCompaction,

View File

@@ -297,6 +297,8 @@ export type PluginDiagnostic = {
export type PluginHookName =
| "before_agent_start"
| "llm_input"
| "llm_output"
| "agent_end"
| "before_compaction"
| "after_compaction"
@@ -332,6 +334,35 @@ export type PluginHookBeforeAgentStartResult = {
prependContext?: string;
};
// llm_input hook
/**
 * Payload passed to `llm_input` hook handlers: a snapshot of the request
 * being sent to the LLM. Handlers observe only; they cannot mutate the call.
 */
export type PluginHookLlmInputEvent = {
// Identifiers for correlating this call with a run/session.
runId: string;
sessionId: string;
// Provider and model ids as configured for this attempt.
provider: string;
model: string;
// System prompt text, when one is set for this call.
systemPrompt?: string;
// The effective user prompt for this call.
prompt: string;
// Prior session messages; element shape is provider-specific — hence unknown[].
historyMessages: unknown[];
// Number of images attached to this call (contents are not exposed here).
imagesCount: number;
};
// llm_output hook
/**
 * Payload passed to `llm_output` hook handlers: a snapshot of what the LLM
 * returned for one call, plus token-usage totals when available.
 */
export type PluginHookLlmOutputEvent = {
// Identifiers for correlating this call with a run/session.
runId: string;
sessionId: string;
// Provider and model ids as configured for this attempt.
provider: string;
model: string;
// All assistant text chunks produced by this call, in order.
assistantTexts: string[];
// Last assistant message; shape is provider-specific — hence unknown.
lastAssistant?: unknown;
// Token usage totals; all fields optional since providers report partially.
usage?: {
input?: number;
output?: number;
cacheRead?: number;
cacheWrite?: number;
total?: number;
};
};
// agent_end hook
export type PluginHookAgentEndEvent = {
messages: unknown[];
@@ -498,6 +529,11 @@ export type PluginHookHandlerMap = {
event: PluginHookBeforeAgentStartEvent,
ctx: PluginHookAgentContext,
) => Promise<PluginHookBeforeAgentStartResult | void> | PluginHookBeforeAgentStartResult | void;
llm_input: (event: PluginHookLlmInputEvent, ctx: PluginHookAgentContext) => Promise<void> | void;
llm_output: (
event: PluginHookLlmOutputEvent,
ctx: PluginHookAgentContext,
) => Promise<void> | void;
agent_end: (event: PluginHookAgentEndEvent, ctx: PluginHookAgentContext) => Promise<void> | void;
before_compaction: (
event: PluginHookBeforeCompactionEvent,

View File

@@ -0,0 +1,96 @@
import { describe, expect, it, vi } from "vitest";
import type { PluginRegistry } from "./registry.js";
import { createHookRunner } from "./hooks.js";
/**
 * Build a minimal PluginRegistry stub for hook-runner tests.
 * Only `hooks`/`typedHooks` carry data; every other registry collection is
 * left empty so createHookRunner can read them without blowing up.
 */
function createMockRegistry(
  hooks: Array<{ hookName: string; handler: (...args: unknown[]) => unknown }>,
): PluginRegistry {
  // Wrap each raw hook in the typed-hook envelope the runner expects.
  const typedHooks = hooks.map(({ hookName, handler }) => ({
    pluginId: "test-plugin",
    hookName,
    handler,
    priority: 0,
    source: "test",
  }));
  const stub = {
    hooks: hooks as never[],
    typedHooks,
    tools: [],
    httpHandlers: [],
    httpRoutes: [],
    channelRegistrations: [],
    gatewayHandlers: {},
    cliRegistrars: [],
    services: [],
    providers: [],
    commands: [],
  };
  // The stub intentionally omits the rest of the registry surface.
  return stub as unknown as PluginRegistry;
}
describe("llm hook runner methods", () => {
  it("runLlmInput invokes registered llm_input hooks", async () => {
    const spy = vi.fn();
    const runner = createHookRunner(createMockRegistry([{ hookName: "llm_input", handler: spy }]));
    // Representative input payload; values only need to round-trip to the handler.
    const inputEvent = {
      runId: "run-1",
      sessionId: "session-1",
      provider: "openai",
      model: "gpt-5",
      systemPrompt: "be helpful",
      prompt: "hello",
      historyMessages: [],
      imagesCount: 0,
    };
    await runner.runLlmInput(inputEvent, { agentId: "main", sessionId: "session-1" });
    expect(spy).toHaveBeenCalledWith(
      expect.objectContaining({ runId: "run-1", prompt: "hello" }),
      expect.objectContaining({ sessionId: "session-1" }),
    );
  });
  it("runLlmOutput invokes registered llm_output hooks", async () => {
    const spy = vi.fn();
    const runner = createHookRunner(createMockRegistry([{ hookName: "llm_output", handler: spy }]));
    // Representative output payload including usage totals.
    const outputEvent = {
      runId: "run-1",
      sessionId: "session-1",
      provider: "openai",
      model: "gpt-5",
      assistantTexts: ["hi"],
      lastAssistant: { role: "assistant", content: "hi" },
      usage: { input: 10, output: 20, total: 30 },
    };
    await runner.runLlmOutput(outputEvent, { agentId: "main", sessionId: "session-1" });
    expect(spy).toHaveBeenCalledWith(
      expect.objectContaining({ runId: "run-1", assistantTexts: ["hi"] }),
      expect.objectContaining({ sessionId: "session-1" }),
    );
  });
  it("hasHooks returns true for registered llm hooks", () => {
    const runner = createHookRunner(createMockRegistry([{ hookName: "llm_input", handler: vi.fn() }]));
    expect(runner.hasHooks("llm_input")).toBe(true);
    // No llm_output hook was registered, so this must stay false.
    expect(runner.hasHooks("llm_output")).toBe(false);
  });
});