feat: audit console TUI, system prompt management, and CLI improvements

Audit Console Phase 1: tool_call_trace emission from the mcplocal router,
session_bind/rbac_decision event kinds, a GET /audit/sessions endpoint,
and a full Ink TUI with session sidebar, event timeline, and detail view
(mcpctl console --audit).
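
For reference, a minimal sketch of the tool_call_trace event shape as the
router now emits it (field names are taken from the McpRouter diff below;
the surrounding AuditEvent type is assumed, not shown in these hunks):

```typescript
// Sketch only: a tool_call_trace event per the McpRouter diff below.
// The concrete AuditEvent type lives in the audit module (not shown here).
const event = {
  timestamp: new Date().toISOString(),
  sessionId: 'sess-1',
  eventKind: 'tool_call_trace' as const,
  source: 'mcplocal' as const,
  verified: true,
  serverName: 'alpha', // omitted when the server cannot be resolved
  payload: {
    toolName: 'alpha/do_thing',
    argKeys: 'key',        // comma-joined argument keys, never values
    resultSizeBytes: 123,  // JSON length of result or error
    durationMs: 45,
    error: null,           // upstream error message, if any
  },
};
```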

System prompts: move the 6 hardcoded LLM prompts into the mcpctl-system
project, with an extensible ResourceRuleRegistry validation framework,
template variable enforcement ({{maxTokens}}, {{pageCount}}), and
delete-resets-to-default behavior. All consumers fetch via
SystemPromptFetcher with hardcoded fallbacks.
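
A minimal sketch of the fetch-with-fallback pattern each consumer follows
(the SystemPromptFetcher signature is from this commit; resolvePrompt is an
illustrative wrapper, not code from the diff):

```typescript
// Fetch-with-fallback, as wired into each LLM consumer in this commit.
type SystemPromptFetcher = (name: string, fallback: string) => Promise<string>;

const DEFAULT_PROMPT =
  'Summarize the following in about {{maxTokens}} tokens.';

// Illustrative helper: when no fetcher is wired (e.g. LLM disabled),
// the hardcoded default wins; template variables interpolate at call time.
async function resolvePrompt(fetcher?: SystemPromptFetcher): Promise<string> {
  const template = fetcher
    ? await fetcher('llm-summarize', DEFAULT_PROMPT)
    : DEFAULT_PROMPT;
  return template.replaceAll('{{maxTokens}}', String(200));
}
```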

CLI: -p shorthand for --project across get/create/delete/config commands,
console auto-scroll improvements, shell completions regenerated.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
Author: Michal
Date: 2026-03-03 23:50:54 +00:00
parent 89f869f460
commit 5d859ca7d8
42 changed files with 1932 additions and 77 deletions

View File

@@ -15,7 +15,9 @@ export type AuditEventKind =
| 'stage_execution' // Individual stage detail
| 'gate_decision' // Gate open/close with intent
| 'prompt_delivery' // Which prompts were sent to client
-  | 'tool_call_trace'; // Tool call with server + timing
+  | 'tool_call_trace' // Tool call with server + timing
+  | 'rbac_decision' // RBAC allow/deny with subject + binding
+  | 'session_bind'; // Client session bound to project
export type AuditSource = 'client' | 'mcplocal' | 'mcpd';

View File

@@ -6,6 +6,7 @@
*/
import type { ProviderRegistry } from '../providers/registry.js';
import type { SystemPromptFetcher } from '../proxymodel/types.js';
export interface PromptIndexForLlm {
name: string;
@@ -28,8 +29,12 @@ export class LlmPromptSelector {
async selectPrompts(
tags: string[],
promptIndex: PromptIndexForLlm[],
getSystemPromptFn?: SystemPromptFetcher,
): Promise<LlmSelectionResult> {
-    const systemPrompt = `You are a context selection assistant. Given a developer's task keywords and a list of available project prompts, select which prompts are relevant to their work. Return a JSON object with "selectedNames" (array of prompt names) and "reasoning" (brief explanation). Priority 10 prompts must always be included.`;
+    const DEFAULT_SYSTEM_PROMPT = `You are a context selection assistant. Given a developer's task keywords and a list of available project prompts, select which prompts are relevant to their work. Return a JSON object with "selectedNames" (array of prompt names) and "reasoning" (brief explanation). Priority 10 prompts must always be included.`;
+    const systemPrompt = getSystemPromptFn
+      ? await getSystemPromptFn('llm-gate-context-selector', DEFAULT_SYSTEM_PROMPT)
+      : DEFAULT_SYSTEM_PROMPT;
const userPrompt = `Task keywords: ${tags.join(', ')}

View File

@@ -61,13 +61,16 @@ export function registerProjectMcpEndpoint(app: FastifyInstance, mcpdClient: Mcp
const llmDisabled = mcpdConfig.llmProvider === 'none' || localOverride?.provider === 'none';
const effectiveRegistry = llmDisabled ? null : (providerRegistry ?? null);
-  // Wire pagination support with LLM provider and project model override
-  router.setPaginator(new ResponsePaginator(effectiveRegistry, {}, resolvedModel));
   // Configure prompt resources with SA-scoped client for RBAC
   const saClient = mcpdClient.withHeaders({ 'X-Service-Account': `project:${projectName}` });
   router.setPromptConfig(saClient, projectName);
+  // System prompt fetcher for LLM consumers (uses router's cached fetcher)
+  const getSystemPrompt = router.getSystemPromptFn();
+  // Wire pagination support with LLM provider and project model override
+  router.setPaginator(new ResponsePaginator(effectiveRegistry, {}, resolvedModel, getSystemPrompt));
// Wire proxymodel pipeline (model resolved lazily from disk for hot-reload)
const proxyModelName = mcpdConfig.proxyModel ?? 'default';
const llmAdapter = effectiveRegistry ? new LLMProviderAdapter(effectiveRegistry) : {
@@ -167,6 +170,15 @@ export function registerProjectMcpEndpoint(app: FastifyInstance, mcpdClient: Mcp
eventType: 'session_created',
body: null,
});
// Audit: session_bind
router.getAuditCollector()?.emit({
timestamp: new Date().toISOString(),
sessionId: id,
eventKind: 'session_bind',
source: 'mcplocal',
verified: true,
payload: { projectName },
});
},
});

View File

@@ -1,6 +1,7 @@
import { randomUUID } from 'node:crypto';
import type { ProviderRegistry } from '../providers/registry.js';
import { estimateTokens } from './token-counter.js';
import type { SystemPromptFetcher } from '../proxymodel/types.js';
// --- Configuration ---
@@ -106,6 +107,7 @@ export class ResponsePaginator {
private providers: ProviderRegistry | null,
config: Partial<PaginationConfig> = {},
private modelOverride?: string,
private readonly getSystemPrompt?: SystemPromptFetcher,
) {
this.config = { ...DEFAULT_PAGINATION_CONFIG, ...config };
}
@@ -254,9 +256,13 @@ export class ResponsePaginator {
return `--- Page ${String(i + 1)} (chars ${String(p.startChar)}-${String(p.endChar)}, ~${String(p.estimatedTokens)} tokens) ---\n${preview}${truncated}`;
}).join('\n\n');
+    const systemPrompt = this.getSystemPrompt
+      ? await this.getSystemPrompt('llm-pagination-index', PAGINATION_INDEX_SYSTEM_PROMPT)
+      : PAGINATION_INDEX_SYSTEM_PROMPT;
     const result = await provider.complete({
       messages: [
-        { role: 'system', content: PAGINATION_INDEX_SYSTEM_PROMPT },
+        { role: 'system', content: systemPrompt },
{ role: 'user', content: `Tool: ${toolName}\nTotal size: ${String(raw.length)} chars, ${String(pages.length)} pages\n\n${previews}` },
],
maxTokens: this.config.indexMaxTokens,

View File

@@ -5,6 +5,7 @@ import { estimateTokens } from './token-counter.js';
import { FilterCache } from './filter-cache.js';
import type { FilterCacheConfig } from './filter-cache.js';
import { FilterMetrics } from './metrics.js';
import type { SystemPromptFetcher } from '../proxymodel/types.js';
export interface LlmProcessorConfig {
/** Enable request preprocessing */
@@ -58,6 +59,7 @@ export class LlmProcessor {
constructor(
private providers: ProviderRegistry,
private config: LlmProcessorConfig = DEFAULT_PROCESSOR_CONFIG,
private readonly getSystemPrompt?: SystemPromptFetcher,
) {
this.filterCache = new FilterCache(config.filterCache);
this.metrics = new FilterMetrics();
@@ -112,9 +114,13 @@ export class LlmProcessor {
}
try {
+      const systemPrompt = this.getSystemPrompt
+        ? await this.getSystemPrompt('llm-request-optimization', REQUEST_OPTIMIZATION_SYSTEM_PROMPT)
+        : REQUEST_OPTIMIZATION_SYSTEM_PROMPT;
       const result = await provider.complete({
         messages: [
-          { role: 'system', content: REQUEST_OPTIMIZATION_SYSTEM_PROMPT },
+          { role: 'system', content: systemPrompt },
{ role: 'user', content: `Tool: ${toolName}\nParameters: ${JSON.stringify(params)}` },
],
maxTokens: this.config.maxTokens,
@@ -176,9 +182,13 @@ export class LlmProcessor {
const startTime = performance.now();
try {
+      const systemPrompt = this.getSystemPrompt
+        ? await this.getSystemPrompt('llm-response-filter', RESPONSE_FILTER_SYSTEM_PROMPT)
+        : RESPONSE_FILTER_SYSTEM_PROMPT;
       const result = await provider.complete({
         messages: [
-          { role: 'system', content: RESPONSE_FILTER_SYSTEM_PROMPT },
+          { role: 'system', content: systemPrompt },
{ role: 'user', content: `Tool: ${toolName}\nResponse (${raw.length} chars):\n${raw}` },
],
maxTokens: this.config.maxTokens,

View File

@@ -3,7 +3,7 @@
* Runs content through a sequence of stages defined by a ProxyModel.
* Each stage receives the output of the previous stage as input.
*/
import type { StageContext, StageResult, StageLogger, Section, ContentType, LLMProvider, CacheProvider } from './types.js';
import type { StageContext, StageResult, StageLogger, Section, ContentType, LLMProvider, CacheProvider, SystemPromptFetcher } from './types.js';
import type { ProxyModelDefinition } from './schema.js';
import { getStage } from './stage-registry.js';
import type { AuditCollector } from '../audit/collector.js';
@@ -27,6 +27,8 @@ export interface ExecuteOptions {
cache: CacheProvider;
/** Optional logger override (defaults to console). */
log?: StageLogger;
/** Optional system prompt fetcher for stages that use editable prompts. */
getSystemPrompt?: SystemPromptFetcher;
/** Optional audit collector for pipeline/stage event emission. */
auditCollector?: AuditCollector;
/** Server name for per-server audit tagging. */
@@ -72,6 +74,8 @@ export async function executePipeline(opts: ExecuteOptions): Promise<StageResult
continue;
}
const defaultFetcher: SystemPromptFetcher = async (_name, fallback) => fallback;
const ctx: StageContext = {
contentType: opts.contentType,
sourceName: opts.sourceName,
@@ -81,6 +85,7 @@ export async function executePipeline(opts: ExecuteOptions): Promise<StageResult
llm,
cache,
log: consoleLogger(stageSpec.type),
getSystemPrompt: opts.getSystemPrompt ?? defaultFetcher,
config: stageSpec.config ?? {},
};

View File

@@ -244,7 +244,8 @@ async function handleBeginSession(
summary: p.summary,
chapters: p.chapters,
}));
-    const llmResult = await llmSelector.selectPrompts(tags, llmIndex);
+    const getSystemPromptFn = ctx.getSystemPrompt.bind(ctx);
+    const llmResult = await llmSelector.selectPrompts(tags, llmIndex, getSystemPromptFn);
reasoning = llmResult.reasoning;
const selectedSet = new Set(llmResult.selectedNames);

View File

@@ -63,10 +63,12 @@ async function generatePageTitles(pages: string[], ctx: StageContext): Promise<s
return `--- Page ${i + 1} (${page.length} chars) ---\n${preview}`;
}).join('\n\n');
+  const DEFAULT_PROMPT = `Generate exactly {{pageCount}} short descriptive titles (max 60 chars each) for the following {{pageCount}} pages. Return ONLY a JSON array of {{pageCount}} strings. No markdown, no explanation.`;
+  const template = await ctx.getSystemPrompt('llm-paginate-titles', DEFAULT_PROMPT);
+  const prompt = template.replaceAll('{{pageCount}}', String(pages.length));
   const result = await ctx.llm.complete(
-    `Generate exactly ${pages.length} short descriptive titles (max 60 chars each) for the following ${pages.length} pages. ` +
-    `Return ONLY a JSON array of ${pages.length} strings. No markdown, no explanation.\n\n` +
-    `${previews}`,
+    `${prompt}\n\n${previews}`,
     { maxTokens: pages.length * 30 },
);

View File

@@ -128,10 +128,12 @@ async function cachedSummarize(
): Promise<string> {
const key = `summary:${ctx.cache.hash(content)}:${maxTokens}`;
return ctx.cache.getOrCompute(key, async () => {
+    const DEFAULT_PROMPT = `Summarize the following in about {{maxTokens}} tokens. Preserve all items marked MUST, REQUIRED, or CRITICAL verbatim. Be specific — mention names, IDs, counts, key values.`;
+    const template = await ctx.getSystemPrompt('llm-summarize', DEFAULT_PROMPT);
+    const prompt = template.replaceAll('{{maxTokens}}', String(maxTokens));
     return ctx.llm.complete(
-      `Summarize the following in about ${maxTokens} tokens. ` +
-      `Preserve all items marked MUST, REQUIRED, or CRITICAL verbatim. ` +
-      `Be specific — mention names, IDs, counts, key values.\n\n${content}`,
+      `${prompt}\n\n${content}`,
{ maxTokens },
);
});

View File

@@ -10,6 +10,9 @@
* SessionController — method-level hooks with per-session state
*/
/** Fetches a system prompt by name, falling back to the provided default. */
export type SystemPromptFetcher = (name: string, fallback: string) => Promise<string>;
// ── Content Stage Contract ──────────────────────────────────────────
/**
@@ -40,6 +43,9 @@ export interface StageContext {
cache: CacheProvider;
log: StageLogger;
/** Fetch a system prompt from mcpctl-system, with hardcoded fallback. */
getSystemPrompt: SystemPromptFetcher;
/** Stage-specific configuration from the proxymodel YAML */
config: Record<string, unknown>;
}
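
A minimal concrete fetcher satisfying this contract, assuming an in-memory
override store (illustrative only; the production fetcher in this commit
resolves prompts through mcpd with caching):

```typescript
// Illustrative SystemPromptFetcher backed by an in-memory override map;
// uses the SystemPromptFetcher type defined above. The production path
// (McpRouter.getSystemPrompt) resolves prompts via mcpd instead.
const overrides = new Map<string, string>([
  ['llm-summarize', 'Custom: summarize in about {{maxTokens}} tokens.'],
]);

const fetcher: SystemPromptFetcher = async (name, fallback) =>
  overrides.get(name) ?? fallback;

// Stages always receive some prompt: the override when one exists,
// otherwise the hardcoded default passed as `fallback`.
const prompt = await fetcher('llm-pagination-index', 'Default index prompt.');
```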

View File

@@ -91,6 +91,10 @@ export class McpRouter {
this.auditCollector = collector;
}
getAuditCollector(): AuditCollector | null {
return this.auditCollector;
}
setLlmProcessor(processor: LlmProcessor): void {
this.llmProcessor = processor;
}
@@ -105,6 +109,11 @@ export class McpRouter {
this.linkResolver = new LinkResolver(mcpdClient);
}
/** Return a bound system prompt fetcher for external consumers (e.g. ResponsePaginator). */
getSystemPromptFn(): (name: string, fallback: string) => Promise<string> {
return (name, fallback) => this.getSystemPrompt(name, fallback);
}
/** Set the plugin for this router. When set, plugin hooks are dispatched. */
setPlugin(plugin: ProxyModelPlugin): void {
this.plugin = plugin;
@@ -149,6 +158,7 @@ export class McpRouter {
proxyModel: proxyModelDef,
llm: effectiveLlm,
cache: effectiveCache,
getSystemPrompt: (name, fallback) => this.getSystemPrompt(name, fallback),
...(this.auditCollector ? { auditCollector: this.auditCollector } : {}),
...(serverName !== undefined ? { serverName } : {}),
});
@@ -683,6 +693,30 @@ export class McpRouter {
const params = request.params as Record<string, unknown> | undefined;
const toolName = params?.['name'] as string | undefined;
const toolArgs = (params?.['arguments'] ?? {}) as Record<string, unknown>;
const startMs = Date.now();
const emitTrace = (response: JsonRpcResponse, serverName?: string): void => {
if (this.auditCollector && context?.sessionId && toolName) {
const durationMs = Date.now() - startMs;
const resultSize = JSON.stringify(response.result ?? response.error ?? {}).length;
const resolved = serverName ?? this.toolToServer.get(toolName);
const base = {
timestamp: new Date().toISOString(),
sessionId: context.sessionId,
eventKind: 'tool_call_trace' as const,
source: 'mcplocal' as const,
verified: true,
payload: {
toolName,
argKeys: Object.keys(toolArgs).join(', '),
resultSizeBytes: resultSize,
durationMs,
error: response.error?.message ?? null,
},
};
this.auditCollector.emit(resolved ? { ...base, serverName: resolved } : base);
}
};
// Plugin path
if (this.plugin && context?.sessionId) {
@@ -693,10 +727,14 @@ export class McpRouter {
if (virtualTool) {
try {
const result = await virtualTool.handler(toolArgs, ctx);
-          return { jsonrpc: '2.0', id: request.id, result: result as JsonRpcResponse['result'] };
+          const resp = { jsonrpc: '2.0' as const, id: request.id, result: result as JsonRpcResponse['result'] };
+          emitTrace(resp, 'virtual');
+          return resp;
} catch (err) {
const code = (err as { code?: number }).code ?? -32603;
-          return { jsonrpc: '2.0', id: request.id, error: { code, message: err instanceof Error ? err.message : String(err) } };
+          const resp = { jsonrpc: '2.0' as const, id: request.id, error: { code, message: err instanceof Error ? err.message : String(err) } };
+          emitTrace(resp, 'virtual');
+          return resp;
}
}
@@ -714,6 +752,7 @@ export class McpRouter {
response = await this.plugin.onToolCallAfter(toolName, toolArgs, response, ctx);
}
emitTrace(response);
return response;
}
@@ -743,7 +782,9 @@ export class McpRouter {
// If no LLM processor or tool shouldn't be processed, route directly
if (!this.llmProcessor || !toolName || !this.llmProcessor.shouldProcess('tools/call', toolName)) {
const response = await this.routeNamespacedCall(request, 'name', this.toolToServer);
-      return this.maybePaginate(toolName, response);
+      const paginated = await this.maybePaginate(toolName, response);
+      emitTrace(paginated);
+      return paginated;
}
// Preprocess request params
@@ -757,14 +798,23 @@ export class McpRouter {
// Try pagination
const paginated = await this.maybePaginate(toolName, response);
-    if (paginated !== response) return paginated;
+    if (paginated !== response) {
+      emitTrace(paginated);
+      return paginated;
+    }
// Filter response
-    if (response.error) return response;
+    if (response.error) {
+      emitTrace(response);
+      return response;
+    }
const filtered = await this.llmProcessor.filterResponse(toolName, response);
if (filtered.filtered) {
-      return { ...response, result: filtered.result };
+      const filteredResp = { ...response, result: filtered.result };
+      emitTrace(filteredResp);
+      return filteredResp;
}
emitTrace(response);
return response;
}

View File

@@ -47,6 +47,7 @@ function mockCtx(original: string, config: Record<string, unknown> = {}, llmAvai
llm: mockLlm,
cache: mockCache,
log: mockLog,
getSystemPrompt: async (_name: string, fallback: string) => fallback,
config,
};
}

View File

@@ -136,6 +136,7 @@ function createMockContext(original: string): StageContext {
llm: mockLlm,
cache: mockCache,
log: mockLog,
getSystemPrompt: async (_name: string, fallback: string) => fallback,
config: {},
};
}

View File

@@ -580,4 +580,93 @@ describe('McpRouter', () => {
expect(config!).toHaveProperty('llm', haLlm);
});
});
describe('tool_call_trace audit emission', () => {
it('emits tool_call_trace on successful tool call', async () => {
const alpha = mockUpstream('alpha', { tools: [{ name: 'do_thing' }] });
router.addUpstream(alpha);
await router.discoverTools();
const emitted: Array<Record<string, unknown>> = [];
const mockCollector = { emit: vi.fn((e: Record<string, unknown>) => emitted.push(e)) };
router.setAuditCollector(mockCollector as never);
await router.route(
{ jsonrpc: '2.0', id: 1, method: 'tools/call', params: { name: 'alpha/do_thing', arguments: { key: 'val' } } },
{ sessionId: 'sess-1' },
);
expect(mockCollector.emit).toHaveBeenCalledOnce();
const event = emitted[0]!;
expect(event['eventKind']).toBe('tool_call_trace');
expect(event['sessionId']).toBe('sess-1');
expect(event['serverName']).toBe('alpha');
expect(event['verified']).toBe(true);
const payload = event['payload'] as Record<string, unknown>;
expect(payload['toolName']).toBe('alpha/do_thing');
expect(payload['argKeys']).toBe('key');
expect(payload['durationMs']).toBeTypeOf('number');
expect(payload['resultSizeBytes']).toBeTypeOf('number');
expect(payload['error']).toBeNull();
});
it('does not emit when auditCollector is not set', async () => {
const alpha = mockUpstream('alpha', { tools: [{ name: 'do_thing' }] });
router.addUpstream(alpha);
await router.discoverTools();
// No setAuditCollector call — should not throw
const resp = await router.route(
{ jsonrpc: '2.0', id: 1, method: 'tools/call', params: { name: 'alpha/do_thing', arguments: {} } },
{ sessionId: 'sess-1' },
);
expect(resp.result).toBeDefined();
});
it('does not emit when sessionId is missing', async () => {
const alpha = mockUpstream('alpha', { tools: [{ name: 'do_thing' }] });
router.addUpstream(alpha);
await router.discoverTools();
const mockCollector = { emit: vi.fn() };
router.setAuditCollector(mockCollector as never);
await router.route(
{ jsonrpc: '2.0', id: 1, method: 'tools/call', params: { name: 'alpha/do_thing', arguments: {} } },
);
expect(mockCollector.emit).not.toHaveBeenCalled();
});
it('captures error in trace when upstream returns error', async () => {
const failing: UpstreamConnection = {
name: 'fail-srv',
isAlive: vi.fn(() => true),
close: vi.fn(async () => {}),
onNotification: vi.fn(),
send: vi.fn(async (req: JsonRpcRequest): Promise<JsonRpcResponse> => {
if (req.method === 'tools/list') {
return { jsonrpc: '2.0', id: req.id, result: { tools: [{ name: 'fail_tool' }] } };
}
return { jsonrpc: '2.0', id: req.id, error: { code: -32000, message: 'Something broke' } };
}),
};
router.addUpstream(failing);
await router.discoverTools();
const emitted: Array<Record<string, unknown>> = [];
const mockCollector = { emit: vi.fn((e: Record<string, unknown>) => emitted.push(e)) };
router.setAuditCollector(mockCollector as never);
await router.route(
{ jsonrpc: '2.0', id: 1, method: 'tools/call', params: { name: 'fail-srv/fail_tool', arguments: { a: 1, b: 2 } } },
{ sessionId: 'sess-err' },
);
expect(mockCollector.emit).toHaveBeenCalledOnce();
const payload = emitted[0]!['payload'] as Record<string, unknown>;
expect(payload['error']).toBe('Something broke');
expect(payload['argKeys']).toBe('a, b');
});
});
});

View File

@@ -0,0 +1,207 @@
/**
* Smoke tests: System prompts (LLM pipeline).
*
* Validates that the 6 LLM system prompts are created in mcpctl-system,
* that validation rejects edits missing required template variables,
* and that deletion resets to defaults.
*
* Run with: pnpm test:smoke
*/
import { describe, it, expect, beforeAll } from 'vitest';
import http from 'node:http';
import { readFileSync } from 'node:fs';
import { join } from 'node:path';
import { homedir } from 'node:os';
import { isMcplocalRunning, getMcpdUrl } from './mcp-client.js';
const MCPD_URL = getMcpdUrl();
function loadMcpdCredentials(): { token: string; url: string } {
try {
const raw = readFileSync(join(homedir(), '.mcpctl', 'credentials'), 'utf-8');
const parsed = JSON.parse(raw) as { token?: string; mcpdUrl?: string };
return {
token: parsed.token ?? '',
url: parsed.mcpdUrl ?? MCPD_URL,
};
} catch {
return { token: '', url: MCPD_URL };
}
}
const MCPD_CREDS = loadMcpdCredentials();
const MCPD_EFFECTIVE_URL = MCPD_CREDS.url || MCPD_URL;
interface Prompt {
id: string;
name: string;
content: string;
priority: number;
projectId: string;
}
function mcpdRequest<T>(method: string, path: string, body?: unknown): Promise<{ status: number; data: T }> {
return new Promise((resolve, reject) => {
const url = new URL(path, MCPD_EFFECTIVE_URL);
const headers: Record<string, string> = {
'Accept': 'application/json',
};
if (body !== undefined) headers['Content-Type'] = 'application/json';
if (MCPD_CREDS.token) headers['Authorization'] = `Bearer ${MCPD_CREDS.token}`;
const bodyStr = body !== undefined ? JSON.stringify(body) : undefined;
if (bodyStr) headers['Content-Length'] = String(Buffer.byteLength(bodyStr));
const req = http.request(url, { method, timeout: 10_000, headers }, (res) => {
const chunks: Buffer[] = [];
res.on('data', (chunk: Buffer) => chunks.push(chunk));
res.on('end', () => {
const raw = Buffer.concat(chunks).toString();
try {
resolve({ status: res.statusCode ?? 500, data: raw ? JSON.parse(raw) as T : (undefined as T) });
} catch {
resolve({ status: res.statusCode ?? 500, data: raw as unknown as T });
}
});
});
req.on('error', reject);
req.on('timeout', () => reject(new Error('Request timeout')));
if (bodyStr) req.write(bodyStr);
req.end();
});
}
const LLM_PROMPT_NAMES = [
'llm-response-filter',
'llm-request-optimization',
'llm-pagination-index',
'llm-gate-context-selector',
'llm-summarize',
'llm-paginate-titles',
];
describe('Smoke: System Prompts', () => {
let available = false;
let systemProjectId = '';
let prompts: Prompt[] = [];
beforeAll(async () => {
console.log('');
console.log(' ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━');
console.log(' Smoke Test: System Prompts (LLM pipeline)');
console.log(' ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━');
available = await isMcplocalRunning();
if (!available) {
console.log('\n ✗ mcplocal not running — all tests will be skipped\n');
return;
}
// Find mcpctl-system project
const projectsResult = await mcpdRequest<Array<{ id: string; name: string }>>('GET', '/api/v1/projects');
const systemProject = projectsResult.data.find((p) => p.name === 'mcpctl-system');
if (!systemProject) {
console.log('\n ✗ mcpctl-system project not found — tests will fail\n');
return;
}
systemProjectId = systemProject.id;
// Fetch all prompts for system project (API uses project name, not ID)
const promptsResult = await mcpdRequest<Prompt[]>('GET', `/api/v1/prompts?project=mcpctl-system`);
prompts = promptsResult.data;
console.log(`\n ✓ Found ${prompts.length} system prompts\n`);
});
it('all 6 LLM prompts exist in mcpctl-system', () => {
if (!available) return;
const promptNames = prompts.map((p) => p.name);
for (const name of LLM_PROMPT_NAMES) {
expect(promptNames, `Missing system prompt: ${name}`).toContain(name);
}
});
it('edit a prompt with no required vars succeeds', async () => {
if (!available) return;
const prompt = prompts.find((p) => p.name === 'llm-response-filter');
expect(prompt).toBeDefined();
const newContent = prompt!.content + '\n- Additional custom rule';
const result = await mcpdRequest<Prompt>('PUT', `/api/v1/prompts/${prompt!.id}`, {
content: newContent,
});
expect(result.status).toBe(200);
expect(result.data.content).toBe(newContent);
// Restore original
await mcpdRequest('PUT', `/api/v1/prompts/${prompt!.id}`, {
content: prompt!.content,
});
});
it('edit llm-summarize removing {{maxTokens}} is rejected with 400', async () => {
if (!available) return;
const prompt = prompts.find((p) => p.name === 'llm-summarize');
expect(prompt).toBeDefined();
const result = await mcpdRequest<{ message?: string }>('PUT', `/api/v1/prompts/${prompt!.id}`, {
content: 'Summarize this content briefly.',
});
expect(result.status).toBe(400);
});
it('edit llm-paginate-titles removing {{pageCount}} is rejected with 400', async () => {
if (!available) return;
const prompt = prompts.find((p) => p.name === 'llm-paginate-titles');
expect(prompt).toBeDefined();
const result = await mcpdRequest<{ message?: string }>('PUT', `/api/v1/prompts/${prompt!.id}`, {
content: 'Generate some titles for pages.',
});
expect(result.status).toBe(400);
});
it('edit with required vars present succeeds', async () => {
if (!available) return;
const prompt = prompts.find((p) => p.name === 'llm-summarize');
expect(prompt).toBeDefined();
const newContent = 'Custom: Summarize in about {{maxTokens}} tokens. Keep it concise.';
const result = await mcpdRequest<Prompt>('PUT', `/api/v1/prompts/${prompt!.id}`, {
content: newContent,
});
expect(result.status).toBe(200);
expect(result.data.content).toBe(newContent);
// Restore original
await mcpdRequest('PUT', `/api/v1/prompts/${prompt!.id}`, {
content: prompt!.content,
});
});
it('delete a system prompt resets to default', async () => {
if (!available) return;
const prompt = prompts.find((p) => p.name === 'llm-gate-context-selector');
expect(prompt).toBeDefined();
// First, modify the prompt
await mcpdRequest('PUT', `/api/v1/prompts/${prompt!.id}`, {
content: 'Temporarily customized content.',
});
// Delete should reset to default, not actually delete
const deleteResult = await mcpdRequest<Prompt>('DELETE', `/api/v1/prompts/${prompt!.id}`);
expect(deleteResult.status).toBe(200);
expect(deleteResult.data.content).toContain('context selection assistant');
// Prompt should still exist
const getResult = await mcpdRequest<Prompt>('GET', `/api/v1/prompts/${prompt!.id}`);
expect(getResult.status).toBe(200);
expect(getResult.data.name).toBe('llm-gate-context-selector');
});
});
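
The two rejection tests above imply a required-variable rule roughly like
the following sketch (the real ResourceRuleRegistry internals are not part
of this diff; every name here is an assumption):

```typescript
// Hypothetical sketch of the required-template-variable rule; the actual
// ResourceRuleRegistry API is not shown in this commit's hunks.
const REQUIRED_VARS: Record<string, string[]> = {
  'llm-summarize': ['{{maxTokens}}'],
  'llm-paginate-titles': ['{{pageCount}}'],
};

function validatePromptEdit(name: string, content: string): string | null {
  for (const variable of REQUIRED_VARS[name] ?? []) {
    if (!content.includes(variable)) {
      // mcpd surfaces this as an HTTP 400 on PUT /api/v1/prompts/:id
      return `prompt '${name}' must contain template variable ${variable}`;
    }
  }
  return null; // edit is valid
}
```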

View File

@@ -0,0 +1,160 @@
import { describe, it, expect, vi } from 'vitest';
import type { StageContext, LLMProvider, CacheProvider, StageLogger, SystemPromptFetcher } from '../src/proxymodel/types.js';
import paginate from '../src/proxymodel/stages/paginate.js';
import summarizeTree from '../src/proxymodel/stages/summarize-tree.js';
function mockCtx(
original: string,
config: Record<string, unknown> = {},
opts: { llmAvailable?: boolean; getSystemPrompt?: SystemPromptFetcher } = {},
): StageContext {
const mockLlm: LLMProvider = {
async complete(prompt) {
// For paginate: return JSON array of titles
if (prompt.includes('short descriptive titles') || prompt.includes('JSON array')) {
return '["Title A", "Title B"]';
}
// For summarize: return a summary
return `Summary of: ${prompt.slice(0, 40)}...`;
},
available: () => opts.llmAvailable ?? false,
};
const cache = new Map<string, string>();
const mockCache: CacheProvider = {
async getOrCompute(key, compute) {
if (cache.has(key)) return cache.get(key)!;
const val = await compute();
cache.set(key, val);
return val;
},
hash(content) { return content.slice(0, 8); },
async get(key) { return cache.get(key) ?? null; },
async set(key, value) { cache.set(key, value); },
};
const mockLog: StageLogger = {
debug: vi.fn(),
info: vi.fn(),
warn: vi.fn(),
error: vi.fn(),
};
return {
contentType: 'toolResult',
sourceName: 'test/tool',
projectName: 'test',
sessionId: 'sess-1',
originalContent: original,
llm: mockLlm,
cache: mockCache,
log: mockLog,
getSystemPrompt: opts.getSystemPrompt ?? (async (_name, fallback) => fallback),
config,
};
}
describe('System prompt fetching in stages', () => {
describe('paginate stage', () => {
it('uses getSystemPrompt to fetch paginate-titles prompt', async () => {
const fetchSpy = vi.fn(async (_name: string, fallback: string) => fallback);
const content = 'A'.repeat(9000); // Larger than default pageSize (8000)
const ctx = mockCtx(content, {}, { llmAvailable: true, getSystemPrompt: fetchSpy });
await paginate(content, ctx);
expect(fetchSpy).toHaveBeenCalledWith(
'llm-paginate-titles',
expect.stringContaining('{{pageCount}}'),
);
});
it('falls back to hardcoded default when fetcher returns fallback', async () => {
const content = 'B'.repeat(9000);
const ctx = mockCtx(content, {}, { llmAvailable: true });
const result = await paginate(content, ctx);
// Should still produce paginated output (uses default prompt)
expect(result.content).toContain('pages');
});
it('interpolates {{pageCount}} in the fetched template', async () => {
let capturedPrompt = '';
const customFetcher: SystemPromptFetcher = async (name, fallback) => {
if (name === 'llm-paginate-titles') {
return 'Custom: generate {{pageCount}} titles please';
}
return fallback;
};
const mockLlm: LLMProvider = {
async complete(prompt) {
capturedPrompt = prompt;
return '["A", "B"]';
},
available: () => true,
};
const content = 'C'.repeat(9000);
const ctx = mockCtx(content, {}, { llmAvailable: true, getSystemPrompt: customFetcher });
// Override llm to capture the prompt
(ctx as { llm: LLMProvider }).llm = mockLlm;
await paginate(content, ctx);
expect(capturedPrompt).toContain('Custom: generate 2 titles please');
expect(capturedPrompt).not.toContain('{{pageCount}}');
});
});
describe('summarize-tree stage', () => {
it('uses getSystemPrompt to fetch llm-summarize prompt', async () => {
const fetchSpy = vi.fn(async (_name: string, fallback: string) => fallback);
// Need prose content > 2000 chars with headers to trigger LLM summary
const sections = [
'# Section 1\n' + 'Word '.repeat(500),
'# Section 2\n' + 'Text '.repeat(500),
].join('\n\n');
const ctx = mockCtx(sections, {}, { llmAvailable: true, getSystemPrompt: fetchSpy });
await summarizeTree(sections, ctx);
expect(fetchSpy).toHaveBeenCalledWith(
'llm-summarize',
expect.stringContaining('{{maxTokens}}'),
);
});
it('interpolates {{maxTokens}} in the fetched template', async () => {
let capturedPrompt = '';
const customFetcher: SystemPromptFetcher = async (name, fallback) => {
if (name === 'llm-summarize') {
return 'Custom summary in {{maxTokens}} tokens max';
}
return fallback;
};
const mockLlm: LLMProvider = {
async complete(prompt) {
capturedPrompt = prompt;
return 'A brief summary';
},
available: () => true,
};
const sections = [
'# Part A\n' + 'Content '.repeat(500),
'# Part B\n' + 'More '.repeat(500),
].join('\n\n');
const ctx = mockCtx(sections, {}, { llmAvailable: true, getSystemPrompt: customFetcher });
(ctx as { llm: LLMProvider }).llm = mockLlm;
await summarizeTree(sections, ctx);
expect(capturedPrompt).toContain('Custom summary in 200 tokens max');
expect(capturedPrompt).not.toContain('{{maxTokens}}');
});
});
});