feat: audit console TUI, system prompt management, and CLI improvements
Audit Console Phase 1: tool_call_trace emission from mcplocal router,
session_bind/rbac_decision event kinds, GET /audit/sessions endpoint,
full Ink TUI with session sidebar, event timeline, and detail view
(mcpctl console --audit).
System prompts: move 6 hardcoded LLM prompts to mcpctl-system project
with extensible ResourceRuleRegistry validation framework, template
variable enforcement ({{maxTokens}}, {{pageCount}}), and delete-resets-
to-default behavior. All consumers fetch via SystemPromptFetcher with
hardcoded fallbacks.
CLI: -p shorthand for --project across get/create/delete/config commands,
console auto-scroll improvements, shell completions regenerated.
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
160
src/mcplocal/tests/system-prompt-fetching.test.ts
Normal file
160
src/mcplocal/tests/system-prompt-fetching.test.ts
Normal file
@@ -0,0 +1,160 @@
|
||||
import { describe, it, expect, vi } from 'vitest';
|
||||
import type { StageContext, LLMProvider, CacheProvider, StageLogger, SystemPromptFetcher } from '../src/proxymodel/types.js';
|
||||
import paginate from '../src/proxymodel/stages/paginate.js';
|
||||
import summarizeTree from '../src/proxymodel/stages/summarize-tree.js';
|
||||
|
||||
function mockCtx(
|
||||
original: string,
|
||||
config: Record<string, unknown> = {},
|
||||
opts: { llmAvailable?: boolean; getSystemPrompt?: SystemPromptFetcher } = {},
|
||||
): StageContext {
|
||||
const mockLlm: LLMProvider = {
|
||||
async complete(prompt) {
|
||||
// For paginate: return JSON array of titles
|
||||
if (prompt.includes('short descriptive titles') || prompt.includes('JSON array')) {
|
||||
return '["Title A", "Title B"]';
|
||||
}
|
||||
// For summarize: return a summary
|
||||
return `Summary of: ${prompt.slice(0, 40)}...`;
|
||||
},
|
||||
available: () => opts.llmAvailable ?? false,
|
||||
};
|
||||
|
||||
const cache = new Map<string, string>();
|
||||
const mockCache: CacheProvider = {
|
||||
async getOrCompute(key, compute) {
|
||||
if (cache.has(key)) return cache.get(key)!;
|
||||
const val = await compute();
|
||||
cache.set(key, val);
|
||||
return val;
|
||||
},
|
||||
hash(content) { return content.slice(0, 8); },
|
||||
async get(key) { return cache.get(key) ?? null; },
|
||||
async set(key, value) { cache.set(key, value); },
|
||||
};
|
||||
|
||||
const mockLog: StageLogger = {
|
||||
debug: vi.fn(),
|
||||
info: vi.fn(),
|
||||
warn: vi.fn(),
|
||||
error: vi.fn(),
|
||||
};
|
||||
|
||||
return {
|
||||
contentType: 'toolResult',
|
||||
sourceName: 'test/tool',
|
||||
projectName: 'test',
|
||||
sessionId: 'sess-1',
|
||||
originalContent: original,
|
||||
llm: mockLlm,
|
||||
cache: mockCache,
|
||||
log: mockLog,
|
||||
getSystemPrompt: opts.getSystemPrompt ?? (async (_name, fallback) => fallback),
|
||||
config,
|
||||
};
|
||||
}
|
||||
|
||||
describe('System prompt fetching in stages', () => {
|
||||
describe('paginate stage', () => {
|
||||
it('uses getSystemPrompt to fetch paginate-titles prompt', async () => {
|
||||
const fetchSpy = vi.fn(async (_name: string, fallback: string) => fallback);
|
||||
const content = 'A'.repeat(9000); // Larger than default pageSize (8000)
|
||||
const ctx = mockCtx(content, {}, { llmAvailable: true, getSystemPrompt: fetchSpy });
|
||||
|
||||
await paginate(content, ctx);
|
||||
|
||||
expect(fetchSpy).toHaveBeenCalledWith(
|
||||
'llm-paginate-titles',
|
||||
expect.stringContaining('{{pageCount}}'),
|
||||
);
|
||||
});
|
||||
|
||||
it('falls back to hardcoded default when fetcher returns fallback', async () => {
|
||||
const content = 'B'.repeat(9000);
|
||||
const ctx = mockCtx(content, {}, { llmAvailable: true });
|
||||
|
||||
const result = await paginate(content, ctx);
|
||||
// Should still produce paginated output (uses default prompt)
|
||||
expect(result.content).toContain('pages');
|
||||
});
|
||||
|
||||
it('interpolates {{pageCount}} in the fetched template', async () => {
|
||||
let capturedPrompt = '';
|
||||
const customFetcher: SystemPromptFetcher = async (name, fallback) => {
|
||||
if (name === 'llm-paginate-titles') {
|
||||
return 'Custom: generate {{pageCount}} titles please';
|
||||
}
|
||||
return fallback;
|
||||
};
|
||||
|
||||
const mockLlm: LLMProvider = {
|
||||
async complete(prompt) {
|
||||
capturedPrompt = prompt;
|
||||
return '["A", "B"]';
|
||||
},
|
||||
available: () => true,
|
||||
};
|
||||
|
||||
const content = 'C'.repeat(9000);
|
||||
const ctx = mockCtx(content, {}, { llmAvailable: true, getSystemPrompt: customFetcher });
|
||||
// Override llm to capture the prompt
|
||||
(ctx as { llm: LLMProvider }).llm = mockLlm;
|
||||
|
||||
await paginate(content, ctx);
|
||||
|
||||
expect(capturedPrompt).toContain('Custom: generate 2 titles please');
|
||||
expect(capturedPrompt).not.toContain('{{pageCount}}');
|
||||
});
|
||||
});
|
||||
|
||||
describe('summarize-tree stage', () => {
|
||||
it('uses getSystemPrompt to fetch llm-summarize prompt', async () => {
|
||||
const fetchSpy = vi.fn(async (_name: string, fallback: string) => fallback);
|
||||
// Need prose content > 2000 chars with headers to trigger LLM summary
|
||||
const sections = [
|
||||
'# Section 1\n' + 'Word '.repeat(500),
|
||||
'# Section 2\n' + 'Text '.repeat(500),
|
||||
].join('\n\n');
|
||||
|
||||
const ctx = mockCtx(sections, {}, { llmAvailable: true, getSystemPrompt: fetchSpy });
|
||||
|
||||
await summarizeTree(sections, ctx);
|
||||
|
||||
expect(fetchSpy).toHaveBeenCalledWith(
|
||||
'llm-summarize',
|
||||
expect.stringContaining('{{maxTokens}}'),
|
||||
);
|
||||
});
|
||||
|
||||
it('interpolates {{maxTokens}} in the fetched template', async () => {
|
||||
let capturedPrompt = '';
|
||||
const customFetcher: SystemPromptFetcher = async (name, fallback) => {
|
||||
if (name === 'llm-summarize') {
|
||||
return 'Custom summary in {{maxTokens}} tokens max';
|
||||
}
|
||||
return fallback;
|
||||
};
|
||||
|
||||
const mockLlm: LLMProvider = {
|
||||
async complete(prompt) {
|
||||
capturedPrompt = prompt;
|
||||
return 'A brief summary';
|
||||
},
|
||||
available: () => true,
|
||||
};
|
||||
|
||||
const sections = [
|
||||
'# Part A\n' + 'Content '.repeat(500),
|
||||
'# Part B\n' + 'More '.repeat(500),
|
||||
].join('\n\n');
|
||||
|
||||
const ctx = mockCtx(sections, {}, { llmAvailable: true, getSystemPrompt: customFetcher });
|
||||
(ctx as { llm: LLMProvider }).llm = mockLlm;
|
||||
|
||||
await summarizeTree(sections, ctx);
|
||||
|
||||
expect(capturedPrompt).toContain('Custom summary in 200 tokens max');
|
||||
expect(capturedPrompt).not.toContain('{{maxTokens}}');
|
||||
});
|
||||
});
|
||||
});
|
||||
Reference in New Issue
Block a user