feat: audit console TUI, system prompt management, and CLI improvements
Audit Console Phase 1: tool_call_trace emission from mcplocal router,
session_bind/rbac_decision event kinds, GET /audit/sessions endpoint,
full Ink TUI with session sidebar, event timeline, and detail view
(mcpctl console --audit).
System prompts: move 6 hardcoded LLM prompts to mcpctl-system project
with extensible ResourceRuleRegistry validation framework, template
variable enforcement ({{maxTokens}}, {{pageCount}}), and delete-resets-
to-default behavior. All consumers fetch via SystemPromptFetcher with
hardcoded fallbacks.
CLI: -p shorthand for --project across get/create/delete/config commands,
console auto-scroll improvements, shell completions regenerated.
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
207
src/mcplocal/tests/smoke/system-prompts.test.ts
Normal file
207
src/mcplocal/tests/smoke/system-prompts.test.ts
Normal file
@@ -0,0 +1,207 @@
|
||||
/**
|
||||
* Smoke tests: System prompts (LLM pipeline).
|
||||
*
|
||||
* Validates that the 6 LLM system prompts are created in mcpctl-system,
|
||||
* that validation rejects edits missing required template variables,
|
||||
* and that deletion resets to defaults.
|
||||
*
|
||||
* Run with: pnpm test:smoke
|
||||
*/
|
||||
import { describe, it, expect, beforeAll } from 'vitest';
|
||||
import http from 'node:http';
|
||||
import { readFileSync } from 'node:fs';
|
||||
import { join } from 'node:path';
|
||||
import { homedir } from 'node:os';
|
||||
import { isMcplocalRunning, getMcpdUrl } from './mcp-client.js';
|
||||
|
||||
const MCPD_URL = getMcpdUrl();
|
||||
|
||||
function loadMcpdCredentials(): { token: string; url: string } {
|
||||
try {
|
||||
const raw = readFileSync(join(homedir(), '.mcpctl', 'credentials'), 'utf-8');
|
||||
const parsed = JSON.parse(raw) as { token?: string; mcpdUrl?: string };
|
||||
return {
|
||||
token: parsed.token ?? '',
|
||||
url: parsed.mcpdUrl ?? MCPD_URL,
|
||||
};
|
||||
} catch {
|
||||
return { token: '', url: MCPD_URL };
|
||||
}
|
||||
}
|
||||
|
||||
const MCPD_CREDS = loadMcpdCredentials();
|
||||
const MCPD_EFFECTIVE_URL = MCPD_CREDS.url || MCPD_URL;
|
||||
|
||||
interface Prompt {
|
||||
id: string;
|
||||
name: string;
|
||||
content: string;
|
||||
priority: number;
|
||||
projectId: string;
|
||||
}
|
||||
|
||||
function mcpdRequest<T>(method: string, path: string, body?: unknown): Promise<{ status: number; data: T }> {
|
||||
return new Promise((resolve, reject) => {
|
||||
const url = new URL(path, MCPD_EFFECTIVE_URL);
|
||||
const headers: Record<string, string> = {
|
||||
'Accept': 'application/json',
|
||||
};
|
||||
if (body !== undefined) headers['Content-Type'] = 'application/json';
|
||||
if (MCPD_CREDS.token) headers['Authorization'] = `Bearer ${MCPD_CREDS.token}`;
|
||||
|
||||
const bodyStr = body !== undefined ? JSON.stringify(body) : undefined;
|
||||
if (bodyStr) headers['Content-Length'] = String(Buffer.byteLength(bodyStr));
|
||||
|
||||
const req = http.request(url, { method, timeout: 10_000, headers }, (res) => {
|
||||
const chunks: Buffer[] = [];
|
||||
res.on('data', (chunk: Buffer) => chunks.push(chunk));
|
||||
res.on('end', () => {
|
||||
const raw = Buffer.concat(chunks).toString();
|
||||
try {
|
||||
resolve({ status: res.statusCode ?? 500, data: raw ? JSON.parse(raw) as T : (undefined as T) });
|
||||
} catch {
|
||||
resolve({ status: res.statusCode ?? 500, data: raw as unknown as T });
|
||||
}
|
||||
});
|
||||
});
|
||||
req.on('error', reject);
|
||||
req.on('timeout', () => reject(new Error('Request timeout')));
|
||||
if (bodyStr) req.write(bodyStr);
|
||||
req.end();
|
||||
});
|
||||
}
|
||||
|
||||
const LLM_PROMPT_NAMES = [
|
||||
'llm-response-filter',
|
||||
'llm-request-optimization',
|
||||
'llm-pagination-index',
|
||||
'llm-gate-context-selector',
|
||||
'llm-summarize',
|
||||
'llm-paginate-titles',
|
||||
];
|
||||
|
||||
describe('Smoke: System Prompts', () => {
|
||||
let available = false;
|
||||
let systemProjectId = '';
|
||||
let prompts: Prompt[] = [];
|
||||
|
||||
beforeAll(async () => {
|
||||
console.log('');
|
||||
console.log(' ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━');
|
||||
console.log(' Smoke Test: System Prompts (LLM pipeline)');
|
||||
console.log(' ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━');
|
||||
|
||||
available = await isMcplocalRunning();
|
||||
if (!available) {
|
||||
console.log('\n ✗ mcplocal not running — all tests will be skipped\n');
|
||||
return;
|
||||
}
|
||||
|
||||
// Find mcpctl-system project
|
||||
const projectsResult = await mcpdRequest<Array<{ id: string; name: string }>>('GET', '/api/v1/projects');
|
||||
const systemProject = projectsResult.data.find((p) => p.name === 'mcpctl-system');
|
||||
if (!systemProject) {
|
||||
console.log('\n ✗ mcpctl-system project not found — tests will fail\n');
|
||||
return;
|
||||
}
|
||||
systemProjectId = systemProject.id;
|
||||
|
||||
// Fetch all prompts for system project (API uses project name, not ID)
|
||||
const promptsResult = await mcpdRequest<Prompt[]>('GET', `/api/v1/prompts?project=mcpctl-system`);
|
||||
prompts = promptsResult.data;
|
||||
console.log(`\n ✓ Found ${prompts.length} system prompts\n`);
|
||||
});
|
||||
|
||||
it('all 6 LLM prompts exist in mcpctl-system', () => {
|
||||
if (!available) return;
|
||||
|
||||
const promptNames = prompts.map((p) => p.name);
|
||||
for (const name of LLM_PROMPT_NAMES) {
|
||||
expect(promptNames, `Missing system prompt: ${name}`).toContain(name);
|
||||
}
|
||||
});
|
||||
|
||||
it('edit a prompt with no required vars succeeds', async () => {
|
||||
if (!available) return;
|
||||
|
||||
const prompt = prompts.find((p) => p.name === 'llm-response-filter');
|
||||
expect(prompt).toBeDefined();
|
||||
|
||||
const newContent = prompt!.content + '\n- Additional custom rule';
|
||||
const result = await mcpdRequest<Prompt>('PUT', `/api/v1/prompts/${prompt!.id}`, {
|
||||
content: newContent,
|
||||
});
|
||||
expect(result.status).toBe(200);
|
||||
expect(result.data.content).toBe(newContent);
|
||||
|
||||
// Restore original
|
||||
await mcpdRequest('PUT', `/api/v1/prompts/${prompt!.id}`, {
|
||||
content: prompt!.content,
|
||||
});
|
||||
});
|
||||
|
||||
it('edit llm-summarize removing {{maxTokens}} is rejected with 400', async () => {
|
||||
if (!available) return;
|
||||
|
||||
const prompt = prompts.find((p) => p.name === 'llm-summarize');
|
||||
expect(prompt).toBeDefined();
|
||||
|
||||
const result = await mcpdRequest<{ message?: string }>('PUT', `/api/v1/prompts/${prompt!.id}`, {
|
||||
content: 'Summarize this content briefly.',
|
||||
});
|
||||
expect(result.status).toBe(400);
|
||||
});
|
||||
|
||||
it('edit llm-paginate-titles removing {{pageCount}} is rejected with 400', async () => {
|
||||
if (!available) return;
|
||||
|
||||
const prompt = prompts.find((p) => p.name === 'llm-paginate-titles');
|
||||
expect(prompt).toBeDefined();
|
||||
|
||||
const result = await mcpdRequest<{ message?: string }>('PUT', `/api/v1/prompts/${prompt!.id}`, {
|
||||
content: 'Generate some titles for pages.',
|
||||
});
|
||||
expect(result.status).toBe(400);
|
||||
});
|
||||
|
||||
it('edit with required vars present succeeds', async () => {
|
||||
if (!available) return;
|
||||
|
||||
const prompt = prompts.find((p) => p.name === 'llm-summarize');
|
||||
expect(prompt).toBeDefined();
|
||||
|
||||
const newContent = 'Custom: Summarize in about {{maxTokens}} tokens. Keep it concise.';
|
||||
const result = await mcpdRequest<Prompt>('PUT', `/api/v1/prompts/${prompt!.id}`, {
|
||||
content: newContent,
|
||||
});
|
||||
expect(result.status).toBe(200);
|
||||
expect(result.data.content).toBe(newContent);
|
||||
|
||||
// Restore original
|
||||
await mcpdRequest('PUT', `/api/v1/prompts/${prompt!.id}`, {
|
||||
content: prompt!.content,
|
||||
});
|
||||
});
|
||||
|
||||
it('delete a system prompt resets to default', async () => {
|
||||
if (!available) return;
|
||||
|
||||
const prompt = prompts.find((p) => p.name === 'llm-gate-context-selector');
|
||||
expect(prompt).toBeDefined();
|
||||
|
||||
// First, modify the prompt
|
||||
await mcpdRequest('PUT', `/api/v1/prompts/${prompt!.id}`, {
|
||||
content: 'Temporarily customized content.',
|
||||
});
|
||||
|
||||
// Delete should reset to default, not actually delete
|
||||
const deleteResult = await mcpdRequest<Prompt>('DELETE', `/api/v1/prompts/${prompt!.id}`);
|
||||
expect(deleteResult.status).toBe(200);
|
||||
expect(deleteResult.data.content).toContain('context selection assistant');
|
||||
|
||||
// Prompt should still exist
|
||||
const getResult = await mcpdRequest<Prompt>('GET', `/api/v1/prompts/${prompt!.id}`);
|
||||
expect(getResult.status).toBe(200);
|
||||
expect(getResult.data.name).toBe('llm-gate-context-selector');
|
||||
});
|
||||
});
|
||||
Reference in New Issue
Block a user