feat: prompt section drill-down via prompts/get arguments
Extends section drill-down (previously tool-only) to work with prompts/get using _resultId + _section arguments. Shares the same section store as tool results, enabling cross-method drill-down. Large prompts (>2000 chars) are automatically split into sections. Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
@@ -33,6 +33,19 @@ export function createContentPipelinePlugin(): ProxyModelPlugin {
|
||||
return null;
|
||||
},
|
||||
|
||||
/**
 * prompts/get hook: intercept section drill-down requests.
 *
 * When the caller supplies both `_resultId` and `_section` arguments, serve
 * the cached section via handleSectionDrillDown instead of forwarding the
 * request. Returns null otherwise so normal prompt resolution continues.
 */
async onPromptGet(_name, request, ctx) {
  const params = request.params as Record<string, unknown> | undefined;
  const args = params?.arguments as Record<string, unknown> | undefined;
  const resultId = args?.['_resultId'] as string | undefined;
  const section = args?.['_section'] as string | undefined;

  // Not a drill-down call — let the router handle the prompt normally.
  if (!resultId || !section) return null;

  return handleSectionDrillDown(request, resultId, section, ctx);
},
|
||||
|
||||
async onToolCallAfter(toolName, _args, response, ctx) {
|
||||
if (response.error) return response;
|
||||
|
||||
|
||||
@@ -4,12 +4,13 @@ import { ResponsePaginator } from './llm/pagination.js';
|
||||
import type { McpdClient } from './http/mcpd-client.js';
|
||||
import type { PromptIndexEntry } from './gate/tag-matcher.js';
|
||||
import { LinkResolver } from './services/link-resolver.js';
|
||||
import type { LLMProvider, CacheProvider } from './proxymodel/types.js';
|
||||
import type { LLMProvider, CacheProvider, Section } from './proxymodel/types.js';
|
||||
import { executePipeline } from './proxymodel/executor.js';
|
||||
import { getProxyModel } from './proxymodel/loader.js';
|
||||
import type { ProxyModelPlugin } from './proxymodel/plugin.js';
|
||||
import type { ProxyModelPlugin, PluginSessionContext } from './proxymodel/plugin.js';
|
||||
import { PluginContextImpl, type PluginContextDeps } from './proxymodel/plugin-context.js';
|
||||
import type { AuditCollector } from './audit/collector.js';
|
||||
import { pauseQueue } from './proxymodel/pause-queue.js';
|
||||
|
||||
export interface RouteContext {
|
||||
sessionId?: string;
|
||||
@@ -149,7 +150,7 @@ export class McpRouter {
|
||||
if (!proxyModelDef || !effectiveLlm || !effectiveCache) {
|
||||
return { content };
|
||||
}
|
||||
return executePipeline({
|
||||
const result = await executePipeline({
|
||||
content,
|
||||
contentType,
|
||||
sourceName: toolName,
|
||||
@@ -162,6 +163,21 @@ export class McpRouter {
|
||||
...(this.auditCollector ? { auditCollector: this.auditCollector } : {}),
|
||||
...(serverName !== undefined ? { serverName } : {}),
|
||||
});
|
||||
|
||||
// Pause queue: if paused, hold the result until released/edited/dropped
|
||||
if (pauseQueue.paused) {
|
||||
const pausedContent = await pauseQueue.enqueue({
|
||||
sessionId,
|
||||
projectName: this.projectName ?? 'unknown',
|
||||
contentType,
|
||||
sourceName: toolName,
|
||||
original: content,
|
||||
transformed: result.content,
|
||||
});
|
||||
return { ...result, content: pausedContent };
|
||||
}
|
||||
|
||||
return result;
|
||||
},
|
||||
queueNotification: (notification) => this.queueNotification(sessionId, notification),
|
||||
postToMcpd: async (path, body) => {
|
||||
@@ -636,6 +652,15 @@ export class McpRouter {
|
||||
|
||||
case 'prompts/get': {
|
||||
const promptName = (request.params as Record<string, unknown> | undefined)?.name as string | undefined;
|
||||
|
||||
// Plugin hook: onPromptGet (section drill-down, etc.)
|
||||
if (this.plugin?.onPromptGet && context?.sessionId && promptName) {
|
||||
const ctx = await this.getOrCreatePluginContext(context.sessionId);
|
||||
const intercepted = await this.plugin.onPromptGet(promptName, request, ctx);
|
||||
if (intercepted) return intercepted;
|
||||
}
|
||||
|
||||
let response: JsonRpcResponse;
|
||||
if (promptName?.startsWith('mcpctl/')) {
|
||||
const shortName = promptName.slice('mcpctl/'.length);
|
||||
const managedIndex = await this.fetchPromptIndex();
|
||||
@@ -643,7 +668,7 @@ export class McpRouter {
|
||||
if (!entry) {
|
||||
return { jsonrpc: '2.0', id: request.id, error: { code: -32601, message: `Unknown name: ${promptName}` } };
|
||||
}
|
||||
return {
|
||||
response = {
|
||||
jsonrpc: '2.0',
|
||||
id: request.id,
|
||||
result: {
|
||||
@@ -659,8 +684,29 @@ export class McpRouter {
|
||||
],
|
||||
},
|
||||
};
|
||||
} else {
|
||||
response = await this.routeNamespacedCall(request, 'name', this.promptToServer);
|
||||
}
|
||||
return this.routeNamespacedCall(request, 'name', this.promptToServer);
|
||||
|
||||
// Post-process: run large prompt content through the pipeline for section-split
|
||||
if (!response.error && this.plugin && context?.sessionId) {
|
||||
const ctx = await this.getOrCreatePluginContext(context.sessionId);
|
||||
const text = extractPromptText(response);
|
||||
if (text && text.length > 2000) {
|
||||
try {
|
||||
const result = await ctx.processContent(promptName ?? 'prompt', text, 'prompt');
|
||||
if (result.sections && result.sections.length > 0) {
|
||||
response = injectSectionsIntoPromptResponse(response, result.content, result.sections, ctx);
|
||||
} else if (result.content !== text) {
|
||||
response = replacePromptText(response, result.content);
|
||||
}
|
||||
} catch {
|
||||
// Pipeline failed — return original
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return response;
|
||||
}
|
||||
|
||||
// Handle MCP notifications (no response expected, but return empty result if called as request)
|
||||
@@ -961,3 +1007,74 @@ export class McpRouter {
|
||||
this.pluginContexts.clear();
|
||||
}
|
||||
}
|
||||
|
||||
// ── Prompt section drill-down helpers ──

// Same key used by content-pipeline plugin so drill-down lookups work across tools and prompts
const SECTION_STORE_KEY = '_contentPipeline_sections';
const SECTION_TTL_MS = 300_000; // 5 minutes

// One entry in the per-session section store (a Map<resultId, SectionStoreEntry>
// kept under SECTION_STORE_KEY in ctx.state).
interface SectionStoreEntry {
  // Sections produced by the content pipeline for one prompt/tool result.
  sections: Section[];
  // Epoch ms at storage time; entries older than SECTION_TTL_MS are evicted.
  createdAt: number;
}
|
||||
|
||||
/** Extract text from all messages in a prompts/get response. */
|
||||
function extractPromptText(response: JsonRpcResponse): string | null {
|
||||
if (!response.result || typeof response.result !== 'object') return null;
|
||||
const result = response.result as Record<string, unknown>;
|
||||
const messages = result['messages'] as Array<{ role: string; content: unknown }> | undefined;
|
||||
if (!Array.isArray(messages)) return null;
|
||||
const texts: string[] = [];
|
||||
for (const msg of messages) {
|
||||
if (typeof msg.content === 'string') {
|
||||
texts.push(msg.content);
|
||||
} else if (msg.content && typeof msg.content === 'object') {
|
||||
const c = msg.content as { type?: string; text?: string };
|
||||
if (c.type === 'text' && c.text) texts.push(c.text);
|
||||
}
|
||||
}
|
||||
return texts.length > 0 ? texts.join('\n') : null;
|
||||
}
|
||||
|
||||
/** Replace prompt message text with processed content. */
|
||||
function replacePromptText(response: JsonRpcResponse, newText: string): JsonRpcResponse {
|
||||
const result = response.result as Record<string, unknown>;
|
||||
const messages = result['messages'] as Array<{ role: string; content: unknown }>;
|
||||
const newMessages = messages.map((msg) => {
|
||||
if (typeof msg.content === 'string') {
|
||||
return { ...msg, content: newText };
|
||||
}
|
||||
if (msg.content && typeof msg.content === 'object') {
|
||||
return { ...msg, content: { type: 'text', text: newText } };
|
||||
}
|
||||
return msg;
|
||||
});
|
||||
return { ...response, result: { ...result, messages: newMessages } };
|
||||
}
|
||||
|
||||
/** Inject sections TOC into prompt response and store sections for drill-down. */
|
||||
function injectSectionsIntoPromptResponse(
|
||||
response: JsonRpcResponse,
|
||||
tocContent: string,
|
||||
sections: Section[],
|
||||
ctx: PluginSessionContext,
|
||||
): JsonRpcResponse {
|
||||
const resultId = `pm-${Date.now().toString(36)}`;
|
||||
|
||||
// Store sections (same store as tool results so drill-down works uniformly)
|
||||
let store = ctx.state.get(SECTION_STORE_KEY) as Map<string, SectionStoreEntry> | undefined;
|
||||
if (!store) {
|
||||
store = new Map();
|
||||
ctx.state.set(SECTION_STORE_KEY, store);
|
||||
}
|
||||
store.set(resultId, { sections, createdAt: Date.now() });
|
||||
// Evict stale
|
||||
const now = Date.now();
|
||||
for (const [key, entry] of store) {
|
||||
if (now - entry.createdAt > SECTION_TTL_MS) store.delete(key);
|
||||
}
|
||||
|
||||
const text = `${tocContent}\n\n_resultId: ${resultId} — use _resultId and _section parameters to drill into a section.`;
|
||||
return replacePromptText(response, text);
|
||||
}
|
||||
|
||||
368
src/mcplocal/tests/prompt-section-drilldown.test.ts
Normal file
368
src/mcplocal/tests/prompt-section-drilldown.test.ts
Normal file
@@ -0,0 +1,368 @@
|
||||
/**
|
||||
* Tests for prompt section drill-down.
|
||||
*
|
||||
* Verifies that large prompt responses are split into sections via the
|
||||
* content-pipeline plugin, and that clients can drill into individual
|
||||
* sections using _resultId + _section parameters on subsequent prompts/get calls.
|
||||
*/
|
||||
import { describe, it, expect, vi } from 'vitest';
|
||||
import { createContentPipelinePlugin } from '../src/proxymodel/plugins/content-pipeline.js';
|
||||
import type { PluginSessionContext } from '../src/proxymodel/plugin.js';
|
||||
import type { JsonRpcRequest, JsonRpcResponse } from '../src/types.js';
|
||||
import type { Section, LLMProvider, CacheProvider, StageLogger, ContentType } from '../src/proxymodel/types.js';
|
||||
|
||||
// ── Helpers ──
|
||||
|
||||
/** Generate a large markdown prompt with multiple sections. */
|
||||
function generateLargePrompt(sectionCount: number): string {
|
||||
const sections: string[] = [];
|
||||
for (let i = 1; i <= sectionCount; i++) {
|
||||
const body = `This is the content of section ${i}. `.repeat(80); // ~3200 chars each
|
||||
sections.push(`## Section ${i}\n\n${body}`);
|
||||
}
|
||||
return sections.join('\n\n');
|
||||
}
|
||||
|
||||
/** Generate a large JSON prompt (array of objects). */
|
||||
function generateLargeJsonPrompt(itemCount: number): string {
|
||||
const items = Array.from({ length: itemCount }, (_, i) => ({
|
||||
name: `item-${i + 1}`,
|
||||
description: `Description for item ${i + 1}. `.repeat(60),
|
||||
config: { enabled: true, priority: i + 1 },
|
||||
}));
|
||||
return JSON.stringify(items, null, 2);
|
||||
}
|
||||
|
||||
/**
 * Build a PluginSessionContext test double.
 *
 * LLM is unavailable, cache is an in-memory Map, loggers are vi.fn() spies,
 * and processContent runs the REAL section-split stage so section behavior
 * in tests matches production.
 */
function createMockCtx(): PluginSessionContext {
  // Shared per-session state — the section store lives here.
  const state = new Map<string, unknown>();

  // LLM stub: available() is false, so pipeline stages should not rely on it.
  const mockLlm: LLMProvider = {
    async complete(prompt) { return `Summary: ${prompt.slice(0, 30)}...`; },
    available: () => false,
  };

  // In-memory cache backing the CacheProvider contract.
  const cache = new Map<string, string>();
  const mockCache: CacheProvider = {
    async getOrCompute(key, compute) {
      if (cache.has(key)) return cache.get(key)!;
      const val = await compute();
      cache.set(key, val);
      return val;
    },
    hash(content) { return content.slice(0, 8); },
    async get(key) { return cache.get(key) ?? null; },
    async set(key, value) { cache.set(key, value); },
  };

  // Spy logger — lets tests assert on log calls if needed.
  const mockLog: StageLogger = {
    debug: vi.fn(),
    info: vi.fn(),
    warn: vi.fn(),
    error: vi.fn(),
  };

  return {
    sessionId: 'test-session',
    projectName: 'test-project',
    state,
    llm: mockLlm,
    cache: mockCache,
    log: mockLog,
    discoverTools: async () => [],
    registerVirtualTool: vi.fn(),
    registerVirtualServer: vi.fn(),
    routeToUpstream: async (req: JsonRpcRequest) => ({ jsonrpc: '2.0' as const, id: req.id, result: {} }),
    fetchPromptIndex: async () => [],
    getSystemPrompt: async (_name: string, fallback: string) => fallback,
    processContent: async (toolName: string, content: string, contentType: ContentType) => {
      // Use real section-split stage for realistic testing
      const sectionSplit = (await import('../src/proxymodel/stages/section-split.js')).default;
      const ctx = {
        contentType,
        sourceName: toolName,
        projectName: 'test',
        sessionId: 'test-session',
        originalContent: content,
        llm: mockLlm,
        cache: mockCache,
        log: mockLog,
        getSystemPrompt: async (_n: string, fb: string) => fb,
        config: { minSectionSize: 500 },
      };
      return sectionSplit(content, ctx);
    },
    queueNotification: vi.fn(),
    postToMcpd: async () => ({}),
  };
}
|
||||
|
||||
function makePromptGetRequest(name: string, args?: Record<string, unknown>): JsonRpcRequest {
|
||||
return {
|
||||
jsonrpc: '2.0',
|
||||
id: 1,
|
||||
method: 'prompts/get',
|
||||
params: { name, ...(args ? { arguments: args } : {}) },
|
||||
};
|
||||
}
|
||||
|
||||
function makePromptResponse(id: number | string, text: string): JsonRpcResponse {
|
||||
return {
|
||||
jsonrpc: '2.0',
|
||||
id,
|
||||
result: {
|
||||
prompt: { name: 'test-prompt', description: 'A test prompt' },
|
||||
messages: [
|
||||
{ role: 'user', content: { type: 'text', text } },
|
||||
],
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
function extractResponseText(response: JsonRpcResponse): string {
|
||||
const result = response.result as Record<string, unknown>;
|
||||
// Prompt format: { messages: [{ content: { text } }] }
|
||||
if (result['messages']) {
|
||||
const messages = result['messages'] as Array<{ role: string; content: unknown }>;
|
||||
const c = messages[0].content;
|
||||
if (typeof c === 'string') return c;
|
||||
return (c as { text: string }).text;
|
||||
}
|
||||
// Tool result format: { content: [{ type: 'text', text }] }
|
||||
if (Array.isArray(result['content'])) {
|
||||
const parts = result['content'] as Array<{ type: string; text: string }>;
|
||||
return parts.map((p) => p.text).join('\n');
|
||||
}
|
||||
throw new Error(`Unexpected response format: ${JSON.stringify(result)}`);
|
||||
}
|
||||
|
||||
// ── Tests ──

// Covers three layers: the onPromptGet hook in isolation, the section store
// shared with tool results, and the full pipeline → store → drill-down cycle.
describe('Prompt section drill-down', () => {
  describe('onPromptGet hook', () => {
    it('returns null when no _resultId/_section params', async () => {
      const plugin = createContentPipelinePlugin();
      const ctx = createMockCtx();
      const request = makePromptGetRequest('my-prompt');

      // No drill-down args → hook must decline so normal routing proceeds.
      const result = await plugin.onPromptGet!('my-prompt', request, ctx);
      expect(result).toBeNull();
    });

    it('intercepts _resultId + _section and returns cached section', async () => {
      const plugin = createContentPipelinePlugin();
      const ctx = createMockCtx();

      // Manually store sections in the shared store (simulating prior pipeline run)
      const sections: Section[] = [
        { id: 'overview', title: 'Overview', content: 'This is the overview section with lots of detail.' },
        { id: 'setup', title: 'Setup', content: 'Step-by-step setup instructions here.' },
        { id: 'api', title: 'API Reference', content: 'Full API documentation content.' },
      ];
      const store = new Map<string, { sections: Section[]; createdAt: number }>();
      store.set('pm-test123', { sections, createdAt: Date.now() });
      // Key must match the plugin's SECTION_STORE_KEY.
      ctx.state.set('_contentPipeline_sections', store);

      // Drill into 'setup' section
      const request = makePromptGetRequest('my-prompt', { _resultId: 'pm-test123', _section: 'setup' });
      const result = await plugin.onPromptGet!('my-prompt', request, ctx);

      expect(result).not.toBeNull();
      const text = extractResponseText(result!);
      expect(text).toBe('Step-by-step setup instructions here.');
    });

    it('returns error for expired/invalid _resultId', async () => {
      const plugin = createContentPipelinePlugin();
      const ctx = createMockCtx();

      // Store is empty — lookup must produce a "not found" message, not a throw.
      const request = makePromptGetRequest('my-prompt', { _resultId: 'pm-nonexistent', _section: 'setup' });
      const result = await plugin.onPromptGet!('my-prompt', request, ctx);

      expect(result).not.toBeNull();
      const text = extractResponseText(result!);
      expect(text).toContain('Cached result not found');
    });

    it('returns error for unknown section ID', async () => {
      const plugin = createContentPipelinePlugin();
      const ctx = createMockCtx();

      const sections: Section[] = [
        { id: 'intro', title: 'Intro', content: 'Introduction content.' },
      ];
      const store = new Map<string, { sections: Section[]; createdAt: number }>();
      store.set('pm-abc', { sections, createdAt: Date.now() });
      ctx.state.set('_contentPipeline_sections', store);

      const request = makePromptGetRequest('my-prompt', { _resultId: 'pm-abc', _section: 'nonexistent' });
      const result = await plugin.onPromptGet!('my-prompt', request, ctx);

      expect(result).not.toBeNull();
      const text = extractResponseText(result!);
      // Error message should name the missing section AND list valid ids.
      expect(text).toContain("Section 'nonexistent' not found");
      expect(text).toContain('intro');
    });
  });

  describe('shared section store with tools', () => {
    it('prompt drill-down can access sections stored by tool pipeline', async () => {
      const plugin = createContentPipelinePlugin();
      const ctx = createMockCtx();

      // Simulate a tool call that stored sections (onToolCallAfter would do this)
      const toolSections: Section[] = [
        { id: 'nodes', title: 'Nodes', content: 'List of all flow nodes.' },
        { id: 'connections', title: 'Connections', content: 'Node connection map.' },
      ];
      const store = new Map<string, { sections: Section[]; createdAt: number }>();
      store.set('pm-fromtool', { sections: toolSections, createdAt: Date.now() });
      ctx.state.set('_contentPipeline_sections', store);

      // Prompt drill-down using a resultId that came from a tool result
      const request = makePromptGetRequest('any-prompt', { _resultId: 'pm-fromtool', _section: 'connections' });
      const result = await plugin.onPromptGet!('any-prompt', request, ctx);

      expect(result).not.toBeNull();
      const text = extractResponseText(result!);
      expect(text).toBe('Node connection map.');
    });
  });

  describe('large prompt → section-split → drill-down (full cycle)', () => {
    it('markdown prompt is split into sections with TOC', async () => {
      const largePrompt = generateLargePrompt(5);
      expect(largePrompt.length).toBeGreaterThan(10000);

      // Run through section-split stage directly
      const sectionSplit = (await import('../src/proxymodel/stages/section-split.js')).default;
      const mockLog: StageLogger = { debug: vi.fn(), info: vi.fn(), warn: vi.fn(), error: vi.fn() };
      const mockLlm: LLMProvider = { complete: async () => '', available: () => false };
      const mockCache: CacheProvider = {
        getOrCompute: async (_, c) => c(),
        hash: (s) => s.slice(0, 8),
        get: async () => null,
        set: async () => {},
      };

      const result = await sectionSplit(largePrompt, {
        contentType: 'prompt',
        sourceName: 'test-prompt',
        projectName: 'test',
        sessionId: 'sess-1',
        originalContent: largePrompt,
        llm: mockLlm,
        cache: mockCache,
        log: mockLog,
        getSystemPrompt: async (_n: string, fb: string) => fb,
        config: { minSectionSize: 500 },
      });

      // Should have produced sections
      expect(result.sections).toBeDefined();
      expect(result.sections!.length).toBeGreaterThanOrEqual(3);

      // TOC should list sections
      expect(result.content).toContain('sections');
      expect(result.content).toContain('Use section parameter');

      // Original was ~16K, TOC should be much shorter
      expect(result.content.length).toBeLessThan(largePrompt.length);

      // Log the transformation for visibility
      process.stderr.write(`\n--- Original prompt size: ${largePrompt.length} chars ---\n`);
      process.stderr.write(`--- Transformed TOC size: ${result.content.length} chars ---\n`);
      process.stderr.write(`--- Sections: ${result.sections!.map((s) => `${s.id} (${s.content.length})`).join(', ')} ---\n`);
    });

    it('JSON prompt is split into per-item sections', async () => {
      const largeJson = generateLargeJsonPrompt(8);
      expect(largeJson.length).toBeGreaterThan(5000);

      const sectionSplit = (await import('../src/proxymodel/stages/section-split.js')).default;
      const mockLog: StageLogger = { debug: vi.fn(), info: vi.fn(), warn: vi.fn(), error: vi.fn() };
      const mockLlm: LLMProvider = { complete: async () => '', available: () => false };
      const mockCache: CacheProvider = {
        getOrCompute: async (_, c) => c(),
        hash: (s) => s.slice(0, 8),
        get: async () => null,
        set: async () => {},
      };

      const result = await sectionSplit(largeJson, {
        contentType: 'prompt',
        sourceName: 'json-prompt',
        projectName: 'test',
        sessionId: 'sess-1',
        originalContent: largeJson,
        llm: mockLlm,
        cache: mockCache,
        log: mockLog,
        getSystemPrompt: async (_n: string, fb: string) => fb,
        config: { minSectionSize: 200 },
      });

      expect(result.sections).toBeDefined();
      expect(result.sections!.length).toBeGreaterThanOrEqual(4);
      // JSON array items should use name as section id
      expect(result.sections!.some((s) => s.id.startsWith('item-'))).toBe(true);

      process.stderr.write(`\n--- JSON prompt: ${largeJson.length} chars → ${result.sections!.length} sections ---\n`);
    });

    it('full cycle: pipeline → store → drill-down → content', async () => {
      const plugin = createContentPipelinePlugin();
      const ctx = createMockCtx();

      // Step 1: Generate a large prompt response
      const largePrompt = generateLargePrompt(4);
      const originalResponse = makePromptResponse(1, largePrompt);

      process.stderr.write(`\n=== Full cycle test ===\n`);
      process.stderr.write(`Original prompt size: ${largePrompt.length} chars\n`);

      // Step 2: Simulate the router post-processing (processContent + store)
      const pipelineResult = await ctx.processContent('test-prompt', largePrompt, 'prompt');

      expect(pipelineResult.sections).toBeDefined();
      expect(pipelineResult.sections!.length).toBeGreaterThan(1);

      // Store sections like the router would
      const resultId = `pm-${Date.now().toString(36)}`;
      const store = new Map<string, { sections: Section[]; createdAt: number }>();
      store.set(resultId, { sections: pipelineResult.sections!, createdAt: Date.now() });
      ctx.state.set('_contentPipeline_sections', store);

      process.stderr.write(`Pipeline output (TOC):\n${pipelineResult.content}\n`);
      process.stderr.write(`Stored ${pipelineResult.sections!.length} sections under ${resultId}\n`);

      // Step 3: Drill into first section
      const firstSection = pipelineResult.sections![0];
      const drillRequest = makePromptGetRequest('test-prompt', {
        _resultId: resultId,
        _section: firstSection.id,
      });
      const drillResult = await plugin.onPromptGet!('test-prompt', drillRequest, ctx);

      expect(drillResult).not.toBeNull();
      const drillText = extractResponseText(drillResult!);
      expect(drillText).toBe(firstSection.content);

      process.stderr.write(`Drill-down into "${firstSection.id}": ${drillText.length} chars\n`);

      // Step 4: Drill into last section
      const lastSection = pipelineResult.sections![pipelineResult.sections!.length - 1];
      const drillRequest2 = makePromptGetRequest('test-prompt', {
        _resultId: resultId,
        _section: lastSection.id,
      });
      const drillResult2 = await plugin.onPromptGet!('test-prompt', drillRequest2, ctx);

      expect(drillResult2).not.toBeNull();
      const drillText2 = extractResponseText(drillResult2!);
      expect(drillText2).toBe(lastSection.content);

      process.stderr.write(`Drill-down into "${lastSection.id}": ${drillText2.length} chars\n`);
      process.stderr.write(`=== Full cycle complete ===\n`);
    });
  });
});
|
||||
122
src/mcplocal/tests/smoke/prompt-drilldown.test.ts
Normal file
122
src/mcplocal/tests/smoke/prompt-drilldown.test.ts
Normal file
@@ -0,0 +1,122 @@
|
||||
/**
|
||||
* Smoke tests: Prompt section drill-down.
|
||||
*
|
||||
* Verifies that large prompts served via prompts/get are section-split
|
||||
* and that subsequent calls with _resultId + _section return cached sections.
|
||||
*
|
||||
* Requires: mcplocal running on localhost:3200, mcpd on 10.0.0.194:3100
|
||||
*/
|
||||
import { describe, it, expect, beforeAll, afterAll } from 'vitest';
|
||||
import { SmokeMcpSession, isMcplocalRunning } from './mcp-client.js';
|
||||
|
||||
// Project passed to the MCP session; must exist in the running mcplocal instance.
const PROJECT_NAME = 'smoke-data';

// Live smoke test: every `it` early-returns when mcplocal is not reachable,
// so the suite is a no-op in environments without the running services.
describe('Smoke: Prompt section drill-down', () => {
  let available = false;
  let session: SmokeMcpSession;

  beforeAll(async () => {
    available = await isMcplocalRunning();
    if (!available) return;

    session = new SmokeMcpSession(PROJECT_NAME);
    await session.initialize();
    await session.sendNotification('notifications/initialized');
  });

  afterAll(async () => {
    if (session) await session.close();
  });

  it('prompts/list returns available prompts', async () => {
    if (!available) return;

    const result = await session.send('prompts/list') as { prompts: Array<{ name: string; description?: string }> };
    expect(result.prompts).toBeDefined();
    expect(Array.isArray(result.prompts)).toBe(true);
    // Should have at least mcpctl-managed prompts
    const mcpctlPrompts = result.prompts.filter((p) => p.name.startsWith('mcpctl/'));
    console.log(`  Found ${result.prompts.length} prompts (${mcpctlPrompts.length} mcpctl-managed)`);
  });

  it('prompts/get returns prompt content', async () => {
    if (!available) return;

    const listResult = await session.send('prompts/list') as { prompts: Array<{ name: string }> };
    if (listResult.prompts.length === 0) {
      console.log('  No prompts available — skipping');
      return;
    }

    const promptName = listResult.prompts[0].name;
    const getResult = await session.send('prompts/get', { name: promptName }) as {
      messages?: Array<{ role: string; content: unknown }>;
    };

    expect(getResult.messages).toBeDefined();
    expect(getResult.messages!.length).toBeGreaterThan(0);
    console.log(`  prompts/get "${promptName}": ${getResult.messages!.length} message(s)`);
  });

  it('large prompt response includes section TOC with _resultId', async () => {
    if (!available) return;

    // Find a mcpctl-managed prompt (these tend to be large system prompts)
    const listResult = await session.send('prompts/list') as { prompts: Array<{ name: string }> };
    const mcpctlPrompts = listResult.prompts.filter((p) => p.name.startsWith('mcpctl/'));

    if (mcpctlPrompts.length === 0) {
      console.log('  No mcpctl prompts — skipping section drill-down test');
      return;
    }

    // Try each prompt to find one large enough to be section-split
    let foundSections = false;
    for (const prompt of mcpctlPrompts) {
      const getResult = await session.send('prompts/get', { name: prompt.name }) as {
        messages?: Array<{ role: string; content: unknown }>;
      };

      if (!getResult.messages || getResult.messages.length === 0) continue;

      // Only the first message's text is inspected for the TOC marker.
      const msg = getResult.messages[0];
      const text = typeof msg.content === 'string'
        ? msg.content
        : (msg.content as { text?: string }).text ?? '';

      if (text.includes('_resultId')) {
        foundSections = true;
        console.log(`  "${prompt.name}": section-split TOC detected (${text.length} chars)`);
        console.log(`  TOC preview: ${text.slice(0, 200)}...`);

        // Extract _resultId
        const match = /_resultId:\s*(pm-[a-z0-9]+)/.exec(text);
        if (match) {
          const resultId = match[1];
          // Extract first section id from TOC
          const sectionMatch = /\[([^\]]+)\]/.exec(text);
          if (sectionMatch) {
            const sectionId = sectionMatch[1];
            console.log(`  Drilling into section "${sectionId}" with resultId "${resultId}"...`);

            // Drill down
            const drillResult = await session.send('prompts/get', {
              name: prompt.name,
              arguments: { _resultId: resultId, _section: sectionId },
            }) as { content?: Array<{ text: string }> };

            expect(drillResult).toBeDefined();
            console.log(`  Drill-down returned: ${JSON.stringify(drillResult).length} chars`);
          }
        }
        break;
      } else {
        console.log(`  "${prompt.name}": ${text.length} chars (not section-split — likely too small)`);
      }
    }

    if (!foundSections) {
      console.log('  No prompts large enough for section-split — test inconclusive but not failing');
    }
  });
});
|
||||
Reference in New Issue
Block a user