Files
mcpctl/src/mcpd/src/bootstrap/system-project.ts
Michal 0995851810 feat: remove proxyMode — all traffic goes through mcplocal proxy
proxyMode "direct" was a security hole (leaked secrets as plaintext env
vars in .mcp.json) and bypassed all mcplocal features (gating, audit,
RBAC, content pipeline, namespacing). Removed from schema, API, CLI,
and all tests. Old configs with proxyMode are accepted but silently
stripped via Zod .transform() for backward compatibility.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-07 23:36:36 +00:00

189 lines
7.7 KiB
TypeScript

/**
* Bootstrap the mcpctl-system project and its system prompts.
*
* This runs on every mcpd startup and uses upserts to be idempotent.
* System prompts are editable by users but will be re-created if deleted.
*/
import type { PrismaClient } from '@prisma/client';
/** Well-known project name for system prompts. Exported so other modules can identify the system project. */
export const SYSTEM_PROJECT_NAME = 'mcpctl-system';
/** Well-known email for the system user. Module-private — only used as the seeded project owner's identity. */
const SYSTEM_USER_EMAIL = 'system@mcpctl.local';
/** Shape of one built-in system prompt seeded by bootstrapSystemProject. */
interface SystemPromptDef {
  /** Unique prompt name within the system project; used for lookup, delete protection, and reset-on-delete. */
  name: string;
  /** Prompt priority. In SYSTEM_PROMPTS, 10 = gating/session prompts and 5 = LLM pipeline prompts. */
  priority: number;
  /** Default prompt text, created only when the prompt does not already exist. */
  content: string;
  /** Template variables that must be present when editing (e.g., '{{maxTokens}}'). */
  requiredVars?: string[];
}
/**
 * Built-in prompt definitions seeded into the mcpctl-system project on startup.
 *
 * These are defaults only: bootstrapSystemProject creates a prompt when missing
 * and never overwrites an existing row, so users may freely edit the content.
 * Deleted prompts are re-created from these definitions on the next startup.
 */
const SYSTEM_PROMPTS: SystemPromptDef[] = [
  // ── Gating / session prompts (priority 10, editable) ──
  {
    name: 'gate-instructions',
    priority: 10,
    content: `This project uses a gated session. Before you can access tools, you must start a session by calling begin_session.
Call begin_session immediately using the arguments it requires (check its input schema). If it accepts a description, briefly describe the user's task. If it accepts tags, provide 3-7 keywords relevant to the user's request.
The available tools and prompts are listed below. After calling begin_session, you will receive relevant project context and full access to all tools.`,
  },
  {
    name: 'gate-encouragement',
    priority: 10,
    content: `If any of the listed prompts seem relevant to your work, or if you encounter unfamiliar patterns, conventions, or constraints during implementation, use read_prompts({ tags: [...] }) to retrieve them.
It is better to check and not need it than to proceed without important context. The project maintainers have documented common pitfalls, architecture decisions, and required patterns — taking 10 seconds to retrieve a prompt can save hours of rework.`,
  },
  {
    // Shown when a client skips begin_session and context is matched from the tool call itself.
    name: 'gate-intercept-preamble',
    priority: 10,
    content: `The following project context was automatically retrieved based on your tool call. You bypassed the begin_session step, so this context was matched using keywords extracted from your tool invocation.
Review this context carefully — it may contain important guidelines, constraints, or patterns relevant to your work. If you need more context, use read_prompts({ tags: [...] }) at any time.`,
  },
  {
    name: 'gate-session-active',
    priority: 10,
    content: `The session is now active with full tool access. Proceed with the user's original request using the tools listed above.`,
  },
  {
    name: 'session-greeting',
    priority: 10,
    content: `Welcome to this project. To get started, call begin_session with the arguments it requires.
Examples:
begin_session({ tags: ["zigbee", "pairing", "mqtt"] })
begin_session({ description: "I want to pair a new Zigbee device" })
This will load relevant project context, policies, and guidelines tailored to your work.`,
  },
  // ── LLM pipeline prompts (priority 5, editable) ──
  {
    name: 'llm-response-filter',
    priority: 5,
    content: `You are a data filtering assistant. Your job is to extract only the relevant information from MCP tool responses.
Rules:
- Remove redundant or verbose fields that aren't useful to the user's query
- Keep essential identifiers, names, statuses, and key metrics
- Preserve error messages and warnings in full
- If the response is already concise, return it unchanged
- Output valid JSON only, no markdown or explanations
- If you cannot parse the input, return it unchanged`,
  },
  {
    name: 'llm-request-optimization',
    priority: 5,
    content: `You are a query optimization assistant. Your job is to optimize MCP tool call parameters.
Rules:
- Add appropriate filters or limits if the query is too broad
- Keep the original intent of the request
- Output valid JSON with the optimized parameters only, no markdown or explanations
- If no optimization is needed, return the original parameters unchanged`,
  },
  {
    name: 'llm-pagination-index',
    priority: 5,
    content: `You are a document indexing assistant. Given a large tool response split into pages, generate a concise summary for each page describing what data it contains.
Rules:
- For each page, write 1-2 sentences describing the key content
- Be specific: mention entity names, IDs, counts, or key fields visible on that page
- If it's JSON, describe the structure and notable entries
- If it's text, describe the topics covered
- Output valid JSON only: an array of objects with "page" (1-based number) and "summary" (string)
- Example output: [{"page": 1, "summary": "Configuration nodes and global settings (inject, debug, function nodes 1-15)"}, {"page": 2, "summary": "HTTP request nodes and API integrations (nodes 16-40)"}]`,
  },
  {
    name: 'llm-gate-context-selector',
    priority: 5,
    content: `You are a context selection assistant. Given a developer's task keywords and a list of available project prompts, select which prompts are relevant to their work. Return a JSON object with "selectedNames" (array of prompt names) and "reasoning" (brief explanation). Priority 10 prompts must always be included.`,
  },
  {
    // requiredVars are enforced when users edit the prompt (see getSystemPromptRequiredVars).
    name: 'llm-summarize',
    priority: 5,
    requiredVars: ['{{maxTokens}}'],
    content: `Summarize the following in about {{maxTokens}} tokens. Preserve all items marked MUST, REQUIRED, or CRITICAL verbatim. Be specific — mention names, IDs, counts, key values.`,
  },
  {
    name: 'llm-paginate-titles',
    priority: 5,
    requiredVars: ['{{pageCount}}'],
    content: `Generate exactly {{pageCount}} short descriptive titles (max 60 chars each) for the following {{pageCount}} pages. Return ONLY a JSON array of {{pageCount}} strings. No markdown, no explanation.`,
  },
];
/**
* Ensure the mcpctl-system project and its system prompts exist.
* Uses upserts so this is safe to call on every startup.
*/
/**
 * Ensure the mcpctl-system project and its system prompts exist.
 *
 * Idempotent — safe to call on every mcpd startup. The system user and
 * project are upserted with empty `update` clauses so user edits survive;
 * prompts are only created when missing, never overwritten.
 *
 * @param prisma Connected Prisma client used for all upserts/creates.
 */
export async function bootstrapSystemProject(prisma: PrismaClient): Promise<void> {
  // Ensure a system user exists (needed as project owner)
  const systemUser = await prisma.user.upsert({
    where: { email: SYSTEM_USER_EMAIL },
    create: {
      email: SYSTEM_USER_EMAIL,
      name: 'System',
      passwordHash: '!locked', // Cannot login — not a real password hash
      role: 'USER',
    },
    update: {},
  });

  // Upsert the system project
  const project = await prisma.project.upsert({
    where: { name: SYSTEM_PROJECT_NAME },
    create: {
      name: SYSTEM_PROJECT_NAME,
      description: 'System prompts for mcpctl gating and session management',
      prompt: '',
      gated: false,
      ownerId: systemUser.id,
    },
    update: {}, // Don't overwrite user edits to the project itself
  });

  // Seed each missing system prompt. Prompts are independent of one another,
  // so the lookups/creates run in parallel instead of awaiting sequentially.
  // NOTE(review): findFirst + create is not atomic; two concurrent mcpd
  // startups could race and insert duplicates unless (name, projectId) has a
  // unique constraint in the Prisma schema — verify and switch to upsert if so.
  await Promise.all(
    SYSTEM_PROMPTS.map(async (def) => {
      const existing = await prisma.prompt.findFirst({
        where: { name: def.name, projectId: project.id },
      });
      if (!existing) {
        await prisma.prompt.create({
          data: {
            name: def.name,
            content: def.content,
            priority: def.priority,
            projectId: project.id,
          },
        });
      }
      // If the prompt exists, don't overwrite — user may have edited it
    }),
  );
}
/** Get the names of all system prompts (for delete protection). */
export function getSystemPromptNames(): string[] {
return SYSTEM_PROMPTS.map((p) => p.name);
}
/** Get the required template variables for a system prompt (e.g., ['{{maxTokens}}']). */
export function getSystemPromptRequiredVars(name: string): string[] | undefined {
const def = SYSTEM_PROMPTS.find((p) => p.name === name);
return def?.requiredVars;
}
/** Get the default content for a system prompt (for reset-on-delete). */
export function getSystemPromptDefault(name: string): string | undefined {
const def = SYSTEM_PROMPTS.find((p) => p.name === name);
return def?.content;
}