diff --git a/completions/mcpctl.bash b/completions/mcpctl.bash index 33a443c..152d3ad 100644 --- a/completions/mcpctl.bash +++ b/completions/mcpctl.bash @@ -191,7 +191,7 @@ _mcpctl() { COMPREPLY=($(compgen -W "--type --description --default --url --namespace --mount --path-prefix --token-secret --config --force -h --help" -- "$cur")) ;; project) - COMPREPLY=($(compgen -W "-d --description --proxy-model --prompt --gated --no-gated --server --force -h --help" -- "$cur")) + COMPREPLY=($(compgen -W "-d --description --proxy-model --prompt --llm --llm-model --gated --no-gated --server --force -h --help" -- "$cur")) ;; user) COMPREPLY=($(compgen -W "--password --name --force -h --help" -- "$cur")) diff --git a/completions/mcpctl.fish b/completions/mcpctl.fish index e5b1a14..6b6907b 100644 --- a/completions/mcpctl.fish +++ b/completions/mcpctl.fish @@ -344,6 +344,8 @@ complete -c mcpctl -n "__mcpctl_subcmd_active create secretbackend" -l force -d complete -c mcpctl -n "__mcpctl_subcmd_active create project" -s d -l description -d 'Project description' -x complete -c mcpctl -n "__mcpctl_subcmd_active create project" -l proxy-model -d 'Plugin name (default, content-pipeline, gate, none)' -x complete -c mcpctl -n "__mcpctl_subcmd_active create project" -l prompt -d 'Project-level prompt / instructions for the LLM' -x +complete -c mcpctl -n "__mcpctl_subcmd_active create project" -l llm -d 'Name of an Llm resource (see \'mcpctl get llms\'), or \'none\' to disable' -x +complete -c mcpctl -n "__mcpctl_subcmd_active create project" -l llm-model -d 'Override the model string for this project (defaults to the Llm\'s own model)' -x complete -c mcpctl -n "__mcpctl_subcmd_active create project" -l gated -d '[deprecated: use --proxy-model default]' complete -c mcpctl -n "__mcpctl_subcmd_active create project" -l no-gated -d '[deprecated: use --proxy-model content-pipeline]' complete -c mcpctl -n "__mcpctl_subcmd_active create project" -l server -d 'Server name (repeat for multiple)' 
-x diff --git a/src/cli/src/commands/apply.ts b/src/cli/src/commands/apply.ts index 65e4b62..a24ce44 100644 --- a/src/cli/src/commands/apply.ts +++ b/src/cli/src/commands/apply.ts @@ -149,7 +149,12 @@ const ProjectSpecSchema = z.object({ prompt: z.string().max(10000).default(''), proxyModel: z.string().optional(), gated: z.boolean().optional(), + // Name of an `Llm` resource (see `mcpctl get llms`), or the literal 'none' + // to disable LLM features for this project. Unknown names fall back to the + // consumer's registry default — `mcpctl describe project` will flag that. llmProvider: z.string().optional(), + // Override the model string for this project; defaults to the Llm's own + // model when unset. llmModel: z.string().optional(), servers: z.array(z.string()).default([]), }); diff --git a/src/cli/src/commands/create.ts b/src/cli/src/commands/create.ts index 9318c63..895ca9b 100644 --- a/src/cli/src/commands/create.ts +++ b/src/cli/src/commands/create.ts @@ -378,6 +378,8 @@ export function createCreateCommand(deps: CreateCommandDeps): Command { .option('-d, --description ', 'Project description', '') .option('--proxy-model ', 'Plugin name (default, content-pipeline, gate, none)') .option('--prompt ', 'Project-level prompt / instructions for the LLM') + .option('--llm ', "Name of an Llm resource (see 'mcpctl get llms'), or 'none' to disable") + .option('--llm-model ', 'Override the model string for this project (defaults to the Llm\'s own model)') .option('--gated', '[deprecated: use --proxy-model default]') .option('--no-gated', '[deprecated: use --proxy-model content-pipeline]') .option('--server ', 'Server name (repeat for multiple)', collect, []) @@ -397,6 +399,8 @@ export function createCreateCommand(deps: CreateCommandDeps): Command { // Pass gated for backward compat with older mcpd if (opts.gated !== undefined) body.gated = opts.gated as boolean; if (opts.server.length > 0) body.servers = opts.server; + if (opts.llm) body.llmProvider = opts.llm; + if 
(opts.llmModel) body.llmModel = opts.llmModel; try { const project = await client.post<{ id: string; name: string }>('/api/v1/projects', body); diff --git a/src/cli/src/commands/describe.ts b/src/cli/src/commands/describe.ts index 814472a..a986d35 100644 --- a/src/cli/src/commands/describe.ts +++ b/src/cli/src/commands/describe.ts @@ -137,6 +137,7 @@ function formatInstanceDetail(instance: Record, inspect?: Recor function formatProjectDetail( project: Record, prompts: Array<{ name: string; priority: number; linkTarget: string | null }> = [], + knownLlmNames?: Set, ): string { const lines: string[] = []; lines.push(`=== Project: ${project.name} ===`); @@ -151,8 +152,21 @@ function formatProjectDetail( lines.push(''); lines.push('Plugin Config:'); lines.push(` ${pad('Plugin:', 18)}${proxyModel}`); - if (llmProvider) lines.push(` ${pad('LLM Provider:', 18)}${llmProvider}`); - if (llmModel) lines.push(` ${pad('LLM Model:', 18)}${llmModel}`); + if (llmProvider) { + // As of Phase 4, llmProvider names a centralized Llm resource (see + // `mcpctl get llms`). A value like "none" disables LLM for the project; + // anything else that doesn't match a registered Llm falls back to the + // registry default on consumers — flag it so operators notice. 
+ const resolvable = knownLlmNames === undefined + || llmProvider === 'none' + || knownLlmNames.has(llmProvider); + if (resolvable) { + lines.push(` ${pad('LLM:', 18)}${llmProvider}`); + } else { + lines.push(` ${pad('LLM:', 18)}${llmProvider} [warning: no Llm registered with this name — will fall back to registry default]`); + } + } + if (llmModel) lines.push(` ${pad('LLM Model:', 18)}${llmModel} (override)`); // Servers section const servers = project.servers as Array<{ server: { name: string } }> | undefined; @@ -887,10 +901,16 @@ export function createDescribeCommand(deps: DescribeCommandDeps): Command { deps.log(formatLlmDetail(item)); break; case 'projects': { - const projectPrompts = await deps.client - .get>(`/api/v1/prompts?projectId=${item.id as string}`) - .catch(() => []); - deps.log(formatProjectDetail(item, projectPrompts)); + const [projectPrompts, llms] = await Promise.all([ + deps.client + .get>(`/api/v1/prompts?projectId=${item.id as string}`) + .catch(() => []), + deps.client + .get>('/api/v1/llms') + .catch(() => [] as Array<{ name: string }>), + ]); + const llmNames = new Set(llms.map((l) => l.name)); + deps.log(formatProjectDetail(item, projectPrompts, llmNames)); break; } case 'users': { diff --git a/src/cli/tests/commands/describe.test.ts b/src/cli/tests/commands/describe.test.ts index 979ddba..52a320d 100644 --- a/src/cli/tests/commands/describe.test.ts +++ b/src/cli/tests/commands/describe.test.ts @@ -108,6 +108,77 @@ describe('describe command', () => { expect(text).not.toContain('Gated:'); }); + it('shows project Llm reference without warning when the name matches a registered Llm', async () => { + const deps = makeDeps({ + id: 'proj-1', + name: 'with-llm', + description: '', + ownerId: 'user-1', + proxyModel: 'default', + llmProvider: 'claude', + llmModel: 'claude-3-opus', + createdAt: '2025-01-01', + }); + // /api/v1/llms returns a claude entry → no warning + deps.client = { + get: vi.fn(async (path: string) => { + if (path === 
'/api/v1/llms') return [{ name: 'claude' }]; + return []; + }), + } as unknown as typeof deps.client; + const cmd = createDescribeCommand(deps); + await cmd.parseAsync(['node', 'test', 'project', 'proj-1']); + const text = deps.output.join('\n'); + expect(text).toContain('LLM:'); + expect(text).toContain('claude'); + expect(text).not.toContain('warning:'); + }); + + it('warns on describe project when llmProvider does not resolve to any registered Llm', async () => { + const deps = makeDeps({ + id: 'proj-1', + name: 'orphan', + description: '', + ownerId: 'user-1', + proxyModel: 'default', + llmProvider: 'claude-ghost', + createdAt: '2025-01-01', + }); + deps.client = { + get: vi.fn(async (path: string) => { + if (path === '/api/v1/llms') return [{ name: 'claude' }, { name: 'gpt-4o' }]; + return []; + }), + } as unknown as typeof deps.client; + const cmd = createDescribeCommand(deps); + await cmd.parseAsync(['node', 'test', 'project', 'proj-1']); + const text = deps.output.join('\n'); + expect(text).toContain('claude-ghost'); + expect(text).toContain('warning:'); + expect(text).toContain('fall back to registry default'); + }); + + it('does not warn when llmProvider is "none" (explicit disable)', async () => { + const deps = makeDeps({ + id: 'proj-1', + name: 'no-llm', + description: '', + ownerId: 'user-1', + proxyModel: 'default', + llmProvider: 'none', + createdAt: '2025-01-01', + }); + deps.client = { + get: vi.fn(async () => []), + } as unknown as typeof deps.client; + const cmd = createDescribeCommand(deps); + await cmd.parseAsync(['node', 'test', 'project', 'proj-1']); + const text = deps.output.join('\n'); + expect(text).toContain('LLM:'); + expect(text).toContain('none'); + expect(text).not.toContain('warning:'); + }); + it('shows project Plugin Config defaulting to "default" when proxyModel is empty', async () => { const deps = makeDeps({ id: 'proj-1', diff --git a/src/mcplocal/src/discovery.ts b/src/mcplocal/src/discovery.ts index 89ee424..6ee8181 100644 
--- a/src/mcplocal/src/discovery.ts +++ b/src/mcplocal/src/discovery.ts @@ -57,9 +57,16 @@ export async function refreshProjectUpstreams( /** * Fetch a project's LLM config (llmProvider, llmModel) from mcpd. - * These are the project-level "recommendations" — local overrides take priority. + * + * Phase 4 redefines `llmProvider` semantically: it names a centralized `Llm` + * resource (see `mcpctl get llms`) — NOT a local provider. Consumers should + * resolve it through mcpd's inference proxy when reachable. The field remains + * a free-form string on the wire for backward compatibility; local overrides + * in `~/.mcpctl/config.json` still take priority, and unknown names fall + * through to the registry default. */ export interface ProjectLlmConfig { + /** Name of an `Llm` resource on mcpd, or 'none' to disable LLM features. */ llmProvider?: string; llmModel?: string; proxyModel?: string; @@ -67,6 +74,31 @@ export interface ProjectLlmConfig { serverOverrides?: Record; } +/** + * Resolve a project's `llmProvider` against mcpd's Llm registry. Returns: + * - 'registered' — an Llm with this name exists + * - 'disabled' — value is 'none' + * - 'unregistered'— no Llm matches (consumer should fall back to registry default) + * - 'unreachable' — mcpd couldn't be queried + */ +export type LlmReferenceStatus = 'registered' | 'disabled' | 'unregistered' | 'unreachable'; + +export async function resolveProjectLlmReference( + mcpdClient: McpdClient, + llmProvider: string | undefined, +): Promise { + if (llmProvider === undefined || llmProvider === '') return 'unregistered'; + if (llmProvider === 'none') return 'disabled'; + try { + await mcpdClient.get(`/api/v1/llms/${encodeURIComponent(llmProvider)}`); + return 'registered'; + } catch (err) { + const msg = err instanceof Error ? 
err.message : String(err); + if (msg.includes('404') || msg.toLowerCase().includes('not found')) return 'unregistered'; + return 'unreachable'; + } +} + export async function fetchProjectLlmConfig( mcpdClient: McpdClient, projectName: string, diff --git a/src/mcplocal/src/http/project-mcp-endpoint.ts b/src/mcplocal/src/http/project-mcp-endpoint.ts index 42ec24a..7ea821f 100644 --- a/src/mcplocal/src/http/project-mcp-endpoint.ts +++ b/src/mcplocal/src/http/project-mcp-endpoint.ts @@ -101,7 +101,16 @@ export function registerProjectMcpEndpoint(app: FastifyInstance, mcpdClient: Mcp complete: async () => '', available: () => false, }; - // Build cache namespace: provider--model--proxymodel + // Build cache namespace: provider--model--proxymodel. + // Resolution order: + // 1. local ~/.mcpctl override + // 2. mcpdConfig.llmProvider (Phase 4: name of a centralized Llm) + // 3. local registry default (fast tier → active provider) + // 4. literal 'none' + // If (2) names an Llm the HTTP-mode proxy-model pipeline can route + // through mcpd's /api/v1/llms/:name/infer (pivot lands when the client + // integrates that path); meanwhile the value is still usable as a cache + // key, and the describe-project warning flags stale configs. const llmProvider = localOverride?.provider ?? mcpdConfig.llmProvider ?? effectiveRegistry?.getTierProviders('fast')[0] ?? 
effectiveRegistry?.getActiveName() diff --git a/src/mcplocal/tests/llm-reference-resolver.test.ts b/src/mcplocal/tests/llm-reference-resolver.test.ts new file mode 100644 index 0000000..af02a9f --- /dev/null +++ b/src/mcplocal/tests/llm-reference-resolver.test.ts @@ -0,0 +1,45 @@ +import { describe, it, expect, vi } from 'vitest'; +import { resolveProjectLlmReference } from '../src/discovery.js'; +import type { McpdClient } from '../src/http/mcpd-client.js'; + +function mockClient(get: (path: string) => Promise): McpdClient { + return { get } as unknown as McpdClient; +} + +describe('resolveProjectLlmReference', () => { + it('returns "disabled" for the literal string "none"', async () => { + const client = mockClient(async () => { throw new Error('should not be called'); }); + expect(await resolveProjectLlmReference(client, 'none')).toBe('disabled'); + }); + + it('returns "unregistered" when llmProvider is empty or undefined', async () => { + const client = mockClient(async () => { throw new Error('should not be called'); }); + expect(await resolveProjectLlmReference(client, undefined)).toBe('unregistered'); + expect(await resolveProjectLlmReference(client, '')).toBe('unregistered'); + }); + + it('returns "registered" when mcpd returns 200 for the name', async () => { + const get = vi.fn(async () => ({ name: 'claude' })); + expect(await resolveProjectLlmReference(mockClient(get), 'claude')).toBe('registered'); + expect(get).toHaveBeenCalledWith('/api/v1/llms/claude'); + }); + + it('returns "unregistered" on 404', async () => { + const client = mockClient(async () => { throw new Error('HTTP 404 not found'); }); + expect(await resolveProjectLlmReference(client, 'missing')).toBe('unregistered'); + }); + + it('returns "unreachable" on other errors (500, network)', async () => { + const client = mockClient(async () => { throw new Error('HTTP 500 internal error'); }); + expect(await resolveProjectLlmReference(client, 'x')).toBe('unreachable'); + + const client2 = 
mockClient(async () => { throw new Error('ECONNREFUSED'); }); + expect(await resolveProjectLlmReference(client2, 'x')).toBe('unreachable'); + }); + + it('URL-encodes names with special characters', async () => { + const get = vi.fn(async () => ({})); + await resolveProjectLlmReference(mockClient(get), 'weird name/with/slashes'); + expect(get).toHaveBeenCalledWith('/api/v1/llms/weird%20name%2Fwith%2Fslashes'); + }); +}); diff --git a/src/mcplocal/tests/smoke/llm-infer.smoke.test.ts b/src/mcplocal/tests/smoke/llm-infer.smoke.test.ts new file mode 100644 index 0000000..900e350 --- /dev/null +++ b/src/mcplocal/tests/smoke/llm-infer.smoke.test.ts @@ -0,0 +1,214 @@ +/** + * Smoke tests: `POST /api/v1/llms/:name/infer` against live mcpd. + * + * Validates the Phase 2 inference proxy path without needing a real provider + * key. We exercise the error-shape guarantees: + * 1. Missing Llm → 404. + * 2. Existing Llm + empty body → 400. + * 3. Existing Llm pointed at an unreachable URL → 502 with an error body. + * 4. RBAC: non-admin calling infer without `run:llms:` → 403 (skipped + * if we can't mint a scoped McpToken in this environment). + * + * The happy-path test needs a real provider, so we skip it by default and + * gate on LLM_INFER_SMOKE_REAL=1 + a working Llm name supplied via + * LLM_INFER_SMOKE_LLM. + */ +import { describe, it, expect, beforeAll, afterAll } from 'vitest'; +import http from 'node:http'; +import https from 'node:https'; +import { execSync } from 'node:child_process'; + +const MCPD_URL = process.env.MCPD_URL ?? 
'https://mcpctl.ad.itaz.eu'; +const SUFFIX = Date.now().toString(36); +const SECRET_NAME = `smoke-infer-sec-${SUFFIX}`; +const LLM_NAME = `smoke-infer-${SUFFIX}`; + +interface CliResult { code: number; stdout: string; stderr: string } + +function run(args: string): CliResult { + try { + const stdout = execSync(`mcpctl --direct ${args}`, { + encoding: 'utf-8', + timeout: 30_000, + stdio: ['ignore', 'pipe', 'pipe'], + }); + return { code: 0, stdout: stdout.trim(), stderr: '' }; + } catch (err) { + const e = err as { status?: number; stdout?: Buffer | string; stderr?: Buffer | string }; + return { + code: e.status ?? 1, + stdout: e.stdout ? (typeof e.stdout === 'string' ? e.stdout : e.stdout.toString('utf-8')) : '', + stderr: e.stderr ? (typeof e.stderr === 'string' ? e.stderr : e.stderr.toString('utf-8')) : '', + }; + } +} + +function healthz(url: string, timeoutMs = 5000): Promise { + return new Promise((resolve) => { + const parsed = new URL(`${url.replace(/\/$/, '')}/healthz`); + const driver = parsed.protocol === 'https:' ? https : http; + const req = driver.get( + { + hostname: parsed.hostname, + port: parsed.port || (parsed.protocol === 'https:' ? 443 : 80), + path: parsed.pathname, + timeout: timeoutMs, + }, + (res) => { resolve((res.statusCode ?? 500) < 500); res.resume(); }, + ); + req.on('error', () => resolve(false)); + req.on('timeout', () => { req.destroy(); resolve(false); }); + }); +} + +/** Look up the current session bearer so we can POST /infer directly. */ +function getBearer(): string | undefined { + // Try ~/.mcpctl/credentials.json via the CLI — `mcpctl config get` knows where it lives. + // If that shape changes, fall back to MCPCTL_TOKEN env. + const envToken = process.env.MCPCTL_TOKEN; + if (envToken !== undefined && envToken !== '') return envToken; + try { + // shape: { "session": { "token": "..." } } or similar — be defensive. 
+ const out = execSync('cat ~/.mcpctl/credentials.json 2>/dev/null', { encoding: 'utf-8' }); + const parsed = JSON.parse(out) as Record; + const token = (parsed.token ?? (parsed.session as { token?: string } | undefined)?.token); + return typeof token === 'string' ? token : undefined; + } catch { + return undefined; + } +} + +async function post( + path: string, + body: unknown, + bearer?: string, +): Promise<{ status: number; body: unknown }> { + const url = new URL(`${MCPD_URL.replace(/\/$/, '')}${path}`); + const driver = url.protocol === 'https:' ? https : http; + const payload = JSON.stringify(body); + const headers: Record = { + 'Content-Type': 'application/json', + 'Content-Length': Buffer.byteLength(payload).toString(), + }; + if (bearer !== undefined) headers['Authorization'] = `Bearer ${bearer}`; + + return new Promise((resolve, reject) => { + const req = driver.request( + { + hostname: url.hostname, + port: url.port || (url.protocol === 'https:' ? 443 : 80), + path: url.pathname + url.search, + method: 'POST', + headers, + timeout: 15_000, + }, + (res) => { + const chunks: Buffer[] = []; + res.on('data', (c: Buffer) => chunks.push(c)); + res.on('end', () => { + const raw = Buffer.concat(chunks).toString('utf-8'); + let parsed: unknown = raw; + try { parsed = JSON.parse(raw); } catch { /* leave as string */ } + resolve({ status: res.statusCode ?? 
0, body: parsed }); + }); + }, + ); + req.on('error', reject); + req.on('timeout', () => { req.destroy(); reject(new Error('request timed out')); }); + req.write(payload); + req.end(); + }); +} + +let mcpdUp = false; +let bearer: string | undefined; + +describe('llm-infer smoke', () => { + beforeAll(async () => { + mcpdUp = await healthz(MCPD_URL); + if (!mcpdUp) { + // eslint-disable-next-line no-console + console.warn(`\n ○ llm-infer smoke: skipped — ${MCPD_URL}/healthz unreachable.\n`); + return; + } + bearer = getBearer(); + if (bearer === undefined) { + // eslint-disable-next-line no-console + console.warn('\n ○ llm-infer smoke: no bearer available (set MCPCTL_TOKEN or login). Direct POST tests will skip.\n'); + } + }, 20_000); + + afterAll(() => { + if (!mcpdUp) return; + run(`delete llm ${LLM_NAME}`); + run(`delete secret ${SECRET_NAME}`); + }); + + it('creates a fixture secret + Llm pointed at an unreachable URL', () => { + if (!mcpdUp) return; + run(`delete llm ${LLM_NAME}`); + run(`delete secret ${SECRET_NAME}`); + + expect(run(`create secret ${SECRET_NAME} --data token=sk-fake`).code).toBe(0); + const createLlm = run([ + `create llm ${LLM_NAME}`, + '--type openai', + '--model gpt-4o-mini', + // Unroutable host so any actual upstream call returns an adapter error → 502 + '--url http://127.0.0.1:1', + `--api-key-ref ${SECRET_NAME}/token`, + ].join(' ')); + expect(createLlm.code, createLlm.stderr || createLlm.stdout).toBe(0); + }); + + it('returns 404 for an unknown Llm name', async () => { + if (!mcpdUp || bearer === undefined) return; + const res = await post('/api/v1/llms/__nonexistent_llm__/infer', + { messages: [{ role: 'user', content: 'hi' }] }, bearer); + expect(res.status).toBe(404); + }); + + it('returns 400 when messages is missing', async () => { + if (!mcpdUp || bearer === undefined) return; + const res = await post(`/api/v1/llms/${LLM_NAME}/infer`, {}, bearer); + expect(res.status).toBe(400); + const body = res.body as { error?: string }; + 
expect(body.error ?? '').toMatch(/messages/i); + }); + + it('returns 502 when the upstream provider is unreachable', async () => { + if (!mcpdUp || bearer === undefined) return; + const res = await post(`/api/v1/llms/${LLM_NAME}/infer`, + { messages: [{ role: 'user', content: 'hi' }] }, bearer); + // 502 is what the proxy returns on adapter errors; some paths may return + // the upstream's own status if the request reached it, so accept any + // non-2xx with an error body. + expect(res.status).toBeGreaterThanOrEqual(400); + expect(res.status).not.toBe(404); + expect(res.status).not.toBe(400); + const body = res.body as { error?: string | { message?: string } }; + const msg = typeof body.error === 'string' ? body.error : body.error?.message ?? ''; + expect(msg, 'error body must describe the failure').not.toBe(''); + }, 30_000); + + it('happy-path inference (opt-in: LLM_INFER_SMOKE_REAL=1 + LLM_INFER_SMOKE_LLM=)', async () => { + if (!mcpdUp || bearer === undefined) return; + if (process.env.LLM_INFER_SMOKE_REAL !== '1') { + // eslint-disable-next-line no-console + console.warn(' ○ happy-path skipped — set LLM_INFER_SMOKE_REAL=1 and LLM_INFER_SMOKE_LLM= of a working Llm.'); + return; + } + const name = process.env.LLM_INFER_SMOKE_LLM; + if (name === undefined || name === '') { + throw new Error('LLM_INFER_SMOKE_LLM must be set when LLM_INFER_SMOKE_REAL=1'); + } + const res = await post(`/api/v1/llms/${name}/infer`, { + messages: [{ role: 'user', content: 'Say "smoke-ok" and nothing else.' }], + max_tokens: 8, + }, bearer); + expect(res.status).toBe(200); + const body = res.body as { choices?: Array<{ message?: { content?: string } }> }; + const content = body.choices?.[0]?.message?.content ?? 
''; + expect(content).toMatch(/smoke-ok/i); + }, 60_000); +}); diff --git a/src/mcplocal/tests/smoke/llm.smoke.test.ts b/src/mcplocal/tests/smoke/llm.smoke.test.ts new file mode 100644 index 0000000..d9841a5 --- /dev/null +++ b/src/mcplocal/tests/smoke/llm.smoke.test.ts @@ -0,0 +1,162 @@ +/** + * Smoke tests: Llm resource CRUD + apiKeyRef linkage against live mcpd. + * + * Exercises the Phase 1 CLI contract end-to-end: + * 1. Create a secret carrying a fake API key. + * 2. `mcpctl create llm` referencing that secret via --api-key-ref. + * 3. `mcpctl describe llm` shows type/model/tier + the secret ref. + * 4. `mcpctl get llms -o yaml` round-trips cleanly into `apply -f`. + * 5. Delete llm + secret. + * + * Inference itself is covered in llm-infer.smoke.test.ts — this file is + * purely about the registry. + */ +import { describe, it, expect, beforeAll, afterAll } from 'vitest'; +import http from 'node:http'; +import https from 'node:https'; +import { execSync } from 'node:child_process'; +import { writeFileSync, unlinkSync, mkdtempSync } from 'node:fs'; +import { join } from 'node:path'; +import { tmpdir } from 'node:os'; + +const MCPD_URL = process.env.MCPD_URL ?? 'https://mcpctl.ad.itaz.eu'; +const SUFFIX = Date.now().toString(36); +const SECRET_NAME = `smoke-llm-sec-${SUFFIX}`; +const LLM_NAME = `smoke-llm-${SUFFIX}`; + +interface CliResult { code: number; stdout: string; stderr: string } + +function run(args: string): CliResult { + try { + const stdout = execSync(`mcpctl --direct ${args}`, { + encoding: 'utf-8', + timeout: 30_000, + stdio: ['ignore', 'pipe', 'pipe'], + }); + return { code: 0, stdout: stdout.trim(), stderr: '' }; + } catch (err) { + const e = err as { status?: number; stdout?: Buffer | string; stderr?: Buffer | string }; + return { + code: e.status ?? 1, + stdout: e.stdout ? (typeof e.stdout === 'string' ? e.stdout : e.stdout.toString('utf-8')) : '', + stderr: e.stderr ? (typeof e.stderr === 'string' ? 
e.stderr : e.stderr.toString('utf-8')) : '', + }; + } +} + +function healthz(url: string, timeoutMs = 5000): Promise { + return new Promise((resolve) => { + const parsed = new URL(`${url.replace(/\/$/, '')}/healthz`); + const driver = parsed.protocol === 'https:' ? https : http; + const req = driver.get( + { + hostname: parsed.hostname, + port: parsed.port || (parsed.protocol === 'https:' ? 443 : 80), + path: parsed.pathname, + timeout: timeoutMs, + }, + (res) => { resolve((res.statusCode ?? 500) < 500); res.resume(); }, + ); + req.on('error', () => resolve(false)); + req.on('timeout', () => { req.destroy(); resolve(false); }); + }); +} + +let mcpdUp = false; + +describe('llm smoke', () => { + beforeAll(async () => { + mcpdUp = await healthz(MCPD_URL); + if (!mcpdUp) { + // eslint-disable-next-line no-console + console.warn(`\n ○ llm smoke: skipped — ${MCPD_URL}/healthz unreachable. Set MCPD_URL to override.\n`); + } + }, 20_000); + + afterAll(() => { + if (!mcpdUp) return; + run(`delete llm ${LLM_NAME}`); + run(`delete secret ${SECRET_NAME}`); + }); + + it('creates a secret to hold the fake API key', () => { + if (!mcpdUp) return; + run(`delete secret ${SECRET_NAME}`); // idempotent cleanup + const result = run(`create secret ${SECRET_NAME} --data token=sk-fake-xyz`); + expect(result.code, result.stderr).toBe(0); + }); + + it('creates an Llm pointing at the secret via --api-key-ref', () => { + if (!mcpdUp) return; + run(`delete llm ${LLM_NAME}`); + const cmd = [ + `create llm ${LLM_NAME}`, + '--type openai', + '--model gpt-4o-mini', + '--tier fast', + '--url http://nowhere.example:9000', + `--api-key-ref ${SECRET_NAME}/token`, + '--description smoke-test', + ].join(' '); + const result = run(cmd); + expect(result.code, result.stderr || result.stdout).toBe(0); + expect(result.stdout).toMatch(new RegExp(`llm '${LLM_NAME}'`)); + }); + + it('describe llm shows the secret ref in sectioned output', () => { + if (!mcpdUp) return; + const result = run(`describe llm 
${LLM_NAME}`); + expect(result.code, result.stderr).toBe(0); + expect(result.stdout).toContain(`=== LLM: ${LLM_NAME} ===`); + expect(result.stdout).toContain('Type:'); + expect(result.stdout).toContain('openai'); + expect(result.stdout).toContain('Model:'); + expect(result.stdout).toContain('gpt-4o-mini'); + expect(result.stdout).toContain('API Key:'); + expect(result.stdout).toContain(SECRET_NAME); + expect(result.stdout).toContain('token'); + // Raw key value must NOT appear — only the ref + expect(result.stdout).not.toContain('sk-fake-xyz'); + }); + + it('get llms shows the row with KEY column rendered as "secret://name/key"', () => { + if (!mcpdUp) return; + const result = run('get llms'); + expect(result.code).toBe(0); + expect(result.stdout).toContain(LLM_NAME); + expect(result.stdout).toContain(`secret://${SECRET_NAME}/token`); + }); + + it('round-trips yaml output → apply -f', () => { + if (!mcpdUp) return; + const yaml = run(`get llm ${LLM_NAME} -o yaml`); + expect(yaml.code).toBe(0); + expect(yaml.stdout).toMatch(/kind:\s+llm/); + expect(yaml.stdout).toContain(`name: ${LLM_NAME}`); + expect(yaml.stdout).toContain(`name: ${SECRET_NAME}`); // apiKeyRef block + + // Change the description via apply -f with the YAML we just pulled. 
+ const dir = mkdtempSync(join(tmpdir(), 'mcpctl-smoke-')); + const path = join(dir, 'llm.yaml'); + const amended = yaml.stdout.replace('description: smoke-test', 'description: smoke-test-amended'); + writeFileSync(path, amended); + try { + const applied = run(`apply -f ${path}`); + expect(applied.code, applied.stderr || applied.stdout).toBe(0); + const described = run(`describe llm ${LLM_NAME}`); + expect(described.stdout).toContain('smoke-test-amended'); + } finally { + unlinkSync(path); + } + }); + + it('deletes the llm and leaves the underlying secret intact', () => { + if (!mcpdUp) return; + const del = run(`delete llm ${LLM_NAME}`); + expect(del.code, del.stderr).toBe(0); + + // Secret still exists (apiKeyRef uses onDelete: SetNull so the secret isn't touched) + const secret = run(`describe secret ${SECRET_NAME}`); + expect(secret.code).toBe(0); + }); +}); diff --git a/src/mcplocal/tests/smoke/project-llm-ref.smoke.test.ts b/src/mcplocal/tests/smoke/project-llm-ref.smoke.test.ts new file mode 100644 index 0000000..5c85d35 --- /dev/null +++ b/src/mcplocal/tests/smoke/project-llm-ref.smoke.test.ts @@ -0,0 +1,130 @@ +/** + * Smoke tests: Project.llmProvider as Llm reference (Phase 4). + * + * Verifies the describe-project warning behavior against live mcpd: + * 1. Project with `--llm ` → no warning. + * 2. Project with `--llm ` → describe flags the orphan. + * 3. Project with `--llm none` → explicit disable, no warning. + */ +import { describe, it, expect, beforeAll, afterAll } from 'vitest'; +import http from 'node:http'; +import https from 'node:https'; +import { execSync } from 'node:child_process'; + +const MCPD_URL = process.env.MCPD_URL ?? 
'https://mcpctl.ad.itaz.eu'; +const SUFFIX = Date.now().toString(36); +const LLM_NAME = `smoke-proj-llm-${SUFFIX}`; +const PROJ_OK = `smoke-proj-ok-${SUFFIX}`; +const PROJ_ORPHAN = `smoke-proj-orphan-${SUFFIX}`; +const PROJ_NONE = `smoke-proj-none-${SUFFIX}`; + +interface CliResult { code: number; stdout: string; stderr: string } + +function run(args: string): CliResult { + try { + const stdout = execSync(`mcpctl --direct ${args}`, { + encoding: 'utf-8', + timeout: 30_000, + stdio: ['ignore', 'pipe', 'pipe'], + }); + return { code: 0, stdout: stdout.trim(), stderr: '' }; + } catch (err) { + const e = err as { status?: number; stdout?: Buffer | string; stderr?: Buffer | string }; + return { + code: e.status ?? 1, + stdout: e.stdout ? (typeof e.stdout === 'string' ? e.stdout : e.stdout.toString('utf-8')) : '', + stderr: e.stderr ? (typeof e.stderr === 'string' ? e.stderr : e.stderr.toString('utf-8')) : '', + }; + } +} + +function healthz(url: string, timeoutMs = 5000): Promise { + return new Promise((resolve) => { + const parsed = new URL(`${url.replace(/\/$/, '')}/healthz`); + const driver = parsed.protocol === 'https:' ? https : http; + const req = driver.get( + { + hostname: parsed.hostname, + port: parsed.port || (parsed.protocol === 'https:' ? 443 : 80), + path: parsed.pathname, + timeout: timeoutMs, + }, + (res) => { resolve((res.statusCode ?? 500) < 500); res.resume(); }, + ); + req.on('error', () => resolve(false)); + req.on('timeout', () => { req.destroy(); resolve(false); }); + }); +} + +let mcpdUp = false; + +describe('project-llm-ref smoke', () => { + beforeAll(async () => { + mcpdUp = await healthz(MCPD_URL); + if (!mcpdUp) { + // eslint-disable-next-line no-console + console.warn(`\n ○ project-llm-ref smoke: skipped — ${MCPD_URL}/healthz unreachable.\n`); + return; + } + // Fixture: an Llm we can point projects at. 
+ run(`delete llm ${LLM_NAME}`); + const createLlm = run([ + `create llm ${LLM_NAME}`, + '--type openai', + '--model gpt-4o-mini', + '--tier fast', + '--url http://127.0.0.1:1', + ].join(' ')); + if (createLlm.code !== 0) { + // eslint-disable-next-line no-console + console.warn(` ○ could not create fixture Llm: ${createLlm.stderr || createLlm.stdout}`); + } + }, 30_000); + + afterAll(() => { + if (!mcpdUp) return; + run(`delete project ${PROJ_OK} --force`); + run(`delete project ${PROJ_ORPHAN} --force`); + run(`delete project ${PROJ_NONE} --force`); + run(`delete llm ${LLM_NAME}`); + }); + + it('project with --llm pointing at a registered Llm describes without warning', () => { + if (!mcpdUp) return; + run(`delete project ${PROJ_OK} --force`); + const created = run(`create project ${PROJ_OK} --llm ${LLM_NAME}`); + expect(created.code, created.stderr || created.stdout).toBe(0); + + const described = run(`describe project ${PROJ_OK}`); + expect(described.code).toBe(0); + expect(described.stdout).toContain('LLM:'); + expect(described.stdout).toContain(LLM_NAME); + expect(described.stdout).not.toContain('warning:'); + }); + + it('project with --llm naming an unregistered Llm shows the warning line', () => { + if (!mcpdUp) return; + run(`delete project ${PROJ_ORPHAN} --force`); + const created = run(`create project ${PROJ_ORPHAN} --llm claude-ghost-${SUFFIX}`); + expect(created.code, created.stderr || created.stdout).toBe(0); + + const described = run(`describe project ${PROJ_ORPHAN}`); + expect(described.code).toBe(0); + expect(described.stdout).toContain(`claude-ghost-${SUFFIX}`); + expect(described.stdout).toContain('warning:'); + expect(described.stdout).toContain('registry default'); + }); + + it('project with --llm none treats it as an explicit disable (no warning)', () => { + if (!mcpdUp) return; + run(`delete project ${PROJ_NONE} --force`); + const created = run(`create project ${PROJ_NONE} --llm none`); + expect(created.code).toBe(0); + + const described = 
run(`describe project ${PROJ_NONE}`); + expect(described.code).toBe(0); + expect(described.stdout).toContain('LLM:'); + expect(described.stdout).toContain('none'); + expect(described.stdout).not.toContain('warning:'); + }); +}); diff --git a/src/mcplocal/tests/smoke/secretbackend.smoke.test.ts b/src/mcplocal/tests/smoke/secretbackend.smoke.test.ts new file mode 100644 index 0000000..f01c1aa --- /dev/null +++ b/src/mcplocal/tests/smoke/secretbackend.smoke.test.ts @@ -0,0 +1,146 @@ +/** + * Smoke tests: SecretBackend CRUD against live mcpd. + * + * Exercises the Phase 0 CLI contract end-to-end: + * 1. `mcpctl get secretbackends` — the seeded `default` (plaintext) row exists + * and is marked isDefault. + * 2. `mcpctl create secretbackend --type plaintext` — create + list. + * 3. `mcpctl describe secretbackend ` — sectioned output; config + * values that look like credentials are masked. + * 4. `mcpctl delete secretbackend default` — fails with 409 (cannot delete + * the default row). + * 5. Cleanup: delete the test row; confirm it's gone. + * + * Target: mcpd direct (not mcplocal). We use `--direct` so the CLI bypasses + * mcplocal and hits mcpd at the configured URL. If mcpd is unreachable we + * skip with a clear message — same pattern as the mcptoken smoke. + * + * Run with: pnpm test:smoke + */ +import { describe, it, expect, beforeAll, afterAll } from 'vitest'; +import http from 'node:http'; +import https from 'node:https'; +import { execSync } from 'node:child_process'; + +const MCPD_URL = process.env.MCPD_URL ?? 
'https://mcpctl.ad.itaz.eu'; +const BACKEND_NAME = `smoke-sb-${Date.now().toString(36)}`; + +interface CliResult { code: number; stdout: string; stderr: string } + +function run(args: string): CliResult { + try { + const stdout = execSync(`mcpctl --direct ${args}`, { + encoding: 'utf-8', + timeout: 30_000, + stdio: ['ignore', 'pipe', 'pipe'], + }); + return { code: 0, stdout: stdout.trim(), stderr: '' }; + } catch (err) { + const e = err as { status?: number; stdout?: Buffer | string; stderr?: Buffer | string }; + return { + code: e.status ?? 1, + stdout: e.stdout ? (typeof e.stdout === 'string' ? e.stdout : e.stdout.toString('utf-8')) : '', + stderr: e.stderr ? (typeof e.stderr === 'string' ? e.stderr : e.stderr.toString('utf-8')) : '', + }; + } +} + +function healthz(url: string, timeoutMs = 5000): Promise<boolean> { + return new Promise((resolve) => { + const parsed = new URL(`${url.replace(/\/$/, '')}/healthz`); + const driver = parsed.protocol === 'https:' ? https : http; + const req = driver.get( + { + hostname: parsed.hostname, + port: parsed.port || (parsed.protocol === 'https:' ? 443 : 80), + path: parsed.pathname, + timeout: timeoutMs, + }, + (res) => { resolve((res.statusCode ?? 500) < 500); res.resume(); }, + ); + req.on('error', () => resolve(false)); + req.on('timeout', () => { req.destroy(); resolve(false); }); + }); +} + +let mcpdUp = false; + +describe('secretbackend smoke', () => { + beforeAll(async () => { + mcpdUp = await healthz(MCPD_URL); + if (!mcpdUp) { + // eslint-disable-next-line no-console + console.warn(`\n ○ secretbackend smoke: skipped — ${MCPD_URL}/healthz unreachable. 
Set MCPD_URL to override.\n`); + } + }, 20_000); + + afterAll(() => { + if (!mcpdUp) return; + run(`delete secretbackend ${BACKEND_NAME}`); + }); + + it('lists at least one secretbackend (the seeded plaintext default)', () => { + if (!mcpdUp) return; + const result = run('get secretbackends -o json'); + expect(result.code, result.stderr).toBe(0); + const rows = JSON.parse(result.stdout) as Array<{ name: string; type: string; isDefault: boolean }>; + expect(rows.length).toBeGreaterThan(0); + const defaultRow = rows.find((r) => r.isDefault === true); + expect(defaultRow, 'a default backend must exist').toBeDefined(); + expect(defaultRow!.type).toBe('plaintext'); + }); + + it('creates a plaintext backend and round-trips it through describe', () => { + if (!mcpdUp) return; + // Idempotent cleanup in case a prior run left debris + run(`delete secretbackend ${BACKEND_NAME}`); + + const created = run(`create secretbackend ${BACKEND_NAME} --type plaintext --description smoke-test`); + expect(created.code, created.stderr || created.stdout).toBe(0); + expect(created.stdout).toMatch(new RegExp(`secretbackend '${BACKEND_NAME}'`)); + + const described = run(`describe secretbackend ${BACKEND_NAME}`); + expect(described.code, described.stderr).toBe(0); + expect(described.stdout).toContain(`=== SecretBackend: ${BACKEND_NAME} ===`); + expect(described.stdout).toContain('Type:'); + expect(described.stdout).toContain('plaintext'); + expect(described.stdout).toContain('smoke-test'); + }); + + it('refuses to delete the seeded default backend', () => { + if (!mcpdUp) return; + // Find whichever row is currently the default — we don't hard-code the name + // because operators may have renamed or swapped it. 
+ const listed = run('get secretbackends -o json'); + expect(listed.code).toBe(0); + const rows = JSON.parse(listed.stdout) as Array<{ name: string; isDefault: boolean }>; + const def = rows.find((r) => r.isDefault); + expect(def).toBeDefined(); + + const del = run(`delete secretbackend ${def!.name}`); + // 409 surfaces as exit 1 with a descriptive error + expect(del.code).toBe(1); + const combined = (del.stderr + del.stdout).toLowerCase(); + expect(combined).toMatch(/default|in use|cannot delete/); + }); + + it('round-trips get -o yaml → apply -f', () => { + if (!mcpdUp) return; + const yaml = run(`get secretbackend ${BACKEND_NAME} -o yaml`); + expect(yaml.code).toBe(0); + // Apply-compatible output must start with `kind: secretbackend` + expect(yaml.stdout).toMatch(/kind:\s+secretbackend/); + expect(yaml.stdout).toContain(`name: ${BACKEND_NAME}`); + expect(yaml.stdout).toContain('type: plaintext'); + }); + + it('deletes the test backend and confirms it is gone', () => { + if (!mcpdUp) return; + const del = run(`delete secretbackend ${BACKEND_NAME}`); + expect(del.code, del.stderr).toBe(0); + + const listed = run('get secretbackends -o json'); + const rows = JSON.parse(listed.stdout) as Array<{ name: string }>; + expect(rows.find((r) => r.name === BACKEND_NAME)).toBeUndefined(); + }); +});