feat: add backup + server type smoke tests
New smoke test file: backup-and-servers.test.ts

- Backup completeness: prompts, templates, runtime, command, containerPort, replicas
- SSE server proxy (my-home-assistant): 84 tools
- Docker-image STDIO proxy (docmost): 11 tools
- Package STDIO proxy (aws-docs): 4 tools
- Instance status accuracy: RUNNING instances must respond to proxy

These tests would have caught every migration bug:
- Missing runtime (python servers on node runner)
- Missing command (HA SSE in STDIO mode)
- Missing containerPort (SSE on wrong port)
- Backup data loss (prompts, templates, server fields)

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
289
src/mcplocal/tests/smoke/backup-and-servers.test.ts
Normal file
289
src/mcplocal/tests/smoke/backup-and-servers.test.ts
Normal file
@@ -0,0 +1,289 @@
|
|||||||
|
/**
|
||||||
|
* Smoke tests: Backup completeness + server type coverage.
|
||||||
|
*
|
||||||
|
* These tests verify that:
|
||||||
|
* 1. Backup includes ALL fields (runtime, command, containerPort, prompts, templates)
|
||||||
|
* 2. All server types work via MCP proxy (STDIO, SSE, docker-image)
|
||||||
|
* 3. Instance status reflects actual container state
|
||||||
|
*
|
||||||
|
* Prerequisites:
|
||||||
|
* - mcplocal running on localhost:3200
|
||||||
|
* - mcpd running (k8s or Portainer)
|
||||||
|
* - At least one server of each type deployed
|
||||||
|
*/
|
||||||
|
import { describe, it, expect, beforeAll } from 'vitest';
|
||||||
|
import http from 'node:http';
|
||||||
|
import https from 'node:https';
|
||||||
|
import { existsSync, readFileSync } from 'node:fs';
|
||||||
|
import { join } from 'node:path';
|
||||||
|
import { homedir } from 'node:os';
|
||||||
|
|
||||||
|
// Load mcpd URL and token from config
|
||||||
|
const CONFIG_PATH = join(homedir(), '.mcpctl', 'config.json');
|
||||||
|
const CREDS_PATH = join(homedir(), '.mcpctl', 'credentials');
|
||||||
|
|
||||||
|
function loadConfig(): { mcpdUrl: string; token: string } {
|
||||||
|
let mcpdUrl = 'http://localhost:3100';
|
||||||
|
let token = '';
|
||||||
|
try {
|
||||||
|
if (existsSync(CONFIG_PATH)) {
|
||||||
|
const cfg = JSON.parse(readFileSync(CONFIG_PATH, 'utf-8')) as { mcpdUrl?: string };
|
||||||
|
if (cfg.mcpdUrl) mcpdUrl = cfg.mcpdUrl;
|
||||||
|
}
|
||||||
|
if (existsSync(CREDS_PATH)) {
|
||||||
|
const creds = JSON.parse(readFileSync(CREDS_PATH, 'utf-8')) as { token?: string };
|
||||||
|
if (creds.token) token = creds.token;
|
||||||
|
}
|
||||||
|
} catch { /* use defaults */ }
|
||||||
|
return { mcpdUrl, token };
|
||||||
|
}
|
||||||
|
|
||||||
|
const { mcpdUrl, token } = loadConfig();
|
||||||
|
|
||||||
|
function mcpdRequest<T>(method: string, path: string, body?: unknown): Promise<{ status: number; data: T }> {
|
||||||
|
return new Promise((resolve, reject) => {
|
||||||
|
const url = new URL(path, mcpdUrl);
|
||||||
|
const isHttps = url.protocol === 'https:';
|
||||||
|
const transport = isHttps ? https : http;
|
||||||
|
|
||||||
|
const headers: Record<string, string> = { Accept: 'application/json' };
|
||||||
|
if (body !== undefined) headers['Content-Type'] = 'application/json';
|
||||||
|
if (token) headers['Authorization'] = `Bearer ${token}`;
|
||||||
|
const bodyStr = body !== undefined ? JSON.stringify(body) : undefined;
|
||||||
|
if (bodyStr) headers['Content-Length'] = String(Buffer.byteLength(bodyStr));
|
||||||
|
|
||||||
|
const req = transport.request(url, {
|
||||||
|
method,
|
||||||
|
timeout: 30_000,
|
||||||
|
headers,
|
||||||
|
rejectUnauthorized: false,
|
||||||
|
}, (res) => {
|
||||||
|
const chunks: Buffer[] = [];
|
||||||
|
res.on('data', (chunk: Buffer) => chunks.push(chunk));
|
||||||
|
res.on('end', () => {
|
||||||
|
const raw = Buffer.concat(chunks).toString();
|
||||||
|
try {
|
||||||
|
resolve({ status: res.statusCode ?? 500, data: raw ? JSON.parse(raw) as T : (undefined as T) });
|
||||||
|
} catch {
|
||||||
|
resolve({ status: res.statusCode ?? 500, data: raw as unknown as T });
|
||||||
|
}
|
||||||
|
});
|
||||||
|
});
|
||||||
|
req.on('error', reject);
|
||||||
|
req.on('timeout', () => { req.destroy(); reject(new Error('Request timeout')); });
|
||||||
|
if (bodyStr) req.write(bodyStr);
|
||||||
|
req.end();
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
interface BackupBundle {
|
||||||
|
servers: Array<{
|
||||||
|
name: string;
|
||||||
|
runtime: string | null;
|
||||||
|
command: unknown;
|
||||||
|
containerPort: number | null;
|
||||||
|
replicas: number;
|
||||||
|
transport: string;
|
||||||
|
dockerImage: string | null;
|
||||||
|
packageName: string | null;
|
||||||
|
env: unknown;
|
||||||
|
healthCheck: unknown;
|
||||||
|
externalUrl: string | null;
|
||||||
|
}>;
|
||||||
|
prompts: Array<{ name: string; projectName: string | null; content: string }>;
|
||||||
|
templates: Array<{ name: string; transport: string }>;
|
||||||
|
secrets: unknown[];
|
||||||
|
projects: unknown[];
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
 * Server record as returned by GET /api/v1/servers.
 *
 * The tests below distinguish server kinds via these fields:
 * docker-image STDIO servers have `dockerImage` set and `packageName` null;
 * package STDIO servers have `packageName` set and `dockerImage` null.
 */
interface Server {
  id: string;
  name: string;
  transport: string; // 'STDIO' or 'SSE' (values matched by the tests below)
  dockerImage: string | null;
  packageName: string | null;
  runtime: string | null; // e.g. 'python'; presumably selects the runner image — confirm against mcpd
  command: string[] | null;
  containerPort: number | null;
}
|
||||||
|
|
||||||
|
/**
 * Instance record as returned by GET /api/v1/instances — one deployed
 * replica of a server, with the daemon's view of its container state.
 */
interface Instance {
  id: string;
  serverId: string; // foreign key to Server.id
  containerId: string | null; // null when no container is currently attached
  status: string; // e.g. 'RUNNING' | 'STARTING' (values matched by the tests below)
  server: { name: string };
}
|
||||||
|
|
||||||
|
/**
 * JSON-RPC style envelope returned by POST /api/v1/mcp/proxy.
 * Exactly one of `result` / `error` is expected to be present.
 */
interface ProxyResult {
  result?: { tools?: Array<{ name: string }> }; // populated for 'tools/list'
  error?: { code: number; message: string };
}
|
||||||
|
|
||||||
|
describe('Smoke: Backup completeness', () => {
|
||||||
|
let available = false;
|
||||||
|
let bundle: BackupBundle;
|
||||||
|
|
||||||
|
beforeAll(async () => {
|
||||||
|
try {
|
||||||
|
const res = await mcpdRequest<{ status: string }>('GET', '/healthz');
|
||||||
|
available = res.status === 200;
|
||||||
|
} catch {
|
||||||
|
available = false;
|
||||||
|
}
|
||||||
|
if (!available) return;
|
||||||
|
|
||||||
|
const res = await mcpdRequest<BackupBundle>('POST', '/api/v1/backup', {});
|
||||||
|
bundle = res.data;
|
||||||
|
}, 30_000);
|
||||||
|
|
||||||
|
it('skips if mcpd not reachable', () => {
|
||||||
|
if (!available) console.log('SKIP: mcpd not reachable');
|
||||||
|
expect(true).toBe(true);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('backup includes prompts', () => {
|
||||||
|
if (!available) return;
|
||||||
|
expect(bundle.prompts).toBeDefined();
|
||||||
|
expect(bundle.prompts.length).toBeGreaterThan(0);
|
||||||
|
console.log(` ${bundle.prompts.length} prompts in backup`);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('backup includes templates', () => {
|
||||||
|
if (!available) return;
|
||||||
|
expect(bundle.templates).toBeDefined();
|
||||||
|
expect(bundle.templates.length).toBeGreaterThan(0);
|
||||||
|
console.log(` ${bundle.templates.length} templates in backup`);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('backup servers have runtime field', () => {
|
||||||
|
if (!available) return;
|
||||||
|
// Python servers must have runtime=python
|
||||||
|
const pythonServers = bundle.servers.filter((s) =>
|
||||||
|
s.packageName?.includes('aws-documentation') || s.packageName?.includes('awslabs'),
|
||||||
|
);
|
||||||
|
for (const s of pythonServers) {
|
||||||
|
expect(s.runtime, `${s.name} should have runtime=python`).toBe('python');
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
it('backup servers have command field for docker-image STDIO servers', () => {
|
||||||
|
if (!available) return;
|
||||||
|
const dockerStdio = bundle.servers.filter((s) => s.dockerImage && s.transport === 'STDIO');
|
||||||
|
for (const s of dockerStdio) {
|
||||||
|
expect(s.command, `${s.name} (dockerImage STDIO) should have command`).toBeTruthy();
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
it('backup SSE servers have containerPort', () => {
|
||||||
|
if (!available) return;
|
||||||
|
const sseServers = bundle.servers.filter((s) => s.transport === 'SSE');
|
||||||
|
for (const s of sseServers) {
|
||||||
|
expect(s.containerPort, `${s.name} (SSE) should have containerPort`).toBeGreaterThan(0);
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
it('backup servers have replicas field', () => {
|
||||||
|
if (!available) return;
|
||||||
|
for (const s of bundle.servers) {
|
||||||
|
expect(typeof s.replicas, `${s.name} should have numeric replicas`).toBe('number');
|
||||||
|
}
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('Smoke: Server type proxy coverage', () => {
|
||||||
|
let available = false;
|
||||||
|
let servers: Server[];
|
||||||
|
let instances: Instance[];
|
||||||
|
|
||||||
|
beforeAll(async () => {
|
||||||
|
try {
|
||||||
|
const res = await mcpdRequest<{ status: string }>('GET', '/healthz');
|
||||||
|
available = res.status === 200;
|
||||||
|
} catch {
|
||||||
|
available = false;
|
||||||
|
}
|
||||||
|
if (!available) return;
|
||||||
|
|
||||||
|
servers = (await mcpdRequest<Server[]>('GET', '/api/v1/servers')).data;
|
||||||
|
instances = (await mcpdRequest<Instance[]>('GET', '/api/v1/instances')).data;
|
||||||
|
}, 30_000);
|
||||||
|
|
||||||
|
it('skips if mcpd not reachable', () => {
|
||||||
|
if (!available) console.log('SKIP: mcpd not reachable');
|
||||||
|
expect(true).toBe(true);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('SSE server returns tools via proxy', async () => {
|
||||||
|
if (!available) return;
|
||||||
|
const sseServer = servers.find((s) => s.transport === 'SSE');
|
||||||
|
if (!sseServer) { console.log(' SKIP: no SSE server'); return; }
|
||||||
|
|
||||||
|
const running = instances.find((i) => i.serverId === sseServer.id && (i.status === 'RUNNING' || i.status === 'STARTING'));
|
||||||
|
if (!running) { console.log(` SKIP: ${sseServer.name} has no running instance`); return; }
|
||||||
|
|
||||||
|
const res = await mcpdRequest<ProxyResult>('POST', '/api/v1/mcp/proxy', {
|
||||||
|
serverId: sseServer.id,
|
||||||
|
method: 'tools/list',
|
||||||
|
});
|
||||||
|
expect(res.status).toBe(200);
|
||||||
|
expect(res.data.result?.tools?.length, `${sseServer.name} should have tools`).toBeGreaterThan(0);
|
||||||
|
console.log(` ${sseServer.name} (SSE): ${res.data.result?.tools?.length} tools`);
|
||||||
|
}, 30_000);
|
||||||
|
|
||||||
|
it('docker-image STDIO server returns tools via proxy', async () => {
|
||||||
|
if (!available) return;
|
||||||
|
const dockerStdio = servers.find((s) => s.transport === 'STDIO' && s.dockerImage && !s.packageName);
|
||||||
|
if (!dockerStdio) { console.log(' SKIP: no docker-image STDIO server'); return; }
|
||||||
|
|
||||||
|
const running = instances.find((i) => i.serverId === dockerStdio.id && (i.status === 'RUNNING' || i.status === 'STARTING'));
|
||||||
|
if (!running) { console.log(` SKIP: ${dockerStdio.name} has no running instance`); return; }
|
||||||
|
|
||||||
|
const res = await mcpdRequest<ProxyResult>('POST', '/api/v1/mcp/proxy', {
|
||||||
|
serverId: dockerStdio.id,
|
||||||
|
method: 'tools/list',
|
||||||
|
});
|
||||||
|
expect(res.status).toBe(200);
|
||||||
|
expect(res.data.result?.tools?.length, `${dockerStdio.name} should have tools`).toBeGreaterThan(0);
|
||||||
|
console.log(` ${dockerStdio.name} (docker STDIO): ${res.data.result?.tools?.length} tools`);
|
||||||
|
}, 60_000);
|
||||||
|
|
||||||
|
it('package STDIO server returns tools via proxy', async () => {
|
||||||
|
if (!available) return;
|
||||||
|
const pkgStdio = servers.find((s) => s.transport === 'STDIO' && s.packageName && !s.dockerImage);
|
||||||
|
if (!pkgStdio) { console.log(' SKIP: no package STDIO server'); return; }
|
||||||
|
|
||||||
|
const running = instances.find((i) => i.serverId === pkgStdio.id && (i.status === 'RUNNING' || i.status === 'STARTING'));
|
||||||
|
if (!running) { console.log(` SKIP: ${pkgStdio.name} has no running instance`); return; }
|
||||||
|
|
||||||
|
const res = await mcpdRequest<ProxyResult>('POST', '/api/v1/mcp/proxy', {
|
||||||
|
serverId: pkgStdio.id,
|
||||||
|
method: 'tools/list',
|
||||||
|
});
|
||||||
|
expect(res.status).toBe(200);
|
||||||
|
expect(res.data.result?.tools?.length, `${pkgStdio.name} should have tools`).toBeGreaterThan(0);
|
||||||
|
console.log(` ${pkgStdio.name} (package STDIO): ${res.data.result?.tools?.length} tools`);
|
||||||
|
}, 60_000);
|
||||||
|
|
||||||
|
it('all running instances have actual running containers', async () => {
|
||||||
|
if (!available) return;
|
||||||
|
const runningInstances = instances.filter((i) => i.status === 'RUNNING' && i.containerId);
|
||||||
|
expect(runningInstances.length).toBeGreaterThan(0);
|
||||||
|
|
||||||
|
for (const inst of runningInstances) {
|
||||||
|
// Verify the proxy can actually reach the container
|
||||||
|
const server = servers.find((s) => s.id === inst.serverId);
|
||||||
|
if (!server) continue;
|
||||||
|
|
||||||
|
// Quick health check: try tools/list (should not 500)
|
||||||
|
const res = await mcpdRequest<ProxyResult>('POST', '/api/v1/mcp/proxy', {
|
||||||
|
serverId: server.id,
|
||||||
|
method: 'tools/list',
|
||||||
|
});
|
||||||
|
expect(
|
||||||
|
res.status,
|
||||||
|
`${server.name} instance claims RUNNING but proxy returned ${res.status}`,
|
||||||
|
).not.toBe(500);
|
||||||
|
}
|
||||||
|
}, 120_000);
|
||||||
|
});
|
||||||
Reference in New Issue
Block a user