Two bugs fixed: 1. Backup completeness: JSON backup API now includes prompts and templates. Previously these were silently dropped during backup/restore, causing data loss on migration. 2. STDIO proxy for docker-image servers: servers with dockerImage but no packageName/command (like docmost) now use k8s Attach to connect to the container's PID 1 stdin/stdout instead of exec. This fixes "has no packageName or command" errors. Changes: - backup-service.ts: add BackupPrompt/BackupTemplate types, export them - restore-service.ts: restore prompts (with project FK) and templates - mcp-proxy-service.ts: sendViaPersistentAttach for docker-image STDIO - orchestrator.ts: add attachInteractive to McpOrchestrator interface - kubernetes-orchestrator.ts: implement attachInteractive via k8s Attach - k8s-client-official.ts: expose Attach client Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
359 lines
12 KiB
TypeScript
import { describe, it, expect, vi, beforeEach } from 'vitest';
|
|
import type { ContainerSpec } from '../src/services/orchestrator.js';
|
|
|
|
// Mock @kubernetes/client-node before imports
|
|
vi.mock('@kubernetes/client-node', () => {
|
|
const handlers = new Map<string, { resolve: unknown; reject?: unknown }>();
|
|
|
|
function setHandler(key: string, resolveVal: unknown, rejectVal?: unknown) {
|
|
handlers.set(key, { resolve: resolveVal, reject: rejectVal });
|
|
}
|
|
|
|
function getHandler(key: string) {
|
|
return handlers.get(key);
|
|
}
|
|
|
|
function clearHandlers() {
|
|
handlers.clear();
|
|
}
|
|
|
|
const mockCore = {
|
|
listNamespace: vi.fn(async () => {
|
|
const h = getHandler('listNamespace');
|
|
if (h?.reject) throw h.reject;
|
|
return h?.resolve ?? { items: [] };
|
|
}),
|
|
createNamespacedPod: vi.fn(async (params: { namespace: string; body: { metadata: { name: string } } }) => {
|
|
const h = getHandler('createNamespacedPod');
|
|
if (h?.reject) throw h.reject;
|
|
return h?.resolve ?? params.body;
|
|
}),
|
|
readNamespacedPod: vi.fn(async (params: { name: string }) => {
|
|
const h = getHandler(`readNamespacedPod:${params.name}`);
|
|
if (h?.reject) throw h.reject;
|
|
return h?.resolve;
|
|
}),
|
|
deleteNamespacedPod: vi.fn(async (params: { name: string }) => {
|
|
const h = getHandler(`deleteNamespacedPod:${params.name}`);
|
|
if (h?.reject) throw h.reject;
|
|
return h?.resolve ?? {};
|
|
}),
|
|
listNamespacedPod: vi.fn(async () => {
|
|
const h = getHandler('listNamespacedPod');
|
|
if (h?.reject) throw h.reject;
|
|
return h?.resolve ?? { items: [] };
|
|
}),
|
|
readNamespace: vi.fn(async (params: { name: string }) => {
|
|
const h = getHandler(`readNamespace:${params.name}`);
|
|
if (h?.reject) throw h.reject;
|
|
return h?.resolve ?? {};
|
|
}),
|
|
createNamespace: vi.fn(async () => {
|
|
const h = getHandler('createNamespace');
|
|
if (h?.reject) throw h.reject;
|
|
return h?.resolve ?? {};
|
|
}),
|
|
};
|
|
|
|
class MockKubeConfig {
|
|
loadFromDefault = vi.fn();
|
|
setCurrentContext = vi.fn();
|
|
getContexts = vi.fn(() => []);
|
|
getCurrentContext = vi.fn(() => 'default');
|
|
makeApiClient = vi.fn(() => mockCore);
|
|
}
|
|
|
|
class MockExec {
|
|
exec = vi.fn();
|
|
}
|
|
|
|
class MockAttach {
|
|
attach = vi.fn();
|
|
}
|
|
|
|
class MockLog {
|
|
log = vi.fn();
|
|
}
|
|
|
|
return {
|
|
KubeConfig: MockKubeConfig,
|
|
CoreV1Api: class {},
|
|
Exec: MockExec,
|
|
Attach: MockAttach,
|
|
Log: MockLog,
|
|
// Export test helpers
|
|
__testHelpers: { setHandler, getHandler, clearHandlers, mockCore },
|
|
};
|
|
});
|
|
|
|
// Import after mock
|
|
import { KubernetesOrchestrator } from '../src/services/k8s/kubernetes-orchestrator.js';
|
|
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
|
const k8sMock = await import('@kubernetes/client-node') as any;
|
|
const { setHandler, clearHandlers, mockCore } = k8sMock.__testHelpers;
|
|
|
|
const testSpec: ContainerSpec = {
|
|
image: 'mysources.co.uk/michal/mcpctl-node-runner:latest',
|
|
name: 'my-server',
|
|
env: { PORT: '3000' },
|
|
containerPort: 3000,
|
|
};
|
|
|
|
const podRunning = {
|
|
metadata: {
|
|
name: 'my-server',
|
|
namespace: 'mcpctl-servers',
|
|
creationTimestamp: '2026-01-01T00:00:00Z',
|
|
labels: { 'mcpctl.managed': 'true' },
|
|
},
|
|
status: {
|
|
phase: 'Running',
|
|
podIP: '10.42.0.15',
|
|
containerStatuses: [{
|
|
state: { running: { startedAt: '2026-01-01T00:00:00Z' } },
|
|
}],
|
|
},
|
|
spec: {
|
|
containers: [{ name: 'my-server', ports: [{ containerPort: 3000 }] }],
|
|
},
|
|
};
|
|
|
|
const podPending = {
|
|
metadata: {
|
|
name: 'my-server',
|
|
namespace: 'mcpctl-servers',
|
|
creationTimestamp: '2026-01-01T00:00:00Z',
|
|
},
|
|
status: {
|
|
phase: 'Pending',
|
|
containerStatuses: [{
|
|
state: { waiting: { reason: 'ContainerCreating' } },
|
|
}],
|
|
},
|
|
spec: {
|
|
containers: [{ name: 'my-server' }],
|
|
},
|
|
};
|
|
|
|
describe('KubernetesOrchestrator', () => {
|
|
let orch: KubernetesOrchestrator;
|
|
|
|
beforeEach(() => {
|
|
clearHandlers();
|
|
vi.clearAllMocks();
|
|
orch = new KubernetesOrchestrator({ serversNamespace: 'mcpctl-servers' });
|
|
});
|
|
|
|
describe('ping', () => {
|
|
it('returns true on successful API call', async () => {
|
|
setHandler('listNamespace', { items: [] });
|
|
expect(await orch.ping()).toBe(true);
|
|
});
|
|
|
|
it('returns false on error', async () => {
|
|
setHandler('listNamespace', undefined, new Error('connection refused'));
|
|
expect(await orch.ping()).toBe(false);
|
|
});
|
|
});
|
|
|
|
describe('pullImage', () => {
|
|
it('is a no-op for K8s', async () => {
|
|
await expect(orch.pullImage('some-image:latest')).resolves.toBeUndefined();
|
|
});
|
|
});
|
|
|
|
describe('createContainer', () => {
|
|
it('creates a pod and returns container info', async () => {
|
|
// ensureNamespace
|
|
setHandler('readNamespace:mcpctl-servers', {});
|
|
// createPod returns the pod
|
|
setHandler('createNamespacedPod', podRunning);
|
|
// inspectContainer after create
|
|
setHandler('readNamespacedPod:my-server', podRunning);
|
|
|
|
const info = await orch.createContainer(testSpec);
|
|
expect(info.containerId).toBe('my-server');
|
|
expect(info.state).toBe('running');
|
|
expect(info.port).toBe(3000);
|
|
expect(info.ip).toBe('10.42.0.15');
|
|
});
|
|
|
|
it('throws on API error', async () => {
|
|
setHandler('readNamespace:mcpctl-servers', {});
|
|
setHandler('createNamespacedPod', undefined, new Error('pod already exists'));
|
|
|
|
await expect(orch.createContainer(testSpec)).rejects.toThrow('pod already exists');
|
|
});
|
|
});
|
|
|
|
describe('inspectContainer', () => {
|
|
it('returns running container info with pod IP', async () => {
|
|
setHandler('readNamespacedPod:my-server', podRunning);
|
|
|
|
const info = await orch.inspectContainer('my-server');
|
|
expect(info.state).toBe('running');
|
|
expect(info.name).toBe('my-server');
|
|
expect(info.ip).toBe('10.42.0.15');
|
|
expect(info.port).toBe(3000);
|
|
});
|
|
|
|
it('maps pending state correctly', async () => {
|
|
setHandler('readNamespacedPod:my-server', podPending);
|
|
|
|
const info = await orch.inspectContainer('my-server');
|
|
expect(info.state).toBe('starting');
|
|
});
|
|
|
|
it('throws when pod not found', async () => {
|
|
setHandler('readNamespacedPod:missing', undefined, { statusCode: 404, message: 'not found' });
|
|
|
|
await expect(orch.inspectContainer('missing')).rejects.toBeDefined();
|
|
});
|
|
});
|
|
|
|
describe('stopContainer', () => {
|
|
it('deletes the pod', async () => {
|
|
setHandler('deleteNamespacedPod:my-server', {});
|
|
await expect(orch.stopContainer('my-server')).resolves.toBeUndefined();
|
|
});
|
|
});
|
|
|
|
describe('removeContainer', () => {
|
|
it('deletes the pod successfully', async () => {
|
|
setHandler('deleteNamespacedPod:my-server', {});
|
|
await expect(orch.removeContainer('my-server')).resolves.toBeUndefined();
|
|
});
|
|
|
|
it('ignores 404 (already deleted)', async () => {
|
|
setHandler('deleteNamespacedPod:my-server', undefined, { statusCode: 404 });
|
|
await expect(orch.removeContainer('my-server')).resolves.toBeUndefined();
|
|
});
|
|
|
|
it('throws on other errors', async () => {
|
|
setHandler('deleteNamespacedPod:my-server', undefined, { statusCode: 403, message: 'forbidden' });
|
|
await expect(orch.removeContainer('my-server')).rejects.toBeDefined();
|
|
});
|
|
});
|
|
|
|
describe('listContainers', () => {
|
|
it('lists managed pods', async () => {
|
|
setHandler('listNamespacedPod', { items: [podRunning] });
|
|
|
|
const containers = await orch.listContainers();
|
|
expect(containers).toHaveLength(1);
|
|
expect(containers[0]!.containerId).toBe('my-server');
|
|
expect(containers[0]!.state).toBe('running');
|
|
expect(containers[0]!.ip).toBe('10.42.0.15');
|
|
|
|
expect(mockCore.listNamespacedPod).toHaveBeenCalledWith(
|
|
expect.objectContaining({ labelSelector: 'mcpctl.managed=true' }),
|
|
);
|
|
});
|
|
|
|
it('returns empty when no pods', async () => {
|
|
setHandler('listNamespacedPod', { items: [] });
|
|
const containers = await orch.listContainers();
|
|
expect(containers).toEqual([]);
|
|
});
|
|
});
|
|
|
|
describe('ensureNamespace', () => {
|
|
it('does nothing if namespace exists', async () => {
|
|
setHandler('readNamespace:test-ns', {});
|
|
await expect(orch.ensureNamespace('test-ns')).resolves.toBeUndefined();
|
|
expect(mockCore.createNamespace).not.toHaveBeenCalled();
|
|
});
|
|
|
|
it('creates namespace if not found', async () => {
|
|
setHandler('readNamespace:new-ns', undefined, { statusCode: 404 });
|
|
setHandler('createNamespace', {});
|
|
await expect(orch.ensureNamespace('new-ns')).resolves.toBeUndefined();
|
|
expect(mockCore.createNamespace).toHaveBeenCalled();
|
|
});
|
|
|
|
it('handles conflict (namespace already created by another process)', async () => {
|
|
setHandler('readNamespace:new-ns', undefined, { statusCode: 404 });
|
|
setHandler('createNamespace', undefined, { statusCode: 409, message: 'already exists' });
|
|
await expect(orch.ensureNamespace('new-ns')).resolves.toBeUndefined();
|
|
});
|
|
});
|
|
|
|
describe('getNamespace', () => {
|
|
it('returns configured namespace', () => {
|
|
expect(orch.getNamespace()).toBe('mcpctl-servers');
|
|
});
|
|
|
|
it('defaults to mcpctl-servers', () => {
|
|
const defaultOrch = new KubernetesOrchestrator();
|
|
expect(defaultOrch.getNamespace()).toBe('mcpctl-servers');
|
|
});
|
|
});
|
|
|
|
describe('pod IP extraction', () => {
|
|
it('extracts podIP from status', async () => {
|
|
setHandler('readNamespacedPod:my-server', podRunning);
|
|
const info = await orch.inspectContainer('my-server');
|
|
expect(info.ip).toBe('10.42.0.15');
|
|
});
|
|
|
|
it('returns undefined ip when no podIP', async () => {
|
|
const podWithoutIP = {
|
|
...podRunning,
|
|
status: { ...podRunning.status, podIP: undefined },
|
|
};
|
|
setHandler('readNamespacedPod:my-server', podWithoutIP);
|
|
const info = await orch.inspectContainer('my-server');
|
|
expect(info.ip).toBeUndefined();
|
|
});
|
|
});
|
|
|
|
describe('manifest security', () => {
|
|
it('creates pods with security hardening', async () => {
|
|
setHandler('readNamespace:mcpctl-servers', {});
|
|
setHandler('createNamespacedPod', podRunning);
|
|
setHandler('readNamespacedPod:my-server', podRunning);
|
|
|
|
await orch.createContainer(testSpec);
|
|
|
|
const createCall = mockCore.createNamespacedPod.mock.calls[0]![0];
|
|
const container = createCall.body.spec.containers[0];
|
|
expect(container.securityContext.runAsNonRoot).toBe(false);
|
|
expect(container.securityContext.readOnlyRootFilesystem).toBe(false);
|
|
expect(container.securityContext.allowPrivilegeEscalation).toBe(false);
|
|
expect(container.securityContext.capabilities.drop).toEqual(['ALL']);
|
|
expect(container.securityContext.seccompProfile.type).toBe('RuntimeDefault');
|
|
});
|
|
|
|
it('creates pods with automountServiceAccountToken disabled', async () => {
|
|
setHandler('readNamespace:mcpctl-servers', {});
|
|
setHandler('createNamespacedPod', podRunning);
|
|
setHandler('readNamespacedPod:my-server', podRunning);
|
|
|
|
await orch.createContainer(testSpec);
|
|
|
|
const createCall = mockCore.createNamespacedPod.mock.calls[0]![0];
|
|
expect(createCall.body.spec.automountServiceAccountToken).toBe(false);
|
|
});
|
|
|
|
it('creates pods with stdin enabled for STDIO servers', async () => {
|
|
setHandler('readNamespace:mcpctl-servers', {});
|
|
setHandler('createNamespacedPod', podRunning);
|
|
setHandler('readNamespacedPod:my-server', podRunning);
|
|
|
|
await orch.createContainer(testSpec);
|
|
|
|
const createCall = mockCore.createNamespacedPod.mock.calls[0]![0];
|
|
expect(createCall.body.spec.containers[0].stdin).toBe(true);
|
|
});
|
|
});
|
|
|
|
describe('context enforcement', () => {
|
|
it('sets context when configured', () => {
|
|
const _orch = new KubernetesOrchestrator({ context: 'default' });
|
|
// The mock KubeConfig.setCurrentContext should have been called
|
|
// This verifies the safety mechanism works
|
|
expect(_orch.getNamespace()).toBe('mcpctl-servers');
|
|
});
|
|
});
|
|
});
|