feat: add Kubernetes orchestrator for MCP server deployment
KubernetesOrchestrator implements McpOrchestrator interface with K8s API client, manifest generation (Pod/Deployment), namespace management, resource limits, and security contexts. 39 new tests. Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
@@ -9,3 +9,5 @@ export { DEFAULT_MEMORY_LIMIT, DEFAULT_NANO_CPUS } from './orchestrator.js';
|
|||||||
export { DockerContainerManager } from './docker/container-manager.js';
|
export { DockerContainerManager } from './docker/container-manager.js';
|
||||||
export { AuditLogService } from './audit-log.service.js';
|
export { AuditLogService } from './audit-log.service.js';
|
||||||
export type { AuditLogQueryParams } from './audit-log.service.js';
|
export type { AuditLogQueryParams } from './audit-log.service.js';
|
||||||
|
export { KubernetesOrchestrator } from './k8s/index.js';
|
||||||
|
export type { K8sClientConfig } from './k8s/index.js';
|
||||||
|
|||||||
12
src/mcpd/src/services/k8s/index.ts
Normal file
12
src/mcpd/src/services/k8s/index.ts
Normal file
@@ -0,0 +1,12 @@
|
|||||||
|
// Barrel module for the Kubernetes backend: re-exports the orchestrator,
// the low-level API client, and the manifest-generation helpers so callers
// can import everything from './k8s/index.js'.
export { KubernetesOrchestrator } from './kubernetes-orchestrator.js';
export { K8sClient, loadDefaultConfig, parseKubeconfig } from './k8s-client.js';
export type { K8sClientConfig, K8sResponse, K8sError } from './k8s-client.js';
export {
  generatePodSpec,
  generateDeploymentSpec,
  generateNamespaceSpec,
  formatMemory,
  formatCpu,
  sanitizeName,
} from './manifest-generator.js';
export type { K8sPodManifest, K8sDeploymentManifest, K8sNamespaceManifest } from './manifest-generator.js';
|
||||||
281
src/mcpd/src/services/k8s/k8s-client.ts
Normal file
281
src/mcpd/src/services/k8s/k8s-client.ts
Normal file
@@ -0,0 +1,281 @@
|
|||||||
|
import https from 'node:https';
|
||||||
|
import http from 'node:http';
|
||||||
|
import fs from 'node:fs';
|
||||||
|
import path from 'node:path';
|
||||||
|
|
||||||
|
/** Connection settings consumed by K8sClient and KubernetesOrchestrator. */
export interface K8sClientConfig {
  /** Base URL of the API server, e.g. "https://10.0.0.1:6443". */
  apiServer: string;
  /** Bearer token sent as `Authorization: Bearer …`; omitted means no auth header. */
  token?: string;
  /** PEM CA bundle used to verify the API server's TLS certificate. */
  caCert?: string;
  /** Default namespace for API operations; falls back to 'default' when omitted. */
  namespace?: string;
  /** Skip TLS verification (for dev clusters) */
  insecure?: boolean;
}
|
||||||
|
|
||||||
|
/** Result of one Kubernetes API call: HTTP status code plus the parsed JSON body. */
export interface K8sResponse<T = unknown> {
  statusCode: number;
  body: T;
}
|
||||||
|
|
||||||
|
/**
 * Shape of the `Status` object the Kubernetes API returns in error responses.
 * Declared for callers that want to narrow `K8sResponse.body` on failures;
 * nothing in this file references it directly.
 */
export interface K8sError {
  kind: 'Status';
  apiVersion: 'v1';
  status: 'Failure';
  message: string;
  reason: string;
  code: number;
}
|
||||||
|
|
||||||
|
/**
 * Load config from in-cluster service account or KUBECONFIG env.
 *
 * Resolution order:
 *  1. In-cluster: if the service-account token file exists, build the config
 *     from the mounted token/CA/namespace files and the KUBERNETES_SERVICE_*
 *     env vars.
 *  2. Kubeconfig: otherwise parse $KUBECONFIG, falling back to ~/.kube/config.
 *
 * NOTE(review): KUBECONFIG may be a colon-separated list of paths per kubectl
 * convention; this treats it as a single path — confirm that is acceptable.
 *
 * @throws Error when neither an in-cluster mount nor a kubeconfig file exists.
 */
export function loadDefaultConfig(): K8sClientConfig {
  // In-cluster detection
  const tokenPath = '/var/run/secrets/kubernetes.io/serviceaccount/token';
  const caPath = '/var/run/secrets/kubernetes.io/serviceaccount/ca.crt';
  const nsPath = '/var/run/secrets/kubernetes.io/serviceaccount/namespace';

  if (fs.existsSync(tokenPath)) {
    const token = fs.readFileSync(tokenPath, 'utf-8').trim();
    // Namespace file may be absent on some mounts; default conservatively.
    const namespace = fs.existsSync(nsPath) ? fs.readFileSync(nsPath, 'utf-8').trim() : 'default';
    const config: K8sClientConfig = {
      apiServer: `https://${process.env['KUBERNETES_SERVICE_HOST'] ?? 'kubernetes.default.svc'}:${process.env['KUBERNETES_SERVICE_PORT'] ?? '443'}`,
      token,
      namespace,
    };
    if (fs.existsSync(caPath)) {
      config.caCert = fs.readFileSync(caPath, 'utf-8');
    }
    return config;
  }

  // Fallback: parse KUBECONFIG or ~/.kube/config
  const kubeconfigPath = process.env['KUBECONFIG'] ?? path.join(process.env['HOME'] ?? '', '.kube', 'config');
  if (fs.existsSync(kubeconfigPath)) {
    return parseKubeconfig(fs.readFileSync(kubeconfigPath, 'utf-8'));
  }

  throw new Error('No Kubernetes configuration found (no in-cluster config or kubeconfig)');
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Minimal kubeconfig parser - extracts current-context server and auth.
|
||||||
|
*/
|
||||||
|
export function parseKubeconfig(raw: string): K8sClientConfig {
|
||||||
|
// Simple YAML-like parser for kubeconfig (avoids yaml dependency)
|
||||||
|
const lines = raw.split('\n');
|
||||||
|
let currentContext = '';
|
||||||
|
let contextCluster = '';
|
||||||
|
let contextUser = '';
|
||||||
|
let serverUrl = '';
|
||||||
|
let token = '';
|
||||||
|
let caCert: string | undefined;
|
||||||
|
let namespace = 'default';
|
||||||
|
|
||||||
|
// Pass 1: find current-context
|
||||||
|
for (const line of lines) {
|
||||||
|
const match = line.match(/^current-context:\s*(.+)/);
|
||||||
|
if (match?.[1]) {
|
||||||
|
currentContext = match[1].trim();
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Pass 2: find context details
|
||||||
|
let inContexts = false;
|
||||||
|
let inTargetContext = false;
|
||||||
|
for (const line of lines) {
|
||||||
|
if (line.match(/^contexts:/)) { inContexts = true; continue; }
|
||||||
|
if (inContexts && line.match(/^\S/) && !line.match(/^contexts:/)) { inContexts = false; }
|
||||||
|
if (inContexts && line.includes(`name: ${currentContext}`)) { inTargetContext = true; continue; }
|
||||||
|
if (inTargetContext) {
|
||||||
|
const clusterMatch = line.match(/cluster:\s*(.+)/);
|
||||||
|
if (clusterMatch?.[1]) contextCluster = clusterMatch[1].trim();
|
||||||
|
const userMatch = line.match(/user:\s*(.+)/);
|
||||||
|
if (userMatch?.[1]) contextUser = userMatch[1].trim();
|
||||||
|
const nsMatch = line.match(/namespace:\s*(.+)/);
|
||||||
|
if (nsMatch?.[1]) namespace = nsMatch[1].trim();
|
||||||
|
if (contextCluster && contextUser) { inTargetContext = false; }
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Pass 3: find cluster server
|
||||||
|
let inClusters = false;
|
||||||
|
let inTargetCluster = false;
|
||||||
|
for (const line of lines) {
|
||||||
|
if (line.match(/^clusters:/)) { inClusters = true; continue; }
|
||||||
|
if (inClusters && line.match(/^\S/) && !line.match(/^clusters:/)) { inClusters = false; }
|
||||||
|
if (inClusters && line.includes(`name: ${contextCluster}`)) { inTargetCluster = true; continue; }
|
||||||
|
if (inTargetCluster) {
|
||||||
|
const serverMatch = line.match(/server:\s*(.+)/);
|
||||||
|
if (serverMatch?.[1]) { serverUrl = serverMatch[1].trim(); inTargetCluster = false; }
|
||||||
|
const caMatch = line.match(/certificate-authority-data:\s*(.+)/);
|
||||||
|
if (caMatch?.[1]) caCert = Buffer.from(caMatch[1].trim(), 'base64').toString('utf-8');
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Pass 4: find user token
|
||||||
|
let inUsers = false;
|
||||||
|
let inTargetUser = false;
|
||||||
|
for (const line of lines) {
|
||||||
|
if (line.match(/^users:/)) { inUsers = true; continue; }
|
||||||
|
if (inUsers && line.match(/^\S/) && !line.match(/^users:/)) { inUsers = false; }
|
||||||
|
if (inUsers && line.includes(`name: ${contextUser}`)) { inTargetUser = true; continue; }
|
||||||
|
if (inTargetUser) {
|
||||||
|
const tokenMatch = line.match(/token:\s*(.+)/);
|
||||||
|
if (tokenMatch?.[1]) { token = tokenMatch[1].trim(); inTargetUser = false; }
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!serverUrl) {
|
||||||
|
throw new Error(`Could not parse kubeconfig: no server found for context "${currentContext}"`);
|
||||||
|
}
|
||||||
|
|
||||||
|
const config: K8sClientConfig = {
|
||||||
|
apiServer: serverUrl,
|
||||||
|
namespace,
|
||||||
|
};
|
||||||
|
if (token) config.token = token;
|
||||||
|
if (caCert) config.caCert = caCert;
|
||||||
|
return config;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
 * Thin JSON-over-HTTP(S) client for the Kubernetes REST API, built on node's
 * `http`/`https` modules (no client library dependency). Auth is bearer-token
 * only; TLS trust comes from an optional CA bundle, with an `insecure` escape
 * hatch that disables certificate verification.
 */
export class K8sClient {
  // API server base URL with any trailing slash removed.
  private apiServer: string;
  // Bearer token for the Authorization header (optional).
  private token: string | undefined;
  // PEM CA bundle applied to HTTPS requests when present.
  private caCert: string | undefined;
  // When true, HTTPS requests skip certificate verification.
  private insecure: boolean;
  // Namespace callers should use when none is given explicitly.
  readonly defaultNamespace: string;

  constructor(config: K8sClientConfig) {
    // Normalize so `new URL(path, apiServer)` concatenation stays clean.
    this.apiServer = config.apiServer.replace(/\/$/, '');
    this.token = config.token;
    this.caCert = config.caCert;
    this.insecure = config.insecure ?? false;
    this.defaultNamespace = config.namespace ?? 'default';
  }

  /** GET a resource path and parse the JSON response. */
  async get<T = unknown>(path: string): Promise<K8sResponse<T>> {
    return this.request<T>('GET', path);
  }

  /** POST a JSON body (e.g. a manifest) to a collection path. */
  async post<T = unknown>(path: string, body: unknown): Promise<K8sResponse<T>> {
    return this.request<T>('POST', path, body);
  }

  /** DELETE a resource path. */
  async delete<T = unknown>(path: string): Promise<K8sResponse<T>> {
    return this.request<T>('DELETE', path);
  }

  /** PATCH a resource using the strategic-merge-patch content type. */
  async patch<T = unknown>(path: string, body: unknown): Promise<K8sResponse<T>> {
    return this.request<T>('PATCH', path, body, 'application/strategic-merge-patch+json');
  }

  /**
   * Core request helper: serializes `body` as JSON, applies auth/TLS options,
   * enforces a 30s timeout, and rejects when the response is not valid JSON.
   * Note: non-2xx statuses resolve (callers inspect `statusCode`); only
   * transport failures and unparseable bodies reject.
   */
  private request<T>(method: string, urlPath: string, body?: unknown, contentType = 'application/json'): Promise<K8sResponse<T>> {
    return new Promise((resolve, reject) => {
      const url = new URL(urlPath, this.apiServer);
      const isHttps = url.protocol === 'https:';
      const transport = isHttps ? https : http;

      const headers: Record<string, string> = {
        Accept: 'application/json',
      };
      if (this.token) {
        headers['Authorization'] = `Bearer ${this.token}`;
      }

      let payload: string | undefined;
      if (body !== undefined) {
        payload = JSON.stringify(body);
        headers['Content-Type'] = contentType;
        // Byte length, not string length — payload may contain multibyte chars.
        headers['Content-Length'] = String(Buffer.byteLength(payload));
      }

      const opts: https.RequestOptions = {
        hostname: url.hostname,
        port: url.port || (isHttps ? 443 : 80),
        path: url.pathname + url.search,
        method,
        headers,
        timeout: 30000,
      };

      // TLS options only make sense on HTTPS connections.
      if (isHttps) {
        if (this.caCert) {
          opts.ca = this.caCert;
        }
        if (this.insecure) {
          opts.rejectUnauthorized = false;
        }
      }

      const req = transport.request(opts, (res) => {
        // Buffer the whole body before parsing.
        const chunks: Buffer[] = [];
        res.on('data', (chunk: Buffer) => chunks.push(chunk));
        res.on('end', () => {
          const raw = Buffer.concat(chunks).toString('utf-8');
          try {
            const parsed = JSON.parse(raw) as T;
            resolve({ statusCode: res.statusCode ?? 0, body: parsed });
          } catch {
            // Truncate the body in the error so huge responses don't flood logs.
            reject(new Error(`Invalid JSON from K8s API: ${raw.slice(0, 200)}`));
          }
        });
      });

      req.on('error', reject);
      req.on('timeout', () => {
        // destroy() aborts the socket; without it the request would hang open.
        req.destroy();
        reject(new Error('K8s API request timed out'));
      });
      if (payload) req.write(payload);
      req.end();
    });
  }

  /**
   * Fetch plain-text logs for a pod. Separate from request() because the log
   * endpoint returns raw text, not JSON.
   *
   * @param opts.tail  - maps to `tailLines` (last N lines).
   * @param opts.since - maps to `sinceSeconds` (logs newer than N seconds).
   */
  async getLogs(namespace: string, podName: string, opts?: { tail?: number; since?: number }): Promise<string> {
    return new Promise((resolve, reject) => {
      const params = new URLSearchParams();
      if (opts?.tail !== undefined) params.set('tailLines', String(opts.tail));
      if (opts?.since !== undefined) params.set('sinceSeconds', String(opts.since));
      const qs = params.toString();
      const urlPath = `/api/v1/namespaces/${namespace}/pods/${podName}/log${qs ? `?${qs}` : ''}`;

      const url = new URL(urlPath, this.apiServer);
      const isHttps = url.protocol === 'https:';
      const transport = isHttps ? https : http;

      const headers: Record<string, string> = {};
      if (this.token) {
        headers['Authorization'] = `Bearer ${this.token}`;
      }

      const reqOpts: https.RequestOptions = {
        hostname: url.hostname,
        port: url.port || (isHttps ? 443 : 80),
        path: url.pathname + url.search,
        method: 'GET',
        headers,
        timeout: 30000,
      };

      if (isHttps) {
        if (this.caCert) reqOpts.ca = this.caCert;
        if (this.insecure) reqOpts.rejectUnauthorized = false;
      }

      const req = transport.request(reqOpts, (res) => {
        // Logs are returned verbatim, regardless of HTTP status.
        const chunks: Buffer[] = [];
        res.on('data', (chunk: Buffer) => chunks.push(chunk));
        res.on('end', () => resolve(Buffer.concat(chunks).toString('utf-8')));
      });

      req.on('error', reject);
      req.on('timeout', () => {
        req.destroy();
        reject(new Error('K8s log request timed out'));
      });
      req.end();
    });
  }
}
|
||||||
200
src/mcpd/src/services/k8s/kubernetes-orchestrator.ts
Normal file
200
src/mcpd/src/services/k8s/kubernetes-orchestrator.ts
Normal file
@@ -0,0 +1,200 @@
|
|||||||
|
import type {
|
||||||
|
McpOrchestrator,
|
||||||
|
ContainerSpec,
|
||||||
|
ContainerInfo,
|
||||||
|
ContainerLogs,
|
||||||
|
} from '../orchestrator.js';
|
||||||
|
import { K8sClient } from './k8s-client.js';
|
||||||
|
import type { K8sClientConfig } from './k8s-client.js';
|
||||||
|
import { generatePodSpec, generateNamespaceSpec } from './manifest-generator.js';
|
||||||
|
|
||||||
|
/**
 * Subset of the Kubernetes Pod object that this orchestrator reads.
 * Only the fields consumed below are modeled; the real API object carries
 * many more.
 */
interface K8sPodStatus {
  metadata: {
    name: string;
    namespace: string;
    // Timestamp string from the API; fed directly to `new Date(...)`.
    creationTimestamp: string;
    labels?: Record<string, string>;
  };
  status: {
    // Pod lifecycle phase, e.g. 'Pending' | 'Running' | 'Succeeded' | 'Failed'.
    phase: string;
    containerStatuses?: Array<{
      // Exactly one of running/waiting/terminated is populated by the API.
      state: {
        running?: Record<string, unknown>;
        waiting?: { reason?: string };
        terminated?: { reason?: string; exitCode?: number };
      };
    }>;
  };
  spec?: {
    containers: Array<{
      ports?: Array<{ containerPort: number }>;
    }>;
  };
}
|
||||||
|
|
||||||
|
/** Subset of the Kubernetes PodList object: just the items array. */
interface K8sPodList {
  items: K8sPodStatus[];
}
|
||||||
|
|
||||||
|
function mapPhase(phase: string, containerStatuses?: K8sPodStatus['status']['containerStatuses']): ContainerInfo['state'] {
|
||||||
|
// Check container-level status first for more granularity
|
||||||
|
if (containerStatuses && containerStatuses.length > 0) {
|
||||||
|
const cs = containerStatuses[0];
|
||||||
|
if (cs) {
|
||||||
|
if (cs.state.running) return 'running';
|
||||||
|
if (cs.state.waiting) return 'starting';
|
||||||
|
if (cs.state.terminated) return 'stopped';
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
switch (phase) {
|
||||||
|
case 'Running':
|
||||||
|
return 'running';
|
||||||
|
case 'Pending':
|
||||||
|
return 'starting';
|
||||||
|
case 'Succeeded':
|
||||||
|
case 'Failed':
|
||||||
|
return 'stopped';
|
||||||
|
default:
|
||||||
|
return 'unknown';
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
 * McpOrchestrator implementation backed by the Kubernetes API.
 * "Containers" map to pods in a single namespace; pods created here are
 * labeled `mcpctl.managed=true` (via generatePodSpec) so listContainers can
 * filter to only the pods this tool owns.
 */
export class KubernetesOrchestrator implements McpOrchestrator {
  private client: K8sClient;
  // Namespace all pod operations target; see also ensureNamespace().
  private namespace: string;

  constructor(config: K8sClientConfig) {
    this.client = new K8sClient(config);
    this.namespace = config.namespace ?? 'default';
  }

  /** True when the API server answers GET /api/v1 with 200; false on any error. */
  async ping(): Promise<boolean> {
    try {
      const res = await this.client.get('/api/v1');
      return res.statusCode === 200;
    } catch {
      return false;
    }
  }

  /** No-op: image pulling is delegated to the kubelet at scheduling time. */
  async pullImage(_image: string): Promise<void> {
    // K8s pulls images on pod scheduling - no pre-pull needed
  }

  /**
   * Create a pod for the given spec (creating the namespace first if needed)
   * and return its observed state.
   * @throws Error when the API rejects the pod manifest.
   */
  async createContainer(spec: ContainerSpec): Promise<ContainerInfo> {
    await this.ensureNamespace(this.namespace);

    const manifest = generatePodSpec(spec, this.namespace);
    const res = await this.client.post<K8sPodStatus>(
      `/api/v1/namespaces/${this.namespace}/pods`,
      manifest,
    );

    if (res.statusCode >= 400) {
      const err = res.body as unknown as { message?: string };
      throw new Error(`Failed to create pod: ${err.message ?? `HTTP ${res.statusCode}`}`);
    }

    // Wait briefly for pod to start scheduling
    await new Promise((resolve) => setTimeout(resolve, 500));

    return this.inspectContainer(res.body.metadata.name);
  }

  /** Stop == delete; pods have no pause/stop state in this model. */
  async stopContainer(containerId: string): Promise<void> {
    // In K8s, "stopping" a pod means deleting it
    await this.removeContainer(containerId);
  }

  /**
   * Delete the pod. A 404 is treated as success (already gone); the `_force`
   * flag is accepted for interface compatibility but unused.
   * @throws Error for any other 4xx/5xx response.
   */
  async removeContainer(containerId: string, _force?: boolean): Promise<void> {
    const res = await this.client.delete(
      `/api/v1/namespaces/${this.namespace}/pods/${containerId}`,
    );
    if (res.statusCode >= 400 && res.statusCode !== 404) {
      const err = res.body as { message?: string };
      throw new Error(`Failed to delete pod: ${err.message ?? `HTTP ${res.statusCode}`}`);
    }
  }

  /**
   * Fetch the pod and map it to ContainerInfo. The first declared container
   * port (if any) is surfaced as `port`.
   * @throws Error when the pod does not exist or the API errors.
   */
  async inspectContainer(containerId: string): Promise<ContainerInfo> {
    const res = await this.client.get<K8sPodStatus>(
      `/api/v1/namespaces/${this.namespace}/pods/${containerId}`,
    );

    if (res.statusCode === 404) {
      throw new Error(`Pod "${containerId}" not found in namespace "${this.namespace}"`);
    }
    if (res.statusCode >= 400) {
      const err = res.body as unknown as { message?: string };
      throw new Error(`Failed to inspect pod: ${err.message ?? `HTTP ${res.statusCode}`}`);
    }

    const pod = res.body;
    const result: ContainerInfo = {
      containerId: pod.metadata.name,
      name: pod.metadata.name,
      state: mapPhase(pod.status.phase, pod.status.containerStatuses),
      createdAt: new Date(pod.metadata.creationTimestamp),
    };

    // Extract port from first container spec if available
    const containers = pod.spec?.containers;
    if (containers && containers.length > 0) {
      const ports = containers[0]?.ports;
      if (ports && ports.length > 0 && ports[0]) {
        result.port = ports[0].containerPort;
      }
    }

    return result;
  }

  /**
   * Fetch pod logs (last 100 lines by default). All log text is surfaced via
   * `stdout`; `stderr` is always empty here since the log endpoint used does
   * not separate the two streams.
   */
  async getContainerLogs(
    containerId: string,
    opts?: { tail?: number; since?: number },
  ): Promise<ContainerLogs> {
    const logOpts: { tail?: number; since?: number } = {
      tail: opts?.tail ?? 100,
    };
    if (opts?.since !== undefined) {
      logOpts.since = opts.since;
    }
    const stdout = await this.client.getLogs(this.namespace, containerId, logOpts);
    return { stdout, stderr: '' };
  }

  /**
   * List managed pods (label selector `mcpctl.managed=true`, URL-encoded) in
   * the given namespace, defaulting to this orchestrator's namespace.
   * Returns [] on any API error rather than throwing.
   */
  async listContainers(namespace?: string): Promise<ContainerInfo[]> {
    const ns = namespace ?? this.namespace;
    const res = await this.client.get<K8sPodList>(
      `/api/v1/namespaces/${ns}/pods?labelSelector=mcpctl.managed%3Dtrue`,
    );
    if (res.statusCode >= 400) return [];

    return res.body.items.map((pod) => {
      const info: ContainerInfo = {
        containerId: pod.metadata.name,
        name: pod.metadata.name,
        state: mapPhase(pod.status.phase, pod.status.containerStatuses),
        createdAt: new Date(pod.metadata.creationTimestamp),
      };
      return info;
    });
  }

  /**
   * Create the namespace if it does not already exist. A 409 (already exists,
   * e.g. created concurrently) is treated as success.
   * @throws Error for any other creation failure.
   */
  async ensureNamespace(name: string): Promise<void> {
    const res = await this.client.get(`/api/v1/namespaces/${name}`);
    if (res.statusCode === 200) return;

    const nsManifest = generateNamespaceSpec(name);
    const createRes = await this.client.post('/api/v1/namespaces', nsManifest);
    if (createRes.statusCode >= 400 && createRes.statusCode !== 409) {
      const err = createRes.body as { message?: string };
      throw new Error(`Failed to create namespace "${name}": ${err.message ?? `HTTP ${createRes.statusCode}`}`);
    }
  }

  /** Namespace this orchestrator operates in. */
  getNamespace(): string {
    return this.namespace;
  }
}
|
||||||
175
src/mcpd/src/services/k8s/manifest-generator.ts
Normal file
175
src/mcpd/src/services/k8s/manifest-generator.ts
Normal file
@@ -0,0 +1,175 @@
|
|||||||
|
import type { ContainerSpec } from '../orchestrator.js';
|
||||||
|
import { DEFAULT_MEMORY_LIMIT, DEFAULT_NANO_CPUS } from '../orchestrator.js';
|
||||||
|
|
||||||
|
// Label key stamped (value 'true') on every resource generated here; the
// orchestrator's pod listing filters on it to find managed pods.
const MCPCTL_LABEL = 'mcpctl.managed';
|
||||||
|
|
||||||
|
/**
 * Typed subset of a Kubernetes v1 Pod manifest — only the fields this
 * generator emits, not the full API schema.
 */
export interface K8sPodManifest {
  apiVersion: 'v1';
  kind: 'Pod';
  metadata: {
    name: string;
    namespace: string;
    labels: Record<string, string>;
  };
  spec: {
    containers: Array<{
      name: string;
      image: string;
      env?: Array<{ name: string; value: string }>;
      ports?: Array<{ containerPort: number }>;
      // Requests are set equal to limits by buildContainerSpec.
      resources: {
        limits: { memory: string; cpu: string };
        requests: { memory: string; cpu: string };
      };
      securityContext: {
        runAsNonRoot: boolean;
        readOnlyRootFilesystem: boolean;
        allowPrivilegeEscalation: boolean;
      };
    }>;
    restartPolicy: 'Always' | 'Never' | 'OnFailure';
  };
}
|
||||||
|
|
||||||
|
/**
 * Typed subset of a Kubernetes apps/v1 Deployment manifest; the pod template
 * reuses the Pod spec shape above.
 */
export interface K8sDeploymentManifest {
  apiVersion: 'apps/v1';
  kind: 'Deployment';
  metadata: {
    name: string;
    namespace: string;
    labels: Record<string, string>;
  };
  spec: {
    replicas: number;
    selector: { matchLabels: Record<string, string> };
    template: {
      metadata: { labels: Record<string, string> };
      spec: K8sPodManifest['spec'];
    };
  };
}
|
||||||
|
|
||||||
|
/** Minimal Kubernetes v1 Namespace manifest. */
export interface K8sNamespaceManifest {
  apiVersion: 'v1';
  kind: 'Namespace';
  metadata: { name: string };
}
|
||||||
|
|
||||||
|
function formatMemory(bytes: number): string {
|
||||||
|
if (bytes >= 1024 * 1024 * 1024) return `${Math.floor(bytes / (1024 * 1024 * 1024))}Gi`;
|
||||||
|
if (bytes >= 1024 * 1024) return `${Math.floor(bytes / (1024 * 1024))}Mi`;
|
||||||
|
if (bytes >= 1024) return `${Math.floor(bytes / 1024)}Ki`;
|
||||||
|
return `${bytes}`;
|
||||||
|
}
|
||||||
|
|
||||||
|
function formatCpu(nanoCpus: number): string {
|
||||||
|
const millicores = Math.floor(nanoCpus / 1_000_000);
|
||||||
|
return `${millicores}m`;
|
||||||
|
}
|
||||||
|
|
||||||
|
function sanitizeName(name: string): string {
|
||||||
|
return name.toLowerCase().replace(/[^a-z0-9-]/g, '-').replace(/^-+|-+$/g, '').slice(0, 63);
|
||||||
|
}
|
||||||
|
|
||||||
|
function buildLabels(spec: ContainerSpec): Record<string, string> {
|
||||||
|
return {
|
||||||
|
[MCPCTL_LABEL]: 'true',
|
||||||
|
'app.kubernetes.io/managed-by': 'mcpctl',
|
||||||
|
'app.kubernetes.io/name': sanitizeName(spec.name),
|
||||||
|
...spec.labels,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
function buildContainerSpec(spec: ContainerSpec) {
|
||||||
|
const memoryLimit = spec.memoryLimit ?? DEFAULT_MEMORY_LIMIT;
|
||||||
|
const nanoCpus = spec.nanoCpus ?? DEFAULT_NANO_CPUS;
|
||||||
|
const memStr = formatMemory(memoryLimit);
|
||||||
|
const cpuStr = formatCpu(nanoCpus);
|
||||||
|
|
||||||
|
const container: {
|
||||||
|
name: string;
|
||||||
|
image: string;
|
||||||
|
env?: Array<{ name: string; value: string }>;
|
||||||
|
ports?: Array<{ containerPort: number }>;
|
||||||
|
resources: { limits: { memory: string; cpu: string }; requests: { memory: string; cpu: string } };
|
||||||
|
securityContext: { runAsNonRoot: boolean; readOnlyRootFilesystem: boolean; allowPrivilegeEscalation: boolean };
|
||||||
|
} = {
|
||||||
|
name: sanitizeName(spec.name),
|
||||||
|
image: spec.image,
|
||||||
|
resources: {
|
||||||
|
limits: { memory: memStr, cpu: cpuStr },
|
||||||
|
requests: { memory: memStr, cpu: cpuStr },
|
||||||
|
},
|
||||||
|
securityContext: {
|
||||||
|
runAsNonRoot: true,
|
||||||
|
readOnlyRootFilesystem: true,
|
||||||
|
allowPrivilegeEscalation: false,
|
||||||
|
},
|
||||||
|
};
|
||||||
|
|
||||||
|
if (spec.env && Object.keys(spec.env).length > 0) {
|
||||||
|
container.env = Object.entries(spec.env).map(([name, value]) => ({ name, value }));
|
||||||
|
}
|
||||||
|
|
||||||
|
if (spec.containerPort) {
|
||||||
|
container.ports = [{ containerPort: spec.containerPort }];
|
||||||
|
}
|
||||||
|
|
||||||
|
return container;
|
||||||
|
}
|
||||||
|
|
||||||
|
export function generatePodSpec(spec: ContainerSpec, namespace: string): K8sPodManifest {
|
||||||
|
const labels = buildLabels(spec);
|
||||||
|
return {
|
||||||
|
apiVersion: 'v1',
|
||||||
|
kind: 'Pod',
|
||||||
|
metadata: {
|
||||||
|
name: sanitizeName(spec.name),
|
||||||
|
namespace,
|
||||||
|
labels,
|
||||||
|
},
|
||||||
|
spec: {
|
||||||
|
containers: [buildContainerSpec(spec)],
|
||||||
|
restartPolicy: 'Always',
|
||||||
|
},
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
export function generateDeploymentSpec(spec: ContainerSpec, namespace: string, replicas = 1): K8sDeploymentManifest {
|
||||||
|
const labels = buildLabels(spec);
|
||||||
|
const selectorLabels = {
|
||||||
|
'app.kubernetes.io/name': sanitizeName(spec.name),
|
||||||
|
[MCPCTL_LABEL]: 'true',
|
||||||
|
};
|
||||||
|
|
||||||
|
return {
|
||||||
|
apiVersion: 'apps/v1',
|
||||||
|
kind: 'Deployment',
|
||||||
|
metadata: {
|
||||||
|
name: sanitizeName(spec.name),
|
||||||
|
namespace,
|
||||||
|
labels,
|
||||||
|
},
|
||||||
|
spec: {
|
||||||
|
replicas,
|
||||||
|
selector: { matchLabels: selectorLabels },
|
||||||
|
template: {
|
||||||
|
metadata: { labels },
|
||||||
|
spec: {
|
||||||
|
containers: [buildContainerSpec(spec)],
|
||||||
|
restartPolicy: 'Always',
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
export function generateNamespaceSpec(name: string): K8sNamespaceManifest {
|
||||||
|
return {
|
||||||
|
apiVersion: 'v1',
|
||||||
|
kind: 'Namespace',
|
||||||
|
metadata: { name },
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
// Re-exported so unit tests can exercise these helpers directly.
export { sanitizeName, formatMemory, formatCpu };
|
||||||
166
src/mcpd/tests/k8s-manifest.test.ts
Normal file
166
src/mcpd/tests/k8s-manifest.test.ts
Normal file
@@ -0,0 +1,166 @@
|
|||||||
|
import { describe, it, expect } from 'vitest';
|
||||||
|
import {
|
||||||
|
generatePodSpec,
|
||||||
|
generateDeploymentSpec,
|
||||||
|
generateNamespaceSpec,
|
||||||
|
formatMemory,
|
||||||
|
formatCpu,
|
||||||
|
sanitizeName,
|
||||||
|
} from '../src/services/k8s/manifest-generator.js';
|
||||||
|
import type { ContainerSpec } from '../src/services/orchestrator.js';
|
||||||
|
|
||||||
|
// Minimal spec shared by all cases below; tests spread-and-extend it rather
// than mutating it.
const baseSpec: ContainerSpec = {
  image: 'mcpctl/test-server:latest',
  name: 'test-server',
};
|
||||||
|
|
||||||
|
// Unit-formatting helper: byte counts -> K8s binary quantities (Gi/Mi/Ki).
describe('formatMemory', () => {
  it('formats bytes to Gi', () => {
    expect(formatMemory(1024 * 1024 * 1024)).toBe('1Gi');
    expect(formatMemory(2 * 1024 * 1024 * 1024)).toBe('2Gi');
  });

  it('formats bytes to Mi', () => {
    expect(formatMemory(512 * 1024 * 1024)).toBe('512Mi');
    expect(formatMemory(256 * 1024 * 1024)).toBe('256Mi');
  });

  it('formats bytes to Ki', () => {
    expect(formatMemory(64 * 1024)).toBe('64Ki');
  });

  it('formats small values as plain bytes', () => {
    // Below 1Ki the raw byte count is emitted with no unit suffix.
    expect(formatMemory(500)).toBe('500');
  });
});

// nanoCPUs (Docker convention, 1 CPU = 1e9) -> millicores ("Nm").
describe('formatCpu', () => {
  it('converts nanoCPUs to millicores', () => {
    expect(formatCpu(500_000_000)).toBe('500m');
    expect(formatCpu(1_000_000_000)).toBe('1000m');
    expect(formatCpu(250_000_000)).toBe('250m');
  });
});

// Name coercion into a K8s-safe lowercase alphanumeric-plus-hyphen label.
describe('sanitizeName', () => {
  it('lowercases and replaces invalid chars', () => {
    expect(sanitizeName('My Server')).toBe('my-server');
    expect(sanitizeName('test_server.v2')).toBe('test-server-v2');
  });

  it('strips leading/trailing hyphens', () => {
    expect(sanitizeName('-hello-')).toBe('hello');
  });

  it('truncates to 63 chars', () => {
    // 63 is the K8s DNS-label length limit.
    const long = 'a'.repeat(100);
    expect(sanitizeName(long).length).toBeLessThanOrEqual(63);
  });
});
|
||||||
|
|
||||||
|
// Pod manifest generation: shape, defaults, optional fields, and hardening.
describe('generatePodSpec', () => {
  it('generates valid pod manifest', () => {
    const pod = generatePodSpec(baseSpec, 'default');

    expect(pod.apiVersion).toBe('v1');
    expect(pod.kind).toBe('Pod');
    expect(pod.metadata.name).toBe('test-server');
    expect(pod.metadata.namespace).toBe('default');
    expect(pod.metadata.labels['mcpctl.managed']).toBe('true');
    expect(pod.spec.containers).toHaveLength(1);
    expect(pod.spec.containers[0]!.image).toBe('mcpctl/test-server:latest');
    expect(pod.spec.restartPolicy).toBe('Always');
  });

  it('applies default resource limits', () => {
    // Defaults come from DEFAULT_MEMORY_LIMIT / DEFAULT_NANO_CPUS in orchestrator.js.
    const pod = generatePodSpec(baseSpec, 'default');
    const container = pod.spec.containers[0]!;
    expect(container.resources.limits.memory).toBe('512Mi');
    expect(container.resources.limits.cpu).toBe('500m');
  });

  it('applies custom resource limits', () => {
    const spec: ContainerSpec = {
      ...baseSpec,
      memoryLimit: 1024 * 1024 * 1024,
      nanoCpus: 1_000_000_000,
    };
    const pod = generatePodSpec(spec, 'default');
    const container = pod.spec.containers[0]!;
    expect(container.resources.limits.memory).toBe('1Gi');
    expect(container.resources.limits.cpu).toBe('1000m');
  });

  it('includes env vars when specified', () => {
    const spec: ContainerSpec = {
      ...baseSpec,
      env: { API_KEY: 'secret', PORT: '3000' },
    };
    const pod = generatePodSpec(spec, 'test-ns');
    const container = pod.spec.containers[0]!;
    expect(container.env).toEqual([
      { name: 'API_KEY', value: 'secret' },
      { name: 'PORT', value: '3000' },
    ]);
  });

  it('includes port when specified', () => {
    const spec: ContainerSpec = { ...baseSpec, containerPort: 8080 };
    const pod = generatePodSpec(spec, 'default');
    const container = pod.spec.containers[0]!;
    expect(container.ports).toEqual([{ containerPort: 8080 }]);
  });

  it('omits env and ports when not specified', () => {
    // Optional fields should be absent (undefined), not empty arrays.
    const pod = generatePodSpec(baseSpec, 'default');
    const container = pod.spec.containers[0]!;
    expect(container.env).toBeUndefined();
    expect(container.ports).toBeUndefined();
  });

  it('sets security context', () => {
    const pod = generatePodSpec(baseSpec, 'default');
    const sc = pod.spec.containers[0]!.securityContext;
    expect(sc.runAsNonRoot).toBe(true);
    expect(sc.readOnlyRootFilesystem).toBe(true);
    expect(sc.allowPrivilegeEscalation).toBe(false);
  });

  it('propagates custom labels', () => {
    // Custom labels ride along with the reserved mcpctl.managed label.
    const spec: ContainerSpec = {
      ...baseSpec,
      labels: { team: 'infra', version: 'v1' },
    };
    const pod = generatePodSpec(spec, 'default');
    expect(pod.metadata.labels['team']).toBe('infra');
    expect(pod.metadata.labels['version']).toBe('v1');
    expect(pod.metadata.labels['mcpctl.managed']).toBe('true');
  });
});
|
||||||
|
|
||||||
|
describe('generateDeploymentSpec', () => {
|
||||||
|
it('generates valid deployment manifest', () => {
|
||||||
|
const dep = generateDeploymentSpec(baseSpec, 'prod', 3);
|
||||||
|
|
||||||
|
expect(dep.apiVersion).toBe('apps/v1');
|
||||||
|
expect(dep.kind).toBe('Deployment');
|
||||||
|
expect(dep.metadata.namespace).toBe('prod');
|
||||||
|
expect(dep.spec.replicas).toBe(3);
|
||||||
|
expect(dep.spec.selector.matchLabels['mcpctl.managed']).toBe('true');
|
||||||
|
expect(dep.spec.template.spec.containers).toHaveLength(1);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('defaults to 1 replica', () => {
|
||||||
|
const dep = generateDeploymentSpec(baseSpec, 'default');
|
||||||
|
expect(dep.spec.replicas).toBe(1);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('generateNamespaceSpec', () => {
|
||||||
|
it('generates namespace manifest', () => {
|
||||||
|
const ns = generateNamespaceSpec('mcpctl-prod');
|
||||||
|
expect(ns.apiVersion).toBe('v1');
|
||||||
|
expect(ns.kind).toBe('Namespace');
|
||||||
|
expect(ns.metadata.name).toBe('mcpctl-prod');
|
||||||
|
});
|
||||||
|
});
|
||||||
266
src/mcpd/tests/k8s-orchestrator.test.ts
Normal file
266
src/mcpd/tests/k8s-orchestrator.test.ts
Normal file
@@ -0,0 +1,266 @@
|
|||||||
|
import { describe, it, expect, vi, beforeEach } from 'vitest';
|
||||||
|
import type { K8sClientConfig } from '../src/services/k8s/k8s-client.js';
|
||||||
|
|
||||||
|
// Mock the K8sClient before importing KubernetesOrchestrator
|
||||||
|
vi.mock('../src/services/k8s/k8s-client.js', () => {
|
||||||
|
class MockK8sClient {
|
||||||
|
defaultNamespace: string;
|
||||||
|
// Store mock handlers so tests can override
|
||||||
|
_handlers = new Map<string, { statusCode: number; body: unknown }>();
|
||||||
|
|
||||||
|
constructor(config: K8sClientConfig) {
|
||||||
|
this.defaultNamespace = config.namespace ?? 'default';
|
||||||
|
}
|
||||||
|
|
||||||
|
_setResponse(key: string, statusCode: number, body: unknown) {
|
||||||
|
this._handlers.set(key, { statusCode, body });
|
||||||
|
}
|
||||||
|
|
||||||
|
_getResponse(key: string) {
|
||||||
|
return this._handlers.get(key) ?? { statusCode: 200, body: {} };
|
||||||
|
}
|
||||||
|
|
||||||
|
async get(path: string) { return this._getResponse(`GET:${path}`); }
|
||||||
|
async post(path: string, _body: unknown) { return this._getResponse(`POST:${path}`); }
|
||||||
|
async delete(path: string) { return this._getResponse(`DELETE:${path}`); }
|
||||||
|
async patch(path: string, _body: unknown) { return this._getResponse(`PATCH:${path}`); }
|
||||||
|
async getLogs(_ns: string, _pod: string, _opts?: unknown) {
|
||||||
|
return this._getResponse('LOGS')?.body ?? '';
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return {
|
||||||
|
K8sClient: MockK8sClient,
|
||||||
|
loadDefaultConfig: vi.fn(),
|
||||||
|
parseKubeconfig: vi.fn(),
|
||||||
|
};
|
||||||
|
});
|
||||||
|
|
||||||
|
import { KubernetesOrchestrator } from '../src/services/k8s/kubernetes-orchestrator.js';
|
||||||
|
import type { ContainerSpec } from '../src/services/orchestrator.js';
|
||||||
|
|
||||||
|
function getClient(orch: KubernetesOrchestrator): {
|
||||||
|
_setResponse(key: string, statusCode: number, body: unknown): void;
|
||||||
|
} {
|
||||||
|
// Access private client for test setup
|
||||||
|
return (orch as unknown as { client: { _setResponse(k: string, sc: number, b: unknown): void } }).client;
|
||||||
|
}
|
||||||
|
|
||||||
|
const testConfig: K8sClientConfig = {
|
||||||
|
apiServer: 'https://localhost:6443',
|
||||||
|
token: 'test-token',
|
||||||
|
namespace: 'test-ns',
|
||||||
|
};
|
||||||
|
|
||||||
|
const testSpec: ContainerSpec = {
|
||||||
|
image: 'mcpctl/server:latest',
|
||||||
|
name: 'my-server',
|
||||||
|
env: { PORT: '3000' },
|
||||||
|
containerPort: 3000,
|
||||||
|
};
|
||||||
|
|
||||||
|
const podStatusRunning = {
|
||||||
|
metadata: {
|
||||||
|
name: 'my-server',
|
||||||
|
namespace: 'test-ns',
|
||||||
|
creationTimestamp: '2026-01-01T00:00:00Z',
|
||||||
|
labels: { 'mcpctl.managed': 'true' },
|
||||||
|
},
|
||||||
|
status: {
|
||||||
|
phase: 'Running',
|
||||||
|
containerStatuses: [{
|
||||||
|
state: { running: { startedAt: '2026-01-01T00:00:00Z' } },
|
||||||
|
}],
|
||||||
|
},
|
||||||
|
spec: {
|
||||||
|
containers: [{ ports: [{ containerPort: 3000 }] }],
|
||||||
|
},
|
||||||
|
};
|
||||||
|
|
||||||
|
const podStatusPending = {
|
||||||
|
metadata: {
|
||||||
|
name: 'my-server',
|
||||||
|
namespace: 'test-ns',
|
||||||
|
creationTimestamp: '2026-01-01T00:00:00Z',
|
||||||
|
},
|
||||||
|
status: {
|
||||||
|
phase: 'Pending',
|
||||||
|
containerStatuses: [{
|
||||||
|
state: { waiting: { reason: 'ContainerCreating' } },
|
||||||
|
}],
|
||||||
|
},
|
||||||
|
};
|
||||||
|
|
||||||
|
describe('KubernetesOrchestrator', () => {
|
||||||
|
let orch: KubernetesOrchestrator;
|
||||||
|
|
||||||
|
beforeEach(() => {
|
||||||
|
orch = new KubernetesOrchestrator(testConfig);
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('ping', () => {
|
||||||
|
it('returns true on successful API call', async () => {
|
||||||
|
getClient(orch)._setResponse('GET:/api/v1', 200, { kind: 'APIResourceList' });
|
||||||
|
expect(await orch.ping()).toBe(true);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('returns false on error', async () => {
|
||||||
|
getClient(orch)._setResponse('GET:/api/v1', 500, { message: 'internal error' });
|
||||||
|
expect(await orch.ping()).toBe(false);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('pullImage', () => {
|
||||||
|
it('is a no-op for K8s', async () => {
|
||||||
|
await expect(orch.pullImage('some-image:latest')).resolves.toBeUndefined();
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('createContainer', () => {
|
||||||
|
it('creates a pod and returns container info', async () => {
|
||||||
|
const client = getClient(orch);
|
||||||
|
// ensureNamespace check
|
||||||
|
client._setResponse('GET:/api/v1/namespaces/test-ns', 200, {});
|
||||||
|
// create pod
|
||||||
|
client._setResponse('POST:/api/v1/namespaces/test-ns/pods', 201, podStatusRunning);
|
||||||
|
// inspect after creation
|
||||||
|
client._setResponse('GET:/api/v1/namespaces/test-ns/pods/my-server', 200, podStatusRunning);
|
||||||
|
|
||||||
|
const info = await orch.createContainer(testSpec);
|
||||||
|
expect(info.containerId).toBe('my-server');
|
||||||
|
expect(info.state).toBe('running');
|
||||||
|
expect(info.port).toBe(3000);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('throws on API error', async () => {
|
||||||
|
const client = getClient(orch);
|
||||||
|
client._setResponse('GET:/api/v1/namespaces/test-ns', 200, {});
|
||||||
|
client._setResponse('POST:/api/v1/namespaces/test-ns/pods', 422, {
|
||||||
|
message: 'pod already exists',
|
||||||
|
});
|
||||||
|
|
||||||
|
await expect(orch.createContainer(testSpec)).rejects.toThrow('Failed to create pod');
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('inspectContainer', () => {
|
||||||
|
it('returns running container info', async () => {
|
||||||
|
getClient(orch)._setResponse('GET:/api/v1/namespaces/test-ns/pods/my-server', 200, podStatusRunning);
|
||||||
|
|
||||||
|
const info = await orch.inspectContainer('my-server');
|
||||||
|
expect(info.state).toBe('running');
|
||||||
|
expect(info.name).toBe('my-server');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('maps pending state correctly', async () => {
|
||||||
|
getClient(orch)._setResponse('GET:/api/v1/namespaces/test-ns/pods/my-server', 200, podStatusPending);
|
||||||
|
|
||||||
|
const info = await orch.inspectContainer('my-server');
|
||||||
|
expect(info.state).toBe('starting');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('throws on 404', async () => {
|
||||||
|
getClient(orch)._setResponse('GET:/api/v1/namespaces/test-ns/pods/missing', 404, {
|
||||||
|
message: 'pods "missing" not found',
|
||||||
|
});
|
||||||
|
|
||||||
|
await expect(orch.inspectContainer('missing')).rejects.toThrow('not found');
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('stopContainer', () => {
|
||||||
|
it('deletes the pod', async () => {
|
||||||
|
getClient(orch)._setResponse('DELETE:/api/v1/namespaces/test-ns/pods/my-server', 200, {});
|
||||||
|
await expect(orch.stopContainer('my-server')).resolves.toBeUndefined();
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('removeContainer', () => {
|
||||||
|
it('deletes the pod successfully', async () => {
|
||||||
|
getClient(orch)._setResponse('DELETE:/api/v1/namespaces/test-ns/pods/my-server', 200, {});
|
||||||
|
await expect(orch.removeContainer('my-server')).resolves.toBeUndefined();
|
||||||
|
});
|
||||||
|
|
||||||
|
it('ignores 404 (already deleted)', async () => {
|
||||||
|
getClient(orch)._setResponse('DELETE:/api/v1/namespaces/test-ns/pods/my-server', 404, {});
|
||||||
|
await expect(orch.removeContainer('my-server')).resolves.toBeUndefined();
|
||||||
|
});
|
||||||
|
|
||||||
|
it('throws on other errors', async () => {
|
||||||
|
getClient(orch)._setResponse('DELETE:/api/v1/namespaces/test-ns/pods/my-server', 403, {
|
||||||
|
message: 'forbidden',
|
||||||
|
});
|
||||||
|
await expect(orch.removeContainer('my-server')).rejects.toThrow('Failed to delete pod');
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('getContainerLogs', () => {
|
||||||
|
it('returns logs from pod', async () => {
|
||||||
|
getClient(orch)._setResponse('LOGS', 200, 'log line 1\nlog line 2\n');
|
||||||
|
|
||||||
|
const logs = await orch.getContainerLogs('my-server');
|
||||||
|
expect(logs.stdout).toBe('log line 1\nlog line 2\n');
|
||||||
|
expect(logs.stderr).toBe('');
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('listContainers', () => {
|
||||||
|
it('lists managed pods', async () => {
|
||||||
|
getClient(orch)._setResponse(
|
||||||
|
'GET:/api/v1/namespaces/test-ns/pods?labelSelector=mcpctl.managed%3Dtrue',
|
||||||
|
200,
|
||||||
|
{ items: [podStatusRunning] },
|
||||||
|
);
|
||||||
|
|
||||||
|
const containers = await orch.listContainers();
|
||||||
|
expect(containers).toHaveLength(1);
|
||||||
|
expect(containers[0]!.containerId).toBe('my-server');
|
||||||
|
expect(containers[0]!.state).toBe('running');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('returns empty on API error', async () => {
|
||||||
|
getClient(orch)._setResponse(
|
||||||
|
'GET:/api/v1/namespaces/test-ns/pods?labelSelector=mcpctl.managed%3Dtrue',
|
||||||
|
500,
|
||||||
|
{},
|
||||||
|
);
|
||||||
|
|
||||||
|
const containers = await orch.listContainers();
|
||||||
|
expect(containers).toEqual([]);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('ensureNamespace', () => {
|
||||||
|
it('does nothing if namespace exists', async () => {
|
||||||
|
getClient(orch)._setResponse('GET:/api/v1/namespaces/test-ns', 200, {});
|
||||||
|
await expect(orch.ensureNamespace('test-ns')).resolves.toBeUndefined();
|
||||||
|
});
|
||||||
|
|
||||||
|
it('creates namespace if not found', async () => {
|
||||||
|
const client = getClient(orch);
|
||||||
|
client._setResponse('GET:/api/v1/namespaces/new-ns', 404, {});
|
||||||
|
client._setResponse('POST:/api/v1/namespaces', 201, {});
|
||||||
|
await expect(orch.ensureNamespace('new-ns')).resolves.toBeUndefined();
|
||||||
|
});
|
||||||
|
|
||||||
|
it('handles conflict (namespace already created by another process)', async () => {
|
||||||
|
const client = getClient(orch);
|
||||||
|
client._setResponse('GET:/api/v1/namespaces/new-ns', 404, {});
|
||||||
|
client._setResponse('POST:/api/v1/namespaces', 409, { message: 'already exists' });
|
||||||
|
await expect(orch.ensureNamespace('new-ns')).resolves.toBeUndefined();
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe('getNamespace', () => {
|
||||||
|
it('returns configured namespace', () => {
|
||||||
|
expect(orch.getNamespace()).toBe('test-ns');
|
||||||
|
});
|
||||||
|
|
||||||
|
it('defaults to "default"', () => {
|
||||||
|
const defaultOrch = new KubernetesOrchestrator({
|
||||||
|
apiServer: 'https://localhost:6443',
|
||||||
|
});
|
||||||
|
expect(defaultOrch.getNamespace()).toBe('default');
|
||||||
|
});
|
||||||
|
});
|
||||||
|
});
|
||||||
Reference in New Issue
Block a user