The pool refactor made ACP client creation lazy, causing the first /llm/health call to spawn + initialize + prompt Gemini in one request (30s+). Now warmup() eagerly starts the subprocess on mcplocal boot. Also fetch models in parallel with LLM health check. Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
206 lines · 7.4 KiB · TypeScript
import { Command } from 'commander';
|
|
import http from 'node:http';
|
|
import { loadConfig } from '../config/index.js';
|
|
import type { ConfigLoaderDeps } from '../config/index.js';
|
|
import { loadCredentials } from '../auth/index.js';
|
|
import type { CredentialsDeps } from '../auth/index.js';
|
|
import { formatJson, formatYaml } from '../formatters/index.js';
|
|
import { APP_VERSION } from '@mcpctl/shared';
|
|
|
|
// ANSI helpers — terminal escape sequences for the table output and spinner
const GREEN = '\x1b[32m'; // success marker (✓)
const RED = '\x1b[31m'; // failure marker (✗)
const DIM = '\x1b[2m'; // de-emphasized text (spinner, model list)
const RESET = '\x1b[0m'; // reset all attributes
const CLEAR_LINE = '\x1b[2K\r'; // erase current line + carriage return (spinner redraw)
|
|
|
|
/**
 * Injectable dependencies for the status command, overridable in tests.
 */
export interface StatusCommandDeps {
  /** Overrides forwarded to loadConfig */
  configDeps: Partial<ConfigLoaderDeps>;
  /** Overrides forwarded to loadCredentials */
  credentialsDeps: Partial<CredentialsDeps>;
  /** Line-oriented output (console.log in production) */
  log: (...args: string[]) => void;
  /** Raw output without a trailing newline — used for spinner redraws */
  write: (text: string) => void;
  /** Probe a daemon's /health endpoint; resolves reachability */
  checkHealth: (url: string) => Promise<boolean>;
  /** Check LLM health via mcplocal's /llm/health endpoint */
  checkLlm: (mcplocalUrl: string) => Promise<string>;
  /** Fetch available models from mcplocal's /llm/models endpoint */
  fetchModels: (mcplocalUrl: string) => Promise<string[]>;
  /** Whether stdout is a TTY (enables the progress spinner) */
  isTTY: boolean;
}
|
|
|
|
function defaultCheckHealth(url: string): Promise<boolean> {
|
|
return new Promise((resolve) => {
|
|
const req = http.get(`${url}/health`, { timeout: 3000 }, (res) => {
|
|
resolve(res.statusCode !== undefined && res.statusCode >= 200 && res.statusCode < 400);
|
|
res.resume();
|
|
});
|
|
req.on('error', () => resolve(false));
|
|
req.on('timeout', () => {
|
|
req.destroy();
|
|
resolve(false);
|
|
});
|
|
});
|
|
}
|
|
|
|
/**
|
|
* Check LLM health by querying mcplocal's /llm/health endpoint.
|
|
* This tests the actual provider running inside the daemon (uses persistent ACP for gemini, etc.)
|
|
*/
|
|
function defaultCheckLlm(mcplocalUrl: string): Promise<string> {
|
|
return new Promise((resolve) => {
|
|
const req = http.get(`${mcplocalUrl}/llm/health`, { timeout: 45000 }, (res) => {
|
|
const chunks: Buffer[] = [];
|
|
res.on('data', (chunk: Buffer) => chunks.push(chunk));
|
|
res.on('end', () => {
|
|
try {
|
|
const body = JSON.parse(Buffer.concat(chunks).toString('utf-8')) as { status: string; error?: string };
|
|
if (body.status === 'ok') {
|
|
resolve('ok');
|
|
} else if (body.status === 'not configured') {
|
|
resolve('not configured');
|
|
} else if (body.error) {
|
|
resolve(body.error.slice(0, 80));
|
|
} else {
|
|
resolve(body.status);
|
|
}
|
|
} catch {
|
|
resolve('invalid response');
|
|
}
|
|
});
|
|
});
|
|
req.on('error', () => resolve('mcplocal unreachable'));
|
|
req.on('timeout', () => { req.destroy(); resolve('timeout'); });
|
|
});
|
|
}
|
|
|
|
function defaultFetchModels(mcplocalUrl: string): Promise<string[]> {
|
|
return new Promise((resolve) => {
|
|
const req = http.get(`${mcplocalUrl}/llm/models`, { timeout: 5000 }, (res) => {
|
|
const chunks: Buffer[] = [];
|
|
res.on('data', (chunk: Buffer) => chunks.push(chunk));
|
|
res.on('end', () => {
|
|
try {
|
|
const body = JSON.parse(Buffer.concat(chunks).toString('utf-8')) as { models?: string[] };
|
|
resolve(body.models ?? []);
|
|
} catch {
|
|
resolve([]);
|
|
}
|
|
});
|
|
});
|
|
req.on('error', () => resolve([]));
|
|
req.on('timeout', () => { req.destroy(); resolve([]); });
|
|
});
|
|
}
|
|
|
|
const SPINNER_FRAMES = ['⠋', '⠙', '⠹', '⠸', '⠼', '⠴', '⠦', '⠧', '⠇', '⠏'];
|
|
|
|
// Production wiring: real console/stdout and real HTTP probes.
const defaultDeps: StatusCommandDeps = {
  configDeps: {},
  credentialsDeps: {},
  log: (...args) => console.log(...args),
  write: (text) => process.stdout.write(text),
  checkHealth: defaultCheckHealth,
  checkLlm: defaultCheckLlm,
  fetchModels: defaultFetchModels,
  // Some environments (piped output) have no isTTY property at all.
  isTTY: process.stdout.isTTY ?? false,
};
|
|
|
|
export function createStatusCommand(deps?: Partial<StatusCommandDeps>): Command {
|
|
const { configDeps, credentialsDeps, log, write, checkHealth, checkLlm, fetchModels, isTTY } = { ...defaultDeps, ...deps };
|
|
|
|
return new Command('status')
|
|
.description('Show mcpctl status and connectivity')
|
|
.option('-o, --output <format>', 'output format (table, json, yaml)', 'table')
|
|
.action(async (opts: { output: string }) => {
|
|
const config = loadConfig(configDeps);
|
|
const creds = loadCredentials(credentialsDeps);
|
|
|
|
const llmLabel = config.llm && config.llm.provider !== 'none'
|
|
? `${config.llm.provider}${config.llm.model ? ` / ${config.llm.model}` : ''}`
|
|
: null;
|
|
|
|
if (opts.output !== 'table') {
|
|
// JSON/YAML: run everything in parallel, wait, output at once
|
|
const [mcplocalReachable, mcpdReachable, llmStatus] = await Promise.all([
|
|
checkHealth(config.mcplocalUrl),
|
|
checkHealth(config.mcpdUrl),
|
|
llmLabel ? checkLlm(config.mcplocalUrl) : Promise.resolve(null),
|
|
]);
|
|
|
|
const llm = llmLabel
|
|
? llmStatus === 'ok' ? llmLabel : `${llmLabel} (${llmStatus})`
|
|
: null;
|
|
|
|
const status = {
|
|
version: APP_VERSION,
|
|
mcplocalUrl: config.mcplocalUrl,
|
|
mcplocalReachable,
|
|
mcpdUrl: config.mcpdUrl,
|
|
mcpdReachable,
|
|
auth: creds ? { user: creds.user } : null,
|
|
registries: config.registries,
|
|
outputFormat: config.outputFormat,
|
|
llm,
|
|
llmStatus,
|
|
};
|
|
|
|
log(opts.output === 'json' ? formatJson(status) : formatYaml(status));
|
|
return;
|
|
}
|
|
|
|
// Table format: print lines progressively, LLM last with spinner
|
|
|
|
// Fast health checks first
|
|
const [mcplocalReachable, mcpdReachable] = await Promise.all([
|
|
checkHealth(config.mcplocalUrl),
|
|
checkHealth(config.mcpdUrl),
|
|
]);
|
|
|
|
log(`mcpctl v${APP_VERSION}`);
|
|
log(`mcplocal: ${config.mcplocalUrl} (${mcplocalReachable ? 'connected' : 'unreachable'})`);
|
|
log(`mcpd: ${config.mcpdUrl} (${mcpdReachable ? 'connected' : 'unreachable'})`);
|
|
log(`Auth: ${creds ? `logged in as ${creds.user}` : 'not logged in'}`);
|
|
log(`Registries: ${config.registries.join(', ')}`);
|
|
log(`Output: ${config.outputFormat}`);
|
|
|
|
if (!llmLabel) {
|
|
log(`LLM: not configured (run 'mcpctl config setup')`);
|
|
return;
|
|
}
|
|
|
|
// LLM check + models fetch in parallel — queries mcplocal endpoints
|
|
const llmPromise = checkLlm(config.mcplocalUrl);
|
|
const modelsPromise = fetchModels(config.mcplocalUrl);
|
|
|
|
if (isTTY) {
|
|
let frame = 0;
|
|
const interval = setInterval(() => {
|
|
write(`${CLEAR_LINE}LLM: ${llmLabel} ${DIM}${SPINNER_FRAMES[frame % SPINNER_FRAMES.length]} checking...${RESET}`);
|
|
frame++;
|
|
}, 80);
|
|
|
|
const [llmStatus, models] = await Promise.all([llmPromise, modelsPromise]);
|
|
clearInterval(interval);
|
|
|
|
if (llmStatus === 'ok' || llmStatus === 'ok (key stored)') {
|
|
write(`${CLEAR_LINE}LLM: ${llmLabel} ${GREEN}✓ ${llmStatus}${RESET}\n`);
|
|
} else {
|
|
write(`${CLEAR_LINE}LLM: ${llmLabel} ${RED}✗ ${llmStatus}${RESET}\n`);
|
|
}
|
|
if (models.length > 0) {
|
|
log(`${DIM} Available: ${models.join(', ')}${RESET}`);
|
|
}
|
|
} else {
|
|
// Non-TTY: no spinner, just wait and print
|
|
const [llmStatus, models] = await Promise.all([llmPromise, modelsPromise]);
|
|
if (llmStatus === 'ok' || llmStatus === 'ok (key stored)') {
|
|
log(`LLM: ${llmLabel} ✓ ${llmStatus}`);
|
|
} else {
|
|
log(`LLM: ${llmLabel} ✗ ${llmStatus}`);
|
|
}
|
|
if (models.length > 0) {
|
|
log(`${DIM} Available: ${models.join(', ')}${RESET}`);
|
|
}
|
|
}
|
|
});
|
|
}
|