Some checks failed
CI/CD / lint (pull_request) Successful in 55s
CI/CD / test (pull_request) Successful in 1m13s
CI/CD / typecheck (pull_request) Successful in 3m10s
CI/CD / smoke (pull_request) Failing after 1m46s
CI/CD / build (pull_request) Successful in 3m24s
CI/CD / publish (pull_request) Has been skipped
Status was showing the server-side LLM list but not whether each one
actually serves inference. This adds a per-LLM probe that POSTs a
tiny prompt to /api/v1/llms/<name>/infer:
messages: [{ role: 'user', content: "Say exactly the word 'hi' and nothing else." }]
max_tokens: 8, temperature: 0
Each registered LLM gets a one-line health line:
Server LLMs: 2 registered (probing live "say hi"...)
fast qwen3-thinking ✓ "hi" 312ms
openai → qwen3-thinking http://litellm.../v1 key:litellm/API_KEY
heavy sonnet ✗ upstream auth failed: 401
anthropic → claude-sonnet-4-5 provider default no key
Probes run in parallel so a single slow LLM doesn't gate the others;
each has its own 15-second timeout. JSON/YAML output gains a
\`health: { ok, ms, say?, error? }\` field per server LLM so dashboards
get the same liveness signal.
Tests: 25/25 (was 24, +1 new for the failure-path render). Workspace
suite: 2006/2006 across 149 files.
Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
296 lines
12 KiB
TypeScript
import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest';
|
|
import { mkdtempSync, rmSync } from 'node:fs';
|
|
import { join } from 'node:path';
|
|
import { tmpdir } from 'node:os';
|
|
import { createStatusCommand } from '../../src/commands/status.js';
|
|
import type { StatusCommandDeps } from '../../src/commands/status.js';
|
|
import { saveConfig, DEFAULT_CONFIG } from '../../src/config/index.js';
|
|
import { saveCredentials } from '../../src/auth/index.js';
|
|
|
|
let tempDir: string;
|
|
let output: string[];
|
|
let written: string[];
|
|
|
|
function log(...args: string[]) {
|
|
output.push(args.join(' '));
|
|
}
|
|
|
|
function write(text: string) {
|
|
written.push(text);
|
|
}
|
|
|
|
function baseDeps(overrides?: Partial<StatusCommandDeps>): Partial<StatusCommandDeps> {
|
|
return {
|
|
configDeps: { configDir: tempDir },
|
|
credentialsDeps: { configDir: tempDir },
|
|
log,
|
|
write,
|
|
checkHealth: async () => true,
|
|
fetchProviders: async () => null,
|
|
fetchServerLlms: async () => null,
|
|
probeServerLlm: async () => ({ ok: true, ms: 12, say: 'hi' }),
|
|
isTTY: false,
|
|
...overrides,
|
|
};
|
|
}
|
|
|
|
beforeEach(() => {
|
|
tempDir = mkdtempSync(join(tmpdir(), 'mcpctl-status-test-'));
|
|
output = [];
|
|
written = [];
|
|
});
|
|
|
|
afterEach(() => {
|
|
rmSync(tempDir, { recursive: true, force: true });
|
|
});
|
|
|
|
describe('status command', () => {
|
|
it('shows status in table format', async () => {
|
|
const cmd = createStatusCommand(baseDeps());
|
|
await cmd.parseAsync([], { from: 'user' });
|
|
const out = output.join('\n');
|
|
expect(out).toContain('mcpctl v');
|
|
expect(out).toContain('mcplocal:');
|
|
expect(out).toContain('mcpd:');
|
|
expect(out).toContain('connected');
|
|
});
|
|
|
|
it('shows unreachable when daemons are down', async () => {
|
|
const cmd = createStatusCommand(baseDeps({ checkHealth: async () => false }));
|
|
await cmd.parseAsync([], { from: 'user' });
|
|
expect(output.join('\n')).toContain('unreachable');
|
|
});
|
|
|
|
it('shows not logged in when no credentials', async () => {
|
|
const cmd = createStatusCommand(baseDeps());
|
|
await cmd.parseAsync([], { from: 'user' });
|
|
expect(output.join('\n')).toContain('not logged in');
|
|
});
|
|
|
|
it('shows logged in user when credentials exist', async () => {
|
|
saveCredentials({ token: 'tok', mcpdUrl: 'http://x:3100', user: 'alice@example.com' }, { configDir: tempDir });
|
|
const cmd = createStatusCommand(baseDeps());
|
|
await cmd.parseAsync([], { from: 'user' });
|
|
expect(output.join('\n')).toContain('logged in as alice@example.com');
|
|
});
|
|
|
|
it('shows status in JSON format', async () => {
|
|
const cmd = createStatusCommand(baseDeps());
|
|
await cmd.parseAsync(['-o', 'json'], { from: 'user' });
|
|
const parsed = JSON.parse(output[0]) as Record<string, unknown>;
|
|
expect(parsed['version']).toBe('0.0.1');
|
|
expect(parsed['mcplocalReachable']).toBe(true);
|
|
expect(parsed['mcpdReachable']).toBe(true);
|
|
});
|
|
|
|
it('shows status in YAML format', async () => {
|
|
const cmd = createStatusCommand(baseDeps({ checkHealth: async () => false }));
|
|
await cmd.parseAsync(['-o', 'yaml'], { from: 'user' });
|
|
expect(output[0]).toContain('mcplocalReachable: false');
|
|
});
|
|
|
|
it('checks correct URLs from config', async () => {
|
|
saveConfig({ ...DEFAULT_CONFIG, mcplocalUrl: 'http://local:3200', mcpdUrl: 'http://remote:3100' }, { configDir: tempDir });
|
|
const checkedUrls: string[] = [];
|
|
const cmd = createStatusCommand(baseDeps({
|
|
checkHealth: async (url) => {
|
|
checkedUrls.push(url);
|
|
return false;
|
|
},
|
|
}));
|
|
await cmd.parseAsync([], { from: 'user' });
|
|
expect(checkedUrls).toContain('http://local:3200');
|
|
expect(checkedUrls).toContain('http://remote:3100');
|
|
});
|
|
|
|
it('shows registries from config', async () => {
|
|
saveConfig({ ...DEFAULT_CONFIG, registries: ['official'] }, { configDir: tempDir });
|
|
const cmd = createStatusCommand(baseDeps());
|
|
await cmd.parseAsync([], { from: 'user' });
|
|
expect(output.join('\n')).toContain('official');
|
|
expect(output.join('\n')).not.toContain('glama');
|
|
});
|
|
|
|
it('shows LLM not configured hint when no LLM is set', async () => {
|
|
const cmd = createStatusCommand(baseDeps());
|
|
await cmd.parseAsync([], { from: 'user' });
|
|
const out = output.join('\n');
|
|
expect(out).toContain('LLM:');
|
|
expect(out).toContain('not configured');
|
|
expect(out).toContain('mcpctl config setup');
|
|
});
|
|
|
|
it('shows green check when LLM is healthy (non-TTY)', async () => {
|
|
saveConfig({ ...DEFAULT_CONFIG, llm: { provider: 'anthropic', model: 'claude-haiku-3-5-20241022' } }, { configDir: tempDir });
|
|
const cmd = createStatusCommand(baseDeps({ checkLlm: async () => 'ok' }));
|
|
await cmd.parseAsync([], { from: 'user' });
|
|
const out = output.join('\n');
|
|
expect(out).toContain('anthropic / claude-haiku-3-5-20241022');
|
|
expect(out).toContain('✓ ok');
|
|
});
|
|
|
|
it('shows red cross when LLM check fails (non-TTY)', async () => {
|
|
saveConfig({ ...DEFAULT_CONFIG, llm: { provider: 'gemini-cli', model: 'gemini-2.5-flash' } }, { configDir: tempDir });
|
|
const cmd = createStatusCommand(baseDeps({ checkLlm: async () => 'not authenticated' }));
|
|
await cmd.parseAsync([], { from: 'user' });
|
|
const out = output.join('\n');
|
|
expect(out).toContain('✗ not authenticated');
|
|
});
|
|
|
|
it('shows error message from mcplocal', async () => {
|
|
saveConfig({ ...DEFAULT_CONFIG, llm: { provider: 'gemini-cli', model: 'gemini-2.5-flash' } }, { configDir: tempDir });
|
|
const cmd = createStatusCommand(baseDeps({ checkLlm: async () => 'binary not found' }));
|
|
await cmd.parseAsync([], { from: 'user' });
|
|
expect(output.join('\n')).toContain('✗ binary not found');
|
|
});
|
|
|
|
it('queries mcplocal URL for LLM health', async () => {
|
|
saveConfig({ ...DEFAULT_CONFIG, mcplocalUrl: 'http://custom:9999', llm: { provider: 'gemini-cli', model: 'gemini-2.5-flash' } }, { configDir: tempDir });
|
|
let queriedUrl = '';
|
|
const cmd = createStatusCommand(baseDeps({
|
|
checkLlm: async (url) => { queriedUrl = url; return 'ok'; },
|
|
}));
|
|
await cmd.parseAsync([], { from: 'user' });
|
|
expect(queriedUrl).toBe('http://custom:9999');
|
|
});
|
|
|
|
it('uses spinner on TTY and writes final result', async () => {
|
|
saveConfig({ ...DEFAULT_CONFIG, llm: { provider: 'gemini-cli', model: 'gemini-2.5-flash' } }, { configDir: tempDir });
|
|
const cmd = createStatusCommand(baseDeps({
|
|
isTTY: true,
|
|
checkLlm: async () => 'ok',
|
|
}));
|
|
await cmd.parseAsync([], { from: 'user' });
|
|
// On TTY, the final LLM line goes through write(), not log()
|
|
const finalWrite = written[written.length - 1];
|
|
expect(finalWrite).toContain('gemini-cli / gemini-2.5-flash');
|
|
expect(finalWrite).toContain('✓ ok');
|
|
});
|
|
|
|
it('uses spinner on TTY and shows failure', async () => {
|
|
saveConfig({ ...DEFAULT_CONFIG, llm: { provider: 'gemini-cli', model: 'gemini-2.5-flash' } }, { configDir: tempDir });
|
|
const cmd = createStatusCommand(baseDeps({
|
|
isTTY: true,
|
|
checkLlm: async () => 'not authenticated',
|
|
}));
|
|
await cmd.parseAsync([], { from: 'user' });
|
|
const finalWrite = written[written.length - 1];
|
|
expect(finalWrite).toContain('✗ not authenticated');
|
|
});
|
|
|
|
it('shows not configured when LLM provider is none', async () => {
|
|
saveConfig({ ...DEFAULT_CONFIG, llm: { provider: 'none' } }, { configDir: tempDir });
|
|
const cmd = createStatusCommand(baseDeps());
|
|
await cmd.parseAsync([], { from: 'user' });
|
|
expect(output.join('\n')).toContain('not configured');
|
|
});
|
|
|
|
it('includes llm and llmStatus in JSON output', async () => {
|
|
saveConfig({ ...DEFAULT_CONFIG, llm: { provider: 'gemini-cli', model: 'gemini-2.5-flash' } }, { configDir: tempDir });
|
|
const cmd = createStatusCommand(baseDeps({ checkLlm: async () => 'ok' }));
|
|
await cmd.parseAsync(['-o', 'json'], { from: 'user' });
|
|
const parsed = JSON.parse(output[0]) as Record<string, unknown>;
|
|
expect(parsed['llm']).toBe('gemini-cli / gemini-2.5-flash');
|
|
expect(parsed['llmStatus']).toBe('ok');
|
|
});
|
|
|
|
it('includes null llm in JSON output when not configured', async () => {
|
|
const cmd = createStatusCommand(baseDeps());
|
|
await cmd.parseAsync(['-o', 'json'], { from: 'user' });
|
|
const parsed = JSON.parse(output[0]) as Record<string, unknown>;
|
|
expect(parsed['llm']).toBeNull();
|
|
expect(parsed['llmStatus']).toBeNull();
|
|
});
|
|
|
|
// ── Server LLMs (mcpd-managed Llm rows) ──
|
|
|
|
it('renders a "Server LLMs:" section grouped by tier in table mode', async () => {
|
|
saveCredentials({ token: 't', mcpdUrl: 'http://mcpd', user: 'u' }, { configDir: tempDir });
|
|
const cmd = createStatusCommand(baseDeps({
|
|
fetchServerLlms: async () => [
|
|
{ id: 'l1', name: 'qwen3-thinking', type: 'openai', model: 'qwen3-thinking', tier: 'fast', url: 'http://x:4000/v1', apiKeyRef: { name: 'litellm', key: 'API_KEY' } },
|
|
{ id: 'l2', name: 'sonnet', type: 'anthropic', model: 'claude-sonnet-4-5', tier: 'heavy', url: '', apiKeyRef: null },
|
|
],
|
|
probeServerLlm: async () => ({ ok: true, ms: 42, say: 'hi' }),
|
|
}));
|
|
await cmd.parseAsync([], { from: 'user' });
|
|
const out = output.join('\n');
|
|
expect(out).toContain('Server LLMs: 2 registered');
|
|
expect(out).toContain('qwen3-thinking');
|
|
expect(out).toContain('openai → qwen3-thinking');
|
|
expect(out).toContain('sonnet');
|
|
expect(out).toContain('anthropic → claude-sonnet-4-5');
|
|
expect(out).toMatch(/fast\s+qwen3-thinking/);
|
|
expect(out).toMatch(/heavy\s+sonnet/);
|
|
// Health probe result rendered for each LLM
|
|
expect(out).toContain('✓ "hi" 42ms');
|
|
});
|
|
|
|
it('renders a failed "say hi" probe with the error message', async () => {
|
|
const cmd = createStatusCommand(baseDeps({
|
|
fetchServerLlms: async () => [
|
|
{ id: 'l1', name: 'broken', type: 'openai', model: 'gpt-4o', tier: 'fast', url: 'http://x', apiKeyRef: null },
|
|
],
|
|
probeServerLlm: async () => ({ ok: false, ms: 5000, error: 'upstream auth failed: 401' }),
|
|
}));
|
|
await cmd.parseAsync([], { from: 'user' });
|
|
const out = output.join('\n');
|
|
expect(out).toContain('Server LLMs: 1 registered');
|
|
expect(out).toContain('broken');
|
|
expect(out).toContain('✗ upstream auth failed: 401');
|
|
});
|
|
|
|
it('renders "none registered" when mcpd has no Llm rows', async () => {
|
|
const cmd = createStatusCommand(baseDeps({ fetchServerLlms: async () => [] }));
|
|
await cmd.parseAsync([], { from: 'user' });
|
|
const out = output.join('\n');
|
|
expect(out).toContain('Server LLMs: none registered');
|
|
expect(out).toContain("'mcpctl create llm'");
|
|
});
|
|
|
|
it('omits the section silently when mcpd is unreachable (fetcher returns null)', async () => {
|
|
const cmd = createStatusCommand(baseDeps({ fetchServerLlms: async () => null }));
|
|
await cmd.parseAsync([], { from: 'user' });
|
|
const out = output.join('\n');
|
|
expect(out).not.toContain('Server LLMs');
|
|
});
|
|
|
|
it('passes the bearer token from saved credentials to the fetcher', async () => {
|
|
saveCredentials({ token: 'tok-abc', mcpdUrl: 'http://mcpd', user: 'u' }, { configDir: tempDir });
|
|
let capturedToken: string | null = '<unseen>';
|
|
const cmd = createStatusCommand(baseDeps({
|
|
fetchServerLlms: async (_url, token) => { capturedToken = token; return []; },
|
|
}));
|
|
await cmd.parseAsync([], { from: 'user' });
|
|
expect(capturedToken).toBe('tok-abc');
|
|
});
|
|
|
|
it('passes null token when there are no saved credentials', async () => {
|
|
let capturedToken: string | null = '<unseen>';
|
|
const cmd = createStatusCommand(baseDeps({
|
|
fetchServerLlms: async (_url, token) => { capturedToken = token; return []; },
|
|
}));
|
|
await cmd.parseAsync([], { from: 'user' });
|
|
expect(capturedToken).toBeNull();
|
|
});
|
|
|
|
it('includes serverLlms with probed health in JSON output', async () => {
|
|
const llms = [
|
|
{ id: 'l1', name: 'qwen3-thinking', type: 'openai', model: 'qwen3-thinking', tier: 'fast', url: 'http://x', apiKeyRef: null },
|
|
];
|
|
const cmd = createStatusCommand(baseDeps({
|
|
fetchServerLlms: async () => llms,
|
|
probeServerLlm: async () => ({ ok: true, ms: 99, say: 'hi' }),
|
|
}));
|
|
await cmd.parseAsync(['-o', 'json'], { from: 'user' });
|
|
const parsed = JSON.parse(output[0]) as {
|
|
serverLlms?: Array<typeof llms[number] & { health: { ok: boolean; ms: number; say?: string } }>;
|
|
};
|
|
expect(parsed.serverLlms).toHaveLength(1);
|
|
expect(parsed.serverLlms![0]).toMatchObject({
|
|
name: 'qwen3-thinking',
|
|
health: { ok: true, ms: 99, say: 'hi' },
|
|
});
|
|
});
|
|
});
|