feat: LLM provider configuration, secret store, and setup wizard
Add secure credential storage (GNOME Keyring + file fallback), LLM provider config in ~/.mcpctl/config.json, interactive setup wizard (mcpctl config setup), and wire configured provider into mcplocal for smart pagination summaries. - Secret store: SecretStore interface, GnomeKeyringStore, FileSecretStore - Config schema: LlmConfigSchema with provider/model/url/binaryPath - Setup wizard: arrow-key provider/model selection, dynamic model fetch - Provider factory: creates ProviderRegistry from config + secrets - Status: shows LLM line with hint when not configured - 572 tests passing across all packages Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
268
src/cli/tests/commands/config-setup.test.ts
Normal file
268
src/cli/tests/commands/config-setup.test.ts
Normal file
@@ -0,0 +1,268 @@
|
||||
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import { createConfigSetupCommand } from '../../src/commands/config-setup.js';
import type { ConfigSetupDeps, ConfigSetupPrompt } from '../../src/commands/config-setup.js';
import type { SecretStore } from '@mcpctl/shared';
import { mkdtempSync, rmSync, readFileSync } from 'node:fs';
import { join } from 'node:path';
import { tmpdir } from 'node:os';
|
||||
|
||||
let tempDir: string;
|
||||
let logs: string[];
|
||||
|
||||
beforeEach(() => {
|
||||
tempDir = mkdtempSync(join(tmpdir(), 'mcpctl-config-setup-test-'));
|
||||
logs = [];
|
||||
});
|
||||
|
||||
function cleanup() {
|
||||
rmSync(tempDir, { recursive: true, force: true });
|
||||
}
|
||||
|
||||
function mockSecretStore(secrets: Record<string, string> = {}): SecretStore {
|
||||
const store: Record<string, string> = { ...secrets };
|
||||
return {
|
||||
get: vi.fn(async (key: string) => store[key] ?? null),
|
||||
set: vi.fn(async (key: string, value: string) => { store[key] = value; }),
|
||||
delete: vi.fn(async () => true),
|
||||
backend: () => 'mock',
|
||||
};
|
||||
}
|
||||
|
||||
function mockPrompt(answers: unknown[]): ConfigSetupPrompt {
|
||||
let callIndex = 0;
|
||||
return {
|
||||
select: vi.fn(async () => answers[callIndex++]),
|
||||
input: vi.fn(async () => answers[callIndex++] as string),
|
||||
password: vi.fn(async () => answers[callIndex++] as string),
|
||||
confirm: vi.fn(async () => answers[callIndex++] as boolean),
|
||||
};
|
||||
}
|
||||
|
||||
function buildDeps(overrides: {
|
||||
secrets?: Record<string, string>;
|
||||
answers?: unknown[];
|
||||
fetchModels?: ConfigSetupDeps['fetchModels'];
|
||||
} = {}): ConfigSetupDeps {
|
||||
return {
|
||||
configDeps: { configDir: tempDir },
|
||||
secretStore: mockSecretStore(overrides.secrets),
|
||||
log: (...args: string[]) => logs.push(args.join(' ')),
|
||||
prompt: mockPrompt(overrides.answers ?? []),
|
||||
fetchModels: overrides.fetchModels ?? vi.fn(async () => []),
|
||||
};
|
||||
}
|
||||
|
||||
function readConfig(): Record<string, unknown> {
|
||||
const raw = readFileSync(join(tempDir, 'config.json'), 'utf-8');
|
||||
return JSON.parse(raw) as Record<string, unknown>;
|
||||
}
|
||||
|
||||
async function runSetup(deps: ConfigSetupDeps): Promise<void> {
|
||||
const cmd = createConfigSetupCommand(deps);
|
||||
await cmd.parseAsync([], { from: 'user' });
|
||||
}
|
||||
|
||||
describe('config setup wizard', () => {
|
||||
describe('provider: none', () => {
|
||||
it('disables LLM and saves config', async () => {
|
||||
const deps = buildDeps({ answers: ['none'] });
|
||||
await runSetup(deps);
|
||||
|
||||
const config = readConfig();
|
||||
expect(config.llm).toEqual({ provider: 'none' });
|
||||
expect(logs.some((l) => l.includes('LLM disabled'))).toBe(true);
|
||||
cleanup();
|
||||
});
|
||||
});
|
||||
|
||||
describe('provider: gemini-cli', () => {
|
||||
it('saves gemini-cli with selected model', async () => {
|
||||
// Answers: select provider, select model, confirm custom binary=false
|
||||
const deps = buildDeps({ answers: ['gemini-cli', 'gemini-2.5-flash', false] });
|
||||
await runSetup(deps);
|
||||
|
||||
const config = readConfig();
|
||||
expect((config.llm as Record<string, unknown>).provider).toBe('gemini-cli');
|
||||
expect((config.llm as Record<string, unknown>).model).toBe('gemini-2.5-flash');
|
||||
cleanup();
|
||||
});
|
||||
|
||||
it('saves gemini-cli with custom model and binary path', async () => {
|
||||
// Answers: select provider, select custom, enter model name, confirm custom binary=true, enter path
|
||||
const deps = buildDeps({ answers: ['gemini-cli', '__custom__', 'gemini-3.0-flash', true, '/opt/gemini'] });
|
||||
await runSetup(deps);
|
||||
|
||||
const config = readConfig();
|
||||
const llm = config.llm as Record<string, unknown>;
|
||||
expect(llm.model).toBe('gemini-3.0-flash');
|
||||
expect(llm.binaryPath).toBe('/opt/gemini');
|
||||
cleanup();
|
||||
});
|
||||
});
|
||||
|
||||
describe('provider: ollama', () => {
|
||||
it('fetches models and allows selection', async () => {
|
||||
const fetchModels = vi.fn(async () => ['llama3.2', 'codellama', 'mistral']);
|
||||
// Answers: select provider, enter URL, select model
|
||||
const deps = buildDeps({
|
||||
answers: ['ollama', 'http://localhost:11434', 'codellama'],
|
||||
fetchModels,
|
||||
});
|
||||
await runSetup(deps);
|
||||
|
||||
expect(fetchModels).toHaveBeenCalledWith('http://localhost:11434', '/api/tags');
|
||||
const config = readConfig();
|
||||
const llm = config.llm as Record<string, unknown>;
|
||||
expect(llm.provider).toBe('ollama');
|
||||
expect(llm.model).toBe('codellama');
|
||||
expect(llm.url).toBe('http://localhost:11434');
|
||||
cleanup();
|
||||
});
|
||||
|
||||
it('falls back to manual input when fetch fails', async () => {
|
||||
const fetchModels = vi.fn(async () => []);
|
||||
// Answers: select provider, enter URL, enter model manually
|
||||
const deps = buildDeps({
|
||||
answers: ['ollama', 'http://localhost:11434', 'llama3.2'],
|
||||
fetchModels,
|
||||
});
|
||||
await runSetup(deps);
|
||||
|
||||
const config = readConfig();
|
||||
expect((config.llm as Record<string, unknown>).model).toBe('llama3.2');
|
||||
cleanup();
|
||||
});
|
||||
});
|
||||
|
||||
describe('provider: anthropic', () => {
|
||||
it('prompts for API key and saves to secret store', async () => {
|
||||
// Answers: select provider, enter API key, select model
|
||||
const deps = buildDeps({
|
||||
answers: ['anthropic', 'sk-ant-new-key', 'claude-haiku-3-5-20241022'],
|
||||
});
|
||||
await runSetup(deps);
|
||||
|
||||
expect(deps.secretStore.set).toHaveBeenCalledWith('anthropic-api-key', 'sk-ant-new-key');
|
||||
const config = readConfig();
|
||||
const llm = config.llm as Record<string, unknown>;
|
||||
expect(llm.provider).toBe('anthropic');
|
||||
expect(llm.model).toBe('claude-haiku-3-5-20241022');
|
||||
// API key should NOT be in config file
|
||||
expect(llm).not.toHaveProperty('apiKey');
|
||||
cleanup();
|
||||
});
|
||||
|
||||
it('shows existing key masked and allows keeping it', async () => {
|
||||
// Answers: select provider, confirm change=false, select model
|
||||
const deps = buildDeps({
|
||||
secrets: { 'anthropic-api-key': 'sk-ant-existing-key-1234' },
|
||||
answers: ['anthropic', false, 'claude-sonnet-4-20250514'],
|
||||
});
|
||||
await runSetup(deps);
|
||||
|
||||
// Should NOT have called set (kept existing key)
|
||||
expect(deps.secretStore.set).not.toHaveBeenCalled();
|
||||
const config = readConfig();
|
||||
expect((config.llm as Record<string, unknown>).model).toBe('claude-sonnet-4-20250514');
|
||||
cleanup();
|
||||
});
|
||||
|
||||
it('allows replacing existing key', async () => {
|
||||
// Answers: select provider, confirm change=true, enter new key, select model
|
||||
const deps = buildDeps({
|
||||
secrets: { 'anthropic-api-key': 'sk-ant-old' },
|
||||
answers: ['anthropic', true, 'sk-ant-new', 'claude-haiku-3-5-20241022'],
|
||||
});
|
||||
await runSetup(deps);
|
||||
|
||||
expect(deps.secretStore.set).toHaveBeenCalledWith('anthropic-api-key', 'sk-ant-new');
|
||||
cleanup();
|
||||
});
|
||||
});
|
||||
|
||||
describe('provider: vllm', () => {
|
||||
it('fetches models from vLLM and allows selection', async () => {
|
||||
const fetchModels = vi.fn(async () => ['my-model', 'llama-70b']);
|
||||
// Answers: select provider, enter URL, select model
|
||||
const deps = buildDeps({
|
||||
answers: ['vllm', 'http://gpu:8000', 'llama-70b'],
|
||||
fetchModels,
|
||||
});
|
||||
await runSetup(deps);
|
||||
|
||||
expect(fetchModels).toHaveBeenCalledWith('http://gpu:8000', '/v1/models');
|
||||
const config = readConfig();
|
||||
const llm = config.llm as Record<string, unknown>;
|
||||
expect(llm.provider).toBe('vllm');
|
||||
expect(llm.url).toBe('http://gpu:8000');
|
||||
expect(llm.model).toBe('llama-70b');
|
||||
cleanup();
|
||||
});
|
||||
});
|
||||
|
||||
describe('provider: openai', () => {
|
||||
it('prompts for key, model, and optional custom endpoint', async () => {
|
||||
// Answers: select provider, enter key, enter model, confirm custom URL=true, enter URL
|
||||
const deps = buildDeps({
|
||||
answers: ['openai', 'sk-openai-key', 'gpt-4o', true, 'https://custom.api.com'],
|
||||
});
|
||||
await runSetup(deps);
|
||||
|
||||
expect(deps.secretStore.set).toHaveBeenCalledWith('openai-api-key', 'sk-openai-key');
|
||||
const config = readConfig();
|
||||
const llm = config.llm as Record<string, unknown>;
|
||||
expect(llm.provider).toBe('openai');
|
||||
expect(llm.model).toBe('gpt-4o');
|
||||
expect(llm.url).toBe('https://custom.api.com');
|
||||
cleanup();
|
||||
});
|
||||
|
||||
it('skips custom URL when not requested', async () => {
|
||||
// Answers: select provider, enter key, enter model, confirm custom URL=false
|
||||
const deps = buildDeps({
|
||||
answers: ['openai', 'sk-openai-key', 'gpt-4o-mini', false],
|
||||
});
|
||||
await runSetup(deps);
|
||||
|
||||
const config = readConfig();
|
||||
const llm = config.llm as Record<string, unknown>;
|
||||
expect(llm.url).toBeUndefined();
|
||||
cleanup();
|
||||
});
|
||||
});
|
||||
|
||||
describe('provider: deepseek', () => {
|
||||
it('prompts for key and model', async () => {
|
||||
// Answers: select provider, enter key, select model
|
||||
const deps = buildDeps({
|
||||
answers: ['deepseek', 'sk-ds-key', 'deepseek-chat'],
|
||||
});
|
||||
await runSetup(deps);
|
||||
|
||||
expect(deps.secretStore.set).toHaveBeenCalledWith('deepseek-api-key', 'sk-ds-key');
|
||||
const config = readConfig();
|
||||
const llm = config.llm as Record<string, unknown>;
|
||||
expect(llm.provider).toBe('deepseek');
|
||||
expect(llm.model).toBe('deepseek-chat');
|
||||
cleanup();
|
||||
});
|
||||
});
|
||||
|
||||
describe('output messages', () => {
|
||||
it('shows restart instruction', async () => {
|
||||
const deps = buildDeps({ answers: ['gemini-cli', 'gemini-2.5-flash', false] });
|
||||
await runSetup(deps);
|
||||
|
||||
expect(logs.some((l) => l.includes('systemctl --user restart mcplocal'))).toBe(true);
|
||||
cleanup();
|
||||
});
|
||||
|
||||
it('shows configured provider and model', async () => {
|
||||
const deps = buildDeps({ answers: ['gemini-cli', 'gemini-2.5-flash', false] });
|
||||
await runSetup(deps);
|
||||
|
||||
expect(logs.some((l) => l.includes('gemini-cli') && l.includes('gemini-2.5-flash'))).toBe(true);
|
||||
cleanup();
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -126,4 +126,69 @@ describe('status command', () => {
|
||||
expect(output.join('\n')).toContain('official');
|
||||
expect(output.join('\n')).not.toContain('glama');
|
||||
});
|
||||
|
||||
it('shows LLM not configured hint when no LLM is set', async () => {
|
||||
const cmd = createStatusCommand({
|
||||
configDeps: { configDir: tempDir },
|
||||
credentialsDeps: { configDir: tempDir },
|
||||
log,
|
||||
checkHealth: async () => true,
|
||||
});
|
||||
await cmd.parseAsync([], { from: 'user' });
|
||||
const out = output.join('\n');
|
||||
expect(out).toContain('LLM:');
|
||||
expect(out).toContain('not configured');
|
||||
expect(out).toContain('mcpctl config setup');
|
||||
});
|
||||
|
||||
it('shows configured LLM provider and model', async () => {
|
||||
saveConfig({ ...DEFAULT_CONFIG, llm: { provider: 'anthropic', model: 'claude-haiku-3-5-20241022' } }, { configDir: tempDir });
|
||||
const cmd = createStatusCommand({
|
||||
configDeps: { configDir: tempDir },
|
||||
credentialsDeps: { configDir: tempDir },
|
||||
log,
|
||||
checkHealth: async () => true,
|
||||
});
|
||||
await cmd.parseAsync([], { from: 'user' });
|
||||
const out = output.join('\n');
|
||||
expect(out).toContain('LLM:');
|
||||
expect(out).toContain('anthropic / claude-haiku-3-5-20241022');
|
||||
});
|
||||
|
||||
it('shows not configured when LLM provider is none', async () => {
|
||||
saveConfig({ ...DEFAULT_CONFIG, llm: { provider: 'none' } }, { configDir: tempDir });
|
||||
const cmd = createStatusCommand({
|
||||
configDeps: { configDir: tempDir },
|
||||
credentialsDeps: { configDir: tempDir },
|
||||
log,
|
||||
checkHealth: async () => true,
|
||||
});
|
||||
await cmd.parseAsync([], { from: 'user' });
|
||||
expect(output.join('\n')).toContain('not configured');
|
||||
});
|
||||
|
||||
it('includes llm field in JSON output', async () => {
|
||||
saveConfig({ ...DEFAULT_CONFIG, llm: { provider: 'gemini-cli', model: 'gemini-2.5-flash' } }, { configDir: tempDir });
|
||||
const cmd = createStatusCommand({
|
||||
configDeps: { configDir: tempDir },
|
||||
credentialsDeps: { configDir: tempDir },
|
||||
log,
|
||||
checkHealth: async () => true,
|
||||
});
|
||||
await cmd.parseAsync(['-o', 'json'], { from: 'user' });
|
||||
const parsed = JSON.parse(output[0]) as Record<string, unknown>;
|
||||
expect(parsed['llm']).toBe('gemini-cli / gemini-2.5-flash');
|
||||
});
|
||||
|
||||
it('includes null llm in JSON output when not configured', async () => {
|
||||
const cmd = createStatusCommand({
|
||||
configDeps: { configDir: tempDir },
|
||||
credentialsDeps: { configDir: tempDir },
|
||||
log,
|
||||
checkHealth: async () => true,
|
||||
});
|
||||
await cmd.parseAsync(['-o', 'json'], { from: 'user' });
|
||||
const parsed = JSON.parse(output[0]) as Record<string, unknown>;
|
||||
expect(parsed['llm']).toBeNull();
|
||||
});
|
||||
});
|
||||
|
||||
Reference in New Issue
Block a user