feat: LLM provider configuration, secret store, and setup wizard
Add secure credential storage (GNOME Keyring + file fallback), LLM provider config in ~/.mcpctl/config.json, interactive setup wizard (mcpctl config setup), and wire configured provider into mcplocal for smart pagination summaries. - Secret store: SecretStore interface, GnomeKeyringStore, FileSecretStore - Config schema: LlmConfigSchema with provider/model/url/binaryPath - Setup wizard: arrow-key provider/model selection, dynamic model fetch - Provider factory: creates ProviderRegistry from config + secrets - Status: shows LLM line with hint when not configured - 572 tests passing across all packages Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
319
src/cli/src/commands/config-setup.ts
Normal file
319
src/cli/src/commands/config-setup.ts
Normal file
@@ -0,0 +1,319 @@
|
||||
import { Command } from 'commander';
|
||||
import http from 'node:http';
|
||||
import https from 'node:https';
|
||||
import { loadConfig, saveConfig } from '../config/index.js';
|
||||
import type { ConfigLoaderDeps, McpctlConfig, LlmConfig, LlmProviderName } from '../config/index.js';
|
||||
import type { SecretStore } from '@mcpctl/shared';
|
||||
import { createSecretStore } from '@mcpctl/shared';
|
||||
|
||||
/**
 * Prompt abstraction used by the setup wizard so all interactive I/O can be
 * scripted in tests (see ConfigSetupDeps.prompt; production default is
 * inquirer-backed).
 */
export interface ConfigSetupPrompt {
  /** Arrow-key single selection; resolves to the chosen choice's `value`. */
  select<T>(message: string, choices: Array<{ name: string; value: T; description?: string }>): Promise<T>;
  /** Free-text input with an optional pre-filled default. */
  input(message: string, defaultValue?: string): Promise<string>;
  /** Masked input for secrets (API keys). */
  password(message: string): Promise<string>;
  /** Yes/no confirmation with an optional default answer. */
  confirm(message: string, defaultValue?: boolean): Promise<boolean>;
}
|
||||
|
||||
/**
 * Injectable dependencies for the `mcpctl config setup` command.
 * Production defaults are wired in createConfigSetupCommand; tests override
 * every field to avoid real prompts, network, and the system keyring.
 */
export interface ConfigSetupDeps {
  /** Overrides (e.g. configDir) forwarded to loadConfig/saveConfig. */
  configDeps: Partial<ConfigLoaderDeps>;
  /** Secret backend used to persist API keys (keyring or file fallback). */
  secretStore: SecretStore;
  /** Output sink (console.log in production). */
  log: (...args: string[]) => void;
  /** Interactive prompt implementation. */
  prompt: ConfigSetupPrompt;
  /** Fetches available model names from a provider endpoint (base URL + path). */
  fetchModels: (url: string, path: string) => Promise<string[]>;
}
|
||||
|
||||
/** Shape of one entry in the provider-selection menu. */
interface ProviderChoice {
  /** Display label shown to the user. */
  name: string;
  /** Value persisted to config.llm.provider. */
  value: LlmProviderName;
  /** One-line hint rendered next to the label. */
  description: string;
}
|
||||
|
||||
/**
 * Provider menu entries, in the order presented to the user.
 * Each `value` must be a member of LLM_PROVIDERS in the config schema.
 */
const PROVIDER_CHOICES: ProviderChoice[] = [
  { name: 'Gemini CLI', value: 'gemini-cli', description: 'Google Gemini via local CLI (free, no API key)' },
  { name: 'Ollama', value: 'ollama', description: 'Local models via Ollama' },
  { name: 'Anthropic (Claude)', value: 'anthropic', description: 'Claude API (requires API key)' },
  { name: 'vLLM', value: 'vllm', description: 'Self-hosted vLLM (OpenAI-compatible)' },
  { name: 'OpenAI', value: 'openai', description: 'OpenAI API (requires API key)' },
  { name: 'DeepSeek', value: 'deepseek', description: 'DeepSeek API (requires API key)' },
  { name: 'None (disable)', value: 'none', description: 'Disable LLM features' },
];
|
||||
|
||||
// Curated model lists for providers whose model catalogs are not fetched
// live. Ollama/vLLM query their endpoints instead; OpenAI (empty list in
// the wizard) falls back to manual model entry.
const GEMINI_MODELS = ['gemini-2.5-flash', 'gemini-2.5-pro', 'gemini-2.0-flash'];
const ANTHROPIC_MODELS = ['claude-haiku-3-5-20241022', 'claude-sonnet-4-20250514', 'claude-opus-4-20250514'];
const DEEPSEEK_MODELS = ['deepseek-chat', 'deepseek-reasoner'];
|
||||
|
||||
function defaultFetchModels(baseUrl: string, path: string): Promise<string[]> {
|
||||
return new Promise((resolve) => {
|
||||
const url = new URL(path, baseUrl);
|
||||
const isHttps = url.protocol === 'https:';
|
||||
const transport = isHttps ? https : http;
|
||||
|
||||
const req = transport.get({
|
||||
hostname: url.hostname,
|
||||
port: url.port || (isHttps ? 443 : 80),
|
||||
path: url.pathname,
|
||||
timeout: 5000,
|
||||
}, (res) => {
|
||||
const chunks: Buffer[] = [];
|
||||
res.on('data', (chunk: Buffer) => chunks.push(chunk));
|
||||
res.on('end', () => {
|
||||
try {
|
||||
const raw = Buffer.concat(chunks).toString('utf-8');
|
||||
const data = JSON.parse(raw) as { models?: Array<{ name: string }>; data?: Array<{ id: string }> };
|
||||
// Ollama format: { models: [{ name }] }
|
||||
if (data.models) {
|
||||
resolve(data.models.map((m) => m.name));
|
||||
return;
|
||||
}
|
||||
// OpenAI/vLLM format: { data: [{ id }] }
|
||||
if (data.data) {
|
||||
resolve(data.data.map((m) => m.id));
|
||||
return;
|
||||
}
|
||||
resolve([]);
|
||||
} catch {
|
||||
resolve([]);
|
||||
}
|
||||
});
|
||||
});
|
||||
req.on('error', () => resolve([]));
|
||||
req.on('timeout', () => { req.destroy(); resolve([]); });
|
||||
});
|
||||
}
|
||||
|
||||
async function defaultSelect<T>(message: string, choices: Array<{ name: string; value: T; description?: string }>): Promise<T> {
|
||||
const { default: inquirer } = await import('inquirer');
|
||||
const { answer } = await inquirer.prompt([{
|
||||
type: 'list',
|
||||
name: 'answer',
|
||||
message,
|
||||
choices: choices.map((c) => ({
|
||||
name: c.description ? `${c.name} — ${c.description}` : c.name,
|
||||
value: c.value,
|
||||
short: c.name,
|
||||
})),
|
||||
}]);
|
||||
return answer as T;
|
||||
}
|
||||
|
||||
async function defaultInput(message: string, defaultValue?: string): Promise<string> {
|
||||
const { default: inquirer } = await import('inquirer');
|
||||
const { answer } = await inquirer.prompt([{
|
||||
type: 'input',
|
||||
name: 'answer',
|
||||
message,
|
||||
default: defaultValue,
|
||||
}]);
|
||||
return answer as string;
|
||||
}
|
||||
|
||||
async function defaultPassword(message: string): Promise<string> {
|
||||
const { default: inquirer } = await import('inquirer');
|
||||
const { answer } = await inquirer.prompt([{ type: 'password', name: 'answer', message }]);
|
||||
return answer as string;
|
||||
}
|
||||
|
||||
async function defaultConfirm(message: string, defaultValue?: boolean): Promise<boolean> {
|
||||
const { default: inquirer } = await import('inquirer');
|
||||
const { answer } = await inquirer.prompt([{
|
||||
type: 'confirm',
|
||||
name: 'answer',
|
||||
message,
|
||||
default: defaultValue ?? true,
|
||||
}]);
|
||||
return answer as boolean;
|
||||
}
|
||||
|
||||
/** Production ConfigSetupPrompt wired to the inquirer-backed defaults. */
const defaultPrompt: ConfigSetupPrompt = {
  select: defaultSelect,
  input: defaultInput,
  password: defaultPassword,
  confirm: defaultConfirm,
};
|
||||
|
||||
export function createConfigSetupCommand(deps?: Partial<ConfigSetupDeps>): Command {
|
||||
return new Command('setup')
|
||||
.description('Interactive LLM provider setup wizard')
|
||||
.action(async () => {
|
||||
const configDeps = deps?.configDeps ?? {};
|
||||
const log = deps?.log ?? ((...args: string[]) => console.log(...args));
|
||||
const prompt = deps?.prompt ?? defaultPrompt;
|
||||
const fetchModels = deps?.fetchModels ?? defaultFetchModels;
|
||||
const secretStore = deps?.secretStore ?? await createSecretStore();
|
||||
|
||||
const config = loadConfig(configDeps);
|
||||
const currentLlm = config.llm;
|
||||
|
||||
// Annotate current provider in choices
|
||||
const choices = PROVIDER_CHOICES.map((c) => {
|
||||
if (currentLlm?.provider === c.value) {
|
||||
return { ...c, name: `${c.name} (current)` };
|
||||
}
|
||||
return c;
|
||||
});
|
||||
|
||||
const provider = await prompt.select<LlmProviderName>('Select LLM provider:', choices);
|
||||
|
||||
if (provider === 'none') {
|
||||
const updated: McpctlConfig = { ...config, llm: { provider: 'none' } };
|
||||
saveConfig(updated, configDeps);
|
||||
log('LLM disabled. Restart mcplocal: systemctl --user restart mcplocal');
|
||||
return;
|
||||
}
|
||||
|
||||
let llmConfig: LlmConfig;
|
||||
|
||||
switch (provider) {
|
||||
case 'gemini-cli':
|
||||
llmConfig = await setupGeminiCli(prompt, currentLlm);
|
||||
break;
|
||||
case 'ollama':
|
||||
llmConfig = await setupOllama(prompt, fetchModels, currentLlm);
|
||||
break;
|
||||
case 'anthropic':
|
||||
llmConfig = await setupApiKeyProvider(prompt, secretStore, 'anthropic', 'anthropic-api-key', ANTHROPIC_MODELS, currentLlm);
|
||||
break;
|
||||
case 'vllm':
|
||||
llmConfig = await setupVllm(prompt, fetchModels, currentLlm);
|
||||
break;
|
||||
case 'openai':
|
||||
llmConfig = await setupApiKeyProvider(prompt, secretStore, 'openai', 'openai-api-key', [], currentLlm);
|
||||
break;
|
||||
case 'deepseek':
|
||||
llmConfig = await setupApiKeyProvider(prompt, secretStore, 'deepseek', 'deepseek-api-key', DEEPSEEK_MODELS, currentLlm);
|
||||
break;
|
||||
default:
|
||||
return;
|
||||
}
|
||||
|
||||
const updated: McpctlConfig = { ...config, llm: llmConfig };
|
||||
saveConfig(updated, configDeps);
|
||||
log(`\nLLM configured: ${llmConfig.provider}${llmConfig.model ? ` / ${llmConfig.model}` : ''}`);
|
||||
log('Restart mcplocal: systemctl --user restart mcplocal');
|
||||
});
|
||||
}
|
||||
|
||||
async function setupGeminiCli(prompt: ConfigSetupPrompt, current?: LlmConfig): Promise<LlmConfig> {
|
||||
const model = await prompt.select<string>('Select model:', [
|
||||
...GEMINI_MODELS.map((m) => ({
|
||||
name: m === current?.model ? `${m} (current)` : m,
|
||||
value: m,
|
||||
})),
|
||||
{ name: 'Custom...', value: '__custom__' },
|
||||
]);
|
||||
|
||||
const finalModel = model === '__custom__'
|
||||
? await prompt.input('Model name:', current?.model)
|
||||
: model;
|
||||
|
||||
const customBinary = await prompt.confirm('Use custom binary path?', false);
|
||||
const binaryPath = customBinary
|
||||
? await prompt.input('Binary path:', current?.binaryPath ?? 'gemini')
|
||||
: undefined;
|
||||
|
||||
return { provider: 'gemini-cli', model: finalModel, binaryPath };
|
||||
}
|
||||
|
||||
async function setupOllama(prompt: ConfigSetupPrompt, fetchModels: ConfigSetupDeps['fetchModels'], current?: LlmConfig): Promise<LlmConfig> {
|
||||
const url = await prompt.input('Ollama URL:', current?.url ?? 'http://localhost:11434');
|
||||
|
||||
// Try to fetch models from Ollama
|
||||
const models = await fetchModels(url, '/api/tags');
|
||||
let model: string;
|
||||
|
||||
if (models.length > 0) {
|
||||
const choices = models.map((m) => ({
|
||||
name: m === current?.model ? `${m} (current)` : m,
|
||||
value: m,
|
||||
}));
|
||||
choices.push({ name: 'Custom...', value: '__custom__' });
|
||||
model = await prompt.select<string>('Select model:', choices);
|
||||
if (model === '__custom__') {
|
||||
model = await prompt.input('Model name:', current?.model);
|
||||
}
|
||||
} else {
|
||||
model = await prompt.input('Model name (could not fetch models):', current?.model ?? 'llama3.2');
|
||||
}
|
||||
|
||||
return { provider: 'ollama', model, url };
|
||||
}
|
||||
|
||||
async function setupVllm(prompt: ConfigSetupPrompt, fetchModels: ConfigSetupDeps['fetchModels'], current?: LlmConfig): Promise<LlmConfig> {
|
||||
const url = await prompt.input('vLLM URL:', current?.url ?? 'http://localhost:8000');
|
||||
|
||||
// Try to fetch models from vLLM (OpenAI-compatible)
|
||||
const models = await fetchModels(url, '/v1/models');
|
||||
let model: string;
|
||||
|
||||
if (models.length > 0) {
|
||||
const choices = models.map((m) => ({
|
||||
name: m === current?.model ? `${m} (current)` : m,
|
||||
value: m,
|
||||
}));
|
||||
choices.push({ name: 'Custom...', value: '__custom__' });
|
||||
model = await prompt.select<string>('Select model:', choices);
|
||||
if (model === '__custom__') {
|
||||
model = await prompt.input('Model name:', current?.model);
|
||||
}
|
||||
} else {
|
||||
model = await prompt.input('Model name (could not fetch models):', current?.model ?? 'default');
|
||||
}
|
||||
|
||||
return { provider: 'vllm', model, url };
|
||||
}
|
||||
|
||||
async function setupApiKeyProvider(
|
||||
prompt: ConfigSetupPrompt,
|
||||
secretStore: SecretStore,
|
||||
provider: LlmProviderName,
|
||||
secretKey: string,
|
||||
hardcodedModels: string[],
|
||||
current?: LlmConfig,
|
||||
): Promise<LlmConfig> {
|
||||
// Check for existing API key
|
||||
const existingKey = await secretStore.get(secretKey);
|
||||
let apiKey: string;
|
||||
|
||||
if (existingKey) {
|
||||
const masked = `****${existingKey.slice(-4)}`;
|
||||
const changeKey = await prompt.confirm(`API key stored (${masked}). Change it?`, false);
|
||||
if (changeKey) {
|
||||
apiKey = await prompt.password('API key:');
|
||||
} else {
|
||||
apiKey = existingKey;
|
||||
}
|
||||
} else {
|
||||
apiKey = await prompt.password('API key:');
|
||||
}
|
||||
|
||||
// Store API key
|
||||
if (apiKey !== existingKey) {
|
||||
await secretStore.set(secretKey, apiKey);
|
||||
}
|
||||
|
||||
// Model selection
|
||||
let model: string;
|
||||
if (hardcodedModels.length > 0) {
|
||||
const choices = hardcodedModels.map((m) => ({
|
||||
name: m === current?.model ? `${m} (current)` : m,
|
||||
value: m,
|
||||
}));
|
||||
choices.push({ name: 'Custom...', value: '__custom__' });
|
||||
model = await prompt.select<string>('Select model:', choices);
|
||||
if (model === '__custom__') {
|
||||
model = await prompt.input('Model name:', current?.model);
|
||||
}
|
||||
} else {
|
||||
model = await prompt.input('Model name:', current?.model ?? 'gpt-4o');
|
||||
}
|
||||
|
||||
// Optional custom URL for openai
|
||||
let url: string | undefined;
|
||||
if (provider === 'openai') {
|
||||
const customUrl = await prompt.confirm('Use custom API endpoint?', false);
|
||||
if (customUrl) {
|
||||
url = await prompt.input('API URL:', current?.url ?? 'https://api.openai.com');
|
||||
}
|
||||
}
|
||||
|
||||
return { provider, model, url };
|
||||
}
|
||||
@@ -6,6 +6,7 @@ import { loadConfig, saveConfig, mergeConfig, getConfigPath, DEFAULT_CONFIG } fr
|
||||
import type { McpctlConfig, ConfigLoaderDeps } from '../config/index.js';
|
||||
import { formatJson, formatYaml } from '../formatters/index.js';
|
||||
import { saveCredentials, loadCredentials } from '../auth/index.js';
|
||||
import { createConfigSetupCommand } from './config-setup.js';
|
||||
import type { CredentialsDeps, StoredCredentials } from '../auth/index.js';
|
||||
import type { ApiClient } from '../api-client.js';
|
||||
|
||||
@@ -138,6 +139,8 @@ export function createConfigCommand(deps?: Partial<ConfigCommandDeps>, apiDeps?:
|
||||
registerClaudeCommand('claude', false);
|
||||
registerClaudeCommand('claude-generate', true); // backward compat
|
||||
|
||||
config.addCommand(createConfigSetupCommand({ configDeps }));
|
||||
|
||||
if (apiDeps) {
|
||||
const { client, credentialsDeps, log: apiLog } = apiDeps;
|
||||
|
||||
|
||||
@@ -50,6 +50,10 @@ export function createStatusCommand(deps?: Partial<StatusCommandDeps>): Command
|
||||
checkHealth(config.mcpdUrl),
|
||||
]);
|
||||
|
||||
const llm = config.llm && config.llm.provider !== 'none'
|
||||
? `${config.llm.provider}${config.llm.model ? ` / ${config.llm.model}` : ''}`
|
||||
: null;
|
||||
|
||||
const status = {
|
||||
version: APP_VERSION,
|
||||
mcplocalUrl: config.mcplocalUrl,
|
||||
@@ -59,6 +63,7 @@ export function createStatusCommand(deps?: Partial<StatusCommandDeps>): Command
|
||||
auth: creds ? { user: creds.user } : null,
|
||||
registries: config.registries,
|
||||
outputFormat: config.outputFormat,
|
||||
llm,
|
||||
};
|
||||
|
||||
if (opts.output === 'json') {
|
||||
@@ -72,6 +77,8 @@ export function createStatusCommand(deps?: Partial<StatusCommandDeps>): Command
|
||||
log(`Auth: ${creds ? `logged in as ${creds.user}` : 'not logged in'}`);
|
||||
log(`Registries: ${status.registries.join(', ')}`);
|
||||
log(`Output: ${status.outputFormat}`);
|
||||
log(`LLM: ${status.llm ?? "not configured (run 'mcpctl config setup')"}`);
|
||||
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
export { McpctlConfigSchema, DEFAULT_CONFIG } from './schema.js';
|
||||
export type { McpctlConfig } from './schema.js';
|
||||
export { McpctlConfigSchema, LlmConfigSchema, LLM_PROVIDERS, DEFAULT_CONFIG } from './schema.js';
|
||||
export type { McpctlConfig, LlmConfig, LlmProviderName } from './schema.js';
|
||||
export { loadConfig, saveConfig, mergeConfig, getConfigPath } from './loader.js';
|
||||
export type { ConfigLoaderDeps } from './loader.js';
|
||||
|
||||
@@ -1,5 +1,21 @@
|
||||
import { z } from 'zod';
|
||||
|
||||
/** All supported LLM provider identifiers; 'none' disables LLM features. */
export const LLM_PROVIDERS = ['gemini-cli', 'ollama', 'anthropic', 'openai', 'deepseek', 'vllm', 'none'] as const;
export type LlmProviderName = typeof LLM_PROVIDERS[number];

/**
 * LLM provider settings stored under `llm` in the config file.
 * Deliberately has no apiKey field — secrets live in the secret store.
 * `.strict()` rejects unknown keys so typos in hand-edited configs fail fast.
 */
export const LlmConfigSchema = z.object({
  /** LLM provider name */
  provider: z.enum(LLM_PROVIDERS),
  /** Model name */
  model: z.string().optional(),
  /** Provider URL (for ollama, vllm, openai with custom endpoint) */
  url: z.string().optional(),
  /** Binary path override (for gemini-cli) */
  binaryPath: z.string().optional(),
}).strict();

export type LlmConfig = z.infer<typeof LlmConfigSchema>;
|
||||
|
||||
export const McpctlConfigSchema = z.object({
|
||||
/** mcplocal daemon endpoint (local LLM pre-processing proxy) */
|
||||
mcplocalUrl: z.string().default('http://localhost:3200'),
|
||||
@@ -19,6 +35,8 @@ export const McpctlConfigSchema = z.object({
|
||||
outputFormat: z.enum(['table', 'json', 'yaml']).default('table'),
|
||||
/** Smithery API key */
|
||||
smitheryApiKey: z.string().optional(),
|
||||
/** LLM provider configuration for smart features (pagination summaries, etc.) */
|
||||
llm: LlmConfigSchema.optional(),
|
||||
}).transform((cfg) => {
|
||||
// Backward compatibility: if old daemonUrl is set but mcplocalUrl wasn't explicitly changed,
|
||||
// use daemonUrl as mcplocalUrl
|
||||
|
||||
268
src/cli/tests/commands/config-setup.test.ts
Normal file
268
src/cli/tests/commands/config-setup.test.ts
Normal file
@@ -0,0 +1,268 @@
|
||||
import { describe, it, expect, vi, beforeEach } from 'vitest';
|
||||
import { createConfigSetupCommand } from '../../src/commands/config-setup.js';
|
||||
import type { ConfigSetupDeps, ConfigSetupPrompt } from '../../src/commands/config-setup.js';
|
||||
import type { SecretStore } from '@mcpctl/shared';
|
||||
import { mkdtempSync, rmSync, readFileSync } from 'node:fs';
|
||||
import { join } from 'node:path';
|
||||
import { tmpdir } from 'node:os';
|
||||
|
||||
// Per-test scratch state: isolated config dir + captured wizard output.
let tempDir: string;
let logs: string[];

beforeEach(() => {
  tempDir = mkdtempSync(join(tmpdir(), 'mcpctl-config-setup-test-'));
  logs = [];
});

// Called manually at the end of each test body rather than in afterEach,
// so a failing assertion skips it and leaks the temp dir.
// NOTE(review): consider moving this into afterEach.
function cleanup() {
  rmSync(tempDir, { recursive: true, force: true });
}

// In-memory SecretStore double; `secrets` seeds pre-existing stored keys.
function mockSecretStore(secrets: Record<string, string> = {}): SecretStore {
  const store: Record<string, string> = { ...secrets };
  return {
    get: vi.fn(async (key: string) => store[key] ?? null),
    set: vi.fn(async (key: string, value: string) => { store[key] = value; }),
    delete: vi.fn(async () => true),
    backend: () => 'mock',
  };
}

// Scripted prompt: all four methods consume the SAME answer queue in call
// order, so `answers` must list responses in the exact wizard sequence.
function mockPrompt(answers: unknown[]): ConfigSetupPrompt {
  let callIndex = 0;
  return {
    select: vi.fn(async () => answers[callIndex++]),
    input: vi.fn(async () => answers[callIndex++] as string),
    password: vi.fn(async () => answers[callIndex++] as string),
    confirm: vi.fn(async () => answers[callIndex++] as boolean),
  };
}

// Fully-stubbed ConfigSetupDeps; fetchModels defaults to "no models found".
function buildDeps(overrides: {
  secrets?: Record<string, string>;
  answers?: unknown[];
  fetchModels?: ConfigSetupDeps['fetchModels'];
} = {}): ConfigSetupDeps {
  return {
    configDeps: { configDir: tempDir },
    secretStore: mockSecretStore(overrides.secrets),
    log: (...args: string[]) => logs.push(args.join(' ')),
    prompt: mockPrompt(overrides.answers ?? []),
    fetchModels: overrides.fetchModels ?? vi.fn(async () => []),
  };
}

// Reads back what the wizard persisted to <tempDir>/config.json.
function readConfig(): Record<string, unknown> {
  const raw = readFileSync(join(tempDir, 'config.json'), 'utf-8');
  return JSON.parse(raw) as Record<string, unknown>;
}

// Runs the setup subcommand as if invoked with no CLI arguments.
async function runSetup(deps: ConfigSetupDeps): Promise<void> {
  const cmd = createConfigSetupCommand(deps);
  await cmd.parseAsync([], { from: 'user' });
}
|
||||
|
||||
describe('config setup wizard', () => {
|
||||
describe('provider: none', () => {
  it('disables LLM and saves config', async () => {
    // Answers: select provider only — 'none' needs no further questions.
    const deps = buildDeps({ answers: ['none'] });
    await runSetup(deps);

    const config = readConfig();
    expect(config.llm).toEqual({ provider: 'none' });
    expect(logs.some((l) => l.includes('LLM disabled'))).toBe(true);
    cleanup();
  });
});

describe('provider: gemini-cli', () => {
  it('saves gemini-cli with selected model', async () => {
    // Answers: select provider, select model, confirm custom binary=false
    const deps = buildDeps({ answers: ['gemini-cli', 'gemini-2.5-flash', false] });
    await runSetup(deps);

    const config = readConfig();
    expect((config.llm as Record<string, unknown>).provider).toBe('gemini-cli');
    expect((config.llm as Record<string, unknown>).model).toBe('gemini-2.5-flash');
    cleanup();
  });

  it('saves gemini-cli with custom model and binary path', async () => {
    // Answers: select provider, select custom, enter model name, confirm custom binary=true, enter path
    const deps = buildDeps({ answers: ['gemini-cli', '__custom__', 'gemini-3.0-flash', true, '/opt/gemini'] });
    await runSetup(deps);

    const config = readConfig();
    const llm = config.llm as Record<string, unknown>;
    expect(llm.model).toBe('gemini-3.0-flash');
    expect(llm.binaryPath).toBe('/opt/gemini');
    cleanup();
  });
});
|
||||
|
||||
describe('provider: ollama', () => {
  it('fetches models and allows selection', async () => {
    const fetchModels = vi.fn(async () => ['llama3.2', 'codellama', 'mistral']);
    // Answers: select provider, enter URL, select model
    const deps = buildDeps({
      answers: ['ollama', 'http://localhost:11434', 'codellama'],
      fetchModels,
    });
    await runSetup(deps);

    // Wizard must query the Ollama tags endpoint with the URL the user gave.
    expect(fetchModels).toHaveBeenCalledWith('http://localhost:11434', '/api/tags');
    const config = readConfig();
    const llm = config.llm as Record<string, unknown>;
    expect(llm.provider).toBe('ollama');
    expect(llm.model).toBe('codellama');
    expect(llm.url).toBe('http://localhost:11434');
    cleanup();
  });

  it('falls back to manual input when fetch fails', async () => {
    const fetchModels = vi.fn(async () => []);
    // Answers: select provider, enter URL, enter model manually
    const deps = buildDeps({
      answers: ['ollama', 'http://localhost:11434', 'llama3.2'],
      fetchModels,
    });
    await runSetup(deps);

    const config = readConfig();
    expect((config.llm as Record<string, unknown>).model).toBe('llama3.2');
    cleanup();
  });
});

describe('provider: anthropic', () => {
  it('prompts for API key and saves to secret store', async () => {
    // Answers: select provider, enter API key, select model
    const deps = buildDeps({
      answers: ['anthropic', 'sk-ant-new-key', 'claude-haiku-3-5-20241022'],
    });
    await runSetup(deps);

    expect(deps.secretStore.set).toHaveBeenCalledWith('anthropic-api-key', 'sk-ant-new-key');
    const config = readConfig();
    const llm = config.llm as Record<string, unknown>;
    expect(llm.provider).toBe('anthropic');
    expect(llm.model).toBe('claude-haiku-3-5-20241022');
    // API key should NOT be in config file
    expect(llm).not.toHaveProperty('apiKey');
    cleanup();
  });

  it('shows existing key masked and allows keeping it', async () => {
    // Answers: select provider, confirm change=false, select model
    const deps = buildDeps({
      secrets: { 'anthropic-api-key': 'sk-ant-existing-key-1234' },
      answers: ['anthropic', false, 'claude-sonnet-4-20250514'],
    });
    await runSetup(deps);

    // Should NOT have called set (kept existing key)
    expect(deps.secretStore.set).not.toHaveBeenCalled();
    const config = readConfig();
    expect((config.llm as Record<string, unknown>).model).toBe('claude-sonnet-4-20250514');
    cleanup();
  });

  it('allows replacing existing key', async () => {
    // Answers: select provider, confirm change=true, enter new key, select model
    const deps = buildDeps({
      secrets: { 'anthropic-api-key': 'sk-ant-old' },
      answers: ['anthropic', true, 'sk-ant-new', 'claude-haiku-3-5-20241022'],
    });
    await runSetup(deps);

    expect(deps.secretStore.set).toHaveBeenCalledWith('anthropic-api-key', 'sk-ant-new');
    cleanup();
  });
});
|
||||
|
||||
describe('provider: vllm', () => {
  it('fetches models from vLLM and allows selection', async () => {
    const fetchModels = vi.fn(async () => ['my-model', 'llama-70b']);
    // Answers: select provider, enter URL, select model
    const deps = buildDeps({
      answers: ['vllm', 'http://gpu:8000', 'llama-70b'],
      fetchModels,
    });
    await runSetup(deps);

    // vLLM uses the OpenAI-compatible model listing endpoint.
    expect(fetchModels).toHaveBeenCalledWith('http://gpu:8000', '/v1/models');
    const config = readConfig();
    const llm = config.llm as Record<string, unknown>;
    expect(llm.provider).toBe('vllm');
    expect(llm.url).toBe('http://gpu:8000');
    expect(llm.model).toBe('llama-70b');
    cleanup();
  });
});

describe('provider: openai', () => {
  it('prompts for key, model, and optional custom endpoint', async () => {
    // Answers: select provider, enter key, enter model, confirm custom URL=true, enter URL
    const deps = buildDeps({
      answers: ['openai', 'sk-openai-key', 'gpt-4o', true, 'https://custom.api.com'],
    });
    await runSetup(deps);

    expect(deps.secretStore.set).toHaveBeenCalledWith('openai-api-key', 'sk-openai-key');
    const config = readConfig();
    const llm = config.llm as Record<string, unknown>;
    expect(llm.provider).toBe('openai');
    expect(llm.model).toBe('gpt-4o');
    expect(llm.url).toBe('https://custom.api.com');
    cleanup();
  });

  it('skips custom URL when not requested', async () => {
    // Answers: select provider, enter key, enter model, confirm custom URL=false
    const deps = buildDeps({
      answers: ['openai', 'sk-openai-key', 'gpt-4o-mini', false],
    });
    await runSetup(deps);

    const config = readConfig();
    const llm = config.llm as Record<string, unknown>;
    expect(llm.url).toBeUndefined();
    cleanup();
  });
});

describe('provider: deepseek', () => {
  it('prompts for key and model', async () => {
    // Answers: select provider, enter key, select model
    const deps = buildDeps({
      answers: ['deepseek', 'sk-ds-key', 'deepseek-chat'],
    });
    await runSetup(deps);

    expect(deps.secretStore.set).toHaveBeenCalledWith('deepseek-api-key', 'sk-ds-key');
    const config = readConfig();
    const llm = config.llm as Record<string, unknown>;
    expect(llm.provider).toBe('deepseek');
    expect(llm.model).toBe('deepseek-chat');
    cleanup();
  });
});

// The wizard must always tell the user what was configured and how to
// restart the daemon so the new provider takes effect.
describe('output messages', () => {
  it('shows restart instruction', async () => {
    const deps = buildDeps({ answers: ['gemini-cli', 'gemini-2.5-flash', false] });
    await runSetup(deps);

    expect(logs.some((l) => l.includes('systemctl --user restart mcplocal'))).toBe(true);
    cleanup();
  });

  it('shows configured provider and model', async () => {
    const deps = buildDeps({ answers: ['gemini-cli', 'gemini-2.5-flash', false] });
    await runSetup(deps);

    expect(logs.some((l) => l.includes('gemini-cli') && l.includes('gemini-2.5-flash'))).toBe(true);
    cleanup();
  });
});
|
||||
});
|
||||
@@ -126,4 +126,69 @@ describe('status command', () => {
|
||||
expect(output.join('\n')).toContain('official');
|
||||
expect(output.join('\n')).not.toContain('glama');
|
||||
});
|
||||
|
||||
it('shows LLM not configured hint when no LLM is set', async () => {
|
||||
const cmd = createStatusCommand({
|
||||
configDeps: { configDir: tempDir },
|
||||
credentialsDeps: { configDir: tempDir },
|
||||
log,
|
||||
checkHealth: async () => true,
|
||||
});
|
||||
await cmd.parseAsync([], { from: 'user' });
|
||||
const out = output.join('\n');
|
||||
expect(out).toContain('LLM:');
|
||||
expect(out).toContain('not configured');
|
||||
expect(out).toContain('mcpctl config setup');
|
||||
});
|
||||
|
||||
it('shows configured LLM provider and model', async () => {
|
||||
saveConfig({ ...DEFAULT_CONFIG, llm: { provider: 'anthropic', model: 'claude-haiku-3-5-20241022' } }, { configDir: tempDir });
|
||||
const cmd = createStatusCommand({
|
||||
configDeps: { configDir: tempDir },
|
||||
credentialsDeps: { configDir: tempDir },
|
||||
log,
|
||||
checkHealth: async () => true,
|
||||
});
|
||||
await cmd.parseAsync([], { from: 'user' });
|
||||
const out = output.join('\n');
|
||||
expect(out).toContain('LLM:');
|
||||
expect(out).toContain('anthropic / claude-haiku-3-5-20241022');
|
||||
});
|
||||
|
||||
it('shows not configured when LLM provider is none', async () => {
|
||||
saveConfig({ ...DEFAULT_CONFIG, llm: { provider: 'none' } }, { configDir: tempDir });
|
||||
const cmd = createStatusCommand({
|
||||
configDeps: { configDir: tempDir },
|
||||
credentialsDeps: { configDir: tempDir },
|
||||
log,
|
||||
checkHealth: async () => true,
|
||||
});
|
||||
await cmd.parseAsync([], { from: 'user' });
|
||||
expect(output.join('\n')).toContain('not configured');
|
||||
});
|
||||
|
||||
it('includes llm field in JSON output', async () => {
|
||||
saveConfig({ ...DEFAULT_CONFIG, llm: { provider: 'gemini-cli', model: 'gemini-2.5-flash' } }, { configDir: tempDir });
|
||||
const cmd = createStatusCommand({
|
||||
configDeps: { configDir: tempDir },
|
||||
credentialsDeps: { configDir: tempDir },
|
||||
log,
|
||||
checkHealth: async () => true,
|
||||
});
|
||||
await cmd.parseAsync(['-o', 'json'], { from: 'user' });
|
||||
const parsed = JSON.parse(output[0]) as Record<string, unknown>;
|
||||
expect(parsed['llm']).toBe('gemini-cli / gemini-2.5-flash');
|
||||
});
|
||||
|
||||
it('includes null llm in JSON output when not configured', async () => {
|
||||
const cmd = createStatusCommand({
|
||||
configDeps: { configDir: tempDir },
|
||||
credentialsDeps: { configDir: tempDir },
|
||||
log,
|
||||
checkHealth: async () => true,
|
||||
});
|
||||
await cmd.parseAsync(['-o', 'json'], { from: 'user' });
|
||||
const parsed = JSON.parse(output[0]) as Record<string, unknown>;
|
||||
expect(parsed['llm']).toBeNull();
|
||||
});
|
||||
});
|
||||
|
||||
Reference in New Issue
Block a user