feat(project): Project.llmProvider semantically names an Llm resource
Why: Phases 0-3 built the server-managed Llm registry; this phase pivots the existing Project.llmProvider column from "local provider hint" to "named Llm reference" so operators can pick a centralised Llm per project. No schema change — the column stays a free-form string for backward compat. - `mcpctl create project --llm <name>` (+ `--llm-model <override>`) sets llmProvider/llmModel to a centralised Llm reference, or 'none' to disable. - `mcpctl describe project` fetches the Llm catalogue alongside prompts and flags values that don't resolve with a visible warning. 'none' is treated as an explicit disable, not an orphan. - `apply -f` doc comments updated; --llm-provider still accepted but now documented as naming an Llm resource. - New `resolveProjectLlmReference(mcpdClient, name)` helper in mcplocal's discovery: returns `registered`/`disabled`/`unregistered`/`unreachable`. The HTTP-mode proxy-model pipeline will consume this when it pivots to mcpd's /api/v1/llms/:name/infer proxy. - project-mcp-endpoint.ts cache-namespace path gets a comment explaining the new resolution order — behavior unchanged, just clarified. Tests: 6 resolver unit tests + 3 new describe-warning cases. Full suite 1853/1853 (+9 from Phase 3's 1844). TypeScript clean; completions regenerated for the new create-project flags. Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
This commit is contained in:
@@ -57,9 +57,16 @@ export async function refreshProjectUpstreams(
|
||||
|
||||
/**
 * Fetch a project's LLM config (llmProvider, llmModel) from mcpd.
 * These are the project-level "recommendations" — local overrides take priority.
 *
 * Phase 4 redefines `llmProvider` semantically: it names a centralized `Llm`
 * resource (see `mcpctl get llms`) — NOT a local provider. Consumers should
 * resolve it through mcpd's inference proxy when reachable. The field remains
 * a free-form string on the wire for backward compatibility; local overrides
 * in `~/.mcpctl/config.json` still take priority, and unknown names fall
 * through to the registry default.
 */
export interface ProjectLlmConfig {
  /** Name of an `Llm` resource on mcpd, or 'none' to disable LLM features. */
  llmProvider?: string;
  /** Model override set alongside the Llm reference (`--llm-model`). */
  llmModel?: string;
  /**
   * Model used by the HTTP-mode proxy-model pipeline.
   * NOTE(review): exact semantics live in the consumer — confirm there.
   */
  proxyModel?: string;
  /** Per-server overrides, keyed by server name; currently only `proxyModel`. */
  serverOverrides?: Record<string, { proxyModel?: string }>;
}
|
||||
|
||||
/**
 * Outcome of resolving a project's `llmProvider` against mcpd's Llm registry
 * (produced by `resolveProjectLlmReference`):
 * - 'registered'   — an Llm with this name exists
 * - 'disabled'     — the value is the literal 'none' (explicit opt-out)
 * - 'unregistered' — no Llm matches (consumer should fall back to the registry default)
 * - 'unreachable'  — mcpd couldn't be queried
 */
export type LlmReferenceStatus = 'registered' | 'disabled' | 'unregistered' | 'unreachable';
|
||||
|
||||
export async function resolveProjectLlmReference(
|
||||
mcpdClient: McpdClient,
|
||||
llmProvider: string | undefined,
|
||||
): Promise<LlmReferenceStatus> {
|
||||
if (llmProvider === undefined || llmProvider === '') return 'unregistered';
|
||||
if (llmProvider === 'none') return 'disabled';
|
||||
try {
|
||||
await mcpdClient.get(`/api/v1/llms/${encodeURIComponent(llmProvider)}`);
|
||||
return 'registered';
|
||||
} catch (err) {
|
||||
const msg = err instanceof Error ? err.message : String(err);
|
||||
if (msg.includes('404') || msg.toLowerCase().includes('not found')) return 'unregistered';
|
||||
return 'unreachable';
|
||||
}
|
||||
}
|
||||
|
||||
export async function fetchProjectLlmConfig(
|
||||
mcpdClient: McpdClient,
|
||||
projectName: string,
|
||||
|
||||
@@ -101,7 +101,16 @@ export function registerProjectMcpEndpoint(app: FastifyInstance, mcpdClient: Mcp
|
||||
complete: async () => '',
|
||||
available: () => false,
|
||||
};
|
||||
// Build cache namespace: provider--model--proxymodel
|
||||
// Build cache namespace: provider--model--proxymodel.
|
||||
// Resolution order:
|
||||
// 1. local ~/.mcpctl override
|
||||
// 2. mcpdConfig.llmProvider (Phase 4: name of a centralized Llm)
|
||||
// 3. local registry default (fast tier → active provider)
|
||||
// 4. literal 'none'
|
||||
// If (2) names an Llm the HTTP-mode proxy-model pipeline can route
|
||||
// through mcpd's /api/v1/llms/:name/infer (pivot lands when the client
|
||||
// integrates that path); meanwhile the value is still usable as a cache
|
||||
// key, and the describe-project warning flags stale configs.
|
||||
const llmProvider = localOverride?.provider ?? mcpdConfig.llmProvider
|
||||
?? effectiveRegistry?.getTierProviders('fast')[0]
|
||||
?? effectiveRegistry?.getActiveName()
|
||||
|
||||
45
src/mcplocal/tests/llm-reference-resolver.test.ts
Normal file
45
src/mcplocal/tests/llm-reference-resolver.test.ts
Normal file
@@ -0,0 +1,45 @@
|
||||
import { describe, it, expect, vi } from 'vitest';
|
||||
import { resolveProjectLlmReference } from '../src/discovery.js';
|
||||
import type { McpdClient } from '../src/http/mcpd-client.js';
|
||||
|
||||
function mockClient(get: (path: string) => Promise<unknown>): McpdClient {
|
||||
return { get } as unknown as McpdClient;
|
||||
}
|
||||
|
||||
describe('resolveProjectLlmReference', () => {
  // 'none' is an explicit opt-out: the resolver must short-circuit without
  // ever hitting mcpd, so the mock throws if its get() is called.
  it('returns "disabled" for the literal string "none"', async () => {
    const client = mockClient(async () => { throw new Error('should not be called'); });
    expect(await resolveProjectLlmReference(client, 'none')).toBe('disabled');
  });

  // Missing/empty values mean "never configured" — also resolved locally,
  // with no mcpd round-trip.
  it('returns "unregistered" when llmProvider is empty or undefined', async () => {
    const client = mockClient(async () => { throw new Error('should not be called'); });
    expect(await resolveProjectLlmReference(client, undefined)).toBe('unregistered');
    expect(await resolveProjectLlmReference(client, '')).toBe('unregistered');
  });

  // Happy path: a 200 from the per-name endpoint proves registration; also
  // pins the exact endpoint path the resolver queries.
  it('returns "registered" when mcpd returns 200 for the name', async () => {
    const get = vi.fn(async () => ({ name: 'claude' }));
    expect(await resolveProjectLlmReference(mockClient(get), 'claude')).toBe('registered');
    expect(get).toHaveBeenCalledWith('/api/v1/llms/claude');
  });

  // The resolver classifies errors by message text: a not-found signature
  // maps to 'unregistered' rather than 'unreachable'.
  it('returns "unregistered" on 404', async () => {
    const client = mockClient(async () => { throw new Error('HTTP 404 not found'); });
    expect(await resolveProjectLlmReference(client, 'missing')).toBe('unregistered');
  });

  // Any non-404 failure — server error or connection refusal — means mcpd
  // couldn't be consulted at all.
  it('returns "unreachable" on other errors (500, network)', async () => {
    const client = mockClient(async () => { throw new Error('HTTP 500 internal error'); });
    expect(await resolveProjectLlmReference(client, 'x')).toBe('unreachable');

    const client2 = mockClient(async () => { throw new Error('ECONNREFUSED'); });
    expect(await resolveProjectLlmReference(client2, 'x')).toBe('unreachable');
  });

  // llmProvider is a free-form string, so spaces/slashes must be
  // percent-encoded before being spliced into the URL path.
  it('URL-encodes names with special characters', async () => {
    const get = vi.fn(async () => ({}));
    await resolveProjectLlmReference(mockClient(get), 'weird name/with/slashes');
    expect(get).toHaveBeenCalledWith('/api/v1/llms/weird%20name%2Fwith%2Fslashes');
  });
});
|
||||
Reference in New Issue
Block a user