Compare commits
30 Commits
feat/compl
...
feat/compl
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
9481d394a1 | ||
|
|
bc769c4eeb | ||
| 6f534c8ba9 | |||
|
|
11da8b1fbf | ||
|
|
848868d45f | ||
|
|
869217a07a | ||
| 04d115933b | |||
|
|
7c23da10c6 | ||
| 32b4de4343 | |||
|
|
e06db9afba | ||
|
|
a25809b84a | ||
| f5a902d3e0 | |||
|
|
9cb0c5ce24 | ||
| 06230ec034 | |||
|
|
079c7b3dfa | ||
|
|
7829f4fb92 | ||
|
|
fa6240107f | ||
| b34ea63d3d | |||
|
|
e17a2282e8 | ||
| 01d3c4e02d | |||
|
|
e4affe5962 | ||
| c75e7cdf4d | |||
|
|
65c340a03c | ||
| 677d34b868 | |||
|
|
c5b8cb60b7 | ||
| 9a5deffb8f | |||
|
|
ec7ada5383 | ||
| b81d3be2d5 | |||
|
|
e2c54bfc5c | ||
| 7b7854b007 |
@@ -2,10 +2,10 @@ _mcpctl() {
|
||||
local cur prev words cword
|
||||
_init_completion || return
|
||||
|
||||
local commands="status login logout config get describe delete logs create edit apply backup restore help"
|
||||
local commands="status login logout config get describe delete logs create edit apply backup restore mcp approve help"
|
||||
local project_commands="attach-server detach-server get describe delete logs create edit help"
|
||||
local global_opts="-v --version --daemon-url --direct --project -h --help"
|
||||
local resources="servers instances secrets templates projects users groups rbac"
|
||||
local resources="servers instances secrets templates projects users groups rbac prompts promptrequests"
|
||||
|
||||
# Check if --project was given
|
||||
local has_project=false
|
||||
@@ -46,23 +46,39 @@ _mcpctl() {
|
||||
# If completing the --project value
|
||||
if [[ "$prev" == "--project" ]]; then
|
||||
local names
|
||||
names=$(mcpctl get projects -o json 2>/dev/null | grep -oP '"name":\s*"\K[^"]+')
|
||||
names=$(mcpctl get projects -o json 2>/dev/null | jq -r '.[][].name' 2>/dev/null)
|
||||
COMPREPLY=($(compgen -W "$names" -- "$cur"))
|
||||
return
|
||||
fi
|
||||
|
||||
# Fetch resource names dynamically
|
||||
# Fetch resource names dynamically (jq extracts only top-level names)
|
||||
_mcpctl_resource_names() {
|
||||
local rt="$1"
|
||||
if [[ -n "$rt" ]]; then
|
||||
mcpctl get "$rt" -o json 2>/dev/null | grep -oP '"name":\s*"\K[^"]+'
|
||||
# Instances don't have a name field — use server.name instead
|
||||
if [[ "$rt" == "instances" ]]; then
|
||||
mcpctl get instances -o json 2>/dev/null | jq -r '.[][].server.name' 2>/dev/null
|
||||
else
|
||||
mcpctl get "$rt" -o json 2>/dev/null | jq -r '.[][].name' 2>/dev/null
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
# Get the --project value from the command line
|
||||
_mcpctl_get_project_value() {
|
||||
local i
|
||||
for ((i=1; i < cword; i++)); do
|
||||
if [[ "${words[i]}" == "--project" ]] && (( i+1 < cword )); then
|
||||
echo "${words[i+1]}"
|
||||
return
|
||||
fi
|
||||
done
|
||||
}
|
||||
|
||||
case "$subcmd" in
|
||||
config)
|
||||
if [[ $((cword - subcmd_pos)) -eq 1 ]]; then
|
||||
COMPREPLY=($(compgen -W "view set path reset claude-generate impersonate help" -- "$cur"))
|
||||
COMPREPLY=($(compgen -W "view set path reset claude claude-generate setup impersonate help" -- "$cur"))
|
||||
fi
|
||||
return ;;
|
||||
status)
|
||||
@@ -73,6 +89,8 @@ _mcpctl() {
|
||||
return ;;
|
||||
logout)
|
||||
return ;;
|
||||
mcp)
|
||||
return ;;
|
||||
get|describe|delete)
|
||||
if [[ -z "$resource_type" ]]; then
|
||||
COMPREPLY=($(compgen -W "$resources" -- "$cur"))
|
||||
@@ -96,7 +114,7 @@ _mcpctl() {
|
||||
return ;;
|
||||
create)
|
||||
if [[ $((cword - subcmd_pos)) -eq 1 ]]; then
|
||||
COMPREPLY=($(compgen -W "server secret project user group rbac help" -- "$cur"))
|
||||
COMPREPLY=($(compgen -W "server secret project user group rbac prompt promptrequest help" -- "$cur"))
|
||||
fi
|
||||
return ;;
|
||||
apply)
|
||||
@@ -108,11 +126,39 @@ _mcpctl() {
|
||||
restore)
|
||||
COMPREPLY=($(compgen -W "-i --input -p --password -c --conflict -h --help" -- "$cur"))
|
||||
return ;;
|
||||
attach-server|detach-server)
|
||||
local names
|
||||
attach-server)
|
||||
# Only complete if no server arg given yet (first arg after subcmd)
|
||||
if [[ $((cword - subcmd_pos)) -ne 1 ]]; then return; fi
|
||||
local proj names all_servers proj_servers
|
||||
proj=$(_mcpctl_get_project_value)
|
||||
if [[ -n "$proj" ]]; then
|
||||
all_servers=$(mcpctl get servers -o json 2>/dev/null | jq -r '.[][].name' 2>/dev/null)
|
||||
proj_servers=$(mcpctl --project "$proj" get servers -o json 2>/dev/null | jq -r '.[][].name' 2>/dev/null)
|
||||
names=$(comm -23 <(echo "$all_servers" | sort) <(echo "$proj_servers" | sort))
|
||||
else
|
||||
names=$(_mcpctl_resource_names "servers")
|
||||
fi
|
||||
COMPREPLY=($(compgen -W "$names" -- "$cur"))
|
||||
return ;;
|
||||
detach-server)
|
||||
# Only complete if no server arg given yet (first arg after subcmd)
|
||||
if [[ $((cword - subcmd_pos)) -ne 1 ]]; then return; fi
|
||||
local proj names
|
||||
proj=$(_mcpctl_get_project_value)
|
||||
if [[ -n "$proj" ]]; then
|
||||
names=$(mcpctl --project "$proj" get servers -o json 2>/dev/null | jq -r '.[][].name' 2>/dev/null)
|
||||
fi
|
||||
COMPREPLY=($(compgen -W "$names" -- "$cur"))
|
||||
return ;;
|
||||
approve)
|
||||
if [[ -z "$resource_type" ]]; then
|
||||
COMPREPLY=($(compgen -W "promptrequest" -- "$cur"))
|
||||
else
|
||||
local names
|
||||
names=$(_mcpctl_resource_names "$resource_type")
|
||||
COMPREPLY=($(compgen -W "$names" -- "$cur"))
|
||||
fi
|
||||
return ;;
|
||||
help)
|
||||
COMPREPLY=($(compgen -W "$commands" -- "$cur"))
|
||||
return ;;
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
# Erase any stale completions from previous versions
|
||||
complete -c mcpctl -e
|
||||
|
||||
set -l commands status login logout config get describe delete logs create edit apply backup restore help
|
||||
set -l commands status login logout config get describe delete logs create edit apply backup restore mcp approve help
|
||||
set -l project_commands attach-server detach-server get describe delete logs create edit help
|
||||
|
||||
# Disable file completions by default
|
||||
@@ -28,7 +28,7 @@ function __mcpctl_has_project
|
||||
end
|
||||
|
||||
# Helper: check if a resource type has been selected after get/describe/delete/edit
|
||||
set -l resources servers instances secrets templates projects users groups rbac
|
||||
set -l resources servers instances secrets templates projects users groups rbac prompts promptrequests
|
||||
|
||||
function __mcpctl_needs_resource_type
|
||||
set -l tokens (commandline -opc)
|
||||
@@ -36,11 +36,11 @@ function __mcpctl_needs_resource_type
|
||||
for tok in $tokens
|
||||
if $found_cmd
|
||||
# Check if next token after get/describe/delete/edit is a resource type
|
||||
if contains -- $tok servers instances secrets templates projects users groups rbac
|
||||
if contains -- $tok servers instances secrets templates projects users groups rbac prompts promptrequests
|
||||
return 1 # resource type already present
|
||||
end
|
||||
end
|
||||
if contains -- $tok get describe delete edit
|
||||
if contains -- $tok get describe delete edit approve
|
||||
set found_cmd true
|
||||
end
|
||||
end
|
||||
@@ -55,30 +55,71 @@ function __mcpctl_get_resource_type
|
||||
set -l found_cmd false
|
||||
for tok in $tokens
|
||||
if $found_cmd
|
||||
if contains -- $tok servers instances secrets templates projects users groups rbac
|
||||
if contains -- $tok servers instances secrets templates projects users groups rbac prompts promptrequests
|
||||
echo $tok
|
||||
return
|
||||
end
|
||||
end
|
||||
if contains -- $tok get describe delete edit
|
||||
if contains -- $tok get describe delete edit approve
|
||||
set found_cmd true
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
# Fetch resource names dynamically from the API
|
||||
# Fetch resource names dynamically from the API (jq extracts only top-level names)
|
||||
function __mcpctl_resource_names
|
||||
set -l resource (__mcpctl_get_resource_type)
|
||||
if test -z "$resource"
|
||||
return
|
||||
end
|
||||
# Use mcpctl to fetch names (quick JSON parse with string manipulation)
|
||||
mcpctl get $resource -o json 2>/dev/null | string match -rg '"name":\s*"([^"]+)"'
|
||||
# Instances don't have a name field — use server.name instead
|
||||
if test "$resource" = "instances"
|
||||
mcpctl get instances -o json 2>/dev/null | jq -r '.[][].server.name' 2>/dev/null
|
||||
else
|
||||
mcpctl get $resource -o json 2>/dev/null | jq -r '.[][].name' 2>/dev/null
|
||||
end
|
||||
end
|
||||
|
||||
# Fetch project names for --project value
|
||||
function __mcpctl_project_names
|
||||
mcpctl get projects -o json 2>/dev/null | string match -rg '"name":\s*"([^"]+)"'
|
||||
mcpctl get projects -o json 2>/dev/null | jq -r '.[][].name' 2>/dev/null
|
||||
end
|
||||
|
||||
# Helper: get the --project value from the command line
|
||||
function __mcpctl_get_project_value
|
||||
set -l tokens (commandline -opc)
|
||||
for i in (seq (count $tokens))
|
||||
if test "$tokens[$i]" = "--project"; and test $i -lt (count $tokens)
|
||||
echo $tokens[(math $i + 1)]
|
||||
return
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
# Servers currently attached to the project (for detach-server)
|
||||
function __mcpctl_project_servers
|
||||
set -l proj (__mcpctl_get_project_value)
|
||||
if test -z "$proj"
|
||||
return
|
||||
end
|
||||
mcpctl --project $proj get servers -o json 2>/dev/null | jq -r '.[][].name' 2>/dev/null
|
||||
end
|
||||
|
||||
# Servers NOT attached to the project (for attach-server)
|
||||
function __mcpctl_available_servers
|
||||
set -l proj (__mcpctl_get_project_value)
|
||||
if test -z "$proj"
|
||||
# No project — show all servers
|
||||
mcpctl get servers -o json 2>/dev/null | jq -r '.[][].name' 2>/dev/null
|
||||
return
|
||||
end
|
||||
set -l all (mcpctl get servers -o json 2>/dev/null | jq -r '.[][].name' 2>/dev/null)
|
||||
set -l attached (mcpctl --project $proj get servers -o json 2>/dev/null | jq -r '.[][].name' 2>/dev/null)
|
||||
for s in $all
|
||||
if not contains -- $s $attached
|
||||
echo $s
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
# --project value completion
|
||||
@@ -98,6 +139,7 @@ complete -c mcpctl -n "not __mcpctl_has_project; and not __fish_seen_subcommand_
|
||||
complete -c mcpctl -n "not __mcpctl_has_project; and not __fish_seen_subcommand_from $commands" -a apply -d 'Apply configuration from file'
|
||||
complete -c mcpctl -n "not __mcpctl_has_project; and not __fish_seen_subcommand_from $commands" -a backup -d 'Backup configuration'
|
||||
complete -c mcpctl -n "not __mcpctl_has_project; and not __fish_seen_subcommand_from $commands" -a restore -d 'Restore from backup'
|
||||
complete -c mcpctl -n "not __mcpctl_has_project; and not __fish_seen_subcommand_from $commands" -a approve -d 'Approve a prompt request'
|
||||
complete -c mcpctl -n "not __mcpctl_has_project; and not __fish_seen_subcommand_from $commands" -a help -d 'Show help'
|
||||
|
||||
# Project-scoped commands (with --project)
|
||||
@@ -116,7 +158,33 @@ complete -c mcpctl -n "__fish_seen_subcommand_from get describe delete; and __mc
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from edit; and __mcpctl_needs_resource_type" -a 'servers projects' -d 'Resource type'
|
||||
|
||||
# Resource names — after resource type is selected
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from get describe delete edit; and not __mcpctl_needs_resource_type" -a '(__mcpctl_resource_names)' -d 'Resource name'
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from get describe delete edit approve; and not __mcpctl_needs_resource_type" -a '(__mcpctl_resource_names)' -d 'Resource name'
|
||||
|
||||
# Helper: check if attach-server/detach-server already has a server argument
|
||||
function __mcpctl_needs_server_arg
|
||||
set -l tokens (commandline -opc)
|
||||
set -l found_cmd false
|
||||
for tok in $tokens
|
||||
if $found_cmd
|
||||
if not string match -q -- '-*' $tok
|
||||
return 1 # server arg already present
|
||||
end
|
||||
end
|
||||
if contains -- $tok attach-server detach-server
|
||||
set found_cmd true
|
||||
end
|
||||
end
|
||||
if $found_cmd
|
||||
return 0 # command found but no server arg yet
|
||||
end
|
||||
return 1
|
||||
end
|
||||
|
||||
# attach-server: show servers NOT in the project (only if no server arg yet)
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from attach-server; and __mcpctl_needs_server_arg" -a '(__mcpctl_available_servers)' -d 'Server'
|
||||
|
||||
# detach-server: show servers IN the project (only if no server arg yet)
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from detach-server; and __mcpctl_needs_server_arg" -a '(__mcpctl_project_servers)' -d 'Server'
|
||||
|
||||
# get/describe options
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from get" -s o -l output -d 'Output format' -xa 'table json yaml'
|
||||
@@ -129,22 +197,25 @@ complete -c mcpctl -n "__fish_seen_subcommand_from login" -l email -d 'Email add
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from login" -l password -d 'Password' -x
|
||||
|
||||
# config subcommands
|
||||
set -l config_cmds view set path reset claude-generate impersonate
|
||||
set -l config_cmds view set path reset claude claude-generate setup impersonate
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from config; and not __fish_seen_subcommand_from $config_cmds" -a view -d 'Show configuration'
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from config; and not __fish_seen_subcommand_from $config_cmds" -a set -d 'Set a config value'
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from config; and not __fish_seen_subcommand_from $config_cmds" -a path -d 'Show config file path'
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from config; and not __fish_seen_subcommand_from $config_cmds" -a reset -d 'Reset to defaults'
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from config; and not __fish_seen_subcommand_from $config_cmds" -a claude-generate -d 'Generate .mcp.json'
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from config; and not __fish_seen_subcommand_from $config_cmds" -a claude -d 'Generate .mcp.json for project'
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from config; and not __fish_seen_subcommand_from $config_cmds" -a setup -d 'Configure LLM provider'
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from config; and not __fish_seen_subcommand_from $config_cmds" -a impersonate -d 'Impersonate a user'
|
||||
|
||||
# create subcommands
|
||||
set -l create_cmds server secret project user group rbac
|
||||
set -l create_cmds server secret project user group rbac prompt promptrequest
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from create; and not __fish_seen_subcommand_from $create_cmds" -a server -d 'Create a server'
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from create; and not __fish_seen_subcommand_from $create_cmds" -a secret -d 'Create a secret'
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from create; and not __fish_seen_subcommand_from $create_cmds" -a project -d 'Create a project'
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from create; and not __fish_seen_subcommand_from $create_cmds" -a user -d 'Create a user'
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from create; and not __fish_seen_subcommand_from $create_cmds" -a group -d 'Create a group'
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from create; and not __fish_seen_subcommand_from $create_cmds" -a rbac -d 'Create an RBAC binding'
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from create; and not __fish_seen_subcommand_from $create_cmds" -a prompt -d 'Create an approved prompt'
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from create; and not __fish_seen_subcommand_from $create_cmds" -a promptrequest -d 'Create a prompt request'
|
||||
|
||||
# logs options
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from logs" -l tail -d 'Number of lines' -x
|
||||
@@ -160,6 +231,9 @@ complete -c mcpctl -n "__fish_seen_subcommand_from restore" -s i -l input -d 'In
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from restore" -s p -l password -d 'Decryption password' -x
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from restore" -s c -l conflict -d 'Conflict strategy' -xa 'skip overwrite fail'
|
||||
|
||||
# approve: first arg is resource type (promptrequest only), second is name
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from approve; and __mcpctl_needs_resource_type" -a 'promptrequest' -d 'Resource type'
|
||||
|
||||
# apply takes a file
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from apply" -s f -l file -d 'Configuration file' -rF
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from apply" -F
|
||||
|
||||
@@ -5,6 +5,8 @@ release: "1"
|
||||
maintainer: michal
|
||||
description: kubectl-like CLI for managing MCP servers
|
||||
license: MIT
|
||||
depends:
|
||||
- jq
|
||||
contents:
|
||||
- src: ./dist/mcpctl
|
||||
dst: /usr/bin/mcpctl
|
||||
|
||||
55
pr.sh
Executable file
55
pr.sh
Executable file
@@ -0,0 +1,55 @@
|
||||
#!/usr/bin/env bash
|
||||
# Usage: bash pr.sh "PR title" "PR body"
|
||||
# Loads GITEA_TOKEN from .env automatically
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
# Load .env if GITEA_TOKEN not already exported
|
||||
if [ -z "${GITEA_TOKEN:-}" ] && [ -f .env ]; then
|
||||
set -a
|
||||
source .env
|
||||
set +a
|
||||
fi
|
||||
|
||||
GITEA_URL="${GITEA_URL:-http://10.0.0.194:3012}"
|
||||
REPO="${GITEA_OWNER:-michal}/mcpctl"
|
||||
|
||||
TITLE="${1:?Usage: pr.sh <title> [body]}"
|
||||
BODY="${2:-}"
|
||||
BASE="${3:-main}"
|
||||
HEAD=$(git rev-parse --abbrev-ref HEAD)
|
||||
|
||||
if [ "$HEAD" = "$BASE" ]; then
|
||||
echo "Error: already on $BASE, switch to a feature branch first" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ -z "${GITEA_TOKEN:-}" ]; then
|
||||
echo "Error: GITEA_TOKEN not set and .env not found" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Push if needed
|
||||
if ! git rev-parse --verify "origin/$HEAD" &>/dev/null; then
|
||||
git push -u origin "$HEAD"
|
||||
else
|
||||
git push
|
||||
fi
|
||||
|
||||
# Create PR
|
||||
RESPONSE=$(curl -s -X POST "$GITEA_URL/api/v1/repos/$REPO/pulls" \
|
||||
-H "Authorization: token $GITEA_TOKEN" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d "$(jq -n --arg t "$TITLE" --arg b "$BODY" --arg h "$HEAD" --arg base "$BASE" \
|
||||
'{title: $t, body: $b, head: $h, base: $base}')")
|
||||
|
||||
PR_NUM=$(echo "$RESPONSE" | jq -r '.number // empty')
|
||||
PR_URL=$(echo "$RESPONSE" | jq -r '.html_url // empty')
|
||||
|
||||
if [ -z "$PR_NUM" ]; then
|
||||
echo "Error creating PR:" >&2
|
||||
echo "$RESPONSE" | jq . 2>/dev/null || echo "$RESPONSE" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "PR #$PR_NUM: https://mysources.co.uk/$REPO/pulls/$PR_NUM"
|
||||
@@ -24,7 +24,10 @@ export class ApiError extends Error {
|
||||
function request<T>(method: string, url: string, timeout: number, body?: unknown, token?: string): Promise<ApiResponse<T>> {
|
||||
return new Promise((resolve, reject) => {
|
||||
const parsed = new URL(url);
|
||||
const headers: Record<string, string> = { 'Content-Type': 'application/json' };
|
||||
const headers: Record<string, string> = {};
|
||||
if (body !== undefined) {
|
||||
headers['Content-Type'] = 'application/json';
|
||||
}
|
||||
if (token) {
|
||||
headers['Authorization'] = `Bearer ${token}`;
|
||||
}
|
||||
|
||||
@@ -76,13 +76,14 @@ const GroupSpecSchema = z.object({
|
||||
});
|
||||
|
||||
const RbacSubjectSchema = z.object({
|
||||
kind: z.enum(['User', 'Group']),
|
||||
kind: z.enum(['User', 'Group', 'ServiceAccount']),
|
||||
name: z.string().min(1),
|
||||
});
|
||||
|
||||
const RESOURCE_ALIASES: Record<string, string> = {
|
||||
server: 'servers', instance: 'instances', secret: 'secrets',
|
||||
project: 'projects', template: 'templates', user: 'users', group: 'groups',
|
||||
prompt: 'prompts', promptrequest: 'promptrequests',
|
||||
};
|
||||
|
||||
const RbacRoleBindingSchema = z.union([
|
||||
@@ -103,9 +104,16 @@ const RbacBindingSpecSchema = z.object({
|
||||
roleBindings: z.array(RbacRoleBindingSchema).default([]),
|
||||
});
|
||||
|
||||
const PromptSpecSchema = z.object({
|
||||
name: z.string().min(1).max(100).regex(/^[a-z0-9-]+$/),
|
||||
content: z.string().min(1).max(50000),
|
||||
projectId: z.string().optional(),
|
||||
});
|
||||
|
||||
const ProjectSpecSchema = z.object({
|
||||
name: z.string().min(1),
|
||||
description: z.string().default(''),
|
||||
prompt: z.string().max(10000).default(''),
|
||||
proxyMode: z.enum(['direct', 'filtered']).default('direct'),
|
||||
llmProvider: z.string().optional(),
|
||||
llmModel: z.string().optional(),
|
||||
@@ -121,6 +129,7 @@ const ApplyConfigSchema = z.object({
|
||||
templates: z.array(TemplateSpecSchema).default([]),
|
||||
rbacBindings: z.array(RbacBindingSpecSchema).default([]),
|
||||
rbac: z.array(RbacBindingSpecSchema).default([]),
|
||||
prompts: z.array(PromptSpecSchema).default([]),
|
||||
}).transform((data) => ({
|
||||
...data,
|
||||
// Merge rbac into rbacBindings so both keys work
|
||||
@@ -158,6 +167,7 @@ export function createApplyCommand(deps: ApplyCommandDeps): Command {
|
||||
if (config.projects.length > 0) log(` ${config.projects.length} project(s)`);
|
||||
if (config.templates.length > 0) log(` ${config.templates.length} template(s)`);
|
||||
if (config.rbacBindings.length > 0) log(` ${config.rbacBindings.length} rbacBinding(s)`);
|
||||
if (config.prompts.length > 0) log(` ${config.prompts.length} prompt(s)`);
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -292,6 +302,22 @@ async function applyConfig(client: ApiClient, config: ApplyConfig, log: (...args
|
||||
log(`Error applying rbacBinding '${rbacBinding.name}': ${err instanceof Error ? err.message : err}`);
|
||||
}
|
||||
}
|
||||
|
||||
// Apply prompts
|
||||
for (const prompt of config.prompts) {
|
||||
try {
|
||||
const existing = await findByName(client, 'prompts', prompt.name);
|
||||
if (existing) {
|
||||
await client.put(`/api/v1/prompts/${(existing as { id: string }).id}`, { content: prompt.content });
|
||||
log(`Updated prompt: ${prompt.name}`);
|
||||
} else {
|
||||
await client.post('/api/v1/prompts', prompt);
|
||||
log(`Created prompt: ${prompt.name}`);
|
||||
}
|
||||
} catch (err) {
|
||||
log(`Error applying prompt '${prompt.name}': ${err instanceof Error ? err.message : err}`);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async function findByName(client: ApiClient, resource: string, name: string): Promise<unknown | null> {
|
||||
|
||||
347
src/cli/src/commands/config-setup.ts
Normal file
347
src/cli/src/commands/config-setup.ts
Normal file
@@ -0,0 +1,347 @@
|
||||
import { Command } from 'commander';
|
||||
import http from 'node:http';
|
||||
import https from 'node:https';
|
||||
import { execFile } from 'node:child_process';
|
||||
import { promisify } from 'node:util';
|
||||
import { loadConfig, saveConfig } from '../config/index.js';
|
||||
import type { ConfigLoaderDeps, McpctlConfig, LlmConfig, LlmProviderName } from '../config/index.js';
|
||||
import type { SecretStore } from '@mcpctl/shared';
|
||||
import { createSecretStore } from '@mcpctl/shared';
|
||||
|
||||
const execFileAsync = promisify(execFile);
|
||||
|
||||
export interface ConfigSetupPrompt {
|
||||
select<T>(message: string, choices: Array<{ name: string; value: T; description?: string }>): Promise<T>;
|
||||
input(message: string, defaultValue?: string): Promise<string>;
|
||||
password(message: string): Promise<string>;
|
||||
confirm(message: string, defaultValue?: boolean): Promise<boolean>;
|
||||
}
|
||||
|
||||
export interface ConfigSetupDeps {
|
||||
configDeps: Partial<ConfigLoaderDeps>;
|
||||
secretStore: SecretStore;
|
||||
log: (...args: string[]) => void;
|
||||
prompt: ConfigSetupPrompt;
|
||||
fetchModels: (url: string, path: string) => Promise<string[]>;
|
||||
whichBinary: (name: string) => Promise<string | null>;
|
||||
}
|
||||
|
||||
interface ProviderChoice {
|
||||
name: string;
|
||||
value: LlmProviderName;
|
||||
description: string;
|
||||
}
|
||||
|
||||
const PROVIDER_CHOICES: ProviderChoice[] = [
|
||||
{ name: 'Gemini CLI', value: 'gemini-cli', description: 'Google Gemini via local CLI (free, no API key)' },
|
||||
{ name: 'Ollama', value: 'ollama', description: 'Local models via Ollama' },
|
||||
{ name: 'Anthropic (Claude)', value: 'anthropic', description: 'Claude API (requires API key)' },
|
||||
{ name: 'vLLM', value: 'vllm', description: 'Self-hosted vLLM (OpenAI-compatible)' },
|
||||
{ name: 'OpenAI', value: 'openai', description: 'OpenAI API (requires API key)' },
|
||||
{ name: 'DeepSeek', value: 'deepseek', description: 'DeepSeek API (requires API key)' },
|
||||
{ name: 'None (disable)', value: 'none', description: 'Disable LLM features' },
|
||||
];
|
||||
|
||||
const GEMINI_MODELS = ['gemini-2.5-flash', 'gemini-2.5-pro', 'gemini-2.0-flash'];
|
||||
const ANTHROPIC_MODELS = ['claude-haiku-3-5-20241022', 'claude-sonnet-4-20250514', 'claude-opus-4-20250514'];
|
||||
const DEEPSEEK_MODELS = ['deepseek-chat', 'deepseek-reasoner'];
|
||||
|
||||
function defaultFetchModels(baseUrl: string, path: string): Promise<string[]> {
|
||||
return new Promise((resolve) => {
|
||||
const url = new URL(path, baseUrl);
|
||||
const isHttps = url.protocol === 'https:';
|
||||
const transport = isHttps ? https : http;
|
||||
|
||||
const req = transport.get({
|
||||
hostname: url.hostname,
|
||||
port: url.port || (isHttps ? 443 : 80),
|
||||
path: url.pathname,
|
||||
timeout: 5000,
|
||||
}, (res) => {
|
||||
const chunks: Buffer[] = [];
|
||||
res.on('data', (chunk: Buffer) => chunks.push(chunk));
|
||||
res.on('end', () => {
|
||||
try {
|
||||
const raw = Buffer.concat(chunks).toString('utf-8');
|
||||
const data = JSON.parse(raw) as { models?: Array<{ name: string }>; data?: Array<{ id: string }> };
|
||||
// Ollama format: { models: [{ name }] }
|
||||
if (data.models) {
|
||||
resolve(data.models.map((m) => m.name));
|
||||
return;
|
||||
}
|
||||
// OpenAI/vLLM format: { data: [{ id }] }
|
||||
if (data.data) {
|
||||
resolve(data.data.map((m) => m.id));
|
||||
return;
|
||||
}
|
||||
resolve([]);
|
||||
} catch {
|
||||
resolve([]);
|
||||
}
|
||||
});
|
||||
});
|
||||
req.on('error', () => resolve([]));
|
||||
req.on('timeout', () => { req.destroy(); resolve([]); });
|
||||
});
|
||||
}
|
||||
|
||||
async function defaultSelect<T>(message: string, choices: Array<{ name: string; value: T; description?: string }>): Promise<T> {
|
||||
const { default: inquirer } = await import('inquirer');
|
||||
const { answer } = await inquirer.prompt([{
|
||||
type: 'list',
|
||||
name: 'answer',
|
||||
message,
|
||||
choices: choices.map((c) => ({
|
||||
name: c.description ? `${c.name} — ${c.description}` : c.name,
|
||||
value: c.value,
|
||||
short: c.name,
|
||||
})),
|
||||
}]);
|
||||
return answer as T;
|
||||
}
|
||||
|
||||
async function defaultInput(message: string, defaultValue?: string): Promise<string> {
|
||||
const { default: inquirer } = await import('inquirer');
|
||||
const { answer } = await inquirer.prompt([{
|
||||
type: 'input',
|
||||
name: 'answer',
|
||||
message,
|
||||
default: defaultValue,
|
||||
}]);
|
||||
return answer as string;
|
||||
}
|
||||
|
||||
async function defaultPassword(message: string): Promise<string> {
|
||||
const { default: inquirer } = await import('inquirer');
|
||||
const { answer } = await inquirer.prompt([{ type: 'password', name: 'answer', message }]);
|
||||
return answer as string;
|
||||
}
|
||||
|
||||
async function defaultConfirm(message: string, defaultValue?: boolean): Promise<boolean> {
|
||||
const { default: inquirer } = await import('inquirer');
|
||||
const { answer } = await inquirer.prompt([{
|
||||
type: 'confirm',
|
||||
name: 'answer',
|
||||
message,
|
||||
default: defaultValue ?? true,
|
||||
}]);
|
||||
return answer as boolean;
|
||||
}
|
||||
|
||||
const defaultPrompt: ConfigSetupPrompt = {
|
||||
select: defaultSelect,
|
||||
input: defaultInput,
|
||||
password: defaultPassword,
|
||||
confirm: defaultConfirm,
|
||||
};
|
||||
|
||||
async function defaultWhichBinary(name: string): Promise<string | null> {
|
||||
try {
|
||||
const { stdout } = await execFileAsync('which', [name], { timeout: 3000 });
|
||||
const path = stdout.trim();
|
||||
return path || null;
|
||||
} catch {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
export function createConfigSetupCommand(deps?: Partial<ConfigSetupDeps>): Command {
|
||||
return new Command('setup')
|
||||
.description('Interactive LLM provider setup wizard')
|
||||
.action(async () => {
|
||||
const configDeps = deps?.configDeps ?? {};
|
||||
const log = deps?.log ?? ((...args: string[]) => console.log(...args));
|
||||
const prompt = deps?.prompt ?? defaultPrompt;
|
||||
const fetchModels = deps?.fetchModels ?? defaultFetchModels;
|
||||
const whichBinary = deps?.whichBinary ?? defaultWhichBinary;
|
||||
const secretStore = deps?.secretStore ?? await createSecretStore();
|
||||
|
||||
const config = loadConfig(configDeps);
|
||||
const currentLlm = config.llm;
|
||||
|
||||
// Annotate current provider in choices
|
||||
const choices = PROVIDER_CHOICES.map((c) => {
|
||||
if (currentLlm?.provider === c.value) {
|
||||
return { ...c, name: `${c.name} (current)` };
|
||||
}
|
||||
return c;
|
||||
});
|
||||
|
||||
const provider = await prompt.select<LlmProviderName>('Select LLM provider:', choices);
|
||||
|
||||
if (provider === 'none') {
|
||||
const updated: McpctlConfig = { ...config, llm: { provider: 'none' } };
|
||||
saveConfig(updated, configDeps);
|
||||
log('LLM disabled. Restart mcplocal: systemctl --user restart mcplocal');
|
||||
return;
|
||||
}
|
||||
|
||||
let llmConfig: LlmConfig;
|
||||
|
||||
switch (provider) {
|
||||
case 'gemini-cli':
|
||||
llmConfig = await setupGeminiCli(prompt, log, whichBinary, currentLlm);
|
||||
break;
|
||||
case 'ollama':
|
||||
llmConfig = await setupOllama(prompt, fetchModels, currentLlm);
|
||||
break;
|
||||
case 'anthropic':
|
||||
llmConfig = await setupApiKeyProvider(prompt, secretStore, 'anthropic', 'anthropic-api-key', ANTHROPIC_MODELS, currentLlm);
|
||||
break;
|
||||
case 'vllm':
|
||||
llmConfig = await setupVllm(prompt, fetchModels, currentLlm);
|
||||
break;
|
||||
case 'openai':
|
||||
llmConfig = await setupApiKeyProvider(prompt, secretStore, 'openai', 'openai-api-key', [], currentLlm);
|
||||
break;
|
||||
case 'deepseek':
|
||||
llmConfig = await setupApiKeyProvider(prompt, secretStore, 'deepseek', 'deepseek-api-key', DEEPSEEK_MODELS, currentLlm);
|
||||
break;
|
||||
default:
|
||||
return;
|
||||
}
|
||||
|
||||
const updated: McpctlConfig = { ...config, llm: llmConfig };
|
||||
saveConfig(updated, configDeps);
|
||||
log(`\nLLM configured: ${llmConfig.provider}${llmConfig.model ? ` / ${llmConfig.model}` : ''}`);
|
||||
log('Restart mcplocal: systemctl --user restart mcplocal');
|
||||
});
|
||||
}
|
||||
|
||||
async function setupGeminiCli(
|
||||
prompt: ConfigSetupPrompt,
|
||||
log: (...args: string[]) => void,
|
||||
whichBinary: (name: string) => Promise<string | null>,
|
||||
current?: LlmConfig,
|
||||
): Promise<LlmConfig> {
|
||||
const model = await prompt.select<string>('Select model:', [
|
||||
...GEMINI_MODELS.map((m) => ({
|
||||
name: m === current?.model ? `${m} (current)` : m,
|
||||
value: m,
|
||||
})),
|
||||
{ name: 'Custom...', value: '__custom__' },
|
||||
]);
|
||||
|
||||
const finalModel = model === '__custom__'
|
||||
? await prompt.input('Model name:', current?.model)
|
||||
: model;
|
||||
|
||||
// Auto-detect gemini binary path
|
||||
let binaryPath: string | undefined;
|
||||
const detected = await whichBinary('gemini');
|
||||
if (detected) {
|
||||
log(`Found gemini at: ${detected}`);
|
||||
binaryPath = detected;
|
||||
} else {
|
||||
log('Warning: gemini binary not found in PATH');
|
||||
const manualPath = await prompt.input('Binary path (or install with: npm i -g @google/gemini-cli):');
|
||||
if (manualPath) binaryPath = manualPath;
|
||||
}
|
||||
|
||||
return { provider: 'gemini-cli', model: finalModel, binaryPath };
|
||||
}
|
||||
|
||||
async function setupOllama(prompt: ConfigSetupPrompt, fetchModels: ConfigSetupDeps['fetchModels'], current?: LlmConfig): Promise<LlmConfig> {
|
||||
const url = await prompt.input('Ollama URL:', current?.url ?? 'http://localhost:11434');
|
||||
|
||||
// Try to fetch models from Ollama
|
||||
const models = await fetchModels(url, '/api/tags');
|
||||
let model: string;
|
||||
|
||||
if (models.length > 0) {
|
||||
const choices = models.map((m) => ({
|
||||
name: m === current?.model ? `${m} (current)` : m,
|
||||
value: m,
|
||||
}));
|
||||
choices.push({ name: 'Custom...', value: '__custom__' });
|
||||
model = await prompt.select<string>('Select model:', choices);
|
||||
if (model === '__custom__') {
|
||||
model = await prompt.input('Model name:', current?.model);
|
||||
}
|
||||
} else {
|
||||
model = await prompt.input('Model name (could not fetch models):', current?.model ?? 'llama3.2');
|
||||
}
|
||||
|
||||
return { provider: 'ollama', model, url };
|
||||
}
|
||||
|
||||
async function setupVllm(prompt: ConfigSetupPrompt, fetchModels: ConfigSetupDeps['fetchModels'], current?: LlmConfig): Promise<LlmConfig> {
|
||||
const url = await prompt.input('vLLM URL:', current?.url ?? 'http://localhost:8000');
|
||||
|
||||
// Try to fetch models from vLLM (OpenAI-compatible)
|
||||
const models = await fetchModels(url, '/v1/models');
|
||||
let model: string;
|
||||
|
||||
if (models.length > 0) {
|
||||
const choices = models.map((m) => ({
|
||||
name: m === current?.model ? `${m} (current)` : m,
|
||||
value: m,
|
||||
}));
|
||||
choices.push({ name: 'Custom...', value: '__custom__' });
|
||||
model = await prompt.select<string>('Select model:', choices);
|
||||
if (model === '__custom__') {
|
||||
model = await prompt.input('Model name:', current?.model);
|
||||
}
|
||||
} else {
|
||||
model = await prompt.input('Model name (could not fetch models):', current?.model ?? 'default');
|
||||
}
|
||||
|
||||
return { provider: 'vllm', model, url };
|
||||
}
|
||||
|
||||
async function setupApiKeyProvider(
|
||||
prompt: ConfigSetupPrompt,
|
||||
secretStore: SecretStore,
|
||||
provider: LlmProviderName,
|
||||
secretKey: string,
|
||||
hardcodedModels: string[],
|
||||
current?: LlmConfig,
|
||||
): Promise<LlmConfig> {
|
||||
// Check for existing API key
|
||||
const existingKey = await secretStore.get(secretKey);
|
||||
let apiKey: string;
|
||||
|
||||
if (existingKey) {
|
||||
const masked = `****${existingKey.slice(-4)}`;
|
||||
const changeKey = await prompt.confirm(`API key stored (${masked}). Change it?`, false);
|
||||
if (changeKey) {
|
||||
apiKey = await prompt.password('API key:');
|
||||
} else {
|
||||
apiKey = existingKey;
|
||||
}
|
||||
} else {
|
||||
apiKey = await prompt.password('API key:');
|
||||
}
|
||||
|
||||
// Store API key
|
||||
if (apiKey !== existingKey) {
|
||||
await secretStore.set(secretKey, apiKey);
|
||||
}
|
||||
|
||||
// Model selection
|
||||
let model: string;
|
||||
if (hardcodedModels.length > 0) {
|
||||
const choices = hardcodedModels.map((m) => ({
|
||||
name: m === current?.model ? `${m} (current)` : m,
|
||||
value: m,
|
||||
}));
|
||||
choices.push({ name: 'Custom...', value: '__custom__' });
|
||||
model = await prompt.select<string>('Select model:', choices);
|
||||
if (model === '__custom__') {
|
||||
model = await prompt.input('Model name:', current?.model);
|
||||
}
|
||||
} else {
|
||||
model = await prompt.input('Model name:', current?.model ?? 'gpt-4o');
|
||||
}
|
||||
|
||||
// Optional custom URL for openai
|
||||
let url: string | undefined;
|
||||
if (provider === 'openai') {
|
||||
const customUrl = await prompt.confirm('Use custom API endpoint?', false);
|
||||
if (customUrl) {
|
||||
url = await prompt.input('API URL:', current?.url ?? 'https://api.openai.com');
|
||||
}
|
||||
}
|
||||
|
||||
return { provider, model, url };
|
||||
}
|
||||
@@ -6,11 +6,12 @@ import { loadConfig, saveConfig, mergeConfig, getConfigPath, DEFAULT_CONFIG } fr
|
||||
import type { McpctlConfig, ConfigLoaderDeps } from '../config/index.js';
|
||||
import { formatJson, formatYaml } from '../formatters/index.js';
|
||||
import { saveCredentials, loadCredentials } from '../auth/index.js';
|
||||
import { createConfigSetupCommand } from './config-setup.js';
|
||||
import type { CredentialsDeps, StoredCredentials } from '../auth/index.js';
|
||||
import type { ApiClient } from '../api-client.js';
|
||||
|
||||
interface McpConfig {
|
||||
mcpServers: Record<string, { command: string; args: string[]; env?: Record<string, string> }>;
|
||||
mcpServers: Record<string, { command?: string; args?: string[]; url?: string; env?: Record<string, string> }>;
|
||||
}
|
||||
|
||||
export interface ConfigCommandDeps {
|
||||
@@ -84,21 +85,27 @@ export function createConfigCommand(deps?: Partial<ConfigCommandDeps>, apiDeps?:
|
||||
log('Configuration reset to defaults');
|
||||
});
|
||||
|
||||
if (apiDeps) {
|
||||
const { client, credentialsDeps, log: apiLog } = apiDeps;
|
||||
|
||||
config
|
||||
.command('claude-generate')
|
||||
.description('Generate .mcp.json from a project configuration')
|
||||
// claude/claude-generate: generate .mcp.json pointing at mcpctl mcp bridge
|
||||
function registerClaudeCommand(name: string, hidden: boolean): void {
|
||||
const cmd = config
|
||||
.command(name)
|
||||
.description(hidden ? '' : 'Generate .mcp.json that connects a project via mcpctl mcp bridge')
|
||||
.requiredOption('--project <name>', 'Project name')
|
||||
.option('-o, --output <path>', 'Output file path', '.mcp.json')
|
||||
.option('--merge', 'Merge with existing .mcp.json instead of overwriting')
|
||||
.option('--stdout', 'Print to stdout instead of writing a file')
|
||||
.action(async (opts: { project: string; output: string; merge?: boolean; stdout?: boolean }) => {
|
||||
const mcpConfig = await client.get<McpConfig>(`/api/v1/projects/${opts.project}/mcp-config`);
|
||||
.action((opts: { project: string; output: string; merge?: boolean; stdout?: boolean }) => {
|
||||
const mcpConfig: McpConfig = {
|
||||
mcpServers: {
|
||||
[opts.project]: {
|
||||
command: 'mcpctl',
|
||||
args: ['mcp', '-p', opts.project],
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
if (opts.stdout) {
|
||||
apiLog(JSON.stringify(mcpConfig, null, 2));
|
||||
log(JSON.stringify(mcpConfig, null, 2));
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -121,8 +128,21 @@ export function createConfigCommand(deps?: Partial<ConfigCommandDeps>, apiDeps?:
|
||||
|
||||
writeFileSync(outputPath, JSON.stringify(finalConfig, null, 2) + '\n');
|
||||
const serverCount = Object.keys(finalConfig.mcpServers).length;
|
||||
apiLog(`Wrote ${outputPath} (${serverCount} server(s))`);
|
||||
log(`Wrote ${outputPath} (${serverCount} server(s))`);
|
||||
});
|
||||
if (hidden) {
|
||||
// Commander shows empty-description commands but they won't clutter help output
|
||||
void cmd; // suppress unused lint
|
||||
}
|
||||
}
|
||||
|
||||
registerClaudeCommand('claude', false);
|
||||
registerClaudeCommand('claude-generate', true); // backward compat
|
||||
|
||||
config.addCommand(createConfigSetupCommand({ configDeps }));
|
||||
|
||||
if (apiDeps) {
|
||||
const { client, credentialsDeps, log: apiLog } = apiDeps;
|
||||
|
||||
config
|
||||
.command('impersonate')
|
||||
|
||||
@@ -196,8 +196,9 @@ export function createCreateCommand(deps: CreateCommandDeps): Command {
|
||||
.argument('<name>', 'Project name')
|
||||
.option('-d, --description <text>', 'Project description', '')
|
||||
.option('--proxy-mode <mode>', 'Proxy mode (direct, filtered)')
|
||||
.option('--proxy-mode-llm-provider <name>', 'LLM provider name (for filtered proxy mode)')
|
||||
.option('--proxy-mode-llm-model <name>', 'LLM model name (for filtered proxy mode)')
|
||||
.option('--llm-provider <name>', 'LLM provider name')
|
||||
.option('--llm-model <name>', 'LLM model name')
|
||||
.option('--prompt <text>', 'Project-level prompt / instructions for the LLM')
|
||||
.option('--server <name>', 'Server name (repeat for multiple)', collect, [])
|
||||
.option('--force', 'Update if already exists')
|
||||
.action(async (name: string, opts) => {
|
||||
@@ -206,8 +207,9 @@ export function createCreateCommand(deps: CreateCommandDeps): Command {
|
||||
description: opts.description,
|
||||
proxyMode: opts.proxyMode ?? 'direct',
|
||||
};
|
||||
if (opts.proxyModeLlmProvider) body.llmProvider = opts.proxyModeLlmProvider;
|
||||
if (opts.proxyModeLlmModel) body.llmModel = opts.proxyModeLlmModel;
|
||||
if (opts.prompt) body.prompt = opts.prompt;
|
||||
if (opts.llmProvider) body.llmProvider = opts.llmProvider;
|
||||
if (opts.llmModel) body.llmModel = opts.llmModel;
|
||||
if (opts.server.length > 0) body.servers = opts.server;
|
||||
|
||||
try {
|
||||
@@ -347,5 +349,61 @@ export function createCreateCommand(deps: CreateCommandDeps): Command {
|
||||
}
|
||||
});
|
||||
|
||||
// --- create prompt ---
|
||||
cmd.command('prompt')
|
||||
.description('Create an approved prompt')
|
||||
.argument('<name>', 'Prompt name (lowercase alphanumeric with hyphens)')
|
||||
.option('--project <name>', 'Project name to scope the prompt to')
|
||||
.option('--content <text>', 'Prompt content text')
|
||||
.option('--content-file <path>', 'Read prompt content from file')
|
||||
.action(async (name: string, opts) => {
|
||||
let content = opts.content as string | undefined;
|
||||
if (opts.contentFile) {
|
||||
const fs = await import('node:fs/promises');
|
||||
content = await fs.readFile(opts.contentFile as string, 'utf-8');
|
||||
}
|
||||
if (!content) {
|
||||
throw new Error('--content or --content-file is required');
|
||||
}
|
||||
|
||||
const body: Record<string, unknown> = { name, content };
|
||||
if (opts.project) {
|
||||
// Resolve project name to ID
|
||||
const projects = await client.get<Array<{ id: string; name: string }>>('/api/v1/projects');
|
||||
const project = projects.find((p) => p.name === opts.project);
|
||||
if (!project) throw new Error(`Project '${opts.project as string}' not found`);
|
||||
body.projectId = project.id;
|
||||
}
|
||||
|
||||
const prompt = await client.post<{ id: string; name: string }>('/api/v1/prompts', body);
|
||||
log(`prompt '${prompt.name}' created (id: ${prompt.id})`);
|
||||
});
|
||||
|
||||
// --- create promptrequest ---
|
||||
cmd.command('promptrequest')
|
||||
.description('Create a prompt request (pending proposal that needs approval)')
|
||||
.argument('<name>', 'Prompt request name (lowercase alphanumeric with hyphens)')
|
||||
.requiredOption('--project <name>', 'Project name (required)')
|
||||
.option('--content <text>', 'Prompt content text')
|
||||
.option('--content-file <path>', 'Read prompt content from file')
|
||||
.action(async (name: string, opts) => {
|
||||
let content = opts.content as string | undefined;
|
||||
if (opts.contentFile) {
|
||||
const fs = await import('node:fs/promises');
|
||||
content = await fs.readFile(opts.contentFile as string, 'utf-8');
|
||||
}
|
||||
if (!content) {
|
||||
throw new Error('--content or --content-file is required');
|
||||
}
|
||||
|
||||
const projectName = opts.project as string;
|
||||
const pr = await client.post<{ id: string; name: string }>(
|
||||
`/api/v1/projects/${encodeURIComponent(projectName)}/promptrequests`,
|
||||
{ name, content },
|
||||
);
|
||||
log(`prompt request '${pr.name}' created (id: ${pr.id})`);
|
||||
log(` approve with: mcpctl approve promptrequest ${pr.name}`);
|
||||
});
|
||||
|
||||
return cmd;
|
||||
}
|
||||
|
||||
@@ -130,6 +130,36 @@ const templateColumns: Column<TemplateRow>[] = [
|
||||
{ header: 'DESCRIPTION', key: 'description', width: 50 },
|
||||
];
|
||||
|
||||
interface PromptRow {
|
||||
id: string;
|
||||
name: string;
|
||||
projectId: string | null;
|
||||
createdAt: string;
|
||||
}
|
||||
|
||||
interface PromptRequestRow {
|
||||
id: string;
|
||||
name: string;
|
||||
projectId: string | null;
|
||||
createdBySession: string | null;
|
||||
createdAt: string;
|
||||
}
|
||||
|
||||
const promptColumns: Column<PromptRow>[] = [
|
||||
{ header: 'NAME', key: 'name' },
|
||||
{ header: 'PROJECT', key: (r) => r.projectId ?? '-', width: 20 },
|
||||
{ header: 'CREATED', key: (r) => new Date(r.createdAt).toLocaleString(), width: 20 },
|
||||
{ header: 'ID', key: 'id' },
|
||||
];
|
||||
|
||||
const promptRequestColumns: Column<PromptRequestRow>[] = [
|
||||
{ header: 'NAME', key: 'name' },
|
||||
{ header: 'PROJECT', key: (r) => r.projectId ?? '-', width: 20 },
|
||||
{ header: 'SESSION', key: (r) => r.createdBySession ? r.createdBySession.slice(0, 12) : '-', width: 14 },
|
||||
{ header: 'CREATED', key: (r) => new Date(r.createdAt).toLocaleString(), width: 20 },
|
||||
{ header: 'ID', key: 'id' },
|
||||
];
|
||||
|
||||
const instanceColumns: Column<InstanceRow>[] = [
|
||||
{ header: 'NAME', key: (r) => r.server?.name ?? '-', width: 20 },
|
||||
{ header: 'STATUS', key: 'status', width: 10 },
|
||||
@@ -157,6 +187,10 @@ function getColumnsForResource(resource: string): Column<Record<string, unknown>
|
||||
return groupColumns as unknown as Column<Record<string, unknown>>[];
|
||||
case 'rbac':
|
||||
return rbacColumns as unknown as Column<Record<string, unknown>>[];
|
||||
case 'prompts':
|
||||
return promptColumns as unknown as Column<Record<string, unknown>>[];
|
||||
case 'promptrequests':
|
||||
return promptRequestColumns as unknown as Column<Record<string, unknown>>[];
|
||||
default:
|
||||
return [
|
||||
{ header: 'ID', key: 'id' as keyof Record<string, unknown> },
|
||||
|
||||
224
src/cli/src/commands/mcp.ts
Normal file
224
src/cli/src/commands/mcp.ts
Normal file
@@ -0,0 +1,224 @@
|
||||
import { Command } from 'commander';
|
||||
import http from 'node:http';
|
||||
import { createInterface } from 'node:readline';
|
||||
|
||||
/** Inputs for {@link runMcpBridge}. */
export interface McpBridgeOptions {
  /** Project whose MCP endpoint the bridge connects to. */
  projectName: string;
  /** Base URL of the mcplocal daemon (a trailing slash is tolerated). */
  mcplocalUrl: string;
  /** Optional bearer token, sent as an Authorization header. */
  token?: string | undefined;
  /** Stream carrying newline-delimited JSON-RPC requests (normally process.stdin). */
  stdin: NodeJS.ReadableStream;
  /** Stream receiving newline-delimited JSON-RPC responses (normally process.stdout). */
  stdout: NodeJS.WritableStream;
  /** Stream receiving bridge diagnostics (normally process.stderr). */
  stderr: NodeJS.WritableStream;
}
|
||||
|
||||
function postJsonRpc(
|
||||
url: string,
|
||||
body: string,
|
||||
sessionId: string | undefined,
|
||||
token: string | undefined,
|
||||
): Promise<{ status: number; headers: http.IncomingHttpHeaders; body: string }> {
|
||||
return new Promise((resolve, reject) => {
|
||||
const parsed = new URL(url);
|
||||
const headers: Record<string, string> = {
|
||||
'Content-Type': 'application/json',
|
||||
'Accept': 'application/json, text/event-stream',
|
||||
};
|
||||
if (sessionId) {
|
||||
headers['mcp-session-id'] = sessionId;
|
||||
}
|
||||
if (token) {
|
||||
headers['Authorization'] = `Bearer ${token}`;
|
||||
}
|
||||
|
||||
const req = http.request(
|
||||
{
|
||||
hostname: parsed.hostname,
|
||||
port: parsed.port,
|
||||
path: parsed.pathname,
|
||||
method: 'POST',
|
||||
headers,
|
||||
timeout: 30_000,
|
||||
},
|
||||
(res) => {
|
||||
const chunks: Buffer[] = [];
|
||||
res.on('data', (chunk: Buffer) => chunks.push(chunk));
|
||||
res.on('end', () => {
|
||||
resolve({
|
||||
status: res.statusCode ?? 0,
|
||||
headers: res.headers,
|
||||
body: Buffer.concat(chunks).toString('utf-8'),
|
||||
});
|
||||
});
|
||||
},
|
||||
);
|
||||
req.on('error', reject);
|
||||
req.on('timeout', () => {
|
||||
req.destroy();
|
||||
reject(new Error('Request timed out'));
|
||||
});
|
||||
req.write(body);
|
||||
req.end();
|
||||
});
|
||||
}
|
||||
|
||||
function sendDelete(
|
||||
url: string,
|
||||
sessionId: string,
|
||||
token: string | undefined,
|
||||
): Promise<void> {
|
||||
return new Promise((resolve) => {
|
||||
const parsed = new URL(url);
|
||||
const headers: Record<string, string> = {
|
||||
'mcp-session-id': sessionId,
|
||||
};
|
||||
if (token) {
|
||||
headers['Authorization'] = `Bearer ${token}`;
|
||||
}
|
||||
|
||||
const req = http.request(
|
||||
{
|
||||
hostname: parsed.hostname,
|
||||
port: parsed.port,
|
||||
path: parsed.pathname,
|
||||
method: 'DELETE',
|
||||
headers,
|
||||
timeout: 5_000,
|
||||
},
|
||||
() => resolve(),
|
||||
);
|
||||
req.on('error', () => resolve()); // Best effort cleanup
|
||||
req.on('timeout', () => {
|
||||
req.destroy();
|
||||
resolve();
|
||||
});
|
||||
req.end();
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Extract JSON-RPC messages from an HTTP response body.
|
||||
* Handles both plain JSON and SSE (text/event-stream) formats.
|
||||
*/
|
||||
function extractJsonRpcMessages(contentType: string | undefined, body: string): string[] {
|
||||
if (contentType?.includes('text/event-stream')) {
|
||||
// Parse SSE: extract data: lines
|
||||
const messages: string[] = [];
|
||||
for (const line of body.split('\n')) {
|
||||
if (line.startsWith('data: ')) {
|
||||
messages.push(line.slice(6));
|
||||
}
|
||||
}
|
||||
return messages;
|
||||
}
|
||||
// Plain JSON response
|
||||
return [body];
|
||||
}
|
||||
|
||||
/**
|
||||
* STDIO-to-Streamable-HTTP MCP bridge.
|
||||
*
|
||||
* Reads JSON-RPC messages line-by-line from stdin, POSTs them to
|
||||
* mcplocal's project endpoint, and writes responses to stdout.
|
||||
*/
|
||||
export async function runMcpBridge(opts: McpBridgeOptions): Promise<void> {
|
||||
const { projectName, mcplocalUrl, token, stdin, stdout, stderr } = opts;
|
||||
const endpointUrl = `${mcplocalUrl.replace(/\/$/, '')}/projects/${encodeURIComponent(projectName)}/mcp`;
|
||||
|
||||
let sessionId: string | undefined;
|
||||
|
||||
const rl = createInterface({ input: stdin, crlfDelay: Infinity });
|
||||
|
||||
for await (const line of rl) {
|
||||
const trimmed = line.trim();
|
||||
if (!trimmed) continue;
|
||||
|
||||
try {
|
||||
const result = await postJsonRpc(endpointUrl, trimmed, sessionId, token);
|
||||
|
||||
// Capture session ID from first response
|
||||
if (!sessionId) {
|
||||
const sid = result.headers['mcp-session-id'];
|
||||
if (typeof sid === 'string') {
|
||||
sessionId = sid;
|
||||
}
|
||||
}
|
||||
|
||||
if (result.status >= 400) {
|
||||
stderr.write(`MCP bridge error: HTTP ${result.status}: ${result.body}\n`);
|
||||
}
|
||||
|
||||
// Handle both plain JSON and SSE responses
|
||||
const messages = extractJsonRpcMessages(result.headers['content-type'], result.body);
|
||||
for (const msg of messages) {
|
||||
const trimmedMsg = msg.trim();
|
||||
if (trimmedMsg) {
|
||||
stdout.write(trimmedMsg + '\n');
|
||||
}
|
||||
}
|
||||
} catch (err) {
|
||||
stderr.write(`MCP bridge error: ${err instanceof Error ? err.message : String(err)}\n`);
|
||||
}
|
||||
}
|
||||
|
||||
// stdin closed — cleanup session
|
||||
if (sessionId) {
|
||||
await sendDelete(endpointUrl, sessionId, token);
|
||||
}
|
||||
}
|
||||
|
||||
/** Dependency injection points for {@link createMcpCommand}. */
export interface McpCommandDeps {
  /** Returns the project name from the global --project flag, if set. */
  getProject: () => string | undefined;
  /** Optional config source; when absent the real config loader is imported lazily. */
  configLoader?: () => { mcplocalUrl: string };
  /** Optional credentials source; when absent the real credentials loader is imported lazily. */
  credentialsLoader?: () => { token: string } | null;
}
|
||||
|
||||
export function createMcpCommand(deps: McpCommandDeps): Command {
|
||||
const cmd = new Command('mcp')
|
||||
.description('MCP STDIO transport bridge — connects stdin/stdout to a project MCP endpoint')
|
||||
.passThroughOptions()
|
||||
.option('-p, --project <name>', 'Project name')
|
||||
.action(async (opts: { project?: string }) => {
|
||||
// Accept -p/--project on the command itself, or fall back to global --project
|
||||
const projectName = opts.project ?? deps.getProject();
|
||||
if (!projectName) {
|
||||
process.stderr.write('Error: --project is required for the mcp command\n');
|
||||
process.exitCode = 1;
|
||||
return;
|
||||
}
|
||||
|
||||
let mcplocalUrl = 'http://localhost:3200';
|
||||
if (deps.configLoader) {
|
||||
mcplocalUrl = deps.configLoader().mcplocalUrl;
|
||||
} else {
|
||||
try {
|
||||
const { loadConfig } = await import('../config/index.js');
|
||||
mcplocalUrl = loadConfig().mcplocalUrl;
|
||||
} catch {
|
||||
// Use default
|
||||
}
|
||||
}
|
||||
|
||||
let token: string | undefined;
|
||||
if (deps.credentialsLoader) {
|
||||
token = deps.credentialsLoader()?.token;
|
||||
} else {
|
||||
try {
|
||||
const { loadCredentials } = await import('../auth/index.js');
|
||||
token = loadCredentials()?.token;
|
||||
} catch {
|
||||
// No credentials
|
||||
}
|
||||
}
|
||||
|
||||
await runMcpBridge({
|
||||
projectName,
|
||||
mcplocalUrl,
|
||||
token,
|
||||
stdin: process.stdin,
|
||||
stdout: process.stdout,
|
||||
stderr: process.stderr,
|
||||
});
|
||||
});
|
||||
|
||||
return cmd;
|
||||
}
|
||||
@@ -1,6 +1,6 @@
|
||||
import { Command } from 'commander';
|
||||
import type { ApiClient } from '../api-client.js';
|
||||
import { resolveNameOrId } from './shared.js';
|
||||
import { resolveNameOrId, resolveResource } from './shared.js';
|
||||
|
||||
export interface ProjectOpsDeps {
|
||||
client: ApiClient;
|
||||
@@ -45,3 +45,22 @@ export function createDetachServerCommand(deps: ProjectOpsDeps): Command {
|
||||
log(`server '${serverName}' detached from project '${projectName}'`);
|
||||
});
|
||||
}
|
||||
|
||||
export function createApproveCommand(deps: ProjectOpsDeps): Command {
|
||||
const { client, log } = deps;
|
||||
|
||||
return new Command('approve')
|
||||
.description('Approve a pending prompt request (atomic: delete request, create prompt)')
|
||||
.argument('<resource>', 'Resource type (promptrequest)')
|
||||
.argument('<name>', 'Prompt request name or ID')
|
||||
.action(async (resourceArg: string, nameOrId: string) => {
|
||||
const resource = resolveResource(resourceArg);
|
||||
if (resource !== 'promptrequests') {
|
||||
throw new Error(`approve is only supported for 'promptrequest', got '${resourceArg}'`);
|
||||
}
|
||||
|
||||
const id = await resolveNameOrId(client, 'promptrequests', nameOrId);
|
||||
const prompt = await client.post<{ id: string; name: string }>(`/api/v1/promptrequests/${id}/approve`, {});
|
||||
log(`prompt request approved → prompt '${prompt.name}' created (id: ${prompt.id})`);
|
||||
});
|
||||
}
|
||||
|
||||
@@ -16,6 +16,11 @@ export const RESOURCE_ALIASES: Record<string, string> = {
|
||||
rbac: 'rbac',
|
||||
'rbac-definition': 'rbac',
|
||||
'rbac-binding': 'rbac',
|
||||
prompt: 'prompts',
|
||||
prompts: 'prompts',
|
||||
promptrequest: 'promptrequests',
|
||||
promptrequests: 'promptrequests',
|
||||
pr: 'promptrequests',
|
||||
};
|
||||
|
||||
export function resolveResource(name: string): string {
|
||||
|
||||
@@ -7,11 +7,22 @@ import type { CredentialsDeps } from '../auth/index.js';
|
||||
import { formatJson, formatYaml } from '../formatters/index.js';
|
||||
import { APP_VERSION } from '@mcpctl/shared';
|
||||
|
||||
// ANSI helpers
|
||||
const GREEN = '\x1b[32m';
|
||||
const RED = '\x1b[31m';
|
||||
const DIM = '\x1b[2m';
|
||||
const RESET = '\x1b[0m';
|
||||
const CLEAR_LINE = '\x1b[2K\r';
|
||||
|
||||
export interface StatusCommandDeps {
|
||||
configDeps: Partial<ConfigLoaderDeps>;
|
||||
credentialsDeps: Partial<CredentialsDeps>;
|
||||
log: (...args: string[]) => void;
|
||||
write: (text: string) => void;
|
||||
checkHealth: (url: string) => Promise<boolean>;
|
||||
/** Check LLM health via mcplocal's /llm/health endpoint */
|
||||
checkLlm: (mcplocalUrl: string) => Promise<string>;
|
||||
isTTY: boolean;
|
||||
}
|
||||
|
||||
function defaultCheckHealth(url: string): Promise<boolean> {
|
||||
@@ -28,15 +39,51 @@ function defaultCheckHealth(url: string): Promise<boolean> {
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Check LLM health by querying mcplocal's /llm/health endpoint.
|
||||
* This tests the actual provider running inside the daemon (uses persistent ACP for gemini, etc.)
|
||||
*/
|
||||
function defaultCheckLlm(mcplocalUrl: string): Promise<string> {
|
||||
return new Promise((resolve) => {
|
||||
const req = http.get(`${mcplocalUrl}/llm/health`, { timeout: 30000 }, (res) => {
|
||||
const chunks: Buffer[] = [];
|
||||
res.on('data', (chunk: Buffer) => chunks.push(chunk));
|
||||
res.on('end', () => {
|
||||
try {
|
||||
const body = JSON.parse(Buffer.concat(chunks).toString('utf-8')) as { status: string; error?: string };
|
||||
if (body.status === 'ok') {
|
||||
resolve('ok');
|
||||
} else if (body.status === 'not configured') {
|
||||
resolve('not configured');
|
||||
} else if (body.error) {
|
||||
resolve(body.error.slice(0, 80));
|
||||
} else {
|
||||
resolve(body.status);
|
||||
}
|
||||
} catch {
|
||||
resolve('invalid response');
|
||||
}
|
||||
});
|
||||
});
|
||||
req.on('error', () => resolve('mcplocal unreachable'));
|
||||
req.on('timeout', () => { req.destroy(); resolve('timeout'); });
|
||||
});
|
||||
}
|
||||
|
||||
const SPINNER_FRAMES = ['⠋', '⠙', '⠹', '⠸', '⠼', '⠴', '⠦', '⠧', '⠇', '⠏'];
|
||||
|
||||
const defaultDeps: StatusCommandDeps = {
|
||||
configDeps: {},
|
||||
credentialsDeps: {},
|
||||
log: (...args) => console.log(...args),
|
||||
write: (text) => process.stdout.write(text),
|
||||
checkHealth: defaultCheckHealth,
|
||||
checkLlm: defaultCheckLlm,
|
||||
isTTY: process.stdout.isTTY ?? false,
|
||||
};
|
||||
|
||||
export function createStatusCommand(deps?: Partial<StatusCommandDeps>): Command {
|
||||
const { configDeps, credentialsDeps, log, checkHealth } = { ...defaultDeps, ...deps };
|
||||
const { configDeps, credentialsDeps, log, write, checkHealth, checkLlm, isTTY } = { ...defaultDeps, ...deps };
|
||||
|
||||
return new Command('status')
|
||||
.description('Show mcpctl status and connectivity')
|
||||
@@ -45,11 +92,22 @@ export function createStatusCommand(deps?: Partial<StatusCommandDeps>): Command
|
||||
const config = loadConfig(configDeps);
|
||||
const creds = loadCredentials(credentialsDeps);
|
||||
|
||||
const [mcplocalReachable, mcpdReachable] = await Promise.all([
|
||||
const llmLabel = config.llm && config.llm.provider !== 'none'
|
||||
? `${config.llm.provider}${config.llm.model ? ` / ${config.llm.model}` : ''}`
|
||||
: null;
|
||||
|
||||
if (opts.output !== 'table') {
|
||||
// JSON/YAML: run everything in parallel, wait, output at once
|
||||
const [mcplocalReachable, mcpdReachable, llmStatus] = await Promise.all([
|
||||
checkHealth(config.mcplocalUrl),
|
||||
checkHealth(config.mcpdUrl),
|
||||
llmLabel ? checkLlm(config.mcplocalUrl) : Promise.resolve(null),
|
||||
]);
|
||||
|
||||
const llm = llmLabel
|
||||
? llmStatus === 'ok' ? llmLabel : `${llmLabel} (${llmStatus})`
|
||||
: null;
|
||||
|
||||
const status = {
|
||||
version: APP_VERSION,
|
||||
mcplocalUrl: config.mcplocalUrl,
|
||||
@@ -59,19 +117,60 @@ export function createStatusCommand(deps?: Partial<StatusCommandDeps>): Command
|
||||
auth: creds ? { user: creds.user } : null,
|
||||
registries: config.registries,
|
||||
outputFormat: config.outputFormat,
|
||||
llm,
|
||||
llmStatus,
|
||||
};
|
||||
|
||||
if (opts.output === 'json') {
|
||||
log(formatJson(status));
|
||||
} else if (opts.output === 'yaml') {
|
||||
log(formatYaml(status));
|
||||
} else {
|
||||
log(`mcpctl v${status.version}`);
|
||||
log(`mcplocal: ${status.mcplocalUrl} (${mcplocalReachable ? 'connected' : 'unreachable'})`);
|
||||
log(`mcpd: ${status.mcpdUrl} (${mcpdReachable ? 'connected' : 'unreachable'})`);
|
||||
log(opts.output === 'json' ? formatJson(status) : formatYaml(status));
|
||||
return;
|
||||
}
|
||||
|
||||
// Table format: print lines progressively, LLM last with spinner
|
||||
|
||||
// Fast health checks first
|
||||
const [mcplocalReachable, mcpdReachable] = await Promise.all([
|
||||
checkHealth(config.mcplocalUrl),
|
||||
checkHealth(config.mcpdUrl),
|
||||
]);
|
||||
|
||||
log(`mcpctl v${APP_VERSION}`);
|
||||
log(`mcplocal: ${config.mcplocalUrl} (${mcplocalReachable ? 'connected' : 'unreachable'})`);
|
||||
log(`mcpd: ${config.mcpdUrl} (${mcpdReachable ? 'connected' : 'unreachable'})`);
|
||||
log(`Auth: ${creds ? `logged in as ${creds.user}` : 'not logged in'}`);
|
||||
log(`Registries: ${status.registries.join(', ')}`);
|
||||
log(`Output: ${status.outputFormat}`);
|
||||
log(`Registries: ${config.registries.join(', ')}`);
|
||||
log(`Output: ${config.outputFormat}`);
|
||||
|
||||
if (!llmLabel) {
|
||||
log(`LLM: not configured (run 'mcpctl config setup')`);
|
||||
return;
|
||||
}
|
||||
|
||||
// LLM check with spinner — queries mcplocal's /llm/health endpoint
|
||||
const llmPromise = checkLlm(config.mcplocalUrl);
|
||||
|
||||
if (isTTY) {
|
||||
let frame = 0;
|
||||
const interval = setInterval(() => {
|
||||
write(`${CLEAR_LINE}LLM: ${llmLabel} ${DIM}${SPINNER_FRAMES[frame % SPINNER_FRAMES.length]} checking...${RESET}`);
|
||||
frame++;
|
||||
}, 80);
|
||||
|
||||
const llmStatus = await llmPromise;
|
||||
clearInterval(interval);
|
||||
|
||||
if (llmStatus === 'ok' || llmStatus === 'ok (key stored)') {
|
||||
write(`${CLEAR_LINE}LLM: ${llmLabel} ${GREEN}✓ ${llmStatus}${RESET}\n`);
|
||||
} else {
|
||||
write(`${CLEAR_LINE}LLM: ${llmLabel} ${RED}✗ ${llmStatus}${RESET}\n`);
|
||||
}
|
||||
} else {
|
||||
// Non-TTY: no spinner, just wait and print
|
||||
const llmStatus = await llmPromise;
|
||||
if (llmStatus === 'ok' || llmStatus === 'ok (key stored)') {
|
||||
log(`LLM: ${llmLabel} ✓ ${llmStatus}`);
|
||||
} else {
|
||||
log(`LLM: ${llmLabel} ✗ ${llmStatus}`);
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
export { McpctlConfigSchema, DEFAULT_CONFIG } from './schema.js';
|
||||
export type { McpctlConfig } from './schema.js';
|
||||
export { McpctlConfigSchema, LlmConfigSchema, LLM_PROVIDERS, DEFAULT_CONFIG } from './schema.js';
|
||||
export type { McpctlConfig, LlmConfig, LlmProviderName } from './schema.js';
|
||||
export { loadConfig, saveConfig, mergeConfig, getConfigPath } from './loader.js';
|
||||
export type { ConfigLoaderDeps } from './loader.js';
|
||||
|
||||
@@ -1,5 +1,21 @@
|
||||
import { z } from 'zod';
|
||||
|
||||
/** Supported LLM provider identifiers; 'none' means no LLM is configured. */
export const LLM_PROVIDERS = ['gemini-cli', 'ollama', 'anthropic', 'openai', 'deepseek', 'vllm', 'none'] as const;
export type LlmProviderName = typeof LLM_PROVIDERS[number];

/** Schema for the optional `llm` section of the mcpctl config. */
export const LlmConfigSchema = z.object({
  /** LLM provider name */
  provider: z.enum(LLM_PROVIDERS),
  /** Model name */
  model: z.string().optional(),
  /** Provider URL (for ollama, vllm, openai with custom endpoint) */
  url: z.string().optional(),
  /** Binary path override (for gemini-cli) */
  binaryPath: z.string().optional(),
}).strict(); // strict(): unknown keys are rejected so config typos surface early

export type LlmConfig = z.infer<typeof LlmConfigSchema>;
|
||||
|
||||
export const McpctlConfigSchema = z.object({
|
||||
/** mcplocal daemon endpoint (local LLM pre-processing proxy) */
|
||||
mcplocalUrl: z.string().default('http://localhost:3200'),
|
||||
@@ -19,6 +35,8 @@ export const McpctlConfigSchema = z.object({
|
||||
outputFormat: z.enum(['table', 'json', 'yaml']).default('table'),
|
||||
/** Smithery API key */
|
||||
smitheryApiKey: z.string().optional(),
|
||||
/** LLM provider configuration for smart features (pagination summaries, etc.) */
|
||||
llm: LlmConfigSchema.optional(),
|
||||
}).transform((cfg) => {
|
||||
// Backward compatibility: if old daemonUrl is set but mcplocalUrl wasn't explicitly changed,
|
||||
// use daemonUrl as mcplocalUrl
|
||||
|
||||
@@ -12,7 +12,8 @@ import { createCreateCommand } from './commands/create.js';
|
||||
import { createEditCommand } from './commands/edit.js';
|
||||
import { createBackupCommand, createRestoreCommand } from './commands/backup.js';
|
||||
import { createLoginCommand, createLogoutCommand } from './commands/auth.js';
|
||||
import { createAttachServerCommand, createDetachServerCommand } from './commands/project-ops.js';
|
||||
import { createAttachServerCommand, createDetachServerCommand, createApproveCommand } from './commands/project-ops.js';
|
||||
import { createMcpCommand } from './commands/mcp.js';
|
||||
import { ApiClient, ApiError } from './api-client.js';
|
||||
import { loadConfig } from './config/index.js';
|
||||
import { loadCredentials } from './auth/index.js';
|
||||
@@ -150,6 +151,10 @@ export function createProgram(): Command {
|
||||
};
|
||||
program.addCommand(createAttachServerCommand(projectOpsDeps), { hidden: true });
|
||||
program.addCommand(createDetachServerCommand(projectOpsDeps), { hidden: true });
|
||||
program.addCommand(createApproveCommand(projectOpsDeps));
|
||||
program.addCommand(createMcpCommand({
|
||||
getProject: () => program.opts().project as string | undefined,
|
||||
}), { hidden: true });
|
||||
|
||||
return program;
|
||||
}
|
||||
|
||||
@@ -21,6 +21,16 @@ beforeAll(async () => {
|
||||
res.writeHead(201, { 'Content-Type': 'application/json' });
|
||||
res.end(JSON.stringify({ id: 'srv-new', ...body }));
|
||||
});
|
||||
} else if (req.url === '/api/v1/servers/srv-1' && req.method === 'DELETE') {
|
||||
// Fastify rejects empty body with Content-Type: application/json
|
||||
const ct = req.headers['content-type'] ?? '';
|
||||
if (ct.includes('application/json')) {
|
||||
res.writeHead(400, { 'Content-Type': 'application/json' });
|
||||
res.end(JSON.stringify({ error: "Body cannot be empty when content-type is set to 'application/json'" }));
|
||||
} else {
|
||||
res.writeHead(204);
|
||||
res.end();
|
||||
}
|
||||
} else if (req.url === '/api/v1/missing' && req.method === 'GET') {
|
||||
res.writeHead(404, { 'Content-Type': 'application/json' });
|
||||
res.end(JSON.stringify({ error: 'Not found' }));
|
||||
@@ -75,6 +85,12 @@ describe('ApiClient', () => {
|
||||
await expect(client.get('/anything')).rejects.toThrow();
|
||||
});
|
||||
|
||||
it('performs DELETE without Content-Type header', async () => {
|
||||
const client = new ApiClient({ baseUrl: `http://localhost:${port}` });
|
||||
// Should succeed (204) because no Content-Type is sent on bodyless DELETE
|
||||
await expect(client.delete('/api/v1/servers/srv-1')).resolves.toBeUndefined();
|
||||
});
|
||||
|
||||
it('sends Authorization header when token provided', async () => {
|
||||
// We need a separate server to check the header
|
||||
let receivedAuth = '';
|
||||
|
||||
@@ -8,19 +8,14 @@ import { saveCredentials, loadCredentials } from '../../src/auth/index.js';
|
||||
|
||||
function mockClient(): ApiClient {
|
||||
return {
|
||||
get: vi.fn(async () => ({
|
||||
mcpServers: {
|
||||
'slack--default': { command: 'npx', args: ['-y', '@anthropic/slack-mcp'], env: { WORKSPACE: 'test' } },
|
||||
'github--default': { command: 'npx', args: ['-y', '@anthropic/github-mcp'] },
|
||||
},
|
||||
})),
|
||||
get: vi.fn(async () => ({})),
|
||||
post: vi.fn(async () => ({ token: 'impersonated-tok', user: { email: 'other@test.com' } })),
|
||||
put: vi.fn(async () => ({})),
|
||||
delete: vi.fn(async () => {}),
|
||||
} as unknown as ApiClient;
|
||||
}
|
||||
|
||||
describe('config claude-generate', () => {
|
||||
describe('config claude', () => {
|
||||
let client: ReturnType<typeof mockClient>;
|
||||
let output: string[];
|
||||
let tmpDir: string;
|
||||
@@ -36,18 +31,23 @@ describe('config claude-generate', () => {
|
||||
rmSync(tmpDir, { recursive: true, force: true });
|
||||
});
|
||||
|
||||
it('generates .mcp.json from project config', async () => {
|
||||
it('generates .mcp.json with mcpctl mcp bridge entry', async () => {
|
||||
const outPath = join(tmpDir, '.mcp.json');
|
||||
const cmd = createConfigCommand(
|
||||
{ configDeps: { configDir: tmpDir }, log },
|
||||
{ client, credentialsDeps: { configDir: tmpDir }, log },
|
||||
);
|
||||
await cmd.parseAsync(['claude-generate', '--project', 'proj-1', '-o', outPath], { from: 'user' });
|
||||
await cmd.parseAsync(['claude', '--project', 'homeautomation', '-o', outPath], { from: 'user' });
|
||||
|
||||
// No API call should be made
|
||||
expect(client.get).not.toHaveBeenCalled();
|
||||
|
||||
expect(client.get).toHaveBeenCalledWith('/api/v1/projects/proj-1/mcp-config');
|
||||
const written = JSON.parse(readFileSync(outPath, 'utf-8'));
|
||||
expect(written.mcpServers['slack--default']).toBeDefined();
|
||||
expect(output.join('\n')).toContain('2 server(s)');
|
||||
expect(written.mcpServers['homeautomation']).toEqual({
|
||||
command: 'mcpctl',
|
||||
args: ['mcp', '-p', 'homeautomation'],
|
||||
});
|
||||
expect(output.join('\n')).toContain('1 server(s)');
|
||||
});
|
||||
|
||||
it('prints to stdout with --stdout', async () => {
|
||||
@@ -55,9 +55,13 @@ describe('config claude-generate', () => {
|
||||
{ configDeps: { configDir: tmpDir }, log },
|
||||
{ client, credentialsDeps: { configDir: tmpDir }, log },
|
||||
);
|
||||
await cmd.parseAsync(['claude-generate', '--project', 'proj-1', '--stdout'], { from: 'user' });
|
||||
await cmd.parseAsync(['claude', '--project', 'myproj', '--stdout'], { from: 'user' });
|
||||
|
||||
expect(output[0]).toContain('mcpServers');
|
||||
const parsed = JSON.parse(output[0]);
|
||||
expect(parsed.mcpServers['myproj']).toEqual({
|
||||
command: 'mcpctl',
|
||||
args: ['mcp', '-p', 'myproj'],
|
||||
});
|
||||
});
|
||||
|
||||
it('merges with existing .mcp.json', async () => {
|
||||
@@ -70,12 +74,41 @@ describe('config claude-generate', () => {
|
||||
{ configDeps: { configDir: tmpDir }, log },
|
||||
{ client, credentialsDeps: { configDir: tmpDir }, log },
|
||||
);
|
||||
await cmd.parseAsync(['claude-generate', '--project', 'proj-1', '-o', outPath, '--merge'], { from: 'user' });
|
||||
await cmd.parseAsync(['claude', '--project', 'proj-1', '-o', outPath, '--merge'], { from: 'user' });
|
||||
|
||||
const written = JSON.parse(readFileSync(outPath, 'utf-8'));
|
||||
expect(written.mcpServers['existing--server']).toBeDefined();
|
||||
expect(written.mcpServers['slack--default']).toBeDefined();
|
||||
expect(output.join('\n')).toContain('3 server(s)');
|
||||
expect(written.mcpServers['proj-1']).toEqual({
|
||||
command: 'mcpctl',
|
||||
args: ['mcp', '-p', 'proj-1'],
|
||||
});
|
||||
expect(output.join('\n')).toContain('2 server(s)');
|
||||
});
|
||||
|
||||
it('backward compat: claude-generate still works', async () => {
|
||||
const outPath = join(tmpDir, '.mcp.json');
|
||||
const cmd = createConfigCommand(
|
||||
{ configDeps: { configDir: tmpDir }, log },
|
||||
{ client, credentialsDeps: { configDir: tmpDir }, log },
|
||||
);
|
||||
await cmd.parseAsync(['claude-generate', '--project', 'proj-1', '-o', outPath], { from: 'user' });
|
||||
|
||||
const written = JSON.parse(readFileSync(outPath, 'utf-8'));
|
||||
expect(written.mcpServers['proj-1']).toEqual({
|
||||
command: 'mcpctl',
|
||||
args: ['mcp', '-p', 'proj-1'],
|
||||
});
|
||||
});
|
||||
|
||||
it('uses project name as the server key', async () => {
|
||||
const outPath = join(tmpDir, '.mcp.json');
|
||||
const cmd = createConfigCommand(
|
||||
{ configDeps: { configDir: tmpDir }, log },
|
||||
);
|
||||
await cmd.parseAsync(['claude', '--project', 'my-fancy-project', '-o', outPath], { from: 'user' });
|
||||
|
||||
const written = JSON.parse(readFileSync(outPath, 'utf-8'));
|
||||
expect(Object.keys(written.mcpServers)).toEqual(['my-fancy-project']);
|
||||
});
|
||||
});
|
||||
|
||||
|
||||
293
src/cli/tests/commands/config-setup.test.ts
Normal file
293
src/cli/tests/commands/config-setup.test.ts
Normal file
@@ -0,0 +1,293 @@
|
||||
import { describe, it, expect, vi, beforeEach } from 'vitest';
|
||||
import { createConfigSetupCommand } from '../../src/commands/config-setup.js';
|
||||
import type { ConfigSetupDeps, ConfigSetupPrompt } from '../../src/commands/config-setup.js';
|
||||
import type { SecretStore } from '@mcpctl/shared';
|
||||
import { mkdtempSync, rmSync, readFileSync } from 'node:fs';
|
||||
import { join } from 'node:path';
|
||||
import { tmpdir } from 'node:os';
|
||||
|
||||
let tempDir: string;
|
||||
let logs: string[];
|
||||
|
||||
beforeEach(() => {
|
||||
tempDir = mkdtempSync(join(tmpdir(), 'mcpctl-config-setup-test-'));
|
||||
logs = [];
|
||||
});
|
||||
|
||||
function cleanup() {
|
||||
rmSync(tempDir, { recursive: true, force: true });
|
||||
}
|
||||
|
||||
function mockSecretStore(secrets: Record<string, string> = {}): SecretStore {
|
||||
const store: Record<string, string> = { ...secrets };
|
||||
return {
|
||||
get: vi.fn(async (key: string) => store[key] ?? null),
|
||||
set: vi.fn(async (key: string, value: string) => { store[key] = value; }),
|
||||
delete: vi.fn(async () => true),
|
||||
backend: () => 'mock',
|
||||
};
|
||||
}
|
||||
|
||||
function mockPrompt(answers: unknown[]): ConfigSetupPrompt {
|
||||
let callIndex = 0;
|
||||
return {
|
||||
select: vi.fn(async () => answers[callIndex++]),
|
||||
input: vi.fn(async () => answers[callIndex++] as string),
|
||||
password: vi.fn(async () => answers[callIndex++] as string),
|
||||
confirm: vi.fn(async () => answers[callIndex++] as boolean),
|
||||
};
|
||||
}
|
||||
|
||||
function buildDeps(overrides: {
|
||||
secrets?: Record<string, string>;
|
||||
answers?: unknown[];
|
||||
fetchModels?: ConfigSetupDeps['fetchModels'];
|
||||
whichBinary?: ConfigSetupDeps['whichBinary'];
|
||||
} = {}): ConfigSetupDeps {
|
||||
return {
|
||||
configDeps: { configDir: tempDir },
|
||||
secretStore: mockSecretStore(overrides.secrets),
|
||||
log: (...args: string[]) => logs.push(args.join(' ')),
|
||||
prompt: mockPrompt(overrides.answers ?? []),
|
||||
fetchModels: overrides.fetchModels ?? vi.fn(async () => []),
|
||||
whichBinary: overrides.whichBinary ?? vi.fn(async () => '/usr/bin/gemini'),
|
||||
};
|
||||
}
|
||||
|
||||
function readConfig(): Record<string, unknown> {
|
||||
const raw = readFileSync(join(tempDir, 'config.json'), 'utf-8');
|
||||
return JSON.parse(raw) as Record<string, unknown>;
|
||||
}
|
||||
|
||||
async function runSetup(deps: ConfigSetupDeps): Promise<void> {
|
||||
const cmd = createConfigSetupCommand(deps);
|
||||
await cmd.parseAsync([], { from: 'user' });
|
||||
}
|
||||
|
||||
describe('config setup wizard', () => {
|
||||
describe('provider: none', () => {
|
||||
it('disables LLM and saves config', async () => {
|
||||
const deps = buildDeps({ answers: ['none'] });
|
||||
await runSetup(deps);
|
||||
|
||||
const config = readConfig();
|
||||
expect(config.llm).toEqual({ provider: 'none' });
|
||||
expect(logs.some((l) => l.includes('LLM disabled'))).toBe(true);
|
||||
cleanup();
|
||||
});
|
||||
});
|
||||
|
||||
describe('provider: gemini-cli', () => {
|
||||
it('auto-detects binary path and saves config', async () => {
|
||||
// Answers: select provider, select model (no binary prompt — auto-detected)
|
||||
const deps = buildDeps({
|
||||
answers: ['gemini-cli', 'gemini-2.5-flash'],
|
||||
whichBinary: vi.fn(async () => '/home/user/.npm-global/bin/gemini'),
|
||||
});
|
||||
await runSetup(deps);
|
||||
|
||||
const config = readConfig();
|
||||
const llm = config.llm as Record<string, unknown>;
|
||||
expect(llm.provider).toBe('gemini-cli');
|
||||
expect(llm.model).toBe('gemini-2.5-flash');
|
||||
expect(llm.binaryPath).toBe('/home/user/.npm-global/bin/gemini');
|
||||
expect(logs.some((l) => l.includes('Found gemini at'))).toBe(true);
|
||||
cleanup();
|
||||
});
|
||||
|
||||
it('prompts for manual path when binary not found', async () => {
|
||||
// Answers: select provider, select model, enter manual path
|
||||
const deps = buildDeps({
|
||||
answers: ['gemini-cli', 'gemini-2.5-flash', '/opt/gemini'],
|
||||
whichBinary: vi.fn(async () => null),
|
||||
});
|
||||
await runSetup(deps);
|
||||
|
||||
const config = readConfig();
|
||||
const llm = config.llm as Record<string, unknown>;
|
||||
expect(llm.binaryPath).toBe('/opt/gemini');
|
||||
expect(logs.some((l) => l.includes('not found'))).toBe(true);
|
||||
cleanup();
|
||||
});
|
||||
|
||||
it('saves gemini-cli with custom model', async () => {
|
||||
// Answers: select provider, select custom, enter model name
|
||||
const deps = buildDeps({
|
||||
answers: ['gemini-cli', '__custom__', 'gemini-3.0-flash'],
|
||||
whichBinary: vi.fn(async () => '/usr/bin/gemini'),
|
||||
});
|
||||
await runSetup(deps);
|
||||
|
||||
const config = readConfig();
|
||||
const llm = config.llm as Record<string, unknown>;
|
||||
expect(llm.model).toBe('gemini-3.0-flash');
|
||||
cleanup();
|
||||
});
|
||||
});
|
||||
|
||||
describe('provider: ollama', () => {
|
||||
it('fetches models and allows selection', async () => {
|
||||
const fetchModels = vi.fn(async () => ['llama3.2', 'codellama', 'mistral']);
|
||||
// Answers: select provider, enter URL, select model
|
||||
const deps = buildDeps({
|
||||
answers: ['ollama', 'http://localhost:11434', 'codellama'],
|
||||
fetchModels,
|
||||
});
|
||||
await runSetup(deps);
|
||||
|
||||
expect(fetchModels).toHaveBeenCalledWith('http://localhost:11434', '/api/tags');
|
||||
const config = readConfig();
|
||||
const llm = config.llm as Record<string, unknown>;
|
||||
expect(llm.provider).toBe('ollama');
|
||||
expect(llm.model).toBe('codellama');
|
||||
expect(llm.url).toBe('http://localhost:11434');
|
||||
cleanup();
|
||||
});
|
||||
|
||||
it('falls back to manual input when fetch fails', async () => {
|
||||
const fetchModels = vi.fn(async () => []);
|
||||
// Answers: select provider, enter URL, enter model manually
|
||||
const deps = buildDeps({
|
||||
answers: ['ollama', 'http://localhost:11434', 'llama3.2'],
|
||||
fetchModels,
|
||||
});
|
||||
await runSetup(deps);
|
||||
|
||||
const config = readConfig();
|
||||
expect((config.llm as Record<string, unknown>).model).toBe('llama3.2');
|
||||
cleanup();
|
||||
});
|
||||
});
|
||||
|
||||
describe('provider: anthropic', () => {
|
||||
it('prompts for API key and saves to secret store', async () => {
|
||||
// Answers: select provider, enter API key, select model
|
||||
const deps = buildDeps({
|
||||
answers: ['anthropic', 'sk-ant-new-key', 'claude-haiku-3-5-20241022'],
|
||||
});
|
||||
await runSetup(deps);
|
||||
|
||||
expect(deps.secretStore.set).toHaveBeenCalledWith('anthropic-api-key', 'sk-ant-new-key');
|
||||
const config = readConfig();
|
||||
const llm = config.llm as Record<string, unknown>;
|
||||
expect(llm.provider).toBe('anthropic');
|
||||
expect(llm.model).toBe('claude-haiku-3-5-20241022');
|
||||
// API key should NOT be in config file
|
||||
expect(llm).not.toHaveProperty('apiKey');
|
||||
cleanup();
|
||||
});
|
||||
|
||||
it('shows existing key masked and allows keeping it', async () => {
|
||||
// Answers: select provider, confirm change=false, select model
|
||||
const deps = buildDeps({
|
||||
secrets: { 'anthropic-api-key': 'sk-ant-existing-key-1234' },
|
||||
answers: ['anthropic', false, 'claude-sonnet-4-20250514'],
|
||||
});
|
||||
await runSetup(deps);
|
||||
|
||||
// Should NOT have called set (kept existing key)
|
||||
expect(deps.secretStore.set).not.toHaveBeenCalled();
|
||||
const config = readConfig();
|
||||
expect((config.llm as Record<string, unknown>).model).toBe('claude-sonnet-4-20250514');
|
||||
cleanup();
|
||||
});
|
||||
|
||||
it('allows replacing existing key', async () => {
|
||||
// Answers: select provider, confirm change=true, enter new key, select model
|
||||
const deps = buildDeps({
|
||||
secrets: { 'anthropic-api-key': 'sk-ant-old' },
|
||||
answers: ['anthropic', true, 'sk-ant-new', 'claude-haiku-3-5-20241022'],
|
||||
});
|
||||
await runSetup(deps);
|
||||
|
||||
expect(deps.secretStore.set).toHaveBeenCalledWith('anthropic-api-key', 'sk-ant-new');
|
||||
cleanup();
|
||||
});
|
||||
});
|
||||
|
||||
describe('provider: vllm', () => {
|
||||
it('fetches models from vLLM and allows selection', async () => {
|
||||
const fetchModels = vi.fn(async () => ['my-model', 'llama-70b']);
|
||||
// Answers: select provider, enter URL, select model
|
||||
const deps = buildDeps({
|
||||
answers: ['vllm', 'http://gpu:8000', 'llama-70b'],
|
||||
fetchModels,
|
||||
});
|
||||
await runSetup(deps);
|
||||
|
||||
expect(fetchModels).toHaveBeenCalledWith('http://gpu:8000', '/v1/models');
|
||||
const config = readConfig();
|
||||
const llm = config.llm as Record<string, unknown>;
|
||||
expect(llm.provider).toBe('vllm');
|
||||
expect(llm.url).toBe('http://gpu:8000');
|
||||
expect(llm.model).toBe('llama-70b');
|
||||
cleanup();
|
||||
});
|
||||
});
|
||||
|
||||
describe('provider: openai', () => {
|
||||
it('prompts for key, model, and optional custom endpoint', async () => {
|
||||
// Answers: select provider, enter key, enter model, confirm custom URL=true, enter URL
|
||||
const deps = buildDeps({
|
||||
answers: ['openai', 'sk-openai-key', 'gpt-4o', true, 'https://custom.api.com'],
|
||||
});
|
||||
await runSetup(deps);
|
||||
|
||||
expect(deps.secretStore.set).toHaveBeenCalledWith('openai-api-key', 'sk-openai-key');
|
||||
const config = readConfig();
|
||||
const llm = config.llm as Record<string, unknown>;
|
||||
expect(llm.provider).toBe('openai');
|
||||
expect(llm.model).toBe('gpt-4o');
|
||||
expect(llm.url).toBe('https://custom.api.com');
|
||||
cleanup();
|
||||
});
|
||||
|
||||
it('skips custom URL when not requested', async () => {
|
||||
// Answers: select provider, enter key, enter model, confirm custom URL=false
|
||||
const deps = buildDeps({
|
||||
answers: ['openai', 'sk-openai-key', 'gpt-4o-mini', false],
|
||||
});
|
||||
await runSetup(deps);
|
||||
|
||||
const config = readConfig();
|
||||
const llm = config.llm as Record<string, unknown>;
|
||||
expect(llm.url).toBeUndefined();
|
||||
cleanup();
|
||||
});
|
||||
});
|
||||
|
||||
describe('provider: deepseek', () => {
|
||||
it('prompts for key and model', async () => {
|
||||
// Answers: select provider, enter key, select model
|
||||
const deps = buildDeps({
|
||||
answers: ['deepseek', 'sk-ds-key', 'deepseek-chat'],
|
||||
});
|
||||
await runSetup(deps);
|
||||
|
||||
expect(deps.secretStore.set).toHaveBeenCalledWith('deepseek-api-key', 'sk-ds-key');
|
||||
const config = readConfig();
|
||||
const llm = config.llm as Record<string, unknown>;
|
||||
expect(llm.provider).toBe('deepseek');
|
||||
expect(llm.model).toBe('deepseek-chat');
|
||||
cleanup();
|
||||
});
|
||||
});
|
||||
|
||||
describe('output messages', () => {
|
||||
it('shows restart instruction', async () => {
|
||||
const deps = buildDeps({ answers: ['gemini-cli', 'gemini-2.5-flash'] });
|
||||
await runSetup(deps);
|
||||
|
||||
expect(logs.some((l) => l.includes('systemctl --user restart mcplocal'))).toBe(true);
|
||||
cleanup();
|
||||
});
|
||||
|
||||
it('shows configured provider and model', async () => {
|
||||
const deps = buildDeps({ answers: ['gemini-cli', 'gemini-2.5-flash'] });
|
||||
await runSetup(deps);
|
||||
|
||||
expect(logs.some((l) => l.includes('gemini-cli') && l.includes('gemini-2.5-flash'))).toBe(true);
|
||||
cleanup();
|
||||
});
|
||||
});
|
||||
});
|
||||
481
src/cli/tests/commands/mcp.test.ts
Normal file
481
src/cli/tests/commands/mcp.test.ts
Normal file
@@ -0,0 +1,481 @@
|
||||
import { describe, it, expect, beforeAll, afterAll } from 'vitest';
|
||||
import http from 'node:http';
|
||||
import { Readable, Writable } from 'node:stream';
|
||||
import { runMcpBridge, createMcpCommand } from '../../src/commands/mcp.js';
|
||||
|
||||
// ---- Mock MCP server (simulates mcplocal project endpoint) ----
|
||||
|
||||
interface RecordedRequest {
|
||||
method: string;
|
||||
url: string;
|
||||
headers: http.IncomingHttpHeaders;
|
||||
body: string;
|
||||
}
|
||||
|
||||
let mockServer: http.Server;
|
||||
let mockPort: number;
|
||||
const recorded: RecordedRequest[] = [];
|
||||
let sessionCounter = 0;
|
||||
|
||||
function makeInitializeResponse(id: number | string) {
|
||||
return JSON.stringify({
|
||||
jsonrpc: '2.0',
|
||||
id,
|
||||
result: {
|
||||
protocolVersion: '2024-11-05',
|
||||
capabilities: { tools: {} },
|
||||
serverInfo: { name: 'test-server', version: '1.0.0' },
|
||||
},
|
||||
});
|
||||
}
|
||||
|
||||
function makeToolsListResponse(id: number | string) {
|
||||
return JSON.stringify({
|
||||
jsonrpc: '2.0',
|
||||
id,
|
||||
result: {
|
||||
tools: [
|
||||
{ name: 'grafana/query', description: 'Query Grafana', inputSchema: { type: 'object', properties: {} } },
|
||||
],
|
||||
},
|
||||
});
|
||||
}
|
||||
|
||||
function makeToolCallResponse(id: number | string) {
|
||||
return JSON.stringify({
|
||||
jsonrpc: '2.0',
|
||||
id,
|
||||
result: {
|
||||
content: [{ type: 'text', text: 'tool result' }],
|
||||
},
|
||||
});
|
||||
}
|
||||
|
||||
beforeAll(async () => {
|
||||
mockServer = http.createServer((req, res) => {
|
||||
const chunks: Buffer[] = [];
|
||||
req.on('data', (c: Buffer) => chunks.push(c));
|
||||
req.on('end', () => {
|
||||
const body = Buffer.concat(chunks).toString('utf-8');
|
||||
recorded.push({ method: req.method ?? '', url: req.url ?? '', headers: req.headers, body });
|
||||
|
||||
if (req.method === 'DELETE') {
|
||||
res.writeHead(200);
|
||||
res.end();
|
||||
return;
|
||||
}
|
||||
|
||||
if (req.method === 'POST' && req.url?.startsWith('/projects/')) {
|
||||
let sessionId = req.headers['mcp-session-id'] as string | undefined;
|
||||
|
||||
// Assign session ID on first request
|
||||
if (!sessionId) {
|
||||
sessionCounter++;
|
||||
sessionId = `session-${sessionCounter}`;
|
||||
}
|
||||
res.setHeader('mcp-session-id', sessionId);
|
||||
|
||||
// Parse JSON-RPC and respond based on method
|
||||
try {
|
||||
const rpc = JSON.parse(body) as { id: number | string; method: string };
|
||||
let responseBody: string;
|
||||
|
||||
switch (rpc.method) {
|
||||
case 'initialize':
|
||||
responseBody = makeInitializeResponse(rpc.id);
|
||||
break;
|
||||
case 'tools/list':
|
||||
responseBody = makeToolsListResponse(rpc.id);
|
||||
break;
|
||||
case 'tools/call':
|
||||
responseBody = makeToolCallResponse(rpc.id);
|
||||
break;
|
||||
default:
|
||||
responseBody = JSON.stringify({ jsonrpc: '2.0', id: rpc.id, error: { code: -32601, message: 'Method not found' } });
|
||||
}
|
||||
|
||||
// Respond in SSE format for /projects/sse-project/mcp
|
||||
if (req.url?.includes('sse-project')) {
|
||||
res.writeHead(200, { 'Content-Type': 'text/event-stream' });
|
||||
res.end(`event: message\ndata: ${responseBody}\n\n`);
|
||||
} else {
|
||||
res.writeHead(200, { 'Content-Type': 'application/json' });
|
||||
res.end(responseBody);
|
||||
}
|
||||
} catch {
|
||||
res.writeHead(400, { 'Content-Type': 'application/json' });
|
||||
res.end(JSON.stringify({ error: 'Invalid JSON' }));
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
res.writeHead(404);
|
||||
res.end();
|
||||
});
|
||||
});
|
||||
|
||||
await new Promise<void>((resolve) => {
|
||||
mockServer.listen(0, () => {
|
||||
const addr = mockServer.address();
|
||||
if (addr && typeof addr === 'object') {
|
||||
mockPort = addr.port;
|
||||
}
|
||||
resolve();
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
afterAll(() => {
|
||||
mockServer.close();
|
||||
});
|
||||
|
||||
// ---- Helper to run bridge with mock streams ----
|
||||
|
||||
function createMockStreams() {
|
||||
const stdoutChunks: string[] = [];
|
||||
const stderrChunks: string[] = [];
|
||||
|
||||
const stdout = new Writable({
|
||||
write(chunk: Buffer, _encoding, callback) {
|
||||
stdoutChunks.push(chunk.toString());
|
||||
callback();
|
||||
},
|
||||
});
|
||||
|
||||
const stderr = new Writable({
|
||||
write(chunk: Buffer, _encoding, callback) {
|
||||
stderrChunks.push(chunk.toString());
|
||||
callback();
|
||||
},
|
||||
});
|
||||
|
||||
return { stdout, stderr, stdoutChunks, stderrChunks };
|
||||
}
|
||||
|
||||
function pushAndEnd(stdin: Readable, lines: string[]) {
|
||||
for (const line of lines) {
|
||||
stdin.push(line + '\n');
|
||||
}
|
||||
stdin.push(null); // EOF
|
||||
}
|
||||
|
||||
// ---- Tests ----
|
||||
|
||||
describe('MCP STDIO Bridge', () => {
|
||||
beforeAll(() => {
|
||||
recorded.length = 0;
|
||||
sessionCounter = 0;
|
||||
});
|
||||
|
||||
it('forwards initialize request and returns response', async () => {
|
||||
recorded.length = 0;
|
||||
const stdin = new Readable({ read() {} });
|
||||
const { stdout, stdoutChunks } = createMockStreams();
|
||||
|
||||
const initMsg = JSON.stringify({
|
||||
jsonrpc: '2.0', id: 1, method: 'initialize',
|
||||
params: { protocolVersion: '2024-11-05', capabilities: {}, clientInfo: { name: 'test', version: '1.0' } },
|
||||
});
|
||||
|
||||
pushAndEnd(stdin, [initMsg]);
|
||||
|
||||
await runMcpBridge({
|
||||
projectName: 'test-project',
|
||||
mcplocalUrl: `http://localhost:${mockPort}`,
|
||||
stdin, stdout, stderr: new Writable({ write(_, __, cb) { cb(); } }),
|
||||
});
|
||||
|
||||
// Verify request was made to correct URL
|
||||
expect(recorded.some((r) => r.url === '/projects/test-project/mcp' && r.method === 'POST')).toBe(true);
|
||||
|
||||
// Verify response on stdout
|
||||
const output = stdoutChunks.join('');
|
||||
const parsed = JSON.parse(output.trim());
|
||||
expect(parsed.result.serverInfo.name).toBe('test-server');
|
||||
expect(parsed.result.protocolVersion).toBe('2024-11-05');
|
||||
});
|
||||
|
||||
it('sends session ID on subsequent requests', async () => {
|
||||
recorded.length = 0;
|
||||
const stdin = new Readable({ read() {} });
|
||||
const { stdout, stdoutChunks } = createMockStreams();
|
||||
|
||||
const initMsg = JSON.stringify({
|
||||
jsonrpc: '2.0', id: 1, method: 'initialize',
|
||||
params: { protocolVersion: '2024-11-05', capabilities: {}, clientInfo: { name: 'test', version: '1.0' } },
|
||||
});
|
||||
const toolsListMsg = JSON.stringify({ jsonrpc: '2.0', id: 2, method: 'tools/list', params: {} });
|
||||
|
||||
pushAndEnd(stdin, [initMsg, toolsListMsg]);
|
||||
|
||||
await runMcpBridge({
|
||||
projectName: 'test-project',
|
||||
mcplocalUrl: `http://localhost:${mockPort}`,
|
||||
stdin, stdout, stderr: new Writable({ write(_, __, cb) { cb(); } }),
|
||||
});
|
||||
|
||||
// First POST should NOT have mcp-session-id header
|
||||
const firstPost = recorded.find((r) => r.method === 'POST' && r.body.includes('initialize'));
|
||||
expect(firstPost).toBeDefined();
|
||||
expect(firstPost!.headers['mcp-session-id']).toBeUndefined();
|
||||
|
||||
// Second POST SHOULD have mcp-session-id header
|
||||
const secondPost = recorded.find((r) => r.method === 'POST' && r.body.includes('tools/list'));
|
||||
expect(secondPost).toBeDefined();
|
||||
expect(secondPost!.headers['mcp-session-id']).toMatch(/^session-/);
|
||||
|
||||
// Verify tools/list response
|
||||
const lines = stdoutChunks.join('').trim().split('\n');
|
||||
expect(lines.length).toBe(2);
|
||||
const toolsResponse = JSON.parse(lines[1]);
|
||||
expect(toolsResponse.result.tools[0].name).toBe('grafana/query');
|
||||
});
|
||||
|
||||
it('forwards tools/call and returns result', async () => {
|
||||
recorded.length = 0;
|
||||
const stdin = new Readable({ read() {} });
|
||||
const { stdout, stdoutChunks } = createMockStreams();
|
||||
|
||||
const initMsg = JSON.stringify({
|
||||
jsonrpc: '2.0', id: 1, method: 'initialize',
|
||||
params: { protocolVersion: '2024-11-05', capabilities: {}, clientInfo: { name: 'test', version: '1.0' } },
|
||||
});
|
||||
const callMsg = JSON.stringify({
|
||||
jsonrpc: '2.0', id: 2, method: 'tools/call',
|
||||
params: { name: 'grafana/query', arguments: { query: 'test' } },
|
||||
});
|
||||
|
||||
pushAndEnd(stdin, [initMsg, callMsg]);
|
||||
|
||||
await runMcpBridge({
|
||||
projectName: 'test-project',
|
||||
mcplocalUrl: `http://localhost:${mockPort}`,
|
||||
stdin, stdout, stderr: new Writable({ write(_, __, cb) { cb(); } }),
|
||||
});
|
||||
|
||||
const lines = stdoutChunks.join('').trim().split('\n');
|
||||
expect(lines.length).toBe(2);
|
||||
const callResponse = JSON.parse(lines[1]);
|
||||
expect(callResponse.result.content[0].text).toBe('tool result');
|
||||
});
|
||||
|
||||
it('forwards Authorization header when token provided', async () => {
|
||||
recorded.length = 0;
|
||||
const stdin = new Readable({ read() {} });
|
||||
const { stdout } = createMockStreams();
|
||||
|
||||
const initMsg = JSON.stringify({
|
||||
jsonrpc: '2.0', id: 1, method: 'initialize',
|
||||
params: { protocolVersion: '2024-11-05', capabilities: {}, clientInfo: { name: 'test', version: '1.0' } },
|
||||
});
|
||||
|
||||
pushAndEnd(stdin, [initMsg]);
|
||||
|
||||
await runMcpBridge({
|
||||
projectName: 'test-project',
|
||||
mcplocalUrl: `http://localhost:${mockPort}`,
|
||||
token: 'my-secret-token',
|
||||
stdin, stdout, stderr: new Writable({ write(_, __, cb) { cb(); } }),
|
||||
});
|
||||
|
||||
const post = recorded.find((r) => r.method === 'POST');
|
||||
expect(post).toBeDefined();
|
||||
expect(post!.headers['authorization']).toBe('Bearer my-secret-token');
|
||||
});
|
||||
|
||||
it('does not send Authorization header when no token', async () => {
|
||||
recorded.length = 0;
|
||||
const stdin = new Readable({ read() {} });
|
||||
const { stdout } = createMockStreams();
|
||||
|
||||
const initMsg = JSON.stringify({
|
||||
jsonrpc: '2.0', id: 1, method: 'initialize',
|
||||
params: { protocolVersion: '2024-11-05', capabilities: {}, clientInfo: { name: 'test', version: '1.0' } },
|
||||
});
|
||||
|
||||
pushAndEnd(stdin, [initMsg]);
|
||||
|
||||
await runMcpBridge({
|
||||
projectName: 'test-project',
|
||||
mcplocalUrl: `http://localhost:${mockPort}`,
|
||||
stdin, stdout, stderr: new Writable({ write(_, __, cb) { cb(); } }),
|
||||
});
|
||||
|
||||
const post = recorded.find((r) => r.method === 'POST');
|
||||
expect(post).toBeDefined();
|
||||
expect(post!.headers['authorization']).toBeUndefined();
|
||||
});
|
||||
|
||||
it('sends DELETE to clean up session on stdin EOF', async () => {
|
||||
recorded.length = 0;
|
||||
const stdin = new Readable({ read() {} });
|
||||
const { stdout } = createMockStreams();
|
||||
|
||||
const initMsg = JSON.stringify({
|
||||
jsonrpc: '2.0', id: 1, method: 'initialize',
|
||||
params: { protocolVersion: '2024-11-05', capabilities: {}, clientInfo: { name: 'test', version: '1.0' } },
|
||||
});
|
||||
|
||||
pushAndEnd(stdin, [initMsg]);
|
||||
|
||||
await runMcpBridge({
|
||||
projectName: 'test-project',
|
||||
mcplocalUrl: `http://localhost:${mockPort}`,
|
||||
stdin, stdout, stderr: new Writable({ write(_, __, cb) { cb(); } }),
|
||||
});
|
||||
|
||||
// Should have a DELETE request for session cleanup
|
||||
const deleteReq = recorded.find((r) => r.method === 'DELETE');
|
||||
expect(deleteReq).toBeDefined();
|
||||
expect(deleteReq!.headers['mcp-session-id']).toMatch(/^session-/);
|
||||
});
|
||||
|
||||
it('does not send DELETE if no session was established', async () => {
|
||||
recorded.length = 0;
|
||||
const stdin = new Readable({ read() {} });
|
||||
const { stdout } = createMockStreams();
|
||||
|
||||
// Push EOF immediately with no messages
|
||||
stdin.push(null);
|
||||
|
||||
await runMcpBridge({
|
||||
projectName: 'test-project',
|
||||
mcplocalUrl: `http://localhost:${mockPort}`,
|
||||
stdin, stdout, stderr: new Writable({ write(_, __, cb) { cb(); } }),
|
||||
});
|
||||
|
||||
expect(recorded.filter((r) => r.method === 'DELETE')).toHaveLength(0);
|
||||
});
|
||||
|
||||
it('writes errors to stderr, not stdout', async () => {
|
||||
recorded.length = 0;
|
||||
const stdin = new Readable({ read() {} });
|
||||
const { stdout, stdoutChunks, stderr, stderrChunks } = createMockStreams();
|
||||
|
||||
// Send to a non-existent port to trigger connection error
|
||||
const badMsg = JSON.stringify({ jsonrpc: '2.0', id: 1, method: 'initialize', params: {} });
|
||||
pushAndEnd(stdin, [badMsg]);
|
||||
|
||||
await runMcpBridge({
|
||||
projectName: 'test-project',
|
||||
mcplocalUrl: 'http://localhost:1', // will fail to connect
|
||||
stdin, stdout, stderr,
|
||||
});
|
||||
|
||||
// Error should be on stderr
|
||||
expect(stderrChunks.join('')).toContain('MCP bridge error');
|
||||
// stdout should be empty (no corrupted output)
|
||||
expect(stdoutChunks.join('')).toBe('');
|
||||
});
|
||||
|
||||
it('skips blank lines in stdin', async () => {
|
||||
recorded.length = 0;
|
||||
const stdin = new Readable({ read() {} });
|
||||
const { stdout, stdoutChunks } = createMockStreams();
|
||||
|
||||
const initMsg = JSON.stringify({
|
||||
jsonrpc: '2.0', id: 1, method: 'initialize',
|
||||
params: { protocolVersion: '2024-11-05', capabilities: {}, clientInfo: { name: 'test', version: '1.0' } },
|
||||
});
|
||||
|
||||
pushAndEnd(stdin, ['', ' ', initMsg, '']);
|
||||
|
||||
await runMcpBridge({
|
||||
projectName: 'test-project',
|
||||
mcplocalUrl: `http://localhost:${mockPort}`,
|
||||
stdin, stdout, stderr: new Writable({ write(_, __, cb) { cb(); } }),
|
||||
});
|
||||
|
||||
// Only one POST (for the actual message)
|
||||
const posts = recorded.filter((r) => r.method === 'POST');
|
||||
expect(posts).toHaveLength(1);
|
||||
|
||||
// One response line
|
||||
const lines = stdoutChunks.join('').trim().split('\n');
|
||||
expect(lines).toHaveLength(1);
|
||||
});
|
||||
|
||||
it('handles SSE (text/event-stream) responses', async () => {
|
||||
recorded.length = 0;
|
||||
const stdin = new Readable({ read() {} });
|
||||
const { stdout, stdoutChunks } = createMockStreams();
|
||||
|
||||
const initMsg = JSON.stringify({
|
||||
jsonrpc: '2.0', id: 1, method: 'initialize',
|
||||
params: { protocolVersion: '2024-11-05', capabilities: {}, clientInfo: { name: 'test', version: '1.0' } },
|
||||
});
|
||||
|
||||
pushAndEnd(stdin, [initMsg]);
|
||||
|
||||
await runMcpBridge({
|
||||
projectName: 'sse-project', // triggers SSE response from mock server
|
||||
mcplocalUrl: `http://localhost:${mockPort}`,
|
||||
stdin, stdout, stderr: new Writable({ write(_, __, cb) { cb(); } }),
|
||||
});
|
||||
|
||||
// Should extract JSON from SSE data: lines
|
||||
const output = stdoutChunks.join('').trim();
|
||||
const parsed = JSON.parse(output);
|
||||
expect(parsed.result.serverInfo.name).toBe('test-server');
|
||||
});
|
||||
|
||||
it('URL-encodes project name', async () => {
|
||||
recorded.length = 0;
|
||||
const stdin = new Readable({ read() {} });
|
||||
const { stdout } = createMockStreams();
|
||||
const { stderr } = createMockStreams();
|
||||
|
||||
const initMsg = JSON.stringify({
|
||||
jsonrpc: '2.0', id: 1, method: 'initialize',
|
||||
params: { protocolVersion: '2024-11-05', capabilities: {}, clientInfo: { name: 'test', version: '1.0' } },
|
||||
});
|
||||
|
||||
pushAndEnd(stdin, [initMsg]);
|
||||
|
||||
await runMcpBridge({
|
||||
projectName: 'my project',
|
||||
mcplocalUrl: `http://localhost:${mockPort}`,
|
||||
stdin, stdout, stderr,
|
||||
});
|
||||
|
||||
const post = recorded.find((r) => r.method === 'POST');
|
||||
expect(post?.url).toBe('/projects/my%20project/mcp');
|
||||
});
|
||||
});
|
||||
|
||||
describe('createMcpCommand', () => {
|
||||
it('accepts --project option directly', () => {
|
||||
const cmd = createMcpCommand({
|
||||
getProject: () => undefined,
|
||||
configLoader: () => ({ mcplocalUrl: 'http://localhost:3200' }),
|
||||
credentialsLoader: () => null,
|
||||
});
|
||||
const opt = cmd.options.find((o) => o.long === '--project');
|
||||
expect(opt).toBeDefined();
|
||||
expect(opt!.short).toBe('-p');
|
||||
});
|
||||
|
||||
it('parses --project from command args', async () => {
|
||||
let capturedProject: string | undefined;
|
||||
const cmd = createMcpCommand({
|
||||
getProject: () => undefined,
|
||||
configLoader: () => ({ mcplocalUrl: `http://localhost:${mockPort}` }),
|
||||
credentialsLoader: () => null,
|
||||
});
|
||||
// Override the action to capture what project was parsed
|
||||
// We test by checking the option parsing works, not by running the full bridge
|
||||
const parsed = cmd.parse(['--project', 'test-proj'], { from: 'user' });
|
||||
capturedProject = parsed.opts().project;
|
||||
expect(capturedProject).toBe('test-proj');
|
||||
});
|
||||
|
||||
it('parses -p shorthand from command args', () => {
|
||||
const cmd = createMcpCommand({
|
||||
getProject: () => undefined,
|
||||
configLoader: () => ({ mcplocalUrl: `http://localhost:${mockPort}` }),
|
||||
credentialsLoader: () => null,
|
||||
});
|
||||
const parsed = cmd.parse(['-p', 'my-project'], { from: 'user' });
|
||||
expect(parsed.opts().project).toBe('my-project');
|
||||
});
|
||||
});
|
||||
@@ -30,8 +30,8 @@ describe('project with new fields', () => {
|
||||
'project', 'smart-home',
|
||||
'-d', 'Smart home project',
|
||||
'--proxy-mode', 'filtered',
|
||||
'--proxy-mode-llm-provider', 'gemini-cli',
|
||||
'--proxy-mode-llm-model', 'gemini-2.0-flash',
|
||||
'--llm-provider', 'gemini-cli',
|
||||
'--llm-model', 'gemini-2.0-flash',
|
||||
'--server', 'my-grafana',
|
||||
'--server', 'my-ha',
|
||||
], { from: 'user' });
|
||||
|
||||
@@ -3,19 +3,38 @@ import { mkdtempSync, rmSync } from 'node:fs';
|
||||
import { join } from 'node:path';
|
||||
import { tmpdir } from 'node:os';
|
||||
import { createStatusCommand } from '../../src/commands/status.js';
|
||||
import type { StatusCommandDeps } from '../../src/commands/status.js';
|
||||
import { saveConfig, DEFAULT_CONFIG } from '../../src/config/index.js';
|
||||
import { saveCredentials } from '../../src/auth/index.js';
|
||||
|
||||
let tempDir: string;
|
||||
let output: string[];
|
||||
let written: string[];
|
||||
|
||||
function log(...args: string[]) {
|
||||
output.push(args.join(' '));
|
||||
}
|
||||
|
||||
function write(text: string) {
|
||||
written.push(text);
|
||||
}
|
||||
|
||||
function baseDeps(overrides?: Partial<StatusCommandDeps>): Partial<StatusCommandDeps> {
|
||||
return {
|
||||
configDeps: { configDir: tempDir },
|
||||
credentialsDeps: { configDir: tempDir },
|
||||
log,
|
||||
write,
|
||||
checkHealth: async () => true,
|
||||
isTTY: false,
|
||||
...overrides,
|
||||
};
|
||||
}
|
||||
|
||||
beforeEach(() => {
|
||||
tempDir = mkdtempSync(join(tmpdir(), 'mcpctl-status-test-'));
|
||||
output = [];
|
||||
written = [];
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
@@ -24,12 +43,7 @@ afterEach(() => {
|
||||
|
||||
describe('status command', () => {
|
||||
it('shows status in table format', async () => {
|
||||
const cmd = createStatusCommand({
|
||||
configDeps: { configDir: tempDir },
|
||||
credentialsDeps: { configDir: tempDir },
|
||||
log,
|
||||
checkHealth: async () => true,
|
||||
});
|
||||
const cmd = createStatusCommand(baseDeps());
|
||||
await cmd.parseAsync([], { from: 'user' });
|
||||
const out = output.join('\n');
|
||||
expect(out).toContain('mcpctl v');
|
||||
@@ -39,46 +53,26 @@ describe('status command', () => {
|
||||
});
|
||||
|
||||
it('shows unreachable when daemons are down', async () => {
|
||||
const cmd = createStatusCommand({
|
||||
configDeps: { configDir: tempDir },
|
||||
credentialsDeps: { configDir: tempDir },
|
||||
log,
|
||||
checkHealth: async () => false,
|
||||
});
|
||||
const cmd = createStatusCommand(baseDeps({ checkHealth: async () => false }));
|
||||
await cmd.parseAsync([], { from: 'user' });
|
||||
expect(output.join('\n')).toContain('unreachable');
|
||||
});
|
||||
|
||||
it('shows not logged in when no credentials', async () => {
|
||||
const cmd = createStatusCommand({
|
||||
configDeps: { configDir: tempDir },
|
||||
credentialsDeps: { configDir: tempDir },
|
||||
log,
|
||||
checkHealth: async () => true,
|
||||
});
|
||||
const cmd = createStatusCommand(baseDeps());
|
||||
await cmd.parseAsync([], { from: 'user' });
|
||||
expect(output.join('\n')).toContain('not logged in');
|
||||
});
|
||||
|
||||
it('shows logged in user when credentials exist', async () => {
|
||||
saveCredentials({ token: 'tok', mcpdUrl: 'http://x:3100', user: 'alice@example.com' }, { configDir: tempDir });
|
||||
const cmd = createStatusCommand({
|
||||
configDeps: { configDir: tempDir },
|
||||
credentialsDeps: { configDir: tempDir },
|
||||
log,
|
||||
checkHealth: async () => true,
|
||||
});
|
||||
const cmd = createStatusCommand(baseDeps());
|
||||
await cmd.parseAsync([], { from: 'user' });
|
||||
expect(output.join('\n')).toContain('logged in as alice@example.com');
|
||||
});
|
||||
|
||||
it('shows status in JSON format', async () => {
|
||||
const cmd = createStatusCommand({
|
||||
configDeps: { configDir: tempDir },
|
||||
credentialsDeps: { configDir: tempDir },
|
||||
log,
|
||||
checkHealth: async () => true,
|
||||
});
|
||||
const cmd = createStatusCommand(baseDeps());
|
||||
await cmd.parseAsync(['-o', 'json'], { from: 'user' });
|
||||
const parsed = JSON.parse(output[0]) as Record<string, unknown>;
|
||||
expect(parsed['version']).toBe('0.1.0');
|
||||
@@ -87,12 +81,7 @@ describe('status command', () => {
|
||||
});
|
||||
|
||||
it('shows status in YAML format', async () => {
|
||||
const cmd = createStatusCommand({
|
||||
configDeps: { configDir: tempDir },
|
||||
credentialsDeps: { configDir: tempDir },
|
||||
log,
|
||||
checkHealth: async () => false,
|
||||
});
|
||||
const cmd = createStatusCommand(baseDeps({ checkHealth: async () => false }));
|
||||
await cmd.parseAsync(['-o', 'yaml'], { from: 'user' });
|
||||
expect(output[0]).toContain('mcplocalReachable: false');
|
||||
});
|
||||
@@ -100,15 +89,12 @@ describe('status command', () => {
|
||||
it('checks correct URLs from config', async () => {
|
||||
saveConfig({ ...DEFAULT_CONFIG, mcplocalUrl: 'http://local:3200', mcpdUrl: 'http://remote:3100' }, { configDir: tempDir });
|
||||
const checkedUrls: string[] = [];
|
||||
const cmd = createStatusCommand({
|
||||
configDeps: { configDir: tempDir },
|
||||
credentialsDeps: { configDir: tempDir },
|
||||
log,
|
||||
const cmd = createStatusCommand(baseDeps({
|
||||
checkHealth: async (url) => {
|
||||
checkedUrls.push(url);
|
||||
return false;
|
||||
},
|
||||
});
|
||||
}));
|
||||
await cmd.parseAsync([], { from: 'user' });
|
||||
expect(checkedUrls).toContain('http://local:3200');
|
||||
expect(checkedUrls).toContain('http://remote:3100');
|
||||
@@ -116,14 +102,100 @@ describe('status command', () => {
|
||||
|
||||
it('shows registries from config', async () => {
|
||||
saveConfig({ ...DEFAULT_CONFIG, registries: ['official'] }, { configDir: tempDir });
|
||||
const cmd = createStatusCommand({
|
||||
configDeps: { configDir: tempDir },
|
||||
credentialsDeps: { configDir: tempDir },
|
||||
log,
|
||||
checkHealth: async () => true,
|
||||
});
|
||||
const cmd = createStatusCommand(baseDeps());
|
||||
await cmd.parseAsync([], { from: 'user' });
|
||||
expect(output.join('\n')).toContain('official');
|
||||
expect(output.join('\n')).not.toContain('glama');
|
||||
});
|
||||
|
||||
it('shows LLM not configured hint when no LLM is set', async () => {
|
||||
const cmd = createStatusCommand(baseDeps());
|
||||
await cmd.parseAsync([], { from: 'user' });
|
||||
const out = output.join('\n');
|
||||
expect(out).toContain('LLM:');
|
||||
expect(out).toContain('not configured');
|
||||
expect(out).toContain('mcpctl config setup');
|
||||
});
|
||||
|
||||
it('shows green check when LLM is healthy (non-TTY)', async () => {
|
||||
saveConfig({ ...DEFAULT_CONFIG, llm: { provider: 'anthropic', model: 'claude-haiku-3-5-20241022' } }, { configDir: tempDir });
|
||||
const cmd = createStatusCommand(baseDeps({ checkLlm: async () => 'ok' }));
|
||||
await cmd.parseAsync([], { from: 'user' });
|
||||
const out = output.join('\n');
|
||||
expect(out).toContain('anthropic / claude-haiku-3-5-20241022');
|
||||
expect(out).toContain('✓ ok');
|
||||
});
|
||||
|
||||
it('shows red cross when LLM check fails (non-TTY)', async () => {
|
||||
saveConfig({ ...DEFAULT_CONFIG, llm: { provider: 'gemini-cli', model: 'gemini-2.5-flash' } }, { configDir: tempDir });
|
||||
const cmd = createStatusCommand(baseDeps({ checkLlm: async () => 'not authenticated' }));
|
||||
await cmd.parseAsync([], { from: 'user' });
|
||||
const out = output.join('\n');
|
||||
expect(out).toContain('✗ not authenticated');
|
||||
});
|
||||
|
||||
it('shows error message from mcplocal', async () => {
|
||||
saveConfig({ ...DEFAULT_CONFIG, llm: { provider: 'gemini-cli', model: 'gemini-2.5-flash' } }, { configDir: tempDir });
|
||||
const cmd = createStatusCommand(baseDeps({ checkLlm: async () => 'binary not found' }));
|
||||
await cmd.parseAsync([], { from: 'user' });
|
||||
expect(output.join('\n')).toContain('✗ binary not found');
|
||||
});
|
||||
|
||||
it('queries mcplocal URL for LLM health', async () => {
|
||||
saveConfig({ ...DEFAULT_CONFIG, mcplocalUrl: 'http://custom:9999', llm: { provider: 'gemini-cli', model: 'gemini-2.5-flash' } }, { configDir: tempDir });
|
||||
let queriedUrl = '';
|
||||
const cmd = createStatusCommand(baseDeps({
|
||||
checkLlm: async (url) => { queriedUrl = url; return 'ok'; },
|
||||
}));
|
||||
await cmd.parseAsync([], { from: 'user' });
|
||||
expect(queriedUrl).toBe('http://custom:9999');
|
||||
});
|
||||
|
||||
it('uses spinner on TTY and writes final result', async () => {
|
||||
saveConfig({ ...DEFAULT_CONFIG, llm: { provider: 'gemini-cli', model: 'gemini-2.5-flash' } }, { configDir: tempDir });
|
||||
const cmd = createStatusCommand(baseDeps({
|
||||
isTTY: true,
|
||||
checkLlm: async () => 'ok',
|
||||
}));
|
||||
await cmd.parseAsync([], { from: 'user' });
|
||||
// On TTY, the final LLM line goes through write(), not log()
|
||||
const finalWrite = written[written.length - 1];
|
||||
expect(finalWrite).toContain('gemini-cli / gemini-2.5-flash');
|
||||
expect(finalWrite).toContain('✓ ok');
|
||||
});
|
||||
|
||||
it('uses spinner on TTY and shows failure', async () => {
|
||||
saveConfig({ ...DEFAULT_CONFIG, llm: { provider: 'gemini-cli', model: 'gemini-2.5-flash' } }, { configDir: tempDir });
|
||||
const cmd = createStatusCommand(baseDeps({
|
||||
isTTY: true,
|
||||
checkLlm: async () => 'not authenticated',
|
||||
}));
|
||||
await cmd.parseAsync([], { from: 'user' });
|
||||
const finalWrite = written[written.length - 1];
|
||||
expect(finalWrite).toContain('✗ not authenticated');
|
||||
});
|
||||
|
||||
it('shows not configured when LLM provider is none', async () => {
|
||||
saveConfig({ ...DEFAULT_CONFIG, llm: { provider: 'none' } }, { configDir: tempDir });
|
||||
const cmd = createStatusCommand(baseDeps());
|
||||
await cmd.parseAsync([], { from: 'user' });
|
||||
expect(output.join('\n')).toContain('not configured');
|
||||
});
|
||||
|
||||
it('includes llm and llmStatus in JSON output', async () => {
|
||||
saveConfig({ ...DEFAULT_CONFIG, llm: { provider: 'gemini-cli', model: 'gemini-2.5-flash' } }, { configDir: tempDir });
|
||||
const cmd = createStatusCommand(baseDeps({ checkLlm: async () => 'ok' }));
|
||||
await cmd.parseAsync(['-o', 'json'], { from: 'user' });
|
||||
const parsed = JSON.parse(output[0]) as Record<string, unknown>;
|
||||
expect(parsed['llm']).toBe('gemini-cli / gemini-2.5-flash');
|
||||
expect(parsed['llmStatus']).toBe('ok');
|
||||
});
|
||||
|
||||
it('includes null llm in JSON output when not configured', async () => {
|
||||
const cmd = createStatusCommand(baseDeps());
|
||||
await cmd.parseAsync(['-o', 'json'], { from: 'user' });
|
||||
const parsed = JSON.parse(output[0]) as Record<string, unknown>;
|
||||
expect(parsed['llm']).toBeNull();
|
||||
expect(parsed['llmStatus']).toBeNull();
|
||||
});
|
||||
});
|
||||
|
||||
@@ -15,7 +15,7 @@ describe('fish completions', () => {
|
||||
});
|
||||
|
||||
it('does not offer resource types without __mcpctl_needs_resource_type guard', () => {
|
||||
const resourceTypes = ['servers', 'instances', 'secrets', 'templates', 'projects', 'users', 'groups', 'rbac'];
|
||||
const resourceTypes = ['servers', 'instances', 'secrets', 'templates', 'projects', 'users', 'groups', 'rbac', 'prompts', 'promptrequests'];
|
||||
const lines = fishFile.split('\n').filter((l) => l.startsWith('complete '));
|
||||
|
||||
for (const line of lines) {
|
||||
@@ -56,22 +56,60 @@ describe('fish completions', () => {
|
||||
expect(fishFile).toContain("complete -c mcpctl -l project");
|
||||
});
|
||||
|
||||
it('attach-server only shows with --project', () => {
|
||||
const lines = fishFile.split('\n').filter((l) => l.includes('attach-server') && l.startsWith('complete'));
|
||||
it('attach-server command only shows with --project', () => {
|
||||
// Only check lines that OFFER attach-server as a command (via -a attach-server), not argument completions
|
||||
const lines = fishFile.split('\n').filter((l) =>
|
||||
l.startsWith('complete') && l.includes("-a attach-server"));
|
||||
expect(lines.length).toBeGreaterThan(0);
|
||||
for (const line of lines) {
|
||||
expect(line).toContain('__mcpctl_has_project');
|
||||
}
|
||||
});
|
||||
|
||||
it('detach-server only shows with --project', () => {
|
||||
const lines = fishFile.split('\n').filter((l) => l.includes('detach-server') && l.startsWith('complete'));
|
||||
it('detach-server command only shows with --project', () => {
|
||||
const lines = fishFile.split('\n').filter((l) =>
|
||||
l.startsWith('complete') && l.includes("-a detach-server"));
|
||||
expect(lines.length).toBeGreaterThan(0);
|
||||
for (const line of lines) {
|
||||
expect(line).toContain('__mcpctl_has_project');
|
||||
}
|
||||
});
|
||||
|
||||
it('resource name functions use jq .[][].name to unwrap wrapped JSON and avoid nested matches', () => {
|
||||
// API returns { "resources": [...] } not [...], so .[].name fails silently.
|
||||
// Must use .[][].name to unwrap the outer object then iterate the array.
|
||||
// Also must not use string match regex which matches nested name fields.
|
||||
const resourceNamesFn = fishFile.match(/function __mcpctl_resource_names[\s\S]*?^end/m)?.[0] ?? '';
|
||||
const projectNamesFn = fishFile.match(/function __mcpctl_project_names[\s\S]*?^end/m)?.[0] ?? '';
|
||||
|
||||
expect(resourceNamesFn, '__mcpctl_resource_names must use jq .[][].name').toContain("jq -r '.[][].name'");
|
||||
expect(resourceNamesFn, '__mcpctl_resource_names must not use string match on name').not.toMatch(/string match.*"name"/);
|
||||
|
||||
expect(projectNamesFn, '__mcpctl_project_names must use jq .[][].name').toContain("jq -r '.[][].name'");
|
||||
expect(projectNamesFn, '__mcpctl_project_names must not use string match on name').not.toMatch(/string match.*"name"/);
|
||||
});
|
||||
|
||||
it('instances use server.name instead of name', () => {
|
||||
const resourceNamesFn = fishFile.match(/function __mcpctl_resource_names[\s\S]*?^end/m)?.[0] ?? '';
|
||||
expect(resourceNamesFn, 'must handle instances via server.name').toContain('.server.name');
|
||||
});
|
||||
|
||||
it('attach-server completes with available (unattached) servers and guards against repeat', () => {
|
||||
const attachLine = fishFile.split('\n').find((l) =>
|
||||
l.startsWith('complete') && l.includes('__fish_seen_subcommand_from attach-server'));
|
||||
expect(attachLine, 'attach-server argument completion must exist').toBeDefined();
|
||||
expect(attachLine, 'attach-server must use __mcpctl_available_servers').toContain('__mcpctl_available_servers');
|
||||
expect(attachLine, 'attach-server must guard with __mcpctl_needs_server_arg').toContain('__mcpctl_needs_server_arg');
|
||||
});
|
||||
|
||||
it('detach-server completes with project servers and guards against repeat', () => {
|
||||
const detachLine = fishFile.split('\n').find((l) =>
|
||||
l.startsWith('complete') && l.includes('__fish_seen_subcommand_from detach-server'));
|
||||
expect(detachLine, 'detach-server argument completion must exist').toBeDefined();
|
||||
expect(detachLine, 'detach-server must use __mcpctl_project_servers').toContain('__mcpctl_project_servers');
|
||||
expect(detachLine, 'detach-server must guard with __mcpctl_needs_server_arg').toContain('__mcpctl_needs_server_arg');
|
||||
});
|
||||
|
||||
it('non-project commands do not show with --project', () => {
|
||||
const nonProjectCmds = ['status', 'login', 'logout', 'config', 'apply', 'backup', 'restore'];
|
||||
const lines = fishFile.split('\n').filter((l) => l.startsWith('complete') && l.includes('-a '));
|
||||
@@ -105,11 +143,34 @@ describe('bash completions', () => {
|
||||
expect(bashFile).toMatch(/get\|describe\|delete\)[\s\S]*?_mcpctl_resource_names/);
|
||||
});
|
||||
|
||||
it('offers server names for attach-server/detach-server', () => {
|
||||
expect(bashFile).toMatch(/attach-server\|detach-server\)[\s\S]*?_mcpctl_resource_names.*servers/);
|
||||
it('attach-server filters out already-attached servers and guards against repeat', () => {
|
||||
const attachBlock = bashFile.match(/attach-server\)[\s\S]*?return ;;/)?.[0] ?? '';
|
||||
expect(attachBlock, 'attach-server must use _mcpctl_get_project_value').toContain('_mcpctl_get_project_value');
|
||||
expect(attachBlock, 'attach-server must query project servers to exclude').toContain('--project');
|
||||
expect(attachBlock, 'attach-server must check position to prevent repeat').toContain('cword - subcmd_pos');
|
||||
});
|
||||
|
||||
it('detach-server shows only project servers and guards against repeat', () => {
|
||||
const detachBlock = bashFile.match(/detach-server\)[\s\S]*?return ;;/)?.[0] ?? '';
|
||||
expect(detachBlock, 'detach-server must use _mcpctl_get_project_value').toContain('_mcpctl_get_project_value');
|
||||
expect(detachBlock, 'detach-server must query project servers').toContain('--project');
|
||||
expect(detachBlock, 'detach-server must check position to prevent repeat').toContain('cword - subcmd_pos');
|
||||
});
|
||||
|
||||
it('instances use server.name instead of name', () => {
|
||||
const fnMatch = bashFile.match(/_mcpctl_resource_names\(\)[\s\S]*?\n\s*\}/)?.[0] ?? '';
|
||||
expect(fnMatch, 'must handle instances via .server.name').toContain('.server.name');
|
||||
});
|
||||
|
||||
it('defines --project option', () => {
|
||||
expect(bashFile).toContain('--project');
|
||||
});
|
||||
|
||||
it('resource name function uses jq .[][].name to unwrap wrapped JSON and avoid nested matches', () => {
|
||||
const fnMatch = bashFile.match(/_mcpctl_resource_names\(\)[\s\S]*?\n\s*\}/)?.[0] ?? '';
|
||||
expect(fnMatch, '_mcpctl_resource_names must use jq .[][].name').toContain("jq -r '.[][].name'");
|
||||
expect(fnMatch, '_mcpctl_resource_names must not use grep on name').not.toMatch(/grep.*"name"/);
|
||||
// Guard against .[].name (single bracket) which fails on wrapped JSON
|
||||
expect(fnMatch, '_mcpctl_resource_names must not use .[].name (needs .[][].name)').not.toMatch(/jq.*'\.\[\]\.name'/);
|
||||
});
|
||||
});
|
||||
|
||||
@@ -170,6 +170,7 @@ model Project {
|
||||
id String @id @default(cuid())
|
||||
name String @unique
|
||||
description String @default("")
|
||||
prompt String @default("")
|
||||
proxyMode String @default("direct")
|
||||
llmProvider String?
|
||||
llmModel String?
|
||||
@@ -180,6 +181,8 @@ model Project {
|
||||
|
||||
owner User @relation(fields: [ownerId], references: [id], onDelete: Cascade)
|
||||
servers ProjectServer[]
|
||||
prompts Prompt[]
|
||||
promptRequests PromptRequest[]
|
||||
|
||||
@@index([name])
|
||||
@@index([ownerId])
|
||||
@@ -227,6 +230,41 @@ enum InstanceStatus {
|
||||
ERROR
|
||||
}
|
||||
|
||||
// ── Prompts (approved content resources) ──
|
||||
|
||||
model Prompt {
|
||||
id String @id @default(cuid())
|
||||
name String
|
||||
content String @db.Text
|
||||
projectId String?
|
||||
version Int @default(1)
|
||||
createdAt DateTime @default(now())
|
||||
updatedAt DateTime @updatedAt
|
||||
|
||||
project Project? @relation(fields: [projectId], references: [id], onDelete: Cascade)
|
||||
|
||||
@@unique([name, projectId])
|
||||
@@index([projectId])
|
||||
}
|
||||
|
||||
// ── Prompt Requests (pending proposals from LLM sessions) ──
|
||||
|
||||
model PromptRequest {
|
||||
id String @id @default(cuid())
|
||||
name String
|
||||
content String @db.Text
|
||||
projectId String?
|
||||
createdBySession String?
|
||||
createdByUserId String?
|
||||
createdAt DateTime @default(now())
|
||||
|
||||
project Project? @relation(fields: [projectId], references: [id], onDelete: Cascade)
|
||||
|
||||
@@unique([name, projectId])
|
||||
@@index([projectId])
|
||||
@@index([createdBySession])
|
||||
}
|
||||
|
||||
// ── Audit Logs ──
|
||||
|
||||
model AuditLog {
|
||||
|
||||
@@ -18,6 +18,8 @@ import {
|
||||
UserRepository,
|
||||
GroupRepository,
|
||||
} from './repositories/index.js';
|
||||
import { PromptRepository } from './repositories/prompt.repository.js';
|
||||
import { PromptRequestRepository } from './repositories/prompt-request.repository.js';
|
||||
import {
|
||||
McpServerService,
|
||||
SecretService,
|
||||
@@ -56,6 +58,8 @@ import {
|
||||
registerUserRoutes,
|
||||
registerGroupRoutes,
|
||||
} from './routes/index.js';
|
||||
import { registerPromptRoutes } from './routes/prompts.js';
|
||||
import { PromptService } from './services/prompt.service.js';
|
||||
|
||||
type PermissionCheck =
|
||||
| { kind: 'resource'; resource: string; action: RbacAction; resourceName?: string }
|
||||
@@ -88,11 +92,38 @@ function mapUrlToPermission(method: string, url: string): PermissionCheck {
|
||||
'rbac': 'rbac',
|
||||
'audit-logs': 'rbac',
|
||||
'mcp': 'servers',
|
||||
'prompts': 'prompts',
|
||||
'promptrequests': 'promptrequests',
|
||||
};
|
||||
|
||||
const resource = resourceMap[segment];
|
||||
if (resource === undefined) return { kind: 'skip' };
|
||||
|
||||
// Special case: /api/v1/promptrequests/:id/approve → needs both delete+promptrequests and create+prompts
|
||||
// We check delete on promptrequests (the harder permission); create on prompts is checked in the service layer
|
||||
const approveMatch = url.match(/^\/api\/v1\/promptrequests\/([^/?]+)\/approve/);
|
||||
if (approveMatch?.[1]) {
|
||||
return { kind: 'resource', resource: 'promptrequests', action: 'delete', resourceName: approveMatch[1] };
|
||||
}
|
||||
|
||||
// Special case: /api/v1/projects/:name/prompts/visible → view prompts
|
||||
const visiblePromptsMatch = url.match(/^\/api\/v1\/projects\/([^/?]+)\/prompts\/visible/);
|
||||
if (visiblePromptsMatch?.[1]) {
|
||||
return { kind: 'resource', resource: 'prompts', action: 'view' };
|
||||
}
|
||||
|
||||
// Special case: /api/v1/projects/:name/promptrequests → create promptrequests
|
||||
const projectPromptrequestsMatch = url.match(/^\/api\/v1\/projects\/([^/?]+)\/promptrequests/);
|
||||
if (projectPromptrequestsMatch?.[1] && method === 'POST') {
|
||||
return { kind: 'resource', resource: 'promptrequests', action: 'create' };
|
||||
}
|
||||
|
||||
// Special case: /api/v1/projects/:id/instructions → view projects
|
||||
const instructionsMatch = url.match(/^\/api\/v1\/projects\/([^/?]+)\/instructions/);
|
||||
if (instructionsMatch?.[1]) {
|
||||
return { kind: 'resource', resource: 'projects', action: 'view', resourceName: instructionsMatch[1] };
|
||||
}
|
||||
|
||||
// Special case: /api/v1/projects/:id/mcp-config → requires 'expose' permission
|
||||
const mcpConfigMatch = url.match(/^\/api\/v1\/projects\/([^/?]+)\/mcp-config/);
|
||||
if (mcpConfigMatch?.[1]) {
|
||||
@@ -243,11 +274,14 @@ async function main(): Promise<void> {
|
||||
const restoreService = new RestoreService(serverRepo, projectRepo, secretRepo, userRepo, groupRepo, rbacDefinitionRepo);
|
||||
const authService = new AuthService(prisma);
|
||||
const templateService = new TemplateService(templateRepo);
|
||||
const mcpProxyService = new McpProxyService(instanceRepo, serverRepo);
|
||||
const mcpProxyService = new McpProxyService(instanceRepo, serverRepo, orchestrator);
|
||||
const rbacDefinitionService = new RbacDefinitionService(rbacDefinitionRepo);
|
||||
const rbacService = new RbacService(rbacDefinitionRepo, prisma);
|
||||
const userService = new UserService(userRepo);
|
||||
const groupService = new GroupService(groupRepo, userRepo);
|
||||
const promptRepo = new PromptRepository(prisma);
|
||||
const promptRequestRepo = new PromptRequestRepository(prisma);
|
||||
const promptService = new PromptService(promptRepo, promptRequestRepo, projectRepo);
|
||||
|
||||
// Auth middleware for global hooks
|
||||
const authMiddleware = createAuthMiddleware({
|
||||
@@ -294,9 +328,13 @@ async function main(): Promise<void> {
|
||||
const check = mapUrlToPermission(request.method, url);
|
||||
if (check.kind === 'skip') return;
|
||||
|
||||
// Extract service account identity from header (sent by mcplocal)
|
||||
const saHeader = request.headers['x-service-account'];
|
||||
const serviceAccountName = typeof saHeader === 'string' ? saHeader : undefined;
|
||||
|
||||
let allowed: boolean;
|
||||
if (check.kind === 'operation') {
|
||||
allowed = await rbacService.canRunOperation(request.userId, check.operation);
|
||||
allowed = await rbacService.canRunOperation(request.userId, check.operation, serviceAccountName);
|
||||
} else {
|
||||
// Resolve CUID → human name for name-scoped RBAC bindings
|
||||
if (check.resourceName !== undefined && CUID_RE.test(check.resourceName)) {
|
||||
@@ -306,10 +344,10 @@ async function main(): Promise<void> {
|
||||
if (entity) check.resourceName = entity.name;
|
||||
}
|
||||
}
|
||||
allowed = await rbacService.canAccess(request.userId, check.action, check.resource, check.resourceName);
|
||||
allowed = await rbacService.canAccess(request.userId, check.action, check.resource, check.resourceName, serviceAccountName);
|
||||
// Compute scope for list filtering (used by preSerialization hook)
|
||||
if (allowed && check.resourceName === undefined) {
|
||||
request.rbacScope = await rbacService.getAllowedScope(request.userId, check.action, check.resource);
|
||||
request.rbacScope = await rbacService.getAllowedScope(request.userId, check.action, check.resource, serviceAccountName);
|
||||
}
|
||||
}
|
||||
if (!allowed) {
|
||||
@@ -335,6 +373,7 @@ async function main(): Promise<void> {
|
||||
registerRbacRoutes(app, rbacDefinitionService);
|
||||
registerUserRoutes(app, userService);
|
||||
registerGroupRoutes(app, groupService);
|
||||
registerPromptRoutes(app, promptService, projectRepo);
|
||||
|
||||
// ── RBAC list filtering hook ──
|
||||
// Filters array responses to only include resources the user is allowed to see.
|
||||
|
||||
@@ -1,18 +1,18 @@
|
||||
import type { PrismaClient, Project } from '@prisma/client';
|
||||
|
||||
export interface ProjectWithRelations extends Project {
|
||||
servers: Array<{ id: string; server: { id: string; name: string } }>;
|
||||
servers: Array<{ id: string; projectId: string; serverId: string; server: Record<string, unknown> & { id: string; name: string } }>;
|
||||
}
|
||||
|
||||
const PROJECT_INCLUDE = {
|
||||
servers: { include: { server: { select: { id: true, name: true } } } },
|
||||
servers: { include: { server: true } },
|
||||
} as const;
|
||||
|
||||
export interface IProjectRepository {
|
||||
findAll(ownerId?: string): Promise<ProjectWithRelations[]>;
|
||||
findById(id: string): Promise<ProjectWithRelations | null>;
|
||||
findByName(name: string): Promise<ProjectWithRelations | null>;
|
||||
create(data: { name: string; description: string; ownerId: string; proxyMode: string; llmProvider?: string; llmModel?: string }): Promise<ProjectWithRelations>;
|
||||
create(data: { name: string; description: string; prompt?: string; ownerId: string; proxyMode: string; llmProvider?: string; llmModel?: string }): Promise<ProjectWithRelations>;
|
||||
update(id: string, data: Record<string, unknown>): Promise<ProjectWithRelations>;
|
||||
delete(id: string): Promise<void>;
|
||||
setServers(projectId: string, serverIds: string[]): Promise<void>;
|
||||
@@ -36,13 +36,14 @@ export class ProjectRepository implements IProjectRepository {
|
||||
return this.prisma.project.findUnique({ where: { name }, include: PROJECT_INCLUDE }) as unknown as Promise<ProjectWithRelations | null>;
|
||||
}
|
||||
|
||||
async create(data: { name: string; description: string; ownerId: string; proxyMode: string; llmProvider?: string; llmModel?: string }): Promise<ProjectWithRelations> {
|
||||
async create(data: { name: string; description: string; prompt?: string; ownerId: string; proxyMode: string; llmProvider?: string; llmModel?: string }): Promise<ProjectWithRelations> {
|
||||
const createData: Record<string, unknown> = {
|
||||
name: data.name,
|
||||
description: data.description,
|
||||
ownerId: data.ownerId,
|
||||
proxyMode: data.proxyMode,
|
||||
};
|
||||
if (data.prompt !== undefined) createData['prompt'] = data.prompt;
|
||||
if (data.llmProvider !== undefined) createData['llmProvider'] = data.llmProvider;
|
||||
if (data.llmModel !== undefined) createData['llmModel'] = data.llmModel;
|
||||
|
||||
|
||||
53
src/mcpd/src/repositories/prompt-request.repository.ts
Normal file
53
src/mcpd/src/repositories/prompt-request.repository.ts
Normal file
@@ -0,0 +1,53 @@
|
||||
import type { PrismaClient, PromptRequest } from '@prisma/client';
|
||||
|
||||
export interface IPromptRequestRepository {
|
||||
findAll(projectId?: string): Promise<PromptRequest[]>;
|
||||
findById(id: string): Promise<PromptRequest | null>;
|
||||
findByNameAndProject(name: string, projectId: string | null): Promise<PromptRequest | null>;
|
||||
findBySession(sessionId: string, projectId?: string): Promise<PromptRequest[]>;
|
||||
create(data: { name: string; content: string; projectId?: string; createdBySession?: string; createdByUserId?: string }): Promise<PromptRequest>;
|
||||
delete(id: string): Promise<void>;
|
||||
}
|
||||
|
||||
export class PromptRequestRepository implements IPromptRequestRepository {
|
||||
constructor(private readonly prisma: PrismaClient) {}
|
||||
|
||||
async findAll(projectId?: string): Promise<PromptRequest[]> {
|
||||
if (projectId !== undefined) {
|
||||
return this.prisma.promptRequest.findMany({
|
||||
where: { OR: [{ projectId }, { projectId: null }] },
|
||||
orderBy: { createdAt: 'desc' },
|
||||
});
|
||||
}
|
||||
return this.prisma.promptRequest.findMany({ orderBy: { createdAt: 'desc' } });
|
||||
}
|
||||
|
||||
async findById(id: string): Promise<PromptRequest | null> {
|
||||
return this.prisma.promptRequest.findUnique({ where: { id } });
|
||||
}
|
||||
|
||||
async findByNameAndProject(name: string, projectId: string | null): Promise<PromptRequest | null> {
|
||||
return this.prisma.promptRequest.findUnique({
|
||||
where: { name_projectId: { name, projectId: projectId ?? '' } },
|
||||
});
|
||||
}
|
||||
|
||||
async findBySession(sessionId: string, projectId?: string): Promise<PromptRequest[]> {
|
||||
const where: Record<string, unknown> = { createdBySession: sessionId };
|
||||
if (projectId !== undefined) {
|
||||
where['OR'] = [{ projectId }, { projectId: null }];
|
||||
}
|
||||
return this.prisma.promptRequest.findMany({
|
||||
where,
|
||||
orderBy: { createdAt: 'desc' },
|
||||
});
|
||||
}
|
||||
|
||||
async create(data: { name: string; content: string; projectId?: string; createdBySession?: string; createdByUserId?: string }): Promise<PromptRequest> {
|
||||
return this.prisma.promptRequest.create({ data });
|
||||
}
|
||||
|
||||
async delete(id: string): Promise<void> {
|
||||
await this.prisma.promptRequest.delete({ where: { id } });
|
||||
}
|
||||
}
|
||||
47
src/mcpd/src/repositories/prompt.repository.ts
Normal file
47
src/mcpd/src/repositories/prompt.repository.ts
Normal file
@@ -0,0 +1,47 @@
|
||||
import type { PrismaClient, Prompt } from '@prisma/client';
|
||||
|
||||
export interface IPromptRepository {
|
||||
findAll(projectId?: string): Promise<Prompt[]>;
|
||||
findById(id: string): Promise<Prompt | null>;
|
||||
findByNameAndProject(name: string, projectId: string | null): Promise<Prompt | null>;
|
||||
create(data: { name: string; content: string; projectId?: string }): Promise<Prompt>;
|
||||
update(id: string, data: { content?: string }): Promise<Prompt>;
|
||||
delete(id: string): Promise<void>;
|
||||
}
|
||||
|
||||
export class PromptRepository implements IPromptRepository {
|
||||
constructor(private readonly prisma: PrismaClient) {}
|
||||
|
||||
async findAll(projectId?: string): Promise<Prompt[]> {
|
||||
if (projectId !== undefined) {
|
||||
// Project-scoped + global prompts
|
||||
return this.prisma.prompt.findMany({
|
||||
where: { OR: [{ projectId }, { projectId: null }] },
|
||||
orderBy: { name: 'asc' },
|
||||
});
|
||||
}
|
||||
return this.prisma.prompt.findMany({ orderBy: { name: 'asc' } });
|
||||
}
|
||||
|
||||
async findById(id: string): Promise<Prompt | null> {
|
||||
return this.prisma.prompt.findUnique({ where: { id } });
|
||||
}
|
||||
|
||||
async findByNameAndProject(name: string, projectId: string | null): Promise<Prompt | null> {
|
||||
return this.prisma.prompt.findUnique({
|
||||
where: { name_projectId: { name, projectId: projectId ?? '' } },
|
||||
});
|
||||
}
|
||||
|
||||
async create(data: { name: string; content: string; projectId?: string }): Promise<Prompt> {
|
||||
return this.prisma.prompt.create({ data });
|
||||
}
|
||||
|
||||
async update(id: string, data: { content?: string }): Promise<Prompt> {
|
||||
return this.prisma.prompt.update({ where: { id }, data });
|
||||
}
|
||||
|
||||
async delete(id: string): Promise<void> {
|
||||
await this.prisma.prompt.delete({ where: { id } });
|
||||
}
|
||||
}
|
||||
@@ -54,4 +54,16 @@ export function registerProjectRoutes(app: FastifyInstance, service: ProjectServ
|
||||
const project = await service.resolveAndGet(request.params.id);
|
||||
return project.servers.map((ps) => ps.server);
|
||||
});
|
||||
|
||||
// Get project instructions for LLM (prompt + server list)
|
||||
app.get<{ Params: { id: string } }>('/api/v1/projects/:id/instructions', async (request) => {
|
||||
const project = await service.resolveAndGet(request.params.id);
|
||||
return {
|
||||
prompt: project.prompt,
|
||||
servers: project.servers.map((ps) => ({
|
||||
name: (ps.server as Record<string, unknown>).name as string,
|
||||
description: (ps.server as Record<string, unknown>).description as string,
|
||||
})),
|
||||
};
|
||||
});
|
||||
}
|
||||
|
||||
86
src/mcpd/src/routes/prompts.ts
Normal file
86
src/mcpd/src/routes/prompts.ts
Normal file
@@ -0,0 +1,86 @@
|
||||
import type { FastifyInstance } from 'fastify';
|
||||
import type { PromptService } from '../services/prompt.service.js';
|
||||
import type { IProjectRepository } from '../repositories/project.repository.js';
|
||||
|
||||
export function registerPromptRoutes(
|
||||
app: FastifyInstance,
|
||||
service: PromptService,
|
||||
projectRepo: IProjectRepository,
|
||||
): void {
|
||||
// ── Prompts (approved) ──
|
||||
|
||||
app.get('/api/v1/prompts', async () => {
|
||||
return service.listPrompts();
|
||||
});
|
||||
|
||||
app.get<{ Params: { id: string } }>('/api/v1/prompts/:id', async (request) => {
|
||||
return service.getPrompt(request.params.id);
|
||||
});
|
||||
|
||||
app.post('/api/v1/prompts', async (request, reply) => {
|
||||
const prompt = await service.createPrompt(request.body);
|
||||
reply.code(201);
|
||||
return prompt;
|
||||
});
|
||||
|
||||
app.put<{ Params: { id: string } }>('/api/v1/prompts/:id', async (request) => {
|
||||
return service.updatePrompt(request.params.id, request.body);
|
||||
});
|
||||
|
||||
app.delete<{ Params: { id: string } }>('/api/v1/prompts/:id', async (request, reply) => {
|
||||
await service.deletePrompt(request.params.id);
|
||||
reply.code(204);
|
||||
});
|
||||
|
||||
// ── Prompt Requests (pending proposals) ──
|
||||
|
||||
app.get('/api/v1/promptrequests', async () => {
|
||||
return service.listPromptRequests();
|
||||
});
|
||||
|
||||
app.get<{ Params: { id: string } }>('/api/v1/promptrequests/:id', async (request) => {
|
||||
return service.getPromptRequest(request.params.id);
|
||||
});
|
||||
|
||||
app.delete<{ Params: { id: string } }>('/api/v1/promptrequests/:id', async (request, reply) => {
|
||||
await service.deletePromptRequest(request.params.id);
|
||||
reply.code(204);
|
||||
});
|
||||
|
||||
// Approve: atomic delete request → create prompt
|
||||
app.post<{ Params: { id: string } }>('/api/v1/promptrequests/:id/approve', async (request) => {
|
||||
return service.approve(request.params.id);
|
||||
});
|
||||
|
||||
// ── Project-scoped endpoints (for mcplocal) ──
|
||||
|
||||
// Visible prompts: approved + session's pending requests
|
||||
app.get<{ Params: { name: string }; Querystring: { session?: string } }>(
|
||||
'/api/v1/projects/:name/prompts/visible',
|
||||
async (request) => {
|
||||
const project = await projectRepo.findByName(request.params.name);
|
||||
if (!project) {
|
||||
throw Object.assign(new Error(`Project not found: ${request.params.name}`), { statusCode: 404 });
|
||||
}
|
||||
return service.getVisiblePrompts(project.id, request.query.session);
|
||||
},
|
||||
);
|
||||
|
||||
// LLM propose: create a PromptRequest for a project
|
||||
app.post<{ Params: { name: string } }>(
|
||||
'/api/v1/projects/:name/promptrequests',
|
||||
async (request, reply) => {
|
||||
const project = await projectRepo.findByName(request.params.name);
|
||||
if (!project) {
|
||||
throw Object.assign(new Error(`Project not found: ${request.params.name}`), { statusCode: 404 });
|
||||
}
|
||||
const body = request.body as Record<string, unknown>;
|
||||
const req = await service.propose({
|
||||
...body,
|
||||
projectId: project.id,
|
||||
});
|
||||
reply.code(201);
|
||||
return req;
|
||||
},
|
||||
);
|
||||
}
|
||||
@@ -1,7 +1,10 @@
|
||||
import type { McpInstance } from '@prisma/client';
|
||||
import type { McpInstance, McpServer } from '@prisma/client';
|
||||
import type { IMcpInstanceRepository, IMcpServerRepository } from '../repositories/interfaces.js';
|
||||
import type { McpOrchestrator } from './orchestrator.js';
|
||||
import { NotFoundError } from './mcp-server.service.js';
|
||||
import { InvalidStateError } from './instance.service.js';
|
||||
import { sendViaSse } from './transport/sse-client.js';
|
||||
import { sendViaStdio } from './transport/stdio-client.js';
|
||||
|
||||
export interface McpProxyRequest {
|
||||
serverId: string;
|
||||
@@ -38,17 +41,21 @@ export class McpProxyService {
|
||||
constructor(
|
||||
private readonly instanceRepo: IMcpInstanceRepository,
|
||||
private readonly serverRepo: IMcpServerRepository,
|
||||
private readonly orchestrator?: McpOrchestrator,
|
||||
) {}
|
||||
|
||||
async execute(request: McpProxyRequest): Promise<McpProxyResponse> {
|
||||
const server = await this.serverRepo.findById(request.serverId);
|
||||
|
||||
// External server: proxy directly to externalUrl
|
||||
if (server?.externalUrl) {
|
||||
return this.sendToExternal(server.id, server.externalUrl, request.method, request.params);
|
||||
if (!server) {
|
||||
throw new NotFoundError(`Server '${request.serverId}' not found`);
|
||||
}
|
||||
|
||||
// Managed server: find running instance
|
||||
// External server: proxy directly to externalUrl
|
||||
if (server.externalUrl) {
|
||||
return this.sendToExternal(server, request.method, request.params);
|
||||
}
|
||||
|
||||
// Managed server: find running instance and dispatch by transport
|
||||
const instances = await this.instanceRepo.findAll(request.serverId);
|
||||
const running = instances.find((i) => i.status === 'RUNNING');
|
||||
|
||||
@@ -56,20 +63,95 @@ export class McpProxyService {
|
||||
throw new NotFoundError(`No running instance found for server '${request.serverId}'`);
|
||||
}
|
||||
|
||||
if (running.port === null || running.port === undefined) {
|
||||
throw new InvalidStateError(
|
||||
`Running instance '${running.id}' for server '${request.serverId}' has no port assigned`,
|
||||
);
|
||||
}
|
||||
|
||||
return this.sendJsonRpc(running, request.method, request.params);
|
||||
return this.sendToManaged(server, running, request.method, request.params);
|
||||
}
|
||||
|
||||
/**
|
||||
* Send a JSON-RPC request to an external MCP server.
|
||||
* Handles streamable-http protocol (session management + SSE response parsing).
|
||||
* Send to an external MCP server. Dispatches based on transport type.
|
||||
*/
|
||||
private async sendToExternal(
|
||||
server: McpServer,
|
||||
method: string,
|
||||
params?: Record<string, unknown>,
|
||||
): Promise<McpProxyResponse> {
|
||||
const url = server.externalUrl as string;
|
||||
|
||||
if (server.transport === 'SSE') {
|
||||
return sendViaSse(url, method, params);
|
||||
}
|
||||
|
||||
// STREAMABLE_HTTP (default for external)
|
||||
return this.sendStreamableHttp(server.id, url, method, params);
|
||||
}
|
||||
|
||||
/**
|
||||
* Send to a managed (containerized) MCP server. Dispatches based on transport type.
|
||||
*/
|
||||
private async sendToManaged(
|
||||
server: McpServer,
|
||||
instance: McpInstance,
|
||||
method: string,
|
||||
params?: Record<string, unknown>,
|
||||
): Promise<McpProxyResponse> {
|
||||
const transport = server.transport as string;
|
||||
|
||||
// STDIO: use docker exec
|
||||
if (transport === 'STDIO') {
|
||||
if (!this.orchestrator) {
|
||||
throw new InvalidStateError('Orchestrator required for STDIO transport');
|
||||
}
|
||||
if (!instance.containerId) {
|
||||
throw new InvalidStateError(`Instance '${instance.id}' has no container ID`);
|
||||
}
|
||||
const packageName = server.packageName as string | null;
|
||||
if (!packageName) {
|
||||
throw new InvalidStateError(`Server '${server.id}' has no package name for STDIO transport`);
|
||||
}
|
||||
return sendViaStdio(this.orchestrator, instance.containerId, packageName, method, params);
|
||||
}
|
||||
|
||||
// SSE or STREAMABLE_HTTP: need a base URL
|
||||
const baseUrl = await this.resolveBaseUrl(instance, server);
|
||||
|
||||
if (transport === 'SSE') {
|
||||
return sendViaSse(baseUrl, method, params);
|
||||
}
|
||||
|
||||
// STREAMABLE_HTTP (default)
|
||||
return this.sendStreamableHttp(server.id, baseUrl, method, params);
|
||||
}
|
||||
|
||||
/**
|
||||
* Resolve the base URL for an HTTP-based managed server.
|
||||
* Prefers container internal IP on Docker network, falls back to localhost:port.
|
||||
*/
|
||||
private async resolveBaseUrl(instance: McpInstance, server: McpServer): Promise<string> {
|
||||
const containerPort = (server.containerPort as number | null) ?? 3000;
|
||||
|
||||
if (this.orchestrator && instance.containerId) {
|
||||
try {
|
||||
const containerInfo = await this.orchestrator.inspectContainer(instance.containerId);
|
||||
if (containerInfo.ip) {
|
||||
return `http://${containerInfo.ip}:${containerPort}`;
|
||||
}
|
||||
} catch {
|
||||
// Fall through to localhost
|
||||
}
|
||||
}
|
||||
|
||||
if (instance.port !== null && instance.port !== undefined) {
|
||||
return `http://localhost:${instance.port}`;
|
||||
}
|
||||
|
||||
throw new InvalidStateError(
|
||||
`Cannot resolve URL for instance '${instance.id}': no container IP or host port`,
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Send via streamable-http protocol with session management.
|
||||
*/
|
||||
private async sendStreamableHttp(
|
||||
serverId: string,
|
||||
url: string,
|
||||
method: string,
|
||||
@@ -109,14 +191,14 @@ export class McpProxyService {
|
||||
// Session expired? Clear and retry once
|
||||
if (response.status === 400 || response.status === 404) {
|
||||
this.sessions.delete(serverId);
|
||||
return this.sendToExternal(serverId, url, method, params);
|
||||
return this.sendStreamableHttp(serverId, url, method, params);
|
||||
}
|
||||
return {
|
||||
jsonrpc: '2.0',
|
||||
id: 1,
|
||||
error: {
|
||||
code: -32000,
|
||||
message: `External MCP server returned HTTP ${response.status}: ${response.statusText}`,
|
||||
message: `MCP server returned HTTP ${response.status}: ${response.statusText}`,
|
||||
},
|
||||
};
|
||||
}
|
||||
@@ -126,8 +208,7 @@ export class McpProxyService {
|
||||
}
|
||||
|
||||
/**
|
||||
* Initialize a streamable-http session with an external server.
|
||||
* Sends `initialize` and `notifications/initialized`, caches the session ID.
|
||||
* Initialize a streamable-http session with a server.
|
||||
*/
|
||||
private async initSession(serverId: string, url: string): Promise<void> {
|
||||
const initBody = {
|
||||
@@ -174,41 +255,4 @@ export class McpProxyService {
|
||||
body: JSON.stringify({ jsonrpc: '2.0', method: 'notifications/initialized' }),
|
||||
});
|
||||
}
|
||||
|
||||
private async sendJsonRpc(
|
||||
instance: McpInstance,
|
||||
method: string,
|
||||
params?: Record<string, unknown>,
|
||||
): Promise<McpProxyResponse> {
|
||||
const url = `http://localhost:${instance.port}`;
|
||||
|
||||
const body: Record<string, unknown> = {
|
||||
jsonrpc: '2.0',
|
||||
id: 1,
|
||||
method,
|
||||
};
|
||||
if (params !== undefined) {
|
||||
body.params = params;
|
||||
}
|
||||
|
||||
const response = await fetch(url, {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify(body),
|
||||
});
|
||||
|
||||
if (!response.ok) {
|
||||
return {
|
||||
jsonrpc: '2.0',
|
||||
id: 1,
|
||||
error: {
|
||||
code: -32000,
|
||||
message: `MCP server returned HTTP ${response.status}: ${response.statusText}`,
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
const result = (await response.json()) as McpProxyResponse;
|
||||
return result;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -53,6 +53,7 @@ export class ProjectService {
|
||||
const project = await this.projectRepo.create({
|
||||
name: data.name,
|
||||
description: data.description,
|
||||
prompt: data.prompt,
|
||||
ownerId,
|
||||
proxyMode: data.proxyMode,
|
||||
...(data.llmProvider !== undefined ? { llmProvider: data.llmProvider } : {}),
|
||||
@@ -75,6 +76,7 @@ export class ProjectService {
|
||||
// Build update data for scalar fields
|
||||
const updateData: Record<string, unknown> = {};
|
||||
if (data.description !== undefined) updateData['description'] = data.description;
|
||||
if (data.prompt !== undefined) updateData['prompt'] = data.prompt;
|
||||
if (data.proxyMode !== undefined) updateData['proxyMode'] = data.proxyMode;
|
||||
if (data.llmProvider !== undefined) updateData['llmProvider'] = data.llmProvider;
|
||||
if (data.llmModel !== undefined) updateData['llmModel'] = data.llmModel;
|
||||
|
||||
137
src/mcpd/src/services/prompt.service.ts
Normal file
137
src/mcpd/src/services/prompt.service.ts
Normal file
@@ -0,0 +1,137 @@
|
||||
import type { Prompt, PromptRequest } from '@prisma/client';
|
||||
import type { IPromptRepository } from '../repositories/prompt.repository.js';
|
||||
import type { IPromptRequestRepository } from '../repositories/prompt-request.repository.js';
|
||||
import type { IProjectRepository } from '../repositories/project.repository.js';
|
||||
import { CreatePromptSchema, UpdatePromptSchema, CreatePromptRequestSchema } from '../validation/prompt.schema.js';
|
||||
import { NotFoundError } from './mcp-server.service.js';
|
||||
|
||||
export class PromptService {
|
||||
constructor(
|
||||
private readonly promptRepo: IPromptRepository,
|
||||
private readonly promptRequestRepo: IPromptRequestRepository,
|
||||
private readonly projectRepo: IProjectRepository,
|
||||
) {}
|
||||
|
||||
// ── Prompt CRUD ──
|
||||
|
||||
async listPrompts(projectId?: string): Promise<Prompt[]> {
|
||||
return this.promptRepo.findAll(projectId);
|
||||
}
|
||||
|
||||
async getPrompt(id: string): Promise<Prompt> {
|
||||
const prompt = await this.promptRepo.findById(id);
|
||||
if (prompt === null) throw new NotFoundError(`Prompt not found: ${id}`);
|
||||
return prompt;
|
||||
}
|
||||
|
||||
async createPrompt(input: unknown): Promise<Prompt> {
|
||||
const data = CreatePromptSchema.parse(input);
|
||||
|
||||
if (data.projectId) {
|
||||
const project = await this.projectRepo.findById(data.projectId);
|
||||
if (project === null) throw new NotFoundError(`Project not found: ${data.projectId}`);
|
||||
}
|
||||
|
||||
const createData: { name: string; content: string; projectId?: string } = {
|
||||
name: data.name,
|
||||
content: data.content,
|
||||
};
|
||||
if (data.projectId !== undefined) createData.projectId = data.projectId;
|
||||
return this.promptRepo.create(createData);
|
||||
}
|
||||
|
||||
async updatePrompt(id: string, input: unknown): Promise<Prompt> {
|
||||
const data = UpdatePromptSchema.parse(input);
|
||||
await this.getPrompt(id);
|
||||
const updateData: { content?: string } = {};
|
||||
if (data.content !== undefined) updateData.content = data.content;
|
||||
return this.promptRepo.update(id, updateData);
|
||||
}
|
||||
|
||||
async deletePrompt(id: string): Promise<void> {
|
||||
await this.getPrompt(id);
|
||||
await this.promptRepo.delete(id);
|
||||
}
|
||||
|
||||
// ── PromptRequest CRUD ──
|
||||
|
||||
async listPromptRequests(projectId?: string): Promise<PromptRequest[]> {
|
||||
return this.promptRequestRepo.findAll(projectId);
|
||||
}
|
||||
|
||||
async getPromptRequest(id: string): Promise<PromptRequest> {
|
||||
const req = await this.promptRequestRepo.findById(id);
|
||||
if (req === null) throw new NotFoundError(`PromptRequest not found: ${id}`);
|
||||
return req;
|
||||
}
|
||||
|
||||
async deletePromptRequest(id: string): Promise<void> {
|
||||
await this.getPromptRequest(id);
|
||||
await this.promptRequestRepo.delete(id);
|
||||
}
|
||||
|
||||
// ── Propose (LLM creates a PromptRequest) ──
|
||||
|
||||
async propose(input: unknown): Promise<PromptRequest> {
|
||||
const data = CreatePromptRequestSchema.parse(input);
|
||||
|
||||
if (data.projectId) {
|
||||
const project = await this.projectRepo.findById(data.projectId);
|
||||
if (project === null) throw new NotFoundError(`Project not found: ${data.projectId}`);
|
||||
}
|
||||
|
||||
const createData: { name: string; content: string; projectId?: string; createdBySession?: string; createdByUserId?: string } = {
|
||||
name: data.name,
|
||||
content: data.content,
|
||||
};
|
||||
if (data.projectId !== undefined) createData.projectId = data.projectId;
|
||||
if (data.createdBySession !== undefined) createData.createdBySession = data.createdBySession;
|
||||
if (data.createdByUserId !== undefined) createData.createdByUserId = data.createdByUserId;
|
||||
return this.promptRequestRepo.create(createData);
|
||||
}
|
||||
|
||||
// ── Approve (delete PromptRequest → create Prompt) ──
|
||||
|
||||
async approve(requestId: string): Promise<Prompt> {
|
||||
const req = await this.getPromptRequest(requestId);
|
||||
|
||||
// Create the approved prompt
|
||||
const createData: { name: string; content: string; projectId?: string } = {
|
||||
name: req.name,
|
||||
content: req.content,
|
||||
};
|
||||
if (req.projectId !== null) createData.projectId = req.projectId;
|
||||
|
||||
const prompt = await this.promptRepo.create(createData);
|
||||
|
||||
// Delete the request
|
||||
await this.promptRequestRepo.delete(requestId);
|
||||
|
||||
return prompt;
|
||||
}
|
||||
|
||||
// ── Visibility for MCP (approved prompts + session's pending requests) ──
|
||||
|
||||
async getVisiblePrompts(
|
||||
projectId?: string,
|
||||
sessionId?: string,
|
||||
): Promise<Array<{ name: string; content: string; type: 'prompt' | 'promptrequest' }>> {
|
||||
const results: Array<{ name: string; content: string; type: 'prompt' | 'promptrequest' }> = [];
|
||||
|
||||
// Approved prompts (project-scoped + global)
|
||||
const prompts = await this.promptRepo.findAll(projectId);
|
||||
for (const p of prompts) {
|
||||
results.push({ name: p.name, content: p.content, type: 'prompt' });
|
||||
}
|
||||
|
||||
// Session's own pending requests
|
||||
if (sessionId) {
|
||||
const requests = await this.promptRequestRepo.findBySession(sessionId, projectId);
|
||||
for (const r of requests) {
|
||||
results.push({ name: r.name, content: r.content, type: 'promptrequest' });
|
||||
}
|
||||
}
|
||||
|
||||
return results;
|
||||
}
|
||||
}
|
||||
@@ -50,8 +50,8 @@ export class RbacService {
|
||||
* If provided, name-scoped bindings only match when their name equals this.
|
||||
* If omitted (listing), name-scoped bindings still grant access.
|
||||
*/
|
||||
async canAccess(userId: string, action: RbacAction, resource: string, resourceName?: string): Promise<boolean> {
|
||||
const permissions = await this.getPermissions(userId);
|
||||
async canAccess(userId: string, action: RbacAction, resource: string, resourceName?: string, serviceAccountName?: string): Promise<boolean> {
|
||||
const permissions = await this.getPermissions(userId, serviceAccountName);
|
||||
const normalized = normalizeResource(resource);
|
||||
|
||||
for (const perm of permissions) {
|
||||
@@ -73,8 +73,8 @@ export class RbacService {
|
||||
* Check whether a user is allowed to perform a named operation.
|
||||
* Operations require an explicit 'run' role binding with a matching action.
|
||||
*/
|
||||
async canRunOperation(userId: string, operation: string): Promise<boolean> {
|
||||
const permissions = await this.getPermissions(userId);
|
||||
async canRunOperation(userId: string, operation: string, serviceAccountName?: string): Promise<boolean> {
|
||||
const permissions = await this.getPermissions(userId, serviceAccountName);
|
||||
|
||||
for (const perm of permissions) {
|
||||
if ('action' in perm && perm.role === 'run' && perm.action === operation) {
|
||||
@@ -90,8 +90,8 @@ export class RbacService {
|
||||
* Returns wildcard:true if any matching binding is unscoped (no name constraint).
|
||||
* Returns wildcard:false with a set of allowed names if all bindings are name-scoped.
|
||||
*/
|
||||
async getAllowedScope(userId: string, action: RbacAction, resource: string): Promise<AllowedScope> {
|
||||
const permissions = await this.getPermissions(userId);
|
||||
async getAllowedScope(userId: string, action: RbacAction, resource: string, serviceAccountName?: string): Promise<AllowedScope> {
|
||||
const permissions = await this.getPermissions(userId, serviceAccountName);
|
||||
const normalized = normalizeResource(resource);
|
||||
const names = new Set<string>();
|
||||
|
||||
@@ -113,31 +113,35 @@ export class RbacService {
|
||||
/**
|
||||
* Collect all permissions for a user across all matching RbacDefinitions.
|
||||
*/
|
||||
async getPermissions(userId: string): Promise<Permission[]> {
|
||||
async getPermissions(userId: string, serviceAccountName?: string): Promise<Permission[]> {
|
||||
// 1. Resolve user email
|
||||
const user = await this.prisma.user.findUnique({
|
||||
where: { id: userId },
|
||||
select: { email: true },
|
||||
});
|
||||
if (user === null) return [];
|
||||
if (user === null && serviceAccountName === undefined) return [];
|
||||
|
||||
// 2. Resolve group names the user belongs to
|
||||
let groupNames: string[] = [];
|
||||
if (user !== null) {
|
||||
const memberships = await this.prisma.groupMember.findMany({
|
||||
where: { userId },
|
||||
select: { group: { select: { name: true } } },
|
||||
});
|
||||
const groupNames = memberships.map((m) => m.group.name);
|
||||
groupNames = memberships.map((m) => m.group.name);
|
||||
}
|
||||
|
||||
// 3. Load all RbacDefinitions
|
||||
const definitions = await this.rbacRepo.findAll();
|
||||
|
||||
// 4. Find definitions where user is a subject
|
||||
// 4. Find definitions where user or service account is a subject
|
||||
const permissions: Permission[] = [];
|
||||
for (const def of definitions) {
|
||||
const subjects = def.subjects as RbacSubject[];
|
||||
const matched = subjects.some((s) => {
|
||||
if (s.kind === 'User') return s.name === user.email;
|
||||
if (s.kind === 'User') return user !== null && s.name === user.email;
|
||||
if (s.kind === 'Group') return groupNames.includes(s.name);
|
||||
if (s.kind === 'ServiceAccount') return serviceAccountName !== undefined && s.name === serviceAccountName;
|
||||
return false;
|
||||
});
|
||||
|
||||
|
||||
2
src/mcpd/src/services/transport/index.ts
Normal file
2
src/mcpd/src/services/transport/index.ts
Normal file
@@ -0,0 +1,2 @@
|
||||
export { sendViaSse } from './sse-client.js';
|
||||
export { sendViaStdio } from './stdio-client.js';
|
||||
150
src/mcpd/src/services/transport/sse-client.ts
Normal file
150
src/mcpd/src/services/transport/sse-client.ts
Normal file
@@ -0,0 +1,150 @@
|
||||
import type { McpProxyResponse } from '../mcp-proxy-service.js';
|
||||
|
||||
/**
|
||||
* SSE transport client for MCP servers using the legacy SSE protocol.
|
||||
*
|
||||
* Protocol: GET /sse → endpoint event with messages URL → POST to messages URL.
|
||||
* Responses come back on the SSE stream, matched by JSON-RPC request ID.
|
||||
*
|
||||
* Each call opens a fresh SSE connection, initializes, sends the request,
|
||||
* reads the response, and closes. Session caching may be added later.
|
||||
*/
|
||||
export async function sendViaSse(
|
||||
baseUrl: string,
|
||||
method: string,
|
||||
params?: Record<string, unknown>,
|
||||
timeoutMs = 30_000,
|
||||
): Promise<McpProxyResponse> {
|
||||
const controller = new AbortController();
|
||||
const timer = setTimeout(() => controller.abort(), timeoutMs);
|
||||
|
||||
try {
|
||||
// 1. GET /sse → SSE stream
|
||||
const sseResp = await fetch(`${baseUrl}/sse`, {
|
||||
method: 'GET',
|
||||
headers: { 'Accept': 'text/event-stream' },
|
||||
signal: controller.signal,
|
||||
});
|
||||
|
||||
if (!sseResp.ok) {
|
||||
return errorResponse(`SSE connect failed: HTTP ${sseResp.status}`);
|
||||
}
|
||||
|
||||
const reader = sseResp.body?.getReader();
|
||||
if (!reader) {
|
||||
return errorResponse('No SSE stream body');
|
||||
}
|
||||
|
||||
// 2. Read until we get the endpoint event with messages URL
|
||||
const decoder = new TextDecoder();
|
||||
let buffer = '';
|
||||
let messagesUrl = '';
|
||||
|
||||
while (!messagesUrl) {
|
||||
const { done, value } = await reader.read();
|
||||
if (done) break;
|
||||
buffer += decoder.decode(value, { stream: true });
|
||||
|
||||
for (const line of buffer.split('\n')) {
|
||||
if (line.startsWith('data: ') && buffer.includes('event: endpoint')) {
|
||||
const endpoint = line.slice(6).trim();
|
||||
messagesUrl = endpoint.startsWith('http') ? endpoint : `${baseUrl}${endpoint}`;
|
||||
}
|
||||
}
|
||||
const lines = buffer.split('\n');
|
||||
buffer = lines[lines.length - 1] ?? '';
|
||||
}
|
||||
|
||||
if (!messagesUrl) {
|
||||
reader.cancel();
|
||||
return errorResponse('No endpoint event from SSE stream');
|
||||
}
|
||||
|
||||
const postHeaders = { 'Content-Type': 'application/json' };
|
||||
|
||||
// 3. Initialize
|
||||
const initResp = await fetch(messagesUrl, {
|
||||
method: 'POST',
|
||||
headers: postHeaders,
|
||||
body: JSON.stringify({
|
||||
jsonrpc: '2.0',
|
||||
id: 1,
|
||||
method: 'initialize',
|
||||
params: {
|
||||
protocolVersion: '2024-11-05',
|
||||
capabilities: {},
|
||||
clientInfo: { name: 'mcpctl-proxy', version: '0.1.0' },
|
||||
},
|
||||
}),
|
||||
signal: controller.signal,
|
||||
});
|
||||
|
||||
if (!initResp.ok) {
|
||||
reader.cancel();
|
||||
return errorResponse(`SSE initialize failed: HTTP ${initResp.status}`);
|
||||
}
|
||||
|
||||
// 4. Send notifications/initialized
|
||||
await fetch(messagesUrl, {
|
||||
method: 'POST',
|
||||
headers: postHeaders,
|
||||
body: JSON.stringify({ jsonrpc: '2.0', method: 'notifications/initialized' }),
|
||||
signal: controller.signal,
|
||||
});
|
||||
|
||||
// 5. Send the actual request
|
||||
const requestId = 2;
|
||||
await fetch(messagesUrl, {
|
||||
method: 'POST',
|
||||
headers: postHeaders,
|
||||
body: JSON.stringify({
|
||||
jsonrpc: '2.0',
|
||||
id: requestId,
|
||||
method,
|
||||
...(params !== undefined ? { params } : {}),
|
||||
}),
|
||||
signal: controller.signal,
|
||||
});
|
||||
|
||||
// 6. Read response from SSE stream (matched by request ID)
|
||||
let responseBuffer = '';
|
||||
const readTimeout = setTimeout(() => reader.cancel(), 5000);
|
||||
|
||||
while (true) {
|
||||
const { done, value } = await reader.read();
|
||||
if (done) break;
|
||||
responseBuffer += decoder.decode(value, { stream: true });
|
||||
|
||||
for (const line of responseBuffer.split('\n')) {
|
||||
if (line.startsWith('data: ')) {
|
||||
try {
|
||||
const parsed = JSON.parse(line.slice(6)) as McpProxyResponse;
|
||||
if (parsed.id === requestId) {
|
||||
clearTimeout(readTimeout);
|
||||
reader.cancel();
|
||||
return parsed;
|
||||
}
|
||||
} catch {
|
||||
// Not valid JSON, skip
|
||||
}
|
||||
}
|
||||
}
|
||||
const respLines = responseBuffer.split('\n');
|
||||
responseBuffer = respLines[respLines.length - 1] ?? '';
|
||||
}
|
||||
|
||||
clearTimeout(readTimeout);
|
||||
reader.cancel();
|
||||
return errorResponse('No response received from SSE stream');
|
||||
} finally {
|
||||
clearTimeout(timer);
|
||||
}
|
||||
}
|
||||
|
||||
function errorResponse(message: string): McpProxyResponse {
|
||||
return {
|
||||
jsonrpc: '2.0',
|
||||
id: 1,
|
||||
error: { code: -32000, message },
|
||||
};
|
||||
}
|
||||
119
src/mcpd/src/services/transport/stdio-client.ts
Normal file
119
src/mcpd/src/services/transport/stdio-client.ts
Normal file
@@ -0,0 +1,119 @@
|
||||
import type { McpOrchestrator } from '../orchestrator.js';
|
||||
import type { McpProxyResponse } from '../mcp-proxy-service.js';
|
||||
|
||||
/**
|
||||
* STDIO transport client for MCP servers running as Docker containers.
|
||||
*
|
||||
* Runs `docker exec` with an inline Node.js script that spawns the MCP server
|
||||
* binary, pipes JSON-RPC messages via stdin/stdout, and returns the response.
|
||||
*
|
||||
* Each call is self-contained: initialize → notifications/initialized → request → response.
|
||||
*/
|
||||
export async function sendViaStdio(
|
||||
orchestrator: McpOrchestrator,
|
||||
containerId: string,
|
||||
packageName: string,
|
||||
method: string,
|
||||
params?: Record<string, unknown>,
|
||||
timeoutMs = 30_000,
|
||||
): Promise<McpProxyResponse> {
|
||||
const initMsg = JSON.stringify({
|
||||
jsonrpc: '2.0',
|
||||
id: 1,
|
||||
method: 'initialize',
|
||||
params: {
|
||||
protocolVersion: '2024-11-05',
|
||||
capabilities: {},
|
||||
clientInfo: { name: 'mcpctl-proxy', version: '0.1.0' },
|
||||
},
|
||||
});
|
||||
const initializedMsg = JSON.stringify({
|
||||
jsonrpc: '2.0',
|
||||
method: 'notifications/initialized',
|
||||
});
|
||||
|
||||
const requestBody: Record<string, unknown> = {
|
||||
jsonrpc: '2.0',
|
||||
id: 2,
|
||||
method,
|
||||
};
|
||||
if (params !== undefined) {
|
||||
requestBody.params = params;
|
||||
}
|
||||
const requestMsg = JSON.stringify(requestBody);
|
||||
|
||||
// Inline Node.js script that:
|
||||
// 1. Spawns the MCP server binary via npx
|
||||
// 2. Sends initialize → initialized → actual request via stdin
|
||||
// 3. Reads stdout for JSON-RPC response with id: 2
|
||||
// 4. Outputs the full JSON-RPC response to stdout
|
||||
const probeScript = `
|
||||
const { spawn } = require('child_process');
|
||||
const proc = spawn('npx', ['--prefer-offline', '-y', ${JSON.stringify(packageName)}], { stdio: ['pipe', 'pipe', 'pipe'] });
|
||||
let output = '';
|
||||
let responded = false;
|
||||
proc.stdout.on('data', d => {
|
||||
output += d;
|
||||
const lines = output.split('\\n');
|
||||
for (const line of lines) {
|
||||
if (!line.trim()) continue;
|
||||
try {
|
||||
const msg = JSON.parse(line);
|
||||
if (msg.id === 2) {
|
||||
responded = true;
|
||||
process.stdout.write(JSON.stringify(msg), () => {
|
||||
proc.kill();
|
||||
process.exit(0);
|
||||
});
|
||||
}
|
||||
} catch {}
|
||||
}
|
||||
output = lines[lines.length - 1] || '';
|
||||
});
|
||||
proc.stderr.on('data', () => {});
|
||||
proc.on('error', e => { process.stdout.write(JSON.stringify({jsonrpc:'2.0',id:2,error:{code:-32000,message:e.message}})); process.exit(1); });
|
||||
proc.on('exit', (code) => { if (!responded) { process.stdout.write(JSON.stringify({jsonrpc:'2.0',id:2,error:{code:-32000,message:'process exited '+code}})); process.exit(1); } });
|
||||
setTimeout(() => { if (!responded) { process.stdout.write(JSON.stringify({jsonrpc:'2.0',id:2,error:{code:-32000,message:'timeout'}})); proc.kill(); process.exit(1); } }, ${timeoutMs - 2000});
|
||||
proc.stdin.write(${JSON.stringify(initMsg)} + '\\n');
|
||||
setTimeout(() => {
|
||||
proc.stdin.write(${JSON.stringify(initializedMsg)} + '\\n');
|
||||
setTimeout(() => {
|
||||
proc.stdin.write(${JSON.stringify(requestMsg)} + '\\n');
|
||||
}, 500);
|
||||
}, 500);
|
||||
`.trim();
|
||||
|
||||
try {
|
||||
const result = await orchestrator.execInContainer(
|
||||
containerId,
|
||||
['node', '-e', probeScript],
|
||||
{ timeoutMs },
|
||||
);
|
||||
|
||||
if (result.exitCode === 0 && result.stdout.trim()) {
|
||||
try {
|
||||
return JSON.parse(result.stdout.trim()) as McpProxyResponse;
|
||||
} catch {
|
||||
return errorResponse(`Failed to parse STDIO response: ${result.stdout.slice(0, 200)}`);
|
||||
}
|
||||
}
|
||||
|
||||
// Try to parse error response from stdout
|
||||
try {
|
||||
return JSON.parse(result.stdout.trim()) as McpProxyResponse;
|
||||
} catch {
|
||||
const errorMsg = result.stderr.trim() || `docker exec exit code ${result.exitCode}`;
|
||||
return errorResponse(errorMsg);
|
||||
}
|
||||
} catch (err) {
|
||||
return errorResponse(err instanceof Error ? err.message : String(err));
|
||||
}
|
||||
}
|
||||
|
||||
function errorResponse(message: string): McpProxyResponse {
|
||||
return {
|
||||
jsonrpc: '2.0',
|
||||
id: 2,
|
||||
error: { code: -32000, message },
|
||||
};
|
||||
}
|
||||
@@ -3,6 +3,7 @@ import { z } from 'zod';
|
||||
export const CreateProjectSchema = z.object({
|
||||
name: z.string().min(1).max(100).regex(/^[a-z0-9-]+$/, 'Name must be lowercase alphanumeric with hyphens'),
|
||||
description: z.string().max(1000).default(''),
|
||||
prompt: z.string().max(10000).default(''),
|
||||
proxyMode: z.enum(['direct', 'filtered']).default('direct'),
|
||||
llmProvider: z.string().max(100).optional(),
|
||||
llmModel: z.string().max(100).optional(),
|
||||
@@ -14,6 +15,7 @@ export const CreateProjectSchema = z.object({
|
||||
|
||||
export const UpdateProjectSchema = z.object({
|
||||
description: z.string().max(1000).optional(),
|
||||
prompt: z.string().max(10000).optional(),
|
||||
proxyMode: z.enum(['direct', 'filtered']).optional(),
|
||||
llmProvider: z.string().max(100).nullable().optional(),
|
||||
llmModel: z.string().max(100).nullable().optional(),
|
||||
|
||||
23
src/mcpd/src/validation/prompt.schema.ts
Normal file
23
src/mcpd/src/validation/prompt.schema.ts
Normal file
@@ -0,0 +1,23 @@
|
||||
import { z } from 'zod';
|
||||
|
||||
export const CreatePromptSchema = z.object({
|
||||
name: z.string().min(1).max(100).regex(/^[a-z0-9-]+$/, 'Name must be lowercase alphanumeric with hyphens'),
|
||||
content: z.string().min(1).max(50000),
|
||||
projectId: z.string().optional(),
|
||||
});
|
||||
|
||||
export const UpdatePromptSchema = z.object({
|
||||
content: z.string().min(1).max(50000).optional(),
|
||||
});
|
||||
|
||||
export const CreatePromptRequestSchema = z.object({
|
||||
name: z.string().min(1).max(100).regex(/^[a-z0-9-]+$/, 'Name must be lowercase alphanumeric with hyphens'),
|
||||
content: z.string().min(1).max(50000),
|
||||
projectId: z.string().optional(),
|
||||
createdBySession: z.string().optional(),
|
||||
createdByUserId: z.string().optional(),
|
||||
});
|
||||
|
||||
export type CreatePromptInput = z.infer<typeof CreatePromptSchema>;
|
||||
export type UpdatePromptInput = z.infer<typeof UpdatePromptSchema>;
|
||||
export type CreatePromptRequestInput = z.infer<typeof CreatePromptRequestSchema>;
|
||||
@@ -1,7 +1,7 @@
|
||||
import { z } from 'zod';
|
||||
|
||||
export const RBAC_ROLES = ['edit', 'view', 'create', 'delete', 'run', 'expose'] as const;
|
||||
export const RBAC_RESOURCES = ['*', 'servers', 'instances', 'secrets', 'projects', 'templates', 'users', 'groups', 'rbac'] as const;
|
||||
export const RBAC_RESOURCES = ['*', 'servers', 'instances', 'secrets', 'projects', 'templates', 'users', 'groups', 'rbac', 'prompts', 'promptrequests'] as const;
|
||||
|
||||
/** Singular→plural map for resource names. */
|
||||
const RESOURCE_ALIASES: Record<string, string> = {
|
||||
@@ -12,6 +12,8 @@ const RESOURCE_ALIASES: Record<string, string> = {
|
||||
template: 'templates',
|
||||
user: 'users',
|
||||
group: 'groups',
|
||||
prompt: 'prompts',
|
||||
promptrequest: 'promptrequests',
|
||||
};
|
||||
|
||||
/** Normalize a resource name to its canonical plural form. */
|
||||
@@ -20,7 +22,7 @@ export function normalizeResource(resource: string): string {
|
||||
}
|
||||
|
||||
export const RbacSubjectSchema = z.object({
|
||||
kind: z.enum(['User', 'Group']),
|
||||
kind: z.enum(['User', 'Group', 'ServiceAccount']),
|
||||
name: z.string().min(1),
|
||||
});
|
||||
|
||||
|
||||
302
src/mcpd/tests/services/prompt-service.test.ts
Normal file
302
src/mcpd/tests/services/prompt-service.test.ts
Normal file
@@ -0,0 +1,302 @@
|
||||
import { describe, it, expect, vi, beforeEach } from 'vitest';
|
||||
import { PromptService } from '../../src/services/prompt.service.js';
|
||||
import type { IPromptRepository } from '../../src/repositories/prompt.repository.js';
|
||||
import type { IPromptRequestRepository } from '../../src/repositories/prompt-request.repository.js';
|
||||
import type { IProjectRepository } from '../../src/repositories/project.repository.js';
|
||||
import type { Prompt, PromptRequest, Project } from '@prisma/client';
|
||||
|
||||
function makePrompt(overrides: Partial<Prompt> = {}): Prompt {
|
||||
return {
|
||||
id: 'prompt-1',
|
||||
name: 'test-prompt',
|
||||
content: 'Hello world',
|
||||
projectId: null,
|
||||
version: 1,
|
||||
createdAt: new Date(),
|
||||
updatedAt: new Date(),
|
||||
...overrides,
|
||||
};
|
||||
}
|
||||
|
||||
function makePromptRequest(overrides: Partial<PromptRequest> = {}): PromptRequest {
|
||||
return {
|
||||
id: 'req-1',
|
||||
name: 'test-request',
|
||||
content: 'Proposed content',
|
||||
projectId: null,
|
||||
createdBySession: 'session-abc',
|
||||
createdByUserId: null,
|
||||
createdAt: new Date(),
|
||||
...overrides,
|
||||
};
|
||||
}
|
||||
|
||||
function makeProject(overrides: Partial<Project> = {}): Project {
|
||||
return {
|
||||
id: 'proj-1',
|
||||
name: 'test-project',
|
||||
description: '',
|
||||
prompt: '',
|
||||
proxyMode: 'direct',
|
||||
llmProvider: null,
|
||||
llmModel: null,
|
||||
ownerId: 'user-1',
|
||||
createdAt: new Date(),
|
||||
updatedAt: new Date(),
|
||||
...overrides,
|
||||
} as Project;
|
||||
}
|
||||
|
||||
function mockPromptRepo(): IPromptRepository {
|
||||
return {
|
||||
findAll: vi.fn(async () => []),
|
||||
findById: vi.fn(async () => null),
|
||||
findByNameAndProject: vi.fn(async () => null),
|
||||
create: vi.fn(async (data) => makePrompt(data)),
|
||||
update: vi.fn(async (id, data) => makePrompt({ id, ...data })),
|
||||
delete: vi.fn(async () => {}),
|
||||
};
|
||||
}
|
||||
|
||||
function mockPromptRequestRepo(): IPromptRequestRepository {
|
||||
return {
|
||||
findAll: vi.fn(async () => []),
|
||||
findById: vi.fn(async () => null),
|
||||
findByNameAndProject: vi.fn(async () => null),
|
||||
findBySession: vi.fn(async () => []),
|
||||
create: vi.fn(async (data) => makePromptRequest(data)),
|
||||
delete: vi.fn(async () => {}),
|
||||
};
|
||||
}
|
||||
|
||||
function mockProjectRepo(): IProjectRepository {
|
||||
return {
|
||||
findAll: vi.fn(async () => []),
|
||||
findById: vi.fn(async () => null),
|
||||
findByName: vi.fn(async () => null),
|
||||
create: vi.fn(async (data) => makeProject(data)),
|
||||
update: vi.fn(async (id, data) => makeProject({ id, ...data })),
|
||||
delete: vi.fn(async () => {}),
|
||||
};
|
||||
}
|
||||
|
||||
describe('PromptService', () => {
|
||||
let promptRepo: IPromptRepository;
|
||||
let promptRequestRepo: IPromptRequestRepository;
|
||||
let projectRepo: IProjectRepository;
|
||||
let service: PromptService;
|
||||
|
||||
beforeEach(() => {
|
||||
promptRepo = mockPromptRepo();
|
||||
promptRequestRepo = mockPromptRequestRepo();
|
||||
projectRepo = mockProjectRepo();
|
||||
service = new PromptService(promptRepo, promptRequestRepo, projectRepo);
|
||||
});
|
||||
|
||||
// ── Prompt CRUD ──
|
||||
|
||||
describe('listPrompts', () => {
|
||||
it('should return all prompts', async () => {
|
||||
const prompts = [makePrompt(), makePrompt({ id: 'prompt-2', name: 'other' })];
|
||||
vi.mocked(promptRepo.findAll).mockResolvedValue(prompts);
|
||||
|
||||
const result = await service.listPrompts();
|
||||
expect(result).toEqual(prompts);
|
||||
expect(promptRepo.findAll).toHaveBeenCalledWith(undefined);
|
||||
});
|
||||
|
||||
it('should filter by projectId', async () => {
|
||||
await service.listPrompts('proj-1');
|
||||
expect(promptRepo.findAll).toHaveBeenCalledWith('proj-1');
|
||||
});
|
||||
});
|
||||
|
||||
describe('getPrompt', () => {
|
||||
it('should return a prompt by id', async () => {
|
||||
const prompt = makePrompt();
|
||||
vi.mocked(promptRepo.findById).mockResolvedValue(prompt);
|
||||
|
||||
const result = await service.getPrompt('prompt-1');
|
||||
expect(result).toEqual(prompt);
|
||||
});
|
||||
|
||||
it('should throw NotFoundError for missing prompt', async () => {
|
||||
await expect(service.getPrompt('nope')).rejects.toThrow('Prompt not found: nope');
|
||||
});
|
||||
});
|
||||
|
||||
describe('createPrompt', () => {
|
||||
it('should create a prompt', async () => {
|
||||
const result = await service.createPrompt({ name: 'new-prompt', content: 'stuff' });
|
||||
expect(promptRepo.create).toHaveBeenCalledWith({ name: 'new-prompt', content: 'stuff' });
|
||||
expect(result.name).toBe('new-prompt');
|
||||
});
|
||||
|
||||
it('should validate project exists when projectId given', async () => {
|
||||
vi.mocked(projectRepo.findById).mockResolvedValue(makeProject());
|
||||
await service.createPrompt({ name: 'scoped', content: 'x', projectId: 'proj-1' });
|
||||
expect(projectRepo.findById).toHaveBeenCalledWith('proj-1');
|
||||
});
|
||||
|
||||
it('should throw when project not found', async () => {
|
||||
await expect(
|
||||
service.createPrompt({ name: 'bad', content: 'x', projectId: 'nope' }),
|
||||
).rejects.toThrow('Project not found: nope');
|
||||
});
|
||||
|
||||
it('should reject invalid name format', async () => {
|
||||
await expect(
|
||||
service.createPrompt({ name: 'INVALID_NAME', content: 'x' }),
|
||||
).rejects.toThrow();
|
||||
});
|
||||
});
|
||||
|
||||
describe('updatePrompt', () => {
|
||||
it('should update prompt content', async () => {
|
||||
vi.mocked(promptRepo.findById).mockResolvedValue(makePrompt());
|
||||
await service.updatePrompt('prompt-1', { content: 'updated' });
|
||||
expect(promptRepo.update).toHaveBeenCalledWith('prompt-1', { content: 'updated' });
|
||||
});
|
||||
|
||||
it('should throw for missing prompt', async () => {
|
||||
await expect(service.updatePrompt('nope', { content: 'x' })).rejects.toThrow('Prompt not found');
|
||||
});
|
||||
});
|
||||
|
||||
describe('deletePrompt', () => {
|
||||
it('should delete an existing prompt', async () => {
|
||||
vi.mocked(promptRepo.findById).mockResolvedValue(makePrompt());
|
||||
await service.deletePrompt('prompt-1');
|
||||
expect(promptRepo.delete).toHaveBeenCalledWith('prompt-1');
|
||||
});
|
||||
|
||||
it('should throw for missing prompt', async () => {
|
||||
await expect(service.deletePrompt('nope')).rejects.toThrow('Prompt not found');
|
||||
});
|
||||
});
|
||||
|
||||
// ── PromptRequest CRUD ──
|
||||
|
||||
describe('listPromptRequests', () => {
|
||||
it('should return all prompt requests', async () => {
|
||||
const reqs = [makePromptRequest()];
|
||||
vi.mocked(promptRequestRepo.findAll).mockResolvedValue(reqs);
|
||||
|
||||
const result = await service.listPromptRequests();
|
||||
expect(result).toEqual(reqs);
|
||||
});
|
||||
});
|
||||
|
||||
describe('getPromptRequest', () => {
|
||||
it('should return a prompt request by id', async () => {
|
||||
const req = makePromptRequest();
|
||||
vi.mocked(promptRequestRepo.findById).mockResolvedValue(req);
|
||||
|
||||
const result = await service.getPromptRequest('req-1');
|
||||
expect(result).toEqual(req);
|
||||
});
|
||||
|
||||
it('should throw for missing request', async () => {
|
||||
await expect(service.getPromptRequest('nope')).rejects.toThrow('PromptRequest not found');
|
||||
});
|
||||
});
|
||||
|
||||
describe('deletePromptRequest', () => {
|
||||
it('should delete an existing request', async () => {
|
||||
vi.mocked(promptRequestRepo.findById).mockResolvedValue(makePromptRequest());
|
||||
await service.deletePromptRequest('req-1');
|
||||
expect(promptRequestRepo.delete).toHaveBeenCalledWith('req-1');
|
||||
});
|
||||
});
|
||||
|
||||
// ── Propose ──
|
||||
|
||||
describe('propose', () => {
|
||||
it('should create a prompt request', async () => {
|
||||
const result = await service.propose({
|
||||
name: 'my-prompt',
|
||||
content: 'proposal',
|
||||
createdBySession: 'sess-1',
|
||||
});
|
||||
expect(promptRequestRepo.create).toHaveBeenCalledWith(
|
||||
expect.objectContaining({ name: 'my-prompt', content: 'proposal', createdBySession: 'sess-1' }),
|
||||
);
|
||||
expect(result.name).toBe('my-prompt');
|
||||
});
|
||||
|
||||
it('should validate project exists when projectId given', async () => {
|
||||
vi.mocked(projectRepo.findById).mockResolvedValue(makeProject());
|
||||
await service.propose({
|
||||
name: 'scoped',
|
||||
content: 'x',
|
||||
projectId: 'proj-1',
|
||||
});
|
||||
expect(projectRepo.findById).toHaveBeenCalledWith('proj-1');
|
||||
});
|
||||
});
|
||||
|
||||
// ── Approve ──
|
||||
|
||||
describe('approve', () => {
|
||||
it('should delete request and create prompt (atomic)', async () => {
|
||||
const req = makePromptRequest({ id: 'req-1', name: 'approved', content: 'good stuff', projectId: 'proj-1' });
|
||||
vi.mocked(promptRequestRepo.findById).mockResolvedValue(req);
|
||||
|
||||
const result = await service.approve('req-1');
|
||||
|
||||
expect(promptRepo.create).toHaveBeenCalledWith(
|
||||
expect.objectContaining({ name: 'approved', content: 'good stuff', projectId: 'proj-1' }),
|
||||
);
|
||||
expect(promptRequestRepo.delete).toHaveBeenCalledWith('req-1');
|
||||
expect(result.name).toBe('approved');
|
||||
});
|
||||
|
||||
it('should throw for missing request', async () => {
|
||||
await expect(service.approve('nope')).rejects.toThrow('PromptRequest not found');
|
||||
});
|
||||
|
||||
it('should handle global prompt (no projectId)', async () => {
|
||||
const req = makePromptRequest({ id: 'req-2', name: 'global', content: 'stuff', projectId: null });
|
||||
vi.mocked(promptRequestRepo.findById).mockResolvedValue(req);
|
||||
|
||||
await service.approve('req-2');
|
||||
|
||||
// Should NOT include projectId in the create call
|
||||
const createArg = vi.mocked(promptRepo.create).mock.calls[0]![0];
|
||||
expect(createArg).not.toHaveProperty('projectId');
|
||||
});
|
||||
});
|
||||
|
||||
// ── Visibility ──
|
||||
|
||||
describe('getVisiblePrompts', () => {
|
||||
it('should return approved prompts and session requests', async () => {
|
||||
vi.mocked(promptRepo.findAll).mockResolvedValue([
|
||||
makePrompt({ name: 'approved-1', content: 'A' }),
|
||||
]);
|
||||
vi.mocked(promptRequestRepo.findBySession).mockResolvedValue([
|
||||
makePromptRequest({ name: 'pending-1', content: 'B' }),
|
||||
]);
|
||||
|
||||
const result = await service.getVisiblePrompts('proj-1', 'sess-1');
|
||||
|
||||
expect(result).toHaveLength(2);
|
||||
expect(result[0]).toEqual({ name: 'approved-1', content: 'A', type: 'prompt' });
|
||||
expect(result[1]).toEqual({ name: 'pending-1', content: 'B', type: 'promptrequest' });
|
||||
});
|
||||
|
||||
it('should not include pending requests without sessionId', async () => {
|
||||
vi.mocked(promptRepo.findAll).mockResolvedValue([makePrompt()]);
|
||||
|
||||
const result = await service.getVisiblePrompts('proj-1');
|
||||
|
||||
expect(result).toHaveLength(1);
|
||||
expect(promptRequestRepo.findBySession).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it('should return empty when no prompts or requests', async () => {
|
||||
const result = await service.getVisiblePrompts();
|
||||
expect(result).toEqual([]);
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -5,6 +5,7 @@ import { McpdUpstream } from './upstream/mcpd.js';
|
||||
interface McpdServer {
|
||||
id: string;
|
||||
name: string;
|
||||
description?: string;
|
||||
transport: string;
|
||||
status?: string;
|
||||
}
|
||||
@@ -35,7 +36,7 @@ export async function refreshProjectUpstreams(
|
||||
let servers: McpdServer[];
|
||||
if (authToken) {
|
||||
// Forward the client's auth token to mcpd so RBAC applies
|
||||
const result = await mcpdClient.forward('GET', path, '', undefined);
|
||||
const result = await mcpdClient.forward('GET', path, '', undefined, authToken);
|
||||
if (result.status >= 400) {
|
||||
throw new Error(`Failed to fetch project servers: ${result.status}`);
|
||||
}
|
||||
@@ -63,7 +64,7 @@ function syncUpstreams(router: McpRouter, mcpdClient: McpdClient, servers: McpdS
|
||||
// Add/update upstreams for each server
|
||||
for (const server of servers) {
|
||||
if (!currentNames.has(server.name)) {
|
||||
const upstream = new McpdUpstream(server.id, server.name, mcpdClient);
|
||||
const upstream = new McpdUpstream(server.id, server.name, mcpdClient, server.description);
|
||||
router.addUpstream(upstream);
|
||||
}
|
||||
registered.push(server.name);
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
import { existsSync, readFileSync } from 'node:fs';
|
||||
import { join } from 'node:path';
|
||||
import { homedir } from 'node:os';
|
||||
|
||||
/** Configuration for the mcplocal HTTP server. */
|
||||
export interface HttpConfig {
|
||||
/** Port for the HTTP server (default: 3200) */
|
||||
@@ -15,9 +19,48 @@ export interface HttpConfig {
|
||||
const DEFAULT_HTTP_PORT = 3200;
|
||||
const DEFAULT_HTTP_HOST = '127.0.0.1';
|
||||
const DEFAULT_MCPD_URL = 'http://localhost:3100';
|
||||
const DEFAULT_MCPD_TOKEN = '';
|
||||
const DEFAULT_LOG_LEVEL = 'info';
|
||||
|
||||
/**
|
||||
* Read the user's mcpctl credentials from ~/.mcpctl/credentials.
|
||||
* Returns the token if found, empty string otherwise.
|
||||
*/
|
||||
function loadUserToken(): string {
|
||||
try {
|
||||
const credPath = join(homedir(), '.mcpctl', 'credentials');
|
||||
if (!existsSync(credPath)) return '';
|
||||
const raw = readFileSync(credPath, 'utf-8');
|
||||
const parsed = JSON.parse(raw) as { token?: string };
|
||||
return parsed.token ?? '';
|
||||
} catch {
|
||||
return '';
|
||||
}
|
||||
}
|
||||
|
||||
export interface LlmFileConfig {
|
||||
provider: string;
|
||||
model?: string;
|
||||
url?: string;
|
||||
binaryPath?: string;
|
||||
}
|
||||
|
||||
/**
|
||||
* Load LLM configuration from ~/.mcpctl/config.json.
|
||||
* Returns undefined if no LLM section is configured.
|
||||
*/
|
||||
export function loadLlmConfig(): LlmFileConfig | undefined {
|
||||
try {
|
||||
const configPath = join(homedir(), '.mcpctl', 'config.json');
|
||||
if (!existsSync(configPath)) return undefined;
|
||||
const raw = readFileSync(configPath, 'utf-8');
|
||||
const parsed = JSON.parse(raw) as { llm?: LlmFileConfig };
|
||||
if (!parsed.llm?.provider || parsed.llm.provider === 'none') return undefined;
|
||||
return parsed.llm;
|
||||
} catch {
|
||||
return undefined;
|
||||
}
|
||||
}
|
||||
|
||||
export function loadHttpConfig(env: Record<string, string | undefined> = process.env): HttpConfig {
|
||||
const portStr = env['MCPLOCAL_HTTP_PORT'];
|
||||
const port = portStr !== undefined ? parseInt(portStr, 10) : DEFAULT_HTTP_PORT;
|
||||
@@ -26,7 +69,7 @@ export function loadHttpConfig(env: Record<string, string | undefined> = process
|
||||
httpPort: Number.isFinite(port) ? port : DEFAULT_HTTP_PORT,
|
||||
httpHost: env['MCPLOCAL_HTTP_HOST'] ?? DEFAULT_HTTP_HOST,
|
||||
mcpdUrl: env['MCPLOCAL_MCPD_URL'] ?? DEFAULT_MCPD_URL,
|
||||
mcpdToken: env['MCPLOCAL_MCPD_TOKEN'] ?? DEFAULT_MCPD_TOKEN,
|
||||
mcpdToken: env['MCPLOCAL_MCPD_TOKEN'] ?? loadUserToken(),
|
||||
logLevel: (env['MCPLOCAL_LOG_LEVEL'] as HttpConfig['logLevel'] | undefined) ?? DEFAULT_LOG_LEVEL,
|
||||
};
|
||||
}
|
||||
|
||||
@@ -23,11 +23,21 @@ export class ConnectionError extends Error {
|
||||
export class McpdClient {
|
||||
private readonly baseUrl: string;
|
||||
private readonly token: string;
|
||||
private readonly extraHeaders: Record<string, string>;
|
||||
|
||||
constructor(baseUrl: string, token: string) {
|
||||
constructor(baseUrl: string, token: string, extraHeaders?: Record<string, string>) {
|
||||
// Strip trailing slash for consistent URL joining
|
||||
this.baseUrl = baseUrl.replace(/\/+$/, '');
|
||||
this.token = token;
|
||||
this.extraHeaders = extraHeaders ?? {};
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a new client with additional default headers.
|
||||
* Inherits base URL and token from the current client.
|
||||
*/
|
||||
withHeaders(headers: Record<string, string>): McpdClient {
|
||||
return new McpdClient(this.baseUrl, this.token, { ...this.extraHeaders, ...headers });
|
||||
}
|
||||
|
||||
async get<T>(path: string): Promise<T> {
|
||||
@@ -62,6 +72,7 @@ export class McpdClient {
|
||||
): Promise<{ status: number; body: unknown }> {
|
||||
const url = `${this.baseUrl}${path}${query ? `?${query}` : ''}`;
|
||||
const headers: Record<string, string> = {
|
||||
...this.extraHeaders,
|
||||
'Authorization': `Bearer ${authOverride ?? this.token}`,
|
||||
'Accept': 'application/json',
|
||||
};
|
||||
|
||||
@@ -12,8 +12,10 @@ import type { FastifyInstance } from 'fastify';
|
||||
import { StreamableHTTPServerTransport } from '@modelcontextprotocol/sdk/server/streamableHttp.js';
|
||||
import type { JSONRPCMessage } from '@modelcontextprotocol/sdk/types.js';
|
||||
import { McpRouter } from '../router.js';
|
||||
import { ResponsePaginator } from '../llm/pagination.js';
|
||||
import { refreshProjectUpstreams } from '../discovery.js';
|
||||
import type { McpdClient } from './mcpd-client.js';
|
||||
import type { ProviderRegistry } from '../providers/registry.js';
|
||||
import type { JsonRpcRequest } from '../types.js';
|
||||
|
||||
interface ProjectCacheEntry {
|
||||
@@ -28,7 +30,7 @@ interface SessionEntry {
|
||||
|
||||
const CACHE_TTL_MS = 60_000; // 60 seconds
|
||||
|
||||
export function registerProjectMcpEndpoint(app: FastifyInstance, mcpdClient: McpdClient): void {
|
||||
export function registerProjectMcpEndpoint(app: FastifyInstance, mcpdClient: McpdClient, providerRegistry?: ProviderRegistry | null): void {
|
||||
const projectCache = new Map<string, ProjectCacheEntry>();
|
||||
const sessions = new Map<string, SessionEntry>();
|
||||
|
||||
@@ -44,6 +46,35 @@ export function registerProjectMcpEndpoint(app: FastifyInstance, mcpdClient: Mcp
|
||||
const router = existing?.router ?? new McpRouter();
|
||||
await refreshProjectUpstreams(router, mcpdClient, projectName, authToken);
|
||||
|
||||
// Wire pagination support with LLM provider if configured
|
||||
router.setPaginator(new ResponsePaginator(providerRegistry ?? null));
|
||||
|
||||
// Configure prompt resources with SA-scoped client for RBAC
|
||||
const saClient = mcpdClient.withHeaders({ 'X-Service-Account': `project:${projectName}` });
|
||||
router.setPromptConfig(saClient, projectName);
|
||||
|
||||
// Fetch project instructions and set on router
|
||||
try {
|
||||
const instructions = await mcpdClient.get<{ prompt: string; servers: Array<{ name: string; description: string }> }>(
|
||||
`/api/v1/projects/${encodeURIComponent(projectName)}/instructions`,
|
||||
);
|
||||
const parts: string[] = [];
|
||||
if (instructions.prompt) {
|
||||
parts.push(instructions.prompt);
|
||||
}
|
||||
if (instructions.servers.length > 0) {
|
||||
parts.push('Available MCP servers:');
|
||||
for (const s of instructions.servers) {
|
||||
parts.push(`- ${s.name}${s.description ? `: ${s.description}` : ''}`);
|
||||
}
|
||||
}
|
||||
if (parts.length > 0) {
|
||||
router.setInstructions(parts.join('\n'));
|
||||
}
|
||||
} catch {
|
||||
// Instructions are optional — don't fail if endpoint is unavailable
|
||||
}
|
||||
|
||||
projectCache.set(projectName, { router, lastRefresh: now });
|
||||
return router;
|
||||
}
|
||||
@@ -84,7 +115,8 @@ export function registerProjectMcpEndpoint(app: FastifyInstance, mcpdClient: Mcp
|
||||
|
||||
transport.onmessage = async (message: JSONRPCMessage) => {
|
||||
if ('method' in message && 'id' in message) {
|
||||
const response = await router.route(message as unknown as JsonRpcRequest);
|
||||
const ctx = transport.sessionId ? { sessionId: transport.sessionId } : undefined;
|
||||
const response = await router.route(message as unknown as JsonRpcRequest, ctx);
|
||||
await transport.send(response as unknown as JSONRPCMessage);
|
||||
}
|
||||
};
|
||||
|
||||
@@ -10,11 +10,13 @@ import { registerProjectMcpEndpoint } from './project-mcp-endpoint.js';
|
||||
import type { McpRouter } from '../router.js';
|
||||
import type { HealthMonitor } from '../health.js';
|
||||
import type { TieredHealthMonitor } from '../health/tiered.js';
|
||||
import type { ProviderRegistry } from '../providers/registry.js';
|
||||
|
||||
export interface HttpServerDeps {
|
||||
router: McpRouter;
|
||||
healthMonitor?: HealthMonitor | undefined;
|
||||
tieredHealthMonitor?: TieredHealthMonitor | undefined;
|
||||
providerRegistry?: ProviderRegistry | null | undefined;
|
||||
}
|
||||
|
||||
export async function createHttpServer(
|
||||
@@ -79,6 +81,34 @@ export async function createHttpServer(
|
||||
reply.code(200).send({ status: 'ok' });
|
||||
});
|
||||
|
||||
// LLM health check — tests the active provider with a tiny prompt
|
||||
app.get('/llm/health', async (_request, reply) => {
|
||||
const provider = deps.providerRegistry?.getActive() ?? null;
|
||||
if (!provider) {
|
||||
reply.code(200).send({ status: 'not configured' });
|
||||
return;
|
||||
}
|
||||
try {
|
||||
const result = await provider.complete({
|
||||
messages: [{ role: 'user', content: 'Respond with exactly: ok' }],
|
||||
maxTokens: 10,
|
||||
});
|
||||
const ok = result.content.trim().toLowerCase().includes('ok');
|
||||
reply.code(200).send({
|
||||
status: ok ? 'ok' : 'unexpected response',
|
||||
provider: provider.name,
|
||||
response: result.content.trim().slice(0, 100),
|
||||
});
|
||||
} catch (err) {
|
||||
const msg = (err as Error).message ?? String(err);
|
||||
reply.code(200).send({
|
||||
status: 'error',
|
||||
provider: provider.name,
|
||||
error: msg.slice(0, 200),
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
// Proxy management routes to mcpd
|
||||
const mcpdClient = new McpdClient(config.mcpdUrl, config.mcpdToken);
|
||||
registerProxyRoutes(app, mcpdClient);
|
||||
@@ -87,7 +117,7 @@ export async function createHttpServer(
|
||||
registerMcpEndpoint(app, deps.router);
|
||||
|
||||
// Project-scoped MCP endpoint at /projects/:projectName/mcp
|
||||
registerProjectMcpEndpoint(app, mcpdClient);
|
||||
registerProjectMcpEndpoint(app, mcpdClient, deps.providerRegistry);
|
||||
|
||||
return app;
|
||||
}
|
||||
|
||||
97
src/mcplocal/src/llm-config.ts
Normal file
97
src/mcplocal/src/llm-config.ts
Normal file
@@ -0,0 +1,97 @@
|
||||
import type { SecretStore } from '@mcpctl/shared';
|
||||
import type { LlmFileConfig } from './http/config.js';
|
||||
import { ProviderRegistry } from './providers/registry.js';
|
||||
import { GeminiAcpProvider } from './providers/gemini-acp.js';
|
||||
import { OllamaProvider } from './providers/ollama.js';
|
||||
import { AnthropicProvider } from './providers/anthropic.js';
|
||||
import { OpenAiProvider } from './providers/openai.js';
|
||||
import { DeepSeekProvider } from './providers/deepseek.js';
|
||||
import type { GeminiAcpConfig } from './providers/gemini-acp.js';
|
||||
import type { OllamaConfig } from './providers/ollama.js';
|
||||
import type { AnthropicConfig } from './providers/anthropic.js';
|
||||
import type { OpenAiConfig } from './providers/openai.js';
|
||||
import type { DeepSeekConfig } from './providers/deepseek.js';
|
||||
|
||||
/**
|
||||
* Create a ProviderRegistry from user config + secret store.
|
||||
* Returns an empty registry if config is undefined or provider is 'none'.
|
||||
*/
|
||||
export async function createProviderFromConfig(
|
||||
config: LlmFileConfig | undefined,
|
||||
secretStore: SecretStore,
|
||||
): Promise<ProviderRegistry> {
|
||||
const registry = new ProviderRegistry();
|
||||
if (!config?.provider || config.provider === 'none') return registry;
|
||||
|
||||
switch (config.provider) {
|
||||
case 'gemini-cli': {
|
||||
const cfg: GeminiAcpConfig = {};
|
||||
if (config.binaryPath) cfg.binaryPath = config.binaryPath;
|
||||
if (config.model) cfg.defaultModel = config.model;
|
||||
registry.register(new GeminiAcpProvider(cfg));
|
||||
break;
|
||||
}
|
||||
|
||||
case 'ollama': {
|
||||
const cfg: OllamaConfig = {};
|
||||
if (config.url) cfg.baseUrl = config.url;
|
||||
if (config.model) cfg.defaultModel = config.model;
|
||||
registry.register(new OllamaProvider(cfg));
|
||||
break;
|
||||
}
|
||||
|
||||
case 'anthropic': {
|
||||
const apiKey = await secretStore.get('anthropic-api-key');
|
||||
if (!apiKey) {
|
||||
process.stderr.write('Warning: Anthropic API key not found in secret store. Run "mcpctl config setup" to configure.\n');
|
||||
return registry;
|
||||
}
|
||||
const cfg: AnthropicConfig = { apiKey };
|
||||
if (config.model) cfg.defaultModel = config.model;
|
||||
registry.register(new AnthropicProvider(cfg));
|
||||
break;
|
||||
}
|
||||
|
||||
case 'openai': {
|
||||
const apiKey = await secretStore.get('openai-api-key');
|
||||
if (!apiKey) {
|
||||
process.stderr.write('Warning: OpenAI API key not found in secret store. Run "mcpctl config setup" to configure.\n');
|
||||
return registry;
|
||||
}
|
||||
const cfg: OpenAiConfig = { apiKey };
|
||||
if (config.url) cfg.baseUrl = config.url;
|
||||
if (config.model) cfg.defaultModel = config.model;
|
||||
registry.register(new OpenAiProvider(cfg));
|
||||
break;
|
||||
}
|
||||
|
||||
case 'deepseek': {
|
||||
const apiKey = await secretStore.get('deepseek-api-key');
|
||||
if (!apiKey) {
|
||||
process.stderr.write('Warning: DeepSeek API key not found in secret store. Run "mcpctl config setup" to configure.\n');
|
||||
return registry;
|
||||
}
|
||||
const cfg: DeepSeekConfig = { apiKey };
|
||||
if (config.url) cfg.baseUrl = config.url;
|
||||
if (config.model) cfg.defaultModel = config.model;
|
||||
registry.register(new DeepSeekProvider(cfg));
|
||||
break;
|
||||
}
|
||||
|
||||
case 'vllm': {
|
||||
// vLLM uses OpenAI-compatible API
|
||||
if (!config.url) {
|
||||
process.stderr.write('Warning: vLLM URL not configured. Run "mcpctl config setup" to configure.\n');
|
||||
return registry;
|
||||
}
|
||||
registry.register(new OpenAiProvider({
|
||||
apiKey: 'unused',
|
||||
baseUrl: config.url,
|
||||
defaultModel: config.model ?? 'default',
|
||||
}));
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return registry;
|
||||
}
|
||||
@@ -6,3 +6,5 @@ export { FilterCache, DEFAULT_FILTER_CACHE_CONFIG } from './filter-cache.js';
|
||||
export type { FilterCacheConfig } from './filter-cache.js';
|
||||
export { FilterMetrics } from './metrics.js';
|
||||
export type { FilterMetricsSnapshot } from './metrics.js';
|
||||
export { ResponsePaginator, DEFAULT_PAGINATION_CONFIG, PAGINATION_INDEX_SYSTEM_PROMPT } from './pagination.js';
|
||||
export type { PaginationConfig, PaginationIndex, PageSummary, PaginatedToolResponse } from './pagination.js';
|
||||
|
||||
354
src/mcplocal/src/llm/pagination.ts
Normal file
354
src/mcplocal/src/llm/pagination.ts
Normal file
@@ -0,0 +1,354 @@
|
||||
import { randomUUID } from 'node:crypto';
|
||||
import type { ProviderRegistry } from '../providers/registry.js';
|
||||
import { estimateTokens } from './token-counter.js';
|
||||
|
||||
// --- Configuration ---
|
||||
|
||||
export interface PaginationConfig {
|
||||
/** Character threshold above which responses get paginated (default 80_000) */
|
||||
sizeThreshold: number;
|
||||
/** Characters per page (default 40_000) */
|
||||
pageSize: number;
|
||||
/** Max cached results (LRU eviction) (default 64) */
|
||||
maxCachedResults: number;
|
||||
/** TTL for cached results in ms (default 300_000 = 5 min) */
|
||||
ttlMs: number;
|
||||
/** Max tokens for the LLM index generation call (default 2048) */
|
||||
indexMaxTokens: number;
|
||||
}
|
||||
|
||||
export const DEFAULT_PAGINATION_CONFIG: PaginationConfig = {
|
||||
sizeThreshold: 80_000,
|
||||
pageSize: 40_000,
|
||||
maxCachedResults: 64,
|
||||
ttlMs: 300_000,
|
||||
indexMaxTokens: 2048,
|
||||
};
|
||||
|
||||
// --- Cache Entry ---
|
||||
|
||||
interface PageInfo {
|
||||
/** 0-based page index */
|
||||
index: number;
|
||||
/** Start character offset in the raw string */
|
||||
startChar: number;
|
||||
/** End character offset (exclusive) */
|
||||
endChar: number;
|
||||
/** Approximate token count */
|
||||
estimatedTokens: number;
|
||||
}
|
||||
|
||||
interface CachedResult {
|
||||
resultId: string;
|
||||
toolName: string;
|
||||
raw: string;
|
||||
pages: PageInfo[];
|
||||
index: PaginationIndex;
|
||||
createdAt: number;
|
||||
}
|
||||
|
||||
// --- Index Types ---
|
||||
|
||||
export interface PageSummary {
|
||||
page: number;
|
||||
startChar: number;
|
||||
endChar: number;
|
||||
estimatedTokens: number;
|
||||
summary: string;
|
||||
}
|
||||
|
||||
export interface PaginationIndex {
|
||||
resultId: string;
|
||||
toolName: string;
|
||||
totalSize: number;
|
||||
totalTokens: number;
|
||||
totalPages: number;
|
||||
pageSummaries: PageSummary[];
|
||||
indexType: 'smart' | 'simple';
|
||||
}
|
||||
|
||||
// --- The MCP response format ---
|
||||
|
||||
export interface PaginatedToolResponse {
|
||||
content: Array<{
|
||||
type: 'text';
|
||||
text: string;
|
||||
}>;
|
||||
}
|
||||
|
||||
// --- LLM Prompt ---
|
||||
|
||||
export const PAGINATION_INDEX_SYSTEM_PROMPT = `You are a document indexing assistant. Given a large tool response split into pages, generate a concise summary for each page describing what data it contains.
|
||||
|
||||
Rules:
|
||||
- For each page, write 1-2 sentences describing the key content
|
||||
- Be specific: mention entity names, IDs, counts, or key fields visible on that page
|
||||
- If it's JSON, describe the structure and notable entries
|
||||
- If it's text, describe the topics covered
|
||||
- Output valid JSON only: an array of objects with "page" (1-based number) and "summary" (string)
|
||||
- Example output: [{"page": 1, "summary": "Configuration nodes and global settings (inject, debug, function nodes 1-15)"}, {"page": 2, "summary": "HTTP request nodes and API integrations (nodes 16-40)"}]`;
|
||||
|
||||
/**
|
||||
* Handles transparent pagination of large MCP tool responses.
|
||||
*
|
||||
* When a tool response exceeds the size threshold, it is cached and an
|
||||
* index is returned instead. The LLM can then request specific pages
|
||||
* via _page/_resultId parameters on subsequent tool calls.
|
||||
*
|
||||
* If an LLM provider is available, the index includes AI-generated
|
||||
* per-page summaries. Otherwise, simple byte-range descriptions are used.
|
||||
*/
|
||||
export class ResponsePaginator {
|
||||
private cache = new Map<string, CachedResult>();
|
||||
private readonly config: PaginationConfig;
|
||||
|
||||
constructor(
|
||||
private providers: ProviderRegistry | null,
|
||||
config: Partial<PaginationConfig> = {},
|
||||
) {
|
||||
this.config = { ...DEFAULT_PAGINATION_CONFIG, ...config };
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if a raw response string should be paginated.
|
||||
*/
|
||||
shouldPaginate(raw: string): boolean {
|
||||
return raw.length >= this.config.sizeThreshold;
|
||||
}
|
||||
|
||||
/**
|
||||
* Paginate a large response: cache it and return the index.
|
||||
* Returns null if the response is below threshold.
|
||||
*/
|
||||
async paginate(toolName: string, raw: string): Promise<PaginatedToolResponse | null> {
|
||||
if (!this.shouldPaginate(raw)) return null;
|
||||
|
||||
const resultId = randomUUID();
|
||||
const pages = this.splitPages(raw);
|
||||
let index: PaginationIndex;
|
||||
|
||||
try {
|
||||
index = await this.generateSmartIndex(resultId, toolName, raw, pages);
|
||||
} catch {
|
||||
index = this.generateSimpleIndex(resultId, toolName, raw, pages);
|
||||
}
|
||||
|
||||
// Store in cache
|
||||
this.evictExpired();
|
||||
this.evictLRU();
|
||||
this.cache.set(resultId, {
|
||||
resultId,
|
||||
toolName,
|
||||
raw,
|
||||
pages,
|
||||
index,
|
||||
createdAt: Date.now(),
|
||||
});
|
||||
|
||||
return this.formatIndexResponse(index);
|
||||
}
|
||||
|
||||
/**
|
||||
* Serve a specific page from cache.
|
||||
* Returns null if the resultId is not found (cache miss / expired).
|
||||
*/
|
||||
getPage(resultId: string, page: number | 'all'): PaginatedToolResponse | null {
|
||||
this.evictExpired();
|
||||
const entry = this.cache.get(resultId);
|
||||
if (!entry) return null;
|
||||
|
||||
if (page === 'all') {
|
||||
return {
|
||||
content: [{ type: 'text', text: entry.raw }],
|
||||
};
|
||||
}
|
||||
|
||||
// Pages are 1-based in the API
|
||||
const pageInfo = entry.pages[page - 1];
|
||||
if (!pageInfo) {
|
||||
return {
|
||||
content: [{
|
||||
type: 'text',
|
||||
text: `Error: page ${String(page)} is out of range. This result has ${String(entry.pages.length)} pages (1-${String(entry.pages.length)}).`,
|
||||
}],
|
||||
};
|
||||
}
|
||||
|
||||
const pageContent = entry.raw.slice(pageInfo.startChar, pageInfo.endChar);
|
||||
return {
|
||||
content: [{
|
||||
type: 'text',
|
||||
text: `[Page ${String(page)}/${String(entry.pages.length)} of result ${resultId}]\n\n${pageContent}`,
|
||||
}],
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if a tool call has pagination parameters (_page / _resultId).
|
||||
* Returns the parsed pagination request, or null if not a pagination request.
|
||||
*/
|
||||
static extractPaginationParams(
|
||||
args: Record<string, unknown>,
|
||||
): { resultId: string; page: number | 'all' } | null {
|
||||
const resultId = args['_resultId'];
|
||||
const pageParam = args['_page'];
|
||||
if (typeof resultId !== 'string' || pageParam === undefined) return null;
|
||||
|
||||
if (pageParam === 'all') return { resultId, page: 'all' };
|
||||
|
||||
const page = Number(pageParam);
|
||||
if (!Number.isInteger(page) || page < 1) return null;
|
||||
|
||||
return { resultId, page };
|
||||
}
|
||||
|
||||
// --- Private methods ---
|
||||
|
||||
private splitPages(raw: string): PageInfo[] {
|
||||
const pages: PageInfo[] = [];
|
||||
let offset = 0;
|
||||
let pageIndex = 0;
|
||||
|
||||
while (offset < raw.length) {
|
||||
const end = Math.min(offset + this.config.pageSize, raw.length);
|
||||
// Try to break at a newline boundary if we're not at the end
|
||||
let breakAt = end;
|
||||
if (end < raw.length) {
|
||||
const lastNewline = raw.lastIndexOf('\n', end);
|
||||
if (lastNewline > offset) {
|
||||
breakAt = lastNewline + 1;
|
||||
}
|
||||
}
|
||||
|
||||
pages.push({
|
||||
index: pageIndex,
|
||||
startChar: offset,
|
||||
endChar: breakAt,
|
||||
estimatedTokens: estimateTokens(raw.slice(offset, breakAt)),
|
||||
});
|
||||
|
||||
offset = breakAt;
|
||||
pageIndex++;
|
||||
}
|
||||
|
||||
return pages;
|
||||
}
|
||||
|
||||
private async generateSmartIndex(
|
||||
resultId: string,
|
||||
toolName: string,
|
||||
raw: string,
|
||||
pages: PageInfo[],
|
||||
): Promise<PaginationIndex> {
|
||||
const provider = this.providers?.getActive();
|
||||
if (!provider) {
|
||||
return this.generateSimpleIndex(resultId, toolName, raw, pages);
|
||||
}
|
||||
|
||||
// Build a prompt with page previews (first ~500 chars of each page)
|
||||
const previews = pages.map((p, i) => {
|
||||
const preview = raw.slice(p.startChar, Math.min(p.startChar + 500, p.endChar));
|
||||
const truncated = p.endChar - p.startChar > 500 ? '\n[...]' : '';
|
||||
return `--- Page ${String(i + 1)} (chars ${String(p.startChar)}-${String(p.endChar)}, ~${String(p.estimatedTokens)} tokens) ---\n${preview}${truncated}`;
|
||||
}).join('\n\n');
|
||||
|
||||
const result = await provider.complete({
|
||||
messages: [
|
||||
{ role: 'system', content: PAGINATION_INDEX_SYSTEM_PROMPT },
|
||||
{ role: 'user', content: `Tool: ${toolName}\nTotal size: ${String(raw.length)} chars, ${String(pages.length)} pages\n\n${previews}` },
|
||||
],
|
||||
maxTokens: this.config.indexMaxTokens,
|
||||
temperature: 0,
|
||||
});
|
||||
|
||||
const summaries = JSON.parse(result.content) as Array<{ page: number; summary: string }>;
|
||||
|
||||
return {
|
||||
resultId,
|
||||
toolName,
|
||||
totalSize: raw.length,
|
||||
totalTokens: estimateTokens(raw),
|
||||
totalPages: pages.length,
|
||||
indexType: 'smart',
|
||||
pageSummaries: pages.map((p, i) => ({
|
||||
page: i + 1,
|
||||
startChar: p.startChar,
|
||||
endChar: p.endChar,
|
||||
estimatedTokens: p.estimatedTokens,
|
||||
summary: summaries.find((s) => s.page === i + 1)?.summary ?? `Page ${String(i + 1)}`,
|
||||
})),
|
||||
};
|
||||
}
|
||||
|
||||
private generateSimpleIndex(
|
||||
resultId: string,
|
||||
toolName: string,
|
||||
raw: string,
|
||||
pages: PageInfo[],
|
||||
): PaginationIndex {
|
||||
return {
|
||||
resultId,
|
||||
toolName,
|
||||
totalSize: raw.length,
|
||||
totalTokens: estimateTokens(raw),
|
||||
totalPages: pages.length,
|
||||
indexType: 'simple',
|
||||
pageSummaries: pages.map((p, i) => ({
|
||||
page: i + 1,
|
||||
startChar: p.startChar,
|
||||
endChar: p.endChar,
|
||||
estimatedTokens: p.estimatedTokens,
|
||||
summary: `Page ${String(i + 1)}: characters ${String(p.startChar)}-${String(p.endChar)} (~${String(p.estimatedTokens)} tokens)`,
|
||||
})),
|
||||
};
|
||||
}
|
||||
|
||||
private formatIndexResponse(index: PaginationIndex): PaginatedToolResponse {
|
||||
const lines = [
|
||||
`This response is too large to return directly (${String(index.totalSize)} chars, ~${String(index.totalTokens)} tokens).`,
|
||||
`It has been split into ${String(index.totalPages)} pages.`,
|
||||
'',
|
||||
'To retrieve a specific page, call this same tool again with additional arguments:',
|
||||
` "_resultId": "${index.resultId}"`,
|
||||
` "_page": <page_number> (1-${String(index.totalPages)})`,
|
||||
' "_page": "all" (returns the full response)',
|
||||
'',
|
||||
`--- Page Index${index.indexType === 'smart' ? ' (AI-generated summaries)' : ''} ---`,
|
||||
];
|
||||
|
||||
for (const page of index.pageSummaries) {
|
||||
lines.push(` Page ${String(page.page)}: ${page.summary}`);
|
||||
}
|
||||
|
||||
return {
|
||||
content: [{ type: 'text', text: lines.join('\n') }],
|
||||
};
|
||||
}
|
||||
|
||||
private evictExpired(): void {
|
||||
const now = Date.now();
|
||||
for (const [id, entry] of this.cache) {
|
||||
if (now - entry.createdAt > this.config.ttlMs) {
|
||||
this.cache.delete(id);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private evictLRU(): void {
|
||||
while (this.cache.size >= this.config.maxCachedResults) {
|
||||
const oldest = this.cache.keys().next();
|
||||
if (oldest.done) break;
|
||||
this.cache.delete(oldest.value);
|
||||
}
|
||||
}
|
||||
|
||||
/** Exposed for testing. */
|
||||
get cacheSize(): number {
|
||||
return this.cache.size;
|
||||
}
|
||||
|
||||
/** Clear all cached results. */
|
||||
clearCache(): void {
|
||||
this.cache.clear();
|
||||
}
|
||||
}
|
||||
@@ -7,8 +7,11 @@ import { StdioProxyServer } from './server.js';
|
||||
import { StdioUpstream } from './upstream/stdio.js';
|
||||
import { HttpUpstream } from './upstream/http.js';
|
||||
import { createHttpServer } from './http/server.js';
|
||||
import { loadHttpConfig } from './http/config.js';
|
||||
import { loadHttpConfig, loadLlmConfig } from './http/config.js';
|
||||
import type { HttpConfig } from './http/config.js';
|
||||
import { createProviderFromConfig } from './llm-config.js';
|
||||
import { createSecretStore } from '@mcpctl/shared';
|
||||
import type { ProviderRegistry } from './providers/registry.js';
|
||||
|
||||
interface ParsedArgs {
|
||||
configPath: string | undefined;
|
||||
@@ -55,12 +58,22 @@ export interface MainResult {
|
||||
server: StdioProxyServer;
|
||||
httpServer: FastifyInstance | undefined;
|
||||
httpConfig: HttpConfig;
|
||||
providerRegistry: ProviderRegistry;
|
||||
}
|
||||
|
||||
export async function main(argv: string[] = process.argv): Promise<MainResult> {
|
||||
const args = parseArgs(argv);
|
||||
const httpConfig = loadHttpConfig();
|
||||
|
||||
// Load LLM provider from user config + secret store
|
||||
const llmConfig = loadLlmConfig();
|
||||
const secretStore = await createSecretStore();
|
||||
const providerRegistry = await createProviderFromConfig(llmConfig, secretStore);
|
||||
const activeLlm = providerRegistry.getActive();
|
||||
if (activeLlm) {
|
||||
process.stderr.write(`LLM provider: ${activeLlm.name}\n`);
|
||||
}
|
||||
|
||||
let upstreamConfigs: UpstreamConfig[] = [];
|
||||
|
||||
if (args.configPath) {
|
||||
@@ -115,7 +128,7 @@ export async function main(argv: string[] = process.argv): Promise<MainResult> {
|
||||
// Start HTTP server unless disabled
|
||||
let httpServer: FastifyInstance | undefined;
|
||||
if (!args.noHttp) {
|
||||
httpServer = await createHttpServer(httpConfig, { router });
|
||||
httpServer = await createHttpServer(httpConfig, { router, providerRegistry });
|
||||
await httpServer.listen({ port: httpConfig.httpPort, host: httpConfig.httpHost });
|
||||
process.stderr.write(`mcpctl-proxy HTTP server listening on ${httpConfig.httpHost}:${httpConfig.httpPort}\n`);
|
||||
}
|
||||
@@ -126,6 +139,7 @@ export async function main(argv: string[] = process.argv): Promise<MainResult> {
|
||||
if (shuttingDown) return;
|
||||
shuttingDown = true;
|
||||
|
||||
providerRegistry.disposeAll();
|
||||
server.stop();
|
||||
if (httpServer) {
|
||||
await httpServer.close();
|
||||
@@ -137,7 +151,7 @@ export async function main(argv: string[] = process.argv): Promise<MainResult> {
|
||||
process.on('SIGTERM', () => void shutdown());
|
||||
process.on('SIGINT', () => void shutdown());
|
||||
|
||||
return { router, server, httpServer, httpConfig };
|
||||
return { router, server, httpServer, httpConfig, providerRegistry };
|
||||
}
|
||||
|
||||
// Run when executed directly
|
||||
|
||||
291
src/mcplocal/src/providers/acp-client.ts
Normal file
291
src/mcplocal/src/providers/acp-client.ts
Normal file
@@ -0,0 +1,291 @@
|
||||
import { spawn, type ChildProcess } from 'node:child_process';
|
||||
import { createInterface, type Interface as ReadlineInterface } from 'node:readline';
|
||||
|
||||
export interface AcpClientConfig {
|
||||
binaryPath: string;
|
||||
model: string;
|
||||
/** Timeout for individual RPC requests in ms (default: 60000) */
|
||||
requestTimeoutMs: number;
|
||||
/** Timeout for process initialization in ms (default: 30000) */
|
||||
initTimeoutMs: number;
|
||||
/** Override spawn for testing */
|
||||
spawn?: typeof spawn;
|
||||
}
|
||||
|
||||
interface PendingRequest {
|
||||
resolve: (result: unknown) => void;
|
||||
reject: (err: Error) => void;
|
||||
timer: ReturnType<typeof setTimeout>;
|
||||
}
|
||||
|
||||
/**
|
||||
* Low-level ACP (Agent Client Protocol) client.
|
||||
* Manages a persistent `gemini --experimental-acp` subprocess and communicates
|
||||
* via JSON-RPC 2.0 over NDJSON stdio.
|
||||
*
|
||||
* Pattern follows StdioUpstream: readline for parsing, pending request map with timeouts.
|
||||
*/
|
||||
export class AcpClient {
|
||||
private process: ChildProcess | null = null;
|
||||
private readline: ReadlineInterface | null = null;
|
||||
private pendingRequests = new Map<number, PendingRequest>();
|
||||
private nextId = 1;
|
||||
private sessionId: string | null = null;
|
||||
private ready = false;
|
||||
private initPromise: Promise<void> | null = null;
|
||||
private readonly config: AcpClientConfig;
|
||||
|
||||
/** Accumulates text chunks from session/update agent_message_chunk during a prompt. */
|
||||
private activePromptChunks: string[] = [];
|
||||
|
||||
constructor(config: AcpClientConfig) {
|
||||
this.config = config;
|
||||
}
|
||||
|
||||
/** Ensure the subprocess is spawned and initialized. Idempotent and lazy. */
|
||||
async ensureReady(): Promise<void> {
|
||||
if (this.ready && this.process && !this.process.killed) return;
|
||||
|
||||
// If already initializing, wait for it
|
||||
if (this.initPromise) return this.initPromise;
|
||||
|
||||
this.initPromise = this.doInit();
|
||||
try {
|
||||
await this.initPromise;
|
||||
} catch (err) {
|
||||
this.initPromise = null;
|
||||
throw err;
|
||||
}
|
||||
}
|
||||
|
||||
/** Send a prompt and collect the streamed text response. */
|
||||
async prompt(text: string): Promise<string> {
|
||||
await this.ensureReady();
|
||||
|
||||
// Set up chunk accumulator
|
||||
this.activePromptChunks = [];
|
||||
|
||||
const result = await this.sendRequest('session/prompt', {
|
||||
sessionId: this.sessionId,
|
||||
prompt: [{ type: 'text', text }],
|
||||
}, this.config.requestTimeoutMs) as { stopReason: string };
|
||||
|
||||
const collected = this.activePromptChunks.join('');
|
||||
this.activePromptChunks = [];
|
||||
|
||||
if (result.stopReason === 'refusal') {
|
||||
throw new Error('Gemini refused to process the prompt');
|
||||
}
|
||||
|
||||
return collected;
|
||||
}
|
||||
|
||||
/** Kill the subprocess and clean up. */
|
||||
dispose(): void {
|
||||
this.cleanup();
|
||||
}
|
||||
|
||||
/** Check if the subprocess is alive and initialized. */
|
||||
get isAlive(): boolean {
|
||||
return this.ready && this.process !== null && !this.process.killed;
|
||||
}
|
||||
|
||||
// --- Private ---
|
||||
|
||||
private async doInit(): Promise<void> {
|
||||
// Clean up any previous state
|
||||
this.cleanup();
|
||||
|
||||
this.spawnProcess();
|
||||
this.setupReadline();
|
||||
|
||||
// ACP handshake: initialize
|
||||
await this.sendRequest('initialize', {
|
||||
protocolVersion: 1,
|
||||
clientCapabilities: {},
|
||||
clientInfo: { name: 'mcpctl', version: '1.0.0' },
|
||||
}, this.config.initTimeoutMs);
|
||||
|
||||
// ACP handshake: session/new
|
||||
const sessionResult = await this.sendRequest('session/new', {
|
||||
cwd: '/tmp',
|
||||
mcpServers: [],
|
||||
}, this.config.initTimeoutMs) as { sessionId: string };
|
||||
|
||||
this.sessionId = sessionResult.sessionId;
|
||||
this.ready = true;
|
||||
}
|
||||
|
||||
private spawnProcess(): void {
|
||||
const spawnFn = this.config.spawn ?? spawn;
|
||||
this.process = spawnFn(this.config.binaryPath, ['--experimental-acp'], {
|
||||
stdio: ['pipe', 'pipe', 'pipe'],
|
||||
env: process.env,
|
||||
});
|
||||
|
||||
this.process.on('exit', () => {
|
||||
this.ready = false;
|
||||
this.initPromise = null;
|
||||
this.sessionId = null;
|
||||
|
||||
// Reject all pending requests
|
||||
for (const [id, pending] of this.pendingRequests) {
|
||||
clearTimeout(pending.timer);
|
||||
pending.reject(new Error('Gemini ACP process exited'));
|
||||
this.pendingRequests.delete(id);
|
||||
}
|
||||
});
|
||||
|
||||
this.process.on('error', (err) => {
|
||||
this.ready = false;
|
||||
this.initPromise = null;
|
||||
|
||||
for (const [id, pending] of this.pendingRequests) {
|
||||
clearTimeout(pending.timer);
|
||||
pending.reject(err);
|
||||
this.pendingRequests.delete(id);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
private setupReadline(): void {
|
||||
if (!this.process?.stdout) return;
|
||||
|
||||
this.readline = createInterface({ input: this.process.stdout });
|
||||
this.readline.on('line', (line) => this.handleLine(line));
|
||||
}
|
||||
|
||||
private handleLine(line: string): void {
|
||||
let msg: Record<string, unknown>;
|
||||
try {
|
||||
msg = JSON.parse(line) as Record<string, unknown>;
|
||||
} catch {
|
||||
// Skip non-JSON lines (e.g., debug output on stdout)
|
||||
return;
|
||||
}
|
||||
|
||||
// Response to a pending request (has 'id')
|
||||
if ('id' in msg && msg.id !== undefined && ('result' in msg || 'error' in msg)) {
|
||||
this.handleResponse(msg as { id: number; result?: unknown; error?: { code: number; message: string } });
|
||||
return;
|
||||
}
|
||||
|
||||
// Notification (has 'method', no 'id')
|
||||
if ('method' in msg && !('id' in msg)) {
|
||||
this.handleNotification(msg as { method: string; params?: Record<string, unknown> });
|
||||
return;
|
||||
}
|
||||
|
||||
// Request from agent (has 'method' AND 'id') — agent asking us for something
|
||||
if ('method' in msg && 'id' in msg) {
|
||||
this.handleAgentRequest(msg as { id: number; method: string; params?: Record<string, unknown> });
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
private handleResponse(msg: { id: number; result?: unknown; error?: { code: number; message: string } }): void {
|
||||
const pending = this.pendingRequests.get(msg.id);
|
||||
if (!pending) return;
|
||||
|
||||
clearTimeout(pending.timer);
|
||||
this.pendingRequests.delete(msg.id);
|
||||
|
||||
if (msg.error) {
|
||||
pending.reject(new Error(`ACP error ${msg.error.code}: ${msg.error.message}`));
|
||||
} else {
|
||||
pending.resolve(msg.result);
|
||||
}
|
||||
}
|
||||
|
||||
private handleNotification(msg: { method: string; params?: Record<string, unknown> }): void {
|
||||
if (msg.method !== 'session/update' || !msg.params) return;
|
||||
|
||||
const update = msg.params.update as Record<string, unknown> | undefined;
|
||||
if (!update) return;
|
||||
|
||||
// Collect text from agent_message_chunk
|
||||
if (update.sessionUpdate === 'agent_message_chunk') {
|
||||
const content = update.content;
|
||||
// Gemini ACP sends content as a single object {type, text} or an array [{type, text}]
|
||||
const blocks: Array<{ type: string; text?: string }> = Array.isArray(content)
|
||||
? content as Array<{ type: string; text?: string }>
|
||||
: content && typeof content === 'object'
|
||||
? [content as { type: string; text?: string }]
|
||||
: [];
|
||||
for (const block of blocks) {
|
||||
if (block.type === 'text' && block.text) {
|
||||
this.activePromptChunks.push(block.text);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/** Handle requests from the agent (e.g., session/request_permission). Reject them all. */
|
||||
private handleAgentRequest(msg: { id: number; method: string; params?: Record<string, unknown> }): void {
|
||||
if (!this.process?.stdin) return;
|
||||
|
||||
if (msg.method === 'session/request_permission') {
|
||||
// Reject permission requests — we don't want tool use
|
||||
const response = JSON.stringify({
|
||||
jsonrpc: '2.0',
|
||||
id: msg.id,
|
||||
result: { outcome: { outcome: 'cancelled' } },
|
||||
});
|
||||
this.process.stdin.write(response + '\n');
|
||||
} else {
|
||||
// Unknown method — return error
|
||||
const response = JSON.stringify({
|
||||
jsonrpc: '2.0',
|
||||
id: msg.id,
|
||||
error: { code: -32601, message: 'Method not supported' },
|
||||
});
|
||||
this.process.stdin.write(response + '\n');
|
||||
}
|
||||
}
|
||||
|
||||
private sendRequest(method: string, params: Record<string, unknown>, timeoutMs: number): Promise<unknown> {
|
||||
if (!this.process?.stdin) {
|
||||
return Promise.reject(new Error('ACP process not started'));
|
||||
}
|
||||
|
||||
const id = this.nextId++;
|
||||
|
||||
return new Promise((resolve, reject) => {
|
||||
const timer = setTimeout(() => {
|
||||
this.pendingRequests.delete(id);
|
||||
// Kill the process on timeout — it's hung
|
||||
this.cleanup();
|
||||
reject(new Error(`ACP request '${method}' timed out after ${timeoutMs}ms`));
|
||||
}, timeoutMs);
|
||||
|
||||
this.pendingRequests.set(id, { resolve, reject, timer });
|
||||
|
||||
const msg = JSON.stringify({ jsonrpc: '2.0', id, method, params });
|
||||
this.process!.stdin!.write(msg + '\n');
|
||||
});
|
||||
}
|
||||
|
||||
private cleanup(): void {
|
||||
this.ready = false;
|
||||
this.initPromise = null;
|
||||
this.sessionId = null;
|
||||
this.activePromptChunks = [];
|
||||
|
||||
// Reject all pending requests
|
||||
for (const [id, pending] of this.pendingRequests) {
|
||||
clearTimeout(pending.timer);
|
||||
pending.reject(new Error('ACP client disposed'));
|
||||
this.pendingRequests.delete(id);
|
||||
}
|
||||
|
||||
if (this.readline) {
|
||||
this.readline.close();
|
||||
this.readline = null;
|
||||
}
|
||||
|
||||
if (this.process) {
|
||||
this.process.kill('SIGTERM');
|
||||
this.process = null;
|
||||
}
|
||||
}
|
||||
}
|
||||
97
src/mcplocal/src/providers/gemini-acp.ts
Normal file
97
src/mcplocal/src/providers/gemini-acp.ts
Normal file
@@ -0,0 +1,97 @@
|
||||
import { execFile } from 'node:child_process';
|
||||
import { promisify } from 'node:util';
|
||||
import type { LlmProvider, CompletionOptions, CompletionResult } from './types.js';
|
||||
import { AcpClient } from './acp-client.js';
|
||||
import type { AcpClientConfig } from './acp-client.js';
|
||||
|
||||
const execFileAsync = promisify(execFile);
|
||||
|
||||
export interface GeminiAcpConfig {
|
||||
binaryPath?: string;
|
||||
defaultModel?: string;
|
||||
requestTimeoutMs?: number;
|
||||
initTimeoutMs?: number;
|
||||
/** Override for testing — passed through to AcpClient */
|
||||
spawn?: AcpClientConfig['spawn'];
|
||||
}
|
||||
|
||||
/**
|
||||
* Gemini CLI provider using ACP (Agent Client Protocol) mode.
|
||||
* Keeps the gemini process alive as a persistent subprocess, eliminating
|
||||
* the ~10s cold-start per call. Auto-restarts on crash or timeout.
|
||||
*/
|
||||
export class GeminiAcpProvider implements LlmProvider {
|
||||
readonly name = 'gemini-cli';
|
||||
private client: AcpClient;
|
||||
private binaryPath: string;
|
||||
private defaultModel: string;
|
||||
private queue: Promise<void> = Promise.resolve();
|
||||
|
||||
constructor(config?: GeminiAcpConfig) {
|
||||
this.binaryPath = config?.binaryPath ?? 'gemini';
|
||||
this.defaultModel = config?.defaultModel ?? 'gemini-2.5-flash';
|
||||
|
||||
const acpConfig: AcpClientConfig = {
|
||||
binaryPath: this.binaryPath,
|
||||
model: this.defaultModel,
|
||||
requestTimeoutMs: config?.requestTimeoutMs ?? 60_000,
|
||||
initTimeoutMs: config?.initTimeoutMs ?? 30_000,
|
||||
};
|
||||
if (config?.spawn) acpConfig.spawn = config.spawn;
|
||||
|
||||
this.client = new AcpClient(acpConfig);
|
||||
}
|
||||
|
||||
async complete(options: CompletionOptions): Promise<CompletionResult> {
|
||||
return this.enqueue(() => this.doComplete(options));
|
||||
}
|
||||
|
||||
async listModels(): Promise<string[]> {
|
||||
return ['gemini-2.5-flash', 'gemini-2.5-pro', 'gemini-2.0-flash'];
|
||||
}
|
||||
|
||||
async isAvailable(): Promise<boolean> {
|
||||
try {
|
||||
await execFileAsync(this.binaryPath, ['--version'], { timeout: 5000 });
|
||||
return true;
|
||||
} catch {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
dispose(): void {
|
||||
this.client.dispose();
|
||||
}
|
||||
|
||||
// --- Private ---
|
||||
|
||||
private async doComplete(options: CompletionOptions): Promise<CompletionResult> {
|
||||
const prompt = options.messages
|
||||
.map((m) => {
|
||||
if (m.role === 'system') return `System: ${m.content}`;
|
||||
if (m.role === 'user') return m.content;
|
||||
if (m.role === 'assistant') return `Assistant: ${m.content}`;
|
||||
return m.content;
|
||||
})
|
||||
.join('\n\n');
|
||||
|
||||
const content = await this.client.prompt(prompt);
|
||||
|
||||
return {
|
||||
content: content.trim(),
|
||||
toolCalls: [],
|
||||
usage: { promptTokens: 0, completionTokens: 0, totalTokens: 0 },
|
||||
finishReason: 'stop',
|
||||
};
|
||||
}
|
||||
|
||||
private enqueue<T>(fn: () => Promise<T>): Promise<T> {
|
||||
const result = new Promise<T>((resolve, reject) => {
|
||||
this.queue = this.queue.then(
|
||||
() => fn().then(resolve, reject),
|
||||
() => fn().then(resolve, reject),
|
||||
);
|
||||
});
|
||||
return result;
|
||||
}
|
||||
}
|
||||
@@ -9,4 +9,8 @@ export { GeminiCliProvider } from './gemini-cli.js';
|
||||
export type { GeminiCliConfig } from './gemini-cli.js';
|
||||
export { DeepSeekProvider } from './deepseek.js';
|
||||
export type { DeepSeekConfig } from './deepseek.js';
|
||||
export { GeminiAcpProvider } from './gemini-acp.js';
|
||||
export type { GeminiAcpConfig } from './gemini-acp.js';
|
||||
export { AcpClient } from './acp-client.js';
|
||||
export type { AcpClientConfig } from './acp-client.js';
|
||||
export { ProviderRegistry } from './registry.js';
|
||||
|
||||
@@ -45,4 +45,11 @@ export class ProviderRegistry {
|
||||
getActiveName(): string | null {
|
||||
return this.activeProvider;
|
||||
}
|
||||
|
||||
/** Dispose all registered providers that have a dispose method. */
|
||||
disposeAll(): void {
|
||||
for (const provider of this.providers.values()) {
|
||||
provider.dispose?.();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -53,4 +53,6 @@ export interface LlmProvider {
|
||||
listModels(): Promise<string[]>;
|
||||
/** Check if the provider is configured and reachable */
|
||||
isAvailable(): Promise<boolean>;
|
||||
/** Optional cleanup for providers with persistent resources (e.g., subprocesses). */
|
||||
dispose?(): void;
|
||||
}
|
||||
|
||||
@@ -1,5 +1,11 @@
|
||||
import type { UpstreamConnection, JsonRpcRequest, JsonRpcResponse, JsonRpcNotification } from './types.js';
|
||||
import type { LlmProcessor } from './llm/processor.js';
|
||||
import { ResponsePaginator } from './llm/pagination.js';
|
||||
import type { McpdClient } from './http/mcpd-client.js';
|
||||
|
||||
export interface RouteContext {
|
||||
sessionId?: string;
|
||||
}
|
||||
|
||||
/**
|
||||
* Routes MCP requests to the appropriate upstream server.
|
||||
@@ -17,11 +23,29 @@ export class McpRouter {
|
||||
private promptToServer = new Map<string, string>();
|
||||
private notificationHandler: ((notification: JsonRpcNotification) => void) | null = null;
|
||||
private llmProcessor: LlmProcessor | null = null;
|
||||
private instructions: string | null = null;
|
||||
private mcpdClient: McpdClient | null = null;
|
||||
private projectName: string | null = null;
|
||||
private mcpctlResourceContents = new Map<string, string>();
|
||||
private paginator: ResponsePaginator | null = null;
|
||||
|
||||
setPaginator(paginator: ResponsePaginator): void {
|
||||
this.paginator = paginator;
|
||||
}
|
||||
|
||||
setLlmProcessor(processor: LlmProcessor): void {
|
||||
this.llmProcessor = processor;
|
||||
}
|
||||
|
||||
setInstructions(instructions: string): void {
|
||||
this.instructions = instructions;
|
||||
}
|
||||
|
||||
setPromptConfig(mcpdClient: McpdClient, projectName: string): void {
|
||||
this.mcpdClient = mcpdClient;
|
||||
this.projectName = projectName;
|
||||
}
|
||||
|
||||
addUpstream(connection: UpstreamConnection): void {
|
||||
this.upstreams.set(connection.name, connection);
|
||||
if (this.notificationHandler && connection.onNotification) {
|
||||
@@ -87,10 +111,18 @@ export class McpRouter {
|
||||
for (const tool of tools) {
|
||||
const namespacedName = `${serverName}/${tool.name}`;
|
||||
this.toolToServer.set(namespacedName, serverName);
|
||||
allTools.push({
|
||||
// Enrich description with server context if available
|
||||
const entry: { name: string; description?: string; inputSchema?: unknown } = {
|
||||
...tool,
|
||||
name: namespacedName,
|
||||
});
|
||||
};
|
||||
if (upstream.description && tool.description) {
|
||||
entry.description = `[${upstream.description}] ${tool.description}`;
|
||||
} else if (upstream.description) {
|
||||
entry.description = `[${upstream.description}]`;
|
||||
}
|
||||
// If neither upstream.description nor tool.description, keep tool.description (may be undefined — that's fine, just don't set it)
|
||||
allTools.push(entry);
|
||||
}
|
||||
}
|
||||
} catch {
|
||||
@@ -223,7 +255,7 @@ export class McpRouter {
|
||||
* Route a generic request. Handles protocol-level methods locally,
|
||||
* delegates tool/resource/prompt calls to upstreams.
|
||||
*/
|
||||
async route(request: JsonRpcRequest): Promise<JsonRpcResponse> {
|
||||
async route(request: JsonRpcRequest, context?: RouteContext): Promise<JsonRpcResponse> {
|
||||
switch (request.method) {
|
||||
case 'initialize':
|
||||
return {
|
||||
@@ -240,11 +272,27 @@ export class McpRouter {
|
||||
resources: {},
|
||||
prompts: {},
|
||||
},
|
||||
...(this.instructions ? { instructions: this.instructions } : {}),
|
||||
},
|
||||
};
|
||||
|
||||
case 'tools/list': {
|
||||
const tools = await this.discoverTools();
|
||||
// Append propose_prompt tool if prompt config is set
|
||||
if (this.mcpdClient && this.projectName) {
|
||||
tools.push({
|
||||
name: 'propose_prompt',
|
||||
description: 'Propose a new prompt for this project. Creates a pending request that must be approved by a user before becoming active.',
|
||||
inputSchema: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
name: { type: 'string', description: 'Prompt name (lowercase alphanumeric with hyphens, e.g. "debug-guide")' },
|
||||
content: { type: 'string', description: 'Prompt content text' },
|
||||
},
|
||||
required: ['name', 'content'],
|
||||
},
|
||||
});
|
||||
}
|
||||
return {
|
||||
jsonrpc: '2.0',
|
||||
id: request.id,
|
||||
@@ -253,10 +301,32 @@ export class McpRouter {
|
||||
}
|
||||
|
||||
case 'tools/call':
|
||||
return this.routeToolCall(request);
|
||||
return this.routeToolCall(request, context);
|
||||
|
||||
case 'resources/list': {
|
||||
const resources = await this.discoverResources();
|
||||
// Append mcpctl prompt resources
|
||||
if (this.mcpdClient && this.projectName) {
|
||||
try {
|
||||
const sessionParam = context?.sessionId ? `?session=${encodeURIComponent(context.sessionId)}` : '';
|
||||
const visible = await this.mcpdClient.get<Array<{ name: string; content: string; type: string }>>(
|
||||
`/api/v1/projects/${encodeURIComponent(this.projectName)}/prompts/visible${sessionParam}`,
|
||||
);
|
||||
this.mcpctlResourceContents.clear();
|
||||
for (const p of visible) {
|
||||
const uri = `mcpctl://prompts/${p.name}`;
|
||||
resources.push({
|
||||
uri,
|
||||
name: p.name,
|
||||
description: p.type === 'promptrequest' ? `[Pending proposal] ${p.name}` : `[Approved prompt] ${p.name}`,
|
||||
mimeType: 'text/plain',
|
||||
});
|
||||
this.mcpctlResourceContents.set(uri, p.content);
|
||||
}
|
||||
} catch {
|
||||
// Prompt resources are optional — don't fail discovery
|
||||
}
|
||||
}
|
||||
return {
|
||||
jsonrpc: '2.0',
|
||||
id: request.id,
|
||||
@@ -264,8 +334,28 @@ export class McpRouter {
|
||||
};
|
||||
}
|
||||
|
||||
case 'resources/read':
|
||||
case 'resources/read': {
|
||||
const params = request.params as Record<string, unknown> | undefined;
|
||||
const uri = params?.['uri'] as string | undefined;
|
||||
if (uri?.startsWith('mcpctl://')) {
|
||||
const content = this.mcpctlResourceContents.get(uri);
|
||||
if (content !== undefined) {
|
||||
return {
|
||||
jsonrpc: '2.0',
|
||||
id: request.id,
|
||||
result: {
|
||||
contents: [{ uri, mimeType: 'text/plain', text: content }],
|
||||
},
|
||||
};
|
||||
}
|
||||
return {
|
||||
jsonrpc: '2.0',
|
||||
id: request.id,
|
||||
error: { code: -32602, message: `Resource not found: ${uri}` },
|
||||
};
|
||||
}
|
||||
return this.routeNamespacedCall(request, 'uri', this.resourceToServer);
|
||||
}
|
||||
|
||||
case 'resources/subscribe':
|
||||
case 'resources/unsubscribe':
|
||||
@@ -283,6 +373,17 @@ export class McpRouter {
|
||||
case 'prompts/get':
|
||||
return this.routeNamespacedCall(request, 'name', this.promptToServer);
|
||||
|
||||
// Handle MCP notifications (no response expected, but return empty result if called as request)
|
||||
case 'notifications/initialized':
|
||||
case 'notifications/cancelled':
|
||||
case 'notifications/progress':
|
||||
case 'notifications/roots/list_changed':
|
||||
return {
|
||||
jsonrpc: '2.0',
|
||||
id: request.id,
|
||||
result: {},
|
||||
};
|
||||
|
||||
default:
|
||||
return {
|
||||
jsonrpc: '2.0',
|
||||
@@ -295,18 +396,45 @@ export class McpRouter {
|
||||
/**
|
||||
* Route a tools/call request, optionally applying LLM pre/post-processing.
|
||||
*/
|
||||
private async routeToolCall(request: JsonRpcRequest): Promise<JsonRpcResponse> {
|
||||
private async routeToolCall(request: JsonRpcRequest, context?: RouteContext): Promise<JsonRpcResponse> {
|
||||
const params = request.params as Record<string, unknown> | undefined;
|
||||
const toolName = params?.['name'] as string | undefined;
|
||||
|
||||
// Handle built-in propose_prompt tool
|
||||
if (toolName === 'propose_prompt') {
|
||||
return this.handleProposePrompt(request, context);
|
||||
}
|
||||
|
||||
// Intercept pagination page requests before routing to upstream
|
||||
const toolArgs = (params?.['arguments'] ?? {}) as Record<string, unknown>;
|
||||
if (this.paginator) {
|
||||
const paginationReq = ResponsePaginator.extractPaginationParams(toolArgs);
|
||||
if (paginationReq) {
|
||||
const pageResult = this.paginator.getPage(paginationReq.resultId, paginationReq.page);
|
||||
if (pageResult) {
|
||||
return { jsonrpc: '2.0', id: request.id, result: pageResult };
|
||||
}
|
||||
return {
|
||||
jsonrpc: '2.0',
|
||||
id: request.id,
|
||||
result: {
|
||||
content: [{
|
||||
type: 'text',
|
||||
text: 'Cached result not found (expired or invalid _resultId). Please re-call the tool without _resultId/_page to get a fresh result.',
|
||||
}],
|
||||
},
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
// If no processor or tool shouldn't be processed, route directly
|
||||
if (!this.llmProcessor || !toolName || !this.llmProcessor.shouldProcess('tools/call', toolName)) {
|
||||
return this.routeNamespacedCall(request, 'name', this.toolToServer);
|
||||
const response = await this.routeNamespacedCall(request, 'name', this.toolToServer);
|
||||
return this.maybePaginate(toolName, response);
|
||||
}
|
||||
|
||||
// Preprocess request params
|
||||
const toolParams = (params?.['arguments'] ?? {}) as Record<string, unknown>;
|
||||
const processed = await this.llmProcessor.preprocessRequest(toolName, toolParams);
|
||||
const processed = await this.llmProcessor.preprocessRequest(toolName, toolArgs);
|
||||
const processedRequest: JsonRpcRequest = processed.optimized
|
||||
? { ...request, params: { ...params, arguments: processed.params } }
|
||||
: request;
|
||||
@@ -314,6 +442,10 @@ export class McpRouter {
|
||||
// Route to upstream
|
||||
const response = await this.routeNamespacedCall(processedRequest, 'name', this.toolToServer);
|
||||
|
||||
// Paginate if response is large (skip LLM filtering for paginated responses)
|
||||
const paginated = await this.maybePaginate(toolName, response);
|
||||
if (paginated !== response) return paginated;
|
||||
|
||||
// Filter response
|
||||
if (response.error) return response;
|
||||
const filtered = await this.llmProcessor.filterResponse(toolName, response);
|
||||
@@ -323,6 +455,76 @@ export class McpRouter {
|
||||
return response;
|
||||
}
|
||||
|
||||
/**
|
||||
* If the response is large enough, paginate it and return the index instead.
|
||||
*/
|
||||
private async maybePaginate(toolName: string | undefined, response: JsonRpcResponse): Promise<JsonRpcResponse> {
|
||||
if (!this.paginator || !toolName || response.error) return response;
|
||||
|
||||
const raw = JSON.stringify(response.result);
|
||||
if (!this.paginator.shouldPaginate(raw)) return response;
|
||||
|
||||
const paginated = await this.paginator.paginate(toolName, raw);
|
||||
if (!paginated) return response;
|
||||
|
||||
return { jsonrpc: '2.0', id: response.id, result: paginated };
|
||||
}
|
||||
|
||||
private async handleProposePrompt(request: JsonRpcRequest, context?: RouteContext): Promise<JsonRpcResponse> {
|
||||
if (!this.mcpdClient || !this.projectName) {
|
||||
return {
|
||||
jsonrpc: '2.0',
|
||||
id: request.id,
|
||||
error: { code: -32603, message: 'Prompt config not set — propose_prompt unavailable' },
|
||||
};
|
||||
}
|
||||
|
||||
const params = request.params as Record<string, unknown> | undefined;
|
||||
const args = (params?.['arguments'] ?? {}) as Record<string, unknown>;
|
||||
const name = args['name'] as string | undefined;
|
||||
const content = args['content'] as string | undefined;
|
||||
|
||||
if (!name || !content) {
|
||||
return {
|
||||
jsonrpc: '2.0',
|
||||
id: request.id,
|
||||
error: { code: -32602, message: 'Missing required arguments: name and content' },
|
||||
};
|
||||
}
|
||||
|
||||
try {
|
||||
const body: Record<string, unknown> = { name, content };
|
||||
if (context?.sessionId) {
|
||||
body['createdBySession'] = context.sessionId;
|
||||
}
|
||||
await this.mcpdClient.post(
|
||||
`/api/v1/projects/${encodeURIComponent(this.projectName)}/promptrequests`,
|
||||
body,
|
||||
);
|
||||
return {
|
||||
jsonrpc: '2.0',
|
||||
id: request.id,
|
||||
result: {
|
||||
content: [
|
||||
{
|
||||
type: 'text',
|
||||
text: `Prompt request "${name}" created successfully. It will be visible to you as a resource at mcpctl://prompts/${name}. A user must approve it before it becomes permanent.`,
|
||||
},
|
||||
],
|
||||
},
|
||||
};
|
||||
} catch (err) {
|
||||
return {
|
||||
jsonrpc: '2.0',
|
||||
id: request.id,
|
||||
error: {
|
||||
code: -32603,
|
||||
message: `Failed to propose prompt: ${err instanceof Error ? err.message : String(err)}`,
|
||||
},
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
getUpstreamNames(): string[] {
|
||||
return [...this.upstreams.keys()];
|
||||
}
|
||||
|
||||
@@ -63,6 +63,8 @@ export interface ProxyConfig {
|
||||
export interface UpstreamConnection {
|
||||
/** Server name */
|
||||
name: string;
|
||||
/** Human-readable description of the server's purpose */
|
||||
description?: string;
|
||||
/** Send a JSON-RPC request and get a response */
|
||||
send(request: JsonRpcRequest): Promise<JsonRpcResponse>;
|
||||
/** Disconnect from the upstream */
|
||||
|
||||
@@ -18,14 +18,17 @@ interface McpdProxyResponse {
|
||||
*/
|
||||
export class McpdUpstream implements UpstreamConnection {
|
||||
readonly name: string;
|
||||
readonly description?: string;
|
||||
private alive = true;
|
||||
|
||||
constructor(
|
||||
private serverId: string,
|
||||
serverName: string,
|
||||
private mcpdClient: McpdClient,
|
||||
serverDescription?: string,
|
||||
) {
|
||||
this.name = serverName;
|
||||
if (serverDescription !== undefined) this.description = serverDescription;
|
||||
}
|
||||
|
||||
async send(request: JsonRpcRequest): Promise<JsonRpcResponse> {
|
||||
|
||||
486
src/mcplocal/tests/acp-client.test.ts
Normal file
486
src/mcplocal/tests/acp-client.test.ts
Normal file
@@ -0,0 +1,486 @@
|
||||
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
|
||||
import { EventEmitter, Readable } from 'node:stream';
|
||||
import { AcpClient } from '../src/providers/acp-client.js';
|
||||
import type { AcpClientConfig } from '../src/providers/acp-client.js';
|
||||
|
||||
/**
|
||||
* Creates a mock child process that speaks ACP protocol.
|
||||
* Returns the mock process and helpers to send responses.
|
||||
*/
|
||||
function createMockProcess() {
|
||||
const stdin = {
|
||||
write: vi.fn(),
|
||||
writable: true,
|
||||
};
|
||||
|
||||
const stdoutEmitter = new EventEmitter();
|
||||
const stdout = Object.assign(stdoutEmitter, {
|
||||
readable: true,
|
||||
// readline needs these
|
||||
[Symbol.asyncIterator]: undefined,
|
||||
pause: vi.fn(),
|
||||
resume: vi.fn(),
|
||||
isPaused: () => false,
|
||||
setEncoding: vi.fn(),
|
||||
read: vi.fn(),
|
||||
destroy: vi.fn(),
|
||||
pipe: vi.fn(),
|
||||
unpipe: vi.fn(),
|
||||
unshift: vi.fn(),
|
||||
wrap: vi.fn(),
|
||||
}) as unknown as Readable;
|
||||
|
||||
const proc = Object.assign(new EventEmitter(), {
|
||||
stdin,
|
||||
stdout,
|
||||
stderr: new EventEmitter(),
|
||||
pid: 12345,
|
||||
killed: false,
|
||||
kill: vi.fn(function (this: { killed: boolean }) {
|
||||
this.killed = true;
|
||||
}),
|
||||
});
|
||||
|
||||
/** Send a line of JSON from the "agent" to our client */
|
||||
function sendLine(data: unknown) {
|
||||
stdoutEmitter.emit('data', Buffer.from(JSON.stringify(data) + '\n'));
|
||||
}
|
||||
|
||||
/** Send a JSON-RPC response */
|
||||
function sendResponse(id: number, result: unknown) {
|
||||
sendLine({ jsonrpc: '2.0', id, result });
|
||||
}
|
||||
|
||||
/** Send a JSON-RPC error */
|
||||
function sendError(id: number, code: number, message: string) {
|
||||
sendLine({ jsonrpc: '2.0', id, error: { code, message } });
|
||||
}
|
||||
|
||||
/** Send a session/update notification with agent_message_chunk */
|
||||
function sendChunk(sessionId: string, text: string) {
|
||||
sendLine({
|
||||
jsonrpc: '2.0',
|
||||
method: 'session/update',
|
||||
params: {
|
||||
sessionId,
|
||||
update: {
|
||||
sessionUpdate: 'agent_message_chunk',
|
||||
content: [{ type: 'text', text }],
|
||||
},
|
||||
},
|
||||
});
|
||||
}
|
||||
|
||||
/** Send a session/request_permission request */
|
||||
function sendPermissionRequest(id: number, sessionId: string) {
|
||||
sendLine({
|
||||
jsonrpc: '2.0',
|
||||
id,
|
||||
method: 'session/request_permission',
|
||||
params: { sessionId },
|
||||
});
|
||||
}
|
||||
|
||||
return { proc, stdin, stdout: stdoutEmitter, sendLine, sendResponse, sendError, sendChunk, sendPermissionRequest };
|
||||
}
|
||||
|
||||
function createConfig(overrides?: Partial<AcpClientConfig>): AcpClientConfig {
|
||||
return {
|
||||
binaryPath: '/usr/bin/gemini',
|
||||
model: 'gemini-2.5-flash',
|
||||
requestTimeoutMs: 5000,
|
||||
initTimeoutMs: 5000,
|
||||
...overrides,
|
||||
};
|
||||
}
|
||||
|
||||
describe('AcpClient', () => {
|
||||
let client: AcpClient;
|
||||
let mock: ReturnType<typeof createMockProcess>;
|
||||
|
||||
beforeEach(() => {
|
||||
mock = createMockProcess();
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
client?.dispose();
|
||||
});
|
||||
|
||||
function createClient(configOverrides?: Partial<AcpClientConfig>) {
|
||||
const config = createConfig({
|
||||
spawn: (() => mock.proc) as unknown as AcpClientConfig['spawn'],
|
||||
...configOverrides,
|
||||
});
|
||||
client = new AcpClient(config);
|
||||
return client;
|
||||
}
|
||||
|
||||
/** Helper: auto-respond to the initialize + session/new handshake */
|
||||
function autoHandshake(sessionId = 'test-session') {
|
||||
mock.stdin.write.mockImplementation((data: string) => {
|
||||
const msg = JSON.parse(data.trim()) as { id: number; method: string };
|
||||
if (msg.method === 'initialize') {
|
||||
// Respond async to simulate real behavior
|
||||
setImmediate(() => mock.sendResponse(msg.id, {
|
||||
protocolVersion: 1,
|
||||
agentInfo: { name: 'gemini-cli', version: '1.0.0' },
|
||||
}));
|
||||
} else if (msg.method === 'session/new') {
|
||||
setImmediate(() => mock.sendResponse(msg.id, { sessionId }));
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
describe('ensureReady', () => {
|
||||
it('spawns process and completes ACP handshake', async () => {
|
||||
createClient();
|
||||
autoHandshake();
|
||||
|
||||
await client.ensureReady();
|
||||
|
||||
expect(client.isAlive).toBe(true);
|
||||
// Verify initialize was sent
|
||||
const calls = mock.stdin.write.mock.calls.map((c) => JSON.parse(c[0] as string));
|
||||
expect(calls[0].method).toBe('initialize');
|
||||
expect(calls[0].params.protocolVersion).toBe(1);
|
||||
expect(calls[0].params.clientInfo.name).toBe('mcpctl');
|
||||
// Verify session/new was sent
|
||||
expect(calls[1].method).toBe('session/new');
|
||||
expect(calls[1].params.cwd).toBe('/tmp');
|
||||
expect(calls[1].params.mcpServers).toEqual([]);
|
||||
});
|
||||
|
||||
it('is idempotent when already ready', async () => {
|
||||
createClient();
|
||||
autoHandshake();
|
||||
|
||||
await client.ensureReady();
|
||||
await client.ensureReady();
|
||||
|
||||
// Should only have sent initialize + session/new once
|
||||
const calls = mock.stdin.write.mock.calls;
|
||||
expect(calls.length).toBe(2);
|
||||
});
|
||||
|
||||
it('shares init promise for concurrent calls', async () => {
|
||||
createClient();
|
||||
autoHandshake();
|
||||
|
||||
const p1 = client.ensureReady();
|
||||
const p2 = client.ensureReady();
|
||||
|
||||
await Promise.all([p1, p2]);
|
||||
expect(mock.stdin.write.mock.calls.length).toBe(2);
|
||||
});
|
||||
});
|
||||
|
||||
describe('prompt', () => {
|
||||
it('sends session/prompt and collects agent_message_chunk text', async () => {
|
||||
createClient();
|
||||
const sessionId = 'sess-123';
|
||||
autoHandshake(sessionId);
|
||||
|
||||
await client.ensureReady();
|
||||
|
||||
// Now set up the prompt response handler
|
||||
mock.stdin.write.mockImplementation((data: string) => {
|
||||
const msg = JSON.parse(data.trim()) as { id: number; method: string };
|
||||
if (msg.method === 'session/prompt') {
|
||||
setImmediate(() => {
|
||||
mock.sendChunk(sessionId, 'Hello ');
|
||||
mock.sendChunk(sessionId, 'world!');
|
||||
mock.sendResponse(msg.id, { stopReason: 'end_turn' });
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
const result = await client.prompt('Say hello');
|
||||
expect(result).toBe('Hello world!');
|
||||
});
|
||||
|
||||
it('handles multi-block content in a single chunk', async () => {
|
||||
createClient();
|
||||
autoHandshake('sess-1');
|
||||
await client.ensureReady();
|
||||
|
||||
mock.stdin.write.mockImplementation((data: string) => {
|
||||
const msg = JSON.parse(data.trim()) as { id: number; method: string };
|
||||
if (msg.method === 'session/prompt') {
|
||||
setImmediate(() => {
|
||||
mock.sendLine({
|
||||
jsonrpc: '2.0',
|
||||
method: 'session/update',
|
||||
params: {
|
||||
sessionId: 'sess-1',
|
||||
update: {
|
||||
sessionUpdate: 'agent_message_chunk',
|
||||
content: [
|
||||
{ type: 'text', text: 'Part A' },
|
||||
{ type: 'text', text: ' Part B' },
|
||||
],
|
||||
},
|
||||
},
|
||||
});
|
||||
mock.sendResponse(msg.id, { stopReason: 'end_turn' });
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
const result = await client.prompt('test');
|
||||
expect(result).toBe('Part A Part B');
|
||||
});
|
||||
|
||||
it('handles single-object content (real Gemini ACP format)', async () => {
|
||||
createClient();
|
||||
autoHandshake('sess-1');
|
||||
await client.ensureReady();
|
||||
|
||||
mock.stdin.write.mockImplementation((data: string) => {
|
||||
const msg = JSON.parse(data.trim()) as { id: number; method: string };
|
||||
if (msg.method === 'session/prompt') {
|
||||
setImmediate(() => {
|
||||
// Real Gemini ACP sends content as a single object, not an array
|
||||
mock.sendLine({
|
||||
jsonrpc: '2.0',
|
||||
method: 'session/update',
|
||||
params: {
|
||||
sessionId: 'sess-1',
|
||||
update: {
|
||||
sessionUpdate: 'agent_message_chunk',
|
||||
content: { type: 'text', text: 'ok' },
|
||||
},
|
||||
},
|
||||
});
|
||||
mock.sendResponse(msg.id, { stopReason: 'end_turn' });
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
const result = await client.prompt('test');
|
||||
expect(result).toBe('ok');
|
||||
});
|
||||
|
||||
it('ignores agent_thought_chunk notifications', async () => {
|
||||
createClient();
|
||||
autoHandshake('sess-1');
|
||||
await client.ensureReady();
|
||||
|
||||
mock.stdin.write.mockImplementation((data: string) => {
|
||||
const msg = JSON.parse(data.trim()) as { id: number; method: string };
|
||||
if (msg.method === 'session/prompt') {
|
||||
setImmediate(() => {
|
||||
// Gemini sends thought chunks before message chunks
|
||||
mock.sendLine({
|
||||
jsonrpc: '2.0',
|
||||
method: 'session/update',
|
||||
params: {
|
||||
sessionId: 'sess-1',
|
||||
update: {
|
||||
sessionUpdate: 'agent_thought_chunk',
|
||||
content: { type: 'text', text: 'Thinking about it...' },
|
||||
},
|
||||
},
|
||||
});
|
||||
mock.sendLine({
|
||||
jsonrpc: '2.0',
|
||||
method: 'session/update',
|
||||
params: {
|
||||
sessionId: 'sess-1',
|
||||
update: {
|
||||
sessionUpdate: 'agent_message_chunk',
|
||||
content: { type: 'text', text: 'ok' },
|
||||
},
|
||||
},
|
||||
});
|
||||
mock.sendResponse(msg.id, { stopReason: 'end_turn' });
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
const result = await client.prompt('test');
|
||||
expect(result).toBe('ok');
|
||||
});
|
||||
|
||||
it('calls ensureReady automatically (lazy init)', async () => {
|
||||
createClient();
|
||||
autoHandshake('sess-auto');
|
||||
|
||||
// After handshake, handle prompts
|
||||
const originalWrite = mock.stdin.write;
|
||||
let handshakeDone = false;
|
||||
mock.stdin.write.mockImplementation((data: string) => {
|
||||
const msg = JSON.parse(data.trim()) as { id: number; method: string };
|
||||
if (msg.method === 'initialize') {
|
||||
setImmediate(() => mock.sendResponse(msg.id, { protocolVersion: 1 }));
|
||||
} else if (msg.method === 'session/new') {
|
||||
setImmediate(() => {
|
||||
mock.sendResponse(msg.id, { sessionId: 'sess-auto' });
|
||||
handshakeDone = true;
|
||||
});
|
||||
} else if (msg.method === 'session/prompt') {
|
||||
setImmediate(() => {
|
||||
mock.sendChunk('sess-auto', 'ok');
|
||||
mock.sendResponse(msg.id, { stopReason: 'end_turn' });
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
// Call prompt directly without ensureReady
|
||||
const result = await client.prompt('test');
|
||||
expect(result).toBe('ok');
|
||||
});
|
||||
});
|
||||
|
||||
describe('auto-restart', () => {
|
||||
it('restarts after process exit', async () => {
|
||||
createClient();
|
||||
autoHandshake('sess-1');
|
||||
await client.ensureReady();
|
||||
expect(client.isAlive).toBe(true);
|
||||
|
||||
// Simulate process exit
|
||||
mock.proc.killed = true;
|
||||
mock.proc.emit('exit', 1);
|
||||
expect(client.isAlive).toBe(false);
|
||||
|
||||
// Create a new mock for the respawned process
|
||||
mock = createMockProcess();
|
||||
// Update the spawn function to return new mock
|
||||
(client as unknown as { config: { spawn: unknown } }).config.spawn = () => mock.proc;
|
||||
autoHandshake('sess-2');
|
||||
|
||||
await client.ensureReady();
|
||||
expect(client.isAlive).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
describe('timeout', () => {
|
||||
it('kills process and rejects on request timeout', async () => {
|
||||
createClient({ requestTimeoutMs: 50 });
|
||||
autoHandshake('sess-1');
|
||||
await client.ensureReady();
|
||||
|
||||
// Don't respond to the prompt — let it timeout
|
||||
mock.stdin.write.mockImplementation(() => {});
|
||||
|
||||
await expect(client.prompt('test')).rejects.toThrow('timed out');
|
||||
expect(client.isAlive).toBe(false);
|
||||
});
|
||||
|
||||
it('rejects on init timeout', async () => {
|
||||
createClient({ initTimeoutMs: 50 });
|
||||
// Don't respond to initialize
|
||||
mock.stdin.write.mockImplementation(() => {});
|
||||
|
||||
await expect(client.ensureReady()).rejects.toThrow('timed out');
|
||||
});
|
||||
});
|
||||
|
||||
describe('error handling', () => {
|
||||
it('rejects on ACP error response', async () => {
|
||||
createClient();
|
||||
mock.stdin.write.mockImplementation((data: string) => {
|
||||
const msg = JSON.parse(data.trim()) as { id: number; method: string };
|
||||
setImmediate(() => mock.sendError(msg.id, -32603, 'Internal error'));
|
||||
});
|
||||
|
||||
await expect(client.ensureReady()).rejects.toThrow('ACP error -32603: Internal error');
|
||||
});
|
||||
|
||||
it('rejects pending requests on process crash', async () => {
|
||||
createClient();
|
||||
autoHandshake('sess-1');
|
||||
await client.ensureReady();
|
||||
|
||||
// Override write so prompt sends but gets no response; then crash the process
|
||||
mock.stdin.write.mockImplementation(() => {
|
||||
// After the prompt is sent, simulate a process crash
|
||||
setImmediate(() => {
|
||||
mock.proc.killed = true;
|
||||
mock.proc.emit('exit', 1);
|
||||
});
|
||||
});
|
||||
|
||||
const promptPromise = client.prompt('test');
|
||||
await expect(promptPromise).rejects.toThrow('process exited');
|
||||
});
|
||||
});
|
||||
|
||||
describe('permission requests', () => {
|
||||
it('rejects session/request_permission from agent', async () => {
|
||||
createClient();
|
||||
autoHandshake('sess-1');
|
||||
await client.ensureReady();
|
||||
|
||||
mock.stdin.write.mockImplementation((data: string) => {
|
||||
const msg = JSON.parse(data.trim()) as { id: number; method: string };
|
||||
if (msg.method === 'session/prompt') {
|
||||
setImmediate(() => {
|
||||
// Agent asks for permission first
|
||||
mock.sendPermissionRequest(100, 'sess-1');
|
||||
// Then provides the actual response
|
||||
mock.sendChunk('sess-1', 'done');
|
||||
mock.sendResponse(msg.id, { stopReason: 'end_turn' });
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
const result = await client.prompt('test');
|
||||
expect(result).toBe('done');
|
||||
|
||||
// Verify we sent a rejection for the permission request
|
||||
const writes = mock.stdin.write.mock.calls.map((c) => {
|
||||
try { return JSON.parse(c[0] as string); } catch { return null; }
|
||||
}).filter(Boolean);
|
||||
const rejection = writes.find((w: Record<string, unknown>) => w.id === 100);
|
||||
expect(rejection).toBeTruthy();
|
||||
expect((rejection as { result: { outcome: { outcome: string } } }).result.outcome.outcome).toBe('cancelled');
|
||||
});
|
||||
});
|
||||
|
||||
describe('dispose', () => {
|
||||
it('kills process and rejects pending', async () => {
|
||||
createClient();
|
||||
autoHandshake('sess-1');
|
||||
await client.ensureReady();
|
||||
|
||||
// Override write so prompt is sent but gets no response; then dispose
|
||||
mock.stdin.write.mockImplementation(() => {
|
||||
setImmediate(() => client.dispose());
|
||||
});
|
||||
|
||||
const promptPromise = client.prompt('test');
|
||||
await expect(promptPromise).rejects.toThrow('disposed');
|
||||
expect(mock.proc.kill).toHaveBeenCalledWith('SIGTERM');
|
||||
});
|
||||
|
||||
it('is safe to call multiple times', () => {
|
||||
createClient();
|
||||
client.dispose();
|
||||
client.dispose();
|
||||
// No error thrown
|
||||
});
|
||||
});
|
||||
|
||||
describe('isAlive', () => {
|
||||
it('returns false before init', () => {
|
||||
createClient();
|
||||
expect(client.isAlive).toBe(false);
|
||||
});
|
||||
|
||||
it('returns true after successful init', async () => {
|
||||
createClient();
|
||||
autoHandshake();
|
||||
await client.ensureReady();
|
||||
expect(client.isAlive).toBe(true);
|
||||
});
|
||||
|
||||
it('returns false after dispose', async () => {
|
||||
createClient();
|
||||
autoHandshake();
|
||||
await client.ensureReady();
|
||||
client.dispose();
|
||||
expect(client.isAlive).toBe(false);
|
||||
});
|
||||
});
|
||||
});
|
||||
134
src/mcplocal/tests/gemini-acp.test.ts
Normal file
134
src/mcplocal/tests/gemini-acp.test.ts
Normal file
@@ -0,0 +1,134 @@
|
||||
import { describe, it, expect, vi, beforeEach } from 'vitest';

// Shared spies for the mocked AcpClient instance methods. Declared before
// vi.mock so the factory below can close over them.
const mockEnsureReady = vi.fn(async () => {});
const mockPrompt = vi.fn(async () => 'mock response');
const mockDispose = vi.fn();

// Replace the real ACP client with a constructor that just attaches the
// spies — GeminiAcpProvider is tested in isolation from the subprocess.
vi.mock('../src/providers/acp-client.js', () => ({
  AcpClient: vi.fn(function (this: Record<string, unknown>) {
    this.ensureReady = mockEnsureReady;
    this.prompt = mockPrompt;
    this.dispose = mockDispose;
  }),
}));

// Must import after mock setup
const { GeminiAcpProvider } = await import('../src/providers/gemini-acp.js');

describe('GeminiAcpProvider', () => {
  let provider: InstanceType<typeof GeminiAcpProvider>;

  beforeEach(() => {
    vi.clearAllMocks();
    // clearAllMocks wipes the default resolution set at module scope,
    // so re-arm it for every test.
    mockPrompt.mockResolvedValue('mock response');
    provider = new GeminiAcpProvider({ binaryPath: '/usr/bin/gemini', defaultModel: 'gemini-2.5-flash' });
  });

  describe('complete', () => {
    it('builds prompt from messages and returns CompletionResult', async () => {
      mockPrompt.mockResolvedValueOnce('The answer is 42.');

      const result = await provider.complete({
        messages: [
          { role: 'system', content: 'You are helpful.' },
          { role: 'user', content: 'What is the answer?' },
        ],
      });

      expect(result.content).toBe('The answer is 42.');
      expect(result.toolCalls).toEqual([]);
      expect(result.finishReason).toBe('stop');

      // The provider flattens the chat history into one prompt string;
      // system messages get a "System:" prefix.
      const promptText = mockPrompt.mock.calls[0][0] as string;
      expect(promptText).toContain('System: You are helpful.');
      expect(promptText).toContain('What is the answer?');
    });

    it('formats assistant messages with prefix', async () => {
      mockPrompt.mockResolvedValueOnce('ok');

      await provider.complete({
        messages: [
          { role: 'user', content: 'Hello' },
          { role: 'assistant', content: 'Hi there' },
          { role: 'user', content: 'How are you?' },
        ],
      });

      const promptText = mockPrompt.mock.calls[0][0] as string;
      expect(promptText).toContain('Assistant: Hi there');
    });

    it('trims response content', async () => {
      mockPrompt.mockResolvedValueOnce('  padded response  \n');

      const result = await provider.complete({
        messages: [{ role: 'user', content: 'test' }],
      });

      expect(result.content).toBe('padded response');
    });

    it('serializes concurrent calls', async () => {
      const callOrder: number[] = [];
      let callCount = 0;

      // Each prompt takes 10ms; if calls were truly parallel the order
      // array could interleave. The provider must queue them FIFO.
      mockPrompt.mockImplementation(async () => {
        const myCall = ++callCount;
        callOrder.push(myCall);
        await new Promise((r) => setTimeout(r, 10));
        return `response-${myCall}`;
      });

      const [r1, r2, r3] = await Promise.all([
        provider.complete({ messages: [{ role: 'user', content: 'a' }] }),
        provider.complete({ messages: [{ role: 'user', content: 'b' }] }),
        provider.complete({ messages: [{ role: 'user', content: 'c' }] }),
      ]);

      expect(r1.content).toBe('response-1');
      expect(r2.content).toBe('response-2');
      expect(r3.content).toBe('response-3');
      expect(callOrder).toEqual([1, 2, 3]);
    });

    it('continues queue after error', async () => {
      // A rejected prompt must not wedge the serialization queue.
      mockPrompt
        .mockRejectedValueOnce(new Error('first fails'))
        .mockResolvedValueOnce('second works');

      const results = await Promise.allSettled([
        provider.complete({ messages: [{ role: 'user', content: 'a' }] }),
        provider.complete({ messages: [{ role: 'user', content: 'b' }] }),
      ]);

      expect(results[0].status).toBe('rejected');
      expect(results[1].status).toBe('fulfilled');
      if (results[1].status === 'fulfilled') {
        expect(results[1].value.content).toBe('second works');
      }
    });
  });

  describe('listModels', () => {
    it('returns static model list', async () => {
      const models = await provider.listModels();
      expect(models).toContain('gemini-2.5-flash');
      expect(models).toContain('gemini-2.5-pro');
      expect(models).toContain('gemini-2.0-flash');
    });
  });

  describe('dispose', () => {
    it('delegates to AcpClient', () => {
      provider.dispose();
      expect(mockDispose).toHaveBeenCalled();
    });
  });

  describe('name', () => {
    it('is gemini-cli for config compatibility', () => {
      expect(provider.name).toBe('gemini-cli');
    });
  });
});
|
||||
65
src/mcplocal/tests/http/config.test.ts
Normal file
65
src/mcplocal/tests/http/config.test.ts
Normal file
@@ -0,0 +1,65 @@
|
||||
import { describe, it, expect, vi, afterEach } from 'vitest';
import { loadLlmConfig } from '../../src/http/config.js';
import { existsSync, readFileSync } from 'node:fs';

// Mock only the two fs functions loadLlmConfig uses; keep the rest of
// node:fs real so unrelated imports keep working.
vi.mock('node:fs', async () => {
  const actual = await vi.importActual<typeof import('node:fs')>('node:fs');
  return {
    ...actual,
    existsSync: vi.fn(),
    readFileSync: vi.fn(),
  };
});

afterEach(() => {
  vi.restoreAllMocks();
});

// loadLlmConfig reads the daemon config file and returns the `llm` section,
// or undefined for every "not configured / unreadable" case — it must never
// throw on bad input.
describe('loadLlmConfig', () => {
  it('returns undefined when config file does not exist', () => {
    vi.mocked(existsSync).mockReturnValue(false);
    expect(loadLlmConfig()).toBeUndefined();
  });

  it('returns undefined when config has no llm section', () => {
    vi.mocked(existsSync).mockReturnValue(true);
    vi.mocked(readFileSync).mockReturnValue(JSON.stringify({ mcplocalUrl: 'http://localhost:3200' }));
    expect(loadLlmConfig()).toBeUndefined();
  });

  it('returns undefined when provider is none', () => {
    vi.mocked(existsSync).mockReturnValue(true);
    vi.mocked(readFileSync).mockReturnValue(JSON.stringify({ llm: { provider: 'none' } }));
    expect(loadLlmConfig()).toBeUndefined();
  });

  it('returns LLM config when provider is configured', () => {
    vi.mocked(existsSync).mockReturnValue(true);
    vi.mocked(readFileSync).mockReturnValue(JSON.stringify({
      llm: { provider: 'anthropic', model: 'claude-haiku-3-5-20241022' },
    }));
    const result = loadLlmConfig();
    expect(result).toEqual({ provider: 'anthropic', model: 'claude-haiku-3-5-20241022' });
  });

  it('returns full LLM config with all fields', () => {
    vi.mocked(existsSync).mockReturnValue(true);
    vi.mocked(readFileSync).mockReturnValue(JSON.stringify({
      llm: { provider: 'vllm', model: 'my-model', url: 'http://gpu:8000' },
    }));
    const result = loadLlmConfig();
    expect(result).toEqual({ provider: 'vllm', model: 'my-model', url: 'http://gpu:8000' });
  });

  it('returns undefined on malformed JSON', () => {
    vi.mocked(existsSync).mockReturnValue(true);
    vi.mocked(readFileSync).mockReturnValue('NOT JSON!!!');
    expect(loadLlmConfig()).toBeUndefined();
  });

  it('returns undefined on read error', () => {
    vi.mocked(existsSync).mockReturnValue(true);
    vi.mocked(readFileSync).mockImplementation(() => { throw new Error('EACCES'); });
    expect(loadLlmConfig()).toBeUndefined();
  });
});
|
||||
135
src/mcplocal/tests/llm-config.test.ts
Normal file
135
src/mcplocal/tests/llm-config.test.ts
Normal file
@@ -0,0 +1,135 @@
|
||||
import { describe, it, expect, vi } from 'vitest';
|
||||
import { createProviderFromConfig } from '../src/llm-config.js';
|
||||
import type { SecretStore } from '@mcpctl/shared';
|
||||
|
||||
function mockSecretStore(secrets: Record<string, string> = {}): SecretStore {
|
||||
return {
|
||||
get: vi.fn(async (key: string) => secrets[key] ?? null),
|
||||
set: vi.fn(async () => {}),
|
||||
delete: vi.fn(async () => true),
|
||||
backend: () => 'mock',
|
||||
};
|
||||
}
|
||||
|
||||
// createProviderFromConfig maps an LLM config section to a populated
// ProviderRegistry. Contract under test: unknown/none config → empty
// registry; cloud providers require their API key in the secret store and
// degrade to an empty registry (with a stderr warning) when it is absent.
describe('createProviderFromConfig', () => {
  it('returns empty registry for undefined config', async () => {
    const store = mockSecretStore();
    const registry = await createProviderFromConfig(undefined, store);
    expect(registry.getActive()).toBeNull();
    expect(registry.list()).toEqual([]);
  });

  it('returns empty registry for provider=none', async () => {
    const store = mockSecretStore();
    const registry = await createProviderFromConfig({ provider: 'none' }, store);
    expect(registry.getActive()).toBeNull();
  });

  it('creates gemini-cli provider using ACP', async () => {
    const store = mockSecretStore();
    const registry = await createProviderFromConfig(
      { provider: 'gemini-cli', model: 'gemini-2.5-flash', binaryPath: '/usr/bin/gemini' },
      store,
    );
    expect(registry.getActive()).not.toBeNull();
    expect(registry.getActive()!.name).toBe('gemini-cli');
    // ACP provider has dispose method
    expect(typeof registry.getActive()!.dispose).toBe('function');
  });

  it('creates ollama provider', async () => {
    const store = mockSecretStore();
    const registry = await createProviderFromConfig(
      { provider: 'ollama', model: 'llama3.2', url: 'http://localhost:11434' },
      store,
    );
    expect(registry.getActive()!.name).toBe('ollama');
  });

  it('creates anthropic provider with API key from secret store', async () => {
    const store = mockSecretStore({ 'anthropic-api-key': 'sk-ant-test' });
    const registry = await createProviderFromConfig(
      { provider: 'anthropic', model: 'claude-haiku-3-5-20241022' },
      store,
    );
    expect(registry.getActive()!.name).toBe('anthropic');
    expect(store.get).toHaveBeenCalledWith('anthropic-api-key');
  });

  it('returns empty registry when anthropic API key is missing', async () => {
    const store = mockSecretStore();
    // Silence (and capture) the warning the factory writes to stderr.
    const stderrSpy = vi.spyOn(process.stderr, 'write').mockImplementation(() => true);
    const registry = await createProviderFromConfig(
      { provider: 'anthropic', model: 'claude-haiku-3-5-20241022' },
      store,
    );
    expect(registry.getActive()).toBeNull();
    expect(stderrSpy).toHaveBeenCalledWith(expect.stringContaining('Anthropic API key not found'));
    stderrSpy.mockRestore();
  });

  it('creates openai provider with API key from secret store', async () => {
    const store = mockSecretStore({ 'openai-api-key': 'sk-test' });
    const registry = await createProviderFromConfig(
      { provider: 'openai', model: 'gpt-4o', url: 'https://api.openai.com' },
      store,
    );
    expect(registry.getActive()!.name).toBe('openai');
    expect(store.get).toHaveBeenCalledWith('openai-api-key');
  });

  it('returns empty registry when openai API key is missing', async () => {
    const store = mockSecretStore();
    const stderrSpy = vi.spyOn(process.stderr, 'write').mockImplementation(() => true);
    const registry = await createProviderFromConfig(
      { provider: 'openai' },
      store,
    );
    expect(registry.getActive()).toBeNull();
    stderrSpy.mockRestore();
  });

  it('creates deepseek provider with API key from secret store', async () => {
    const store = mockSecretStore({ 'deepseek-api-key': 'sk-ds-test' });
    const registry = await createProviderFromConfig(
      { provider: 'deepseek', model: 'deepseek-chat' },
      store,
    );
    expect(registry.getActive()!.name).toBe('deepseek');
    expect(store.get).toHaveBeenCalledWith('deepseek-api-key');
  });

  it('returns empty registry when deepseek API key is missing', async () => {
    const store = mockSecretStore();
    const stderrSpy = vi.spyOn(process.stderr, 'write').mockImplementation(() => true);
    const registry = await createProviderFromConfig(
      { provider: 'deepseek' },
      store,
    );
    expect(registry.getActive()).toBeNull();
    stderrSpy.mockRestore();
  });

  it('creates vllm provider using OpenAI provider', async () => {
    const store = mockSecretStore();
    const registry = await createProviderFromConfig(
      { provider: 'vllm', model: 'my-model', url: 'http://gpu-server:8000' },
      store,
    );
    // vLLM reuses OpenAI provider under the hood
    expect(registry.getActive()).not.toBeNull();
    expect(registry.getActive()!.name).toBe('openai');
  });

  it('returns empty registry when vllm URL is missing', async () => {
    const store = mockSecretStore();
    const stderrSpy = vi.spyOn(process.stderr, 'write').mockImplementation(() => true);
    const registry = await createProviderFromConfig(
      { provider: 'vllm' },
      store,
    );
    expect(registry.getActive()).toBeNull();
    expect(stderrSpy).toHaveBeenCalledWith(expect.stringContaining('vLLM URL not configured'));
    stderrSpy.mockRestore();
  });
});
|
||||
433
src/mcplocal/tests/pagination.test.ts
Normal file
433
src/mcplocal/tests/pagination.test.ts
Normal file
@@ -0,0 +1,433 @@
|
||||
import { describe, it, expect, vi, afterEach } from 'vitest';
|
||||
import { ResponsePaginator, DEFAULT_PAGINATION_CONFIG } from '../src/llm/pagination.js';
|
||||
import type { ProviderRegistry } from '../src/providers/registry.js';
|
||||
import type { LlmProvider } from '../src/providers/types.js';
|
||||
|
||||
function makeProvider(response: string): ProviderRegistry {
|
||||
const provider: LlmProvider = {
|
||||
name: 'test',
|
||||
isAvailable: () => true,
|
||||
complete: vi.fn().mockResolvedValue({ content: response }),
|
||||
};
|
||||
return {
|
||||
getActive: () => provider,
|
||||
register: vi.fn(),
|
||||
setActive: vi.fn(),
|
||||
listProviders: () => [{ name: 'test', available: true, active: true }],
|
||||
} as unknown as ProviderRegistry;
|
||||
}
|
||||
|
||||
function makeLargeString(size: number, pattern = 'x'): string {
|
||||
return pattern.repeat(size);
|
||||
}
|
||||
|
||||
function makeLargeStringWithNewlines(size: number, lineLen = 100): string {
|
||||
const lines: string[] = [];
|
||||
let total = 0;
|
||||
let lineNum = 0;
|
||||
while (total < size) {
|
||||
const line = `line-${String(lineNum).padStart(5, '0')} ${'x'.repeat(lineLen - 15)}`;
|
||||
lines.push(line);
|
||||
total += line.length + 1; // +1 for newline
|
||||
lineNum++;
|
||||
}
|
||||
return lines.join('\n');
|
||||
}
|
||||
|
||||
// ResponsePaginator caches oversized tool responses and replaces them with a
// page index; follow-up calls with `_resultId` + `_page` fetch the cached
// pages. With an LLM registry it augments the index with per-page summaries,
// falling back to a plain index on any LLM failure.
describe('ResponsePaginator', () => {
  afterEach(() => {
    vi.restoreAllMocks();
  });

  // --- shouldPaginate ---

  describe('shouldPaginate', () => {
    it('returns false for strings below threshold', () => {
      const paginator = new ResponsePaginator(null);
      expect(paginator.shouldPaginate('short string')).toBe(false);
    });

    it('returns false for strings just below threshold', () => {
      const paginator = new ResponsePaginator(null);
      const str = makeLargeString(DEFAULT_PAGINATION_CONFIG.sizeThreshold - 1);
      expect(paginator.shouldPaginate(str)).toBe(false);
    });

    it('returns true for strings at threshold', () => {
      // Threshold is inclusive: exactly sizeThreshold chars paginates.
      const paginator = new ResponsePaginator(null);
      const str = makeLargeString(DEFAULT_PAGINATION_CONFIG.sizeThreshold);
      expect(paginator.shouldPaginate(str)).toBe(true);
    });

    it('returns true for strings above threshold', () => {
      const paginator = new ResponsePaginator(null);
      const str = makeLargeString(DEFAULT_PAGINATION_CONFIG.sizeThreshold + 1000);
      expect(paginator.shouldPaginate(str)).toBe(true);
    });

    it('respects custom threshold', () => {
      const paginator = new ResponsePaginator(null, { sizeThreshold: 100 });
      expect(paginator.shouldPaginate('x'.repeat(99))).toBe(false);
      expect(paginator.shouldPaginate('x'.repeat(100))).toBe(true);
    });
  });

  // --- paginate (no LLM) ---

  describe('paginate without LLM', () => {
    it('returns null for small responses', async () => {
      // null means "don't paginate, pass the response through as-is".
      const paginator = new ResponsePaginator(null);
      const result = await paginator.paginate('test/tool', 'small response');
      expect(result).toBeNull();
    });

    it('paginates large responses with simple index', async () => {
      const paginator = new ResponsePaginator(null, { sizeThreshold: 100, pageSize: 50 });
      const raw = makeLargeStringWithNewlines(200);
      const result = await paginator.paginate('test/tool', raw);

      expect(result).not.toBeNull();
      expect(result!.content).toHaveLength(1);
      expect(result!.content[0]!.type).toBe('text');

      const text = result!.content[0]!.text;
      expect(text).toContain('too large to return directly');
      expect(text).toContain('_resultId');
      expect(text).toContain('_page');
      // No LLM registry → plain index, no summary section.
      expect(text).not.toContain('AI-generated summaries');
    });

    it('includes correct page count in index', async () => {
      const paginator = new ResponsePaginator(null, { sizeThreshold: 100, pageSize: 50 });
      const raw = 'x'.repeat(200);
      const result = await paginator.paginate('test/tool', raw);

      expect(result).not.toBeNull();
      const text = result!.content[0]!.text;
      // 200 chars / 50 per page = 4 pages
      expect(text).toContain('4 pages');
      expect(text).toContain('Page 1:');
      expect(text).toContain('Page 4:');
    });

    it('caches the result for later page retrieval', async () => {
      const paginator = new ResponsePaginator(null, { sizeThreshold: 100, pageSize: 50 });
      const raw = 'x'.repeat(200);
      await paginator.paginate('test/tool', raw);

      expect(paginator.cacheSize).toBe(1);
    });

    it('includes page instructions with _resultId and _page', async () => {
      const paginator = new ResponsePaginator(null, { sizeThreshold: 100, pageSize: 50 });
      const raw = 'x'.repeat(200);
      const result = await paginator.paginate('test/tool', raw);

      // The index shows the caller the exact JSON keys to use next call.
      const text = result!.content[0]!.text;
      expect(text).toContain('"_resultId"');
      expect(text).toContain('"_page"');
      expect(text).toContain('"all"');
    });
  });

  // --- paginate (with LLM) ---

  describe('paginate with LLM', () => {
    it('generates smart index when provider available', async () => {
      // The provider returns JSON page summaries; they must appear verbatim
      // in the index, labeled as AI-generated.
      const summaries = JSON.stringify([
        { page: 1, summary: 'Configuration nodes and global settings' },
        { page: 2, summary: 'HTTP request nodes and API integrations' },
      ]);
      const registry = makeProvider(summaries);
      const paginator = new ResponsePaginator(registry, { sizeThreshold: 100, pageSize: 60 });
      const raw = makeLargeStringWithNewlines(150);
      const result = await paginator.paginate('node-red/get_flows', raw);

      expect(result).not.toBeNull();
      const text = result!.content[0]!.text;
      expect(text).toContain('AI-generated summaries');
      expect(text).toContain('Configuration nodes and global settings');
      expect(text).toContain('HTTP request nodes and API integrations');
    });

    it('falls back to simple index on LLM failure', async () => {
      const provider: LlmProvider = {
        name: 'test',
        isAvailable: () => true,
        complete: vi.fn().mockRejectedValue(new Error('LLM unavailable')),
      };
      const registry = {
        getActive: () => provider,
        register: vi.fn(),
        setActive: vi.fn(),
        listProviders: () => [{ name: 'test', available: true, active: true }],
      } as unknown as ProviderRegistry;

      const paginator = new ResponsePaginator(registry, { sizeThreshold: 100, pageSize: 50 });
      const raw = 'x'.repeat(200);
      const result = await paginator.paginate('test/tool', raw);

      expect(result).not.toBeNull();
      const text = result!.content[0]!.text;
      // Should NOT contain AI-generated label
      expect(text).not.toContain('AI-generated summaries');
      expect(text).toContain('Page 1:');
    });

    it('sends page previews to LLM, not full content', async () => {
      const completeFn = vi.fn().mockResolvedValue({
        content: JSON.stringify([
          { page: 1, summary: 'test' },
          { page: 2, summary: 'test2' },
          { page: 3, summary: 'test3' },
        ]),
      });
      const provider: LlmProvider = {
        name: 'test',
        isAvailable: () => true,
        complete: completeFn,
      };
      const registry = {
        getActive: () => provider,
        register: vi.fn(),
        setActive: vi.fn(),
        listProviders: () => [{ name: 'test', available: true, active: true }],
      } as unknown as ProviderRegistry;

      // Use a large enough string (3000 chars, pages of 1000) so previews (500 per page) are smaller than raw
      const paginator = new ResponsePaginator(registry, { sizeThreshold: 2000, pageSize: 1000 });
      const raw = makeLargeStringWithNewlines(3000);
      await paginator.paginate('test/tool', raw);

      expect(completeFn).toHaveBeenCalledOnce();
      const call = completeFn.mock.calls[0]![0]!;
      const userMsg = call.messages.find((m: { role: string }) => m.role === 'user');
      // Should contain page preview markers
      expect(userMsg.content).toContain('Page 1');
      // The LLM prompt should be significantly smaller than the full content
      // (each page sends ~500 chars preview, not full 1000 chars)
      expect(userMsg.content.length).toBeLessThan(raw.length);
    });

    it('falls back to simple when no active provider', async () => {
      const registry = {
        getActive: () => null,
        register: vi.fn(),
        setActive: vi.fn(),
        listProviders: () => [],
      } as unknown as ProviderRegistry;

      const paginator = new ResponsePaginator(registry, { sizeThreshold: 100, pageSize: 50 });
      const raw = 'x'.repeat(200);
      const result = await paginator.paginate('test/tool', raw);

      expect(result).not.toBeNull();
      const text = result!.content[0]!.text;
      expect(text).not.toContain('AI-generated summaries');
    });
  });

  // --- getPage ---

  describe('getPage', () => {
    it('returns specific page content', async () => {
      const paginator = new ResponsePaginator(null, { sizeThreshold: 100, pageSize: 50 });
      const raw = 'AAAA'.repeat(25) + 'BBBB'.repeat(25); // 200 chars total
      await paginator.paginate('test/tool', raw);

      // Extract resultId from cache (there should be exactly 1 entry)
      expect(paginator.cacheSize).toBe(1);

      // We need the resultId — get it from the index response
      const indexResult = await paginator.paginate('test/tool2', 'C'.repeat(200));
      const text = indexResult!.content[0]!.text;
      const match = /"_resultId": "([^"]+)"/.exec(text);
      expect(match).not.toBeNull();
      const resultId = match![1]!;

      const page1 = paginator.getPage(resultId, 1);
      expect(page1).not.toBeNull();
      expect(page1!.content[0]!.text).toContain('Page 1/');
      expect(page1!.content[0]!.text).toContain('C');
    });

    it('returns full content with _page=all', async () => {
      const paginator = new ResponsePaginator(null, { sizeThreshold: 100, pageSize: 50 });
      const raw = 'D'.repeat(200);
      const indexResult = await paginator.paginate('test/tool', raw);
      const match = /"_resultId": "([^"]+)"/.exec(indexResult!.content[0]!.text);
      const resultId = match![1]!;

      // 'all' bypasses paging and returns the raw cached string unchanged.
      const allPages = paginator.getPage(resultId, 'all');
      expect(allPages).not.toBeNull();
      expect(allPages!.content[0]!.text).toBe(raw);
    });

    it('returns null for unknown resultId (cache miss)', () => {
      const paginator = new ResponsePaginator(null);
      const result = paginator.getPage('nonexistent-id', 1);
      expect(result).toBeNull();
    });

    it('returns error for out-of-range page', async () => {
      // Out-of-range is a user error, so it yields an error *message*,
      // not a null (which signals cache miss).
      const paginator = new ResponsePaginator(null, { sizeThreshold: 100, pageSize: 50 });
      const raw = 'x'.repeat(200);
      const indexResult = await paginator.paginate('test/tool', raw);
      const match = /"_resultId": "([^"]+)"/.exec(indexResult!.content[0]!.text);
      const resultId = match![1]!;

      const page999 = paginator.getPage(resultId, 999);
      expect(page999).not.toBeNull();
      expect(page999!.content[0]!.text).toContain('out of range');
    });

    it('returns null after TTL expiry', async () => {
      // Freeze the clock so TTL can be crossed deterministically.
      const now = Date.now();
      vi.spyOn(Date, 'now').mockReturnValue(now);

      const paginator = new ResponsePaginator(null, { sizeThreshold: 100, pageSize: 50, ttlMs: 1000 });
      const raw = 'x'.repeat(200);
      const indexResult = await paginator.paginate('test/tool', raw);
      const match = /"_resultId": "([^"]+)"/.exec(indexResult!.content[0]!.text);
      const resultId = match![1]!;

      // Within TTL — should work
      expect(paginator.getPage(resultId, 1)).not.toBeNull();

      // Past TTL — should be null
      vi.spyOn(Date, 'now').mockReturnValue(now + 1001);
      expect(paginator.getPage(resultId, 1)).toBeNull();
    });
  });

  // --- extractPaginationParams ---

  describe('extractPaginationParams', () => {
    it('returns null when no pagination params', () => {
      expect(ResponsePaginator.extractPaginationParams({ query: 'test' })).toBeNull();
    });

    it('returns null when only _resultId (no _page)', () => {
      expect(ResponsePaginator.extractPaginationParams({ _resultId: 'abc' })).toBeNull();
    });

    it('returns null when only _page (no _resultId)', () => {
      expect(ResponsePaginator.extractPaginationParams({ _page: 1 })).toBeNull();
    });

    it('extracts numeric page', () => {
      const result = ResponsePaginator.extractPaginationParams({ _resultId: 'abc-123', _page: 2 });
      expect(result).toEqual({ resultId: 'abc-123', page: 2 });
    });

    it('extracts _page=all', () => {
      const result = ResponsePaginator.extractPaginationParams({ _resultId: 'abc-123', _page: 'all' });
      expect(result).toEqual({ resultId: 'abc-123', page: 'all' });
    });

    it('rejects negative page numbers', () => {
      expect(ResponsePaginator.extractPaginationParams({ _resultId: 'abc', _page: -1 })).toBeNull();
    });

    it('rejects zero page number', () => {
      // Pages are 1-based.
      expect(ResponsePaginator.extractPaginationParams({ _resultId: 'abc', _page: 0 })).toBeNull();
    });

    it('rejects non-integer page numbers', () => {
      expect(ResponsePaginator.extractPaginationParams({ _resultId: 'abc', _page: 1.5 })).toBeNull();
    });

    it('requires string resultId', () => {
      expect(ResponsePaginator.extractPaginationParams({ _resultId: 123, _page: 1 })).toBeNull();
    });
  });

  // --- Cache management ---

  describe('cache management', () => {
    it('evicts expired entries on paginate', async () => {
      const now = Date.now();
      vi.spyOn(Date, 'now').mockReturnValue(now);

      const paginator = new ResponsePaginator(null, { sizeThreshold: 100, pageSize: 50, ttlMs: 1000 });
      await paginator.paginate('test/tool1', 'x'.repeat(200));
      expect(paginator.cacheSize).toBe(1);

      // Advance past TTL and paginate again
      vi.spyOn(Date, 'now').mockReturnValue(now + 1001);
      await paginator.paginate('test/tool2', 'y'.repeat(200));
      // Old entry evicted, new one added
      expect(paginator.cacheSize).toBe(1);
    });

    it('evicts LRU at capacity', async () => {
      const paginator = new ResponsePaginator(null, { sizeThreshold: 100, pageSize: 50, maxCachedResults: 2 });
      await paginator.paginate('test/tool1', 'A'.repeat(200));
      await paginator.paginate('test/tool2', 'B'.repeat(200));
      expect(paginator.cacheSize).toBe(2);

      // Third entry should evict the first
      await paginator.paginate('test/tool3', 'C'.repeat(200));
      expect(paginator.cacheSize).toBe(2);
    });

    it('clearCache removes all entries', async () => {
      const paginator = new ResponsePaginator(null, { sizeThreshold: 100, pageSize: 50 });
      await paginator.paginate('test/tool1', 'x'.repeat(200));
      await paginator.paginate('test/tool2', 'y'.repeat(200));
      expect(paginator.cacheSize).toBe(2);

      paginator.clearCache();
      expect(paginator.cacheSize).toBe(0);
    });
  });

  // --- Page splitting ---

  describe('page splitting', () => {
    it('breaks at newline boundaries', async () => {
      // Create content where a newline falls within the page boundary
      const paginator = new ResponsePaginator(null, { sizeThreshold: 100, pageSize: 60 });
      const lines = Array.from({ length: 10 }, (_, i) => `line${String(i).padStart(3, '0')} ${'x'.repeat(20)}`);
      const raw = lines.join('\n');
      // raw is ~269 chars
      const result = await paginator.paginate('test/tool', raw);

      expect(result).not.toBeNull();
      // Pages should break at newline boundaries, not mid-line
      const text = result!.content[0]!.text;
      const match = /"_resultId": "([^"]+)"/.exec(text);
      const resultId = match![1]!;

      const page1 = paginator.getPage(resultId, 1);
      expect(page1).not.toBeNull();
      // Page content should end at a newline boundary (no partial lines)
      const pageText = page1!.content[0]!.text;
      // Remove the header line
      const contentStart = pageText.indexOf('\n\n') + 2;
      const pageContent = pageText.slice(contentStart);
      // Content should contain complete lines
      expect(pageContent).toMatch(/line\d{3}/);
    });

    it('handles content without newlines', async () => {
      const paginator = new ResponsePaginator(null, { sizeThreshold: 100, pageSize: 50 });
      const raw = 'x'.repeat(200); // No newlines at all
      const result = await paginator.paginate('test/tool', raw);

      expect(result).not.toBeNull();
      const text = result!.content[0]!.text;
      expect(text).toContain('4 pages'); // 200/50 = 4
    });

    it('handles content that fits exactly in one page at threshold', async () => {
      const paginator = new ResponsePaginator(null, { sizeThreshold: 100, pageSize: 100 });
      const raw = 'x'.repeat(100); // Exactly at threshold and page size
      const result = await paginator.paginate('test/tool', raw);

      expect(result).not.toBeNull();
      const text = result!.content[0]!.text;
      expect(text).toContain('1 pages');
    });
  });
});
|
||||
@@ -54,7 +54,7 @@ describe('refreshProjectUpstreams', () => {
|
||||
const client = mockMcpdClient(servers);
|
||||
|
||||
await refreshProjectUpstreams(router, client as any, 'smart-home', 'user-token-123');
|
||||
expect(client.forward).toHaveBeenCalledWith('GET', '/api/v1/projects/smart-home/servers', '', undefined);
|
||||
expect(client.forward).toHaveBeenCalledWith('GET', '/api/v1/projects/smart-home/servers', '', undefined, 'user-token-123');
|
||||
expect(router.getUpstreamNames()).toContain('grafana');
|
||||
});
|
||||
|
||||
|
||||
@@ -11,7 +11,7 @@ vi.mock('../src/discovery.js', () => ({
|
||||
import { refreshProjectUpstreams } from '../src/discovery.js';
|
||||
|
||||
function mockMcpdClient() {
|
||||
return {
|
||||
const client: Record<string, unknown> = {
|
||||
baseUrl: 'http://test:3100',
|
||||
token: 'test-token',
|
||||
get: vi.fn(async () => []),
|
||||
@@ -19,7 +19,11 @@ function mockMcpdClient() {
|
||||
put: vi.fn(),
|
||||
delete: vi.fn(),
|
||||
forward: vi.fn(async () => ({ status: 200, body: [] })),
|
||||
withHeaders: vi.fn(),
|
||||
};
|
||||
// withHeaders returns a new client-like object (returns self for simplicity)
|
||||
(client.withHeaders as ReturnType<typeof vi.fn>).mockReturnValue(client);
|
||||
return client;
|
||||
}
|
||||
|
||||
describe('registerProjectMcpEndpoint', () => {
|
||||
|
||||
248
src/mcplocal/tests/router-prompts.test.ts
Normal file
248
src/mcplocal/tests/router-prompts.test.ts
Normal file
@@ -0,0 +1,248 @@
|
||||
import { describe, it, expect, vi, beforeEach } from 'vitest';
|
||||
import { McpRouter } from '../src/router.js';
|
||||
import type { UpstreamConnection, JsonRpcRequest, JsonRpcResponse, JsonRpcNotification } from '../src/types.js';
|
||||
import type { McpdClient } from '../src/http/mcpd-client.js';
|
||||
|
||||
function mockUpstream(name: string, opts?: {
|
||||
tools?: Array<{ name: string; description?: string; inputSchema?: unknown }>;
|
||||
}): UpstreamConnection {
|
||||
return {
|
||||
name,
|
||||
isAlive: vi.fn(() => true),
|
||||
close: vi.fn(async () => {}),
|
||||
onNotification: vi.fn(),
|
||||
send: vi.fn(async (req: JsonRpcRequest): Promise<JsonRpcResponse> => {
|
||||
if (req.method === 'tools/list') {
|
||||
return { jsonrpc: '2.0', id: req.id, result: { tools: opts?.tools ?? [] } };
|
||||
}
|
||||
if (req.method === 'resources/list') {
|
||||
return { jsonrpc: '2.0', id: req.id, result: { resources: [] } };
|
||||
}
|
||||
return { jsonrpc: '2.0', id: req.id, result: {} };
|
||||
}),
|
||||
};
|
||||
}
|
||||
|
||||
function mockMcpdClient(): McpdClient {
|
||||
return {
|
||||
get: vi.fn(async () => []),
|
||||
post: vi.fn(async () => ({})),
|
||||
put: vi.fn(async () => ({})),
|
||||
delete: vi.fn(async () => {}),
|
||||
forward: vi.fn(async () => ({ status: 200, body: {} })),
|
||||
withHeaders: vi.fn(function (this: McpdClient) { return this; }),
|
||||
} as unknown as McpdClient;
|
||||
}
|
||||
|
||||
// Router-level tests for prompt support: the synthetic propose_prompt tool,
// prompt-backed mcpctl:// resources, and session-scoped visibility.
// A fresh router and mcpd stub are built per test; mock return values are
// installed per test via vi.mocked(...).
describe('McpRouter - Prompt Integration', () => {
  let router: McpRouter;
  let mcpdClient: McpdClient;

  beforeEach(() => {
    router = new McpRouter();
    mcpdClient = mockMcpdClient();
  });

  // The propose_prompt tool only exists once setPromptConfig() has wired a
  // project + mcpd client into the router.
  describe('propose_prompt tool', () => {
    it('should include propose_prompt in tools/list when prompt config is set', async () => {
      router.setPromptConfig(mcpdClient, 'test-project');
      router.addUpstream(mockUpstream('server1'));

      const response = await router.route({
        jsonrpc: '2.0',
        id: 1,
        method: 'tools/list',
      });

      const tools = (response.result as { tools: Array<{ name: string }> }).tools;
      expect(tools.some((t) => t.name === 'propose_prompt')).toBe(true);
    });

    it('should NOT include propose_prompt when no prompt config', async () => {
      router.addUpstream(mockUpstream('server1'));

      const response = await router.route({
        jsonrpc: '2.0',
        id: 1,
        method: 'tools/list',
      });

      const tools = (response.result as { tools: Array<{ name: string }> }).tools;
      expect(tools.some((t) => t.name === 'propose_prompt')).toBe(false);
    });

    // A tools/call with a session context should forward the session id as
    // createdBySession in the POST body.
    it('should call mcpd to create a prompt request', async () => {
      router.setPromptConfig(mcpdClient, 'my-project');

      const response = await router.route(
        {
          jsonrpc: '2.0',
          id: 2,
          method: 'tools/call',
          params: {
            name: 'propose_prompt',
            arguments: { name: 'my-prompt', content: 'Hello world' },
          },
        },
        { sessionId: 'sess-123' },
      );

      expect(response.error).toBeUndefined();
      expect(mcpdClient.post).toHaveBeenCalledWith(
        '/api/v1/projects/my-project/promptrequests',
        { name: 'my-prompt', content: 'Hello world', createdBySession: 'sess-123' },
      );
    });

    // -32602 is JSON-RPC "invalid params".
    it('should return error when name or content missing', async () => {
      router.setPromptConfig(mcpdClient, 'proj');

      const response = await router.route({
        jsonrpc: '2.0',
        id: 3,
        method: 'tools/call',
        params: {
          name: 'propose_prompt',
          arguments: { name: 'only-name' },
        },
      });

      expect(response.error?.code).toBe(-32602);
      expect(response.error?.message).toContain('Missing required arguments');
    });

    // -32603 is JSON-RPC "internal error"; the mcpd failure message is
    // surfaced verbatim to the caller.
    it('should return error when mcpd call fails', async () => {
      router.setPromptConfig(mcpdClient, 'proj');
      vi.mocked(mcpdClient.post).mockRejectedValue(new Error('mcpd returned 409'));

      const response = await router.route({
        jsonrpc: '2.0',
        id: 4,
        method: 'tools/call',
        params: {
          name: 'propose_prompt',
          arguments: { name: 'dup', content: 'x' },
        },
      });

      expect(response.error?.code).toBe(-32603);
      expect(response.error?.message).toContain('mcpd returned 409');
    });
  });

  // Prompts and pending prompt requests are surfaced as mcpctl://prompts/*
  // resources in resources/list, fetched from mcpd's "visible" endpoint.
  describe('prompt resources', () => {
    it('should include prompt resources in resources/list', async () => {
      router.setPromptConfig(mcpdClient, 'test-project');
      vi.mocked(mcpdClient.get).mockResolvedValue([
        { name: 'approved-prompt', content: 'Content A', type: 'prompt' },
        { name: 'pending-req', content: 'Content B', type: 'promptrequest' },
      ]);

      const response = await router.route(
        { jsonrpc: '2.0', id: 1, method: 'resources/list' },
        { sessionId: 'sess-1' },
      );

      const resources = (response.result as { resources: Array<{ uri: string; description?: string }> }).resources;
      expect(resources).toHaveLength(2);
      expect(resources[0]!.uri).toBe('mcpctl://prompts/approved-prompt');
      expect(resources[0]!.description).toContain('Approved');
      expect(resources[1]!.uri).toBe('mcpctl://prompts/pending-req');
      expect(resources[1]!.description).toContain('Pending');
    });

    it('should pass session ID when fetching visible prompts', async () => {
      router.setPromptConfig(mcpdClient, 'proj');
      vi.mocked(mcpdClient.get).mockResolvedValue([]);

      await router.route(
        { jsonrpc: '2.0', id: 1, method: 'resources/list' },
        { sessionId: 'my-session' },
      );

      expect(mcpdClient.get).toHaveBeenCalledWith(
        '/api/v1/projects/proj/prompts/visible?session=my-session',
      );
    });

    // resources/read serves prompt bodies from the cache populated by the
    // preceding resources/list — list-then-read order matters here.
    it('should read mcpctl resource content', async () => {
      router.setPromptConfig(mcpdClient, 'proj');
      vi.mocked(mcpdClient.get).mockResolvedValue([
        { name: 'my-prompt', content: 'The content here', type: 'prompt' },
      ]);

      // First list to populate cache
      await router.route({ jsonrpc: '2.0', id: 1, method: 'resources/list' });

      // Then read
      const response = await router.route({
        jsonrpc: '2.0',
        id: 2,
        method: 'resources/read',
        params: { uri: 'mcpctl://prompts/my-prompt' },
      });

      expect(response.error).toBeUndefined();
      const contents = (response.result as { contents: Array<{ text: string }> }).contents;
      expect(contents[0]!.text).toBe('The content here');
    });

    it('should return error for unknown mcpctl resource', async () => {
      router.setPromptConfig(mcpdClient, 'proj');

      const response = await router.route({
        jsonrpc: '2.0',
        id: 3,
        method: 'resources/read',
        params: { uri: 'mcpctl://prompts/nonexistent' },
      });

      expect(response.error?.code).toBe(-32602);
      expect(response.error?.message).toContain('Resource not found');
    });

    it('should not fail when mcpd is unavailable', async () => {
      router.setPromptConfig(mcpdClient, 'proj');
      vi.mocked(mcpdClient.get).mockRejectedValue(new Error('Connection refused'));

      const response = await router.route({ jsonrpc: '2.0', id: 1, method: 'resources/list' });

      // Should succeed with empty resources (upstream errors are swallowed)
      expect(response.error).toBeUndefined();
      const resources = (response.result as { resources: unknown[] }).resources;
      expect(resources).toEqual([]);
    });
  });

  // Without a sessionId in the routing context, no session information may
  // leak into mcpd requests.
  describe('session isolation', () => {
    it('should not include session parameter when no sessionId in context', async () => {
      router.setPromptConfig(mcpdClient, 'proj');
      vi.mocked(mcpdClient.get).mockResolvedValue([]);

      await router.route({ jsonrpc: '2.0', id: 1, method: 'resources/list' });

      expect(mcpdClient.get).toHaveBeenCalledWith(
        '/api/v1/projects/proj/prompts/visible',
      );
    });

    it('should not include session in propose when no context', async () => {
      router.setPromptConfig(mcpdClient, 'proj');

      await router.route({
        jsonrpc: '2.0',
        id: 2,
        method: 'tools/call',
        params: {
          name: 'propose_prompt',
          arguments: { name: 'test', content: 'stuff' },
        },
      });

      expect(mcpdClient.post).toHaveBeenCalledWith(
        '/api/v1/projects/proj/promptrequests',
        { name: 'test', content: 'stuff' },
      );
    });
  });
});
|
||||
@@ -2,3 +2,4 @@ export * from './types/index.js';
|
||||
export * from './validation/index.js';
|
||||
export * from './constants/index.js';
|
||||
export * from './utils/index.js';
|
||||
export * from './secrets/index.js';
|
||||
|
||||
63
src/shared/src/secrets/file-store.ts
Normal file
63
src/shared/src/secrets/file-store.ts
Normal file
@@ -0,0 +1,63 @@
|
||||
import { existsSync, mkdirSync, readFileSync, writeFileSync, chmodSync } from 'node:fs';
|
||||
import { join } from 'node:path';
|
||||
import { homedir } from 'node:os';
|
||||
import type { SecretStore, SecretStoreDeps } from './types.js';
|
||||
|
||||
function defaultConfigDir(): string {
|
||||
return join(homedir(), '.mcpctl');
|
||||
}
|
||||
|
||||
function secretsPath(configDir: string): string {
|
||||
return join(configDir, 'secrets');
|
||||
}
|
||||
|
||||
export class FileSecretStore implements SecretStore {
|
||||
private readonly configDir: string;
|
||||
|
||||
constructor(deps?: SecretStoreDeps) {
|
||||
this.configDir = deps?.configDir ?? defaultConfigDir();
|
||||
}
|
||||
|
||||
backend(): string {
|
||||
return 'file';
|
||||
}
|
||||
|
||||
async get(key: string): Promise<string | null> {
|
||||
const data = this.readAll();
|
||||
return data[key] ?? null;
|
||||
}
|
||||
|
||||
async set(key: string, value: string): Promise<void> {
|
||||
const data = this.readAll();
|
||||
data[key] = value;
|
||||
this.writeAll(data);
|
||||
}
|
||||
|
||||
async delete(key: string): Promise<boolean> {
|
||||
const data = this.readAll();
|
||||
if (!(key in data)) return false;
|
||||
delete data[key];
|
||||
this.writeAll(data);
|
||||
return true;
|
||||
}
|
||||
|
||||
private readAll(): Record<string, string> {
|
||||
const path = secretsPath(this.configDir);
|
||||
if (!existsSync(path)) return {};
|
||||
try {
|
||||
const raw = readFileSync(path, 'utf-8');
|
||||
return JSON.parse(raw) as Record<string, string>;
|
||||
} catch {
|
||||
return {};
|
||||
}
|
||||
}
|
||||
|
||||
private writeAll(data: Record<string, string>): void {
|
||||
if (!existsSync(this.configDir)) {
|
||||
mkdirSync(this.configDir, { recursive: true });
|
||||
}
|
||||
const path = secretsPath(this.configDir);
|
||||
writeFileSync(path, JSON.stringify(data, null, 2) + '\n', 'utf-8');
|
||||
chmodSync(path, 0o600);
|
||||
}
|
||||
}
|
||||
97
src/shared/src/secrets/gnome-keyring.ts
Normal file
97
src/shared/src/secrets/gnome-keyring.ts
Normal file
@@ -0,0 +1,97 @@
|
||||
import { spawn } from 'node:child_process';
|
||||
import { execFile } from 'node:child_process';
|
||||
import { promisify } from 'node:util';
|
||||
import type { SecretStore } from './types.js';
|
||||
|
||||
// Promisified execFile, used only by the dependency-free isAvailable() probe.
const execFileAsync = promisify(execFile);
// `service` attribute under which all mcpctl secrets are filed in the keyring.
const SERVICE = 'mcpctl';

// Shape of the injectable process runner. `stdin`, when provided, is piped to
// the child so secret values never appear on the command line.
export type RunCommand = (cmd: string, args: string[], stdin?: string) => Promise<{ stdout: string; code: number }>;
|
||||
|
||||
function defaultRunCommand(cmd: string, args: string[], stdin?: string): Promise<{ stdout: string; code: number }> {
|
||||
return new Promise((resolve, reject) => {
|
||||
const child = spawn(cmd, args, {
|
||||
stdio: ['pipe', 'pipe', 'pipe'],
|
||||
timeout: 5000,
|
||||
});
|
||||
|
||||
const stdoutChunks: Buffer[] = [];
|
||||
child.stdout.on('data', (chunk: Buffer) => stdoutChunks.push(chunk));
|
||||
|
||||
child.on('error', reject);
|
||||
child.on('close', (code) => {
|
||||
const stdout = Buffer.concat(stdoutChunks).toString('utf-8');
|
||||
resolve({ stdout, code: code ?? 1 });
|
||||
});
|
||||
|
||||
if (stdin !== undefined) {
|
||||
child.stdin.write(stdin);
|
||||
child.stdin.end();
|
||||
} else {
|
||||
child.stdin.end();
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
// Constructor dependencies for GnomeKeyringStore; `run` lets tests substitute
// a fake process runner for the real secret-tool invocations.
export interface GnomeKeyringDeps {
  run?: RunCommand;
}
|
||||
|
||||
export class GnomeKeyringStore implements SecretStore {
|
||||
private readonly run: RunCommand;
|
||||
|
||||
constructor(deps?: GnomeKeyringDeps) {
|
||||
this.run = deps?.run ?? defaultRunCommand;
|
||||
}
|
||||
|
||||
backend(): string {
|
||||
return 'gnome-keyring';
|
||||
}
|
||||
|
||||
async get(key: string): Promise<string | null> {
|
||||
try {
|
||||
const { stdout, code } = await this.run(
|
||||
'secret-tool', ['lookup', 'service', SERVICE, 'key', key],
|
||||
);
|
||||
if (code !== 0 || !stdout) return null;
|
||||
return stdout;
|
||||
} catch {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
async set(key: string, value: string): Promise<void> {
|
||||
const { code } = await this.run(
|
||||
'secret-tool',
|
||||
['store', '--label', `mcpctl: ${key}`, 'service', SERVICE, 'key', key],
|
||||
value,
|
||||
);
|
||||
if (code !== 0) {
|
||||
throw new Error(`secret-tool store exited with code ${code}`);
|
||||
}
|
||||
}
|
||||
|
||||
async delete(key: string): Promise<boolean> {
|
||||
try {
|
||||
const { code } = await this.run(
|
||||
'secret-tool', ['clear', 'service', SERVICE, 'key', key],
|
||||
);
|
||||
return code === 0;
|
||||
} catch {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
static async isAvailable(deps?: { run?: RunCommand }): Promise<boolean> {
|
||||
try {
|
||||
if (deps?.run) {
|
||||
const { code } = await deps.run('secret-tool', ['--version']);
|
||||
return code === 0;
|
||||
}
|
||||
await execFileAsync('secret-tool', ['--version'], { timeout: 3000 });
|
||||
return true;
|
||||
} catch {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
15
src/shared/src/secrets/index.ts
Normal file
15
src/shared/src/secrets/index.ts
Normal file
@@ -0,0 +1,15 @@
|
||||
export type { SecretStore, SecretStoreDeps } from './types.js';
|
||||
export { FileSecretStore } from './file-store.js';
|
||||
export { GnomeKeyringStore } from './gnome-keyring.js';
|
||||
export type { GnomeKeyringDeps, RunCommand } from './gnome-keyring.js';
|
||||
|
||||
import { GnomeKeyringStore } from './gnome-keyring.js';
|
||||
import { FileSecretStore } from './file-store.js';
|
||||
import type { SecretStore, SecretStoreDeps } from './types.js';
|
||||
|
||||
export async function createSecretStore(deps?: SecretStoreDeps): Promise<SecretStore> {
|
||||
if (await GnomeKeyringStore.isAvailable()) {
|
||||
return new GnomeKeyringStore();
|
||||
}
|
||||
return new FileSecretStore(deps);
|
||||
}
|
||||
10
src/shared/src/secrets/types.ts
Normal file
10
src/shared/src/secrets/types.ts
Normal file
@@ -0,0 +1,10 @@
|
||||
// Minimal async key/value contract every secret backend implements.
export interface SecretStore {
  // Resolve the stored value, or null when the key is absent.
  get(key: string): Promise<string | null>;
  // Create or overwrite the value for `key`.
  set(key: string, value: string): Promise<void>;
  // Remove `key`; resolves false when there was nothing to remove.
  delete(key: string): Promise<boolean>;
  // Short backend identifier (e.g. 'file', 'gnome-keyring') for diagnostics.
  backend(): string;
}

// Optional wiring for stores that persist to disk.
export interface SecretStoreDeps {
  // Directory holding the secrets file; defaults to ~/.mcpctl when omitted.
  configDir?: string;
}
|
||||
24
src/shared/tests/secrets/factory.test.ts
Normal file
24
src/shared/tests/secrets/factory.test.ts
Normal file
@@ -0,0 +1,24 @@
|
||||
import { describe, it, expect, vi, afterEach } from 'vitest';
|
||||
import { createSecretStore } from '../../src/secrets/index.js';
|
||||
import { GnomeKeyringStore } from '../../src/secrets/gnome-keyring.js';
|
||||
import { FileSecretStore } from '../../src/secrets/file-store.js';
|
||||
|
||||
afterEach(() => {
|
||||
vi.restoreAllMocks();
|
||||
});
|
||||
|
||||
describe('createSecretStore', () => {
|
||||
it('returns GnomeKeyringStore when secret-tool is available', async () => {
|
||||
vi.spyOn(GnomeKeyringStore, 'isAvailable').mockResolvedValue(true);
|
||||
const store = await createSecretStore();
|
||||
expect(store.backend()).toBe('gnome-keyring');
|
||||
expect(store).toBeInstanceOf(GnomeKeyringStore);
|
||||
});
|
||||
|
||||
it('returns FileSecretStore when secret-tool is not available', async () => {
|
||||
vi.spyOn(GnomeKeyringStore, 'isAvailable').mockResolvedValue(false);
|
||||
const store = await createSecretStore();
|
||||
expect(store.backend()).toBe('file');
|
||||
expect(store).toBeInstanceOf(FileSecretStore);
|
||||
});
|
||||
});
|
||||
93
src/shared/tests/secrets/file-store.test.ts
Normal file
93
src/shared/tests/secrets/file-store.test.ts
Normal file
@@ -0,0 +1,93 @@
|
||||
import { describe, it, expect, beforeEach, afterEach } from 'vitest';
|
||||
import { mkdtempSync, rmSync, statSync, existsSync, writeFileSync } from 'node:fs';
|
||||
import { join } from 'node:path';
|
||||
import { tmpdir } from 'node:os';
|
||||
import { FileSecretStore } from '../../src/secrets/file-store.js';
|
||||
|
||||
// Each test gets a throwaway config directory, removed again afterwards so
// secrets never persist between tests or runs.
let tempDir: string;

beforeEach(() => {
  tempDir = mkdtempSync(join(tmpdir(), 'mcpctl-secrets-test-'));
});

afterEach(() => {
  rmSync(tempDir, { recursive: true, force: true });
});
|
||||
|
||||
// Behavioral tests for the JSON-file secret store; each test builds a store
// over the per-test tempDir fixture.
describe('FileSecretStore', () => {
  it('returns null for missing key', async () => {
    const store = new FileSecretStore({ configDir: tempDir });
    expect(await store.get('nonexistent')).toBeNull();
  });

  it('stores and retrieves a secret', async () => {
    const store = new FileSecretStore({ configDir: tempDir });
    await store.set('api-key', 'sk-12345');
    expect(await store.get('api-key')).toBe('sk-12345');
  });

  it('overwrites existing values', async () => {
    const store = new FileSecretStore({ configDir: tempDir });
    await store.set('api-key', 'old-value');
    await store.set('api-key', 'new-value');
    expect(await store.get('api-key')).toBe('new-value');
  });

  it('stores multiple keys', async () => {
    const store = new FileSecretStore({ configDir: tempDir });
    await store.set('key-a', 'value-a');
    await store.set('key-b', 'value-b');
    expect(await store.get('key-a')).toBe('value-a');
    expect(await store.get('key-b')).toBe('value-b');
  });

  it('deletes a key', async () => {
    const store = new FileSecretStore({ configDir: tempDir });
    await store.set('api-key', 'sk-12345');
    expect(await store.delete('api-key')).toBe(true);
    expect(await store.get('api-key')).toBeNull();
  });

  it('returns false when deleting nonexistent key', async () => {
    const store = new FileSecretStore({ configDir: tempDir });
    expect(await store.delete('nonexistent')).toBe(false);
  });

  // Secrets are plaintext on disk, so the file must be owner-only.
  it('sets 0600 permissions on secrets file', async () => {
    const store = new FileSecretStore({ configDir: tempDir });
    await store.set('api-key', 'sk-12345');
    const stat = statSync(join(tempDir, 'secrets'));
    expect(stat.mode & 0o777).toBe(0o600);
  });

  it('creates config dir if missing', async () => {
    const nested = join(tempDir, 'sub', 'dir');
    const store = new FileSecretStore({ configDir: nested });
    await store.set('api-key', 'sk-12345');
    expect(existsSync(join(nested, 'secrets'))).toBe(true);
  });

  // A corrupted store file degrades to "empty", never an exception.
  it('recovers from corrupted JSON', async () => {
    writeFileSync(join(tempDir, 'secrets'), 'NOT JSON!!!', 'utf-8');
    const store = new FileSecretStore({ configDir: tempDir });
    // Should not throw, returns null for any key
    expect(await store.get('api-key')).toBeNull();
    // Should be able to write over corrupted file
    await store.set('api-key', 'fresh-value');
    expect(await store.get('api-key')).toBe('fresh-value');
  });

  it('reports file backend', () => {
    const store = new FileSecretStore({ configDir: tempDir });
    expect(store.backend()).toBe('file');
  });

  it('preserves other keys on delete', async () => {
    const store = new FileSecretStore({ configDir: tempDir });
    await store.set('key-a', 'value-a');
    await store.set('key-b', 'value-b');
    await store.delete('key-a');
    expect(await store.get('key-a')).toBeNull();
    expect(await store.get('key-b')).toBe('value-b');
  });
});
|
||||
125
src/shared/tests/secrets/gnome-keyring.test.ts
Normal file
125
src/shared/tests/secrets/gnome-keyring.test.ts
Normal file
@@ -0,0 +1,125 @@
|
||||
import { describe, it, expect, vi } from 'vitest';
|
||||
import { GnomeKeyringStore } from '../../src/secrets/gnome-keyring.js';
|
||||
import type { RunCommand } from '../../src/secrets/gnome-keyring.js';
|
||||
|
||||
function mockRun(
|
||||
responses: Record<string, { stdout: string; code: number }>,
|
||||
): RunCommand {
|
||||
return vi.fn(async (cmd: string, args: string[], _stdin?: string) => {
|
||||
const key = `${cmd} ${args.join(' ')}`;
|
||||
for (const [pattern, response] of Object.entries(responses)) {
|
||||
if (key.includes(pattern)) return response;
|
||||
}
|
||||
return { stdout: '', code: 1 };
|
||||
});
|
||||
}
|
||||
|
||||
// Tests for the secret-tool-backed store; every test injects a fake process
// runner so no real keyring daemon is ever touched.
describe('GnomeKeyringStore', () => {
  describe('get', () => {
    it('returns value on success', async () => {
      const run = mockRun({ 'lookup': { stdout: 'my-secret', code: 0 } });
      const store = new GnomeKeyringStore({ run });
      expect(await store.get('api-key')).toBe('my-secret');
    });

    it('returns null on exit code 1', async () => {
      const run = mockRun({ 'lookup': { stdout: '', code: 1 } });
      const store = new GnomeKeyringStore({ run });
      expect(await store.get('api-key')).toBeNull();
    });

    // Exit 0 with no output also means "no entry".
    it('returns null on empty stdout', async () => {
      const run = mockRun({ 'lookup': { stdout: '', code: 0 } });
      const store = new GnomeKeyringStore({ run });
      expect(await store.get('api-key')).toBeNull();
    });

    it('returns null on error', async () => {
      const run = vi.fn().mockRejectedValue(new Error('timeout'));
      const store = new GnomeKeyringStore({ run });
      expect(await store.get('api-key')).toBeNull();
    });

    it('calls secret-tool with correct args', async () => {
      const run = vi.fn().mockResolvedValue({ stdout: 'val', code: 0 });
      const store = new GnomeKeyringStore({ run });
      await store.get('my-key');
      expect(run).toHaveBeenCalledWith(
        'secret-tool',
        ['lookup', 'service', 'mcpctl', 'key', 'my-key'],
      );
    });
  });

  describe('set', () => {
    // The secret value must arrive via stdin (third arg), never argv.
    it('calls secret-tool store with value as stdin', async () => {
      const run = vi.fn().mockResolvedValue({ stdout: '', code: 0 });
      const store = new GnomeKeyringStore({ run });
      await store.set('api-key', 'secret-value');
      expect(run).toHaveBeenCalledWith(
        'secret-tool',
        ['store', '--label', 'mcpctl: api-key', 'service', 'mcpctl', 'key', 'api-key'],
        'secret-value',
      );
    });

    it('throws on non-zero exit code', async () => {
      const run = vi.fn().mockResolvedValue({ stdout: '', code: 1 });
      const store = new GnomeKeyringStore({ run });
      await expect(store.set('api-key', 'val')).rejects.toThrow('exited with code 1');
    });
  });

  describe('delete', () => {
    it('returns true on success', async () => {
      const run = mockRun({ 'clear': { stdout: '', code: 0 } });
      const store = new GnomeKeyringStore({ run });
      expect(await store.delete('api-key')).toBe(true);
    });

    it('returns false on failure', async () => {
      const run = mockRun({ 'clear': { stdout: '', code: 1 } });
      const store = new GnomeKeyringStore({ run });
      expect(await store.delete('api-key')).toBe(false);
    });

    it('returns false on error', async () => {
      const run = vi.fn().mockRejectedValue(new Error('fail'));
      const store = new GnomeKeyringStore({ run });
      expect(await store.delete('api-key')).toBe(false);
    });

    it('calls secret-tool clear with correct args', async () => {
      const run = vi.fn().mockResolvedValue({ stdout: '', code: 0 });
      const store = new GnomeKeyringStore({ run });
      await store.delete('my-key');
      expect(run).toHaveBeenCalledWith(
        'secret-tool',
        ['clear', 'service', 'mcpctl', 'key', 'my-key'],
      );
    });
  });

  describe('isAvailable', () => {
    it('returns true when secret-tool exists', async () => {
      const run = vi.fn().mockResolvedValue({ stdout: '0.20', code: 0 });
      expect(await GnomeKeyringStore.isAvailable({ run })).toBe(true);
    });

    it('returns false when secret-tool not found', async () => {
      const run = vi.fn().mockRejectedValue(new Error('ENOENT'));
      expect(await GnomeKeyringStore.isAvailable({ run })).toBe(false);
    });

    it('returns false on non-zero exit', async () => {
      const run = vi.fn().mockResolvedValue({ stdout: '', code: 127 });
      expect(await GnomeKeyringStore.isAvailable({ run })).toBe(false);
    });
  });

  it('reports gnome-keyring backend', () => {
    const run = vi.fn().mockResolvedValue({ stdout: '', code: 0 });
    const store = new GnomeKeyringStore({ run });
    expect(store.backend()).toBe('gnome-keyring');
  });
});
|
||||
Reference in New Issue
Block a user