Compare commits
47 Commits
main
...
fix/update
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
9e9a2f4a54 | ||
| c8cdd7f514 | |||
|
|
ec1dfe7438 | ||
| 50b4112398 | |||
|
|
bb17a892d6 | ||
| a8117091a1 | |||
|
|
dcda93d179 | ||
| a6b5e24a8d | |||
|
|
3a6e58274c | ||
|
|
c819b65175 | ||
|
|
c3ef5a664f | ||
|
|
4c2927a16e | ||
| 79dd6e723d | |||
|
|
cde1c59fd6 | ||
| daa5860ed2 | |||
|
|
ecbf48dd49 | ||
| d38b5aac60 | |||
|
|
d07d4d11dd | ||
| fa58c1b5ed | |||
|
|
dd1dfc629d | ||
| 7b3dab142e | |||
|
|
4c127a7dc3 | ||
| c1e3e4aed6 | |||
|
|
e45c6079c1 | ||
| e4aef3acf1 | |||
|
|
a2cda38850 | ||
| 081e90de0f | |||
|
|
4e3d896ef6 | ||
| 0823e965bf | |||
|
|
c97219f85e | ||
| 93adcd4be7 | |||
|
|
d58e6e153f | ||
|
|
1e8847bb63 | ||
|
|
2a0deaa225 | ||
| 4eef6e38a2 | |||
|
|
ca02340a4c | ||
|
|
02254f2aac | ||
|
|
540dd6fd63 | ||
| a05a4c4816 | |||
|
|
97ade470df | ||
|
|
b25ff98374 | ||
|
|
22fe9c3435 | ||
| 72643fceda | |||
|
|
467357c2c6 | ||
| d6a80fc03d | |||
|
|
c07da826a0 | ||
|
|
0482944056 |
@@ -1,4 +1,4 @@
|
||||
name: CI/CD
|
||||
name: CI
|
||||
|
||||
on:
|
||||
push:
|
||||
@@ -6,35 +6,25 @@ on:
|
||||
pull_request:
|
||||
branches: [main]
|
||||
|
||||
env:
|
||||
GITEA_REGISTRY: 10.0.0.194:3012
|
||||
GITEA_PUBLIC_URL: https://mysources.co.uk
|
||||
GITEA_OWNER: michal
|
||||
|
||||
# ============================================================
|
||||
# Required Gitea secrets:
|
||||
# PACKAGES_TOKEN — Gitea API token (packages + registry)
|
||||
# ============================================================
|
||||
|
||||
jobs:
|
||||
# ── CI checks (run in parallel on every push/PR) ──────────
|
||||
|
||||
lint:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- uses: pnpm/action-setup@v4
|
||||
with:
|
||||
version: 9
|
||||
|
||||
- uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: 20
|
||||
# no pnpm cache — concurrent cache restore hangs on single-worker runner
|
||||
cache: pnpm
|
||||
|
||||
- run: pnpm install --frozen-lockfile
|
||||
|
||||
- name: Lint
|
||||
run: pnpm lint || echo "::warning::Lint has errors — not blocking CI yet"
|
||||
run: pnpm lint
|
||||
|
||||
typecheck:
|
||||
runs-on: ubuntu-latest
|
||||
@@ -42,11 +32,13 @@ jobs:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- uses: pnpm/action-setup@v4
|
||||
with:
|
||||
version: 9
|
||||
|
||||
- uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: 20
|
||||
# no pnpm cache — concurrent cache restore hangs on single-worker runner
|
||||
cache: pnpm
|
||||
|
||||
- run: pnpm install --frozen-lockfile
|
||||
|
||||
@@ -62,196 +54,22 @@ jobs:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- uses: pnpm/action-setup@v4
|
||||
with:
|
||||
version: 9
|
||||
|
||||
- uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: 20
|
||||
# no pnpm cache — concurrent cache restore hangs on single-worker runner
|
||||
cache: pnpm
|
||||
|
||||
- run: pnpm install --frozen-lockfile
|
||||
|
||||
- name: Generate Prisma client
|
||||
run: pnpm --filter @mcpctl/db exec prisma generate
|
||||
|
||||
- name: Build (needed by completions test)
|
||||
run: pnpm build
|
||||
|
||||
- name: Run tests
|
||||
run: pnpm test:run
|
||||
|
||||
# ── Smoke tests (full stack: postgres + mcpd + mcplocal) ──
|
||||
|
||||
smoke:
|
||||
runs-on: ubuntu-latest
|
||||
needs: [lint, typecheck, test]
|
||||
services:
|
||||
postgres:
|
||||
image: postgres:16
|
||||
env:
|
||||
POSTGRES_USER: mcpctl
|
||||
POSTGRES_PASSWORD: mcpctl
|
||||
POSTGRES_DB: mcpctl
|
||||
options: >-
|
||||
--health-cmd pg_isready
|
||||
--health-interval 10s
|
||||
--health-timeout 5s
|
||||
--health-retries 5
|
||||
env:
|
||||
DATABASE_URL: postgresql://mcpctl:mcpctl@postgres:5432/mcpctl
|
||||
MCPD_PORT: "3100"
|
||||
MCPD_HOST: "0.0.0.0"
|
||||
MCPLOCAL_HTTP_PORT: "3200"
|
||||
MCPLOCAL_MCPD_URL: http://localhost:3100
|
||||
DOCKER_API_VERSION: "1.43"
|
||||
ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- uses: pnpm/action-setup@v4
|
||||
|
||||
- uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: 20
|
||||
# no pnpm cache — concurrent cache restore hangs on single-worker runner
|
||||
|
||||
- run: pnpm install --frozen-lockfile
|
||||
|
||||
- name: Generate Prisma client
|
||||
run: pnpm --filter @mcpctl/db exec prisma generate
|
||||
|
||||
- name: Build all packages
|
||||
run: pnpm build
|
||||
|
||||
- name: Push database schema
|
||||
run: pnpm --filter @mcpctl/db exec prisma db push --accept-data-loss
|
||||
|
||||
- name: Seed templates
|
||||
run: node src/mcpd/dist/seed-runner.js
|
||||
|
||||
- name: Start mcpd
|
||||
run: node src/mcpd/dist/main.js &
|
||||
|
||||
- name: Wait for mcpd
|
||||
run: |
|
||||
for i in $(seq 1 30); do
|
||||
if curl -sf http://localhost:3100/health > /dev/null 2>&1; then
|
||||
echo "mcpd is ready"
|
||||
exit 0
|
||||
fi
|
||||
echo "Waiting for mcpd... ($i/30)"
|
||||
sleep 1
|
||||
done
|
||||
echo "::error::mcpd failed to start within 30s"
|
||||
exit 1
|
||||
|
||||
- name: Create CI user and session
|
||||
run: |
|
||||
pnpm --filter @mcpctl/db exec node -e "
|
||||
const { PrismaClient } = require('@prisma/client');
|
||||
const crypto = require('crypto');
|
||||
(async () => {
|
||||
const prisma = new PrismaClient();
|
||||
const user = await prisma.user.upsert({
|
||||
where: { email: 'ci@test.local' },
|
||||
create: { email: 'ci@test.local', name: 'CI', passwordHash: '!ci-no-login', role: 'USER' },
|
||||
update: {},
|
||||
});
|
||||
const token = crypto.randomBytes(32).toString('hex');
|
||||
await prisma.session.create({
|
||||
data: { token, userId: user.id, expiresAt: new Date(Date.now() + 86400000) },
|
||||
});
|
||||
await prisma.rbacDefinition.create({
|
||||
data: {
|
||||
name: 'ci-admin',
|
||||
subjects: [{ kind: 'User', name: 'ci@test.local' }],
|
||||
roleBindings: [
|
||||
{ role: 'edit', resource: '*' },
|
||||
{ role: 'run', resource: '*' },
|
||||
{ role: 'run', action: 'logs' },
|
||||
{ role: 'run', action: 'backup' },
|
||||
{ role: 'run', action: 'restore' },
|
||||
],
|
||||
},
|
||||
});
|
||||
const os = require('os'), fs = require('fs'), path = require('path');
|
||||
const dir = path.join(os.homedir(), '.mcpctl');
|
||||
fs.mkdirSync(dir, { recursive: true });
|
||||
fs.writeFileSync(path.join(dir, 'credentials'),
|
||||
JSON.stringify({ token, mcpdUrl: 'http://localhost:3100', user: 'ci@test.local' }));
|
||||
console.log('CI user + session + RBAC created, credentials written');
|
||||
await prisma.\$disconnect();
|
||||
})();
|
||||
"
|
||||
|
||||
- name: Create mcpctl CLI wrapper
|
||||
run: |
|
||||
printf '#!/bin/sh\nexec node "%s/src/cli/dist/index.js" "$@"\n' "$GITHUB_WORKSPACE" > /usr/local/bin/mcpctl
|
||||
chmod +x /usr/local/bin/mcpctl
|
||||
|
||||
- name: Configure mcplocal LLM provider
|
||||
run: |
|
||||
mkdir -p ~/.mcpctl
|
||||
cat > ~/.mcpctl/config.json << 'CONF'
|
||||
{"llm":{"providers":[{"name":"anthropic","type":"anthropic","model":"claude-haiku-3-5-20241022","tier":"fast"}]}}
|
||||
CONF
|
||||
printf '{"anthropic-api-key":"%s"}\n' "$ANTHROPIC_API_KEY" > ~/.mcpctl/secrets
|
||||
chmod 600 ~/.mcpctl/secrets
|
||||
|
||||
- name: Start mcplocal
|
||||
run: nohup node src/mcplocal/dist/main.js > /tmp/mcplocal.log 2>&1 &
|
||||
|
||||
- name: Wait for mcplocal
|
||||
run: |
|
||||
for i in $(seq 1 30); do
|
||||
if curl -sf http://localhost:3200/health > /dev/null 2>&1; then
|
||||
echo "mcplocal is ready"
|
||||
exit 0
|
||||
fi
|
||||
echo "Waiting for mcplocal... ($i/30)"
|
||||
sleep 1
|
||||
done
|
||||
echo "::error::mcplocal failed to start within 30s"
|
||||
exit 1
|
||||
|
||||
- name: Apply smoke test fixtures
|
||||
run: mcpctl apply -f src/mcplocal/tests/smoke/fixtures/smoke-data.yaml
|
||||
|
||||
- name: Wait for server instance
|
||||
run: |
|
||||
echo "Waiting for smoke-aws-docs instance..."
|
||||
for i in $(seq 1 60); do
|
||||
STATUS=$(mcpctl get instances -o json 2>/dev/null | \
|
||||
node -e "try{const d=JSON.parse(require('fs').readFileSync('/dev/stdin','utf-8'));const i=Array.isArray(d)?d.find(x=>x.serverName&&x.serverName.includes('aws')):null;console.log(i?.status??'WAITING')}catch{console.log('WAITING')}" 2>/dev/null || echo "WAITING")
|
||||
echo " Instance status: $STATUS ($i/60)"
|
||||
if [ "$STATUS" = "RUNNING" ]; then
|
||||
echo "Instance is running!"
|
||||
break
|
||||
fi
|
||||
if [ "$i" = "60" ]; then
|
||||
echo "::warning::Instance did not reach RUNNING — container management may not be available in CI"
|
||||
echo "API-layer smoke tests will still run"
|
||||
fi
|
||||
sleep 5
|
||||
done
|
||||
|
||||
- name: Run smoke tests
|
||||
# Exclude tests that need a running MCP server instance (Docker) or
|
||||
# LLM providers — CI has neither. --no-file-parallelism avoids
|
||||
# concurrent requests crashing mcplocal.
|
||||
run: >-
|
||||
pnpm --filter mcplocal exec vitest run
|
||||
--config vitest.smoke.config.ts
|
||||
--no-file-parallelism
|
||||
--exclude '**/security.test.ts'
|
||||
--exclude '**/audit.test.ts'
|
||||
--exclude '**/proxy-pipeline.test.ts'
|
||||
|
||||
- name: Dump mcplocal log on failure
|
||||
if: failure()
|
||||
run: cat /tmp/mcplocal.log || true
|
||||
|
||||
# ── Build & package RPM ───────────────────────────────────
|
||||
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
needs: [lint, typecheck, test]
|
||||
@@ -259,16 +77,15 @@ jobs:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- uses: pnpm/action-setup@v4
|
||||
with:
|
||||
version: 9
|
||||
|
||||
- uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: 20
|
||||
# no pnpm cache — concurrent cache restore hangs on single-worker runner
|
||||
cache: pnpm
|
||||
|
||||
- name: Install dependencies (hoisted for bun compile compatibility)
|
||||
run: |
|
||||
echo "node-linker=hoisted" >> .npmrc
|
||||
pnpm install --frozen-lockfile
|
||||
- run: pnpm install --frozen-lockfile
|
||||
|
||||
- name: Generate Prisma client
|
||||
run: pnpm --filter @mcpctl/db exec prisma generate
|
||||
@@ -276,153 +93,50 @@ jobs:
|
||||
- name: Build all packages
|
||||
run: pnpm build
|
||||
|
||||
- name: Generate shell completions
|
||||
run: pnpm completions:generate
|
||||
package:
|
||||
runs-on: ubuntu-latest
|
||||
needs: [build]
|
||||
if: github.ref == 'refs/heads/main' && github.event_name == 'push'
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- uses: oven-sh/setup-bun@v2
|
||||
- uses: pnpm/action-setup@v4
|
||||
with:
|
||||
version: 9
|
||||
|
||||
- uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: 20
|
||||
cache: pnpm
|
||||
|
||||
- run: pnpm install --frozen-lockfile
|
||||
|
||||
- name: Generate Prisma client
|
||||
run: pnpm --filter @mcpctl/db exec prisma generate
|
||||
|
||||
- name: Build TypeScript
|
||||
run: pnpm build
|
||||
|
||||
- name: Install bun
|
||||
uses: oven-sh/setup-bun@v2
|
||||
|
||||
- name: Install nfpm
|
||||
run: |
|
||||
curl -sL -o /tmp/nfpm.tar.gz "https://github.com/goreleaser/nfpm/releases/download/v2.45.0/nfpm_2.45.0_Linux_x86_64.tar.gz"
|
||||
tar xzf /tmp/nfpm.tar.gz -C /usr/local/bin nfpm
|
||||
|
||||
- name: Bundle standalone binaries
|
||||
run: |
|
||||
mkdir -p dist
|
||||
# Stub for optional dep that Ink tries to import (only used when DEV=true)
|
||||
# Copy instead of symlink — bun can't read directory symlinks
|
||||
if [ ! -e node_modules/react-devtools-core/package.json ]; then
|
||||
rm -rf node_modules/react-devtools-core
|
||||
cp -r src/cli/stubs/react-devtools-core node_modules/react-devtools-core
|
||||
fi
|
||||
bun build src/cli/src/index.ts --compile --outfile dist/mcpctl
|
||||
bun build src/mcplocal/src/main.ts --compile --outfile dist/mcpctl-local
|
||||
- name: Bundle standalone binary
|
||||
run: bun build src/cli/src/index.ts --compile --outfile dist/mcpctl
|
||||
|
||||
- name: Package RPM
|
||||
- name: Build RPM
|
||||
run: nfpm pkg --packager rpm --target dist/
|
||||
|
||||
- name: Package DEB
|
||||
run: nfpm pkg --packager deb --target dist/
|
||||
|
||||
- name: Upload RPM artifact
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: rpm-package
|
||||
path: dist/mcpctl-*.rpm
|
||||
retention-days: 7
|
||||
|
||||
- name: Upload DEB artifact
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: deb-package
|
||||
path: dist/mcpctl*.deb
|
||||
retention-days: 7
|
||||
|
||||
# ── Release pipeline (main branch push only) ──────────────
|
||||
# NOTE: Docker image builds + deploy happen via `bash fulldeploy.sh`
|
||||
# (not CI) because the runner containers lack the privileged access
|
||||
# needed for container-in-container builds (no /proc/self/uid_map,
|
||||
# no Docker socket access, buildah/podman/kaniko all fail).
|
||||
|
||||
publish-rpm:
|
||||
runs-on: ubuntu-latest
|
||||
needs: [build]
|
||||
if: github.ref == 'refs/heads/main' && github.event_name == 'push'
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Download RPM artifact
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: rpm-package
|
||||
path: dist/
|
||||
|
||||
- name: Install rpm tools
|
||||
run: sudo apt-get update && sudo apt-get install -y rpm
|
||||
|
||||
- name: Publish RPM to Gitea
|
||||
- name: Publish to Gitea packages
|
||||
env:
|
||||
GITEA_TOKEN: ${{ secrets.PACKAGES_TOKEN }}
|
||||
GITEA_URL: http://${{ env.GITEA_REGISTRY }}
|
||||
GITEA_OWNER: ${{ env.GITEA_OWNER }}
|
||||
GITEA_REPO: mcpctl
|
||||
GITEA_TOKEN: ${{ secrets.GITEA_TOKEN }}
|
||||
run: |
|
||||
RPM_FILE=$(ls dist/mcpctl-*.rpm | head -1)
|
||||
RPM_VERSION=$(rpm -qp --queryformat '%{VERSION}-%{RELEASE}' "$RPM_FILE")
|
||||
echo "Publishing $RPM_FILE (version $RPM_VERSION)..."
|
||||
|
||||
# Delete existing version if present
|
||||
HTTP_CODE=$(curl -s -o /dev/null -w "%{http_code}" \
|
||||
-H "Authorization: token ${GITEA_TOKEN}" \
|
||||
"${GITEA_URL}/api/v1/packages/${GITEA_OWNER}/rpm/mcpctl/${RPM_VERSION}")
|
||||
|
||||
if [ "$HTTP_CODE" = "200" ]; then
|
||||
echo "Version exists, replacing..."
|
||||
curl -s -o /dev/null -X DELETE \
|
||||
-H "Authorization: token ${GITEA_TOKEN}" \
|
||||
"${GITEA_URL}/api/v1/packages/${GITEA_OWNER}/rpm/mcpctl/${RPM_VERSION}"
|
||||
fi
|
||||
|
||||
# Upload
|
||||
curl --fail -X PUT \
|
||||
-H "Authorization: token ${GITEA_TOKEN}" \
|
||||
--upload-file "$RPM_FILE" \
|
||||
"${GITEA_URL}/api/packages/${GITEA_OWNER}/rpm/upload"
|
||||
|
||||
echo "Published successfully!"
|
||||
|
||||
# Link package to repo
|
||||
source scripts/link-package.sh
|
||||
link_package "rpm" "mcpctl"
|
||||
|
||||
publish-deb:
|
||||
runs-on: ubuntu-latest
|
||||
needs: [build]
|
||||
if: github.ref == 'refs/heads/main' && github.event_name == 'push'
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Download DEB artifact
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: deb-package
|
||||
path: dist/
|
||||
|
||||
- name: Publish DEB to Gitea
|
||||
env:
|
||||
GITEA_TOKEN: ${{ secrets.PACKAGES_TOKEN }}
|
||||
GITEA_URL: http://${{ env.GITEA_REGISTRY }}
|
||||
GITEA_OWNER: ${{ env.GITEA_OWNER }}
|
||||
GITEA_REPO: mcpctl
|
||||
run: |
|
||||
DEB_FILE=$(ls dist/mcpctl*.deb | head -1)
|
||||
DEB_VERSION=$(dpkg-deb --field "$DEB_FILE" Version)
|
||||
echo "Publishing $DEB_FILE (version $DEB_VERSION)..."
|
||||
|
||||
# Publish to each supported distribution
|
||||
# Debian: trixie (13/stable), forky (14/testing)
|
||||
# Ubuntu: noble (24.04 LTS), plucky (25.04)
|
||||
DISTRIBUTIONS="trixie forky noble plucky"
|
||||
|
||||
for DIST in $DISTRIBUTIONS; do
|
||||
echo " -> $DIST..."
|
||||
HTTP_CODE=$(curl -s -o /dev/null -w "%{http_code}" \
|
||||
-X PUT \
|
||||
-H "Authorization: token ${GITEA_TOKEN}" \
|
||||
--upload-file "$DEB_FILE" \
|
||||
"${GITEA_URL}/api/packages/${GITEA_OWNER}/debian/pool/${DIST}/main/upload")
|
||||
|
||||
if [ "$HTTP_CODE" = "201" ] || [ "$HTTP_CODE" = "200" ]; then
|
||||
echo " Published to $DIST"
|
||||
elif [ "$HTTP_CODE" = "409" ]; then
|
||||
echo " Already exists in $DIST (skipping)"
|
||||
else
|
||||
echo " WARNING: Upload to $DIST returned HTTP $HTTP_CODE"
|
||||
fi
|
||||
done
|
||||
|
||||
echo "Published successfully!"
|
||||
|
||||
# Link package to repo
|
||||
source scripts/link-package.sh
|
||||
link_package "debian" "mcpctl"
|
||||
"${{ github.server_url }}/api/packages/${{ github.repository_owner }}/rpm/upload"
|
||||
|
||||
6
.gitignore
vendored
6
.gitignore
vendored
@@ -38,9 +38,3 @@ pgdata/
|
||||
# Prisma
|
||||
src/db/prisma/migrations/*.sql.backup
|
||||
logs.sh
|
||||
|
||||
# Temp/test files
|
||||
*.backup.json
|
||||
mcpctl-backup.json
|
||||
a.yaml
|
||||
test-mcp.sh
|
||||
|
||||
40
.mcp.json
40
.mcp.json
@@ -1,20 +1,24 @@
|
||||
{
|
||||
"mcpServers": {
|
||||
"mcpctl-development": {
|
||||
"command": "mcpctl",
|
||||
"args": [
|
||||
"mcp",
|
||||
"-p",
|
||||
"mcpctl-development"
|
||||
]
|
||||
},
|
||||
"mcpctl-inspect": {
|
||||
"command": "mcpctl",
|
||||
"args": [
|
||||
"console",
|
||||
"--inspect",
|
||||
"--stdin-mcp"
|
||||
]
|
||||
}
|
||||
}
|
||||
"mcpServers": {
|
||||
"task-master-ai": {
|
||||
"type": "stdio",
|
||||
"command": "npx",
|
||||
"args": [
|
||||
"-y",
|
||||
"task-master-ai"
|
||||
],
|
||||
"env": {
|
||||
"TASK_MASTER_TOOLS": "core",
|
||||
"ANTHROPIC_API_KEY": "YOUR_ANTHROPIC_API_KEY_HERE",
|
||||
"PERPLEXITY_API_KEY": "YOUR_PERPLEXITY_API_KEY_HERE",
|
||||
"OPENAI_API_KEY": "YOUR_OPENAI_KEY_HERE",
|
||||
"GOOGLE_API_KEY": "YOUR_GOOGLE_KEY_HERE",
|
||||
"XAI_API_KEY": "YOUR_XAI_KEY_HERE",
|
||||
"OPENROUTER_API_KEY": "YOUR_OPENROUTER_KEY_HERE",
|
||||
"MISTRAL_API_KEY": "YOUR_MISTRAL_KEY_HERE",
|
||||
"AZURE_OPENAI_API_KEY": "YOUR_AZURE_KEY_HERE",
|
||||
"OLLAMA_API_KEY": "YOUR_OLLAMA_API_KEY_HERE"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,392 +0,0 @@
|
||||
# PRD: Gated Project Experience & Prompt Intelligence
|
||||
|
||||
## Overview
|
||||
|
||||
When 300 developers connect their LLM clients (Claude Code, Cursor, etc.) to mcpctl projects, they need relevant context — security policies, architecture decisions, operational runbooks — without flooding the context window. This feature introduces a gated session flow where the client LLM drives its own context retrieval through keyword-based matching, with the proxy providing a prompt index and encouraging ongoing discovery.
|
||||
|
||||
## Problem
|
||||
|
||||
- Injecting all prompts into instructions doesn't scale (hundreds of pages of policies)
|
||||
- Exposing prompts only as MCP resources means LLMs never read them
|
||||
- An index-only approach works for small numbers but breaks down at scale
|
||||
- No mechanism to link external knowledge (Notion, Docmost) as prompts
|
||||
- LLMs tend to work with whatever they have rather than proactively seek more context
|
||||
|
||||
## Core Concepts
|
||||
|
||||
### Gated Experience
|
||||
|
||||
A project-level flag (`gated: boolean`, default: `true`) that controls whether sessions go through a keyword-driven prompt retrieval flow before accessing project tools and resources.
|
||||
|
||||
**Flow (A + C):**
|
||||
|
||||
1. On `initialize`, instructions include the **prompt index** (names + summaries for all prompts, up to a reasonable cap) and tell client LLM: "Call `begin_session` with 5 keywords describing your task"
|
||||
2. **If client obeys**: `begin_session({ tags: ["zigbee", "lights", "mqtt", "pairing", "automation"] })` → prompt selection (see below) → returns matched prompt content + full prompt index + encouragement to retrieve more → session ungated
|
||||
3. **If client ignores**: First `tools/call` is intercepted → keywords extracted from tool name + arguments → same prompt selection → briefing injected alongside tool result → session ungated
|
||||
4. **Ongoing retrieval**: Client can call `read_prompts({ tags: ["security", "vpn"] })` at any point to retrieve more prompts. The prompt index is always visible so the client LLM can see what's available.
|
||||
|
||||
**Prompt selection — tiered approach:**
|
||||
|
||||
- **Primary (heavy LLM available)**: Tags + full prompt index (names, priorities, summaries, chapters) are sent to the heavy LLM (e.g. Gemini). The LLM understands synonyms, context, and intent — it knows "zigbee" relates to "Z2M" and "Zigbee2MQTT", and that someone working on "lights" probably needs the "common-mistakes" prompt about pairing. The LLM returns a ranked list of relevant prompt names with brief explanations of why each is relevant. The heavy LLM may use the fast LLM for preprocessing if needed (e.g. generating missing summaries on the fly).
|
||||
- **Fallback (no LLM, or `llmProvider=none`)**: Deterministic keyword-based tag matching against summaries/chapters with byte-budget allocation (see "Tag Matching Algorithm" below). Same approach as ResponsePaginator's byte-based fallback. Triggered when: no LLM providers configured, project has `llmProvider: "none"`, or local override sets `provider: "none"`.
|
||||
- **Hybrid (both paths always available)**: Even when heavy LLM does the initial selection, the `read_prompts({ tags: [...] })` tool always uses keyword matching. This way the client LLM can retrieve specific prompts by keyword that the heavy LLM may have missed. The LLM is smart about context, keywords are precise about names — together they cover both fuzzy and exact retrieval.
|
||||
|
||||
**LLM availability resolution** (same chain as existing LLM features):
|
||||
- Project `llmProvider: "none"` → no LLM, keyword fallback only
|
||||
- Project `llmProvider: null` → inherit from global config
|
||||
- Local override `provider: "none"` → no LLM, keyword fallback only
|
||||
- No providers configured → keyword fallback only
|
||||
- Otherwise → use heavy LLM for `begin_session`, fast LLM for summary generation
|
||||
|
||||
### Encouraging Retrieval
|
||||
|
||||
LLMs tend to proceed with incomplete information rather than seek more context. The system must actively counter this at multiple points:
|
||||
|
||||
**In `initialize` instructions:**
|
||||
```
|
||||
You have access to project knowledge containing policies, architecture decisions,
|
||||
and guidelines. Some may contain critical rules about what you're doing. After your
|
||||
initial briefing, if you're unsure about conventions, security requirements, or
|
||||
best practices — request more context using read_prompts. It's always better to
|
||||
check than to guess wrong. The project may have specific rules you don't know about yet.
|
||||
```
|
||||
|
||||
**In `begin_session` response (after matched prompts):**
|
||||
```
|
||||
Other prompts available that may become relevant as your work progresses:
|
||||
- security-policies: Network segmentation, firewall rules, VPN access
|
||||
- naming-conventions: Service and resource naming standards
|
||||
- ...
|
||||
If any of these seem related to what you're doing now or later, request them
|
||||
with read_prompts({ tags: [...] }) or resources/read. Don't assume you have
|
||||
all the context — check when in doubt.
|
||||
```
|
||||
|
||||
**In `read_prompts` response:**
|
||||
```
|
||||
Remember: you can request more prompts at any time with read_prompts({ tags: [...] }).
|
||||
The project may have additional guidelines relevant to your current approach.
|
||||
```
|
||||
|
||||
The tone is not "here's optional reading" but "there are rules you might not know about, and violating them costs more than reading them."
|
||||
|
||||
### Prompt Priority (1-10)
|
||||
|
||||
Every prompt has a priority level that influences selection order and byte-budget allocation:
|
||||
|
||||
| Range | Meaning | Behavior |
|
||||
|-------|---------|----------|
|
||||
| 1-3 | Reference | Low priority, included only on strong keyword match |
|
||||
| 4-6 | Standard | Default priority, included on moderate keyword match |
|
||||
| 7-9 | Important | High priority, lower match threshold |
|
||||
| 10 | Critical | Always included in full, regardless of keyword match (guardrails, common mistakes) |
|
||||
|
||||
Default priority for new prompts: `5`.
|
||||
|
||||
### Prompt Summaries & Chapters (Auto-generated)
|
||||
|
||||
Each prompt gets auto-generated metadata used for the prompt index and tag matching:
|
||||
|
||||
- `summary` (string, ~20 words) — one-line description of what the prompt covers
|
||||
- `chapters` (string[]) — key sections/topics extracted from content
|
||||
|
||||
Generation pipeline:
|
||||
- **Fast LLM available**: Summarize content, extract key topics
|
||||
- **No fast LLM**: First sentence of content + markdown headings via regex
|
||||
- Regenerated on prompt create/update
|
||||
- Cached on the prompt record
|
||||
|
||||
### Tag Matching Algorithm (No-LLM Fallback)
|
||||
|
||||
When no local LLM is available, the system falls back to a deterministic retrieval algorithm:
|
||||
|
||||
1. Client provides tags (5 keywords from `begin_session`, or extracted from tool call)
|
||||
2. For each prompt, compute a match score:
|
||||
- Check tags against prompt `summary` and `chapters` (case-insensitive substring match)
|
||||
- Score = `number_of_matching_tags * base_priority`
|
||||
- Priority 10 prompts: score = infinity (always included)
|
||||
3. Sort by score descending
|
||||
4. Fill a byte budget (configurable, default ~8KB) from top down:
|
||||
- Include full content until budget exhausted
|
||||
- Remaining matched prompts: include as index entries (name + summary)
|
||||
- Non-matched prompts: listed as names only in the "other prompts available" section
|
||||
|
||||
**When `begin_session` is skipped (intercept path):**
|
||||
- Extract keywords from tool name + arguments (e.g., `home-assistant/get_entities({ domain: "light" })` → tags: `["home-assistant", "entities", "light"]`)
|
||||
- Run same matching algorithm
|
||||
- Inject briefing alongside the real tool result
|
||||
|
||||
### `read_prompts` Tool (Ongoing Retrieval)
|
||||
|
||||
Available after session is ungated. Allows the client LLM to request more context at any point:
|
||||
|
||||
```json
|
||||
{
|
||||
"name": "read_prompts",
|
||||
"description": "Request additional project context by keywords. Use this whenever you need guidelines, policies, or conventions related to your current work. It's better to check than to guess.",
|
||||
"inputSchema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"tags": {
|
||||
"type": "array",
|
||||
"items": { "type": "string" },
|
||||
"description": "Keywords describing what context you need (e.g. [\"security\", \"vpn\", \"firewall\"])"
|
||||
}
|
||||
},
|
||||
"required": ["tags"]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Returns matched prompt content + the prompt index reminder.
|
||||
|
||||
### Prompt Links
|
||||
|
||||
A prompt can be a **link** to an MCP resource in another project's server. The linked content is fetched server-side (by the proxy, not the client), enforcing RBAC.
|
||||
|
||||
Format: `project/server:resource-uri`
|
||||
Example: `system-public/docmost-mcp:docmost://pages/architecture-overview`
|
||||
|
||||
Properties:
|
||||
- The proxy fetches linked content using the source project's service account
|
||||
- Client LLM never gets direct access to the source MCP server
|
||||
- Dead links are detected and marked (health check on link resolution)
|
||||
- Dead links generate error log entries
|
||||
|
||||
RBAC for links:
|
||||
- Creating a link requires `edit` permission on RBAC in the target project
|
||||
- A service account permission is created on the source project for the linked resource
|
||||
- Default: admin group members can manage links
|
||||
|
||||
## Schema Changes
|
||||
|
||||
### Project
|
||||
|
||||
Add field:
|
||||
- `gated: boolean` (default: `true`)
|
||||
|
||||
### Prompt
|
||||
|
||||
Add fields:
|
||||
- `priority: integer` (1-10, default: 5)
|
||||
- `summary: string | null` (auto-generated)
|
||||
- `chapters: string[] | null` (auto-generated, stored as JSON)
|
||||
- `linkTarget: string | null` (format: `project/server:resource-uri`, null for regular prompts)
|
||||
|
||||
### PromptRequest
|
||||
|
||||
Add field:
|
||||
- `priority: integer` (1-10, default: 5)
|
||||
|
||||
## API Changes
|
||||
|
||||
### Modified Endpoints
|
||||
|
||||
- `POST /api/v1/prompts` — accept `priority`, `linkTarget`
|
||||
- `PUT /api/v1/prompts/:id` — accept `priority` (not `linkTarget` — links are immutable, delete and recreate)
|
||||
- `POST /api/v1/promptrequests` — accept `priority`
|
||||
- `GET /api/v1/prompts` — return `priority`, `summary`, `linkTarget`, `linkStatus` (alive/dead/unknown)
|
||||
- `GET /api/v1/projects/:name/prompts/visible` — return `priority`, `summary`, `chapters`
|
||||
|
||||
### New Endpoints
|
||||
|
||||
- `POST /api/v1/prompts/:id/regenerate-summary` — force re-generation of summary/chapters
|
||||
- `GET /api/v1/projects/:name/prompt-index` — returns compact index (name, priority, summary, chapters)
|
||||
|
||||
## MCP Protocol Changes (mcplocal router)
|
||||
|
||||
### Session State
|
||||
|
||||
Router tracks per-session state:
|
||||
- `gated: boolean` — starts `true` if project is gated
|
||||
- `tags: string[]` — accumulated tags from begin_session + read_prompts calls
|
||||
- `retrievedPrompts: Set<string>` — prompts already sent to client (avoid re-sending)
|
||||
|
||||
### Gated Session Flow
|
||||
|
||||
1. On `initialize`: instructions include prompt index + gate message + retrieval encouragement
|
||||
2. `tools/list` while gated: only `begin_session` visible (progressive tool exposure)
|
||||
3. `begin_session({ tags })`: match tags → return briefing + prompt index + encouragement → ungate → send `notifications/tools/list_changed`
|
||||
4. On first `tools/call` while still gated: extract keywords → match → inject briefing alongside result → ungate
|
||||
5. After ungating: all tools work normally, `read_prompts` available for ongoing retrieval
|
||||
|
||||
### `begin_session` Tool
|
||||
|
||||
```json
|
||||
{
|
||||
"name": "begin_session",
|
||||
"description": "Start your session by providing 5 keywords that describe your current task. You'll receive relevant project context, policies, and guidelines. Required before using other tools.",
|
||||
"inputSchema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"tags": {
|
||||
"type": "array",
|
||||
"items": { "type": "string" },
|
||||
"maxItems": 10,
|
||||
"description": "5 keywords describing your current task (e.g. [\"zigbee\", \"automation\", \"lights\", \"mqtt\", \"pairing\"])"
|
||||
}
|
||||
},
|
||||
"required": ["tags"]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Response structure:
|
||||
```
|
||||
[Priority 10 prompts — always, full content]
|
||||
|
||||
[Tag-matched prompts — full content, byte-budget-capped, priority-ordered]
|
||||
|
||||
Other prompts available that may become relevant as your work progresses:
|
||||
- <name>: <summary>
|
||||
- <name>: <summary>
|
||||
- ...
|
||||
If any of these seem related to what you're doing, request them with
|
||||
read_prompts({ tags: [...] }). Don't assume you have all the context — check.
|
||||
```
|
||||
|
||||
### Prompt Index in Instructions
|
||||
|
||||
The `initialize` instructions include a compact prompt index so the client LLM can see what knowledge exists. Format per prompt: `- <name>: <summary>` (~100 chars max per entry).
|
||||
|
||||
Cap: if more than 50 prompts, include only priority 7+ in instructions index. Full index always available via `resources/list`.
|
||||
|
||||
## CLI Changes
|
||||
|
||||
### New/Modified Commands
|
||||
|
||||
- `mcpctl create prompt <name> --priority <1-10>` — create with priority
|
||||
- `mcpctl create prompt <name> --link <project/server:uri>` — create linked prompt
|
||||
- `mcpctl get prompt -A` — show all prompts across all projects, with link targets
|
||||
- `mcpctl describe project <name>` — show gated status, session greeting, prompt table
|
||||
- `mcpctl edit project <name>` — `gated` field editable
|
||||
|
||||
### Prompt Link Display
|
||||
|
||||
```
|
||||
$ mcpctl get prompt -A
|
||||
PROJECT NAME PRIORITY LINK STATUS
|
||||
homeautomation security-policies 8 - -
|
||||
homeautomation architecture-adr 6 system-public/docmost-mcp:docmost://pages/a1 alive
|
||||
homeautomation common-mistakes 10 - -
|
||||
system-public onboarding 4 - -
|
||||
```
|
||||
|
||||
## Describe Project Output
|
||||
|
||||
```
|
||||
$ mcpctl describe project homeautomation
|
||||
Name: homeautomation
|
||||
Gated: true
|
||||
LLM Provider: gemini-cli
|
||||
...
|
||||
|
||||
Session greeting:
|
||||
You have access to project knowledge containing policies, architecture decisions,
|
||||
and guidelines. Call begin_session with 5 keywords describing your task to receive
|
||||
relevant context. Some prompts contain critical rules — it's better to check than guess.
|
||||
|
||||
Prompts:
|
||||
NAME PRIORITY TYPE LINK
|
||||
common-mistakes 10 local -
|
||||
security-policies 8 local -
|
||||
architecture-adr 6 link system-public/docmost-mcp:docmost://pages/a1
|
||||
stack 5 local -
|
||||
```
|
||||
|
||||
## Testing Strategy
|
||||
|
||||
**Full test coverage is required.** Every new module, service, route, and algorithm must have comprehensive tests. No feature ships without tests.
|
||||
|
||||
### Unit Tests (mcpd)
|
||||
- Prompt priority CRUD: create/update/get with priority field, default value, validation (1-10 range)
|
||||
- Prompt link CRUD: create with linkTarget, immutability (can't update linkTarget), delete
|
||||
- Prompt summary generation: auto-generation on create/update, regex fallback when no LLM
|
||||
- `GET /api/v1/prompts` with priority, linkTarget, linkStatus fields
|
||||
- `GET /api/v1/projects/:name/prompt-index` returns compact index
|
||||
- `POST /api/v1/prompts/:id/regenerate-summary` triggers re-generation
|
||||
- Project `gated` field: CRUD, default value
|
||||
|
||||
### Unit Tests (mcplocal — gating flow)
|
||||
- State machine: gated → `begin_session` → ungated (happy path)
|
||||
- State machine: gated → `tools/call` intercepted → ungated (fallback path)
|
||||
- State machine: non-gated project skips gate entirely
|
||||
- LLM selection path: tags + prompt index sent to heavy LLM, ranked results returned, priority 10 always included
|
||||
- LLM selection path: heavy LLM uses fast LLM for missing summary generation
|
||||
- No-LLM fallback: tag matching score calculation, priority weighting, substring matching
|
||||
- No-LLM fallback: byte-budget exhaustion, priority ordering, index fallback, edge cases
|
||||
- Keyword extraction from tool calls: tool name parsing, argument extraction
|
||||
- `begin_session` response: matched content + index + encouragement text (both LLM and fallback paths)
|
||||
- `read_prompts` response: additional matches, deduplication against already-sent prompts (both paths)
|
||||
- Tools blocked while gated: return error directing to `begin_session`
|
||||
- `tools/list` while gated: only `begin_session` visible
|
||||
- `tools/list` after ungating: `begin_session` replaced by `read_prompts` + all upstream tools
|
||||
- Priority 10 always included regardless of tag match or budget
|
||||
- Prompt index in instructions: cap at 50, priority 7+ when over cap
|
||||
- Notifications: `tools/list_changed` sent after ungating
|
||||
|
||||
### Unit Tests (mcplocal — prompt links)
|
||||
- Link resolution: fetch content from source project's MCP server via service account
|
||||
- Dead link detection: source server unavailable, resource not found, permission denied
|
||||
- Dead link marking: status field updated, error logged
|
||||
- RBAC enforcement: link creation requires edit permission on target project RBAC
|
||||
- Service account permission: auto-created on source project for linked resource
|
||||
- Content isolation: client LLM cannot access source server directly
|
||||
|
||||
### Unit Tests (CLI)
|
||||
- `create prompt` with `--priority` flag, validation
|
||||
- `create prompt` with `--link` flag, format validation
|
||||
- `get prompt -A` output: all projects, link targets, status columns
|
||||
- `describe project` output: gated status, session greeting, prompt table
|
||||
- `edit project` with gated field
|
||||
- Shell completions for new flags and resources
|
||||
|
||||
### Integration Tests
|
||||
- End-to-end gated session: connect → begin_session with tags → tools available → correct prompts returned
|
||||
- End-to-end intercept: connect → skip begin_session → call tool → keywords extracted → briefing injected
|
||||
- End-to-end read_prompts: after ungating → request more context → additional prompts returned → no duplicates
|
||||
- Prompt link resolution: create link → fetch content → verify content matches source
|
||||
- Dead link lifecycle: create link → kill source → verify dead detection → restore → verify recovery
|
||||
- Priority ordering: create prompts at various priorities → verify selection order and budget allocation
|
||||
- Encouragement text: verify retrieval encouragement present in begin_session, read_prompts, and instructions
|
||||
|
||||
## System Prompts (mcpctl-system project)
|
||||
|
||||
All gate messages, encouragement text, and briefing templates are stored as prompts in a special `mcpctl-system` project. This makes them editable at runtime via `mcpctl edit prompt` without code changes or redeployment.
|
||||
|
||||
### Required System Prompts
|
||||
|
||||
| Name | Priority | Purpose |
|
||||
|------|----------|---------|
|
||||
| `gate-instructions` | 10 | Text injected into `initialize` instructions for gated projects. Tells client to call `begin_session` with 5 keywords. |
|
||||
| `gate-encouragement` | 10 | Appended after `begin_session` response. Lists remaining prompts and encourages further retrieval. |
|
||||
| `read-prompts-reminder` | 10 | Appended after `read_prompts` response. Reminds client that more context is available. |
|
||||
| `gate-intercept-preamble` | 10 | Prepended to briefing when injected via tool call intercept (Option C fallback). |
|
||||
| `session-greeting` | 10 | Shown in `mcpctl describe project` as the "hello prompt" — what client LLMs see on connect. |
|
||||
|
||||
### Bootstrap
|
||||
|
||||
The `mcpctl-system` project and its system prompts are created automatically on first startup (seed migration). They can be edited afterward but not deleted — delete attempts return an error.
|
||||
|
||||
### How mcplocal Uses Them
|
||||
|
||||
On router initialization, mcplocal fetches system prompts from mcpd via:
|
||||
```
|
||||
GET /api/v1/projects/mcpctl-system/prompts/visible
|
||||
```
|
||||
|
||||
These are cached with the same 60s TTL as project routers. The prompt content supports template variables:
|
||||
- `{{prompt_index}}` — replaced with the current project's prompt index
|
||||
- `{{project_name}}` — replaced with the current project name
|
||||
- `{{matched_prompts}}` — replaced with tag-matched prompt content
|
||||
- `{{remaining_prompts}}` — replaced with the list of non-matched prompts
|
||||
|
||||
This way the encouragement text, tone, and structure can be tuned by editing prompts — no code changes needed.
|
||||
|
||||
## Security Considerations
|
||||
|
||||
- Prompt links: content fetched server-side, client never gets direct access to source MCP server
|
||||
- RBAC: link creation requires edit permission on target project's RBAC
|
||||
- Service account: source project grants read access to linked resource only
|
||||
- Dead links: logged as errors, marked in listings, never expose source server errors to client
|
||||
- Tag extraction: sanitize tool call arguments before using as keywords (prevent injection)
|
||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
724
README.md
724
README.md
@@ -1,724 +0,0 @@
|
||||
# mcpctl
|
||||
|
||||
**kubectl for MCP servers.** A management system for [Model Context Protocol](https://modelcontextprotocol.io) servers — define, deploy, and connect MCP servers to Claude using familiar kubectl-style commands.
|
||||
|
||||
```
|
||||
mcpctl get servers
|
||||
NAME TRANSPORT REPLICAS DOCKER IMAGE DESCRIPTION
|
||||
grafana STDIO 1 grafana/mcp-grafana:latest Grafana MCP server
|
||||
home-assistant SSE 1 ghcr.io/homeassistant-ai/ha-mcp:latest Home Assistant MCP
|
||||
docmost SSE 1 10.0.0.194:3012/michal/docmost-mcp:latest Docmost wiki MCP
|
||||
```
|
||||
|
||||
## What is this?
|
||||
|
||||
mcpctl manages MCP servers the same way kubectl manages Kubernetes pods. You define servers declaratively in YAML, group them into projects, and connect them to Claude Code or any MCP client through a local proxy.
|
||||
|
||||
**The architecture:**
|
||||
|
||||
```
|
||||
Claude Code <--STDIO--> mcplocal (local proxy) <--HTTP--> mcpd (daemon) <--Docker--> MCP servers
|
||||
```
|
||||
|
||||
- **mcpd** — the daemon. Runs on a server, manages MCP server containers (Docker/Podman), stores configuration in PostgreSQL.
|
||||
- **mcplocal** — local proxy. Runs on your machine, presents a single MCP endpoint to Claude that merges tools from all your servers. Handles namespacing (`grafana/search_dashboards`), plugin execution (gating, content pipelines), and prompt delivery.
|
||||
- **mcpctl** — the CLI. Talks to mcpd (via mcplocal or directly) to manage everything.
|
||||
|
||||
## Quick Start
|
||||
|
||||
### 1. Install
|
||||
|
||||
```bash
|
||||
# From RPM repository (Fedora/RHEL)
|
||||
sudo tee /etc/yum.repos.d/mcpctl.repo <<'EOF'
|
||||
[mcpctl]
|
||||
name=mcpctl
|
||||
baseurl=https://mysources.co.uk/api/packages/michal/rpm
|
||||
enabled=1
|
||||
gpgcheck=0
|
||||
EOF
|
||||
sudo dnf install mcpctl
|
||||
|
||||
# Or build from source
|
||||
git clone https://mysources.co.uk/michal/mcpctl.git
|
||||
cd mcpctl
|
||||
pnpm install
|
||||
pnpm build
|
||||
pnpm rpm:build # requires bun and nfpm
|
||||
```
|
||||
|
||||
### 2. Connect to a daemon
|
||||
|
||||
```bash
|
||||
# Login to an mcpd instance
|
||||
mcpctl login --mcpd-url http://your-server:3000
|
||||
|
||||
# Check connectivity
|
||||
mcpctl status
|
||||
```
|
||||
|
||||
### 3. Create your first secret
|
||||
|
||||
Secrets store credentials that servers need — API tokens, passwords, etc.
|
||||
|
||||
```bash
|
||||
mcpctl create secret grafana-creds \
|
||||
--data GRAFANA_URL=http://grafana.local:3000 \
|
||||
--data GRAFANA_SERVICE_ACCOUNT_TOKEN=glsa_xxxxxxxxxxxx
|
||||
```
|
||||
|
||||
### 4. Create your first server
|
||||
|
||||
Browse available templates, then create a server from one:
|
||||
|
||||
```bash
|
||||
mcpctl get templates # List available server blueprints
|
||||
mcpctl describe template grafana # See required env vars, health checks, etc.
|
||||
|
||||
mcpctl create server my-grafana \
|
||||
--from-template grafana \
|
||||
--env-from-secret grafana-creds
|
||||
```
|
||||
|
||||
mcpd pulls the image, starts a container, and keeps it running. Check on it:
|
||||
|
||||
```bash
|
||||
mcpctl get instances # See running containers
|
||||
mcpctl logs my-grafana # View server logs
|
||||
mcpctl describe server my-grafana # Full details
|
||||
```
|
||||
|
||||
### 5. Create a project
|
||||
|
||||
A project groups servers together and configures how Claude interacts with them.
|
||||
|
||||
```bash
|
||||
mcpctl create project monitoring \
|
||||
--description "Grafana dashboards and alerting" \
|
||||
--server my-grafana \
|
||||
--proxy-model content-pipeline
|
||||
```
|
||||
|
||||
### 6. Connect Claude Code
|
||||
|
||||
Generate the `.mcp.json` config for Claude Code:
|
||||
|
||||
```bash
|
||||
mcpctl config claude --project monitoring
|
||||
```
|
||||
|
||||
This writes a `.mcp.json` that tells Claude Code to connect through mcplocal. Restart Claude Code and your Grafana tools appear:
|
||||
|
||||
```
|
||||
mcpctl console monitoring # Preview what Claude sees
|
||||
```
|
||||
|
||||
## Declarative Configuration
|
||||
|
||||
Everything can be defined in YAML and applied with `mcpctl apply`:
|
||||
|
||||
```yaml
|
||||
# infrastructure.yaml
|
||||
secrets:
|
||||
- name: grafana-creds
|
||||
data:
|
||||
GRAFANA_URL: "http://grafana.local:3000"
|
||||
GRAFANA_SERVICE_ACCOUNT_TOKEN: "glsa_xxxxxxxxxxxx"
|
||||
|
||||
servers:
|
||||
- name: my-grafana
|
||||
description: "Grafana dashboards and alerting"
|
||||
fromTemplate: grafana
|
||||
envFrom:
|
||||
- secretRef:
|
||||
name: grafana-creds
|
||||
|
||||
projects:
|
||||
- name: monitoring
|
||||
description: "Infrastructure monitoring"
|
||||
proxyModel: content-pipeline
|
||||
servers:
|
||||
- my-grafana
|
||||
```
|
||||
|
||||
```bash
|
||||
mcpctl apply -f infrastructure.yaml
|
||||
```
|
||||
|
||||
Round-trip works too — export, edit, re-apply:
|
||||
|
||||
```bash
|
||||
mcpctl get all --project monitoring -o yaml > state.yaml
|
||||
# edit state.yaml...
|
||||
mcpctl apply -f state.yaml
|
||||
```
|
||||
|
||||
## Plugin System (ProxyModel)
|
||||
|
||||
ProxyModel is mcpctl's plugin system. Each project is assigned a **plugin** that controls how Claude interacts with its servers.
|
||||
|
||||
There are two layers:
|
||||
- **Plugins** — TypeScript hooks that intercept MCP requests/responses (gating, tool filtering, etc.)
|
||||
- **Pipelines** — YAML-defined content transformation stages (pagination, summarization, etc.)
|
||||
|
||||
### Built-in Plugins
|
||||
|
||||
Plugins compose through inheritance. A plugin can `extend` another plugin and inherit all its hooks:
|
||||
|
||||
```
|
||||
gate → gating only (begin_session + prompt delivery)
|
||||
content-pipeline → content transformation only (pagination, section-split)
|
||||
default → extends both gate AND content-pipeline (inherits all hooks from both)
|
||||
```
|
||||
|
||||
| Plugin | Gating | Content pipeline | Description |
|
||||
|--------|:-:|:-:|---|
|
||||
| **gate** | Yes | No | `begin_session` gate with prompt delivery |
|
||||
| **content-pipeline** | No | Yes | Content transformation (paginate, section-split) |
|
||||
| **default** | Yes | Yes | Extends both — gate + content pipeline combined |
|
||||
|
||||
The `default` plugin doesn't reimplement anything — it inherits the gating hooks from `gate` and the content hooks from `content-pipeline`. Custom plugins can extend built-in ones the same way.
|
||||
|
||||
**Gating** means Claude initially sees only a `begin_session` tool. After calling it with a task description, relevant prompts are delivered and the full tool list is revealed. This keeps Claude's context focused.
|
||||
|
||||
```bash
|
||||
# Gated with content pipeline (default — extends gate + content-pipeline)
|
||||
mcpctl create project home --server my-ha --proxy-model default
|
||||
|
||||
# Ungated, content pipeline only
|
||||
mcpctl create project tools --server my-grafana --proxy-model content-pipeline
|
||||
|
||||
# Gated only, no content transformation
|
||||
mcpctl create project docs --server my-docs --proxy-model gate
|
||||
```
|
||||
|
||||
### Plugin Hooks
|
||||
|
||||
Plugins intercept MCP requests/responses at specific lifecycle points. When a plugin extends another, it inherits all the parent's hooks. If both parent and child define the same hook, the child's version wins.
|
||||
|
||||
| Hook | When it fires |
|
||||
|------|--------------|
|
||||
| `onSessionCreate` | New MCP session established |
|
||||
| `onSessionDestroy` | Session ends |
|
||||
| `onInitialize` | MCP `initialize` request — can inject instructions |
|
||||
| `onToolsList` | `tools/list` — can filter/modify tool list |
|
||||
| `onToolCallBefore` | Before forwarding a tool call — can intercept |
|
||||
| `onToolCallAfter` | After receiving tool result — can transform |
|
||||
| `onResourcesList` | `resources/list` — can filter resources |
|
||||
| `onResourceRead` | `resources/read` — can intercept resource reads |
|
||||
| `onPromptsList` | `prompts/list` — can filter prompts |
|
||||
| `onPromptGet` | `prompts/get` — can intercept prompt reads |
|
||||
|
||||
When multiple parents define the same hook, lifecycle hooks (`onSessionCreate`, `onSessionDestroy`) chain sequentially. All other hooks require the child to override — otherwise it's a conflict error.
|
||||
|
||||
### Content Pipelines
|
||||
|
||||
Content pipelines transform tool results through ordered stages before delivering to Claude:
|
||||
|
||||
| Pipeline | Stages | Use case |
|
||||
|----------|--------|----------|
|
||||
| **default** | `passthrough` → `paginate` (8KB pages) | Safe pass-through with pagination for large responses |
|
||||
| **subindex** | `section-split` → `summarize-tree` | Splits large content into sections, returns a summary index |
|
||||
|
||||
#### How `subindex` Works
|
||||
|
||||
1. Upstream returns a large tool result (e.g., 50KB of device states)
|
||||
2. `section-split` divides content into logical sections (500 chars–15KB each by default, per `minSectionSize`/`maxSectionSize`)
|
||||
3. `summarize-tree` generates a compact index with section summaries (~200 tokens each)
|
||||
4. Client receives the index and can request specific sections via `_section` parameter
|
||||
|
||||
### Configuration
|
||||
|
||||
Set per-project:
|
||||
|
||||
```yaml
|
||||
kind: project
|
||||
name: home-automation
|
||||
proxyModel: default
|
||||
servers:
|
||||
- home-assistant
|
||||
- node-red
|
||||
```
|
||||
|
||||
Via CLI:
|
||||
|
||||
```bash
|
||||
mcpctl create project monitoring --server grafana --proxy-model content-pipeline
|
||||
```
|
||||
|
||||
### Custom ProxyModels
|
||||
|
||||
Place YAML files in `~/.mcpctl/proxymodels/` to define custom pipelines:
|
||||
|
||||
```yaml
|
||||
kind: ProxyModel
|
||||
metadata:
|
||||
name: my-pipeline
|
||||
spec:
|
||||
stages:
|
||||
- type: section-split
|
||||
config:
|
||||
minSectionSize: 1000
|
||||
maxSectionSize: 10000
|
||||
- type: summarize-tree
|
||||
config:
|
||||
maxTokens: 150
|
||||
maxDepth: 2
|
||||
appliesTo: [toolResult, prompt]
|
||||
cacheable: true
|
||||
```
|
||||
|
||||
Inspect available plugins and pipelines:
|
||||
|
||||
```bash
|
||||
mcpctl get proxymodels # List all plugins and pipelines
|
||||
mcpctl describe proxymodel default # Pipeline details (stages, controller)
|
||||
mcpctl describe proxymodel gate # Plugin details (hooks, extends)
|
||||
```
|
||||
|
||||
### Custom Stages
|
||||
|
||||
Drop `.js` or `.mjs` files in `~/.mcpctl/stages/` to add custom transformation stages. Each file must `export default` an async function matching the `StageHandler` contract:
|
||||
|
||||
```javascript
|
||||
// ~/.mcpctl/stages/redact-keys.js
|
||||
export default async function(content, ctx) {
|
||||
// ctx provides: contentType, sourceName, projectName, sessionId,
|
||||
// originalContent, llm, cache, log, config
|
||||
const redacted = content.replace(/([A-Z_]+_KEY)=\S+/g, '$1=***');
|
||||
  ctx.log.info(`Removed ${content.length - redacted.length} chars of secret values`);
|
||||
return { content: redacted };
|
||||
}
|
||||
```
|
||||
|
||||
Stages loaded from disk appear as `local` source. Use them in a custom ProxyModel YAML:
|
||||
|
||||
```yaml
|
||||
kind: ProxyModel
|
||||
metadata:
|
||||
name: secure-pipeline
|
||||
spec:
|
||||
stages:
|
||||
- type: redact-keys # matches filename without extension
|
||||
- type: section-split
|
||||
- type: summarize-tree
|
||||
```
|
||||
|
||||
**Stage contract reference:**
|
||||
|
||||
| Field | Type | Description |
|
||||
|-------|------|-------------|
|
||||
| `content` | `string` | Input content (from previous stage or raw upstream) |
|
||||
| `ctx.contentType` | `'toolResult' \| 'prompt' \| 'resource'` | What kind of content is being processed |
|
||||
| `ctx.sourceName` | `string` | Tool name, prompt name, or resource URI |
|
||||
| `ctx.originalContent` | `string` | The unmodified content before any stage ran |
|
||||
| `ctx.llm` | `LLMProvider` | Call `ctx.llm.complete(prompt)` for LLM summarization |
|
||||
| `ctx.cache` | `CacheProvider` | Call `ctx.cache.getOrCompute(key, fn)` to cache expensive results |
|
||||
| `ctx.log` | `StageLogger` | `debug()`, `info()`, `warn()`, `error()` |
|
||||
| `ctx.config` | `Record<string, unknown>` | Config values from the ProxyModel YAML |
|
||||
|
||||
**Return value:**
|
||||
|
||||
```typescript
|
||||
{ content: string; sections?: Section[]; metadata?: Record<string, unknown> }
|
||||
```
|
||||
|
||||
If `sections` is returned, the framework stores them and presents a table of contents to the client. The client can drill into individual sections via `_resultId` + `_section` parameters on subsequent tool or prompt calls.
|
||||
|
||||
### Section Drill-Down
|
||||
|
||||
When a stage (like `section-split`) produces sections, the pipeline automatically:
|
||||
|
||||
1. Replaces the full content with a compact table of contents
|
||||
2. Appends a `_resultId` for subsequent drill-down
|
||||
3. Stores the full sections in memory (5-minute TTL)
|
||||
|
||||
Claude then calls the same tool (or `prompts/get`) again with `_resultId` and `_section` parameters to retrieve a specific section. This works for both tool results and prompt responses.
|
||||
|
||||
```
|
||||
# What Claude sees (tool result):
|
||||
3 sections (json):
|
||||
[users] Users (4K chars)
|
||||
[config] Config (1K chars)
|
||||
[logs] Logs (8K chars)
|
||||
|
||||
_resultId: pm-abc123 — use _resultId and _section parameters to drill into a section.
|
||||
|
||||
# Claude drills down:
|
||||
→ tools/call: grafana/query { _resultId: "pm-abc123", _section: "logs" }
|
||||
← [full 8K content of the logs section]
|
||||
```
|
||||
|
||||
### Hot-Reload
|
||||
|
||||
Stages and ProxyModels reload automatically when files change — no restart needed.
|
||||
|
||||
- **Stages** (`~/.mcpctl/stages/*.js`): File watcher with 300ms debounce. Add, edit, or remove stage files and they take effect on the next tool call.
|
||||
- **ProxyModels** (`~/.mcpctl/proxymodels/*.yaml`): Re-read from disk on every request, so changes are always picked up.
|
||||
|
||||
Force a manual reload via the HTTP API:
|
||||
|
||||
```bash
|
||||
curl -X POST http://localhost:3200/proxymodels/reload
|
||||
# {"loaded": 3}
|
||||
|
||||
curl http://localhost:3200/proxymodels/stages
|
||||
# [{"name":"passthrough","source":"built-in"},{"name":"redact-keys","source":"local"},...]
|
||||
```
|
||||
|
||||
### Built-in Stages Reference
|
||||
|
||||
| Stage | Description | Key Config |
|
||||
|-------|------------|------------|
|
||||
| `passthrough` | Returns content unchanged | — |
|
||||
| `paginate` | Splits large content into numbered pages | `pageSize` (default: 8000 chars) |
|
||||
| `section-split` | Splits content into named sections by structure (headers, JSON keys, code boundaries) | `minSectionSize` (500), `maxSectionSize` (15000) |
|
||||
| `summarize-tree` | Generates LLM summaries for each section | `maxTokens` (200), `maxDepth` (2) |
|
||||
|
||||
`section-split` detects content type automatically:
|
||||
|
||||
| Content Type | Split Strategy |
|
||||
|-------------|---------------|
|
||||
| JSON array | One section per array element, using `name`/`id`/`label` as section ID |
|
||||
| JSON object | One section per top-level key |
|
||||
| YAML | One section per top-level key |
|
||||
| Markdown | One section per `##` header |
|
||||
| Code | One section per function/class boundary |
|
||||
| XML | One section per top-level element |
|
||||
|
||||
### Pause Queue (Model Studio)
|
||||
|
||||
The pause queue lets you intercept pipeline results in real-time — inspect what the pipeline produced, edit it, or drop it before Claude receives the response.
|
||||
|
||||
```bash
|
||||
# Enable pause mode
|
||||
curl -X PUT http://localhost:3200/pause -d '{"paused":true}'
|
||||
|
||||
# View queued items (blocked tool calls waiting for your decision)
|
||||
curl http://localhost:3200/pause/queue
|
||||
|
||||
# Release an item (send transformed content to Claude)
|
||||
curl -X POST http://localhost:3200/pause/queue/<id>/release
|
||||
|
||||
# Edit and release (send your modified content instead)
|
||||
curl -X POST http://localhost:3200/pause/queue/<id>/edit -d '{"content":"modified content"}'
|
||||
|
||||
# Drop an item (send empty response)
|
||||
curl -X POST http://localhost:3200/pause/queue/<id>/drop
|
||||
|
||||
# Release all queued items at once
|
||||
curl -X POST http://localhost:3200/pause/release-all
|
||||
|
||||
# Disable pause mode
|
||||
curl -X PUT http://localhost:3200/pause -d '{"paused":false}'
|
||||
```
|
||||
|
||||
The pause queue is also available as MCP tools via `mcpctl console --stdin-mcp`, which gives Claude direct access to `pause`, `get_pause_queue`, and `release_paused` tools for self-monitoring.
|
||||
|
||||
## LLM Providers
|
||||
|
||||
ProxyModel stages that need LLM capabilities (like `summarize-tree`) use configurable providers. Configure in `~/.mcpctl/config.yaml`:
|
||||
|
||||
```yaml
|
||||
llm:
|
||||
- name: vllm-local
|
||||
type: openai-compatible
|
||||
baseUrl: http://localhost:8000/v1
|
||||
model: Qwen/Qwen3-32B
|
||||
- name: anthropic
|
||||
type: anthropic
|
||||
model: claude-sonnet-4-20250514
|
||||
# API key from: mcpctl create secret llm-keys --data ANTHROPIC_API_KEY=sk-...
|
||||
```
|
||||
|
||||
Providers support **tiered routing** (`fast` for quick summaries, `heavy` for complex analysis) and **automatic failover** — if one provider is down, the next is tried.
|
||||
|
||||
```bash
|
||||
# Check active providers
|
||||
mcpctl status # Shows LLM provider status
|
||||
|
||||
# View provider details
|
||||
curl http://localhost:3200/llm/providers
|
||||
```
|
||||
|
||||
## Pipeline Cache
|
||||
|
||||
ProxyModel pipelines cache LLM-generated results (summaries, section indexes) to avoid redundant API calls. The cache is persistent across mcplocal restarts.
|
||||
|
||||
### Namespace Isolation
|
||||
|
||||
Each combination of **LLM provider + model + ProxyModel** gets its own cache namespace:
|
||||
|
||||
```
|
||||
~/.mcpctl/cache/openai--gpt-4o--content-pipeline/
|
||||
~/.mcpctl/cache/anthropic--claude-sonnet-4-20250514--content-pipeline/
|
||||
~/.mcpctl/cache/vllm--qwen-72b--subindex/
|
||||
```
|
||||
|
||||
Switching LLM providers or models automatically uses a fresh cache — no stale results from a different model.
|
||||
|
||||
### CLI Management
|
||||
|
||||
```bash
|
||||
# View cache statistics (per-namespace breakdown)
|
||||
mcpctl cache stats
|
||||
|
||||
# Clear all cache entries
|
||||
mcpctl cache clear
|
||||
|
||||
# Clear a specific namespace
|
||||
mcpctl cache clear openai--gpt-4o--content-pipeline
|
||||
|
||||
# Clear entries older than 7 days
|
||||
mcpctl cache clear --older-than 7
|
||||
```
|
||||
|
||||
### Size Limits
|
||||
|
||||
The cache enforces a configurable maximum size (default: 256MB). When exceeded, the oldest entries are evicted (LRU). Entries older than 30 days are automatically expired.
|
||||
|
||||
Size can be specified as bytes, human-readable units, or a percentage of the filesystem:
|
||||
|
||||
```typescript
|
||||
new FileCache('ns', { maxSize: '512MB' }) // fixed size
|
||||
new FileCache('ns', { maxSize: '1.5GB' }) // fractional units
|
||||
new FileCache('ns', { maxSize: '10%' }) // 10% of partition
|
||||
```
|
||||
|
||||
## Resources
|
||||
|
||||
| Resource | What it is | Example |
|
||||
|----------|-----------|---------|
|
||||
| **server** | MCP server definition | Docker image + transport + env vars |
|
||||
| **instance** | Running container (immutable) | Auto-created from server replicas |
|
||||
| **secret** | Key-value credentials | API tokens, passwords |
|
||||
| **template** | Reusable server blueprint | Community server configs |
|
||||
| **project** | Workspace grouping servers | "monitoring", "home-automation" |
|
||||
| **prompt** | Curated content for Claude | Instructions, docs, guides |
|
||||
| **promptrequest** | Pending prompt proposal | LLM-submitted, needs approval |
|
||||
| **rbac** | Access control bindings | Who can do what |
|
||||
| **serverattachment** | Server-to-project link | Virtual resource for `apply` |
|
||||
|
||||
## Commands
|
||||
|
||||
```bash
|
||||
# List resources
|
||||
mcpctl get servers
|
||||
mcpctl get instances
|
||||
mcpctl get projects
|
||||
mcpctl get prompts --project myproject
|
||||
|
||||
# Detailed view
|
||||
mcpctl describe server grafana
|
||||
mcpctl describe project monitoring
|
||||
|
||||
# Create resources
|
||||
mcpctl create server <name> [flags]
|
||||
mcpctl create secret <name> --data KEY=value
|
||||
mcpctl create project <name> --server <srv> [--proxy-model <plugin>]
|
||||
mcpctl create prompt <name> --project <proj> --content "..."
|
||||
|
||||
# Modify resources
|
||||
mcpctl edit server grafana # Opens in $EDITOR
|
||||
mcpctl patch project myproj proxyModel=default
|
||||
mcpctl apply -f config.yaml # Declarative create/update
|
||||
|
||||
# Delete resources
|
||||
mcpctl delete server grafana
|
||||
|
||||
# Logs and debugging
|
||||
mcpctl logs grafana # Container logs
|
||||
mcpctl console monitoring # Interactive MCP console
|
||||
mcpctl console --inspect # Traffic inspector
|
||||
mcpctl console --audit # Audit event timeline
|
||||
mcpctl console --stdin-mcp # Claude monitor (MCP tools for Claude)
|
||||
|
||||
# Backup (git-based)
|
||||
mcpctl backup # Status and SSH key
|
||||
mcpctl backup log # Commit history
|
||||
mcpctl backup restore list # Available restore points
|
||||
mcpctl backup restore diff abc1234 # Preview a restore
|
||||
mcpctl backup restore to abc1234 --force # Restore to a commit
|
||||
|
||||
# Project management
|
||||
mcpctl --project monitoring get servers # Project-scoped listing
|
||||
mcpctl --project monitoring attach-server grafana
|
||||
mcpctl --project monitoring detach-server grafana
|
||||
```
|
||||
|
||||
## Templates
|
||||
|
||||
Templates are reusable server configurations. Create a server from a template without repeating all the config:
|
||||
|
||||
```bash
|
||||
# Register a template
|
||||
mcpctl create template home-assistant \
|
||||
--docker-image "ghcr.io/homeassistant-ai/ha-mcp:latest" \
|
||||
--transport SSE \
|
||||
--container-port 8086
|
||||
|
||||
# Create a server from it
|
||||
mcpctl create server my-ha \
|
||||
--from-template home-assistant \
|
||||
--env-from-secret ha-secrets
|
||||
```
|
||||
|
||||
## Gated Sessions
|
||||
|
||||
Projects using the `default` or `gate` plugin are **gated**. When Claude connects to a gated project:
|
||||
|
||||
1. Claude sees only a `begin_session` tool initially
|
||||
2. Claude calls `begin_session` with a description of its task
|
||||
3. mcplocal matches relevant prompts and delivers them
|
||||
4. The full tool list is revealed
|
||||
|
||||
This keeps Claude's context focused — instead of dumping 100+ tools and pages of docs upfront, only the relevant ones are delivered based on the task at hand.
|
||||
|
||||
```bash
|
||||
# Gated (default)
|
||||
mcpctl create project monitoring --server grafana --proxy-model default
|
||||
|
||||
# Ungated (direct tool access)
|
||||
mcpctl create project tools --server grafana --proxy-model content-pipeline
|
||||
```
|
||||
|
||||
## Prompts
|
||||
|
||||
Prompts are curated content delivered to Claude through the MCP protocol. They can be plain text or linked to external MCP resources (like wiki pages).
|
||||
|
||||
```bash
|
||||
# Create a text prompt
|
||||
mcpctl create prompt deployment-guide \
|
||||
--project monitoring \
|
||||
--content-file docs/deployment.md \
|
||||
--priority 7
|
||||
|
||||
# Create a linked prompt (content fetched live from an MCP resource)
|
||||
mcpctl create prompt wiki-page \
|
||||
--project monitoring \
|
||||
--link "monitoring/docmost:docmost://pages/abc123" \
|
||||
--priority 5
|
||||
```
|
||||
|
||||
Claude can also **propose** prompts during a session. These appear as prompt requests that you can review and approve:
|
||||
|
||||
```bash
|
||||
mcpctl get promptrequests
|
||||
mcpctl approve promptrequest proposed-guide
|
||||
```
|
||||
|
||||
## Interactive Console
|
||||
|
||||
The console lets you see exactly what Claude sees — tools, resources, prompts — and call tools interactively:
|
||||
|
||||
```bash
|
||||
mcpctl console monitoring
|
||||
```
|
||||
|
||||
The traffic inspector watches MCP traffic from other clients in real-time:
|
||||
|
||||
```bash
|
||||
mcpctl console --inspect
|
||||
```
|
||||
|
||||
### Claude Monitor (stdin-mcp)
|
||||
|
||||
Connect Claude itself as a monitor via the inspect MCP server:
|
||||
|
||||
```bash
|
||||
mcpctl console --stdin-mcp
|
||||
```
|
||||
|
||||
This exposes MCP tools that let Claude observe and control traffic:
|
||||
|
||||
| Tool | Description |
|
||||
|------|------------|
|
||||
| `list_models` | List configured LLM providers and their status |
|
||||
| `list_stages` | List all available pipeline stages (built-in + custom) |
|
||||
| `switch_model` | Change the active LLM provider for pipeline stages |
|
||||
| `get_model_info` | Get details about a specific LLM provider |
|
||||
| `reload_stages` | Force reload custom stages from disk |
|
||||
| `pause` | Toggle pause mode (intercept pipeline results) |
|
||||
| `get_pause_queue` | List items held in the pause queue |
|
||||
| `release_paused` | Release, edit, or drop a paused item |
|
||||
|
||||
## Architecture
|
||||
|
||||
```
|
||||
┌──────────────┐ ┌─────────────────────────────────────────┐
|
||||
│ Claude Code │ STDIO │ mcplocal (proxy) │
|
||||
│ │◄─────────►│ │
|
||||
│ (or any MCP │ │ Namespace-merging MCP proxy │
|
||||
│ client) │ │ Gated sessions + prompt delivery │
|
||||
│ │ │ Per-project endpoints │
|
||||
└──────────────┘ │ Traffic inspection │
|
||||
└──────────────┬──────────────────────────┘
|
||||
│ HTTP (REST + MCP proxy)
|
||||
│
|
||||
┌──────────────┴──────────────────────────┐
|
||||
│ mcpd (daemon) │
|
||||
│ │
|
||||
│ REST API (/api/v1/*) │
|
||||
│ MCP proxy (routes tool calls) │
|
||||
│ PostgreSQL (Prisma ORM) │
|
||||
│ Docker/Podman container management │
|
||||
│ Health probes (STDIO, SSE, HTTP) │
|
||||
│ RBAC enforcement │
|
||||
│ │
|
||||
│ ┌───────────────────────────────────┐ │
|
||||
│ │ MCP Server Containers │ │
|
||||
│ │ │ │
|
||||
│ │ grafana/ home-assistant/ ... │ │
|
||||
│ │ (managed + proxied by mcpd) │ │
|
||||
│ └───────────────────────────────────┘ │
|
||||
└─────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
Clients never connect to MCP server containers directly — all tool calls go through mcplocal → mcpd, which proxies them to the right container via STDIO/SSE/HTTP. This keeps containers unexposed and lets mcpd enforce RBAC and health checks.
|
||||
|
||||
**Tool namespacing**: When Claude connects to a project with servers `grafana` and `slack`, it sees tools like `grafana/search_dashboards` and `slack/send_message`. mcplocal routes each call through mcpd to the correct upstream server.
|
||||
|
||||
## Project Structure
|
||||
|
||||
```
|
||||
mcpctl/
|
||||
├── src/
|
||||
│ ├── cli/ # mcpctl command-line interface (Commander.js)
|
||||
│ ├── mcpd/ # Daemon server (Fastify 5, REST API)
|
||||
│ ├── mcplocal/ # Local MCP proxy (namespace merging, gating)
|
||||
│ ├── db/ # Database schema (Prisma) and migrations
|
||||
│ └── shared/ # Shared types and utilities
|
||||
├── deploy/ # Docker Compose for local development
|
||||
├── stack/ # Production deployment (Portainer)
|
||||
├── scripts/ # Build, release, and deploy scripts
|
||||
├── examples/ # Example YAML configurations
|
||||
└── completions/ # Shell completions (fish, bash)
|
||||
```
|
||||
|
||||
## Development
|
||||
|
||||
```bash
|
||||
# Prerequisites: Node.js 20+, pnpm 9+, Docker/Podman
|
||||
|
||||
# Install dependencies
|
||||
pnpm install
|
||||
|
||||
# Start local database
|
||||
pnpm db:up
|
||||
|
||||
# Generate Prisma client
|
||||
cd src/db && npx prisma generate && cd ../..
|
||||
|
||||
# Build all packages
|
||||
pnpm build
|
||||
|
||||
# Run tests
|
||||
pnpm test:run
|
||||
|
||||
# Development mode (mcpd with hot-reload)
|
||||
cd src/mcpd && pnpm dev
|
||||
```
|
||||
|
||||
## License
|
||||
|
||||
MIT
|
||||
@@ -1,328 +1,73 @@
|
||||
# mcpctl bash completions — auto-generated by scripts/generate-completions.ts
|
||||
# DO NOT EDIT MANUALLY — run: pnpm completions:generate
|
||||
|
||||
_mcpctl() {
|
||||
local cur prev words cword
|
||||
_init_completion || return
|
||||
|
||||
local commands="status login logout config get describe delete logs create edit apply patch backup approve console cache"
|
||||
local project_commands="get describe delete logs create edit attach-server detach-server"
|
||||
local global_opts="-v --version --daemon-url --direct -p --project -h --help"
|
||||
local resources="servers instances secrets templates projects users groups rbac prompts promptrequests serverattachments proxymodels all"
|
||||
local resource_aliases="servers instances secrets templates projects users groups rbac prompts promptrequests serverattachments proxymodels all server srv instance inst secret sec template tpl project proj user group rbac-definition rbac-binding prompt promptrequest pr serverattachment sa proxymodel pm"
|
||||
local commands="status login logout config get describe delete logs create edit apply backup restore help"
|
||||
local global_opts="-v --version --daemon-url --direct -h --help"
|
||||
local resources="servers instances secrets templates projects users groups rbac"
|
||||
|
||||
# Check if --project/-p was given
|
||||
local has_project=false
|
||||
local i
|
||||
for ((i=1; i < cword; i++)); do
|
||||
if [[ "${words[i]}" == "--project" || "${words[i]}" == "-p" ]]; then
|
||||
has_project=true
|
||||
break
|
||||
fi
|
||||
done
|
||||
|
||||
# Find the first subcommand
|
||||
local subcmd=""
|
||||
local subcmd_pos=0
|
||||
for ((i=1; i < cword; i++)); do
|
||||
if [[ "${words[i]}" == "--project" || "${words[i]}" == "--daemon-url" || "${words[i]}" == "-p" ]]; then
|
||||
((i++))
|
||||
continue
|
||||
fi
|
||||
if [[ "${words[i]}" != -* ]]; then
|
||||
subcmd="${words[i]}"
|
||||
subcmd_pos=$i
|
||||
break
|
||||
fi
|
||||
done
|
||||
|
||||
# Find the resource type after resource commands
|
||||
local resource_type=""
|
||||
if [[ -n "$subcmd_pos" ]] && [[ $subcmd_pos -gt 0 ]]; then
|
||||
for ((i=subcmd_pos+1; i < cword; i++)); do
|
||||
if [[ "${words[i]}" != -* ]] && [[ " $resource_aliases " == *" ${words[i]} "* ]]; then
|
||||
resource_type="${words[i]}"
|
||||
break
|
||||
case "${words[1]}" in
|
||||
config)
|
||||
if [[ $cword -eq 2 ]]; then
|
||||
COMPREPLY=($(compgen -W "view set path reset claude-generate impersonate help" -- "$cur"))
|
||||
fi
|
||||
done
|
||||
fi
|
||||
|
||||
# Helper: get --project/-p value
|
||||
_mcpctl_get_project_value() {
|
||||
local i
|
||||
for ((i=1; i < cword; i++)); do
|
||||
if [[ "${words[i]}" == "--project" || "${words[i]}" == "-p" ]] && (( i+1 < cword )); then
|
||||
echo "${words[i+1]}"
|
||||
return
|
||||
fi
|
||||
done
|
||||
}
|
||||
|
||||
# Helper: fetch resource names
|
||||
_mcpctl_resource_names() {
|
||||
local rt="$1"
|
||||
if [[ -n "$rt" ]]; then
|
||||
if [[ "$rt" == "instances" ]]; then
|
||||
mcpctl get instances -o json 2>/dev/null | jq -r '.[][].server.name' 2>/dev/null
|
||||
else
|
||||
mcpctl get "$rt" -o json 2>/dev/null | jq -r '.[].name' 2>/dev/null
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
# Helper: find sub-subcommand (for config/create)
|
||||
_mcpctl_get_subcmd() {
|
||||
local parent_pos="$1"
|
||||
local i
|
||||
for ((i=parent_pos+1; i < cword; i++)); do
|
||||
if [[ "${words[i]}" != -* ]]; then
|
||||
echo "${words[i]}"
|
||||
return
|
||||
fi
|
||||
done
|
||||
}
|
||||
|
||||
# If completing option values
|
||||
if [[ "$prev" == "--project" || "$prev" == "-p" ]]; then
|
||||
local names
|
||||
names=$(mcpctl get projects -o json 2>/dev/null | jq -r '.[].name' 2>/dev/null)
|
||||
COMPREPLY=($(compgen -W "$names" -- "$cur"))
|
||||
return
|
||||
fi
|
||||
|
||||
case "$subcmd" in
|
||||
return ;;
|
||||
status)
|
||||
COMPREPLY=($(compgen -W "-o --output -h --help" -- "$cur"))
|
||||
return ;;
|
||||
login)
|
||||
COMPREPLY=($(compgen -W "--mcpd-url -h --help" -- "$cur"))
|
||||
return ;;
|
||||
logout)
|
||||
COMPREPLY=($(compgen -W "-h --help" -- "$cur"))
|
||||
return ;;
|
||||
config)
|
||||
local config_sub=$(_mcpctl_get_subcmd $subcmd_pos)
|
||||
if [[ -z "$config_sub" ]]; then
|
||||
COMPREPLY=($(compgen -W "view set path reset claude claude-generate setup impersonate help" -- "$cur"))
|
||||
else
|
||||
case "$config_sub" in
|
||||
view)
|
||||
COMPREPLY=($(compgen -W "-o --output -h --help" -- "$cur"))
|
||||
;;
|
||||
set)
|
||||
COMPREPLY=($(compgen -W "-h --help" -- "$cur"))
|
||||
;;
|
||||
path)
|
||||
COMPREPLY=($(compgen -W "-h --help" -- "$cur"))
|
||||
;;
|
||||
reset)
|
||||
COMPREPLY=($(compgen -W "-h --help" -- "$cur"))
|
||||
;;
|
||||
claude)
|
||||
COMPREPLY=($(compgen -W "-p --project -o --output --inspect --stdout -h --help" -- "$cur"))
|
||||
;;
|
||||
claude-generate)
|
||||
COMPREPLY=($(compgen -W "-p --project -o --output --inspect --stdout -h --help" -- "$cur"))
|
||||
;;
|
||||
setup)
|
||||
COMPREPLY=($(compgen -W "-h --help" -- "$cur"))
|
||||
;;
|
||||
impersonate)
|
||||
COMPREPLY=($(compgen -W "--quit -h --help" -- "$cur"))
|
||||
;;
|
||||
*)
|
||||
COMPREPLY=($(compgen -W "-h --help" -- "$cur"))
|
||||
;;
|
||||
esac
|
||||
fi
|
||||
login)
|
||||
COMPREPLY=($(compgen -W "--url --email --password -h --help" -- "$cur"))
|
||||
return ;;
|
||||
logout)
|
||||
return ;;
|
||||
get)
|
||||
if [[ -z "$resource_type" ]]; then
|
||||
COMPREPLY=($(compgen -W "$resources -o --output -p --project -A --all -h --help" -- "$cur"))
|
||||
if [[ $cword -eq 2 ]]; then
|
||||
COMPREPLY=($(compgen -W "$resources" -- "$cur"))
|
||||
else
|
||||
local names
|
||||
names=$(_mcpctl_resource_names "$resource_type")
|
||||
COMPREPLY=($(compgen -W "$names -o --output -p --project -A --all -h --help" -- "$cur"))
|
||||
COMPREPLY=($(compgen -W "-o --output -h --help" -- "$cur"))
|
||||
fi
|
||||
return ;;
|
||||
describe)
|
||||
if [[ -z "$resource_type" ]]; then
|
||||
COMPREPLY=($(compgen -W "$resources -o --output --show-values -h --help" -- "$cur"))
|
||||
if [[ $cword -eq 2 ]]; then
|
||||
COMPREPLY=($(compgen -W "$resources" -- "$cur"))
|
||||
else
|
||||
local names
|
||||
names=$(_mcpctl_resource_names "$resource_type")
|
||||
COMPREPLY=($(compgen -W "$names -o --output --show-values -h --help" -- "$cur"))
|
||||
COMPREPLY=($(compgen -W "-o --output --show-values -h --help" -- "$cur"))
|
||||
fi
|
||||
return ;;
|
||||
delete)
|
||||
if [[ -z "$resource_type" ]]; then
|
||||
COMPREPLY=($(compgen -W "$resources -p --project -h --help" -- "$cur"))
|
||||
else
|
||||
local names
|
||||
names=$(_mcpctl_resource_names "$resource_type")
|
||||
COMPREPLY=($(compgen -W "$names -p --project -h --help" -- "$cur"))
|
||||
fi
|
||||
return ;;
|
||||
logs)
|
||||
if [[ $((cword - subcmd_pos)) -eq 1 ]]; then
|
||||
local names
|
||||
names=$(mcpctl get instances -o json 2>/dev/null | jq -r '.[][].server.name' 2>/dev/null)
|
||||
COMPREPLY=($(compgen -W "$names -t --tail -i --instance -h --help" -- "$cur"))
|
||||
else
|
||||
COMPREPLY=($(compgen -W "-t --tail -i --instance -h --help" -- "$cur"))
|
||||
fi
|
||||
return ;;
|
||||
create)
|
||||
local create_sub=$(_mcpctl_get_subcmd $subcmd_pos)
|
||||
if [[ -z "$create_sub" ]]; then
|
||||
COMPREPLY=($(compgen -W "server secret project user group rbac prompt serverattachment promptrequest help" -- "$cur"))
|
||||
else
|
||||
case "$create_sub" in
|
||||
server)
|
||||
COMPREPLY=($(compgen -W "-d --description --package-name --runtime --docker-image --transport --repository-url --external-url --command --container-port --replicas --env --from-template --env-from-secret --force -h --help" -- "$cur"))
|
||||
;;
|
||||
secret)
|
||||
COMPREPLY=($(compgen -W "--data --force -h --help" -- "$cur"))
|
||||
;;
|
||||
project)
|
||||
COMPREPLY=($(compgen -W "-d --description --proxy-model --prompt --gated --no-gated --server --force -h --help" -- "$cur"))
|
||||
;;
|
||||
user)
|
||||
COMPREPLY=($(compgen -W "--password --name --force -h --help" -- "$cur"))
|
||||
;;
|
||||
group)
|
||||
COMPREPLY=($(compgen -W "--description --member --force -h --help" -- "$cur"))
|
||||
;;
|
||||
rbac)
|
||||
COMPREPLY=($(compgen -W "--subject --binding --operation --force -h --help" -- "$cur"))
|
||||
;;
|
||||
prompt)
|
||||
COMPREPLY=($(compgen -W "-p --project --content --content-file --priority --link -h --help" -- "$cur"))
|
||||
;;
|
||||
serverattachment)
|
||||
COMPREPLY=($(compgen -W "-p --project -h --help" -- "$cur"))
|
||||
;;
|
||||
promptrequest)
|
||||
COMPREPLY=($(compgen -W "-p --project --content --content-file --priority -h --help" -- "$cur"))
|
||||
;;
|
||||
*)
|
||||
COMPREPLY=($(compgen -W "-h --help" -- "$cur"))
|
||||
;;
|
||||
esac
|
||||
if [[ $cword -eq 2 ]]; then
|
||||
COMPREPLY=($(compgen -W "$resources" -- "$cur"))
|
||||
fi
|
||||
return ;;
|
||||
edit)
|
||||
if [[ -z "$resource_type" ]]; then
|
||||
COMPREPLY=($(compgen -W "servers secrets projects groups rbac prompts promptrequests -h --help" -- "$cur"))
|
||||
else
|
||||
local names
|
||||
names=$(_mcpctl_resource_names "$resource_type")
|
||||
COMPREPLY=($(compgen -W "$names -h --help" -- "$cur"))
|
||||
if [[ $cword -eq 2 ]]; then
|
||||
COMPREPLY=($(compgen -W "servers projects" -- "$cur"))
|
||||
fi
|
||||
return ;;
|
||||
logs)
|
||||
COMPREPLY=($(compgen -W "--tail --since -f --follow -h --help" -- "$cur"))
|
||||
return ;;
|
||||
create)
|
||||
if [[ $cword -eq 2 ]]; then
|
||||
COMPREPLY=($(compgen -W "server secret project user group rbac help" -- "$cur"))
|
||||
fi
|
||||
return ;;
|
||||
apply)
|
||||
COMPREPLY=($(compgen -f -W "-f --file --dry-run -h --help" -- "$cur"))
|
||||
return ;;
|
||||
patch)
|
||||
if [[ -z "$resource_type" ]]; then
|
||||
COMPREPLY=($(compgen -W "$resources -h --help" -- "$cur"))
|
||||
else
|
||||
local names
|
||||
names=$(_mcpctl_resource_names "$resource_type")
|
||||
COMPREPLY=($(compgen -W "$names -h --help" -- "$cur"))
|
||||
fi
|
||||
COMPREPLY=($(compgen -f -- "$cur"))
|
||||
return ;;
|
||||
backup)
|
||||
local backup_sub=$(_mcpctl_get_subcmd $subcmd_pos)
|
||||
if [[ -z "$backup_sub" ]]; then
|
||||
COMPREPLY=($(compgen -W "log restore help" -- "$cur"))
|
||||
else
|
||||
case "$backup_sub" in
|
||||
log)
|
||||
COMPREPLY=($(compgen -W "-n --limit -h --help" -- "$cur"))
|
||||
;;
|
||||
restore)
|
||||
COMPREPLY=($(compgen -W "-h --help" -- "$cur"))
|
||||
;;
|
||||
*)
|
||||
COMPREPLY=($(compgen -W "-h --help" -- "$cur"))
|
||||
;;
|
||||
esac
|
||||
fi
|
||||
COMPREPLY=($(compgen -W "-o --output -p --password -h --help" -- "$cur"))
|
||||
return ;;
|
||||
attach-server)
|
||||
if [[ $((cword - subcmd_pos)) -ne 1 ]]; then return; fi
|
||||
local proj names all_servers proj_servers
|
||||
proj=$(_mcpctl_get_project_value)
|
||||
if [[ -n "$proj" ]]; then
|
||||
all_servers=$(mcpctl get servers -o json 2>/dev/null | jq -r '.[].name' 2>/dev/null)
|
||||
proj_servers=$(mcpctl --project "$proj" get servers -o json 2>/dev/null | jq -r '.[].name' 2>/dev/null)
|
||||
names=$(comm -23 <(echo "$all_servers" | sort) <(echo "$proj_servers" | sort))
|
||||
else
|
||||
names=$(_mcpctl_resource_names "servers")
|
||||
fi
|
||||
COMPREPLY=($(compgen -W "$names" -- "$cur"))
|
||||
return ;;
|
||||
detach-server)
|
||||
if [[ $((cword - subcmd_pos)) -ne 1 ]]; then return; fi
|
||||
local proj names
|
||||
proj=$(_mcpctl_get_project_value)
|
||||
if [[ -n "$proj" ]]; then
|
||||
names=$(mcpctl --project "$proj" get servers -o json 2>/dev/null | jq -r '.[].name' 2>/dev/null)
|
||||
fi
|
||||
COMPREPLY=($(compgen -W "$names" -- "$cur"))
|
||||
return ;;
|
||||
approve)
|
||||
if [[ -z "$resource_type" ]]; then
|
||||
COMPREPLY=($(compgen -W "promptrequest -h --help" -- "$cur"))
|
||||
else
|
||||
local names
|
||||
names=$(_mcpctl_resource_names "$resource_type")
|
||||
COMPREPLY=($(compgen -W "$names -h --help" -- "$cur"))
|
||||
fi
|
||||
return ;;
|
||||
mcp)
|
||||
COMPREPLY=($(compgen -W "-p --project -h --help" -- "$cur"))
|
||||
return ;;
|
||||
console)
|
||||
if [[ $((cword - subcmd_pos)) -eq 1 ]]; then
|
||||
local names
|
||||
names=$(mcpctl get projects -o json 2>/dev/null | jq -r '.[].name' 2>/dev/null)
|
||||
COMPREPLY=($(compgen -W "$names --stdin-mcp --audit -h --help" -- "$cur"))
|
||||
else
|
||||
COMPREPLY=($(compgen -W "--stdin-mcp --audit -h --help" -- "$cur"))
|
||||
fi
|
||||
return ;;
|
||||
cache)
|
||||
local cache_sub=$(_mcpctl_get_subcmd $subcmd_pos)
|
||||
if [[ -z "$cache_sub" ]]; then
|
||||
COMPREPLY=($(compgen -W "stats clear help" -- "$cur"))
|
||||
else
|
||||
case "$cache_sub" in
|
||||
stats)
|
||||
COMPREPLY=($(compgen -W "-h --help" -- "$cur"))
|
||||
;;
|
||||
clear)
|
||||
COMPREPLY=($(compgen -W "--older-than -y --yes -h --help" -- "$cur"))
|
||||
;;
|
||||
*)
|
||||
COMPREPLY=($(compgen -W "-h --help" -- "$cur"))
|
||||
;;
|
||||
esac
|
||||
fi
|
||||
restore)
|
||||
COMPREPLY=($(compgen -W "-i --input -p --password -c --conflict -h --help" -- "$cur"))
|
||||
return ;;
|
||||
help)
|
||||
COMPREPLY=($(compgen -W "$commands" -- "$cur"))
|
||||
return ;;
|
||||
esac
|
||||
|
||||
# No subcommand yet — offer commands based on context
|
||||
if [[ -z "$subcmd" ]]; then
|
||||
if $has_project; then
|
||||
COMPREPLY=($(compgen -W "$project_commands $global_opts" -- "$cur"))
|
||||
else
|
||||
COMPREPLY=($(compgen -W "$commands $global_opts" -- "$cur"))
|
||||
fi
|
||||
if [[ $cword -eq 1 ]]; then
|
||||
COMPREPLY=($(compgen -W "$commands $global_opts" -- "$cur"))
|
||||
fi
|
||||
}
|
||||
|
||||
|
||||
@@ -1,11 +1,6 @@
|
||||
# mcpctl fish completions — auto-generated by scripts/generate-completions.ts
|
||||
# DO NOT EDIT MANUALLY — run: pnpm completions:generate
|
||||
# mcpctl fish completions
|
||||
|
||||
# Erase any stale completions from previous versions
|
||||
complete -c mcpctl -e
|
||||
|
||||
set -l commands status login logout config get describe delete logs create edit apply patch backup approve console cache
|
||||
set -l project_commands get describe delete logs create edit attach-server detach-server
|
||||
set -l commands status login logout config get describe delete logs create edit apply backup restore help
|
||||
|
||||
# Disable file completions by default
|
||||
complete -c mcpctl -f
|
||||
@@ -13,405 +8,74 @@ complete -c mcpctl -f
|
||||
# Global options
|
||||
complete -c mcpctl -s v -l version -d 'Show version'
|
||||
complete -c mcpctl -l daemon-url -d 'mcplocal daemon URL' -x
|
||||
complete -c mcpctl -l direct -d 'bypass mcplocal and connect directly to mcpd'
|
||||
complete -c mcpctl -s p -l project -d 'Target project for project commands' -xa '(__mcpctl_project_names)'
|
||||
complete -c mcpctl -l direct -d 'Bypass mcplocal, connect directly to mcpd'
|
||||
complete -c mcpctl -s h -l help -d 'Show help'
|
||||
|
||||
# ---- Runtime helpers ----
|
||||
# Top-level commands
|
||||
complete -c mcpctl -n "not __fish_seen_subcommand_from $commands" -a status -d 'Show status and connectivity'
|
||||
complete -c mcpctl -n "not __fish_seen_subcommand_from $commands" -a login -d 'Authenticate with mcpd'
|
||||
complete -c mcpctl -n "not __fish_seen_subcommand_from $commands" -a logout -d 'Log out'
|
||||
complete -c mcpctl -n "not __fish_seen_subcommand_from $commands" -a config -d 'Manage configuration'
|
||||
complete -c mcpctl -n "not __fish_seen_subcommand_from $commands" -a get -d 'List resources'
|
||||
complete -c mcpctl -n "not __fish_seen_subcommand_from $commands" -a describe -d 'Show resource details'
|
||||
complete -c mcpctl -n "not __fish_seen_subcommand_from $commands" -a delete -d 'Delete a resource'
|
||||
complete -c mcpctl -n "not __fish_seen_subcommand_from $commands" -a logs -d 'Get instance logs'
|
||||
complete -c mcpctl -n "not __fish_seen_subcommand_from $commands" -a create -d 'Create a resource'
|
||||
complete -c mcpctl -n "not __fish_seen_subcommand_from $commands" -a edit -d 'Edit a resource'
|
||||
complete -c mcpctl -n "not __fish_seen_subcommand_from $commands" -a apply -d 'Apply configuration from file'
|
||||
complete -c mcpctl -n "not __fish_seen_subcommand_from $commands" -a backup -d 'Backup configuration'
|
||||
complete -c mcpctl -n "not __fish_seen_subcommand_from $commands" -a restore -d 'Restore from backup'
|
||||
complete -c mcpctl -n "not __fish_seen_subcommand_from $commands" -a help -d 'Show help'
|
||||
|
||||
# Helper: check if --project or -p was given
|
||||
function __mcpctl_has_project
|
||||
set -l tokens (commandline -opc)
|
||||
for i in (seq (count $tokens))
|
||||
if test "$tokens[$i]" = "--project" -o "$tokens[$i]" = "-p"
|
||||
return 0
|
||||
end
|
||||
end
|
||||
return 1
|
||||
end
|
||||
# Resource types for get/describe/delete/edit
|
||||
set -l resources servers instances secrets templates projects users groups rbac
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from get describe delete" -a "$resources" -d 'Resource type'
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from edit" -a 'servers projects' -d 'Resource type'
|
||||
|
||||
# Resource type detection
|
||||
set -l resources servers instances secrets templates projects users groups rbac prompts promptrequests serverattachments proxymodels all
|
||||
# get/describe/delete options
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from get" -s o -l output -d 'Output format' -xa 'table json yaml'
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from describe" -s o -l output -d 'Output format' -xa 'detail json yaml'
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from describe" -l show-values -d 'Show secret values'
|
||||
|
||||
function __mcpctl_needs_resource_type
|
||||
set -l resource_aliases servers instances secrets templates projects users groups rbac prompts promptrequests serverattachments proxymodels all server srv instance inst secret sec template tpl project proj user group rbac-definition rbac-binding prompt promptrequest pr serverattachment sa proxymodel pm
|
||||
set -l tokens (commandline -opc)
|
||||
set -l found_cmd false
|
||||
for tok in $tokens
|
||||
if $found_cmd
|
||||
if contains -- $tok $resource_aliases
|
||||
return 1 # resource type already present
|
||||
end
|
||||
end
|
||||
if contains -- $tok get describe delete edit patch approve
|
||||
set found_cmd true
|
||||
end
|
||||
end
|
||||
if $found_cmd
|
||||
return 0 # command found but no resource type yet
|
||||
end
|
||||
return 1
|
||||
end
|
||||
|
||||
# Map any resource alias to the canonical plural form for API calls
|
||||
function __mcpctl_resolve_resource
|
||||
switch $argv[1]
|
||||
case server srv servers; echo servers
|
||||
case instance inst instances; echo instances
|
||||
case secret sec secrets; echo secrets
|
||||
case template tpl templates; echo templates
|
||||
case project proj projects; echo projects
|
||||
case user users; echo users
|
||||
case group groups; echo groups
|
||||
case rbac rbac-definition rbac-binding; echo rbac
|
||||
case prompt prompts; echo prompts
|
||||
case promptrequest promptrequests pr; echo promptrequests
|
||||
case serverattachment serverattachments sa; echo serverattachments
|
||||
case proxymodel proxymodels pm; echo proxymodels
|
||||
case all; echo all
|
||||
case '*'; echo $argv[1]
|
||||
end
|
||||
end
|
||||
|
||||
function __mcpctl_get_resource_type
|
||||
set -l resource_aliases servers instances secrets templates projects users groups rbac prompts promptrequests serverattachments proxymodels all server srv instance inst secret sec template tpl project proj user group rbac-definition rbac-binding prompt promptrequest pr serverattachment sa proxymodel pm
|
||||
set -l tokens (commandline -opc)
|
||||
set -l found_cmd false
|
||||
for tok in $tokens
|
||||
if $found_cmd
|
||||
if contains -- $tok $resource_aliases
|
||||
__mcpctl_resolve_resource $tok
|
||||
return
|
||||
end
|
||||
end
|
||||
if contains -- $tok get describe delete edit patch approve
|
||||
set found_cmd true
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
# Fetch resource names dynamically from the API
|
||||
function __mcpctl_resource_names
|
||||
set -l resource (__mcpctl_get_resource_type)
|
||||
if test -z "$resource"
|
||||
return
|
||||
end
|
||||
if test "$resource" = "instances"
|
||||
mcpctl get instances -o json 2>/dev/null | jq -r '.[][].server.name' 2>/dev/null
|
||||
else if test "$resource" = "prompts" -o "$resource" = "promptrequests"
|
||||
mcpctl get $resource -A -o json 2>/dev/null | jq -r '.[].name' 2>/dev/null
|
||||
else
|
||||
mcpctl get $resource -o json 2>/dev/null | jq -r '.[].name' 2>/dev/null
|
||||
end
|
||||
end
|
||||
|
||||
# Fetch project names for --project value
|
||||
function __mcpctl_project_names
|
||||
mcpctl get projects -o json 2>/dev/null | jq -r '.[].name' 2>/dev/null
|
||||
end
|
||||
|
||||
# Helper: get the --project/-p value from the command line
|
||||
function __mcpctl_get_project_value
|
||||
set -l tokens (commandline -opc)
|
||||
for i in (seq (count $tokens))
|
||||
if test "$tokens[$i]" = "--project" -o "$tokens[$i]" = "-p"; and test $i -lt (count $tokens)
|
||||
echo $tokens[(math $i + 1)]
|
||||
return
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
# Servers currently attached to the project (for detach-server)
|
||||
function __mcpctl_project_servers
|
||||
set -l proj (__mcpctl_get_project_value)
|
||||
if test -z "$proj"
|
||||
return
|
||||
end
|
||||
mcpctl --project $proj get servers -o json 2>/dev/null | jq -r '.[].name' 2>/dev/null
|
||||
end
|
||||
|
||||
# Servers NOT attached to the project (for attach-server)
|
||||
function __mcpctl_available_servers
|
||||
set -l proj (__mcpctl_get_project_value)
|
||||
if test -z "$proj"
|
||||
mcpctl get servers -o json 2>/dev/null | jq -r '.[].name' 2>/dev/null
|
||||
return
|
||||
end
|
||||
set -l all (mcpctl get servers -o json 2>/dev/null | jq -r '.[].name' 2>/dev/null)
|
||||
set -l attached (mcpctl --project $proj get servers -o json 2>/dev/null | jq -r '.[].name' 2>/dev/null)
|
||||
for s in $all
|
||||
if not contains -- $s $attached
|
||||
echo $s
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
# Instance names for logs
|
||||
function __mcpctl_instance_names
|
||||
mcpctl get instances -o json 2>/dev/null | jq -r '.[][].server.name' 2>/dev/null
|
||||
end
|
||||
|
||||
# Helper: check if a positional arg has been given for a specific command
|
||||
function __mcpctl_needs_arg_for
|
||||
set -l cmd $argv[1]
|
||||
set -l tokens (commandline -opc)
|
||||
set -l found false
|
||||
for tok in $tokens
|
||||
if $found
|
||||
if not string match -q -- '-*' $tok
|
||||
return 1 # arg already present
|
||||
end
|
||||
end
|
||||
if test "$tok" = "$cmd"
|
||||
set found true
|
||||
end
|
||||
end
|
||||
if $found
|
||||
return 0 # command found but no arg yet
|
||||
end
|
||||
return 1
|
||||
end
|
||||
|
||||
# Helper: check if attach-server/detach-server already has a server argument
|
||||
function __mcpctl_needs_server_arg
|
||||
set -l tokens (commandline -opc)
|
||||
set -l found_cmd false
|
||||
for tok in $tokens
|
||||
if $found_cmd
|
||||
if not string match -q -- '-*' $tok
|
||||
return 1 # server arg already present
|
||||
end
|
||||
end
|
||||
if contains -- $tok attach-server detach-server
|
||||
set found_cmd true
|
||||
end
|
||||
end
|
||||
if $found_cmd
|
||||
return 0
|
||||
end
|
||||
return 1
|
||||
end
|
||||
|
||||
# Helper: check if a specific parent-child subcommand pair is active
|
||||
function __mcpctl_subcmd_active
|
||||
set -l parent $argv[1]
|
||||
set -l child $argv[2]
|
||||
set -l tokens (commandline -opc)
|
||||
set -l found_parent false
|
||||
for tok in $tokens
|
||||
if $found_parent
|
||||
if test "$tok" = "$child"
|
||||
return 0
|
||||
end
|
||||
if not string match -q -- '-*' $tok
|
||||
return 1 # different subcommand
|
||||
end
|
||||
end
|
||||
if test "$tok" = "$parent"
|
||||
set found_parent true
|
||||
end
|
||||
end
|
||||
return 1
|
||||
end
|
||||
|
||||
# Top-level commands (without --project)
|
||||
complete -c mcpctl -n "not __mcpctl_has_project; and not __fish_seen_subcommand_from $commands" -a status -d 'Show mcpctl status and connectivity'
|
||||
complete -c mcpctl -n "not __mcpctl_has_project; and not __fish_seen_subcommand_from $commands" -a login -d 'Authenticate with mcpd'
|
||||
complete -c mcpctl -n "not __mcpctl_has_project; and not __fish_seen_subcommand_from $commands" -a logout -d 'Log out and remove stored credentials'
|
||||
complete -c mcpctl -n "not __mcpctl_has_project; and not __fish_seen_subcommand_from $commands" -a config -d 'Manage mcpctl configuration'
|
||||
complete -c mcpctl -n "not __mcpctl_has_project; and not __fish_seen_subcommand_from $commands" -a get -d 'List resources (servers, projects, instances, all)'
|
||||
complete -c mcpctl -n "not __mcpctl_has_project; and not __fish_seen_subcommand_from $commands" -a describe -d 'Show detailed information about a resource'
|
||||
complete -c mcpctl -n "not __mcpctl_has_project; and not __fish_seen_subcommand_from $commands" -a delete -d 'Delete a resource (server, instance, secret, project, user, group, rbac)'
|
||||
complete -c mcpctl -n "not __mcpctl_has_project; and not __fish_seen_subcommand_from $commands" -a logs -d 'Get logs from an MCP server instance'
|
||||
complete -c mcpctl -n "not __mcpctl_has_project; and not __fish_seen_subcommand_from $commands" -a create -d 'Create a resource (server, secret, project, user, group, rbac, serverattachment, prompt)'
|
||||
complete -c mcpctl -n "not __mcpctl_has_project; and not __fish_seen_subcommand_from $commands" -a edit -d 'Edit a resource in your default editor (server, project)'
|
||||
complete -c mcpctl -n "not __mcpctl_has_project; and not __fish_seen_subcommand_from $commands" -a apply -d 'Apply declarative configuration from a YAML or JSON file'
|
||||
complete -c mcpctl -n "not __mcpctl_has_project; and not __fish_seen_subcommand_from $commands" -a patch -d 'Patch a resource field (e.g. mcpctl patch project myproj llmProvider=none)'
|
||||
complete -c mcpctl -n "not __mcpctl_has_project; and not __fish_seen_subcommand_from $commands" -a backup -d 'Git-based backup status and management'
|
||||
complete -c mcpctl -n "not __mcpctl_has_project; and not __fish_seen_subcommand_from $commands" -a approve -d 'Approve a pending prompt request (atomic: delete request, create prompt)'
|
||||
complete -c mcpctl -n "not __mcpctl_has_project; and not __fish_seen_subcommand_from $commands" -a console -d 'Interactive MCP console — unified timeline with tools, provenance, and lab replay'
|
||||
complete -c mcpctl -n "not __mcpctl_has_project; and not __fish_seen_subcommand_from $commands" -a cache -d 'Manage ProxyModel pipeline cache'
|
||||
|
||||
# Project-scoped commands (with --project)
|
||||
complete -c mcpctl -n "__mcpctl_has_project; and not __fish_seen_subcommand_from $project_commands" -a get -d 'List resources (servers, projects, instances, all)'
|
||||
complete -c mcpctl -n "__mcpctl_has_project; and not __fish_seen_subcommand_from $project_commands" -a describe -d 'Show detailed information about a resource'
|
||||
complete -c mcpctl -n "__mcpctl_has_project; and not __fish_seen_subcommand_from $project_commands" -a delete -d 'Delete a resource (server, instance, secret, project, user, group, rbac)'
|
||||
complete -c mcpctl -n "__mcpctl_has_project; and not __fish_seen_subcommand_from $project_commands" -a logs -d 'Get logs from an MCP server instance'
|
||||
complete -c mcpctl -n "__mcpctl_has_project; and not __fish_seen_subcommand_from $project_commands" -a create -d 'Create a resource (server, secret, project, user, group, rbac, serverattachment, prompt)'
|
||||
complete -c mcpctl -n "__mcpctl_has_project; and not __fish_seen_subcommand_from $project_commands" -a edit -d 'Edit a resource in your default editor (server, project)'
|
||||
complete -c mcpctl -n "__mcpctl_has_project; and not __fish_seen_subcommand_from $project_commands" -a attach-server -d 'Attach a server to a project (requires --project)'
|
||||
complete -c mcpctl -n "__mcpctl_has_project; and not __fish_seen_subcommand_from $project_commands" -a detach-server -d 'Detach a server from a project (requires --project)'
|
||||
|
||||
# Resource types — only when resource type not yet selected
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from get describe delete patch; and __mcpctl_needs_resource_type" -a "$resources" -d 'Resource type'
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from edit; and __mcpctl_needs_resource_type" -a 'servers secrets projects groups rbac prompts promptrequests' -d 'Resource type'
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from approve; and __mcpctl_needs_resource_type" -a 'promptrequest' -d 'Resource type'
|
||||
|
||||
# Resource names — after resource type is selected
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from get describe delete edit patch approve; and not __mcpctl_needs_resource_type" -a '(__mcpctl_resource_names)' -d 'Resource name'
|
||||
# login options
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from login" -l url -d 'mcpd URL' -x
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from login" -l email -d 'Email address' -x
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from login" -l password -d 'Password' -x
|
||||
|
||||
# config subcommands
|
||||
set -l config_cmds view set path reset claude claude-generate setup impersonate
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from config; and not __fish_seen_subcommand_from $config_cmds" -a view -d 'Show current configuration'
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from config; and not __fish_seen_subcommand_from $config_cmds" -a set -d 'Set a configuration value'
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from config; and not __fish_seen_subcommand_from $config_cmds" -a path -d 'Show configuration file path'
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from config; and not __fish_seen_subcommand_from $config_cmds" -a reset -d 'Reset configuration to defaults'
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from config; and not __fish_seen_subcommand_from $config_cmds" -a claude -d 'Generate .mcp.json that connects a project via mcpctl mcp bridge'
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from config; and not __fish_seen_subcommand_from $config_cmds" -a claude-generate -d ''
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from config; and not __fish_seen_subcommand_from $config_cmds" -a setup -d 'Interactive LLM provider setup wizard'
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from config; and not __fish_seen_subcommand_from $config_cmds" -a impersonate -d 'Impersonate another user or return to original identity'
|
||||
|
||||
# config view options
|
||||
complete -c mcpctl -n "__mcpctl_subcmd_active config view" -s o -l output -d 'output format (json, yaml)' -x
|
||||
|
||||
# config claude options
|
||||
complete -c mcpctl -n "__mcpctl_subcmd_active config claude" -s p -l project -d 'Project name' -xa '(__mcpctl_project_names)'
|
||||
complete -c mcpctl -n "__mcpctl_subcmd_active config claude" -s o -l output -d 'Output file path' -x
|
||||
complete -c mcpctl -n "__mcpctl_subcmd_active config claude" -l inspect -d 'Include mcpctl-inspect MCP server for traffic monitoring'
|
||||
complete -c mcpctl -n "__mcpctl_subcmd_active config claude" -l stdout -d 'Print to stdout instead of writing a file'
|
||||
|
||||
# config claude-generate options
|
||||
complete -c mcpctl -n "__mcpctl_subcmd_active config claude-generate" -s p -l project -d 'Project name' -xa '(__mcpctl_project_names)'
|
||||
complete -c mcpctl -n "__mcpctl_subcmd_active config claude-generate" -s o -l output -d 'Output file path' -x
|
||||
complete -c mcpctl -n "__mcpctl_subcmd_active config claude-generate" -l inspect -d 'Include mcpctl-inspect MCP server for traffic monitoring'
|
||||
complete -c mcpctl -n "__mcpctl_subcmd_active config claude-generate" -l stdout -d 'Print to stdout instead of writing a file'
|
||||
|
||||
# config impersonate options
|
||||
complete -c mcpctl -n "__mcpctl_subcmd_active config impersonate" -l quit -d 'Stop impersonating and return to original identity'
|
||||
set -l config_cmds view set path reset claude-generate impersonate
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from config; and not __fish_seen_subcommand_from $config_cmds" -a view -d 'Show configuration'
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from config; and not __fish_seen_subcommand_from $config_cmds" -a set -d 'Set a config value'
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from config; and not __fish_seen_subcommand_from $config_cmds" -a path -d 'Show config file path'
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from config; and not __fish_seen_subcommand_from $config_cmds" -a reset -d 'Reset to defaults'
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from config; and not __fish_seen_subcommand_from $config_cmds" -a claude-generate -d 'Generate .mcp.json'
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from config; and not __fish_seen_subcommand_from $config_cmds" -a impersonate -d 'Impersonate a user'
|
||||
|
||||
# create subcommands
|
||||
set -l create_cmds server secret project user group rbac prompt serverattachment promptrequest
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from create; and not __fish_seen_subcommand_from $create_cmds" -a server -d 'Create an MCP server definition'
|
||||
set -l create_cmds server secret project user group rbac
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from create; and not __fish_seen_subcommand_from $create_cmds" -a server -d 'Create a server'
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from create; and not __fish_seen_subcommand_from $create_cmds" -a secret -d 'Create a secret'
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from create; and not __fish_seen_subcommand_from $create_cmds" -a project -d 'Create a project'
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from create; and not __fish_seen_subcommand_from $create_cmds" -a user -d 'Create a user'
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from create; and not __fish_seen_subcommand_from $create_cmds" -a group -d 'Create a group'
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from create; and not __fish_seen_subcommand_from $create_cmds" -a rbac -d 'Create an RBAC binding definition'
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from create; and not __fish_seen_subcommand_from $create_cmds" -a prompt -d 'Create an approved prompt'
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from create; and not __fish_seen_subcommand_from $create_cmds" -a serverattachment -d 'Attach a server to a project'
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from create; and not __fish_seen_subcommand_from $create_cmds" -a promptrequest -d 'Create a prompt request (pending proposal that needs approval)'
|
||||
|
||||
# create server options
|
||||
complete -c mcpctl -n "__mcpctl_subcmd_active create server" -s d -l description -d 'Server description' -x
|
||||
complete -c mcpctl -n "__mcpctl_subcmd_active create server" -l package-name -d 'Package name (npm, PyPI, Go module, etc.)' -x
|
||||
complete -c mcpctl -n "__mcpctl_subcmd_active create server" -l runtime -d 'Package runtime (node, python, go — default: node)' -x
|
||||
complete -c mcpctl -n "__mcpctl_subcmd_active create server" -l docker-image -d 'Docker image' -x
|
||||
complete -c mcpctl -n "__mcpctl_subcmd_active create server" -l transport -d 'Transport type (STDIO, SSE, STREAMABLE_HTTP)' -x
|
||||
complete -c mcpctl -n "__mcpctl_subcmd_active create server" -l repository-url -d 'Source repository URL' -x
|
||||
complete -c mcpctl -n "__mcpctl_subcmd_active create server" -l external-url -d 'External endpoint URL' -x
|
||||
complete -c mcpctl -n "__mcpctl_subcmd_active create server" -l command -d 'Command argument (repeat for multiple)' -x
|
||||
complete -c mcpctl -n "__mcpctl_subcmd_active create server" -l container-port -d 'Container port number' -x
|
||||
complete -c mcpctl -n "__mcpctl_subcmd_active create server" -l replicas -d 'Number of replicas' -x
|
||||
complete -c mcpctl -n "__mcpctl_subcmd_active create server" -l env -d 'Env var: KEY=value (inline) or KEY=secretRef:SECRET:KEY (secret ref, repeat for multiple)' -x
|
||||
complete -c mcpctl -n "__mcpctl_subcmd_active create server" -l from-template -d 'Create from template (name or name:version)' -x
|
||||
complete -c mcpctl -n "__mcpctl_subcmd_active create server" -l env-from-secret -d 'Map template env vars from a secret' -x
|
||||
complete -c mcpctl -n "__mcpctl_subcmd_active create server" -l force -d 'Update if already exists'
|
||||
|
||||
# create secret options
|
||||
complete -c mcpctl -n "__mcpctl_subcmd_active create secret" -l data -d 'Secret data KEY=value (repeat for multiple)' -x
|
||||
complete -c mcpctl -n "__mcpctl_subcmd_active create secret" -l force -d 'Update if already exists'
|
||||
|
||||
# create project options
|
||||
complete -c mcpctl -n "__mcpctl_subcmd_active create project" -s d -l description -d 'Project description' -x
|
||||
complete -c mcpctl -n "__mcpctl_subcmd_active create project" -l proxy-model -d 'Plugin name (default, content-pipeline, gate, none)' -x
|
||||
complete -c mcpctl -n "__mcpctl_subcmd_active create project" -l prompt -d 'Project-level prompt / instructions for the LLM' -x
|
||||
complete -c mcpctl -n "__mcpctl_subcmd_active create project" -l gated -d '[deprecated: use --proxy-model default]'
|
||||
complete -c mcpctl -n "__mcpctl_subcmd_active create project" -l no-gated -d '[deprecated: use --proxy-model content-pipeline]'
|
||||
complete -c mcpctl -n "__mcpctl_subcmd_active create project" -l server -d 'Server name (repeat for multiple)' -x
|
||||
complete -c mcpctl -n "__mcpctl_subcmd_active create project" -l force -d 'Update if already exists'
|
||||
|
||||
# create user options
|
||||
complete -c mcpctl -n "__mcpctl_subcmd_active create user" -l password -d 'User password' -x
|
||||
complete -c mcpctl -n "__mcpctl_subcmd_active create user" -l name -d 'User display name' -x
|
||||
complete -c mcpctl -n "__mcpctl_subcmd_active create user" -l force -d 'Update if already exists'
|
||||
|
||||
# create group options
|
||||
complete -c mcpctl -n "__mcpctl_subcmd_active create group" -l description -d 'Group description' -x
|
||||
complete -c mcpctl -n "__mcpctl_subcmd_active create group" -l member -d 'Member email (repeat for multiple)' -x
|
||||
complete -c mcpctl -n "__mcpctl_subcmd_active create group" -l force -d 'Update if already exists'
|
||||
|
||||
# create rbac options
|
||||
complete -c mcpctl -n "__mcpctl_subcmd_active create rbac" -l subject -d 'Subject as Kind:name (repeat for multiple)' -x
|
||||
complete -c mcpctl -n "__mcpctl_subcmd_active create rbac" -l binding -d 'Role binding as role:resource (e.g. edit:servers, run:projects)' -x
|
||||
complete -c mcpctl -n "__mcpctl_subcmd_active create rbac" -l operation -d 'Operation binding (e.g. logs, backup)' -x
|
||||
complete -c mcpctl -n "__mcpctl_subcmd_active create rbac" -l force -d 'Update if already exists'
|
||||
|
||||
# create prompt options
|
||||
complete -c mcpctl -n "__mcpctl_subcmd_active create prompt" -s p -l project -d 'Project name to scope the prompt to' -xa '(__mcpctl_project_names)'
|
||||
complete -c mcpctl -n "__mcpctl_subcmd_active create prompt" -l content -d 'Prompt content text' -x
|
||||
complete -c mcpctl -n "__mcpctl_subcmd_active create prompt" -l content-file -d 'Read prompt content from file' -rF
|
||||
complete -c mcpctl -n "__mcpctl_subcmd_active create prompt" -l priority -d 'Priority 1-10 (default: 5, higher = more important)' -x
|
||||
complete -c mcpctl -n "__mcpctl_subcmd_active create prompt" -l link -d 'Link to MCP resource (format: project/server:uri)' -x
|
||||
|
||||
# create serverattachment options
|
||||
complete -c mcpctl -n "__mcpctl_subcmd_active create serverattachment" -s p -l project -d 'Project name' -xa '(__mcpctl_project_names)'
|
||||
|
||||
# create promptrequest options
|
||||
complete -c mcpctl -n "__mcpctl_subcmd_active create promptrequest" -s p -l project -d 'Project name to scope the prompt request to' -xa '(__mcpctl_project_names)'
|
||||
complete -c mcpctl -n "__mcpctl_subcmd_active create promptrequest" -l content -d 'Prompt content text' -x
|
||||
complete -c mcpctl -n "__mcpctl_subcmd_active create promptrequest" -l content-file -d 'Read prompt content from file' -rF
|
||||
complete -c mcpctl -n "__mcpctl_subcmd_active create promptrequest" -l priority -d 'Priority 1-10 (default: 5, higher = more important)' -x
|
||||
|
||||
# backup subcommands
|
||||
set -l backup_cmds log restore
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from backup; and not __fish_seen_subcommand_from $backup_cmds" -a log -d 'Show backup commit history'
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from backup; and not __fish_seen_subcommand_from $backup_cmds" -a restore -d 'Restore mcpctl state from backup history'
|
||||
|
||||
# backup log options
|
||||
complete -c mcpctl -n "__mcpctl_subcmd_active backup log" -s n -l limit -d 'number of commits to show' -x
|
||||
|
||||
# cache subcommands
|
||||
set -l cache_cmds stats clear
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from cache; and not __fish_seen_subcommand_from $cache_cmds" -a stats -d 'Show cache statistics'
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from cache; and not __fish_seen_subcommand_from $cache_cmds" -a clear -d 'Clear cache entries'
|
||||
|
||||
# cache clear options
|
||||
complete -c mcpctl -n "__mcpctl_subcmd_active cache clear" -l older-than -d 'Clear entries older than N days' -x
|
||||
complete -c mcpctl -n "__mcpctl_subcmd_active cache clear" -s y -l yes -d 'Skip confirmation'
|
||||
|
||||
# status options
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from status" -s o -l output -d 'output format (table, json, yaml)' -x
|
||||
|
||||
# login options
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from login" -l mcpd-url -d 'mcpd URL to authenticate against' -x
|
||||
|
||||
# get options
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from get" -s o -l output -d 'output format (table, json, yaml)' -x
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from get" -s p -l project -d 'Filter by project' -xa '(__mcpctl_project_names)'
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from get" -s A -l all -d 'Show all (including project-scoped) resources'
|
||||
|
||||
# describe options
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from describe" -s o -l output -d 'output format (detail, json, yaml)' -x
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from describe" -l show-values -d 'Show secret values (default: masked)'
|
||||
|
||||
# delete options
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from delete" -s p -l project -d 'Project name (for serverattachment)' -xa '(__mcpctl_project_names)'
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from create; and not __fish_seen_subcommand_from $create_cmds" -a rbac -d 'Create an RBAC binding'
|
||||
|
||||
# logs options
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from logs" -s t -l tail -d 'Number of lines to show' -x
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from logs" -s i -l instance -d 'Instance/replica index (0-based, for servers with multiple replicas)' -x
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from logs" -l tail -d 'Number of lines' -x
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from logs" -l since -d 'Since timestamp' -x
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from logs" -s f -l follow -d 'Follow log output'
|
||||
|
||||
# apply options
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from apply" -s f -l file -d 'Path to config file (alternative to positional arg)' -rF
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from apply" -l dry-run -d 'Validate and show changes without applying'
|
||||
# backup options
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from backup" -s o -l output -d 'Output file' -rF
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from backup" -s p -l password -d 'Encryption password' -x
|
||||
|
||||
# console options
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from console" -l stdin-mcp -d 'Run inspector as MCP server over stdin/stdout (for Claude)'
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from console" -l audit -d 'Browse audit events from mcpd'
|
||||
# restore options
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from restore" -s i -l input -d 'Input file' -rF
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from restore" -s p -l password -d 'Decryption password' -x
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from restore" -s c -l conflict -d 'Conflict strategy' -xa 'skip overwrite fail'
|
||||
|
||||
# logs: takes a server/instance name
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from logs; and __mcpctl_needs_arg_for logs" -a '(__mcpctl_instance_names)' -d 'Server name'
|
||||
|
||||
# console: takes a project name
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from console; and __mcpctl_needs_arg_for console" -a '(__mcpctl_project_names)' -d 'Project name'
|
||||
|
||||
# attach-server: show servers NOT in the project (only if no server arg yet)
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from attach-server; and __mcpctl_needs_server_arg" -a '(__mcpctl_available_servers)' -d 'Server'
|
||||
|
||||
# detach-server: show servers IN the project (only if no server arg yet)
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from detach-server; and __mcpctl_needs_server_arg" -a '(__mcpctl_project_servers)' -d 'Server'
|
||||
|
||||
# apply: allow file completions for positional argument
|
||||
# apply takes a file
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from apply" -s f -l file -d 'Configuration file' -rF
|
||||
complete -c mcpctl -n "__fish_seen_subcommand_from apply" -F
|
||||
|
||||
# help completions
|
||||
|
||||
@@ -1,20 +0,0 @@
|
||||
# Docker image for MrMartiniMo/docmost-mcp (TypeScript STDIO MCP server)
|
||||
# Not published to npm, so we clone + build from source.
|
||||
# Includes patches for list_pages pagination and search response handling.
|
||||
FROM node:20-slim
|
||||
|
||||
WORKDIR /mcp
|
||||
|
||||
RUN apt-get update && apt-get install -y git && rm -rf /var/lib/apt/lists/*
|
||||
|
||||
RUN git clone --depth 1 https://github.com/MrMartiniMo/docmost-mcp.git . \
|
||||
&& npm install \
|
||||
&& rm -rf .git
|
||||
|
||||
# Apply our fixes before building
|
||||
COPY deploy/docmost-mcp-fixes.patch /tmp/fixes.patch
|
||||
RUN git init && git add -A && git apply /tmp/fixes.patch && rm -rf .git /tmp/fixes.patch
|
||||
|
||||
RUN npm run build
|
||||
|
||||
ENTRYPOINT ["node", "build/index.js"]
|
||||
@@ -27,8 +27,7 @@ RUN pnpm -F @mcpctl/shared build && pnpm -F @mcpctl/db build && pnpm -F @mcpctl/
|
||||
# Stage 2: Production runtime
|
||||
FROM node:20-alpine
|
||||
|
||||
RUN apk add --no-cache git openssh-client \
|
||||
&& corepack enable && corepack prepare pnpm@9.15.0 --activate
|
||||
RUN corepack enable && corepack prepare pnpm@9.15.0 --activate
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
|
||||
@@ -1,12 +0,0 @@
|
||||
# Base container for Python/uvx-based MCP servers (STDIO transport).
|
||||
# mcpd uses this image to run `uvx <packageName>` when a server
|
||||
# has packageName with runtime=python but no dockerImage.
|
||||
FROM python:3.12-slim
|
||||
|
||||
WORKDIR /mcp
|
||||
|
||||
# Install uv (which provides uvx)
|
||||
RUN pip install --no-cache-dir uv
|
||||
|
||||
# Default entrypoint — overridden by mcpd via container command
|
||||
ENTRYPOINT ["uvx"]
|
||||
@@ -31,7 +31,6 @@ services:
|
||||
MCPD_HOST: "0.0.0.0"
|
||||
MCPD_LOG_LEVEL: info
|
||||
MCPD_NODE_RUNNER_IMAGE: mcpctl-node-runner:latest
|
||||
MCPD_PYTHON_RUNNER_IMAGE: mcpctl-python-runner:latest
|
||||
MCPD_MCP_NETWORK: mcp-servers
|
||||
depends_on:
|
||||
postgres:
|
||||
@@ -61,16 +60,6 @@ services:
|
||||
- build
|
||||
entrypoint: ["echo", "Image built successfully"]
|
||||
|
||||
# Base image for Python/uvx-based MCP servers (built once, used by mcpd)
|
||||
python-runner:
|
||||
build:
|
||||
context: ..
|
||||
dockerfile: deploy/Dockerfile.python-runner
|
||||
image: mcpctl-python-runner:latest
|
||||
profiles:
|
||||
- build
|
||||
entrypoint: ["echo", "Image built successfully"]
|
||||
|
||||
postgres-test:
|
||||
image: postgres:16-alpine
|
||||
container_name: mcpctl-postgres-test
|
||||
|
||||
@@ -1,106 +0,0 @@
|
||||
diff --git a/src/index.ts b/src/index.ts
|
||||
index 83c251d..852ee0e 100644
|
||||
--- a/src/index.ts
|
||||
+++ b/src/index.ts
|
||||
@@ -1,4 +1,4 @@
|
||||
-import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
|
||||
+import { McpServer, ResourceTemplate } from "@modelcontextprotocol/sdk/server/mcp.js";
|
||||
import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
|
||||
import FormData from "form-data";
|
||||
import axios, { AxiosInstance } from "axios";
|
||||
@@ -130,10 +130,18 @@ class DocmostClient {
|
||||
return groups.map((group) => filterGroup(group));
|
||||
}
|
||||
|
||||
- async listPages(spaceId?: string) {
|
||||
- const payload = spaceId ? { spaceId } : {};
|
||||
- const pages = await this.paginateAll("/pages/recent", payload);
|
||||
- return pages.map((page) => filterPage(page));
|
||||
+ async listPages(spaceId?: string, page: number = 1, limit: number = 50) {
|
||||
+ await this.ensureAuthenticated();
|
||||
+ const clampedLimit = Math.max(1, Math.min(100, limit));
|
||||
+ const payload: Record<string, any> = { page, limit: clampedLimit };
|
||||
+ if (spaceId) payload.spaceId = spaceId;
|
||||
+ const response = await this.client.post("/pages/recent", payload);
|
||||
+ const data = response.data;
|
||||
+ const items = data.data?.items || data.items || [];
|
||||
+ return {
|
||||
+ pages: items.map((p: any) => filterPage(p)),
|
||||
+ meta: data.data?.meta || data.meta || {},
|
||||
+ };
|
||||
}
|
||||
|
||||
async listSidebarPages(spaceId: string, pageId: string) {
|
||||
@@ -283,8 +291,9 @@ class DocmostClient {
|
||||
spaceId,
|
||||
});
|
||||
|
||||
- // Filter search results (data is directly an array)
|
||||
- const items = response.data?.data || [];
|
||||
+ // Handle both array and {items: [...]} response formats
|
||||
+ const rawData = response.data?.data;
|
||||
+ const items = Array.isArray(rawData) ? rawData : (rawData?.items || []);
|
||||
const filteredItems = items.map((item: any) => filterSearchResult(item));
|
||||
|
||||
return {
|
||||
@@ -384,13 +393,15 @@ server.registerTool(
|
||||
server.registerTool(
|
||||
"list_pages",
|
||||
{
|
||||
- description: "List pages in a space ordered by updatedAt (descending).",
|
||||
+ description: "List pages in a space ordered by updatedAt (descending). Returns one page of results.",
|
||||
inputSchema: {
|
||||
spaceId: z.string().optional(),
|
||||
+ page: z.number().optional().describe("Page number (default: 1)"),
|
||||
+ limit: z.number().optional().describe("Items per page, 1-100 (default: 50)"),
|
||||
},
|
||||
},
|
||||
- async ({ spaceId }) => {
|
||||
- const result = await docmostClient.listPages(spaceId);
|
||||
+ async ({ spaceId, page, limit }) => {
|
||||
+ const result = await docmostClient.listPages(spaceId, page, limit);
|
||||
return jsonContent(result);
|
||||
},
|
||||
);
|
||||
@@ -544,6 +555,41 @@ server.registerTool(
|
||||
},
|
||||
);
|
||||
|
||||
+// Resource template: docmost://pages/{pageId}
|
||||
+// Allows MCP clients to read page content as resources
|
||||
+server.resource(
|
||||
+ "page",
|
||||
+ new ResourceTemplate("docmost://pages/{pageId}", {
|
||||
+ list: async () => {
|
||||
+ // List recent pages as browsable resources
|
||||
+ try {
|
||||
+ const result = await docmostClient.listPages(undefined, 1, 100);
|
||||
+ return result.pages.map((page: any) => ({
|
||||
+ uri: `docmost://pages/${page.id}`,
|
||||
+ name: page.title || page.id,
|
||||
+ mimeType: "text/markdown",
|
||||
+ }));
|
||||
+ } catch {
|
||||
+ return [];
|
||||
+ }
|
||||
+ },
|
||||
+ }),
|
||||
+ { description: "A Docmost wiki page", mimeType: "text/markdown" },
|
||||
+ async (uri: URL, variables: Record<string, string | string[]>) => {
|
||||
+ const pageId = Array.isArray(variables.pageId) ? variables.pageId[0]! : variables.pageId!;
|
||||
+ const page = await docmostClient.getPage(pageId);
|
||||
+ return {
|
||||
+ contents: [
|
||||
+ {
|
||||
+ uri: uri.href,
|
||||
+ text: page.data.content || `# ${page.data.title || "Untitled"}\n\n(No content)`,
|
||||
+ mimeType: "text/markdown",
|
||||
+ },
|
||||
+ ],
|
||||
+ };
|
||||
+ },
|
||||
+);
|
||||
+
|
||||
async function run() {
|
||||
const transport = new StdioServerTransport();
|
||||
await server.connect(transport);
|
||||
@@ -1,232 +0,0 @@
|
||||
# Gated MCP Sessions: What Claude Recognizes (and What It Doesn't)
|
||||
|
||||
Lessons learned from building and testing mcpctl's gated session system with Claude Code (Opus 4.6, v2.1.59). These patterns apply to any MCP proxy that needs to control tool access through a gate step.
|
||||
|
||||
## The Problem
|
||||
|
||||
When Claude connects to an MCP server, it receives an `initialize` response with `instructions`, then calls `tools/list` to see available tools. In a gated session, we want Claude to call `begin_session` before accessing real tools. This is surprisingly hard to get right because Claude has strong default behaviors that fight against the gate pattern.
|
||||
|
||||
---
|
||||
|
||||
## What Works
|
||||
|
||||
### 1. One gate tool, zero ambiguity
|
||||
|
||||
When `tools/list` returns exactly ONE tool (`begin_session`), Claude recognizes it must call that tool first. Having multiple tools available in the gated state confuses Claude — it may try to call a "real" tool and skip the gate entirely.
|
||||
|
||||
**Working pattern:**
|
||||
```json
|
||||
{
|
||||
"tools": [{
|
||||
"name": "begin_session",
|
||||
"description": "Start your session by providing keywords...",
|
||||
"inputSchema": { ... }
|
||||
}]
|
||||
}
|
||||
```
|
||||
|
||||
### 2. "Check its input schema" instead of naming parameters
|
||||
|
||||
Claude reads the tool's `inputSchema` to understand what arguments are needed. When the instructions **name a specific parameter** that doesn't exist in the schema, Claude gets confused and may not call the tool at all.
|
||||
|
||||
**FAILED — named wrong parameter:**
|
||||
> "Call begin_session with a description of the user's task"
|
||||
|
||||
This failed because the noLLM mode tool has `tags`, not `description`. Claude saw the mismatch between instructions and schema, got confused, and went exploring the filesystem instead.
|
||||
|
||||
**WORKS — schema-agnostic:**
|
||||
> "Call begin_session immediately using the arguments it requires (check its input schema). If it accepts a description, briefly describe the user's task. If it accepts tags, provide 3-7 keywords relevant to the user's request."
|
||||
|
||||
This works for both LLM mode (`description` param) and noLLM mode (`tags` param) because Claude reads the actual schema.
|
||||
|
||||
### 3. Instructions must say "immediately" and "required"
|
||||
|
||||
Without urgency words, Claude may acknowledge the gate exists but decide to "explore first" before calling it. Two critical phrases:
|
||||
|
||||
- **"immediately"** — prevents Claude from doing reconnaissance first
|
||||
- **"required before using other tools"** — makes it clear this isn't optional
|
||||
|
||||
**Working instruction block:**
|
||||
```
|
||||
This project uses a gated session. Before you can access tools, you must start a session by calling begin_session.
|
||||
|
||||
Call begin_session immediately using the arguments it requires (check its input schema).
|
||||
```
|
||||
|
||||
### 4. Show available tools as a preview (names only)
|
||||
|
||||
Listing tool names in the initialize instructions (without making them callable) helps Claude understand what's available and craft better `begin_session` keywords. Claude uses this list to generate relevant tags.
|
||||
|
||||
**Working pattern:**
|
||||
```
|
||||
Available MCP server tools (accessible after begin_session):
|
||||
my-node-red/get_flows
|
||||
my-node-red/create_flow
|
||||
my-home-assistant/ha_get_entity
|
||||
...
|
||||
```
|
||||
|
||||
Claude then produces tags like `["node-red", "flows", "automation"]` — directly informed by the tool names it saw.
|
||||
|
||||
### 5. Show prompt index with priorities
|
||||
|
||||
When the instructions list available prompts with priorities, Claude uses them to choose relevant `begin_session` keywords:
|
||||
|
||||
```
|
||||
Available project prompts:
|
||||
- pnpm (priority 5)
|
||||
- stack (priority 5)
|
||||
|
||||
Choose your begin_session keywords based on which of these prompts seem relevant to your task.
|
||||
```
|
||||
|
||||
### 6. `tools/list_changed` notification after ungating
|
||||
|
||||
After `begin_session` succeeds, the server must send a `notifications/tools/list_changed` notification. Claude then re-fetches `tools/list` and sees all 108+ tools. Without this notification, Claude continues thinking only `begin_session` is available.
|
||||
|
||||
### 7. The intercept fallback (auto-ungate on real tool call)
|
||||
|
||||
If Claude somehow bypasses the gate and calls a real tool directly, the server auto-ungates the session, extracts keywords from the tool call, matches relevant prompts, and prepends the context as a preamble to the tool result. This is a safety net, not the primary path.
|
||||
|
||||
---
|
||||
|
||||
## What Fails
|
||||
|
||||
### 1. Referencing parameters that don't exist in the schema
|
||||
|
||||
If instructions say "call begin_session with a description" but the schema only has `tags`, Claude recognizes the inconsistency and may refuse to call the tool entirely. It falls back to filesystem exploration or asks the user for help.
|
||||
|
||||
**Root cause:** Claude cross-references instruction text against tool schemas. Mismatches create distrust.
|
||||
|
||||
### 2. Complex conditional instructions
|
||||
|
||||
Don't write instructions like:
|
||||
> "If the project is gated, check for begin_session. If begin_session accepts tags, provide tags. Otherwise if it accepts description, provide a description. But first check if..."
|
||||
|
||||
Claude handles simple, direct instructions better than decision trees. One clear path: "Call begin_session immediately, check its input schema for what arguments it needs."
|
||||
|
||||
### 3. Having read_prompts available in gated state
|
||||
|
||||
In early iterations, both `begin_session` and `read_prompts` were available in the gated state. Claude sometimes called `read_prompts` instead of `begin_session`, or tried to use `read_prompts` to understand the environment before beginning the session. This delayed or skipped the gate.
|
||||
|
||||
**Fix:** Only `begin_session` is available when gated. `read_prompts` appears after ungating.
|
||||
|
||||
### 4. Putting gate instructions only in the tool description
|
||||
|
||||
The tool description alone is not enough. Claude reads `instructions` from the initialize response first and forms its plan there. If the initialize instructions don't mention the gate, Claude may ignore the tool description and try to find other ways to accomplish the task.
|
||||
|
||||
**Both are needed:**
|
||||
- Initialize `instructions` field: explains the gate and what to do
|
||||
- Tool `description` field: reinforces the purpose of begin_session
|
||||
|
||||
### 5. Long instructions that bury the call-to-action
|
||||
|
||||
If the initialize instructions contain 200 lines of context before mentioning "call begin_session", Claude may not reach that instruction. The gate call-to-action must be in the **first few lines** of the instructions.
|
||||
|
||||
### 6. Expecting Claude to remember instructions across reconnects
|
||||
|
||||
Each new session starts fresh. Claude doesn't carry over knowledge from previous sessions. The gate instructions must be self-contained in every initialize response.
|
||||
|
||||
---
|
||||
|
||||
## Prompt Scoring: Ensuring Prompts Reach Claude
|
||||
|
||||
### The byte budget problem
|
||||
|
||||
When `begin_session` returns matched prompts, there's a byte budget (default 8KB) to prevent token overflow. Prompts are included in score order until the budget is full. Prompts that don't fit get listed as index-only (name + summary).
|
||||
|
||||
### Scoring formula: `priority + (matchCount * priority)`
|
||||
|
||||
- **Priority alone is the baseline** — every prompt gets at least its priority score
|
||||
- **Tag matches multiply the priority** — relevant prompts score much higher
|
||||
- **Priority 10 = Infinity** — system prompts always included regardless of budget
|
||||
|
||||
**Failed formula:** `matchCount * priority`
|
||||
This meant prompts with zero tag matches scored 0 and were never included, even if they were high-priority global prompts (like "stack" with priority 5). A priority-5 prompt with no tag matches should still compete for inclusion.
|
||||
|
||||
**Working formula:** `priority + (matchCount * priority)`
|
||||
A priority-5 prompt with 0 matches scores 5 (baseline). With 2 matches it scores 15. This ensures global prompts are included when budget allows.
|
||||
|
||||
### Response truncation safety cap
|
||||
|
||||
All responses are capped at 24,000 characters. Larger responses get truncated with a message to use `read_prompts` for the full content. This prevents a single massive prompt from consuming Claude's entire context window.
|
||||
|
||||
---
|
||||
|
||||
## The Complete Flow (What Actually Happens)
|
||||
|
||||
```
|
||||
Client mcplocal upstream servers
|
||||
│ │ │
|
||||
│── initialize ───────────>│ │
|
||||
│<── instructions + caps ──│ (instructions contain │
|
||||
│ │ gate-instructions, │
|
||||
│ │ tool list preview, │
|
||||
│ │ prompt index) │
|
||||
│── tools/list ──────────>│ │
|
||||
│<── [begin_session] ─────│ (ONLY begin_session) │
|
||||
│ │ │
|
||||
│── prompts/list ────────>│ │
|
||||
│<── [] ──────────────────│ (empty - gated) │
|
||||
│ │ │
|
||||
│── resources/list ──────>│ │
|
||||
│<── [prompt resources] ──│ (prompts visible as │
|
||||
│ │ resources always) │
|
||||
│ │ │
|
||||
│ Claude reads instructions, sees begin_session is the │
|
||||
│ only tool, calls it with relevant tags/description │
|
||||
│ │ │
|
||||
│── tools/call ──────────>│ │
|
||||
│ begin_session │── match prompts ────────────>│
|
||||
│ {tags:[...]} │<── prompt content ──────────│
|
||||
│ │ │
|
||||
│<── matched prompts ─────│ (full content of matched │
|
||||
│ + tool list │ prompts, tool names, │
|
||||
│ + encouragement │ encouragement to use │
|
||||
│ │ read_prompts later) │
|
||||
│ │ │
|
||||
│<── notification ────────│ tools/list_changed │
|
||||
│ │ │
|
||||
│── tools/list ──────────>│ │
|
||||
│<── [108 tools] ─────────│ (ALL tools now visible) │
|
||||
│ │ │
|
||||
│ Claude proceeds with the user's original request │
|
||||
│ using the full tool set │
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Testing Gate Behavior
|
||||
|
||||
The MCP Inspector (`mcpctl console --inspect`) is essential for debugging gate issues. It shows the exact sequence of requests/responses between Claude and mcplocal, including:
|
||||
|
||||
- What Claude sees in the initialize response
|
||||
- Whether Claude calls `begin_session` or tries to bypass it
|
||||
- What tags/description Claude provides
|
||||
- What prompts are matched and returned
|
||||
- Whether `tools/list_changed` notification fires
|
||||
- The full tool list after ungating
|
||||
|
||||
Run it alongside Claude Code to see exactly what happens:
|
||||
```bash
|
||||
# Terminal 1: Inspector
|
||||
mcpctl console --inspect
|
||||
|
||||
# Terminal 2: Claude Code connected to the project
|
||||
claude
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Checklist for New Gate Configurations
|
||||
|
||||
- [ ] Initialize instructions mention gate in first 3 lines
|
||||
- [ ] Instructions say "immediately" and "required"
|
||||
- [ ] Instructions say "check its input schema" (not "pass description/tags")
|
||||
- [ ] Only `begin_session` in tools/list when gated
|
||||
- [ ] Tool names listed in instructions as preview
|
||||
- [ ] Prompt index shown with priorities
|
||||
- [ ] `tools/list_changed` notification sent after ungate
|
||||
- [ ] Response size under 24K characters
|
||||
- [ ] Prompt scoring uses baseline priority (not just match count)
|
||||
- [ ] Test with Inspector to verify the full flow
|
||||
@@ -20,13 +20,9 @@ servers:
|
||||
name: ha-secrets
|
||||
key: token
|
||||
|
||||
secrets:
|
||||
- name: ha-secrets
|
||||
data:
|
||||
token: "your-home-assistant-long-lived-access-token"
|
||||
|
||||
projects:
|
||||
- name: smart-home
|
||||
description: "Home automation project"
|
||||
servers:
|
||||
- ha-mcp
|
||||
profiles:
|
||||
- name: production
|
||||
server: ha-mcp
|
||||
envOverrides:
|
||||
HOMEASSISTANT_URL: "https://ha.itaz.eu"
|
||||
HOMEASSISTANT_TOKEN: "<redacted — a real long-lived access token was committed here; revoke it in Home Assistant and reference the ha-secrets secret instead of inlining the value>"
|
||||
|
||||
@@ -1,51 +0,0 @@
|
||||
#!/bin/bash
# Full deployment: Docker image → Portainer stack → RPM build/publish/install
set -e

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
cd "$SCRIPT_DIR"

# Pull deployment config/credentials from .env when present.
if [ -f .env ]; then
  set -a; source .env; set +a
fi

# Print the framed banner used at start and end of the deploy.
banner() {
  echo "========================================"
  echo " $1"
  echo "========================================"
}

# Announce a deploy step, surrounded by blank lines for readability.
step() {
  echo ""
  echo ">>> $1"
  echo ""
}

banner "mcpctl Full Deploy"

step "Step 1/3: Build & push mcpd Docker image"
bash scripts/build-mcpd.sh "$@"

step "Step 2/3: Deploy stack to production"
bash deploy.sh

step "Step 3/3: Build, publish & install RPM"
bash scripts/release.sh

step "Post-deploy: Restart mcplocal"
systemctl --user restart mcplocal
sleep 2

step "Post-deploy: Smoke tests"
export PATH="$HOME/.npm-global/bin:$PATH"
if pnpm test:smoke; then
  echo " Smoke tests passed!"
else
  echo " WARNING: Smoke tests failed! Verify mcplocal + mcpd are healthy."
fi

echo ""
banner "Full deploy complete!"
|
||||
57
i.sh
Normal file
57
i.sh
Normal file
@@ -0,0 +1,57 @@
|
||||
#!/bin/bash
# Workstation shell setup: fish + byobu + atuin with a per-window session scheme.

# 1. Install & Set Fish
sudo dnf install -y fish byobu curl wl-clipboard
chsh -s /usr/bin/fish

# 2. SILENCE THE PROMPTS (The "Wtf" Fix)
mkdir -p ~/.byobu
byobu-ctrl-a emacs

# 3. Configure Byobu Core (Clean Paths)
byobu-enable
mkdir -p ~/.byobu/bin
# We REMOVED the -S flag to stop those random files appearing in your folders
printf '%s\n' \
  "set -g default-shell /usr/bin/fish" \
  "set -g default-command /usr/bin/fish" \
  "set -g mouse off" \
  "set -s set-clipboard on" \
  > ~/.byobu/.tmux.conf

# 4. Create the Smart Mouse Indicator
# (quoted delimiter: the generated script contains no shell expansions)
cat <<'EOF' > ~/.byobu/bin/custom
#!/bin/bash
if tmux show-options -g mouse | grep -q "on"; then
echo "#[fg=green]MOUSE: ON (Nav)#[default]"
else
echo "#[fg=red]Alt+F12 (Copy Mode)#[default]"
fi
EOF
chmod +x ~/.byobu/bin/custom

# 5. Setup Status Bar
printf '%s\n' \
  'tmux_left="session"' \
  'tmux_right="custom cpu_temp load_average"' \
  > ~/.byobu/status

# 6. Atuin Global History — install only when not already present
if ! command -v atuin &> /dev/null; then
  curl --proto '=https' --tlsv1.2 -sSf https://setup.atuin.sh | sh
fi

# 7. Final Fish Config (The Clean Sticky Logic)
mkdir -p ~/.config/fish
# Quoted delimiter keeps $SESSION_NAME literal for fish to expand at runtime.
cat <<'EOF' > ~/.config/fish/config.fish
# Atuin Setup
source ~/.atuin/bin/env.fish
atuin init fish | source

# Start a UNIQUE session per window without cluttering project folders
if status is-interactive
and not set -q BYOBU_RUN_DIR
# We use a human-readable name: FolderName-Time
set SESSION_NAME (basename (pwd))-(date +%H%M)
exec byobu new-session -A -s "$SESSION_NAME"
end
EOF

# Kill any existing server to wipe the old "socket" logic
byobu kill-server 2>/dev/null
echo "Done! No more random files in your project folders."
|
||||
@@ -1,12 +1,10 @@
|
||||
name: mcpctl
|
||||
arch: amd64
|
||||
version: 0.0.1
|
||||
version: 0.1.0
|
||||
release: "1"
|
||||
maintainer: michal
|
||||
description: kubectl-like CLI for managing MCP servers
|
||||
license: MIT
|
||||
depends:
|
||||
- jq
|
||||
contents:
|
||||
- src: ./dist/mcpctl
|
||||
dst: /usr/bin/mcpctl
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "mcpctl",
|
||||
"version": "0.0.1",
|
||||
"version": "0.1.0",
|
||||
"private": true,
|
||||
"description": "kubectl-like CLI for managing MCP servers",
|
||||
"type": "module",
|
||||
@@ -9,7 +9,6 @@
|
||||
"test": "vitest",
|
||||
"test:run": "vitest run",
|
||||
"test:coverage": "vitest run --coverage",
|
||||
"test:smoke": "pnpm --filter mcplocal run test:smoke",
|
||||
"test:ui": "vitest --ui",
|
||||
"lint": "eslint 'src/*/src/**/*.ts'",
|
||||
"lint:fix": "eslint 'src/*/src/**/*.ts' --fix",
|
||||
@@ -17,12 +16,8 @@
|
||||
"db:up": "docker compose -f deploy/docker-compose.yml up -d",
|
||||
"db:down": "docker compose -f deploy/docker-compose.yml down",
|
||||
"typecheck": "tsc --build",
|
||||
"completions:generate": "tsx scripts/generate-completions.ts --write",
|
||||
"completions:check": "tsx scripts/generate-completions.ts --check",
|
||||
"rpm:build": "bash scripts/build-rpm.sh",
|
||||
"rpm:publish": "bash scripts/publish-rpm.sh",
|
||||
"deb:build": "bash scripts/build-deb.sh",
|
||||
"deb:publish": "bash scripts/publish-deb.sh",
|
||||
"release": "bash scripts/release.sh",
|
||||
"mcpd:build": "bash scripts/build-mcpd.sh",
|
||||
"mcpd:deploy": "bash deploy.sh",
|
||||
|
||||
444
pnpm-lock.yaml
generated
444
pnpm-lock.yaml
generated
File diff suppressed because it is too large
Load Diff
55
pr.sh
55
pr.sh
@@ -1,55 +0,0 @@
|
||||
#!/usr/bin/env bash
# Create a Gitea pull request for the current branch.
# Usage: bash pr.sh "PR title" "PR body" [base-branch]
# Loads GITEA_TOKEN from .env automatically

set -euo pipefail

# Load .env if GITEA_TOKEN not already exported
if [ -z "${GITEA_TOKEN:-}" ] && [ -f .env ]; then
  set -a
  source .env
  set +a
fi

GITEA_URL="${GITEA_URL:-http://10.0.0.194:3012}"
REPO="${GITEA_OWNER:-michal}/mcpctl"

TITLE="${1:?Usage: pr.sh <title> [body]}"
BODY="${2:-}"
BASE="${3:-main}"
HEAD=$(git rev-parse --abbrev-ref HEAD)

# Refuse to open a PR from the base branch onto itself.
if [ "$HEAD" = "$BASE" ]; then
  echo "Error: already on $BASE, switch to a feature branch first" >&2
  exit 1
fi

if [ -z "${GITEA_TOKEN:-}" ]; then
  echo "Error: GITEA_TOKEN not set and .env not found" >&2
  exit 1
fi

# Push if needed (set upstream on first push of the branch)
if ! git rev-parse --verify "origin/$HEAD" &>/dev/null; then
  git push -u origin "$HEAD"
else
  git push
fi

# Create PR — jq builds the JSON so title/body are safely escaped.
RESPONSE=$(curl -s -X POST "$GITEA_URL/api/v1/repos/$REPO/pulls" \
  -H "Authorization: token $GITEA_TOKEN" \
  -H "Content-Type: application/json" \
  -d "$(jq -n --arg t "$TITLE" --arg b "$BODY" --arg h "$HEAD" --arg base "$BASE" \
    '{title: $t, body: $b, head: $h, base: $base}')")

PR_NUM=$(echo "$RESPONSE" | jq -r '.number // empty')
PR_URL=$(echo "$RESPONSE" | jq -r '.html_url // empty')

if [ -z "$PR_NUM" ]; then
  echo "Error creating PR:" >&2
  # FIX: route the pretty-printed diagnostic to stderr (it previously went
  # to stdout, polluting anything that captured this script's output).
  echo "$RESPONSE" | jq . >&2 2>/dev/null || echo "$RESPONSE" >&2
  exit 1
fi

# FIX: prefer the html_url the API returned (previously computed but unused,
# with a hard-coded public host in its place); keep the old URL as fallback.
echo "PR #$PR_NUM: ${PR_URL:-https://mysources.co.uk/$REPO/pulls/$PR_NUM}"
|
||||
@@ -1,53 +0,0 @@
|
||||
#!/bin/bash
# Package mcpctl as a .deb, reusing binaries left by build-rpm.sh when present.
set -e

SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
PROJECT_ROOT="$(dirname "$SCRIPT_DIR")"
cd "$PROJECT_ROOT"

# Load .env if present
if [ -f .env ]; then
  set -a; source .env; set +a
fi

# Ensure tools are on PATH
export PATH="$HOME/.npm-global/bin:$HOME/.bun/bin:$HOME/.local/bin:$PATH"

# Test, compile and bundle the standalone binaries into dist/.
build_binaries() {
  echo "==> Binaries not found, building from scratch..."
  echo ""

  echo "==> Running unit tests..."
  pnpm test:run
  echo ""

  echo "==> Building TypeScript..."
  pnpm build

  echo "==> Generating shell completions..."
  pnpm completions:generate

  echo "==> Bundling standalone binaries..."
  mkdir -p dist

  # Ink optionally imports react-devtools-core which isn't installed.
  # Provide a no-op stub so bun can bundle it (it's only invoked when DEV=true).
  if [ ! -e node_modules/react-devtools-core ]; then
    ln -s ../src/cli/stubs/react-devtools-core node_modules/react-devtools-core
  fi

  bun build src/cli/src/index.ts --compile --outfile dist/mcpctl
  bun build src/mcplocal/src/main.ts --compile --outfile dist/mcpctl-local
}

# Reuse existing binaries (build-rpm.sh may have been run first).
if [ -f dist/mcpctl ] && [ -f dist/mcpctl-local ]; then
  echo "==> Using existing binaries in dist/"
else
  build_binaries
fi

echo "==> Packaging DEB..."
rm -f dist/mcpctl-*.deb dist/mcpctl_*.deb
nfpm pkg --packager deb --target dist/

DEB_FILE=$(ls dist/mcpctl*.deb 2>/dev/null | head -1)
echo "==> Built: $DEB_FILE"
echo " Size: $(du -h "$DEB_FILE" | cut -f1)"
dpkg-deb --info "$DEB_FILE" 2>/dev/null || true
|
||||
@@ -1,36 +0,0 @@
|
||||
#!/bin/bash
# Build docmost-mcp Docker image and push to Gitea container registry
set -e

SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
PROJECT_ROOT="$(dirname "$SCRIPT_DIR")"
cd "$PROJECT_ROOT"

# Load .env for GITEA_TOKEN
if [ -f .env ]; then
  set -a; source .env; set +a
fi

# FIX: fail fast with a clear message instead of letting the build run for
# minutes and then fail at `podman login` with an empty password.
if [ -z "${GITEA_TOKEN:-}" ]; then
  echo "Error: GITEA_TOKEN not set. Add it to .env or export it." >&2
  exit 1
fi

# Push directly to internal address (external proxy has body size limit)
REGISTRY="10.0.0.194:3012"
IMAGE="docmost-mcp"
TAG="${1:-latest}"

echo "==> Building docmost-mcp image..."
podman build -t "$IMAGE:$TAG" -f deploy/Dockerfile.docmost-mcp .

echo "==> Tagging as $REGISTRY/michal/$IMAGE:$TAG..."
podman tag "$IMAGE:$TAG" "$REGISTRY/michal/$IMAGE:$TAG"

echo "==> Logging in to $REGISTRY..."
# FIX: feed the token on stdin so it never appears in `ps` output
# (it was previously passed on the command line with -p).
printf '%s' "$GITEA_TOKEN" | podman login --tls-verify=false -u michal --password-stdin "$REGISTRY"

echo "==> Pushing to $REGISTRY/michal/$IMAGE:$TAG..."
podman push --tls-verify=false "$REGISTRY/michal/$IMAGE:$TAG"

# Ensure package is linked to the repository
source "$SCRIPT_DIR/link-package.sh"
link_package "container" "$IMAGE"

echo "==> Done!"
echo " Image: $REGISTRY/michal/$IMAGE:$TAG"
|
||||
@@ -28,9 +28,5 @@ podman login --tls-verify=false -u michal -p "$GITEA_TOKEN" "$REGISTRY"
|
||||
echo "==> Pushing to $REGISTRY/michal/$IMAGE:$TAG..."
|
||||
podman push --tls-verify=false "$REGISTRY/michal/$IMAGE:$TAG"
|
||||
|
||||
# Ensure package is linked to the repository
|
||||
source "$SCRIPT_DIR/link-package.sh"
|
||||
link_package "container" "$IMAGE"
|
||||
|
||||
echo "==> Done!"
|
||||
echo " Image: $REGISTRY/michal/$IMAGE:$TAG"
|
||||
|
||||
@@ -1,36 +0,0 @@
|
||||
#!/bin/bash
# Build python-runner Docker image and push to Gitea container registry
set -e

SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
PROJECT_ROOT="$(dirname "$SCRIPT_DIR")"
cd "$PROJECT_ROOT"

# Load .env for GITEA_TOKEN
if [ -f .env ]; then
  set -a; source .env; set +a
fi

# FIX: fail fast with a clear message instead of letting the build run for
# minutes and then fail at `podman login` with an empty password.
if [ -z "${GITEA_TOKEN:-}" ]; then
  echo "Error: GITEA_TOKEN not set. Add it to .env or export it." >&2
  exit 1
fi

# Push directly to internal address (external proxy has body size limit)
REGISTRY="10.0.0.194:3012"
IMAGE="mcpctl-python-runner"
TAG="${1:-latest}"

echo "==> Building python-runner image..."
podman build -t "$IMAGE:$TAG" -f deploy/Dockerfile.python-runner .

echo "==> Tagging as $REGISTRY/michal/$IMAGE:$TAG..."
podman tag "$IMAGE:$TAG" "$REGISTRY/michal/$IMAGE:$TAG"

echo "==> Logging in to $REGISTRY..."
# FIX: feed the token on stdin so it never appears in `ps` output
# (it was previously passed on the command line with -p).
printf '%s' "$GITEA_TOKEN" | podman login --tls-verify=false -u michal --password-stdin "$REGISTRY"

echo "==> Pushing to $REGISTRY/michal/$IMAGE:$TAG..."
podman push --tls-verify=false "$REGISTRY/michal/$IMAGE:$TAG"

# Ensure package is linked to the repository
source "$SCRIPT_DIR/link-package.sh"
link_package "container" "$IMAGE"

echo "==> Done!"
echo " Image: $REGISTRY/michal/$IMAGE:$TAG"
|
||||
@@ -13,26 +13,12 @@ fi
|
||||
# Ensure tools are on PATH
|
||||
export PATH="$HOME/.npm-global/bin:$HOME/.bun/bin:$HOME/.local/bin:$PATH"
|
||||
|
||||
echo "==> Running unit tests..."
|
||||
pnpm test:run
|
||||
echo ""
|
||||
|
||||
echo "==> Building TypeScript..."
|
||||
pnpm build
|
||||
|
||||
echo "==> Generating shell completions..."
|
||||
pnpm completions:generate
|
||||
|
||||
echo "==> Bundling standalone binaries..."
|
||||
mkdir -p dist
|
||||
rm -f dist/mcpctl dist/mcpctl-local dist/mcpctl-*.rpm
|
||||
|
||||
# Ink optionally imports react-devtools-core which isn't installed.
|
||||
# Provide a no-op stub so bun can bundle it (it's only invoked when DEV=true).
|
||||
if [ ! -e node_modules/react-devtools-core ]; then
|
||||
ln -s ../src/cli/stubs/react-devtools-core node_modules/react-devtools-core
|
||||
fi
|
||||
|
||||
bun build src/cli/src/index.ts --compile --outfile dist/mcpctl
|
||||
bun build src/mcplocal/src/main.ts --compile --outfile dist/mcpctl-local
|
||||
|
||||
@@ -43,12 +29,3 @@ RPM_FILE=$(ls dist/mcpctl-*.rpm 2>/dev/null | head -1)
|
||||
echo "==> Built: $RPM_FILE"
|
||||
echo " Size: $(du -h "$RPM_FILE" | cut -f1)"
|
||||
rpm -qpi "$RPM_FILE"
|
||||
|
||||
echo ""
|
||||
echo "==> Packaging DEB..."
|
||||
rm -f dist/mcpctl*.deb
|
||||
nfpm pkg --packager deb --target dist/
|
||||
|
||||
DEB_FILE=$(ls dist/mcpctl*.deb 2>/dev/null | head -1)
|
||||
echo "==> Built: $DEB_FILE"
|
||||
echo " Size: $(du -h "$DEB_FILE" | cut -f1)"
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,65 +0,0 @@
|
||||
#!/bin/bash
# Link a Gitea package to a repository.
# Works automatically on Gitea 1.24+ (uses API), warns on older versions.
#
# Usage: source scripts/link-package.sh
#        link_package <type> <name>
#
# Requires: GITEA_URL, GITEA_TOKEN, GITEA_OWNER, GITEA_REPO
# Best-effort: always returns 0 once arguments are valid, so callers
# running under `set -e` are never aborted by a linking failure.

link_package() {
  local PKG_TYPE="$1" # e.g. "rpm", "container"
  local PKG_NAME="$2" # e.g. "mcpctl", "mcpd"

  if [ -z "$PKG_TYPE" ] || [ -z "$PKG_NAME" ]; then
    echo "Usage: link_package <type> <name>"
    return 1
  fi

  local GITEA_URL="${GITEA_URL:-http://10.0.0.194:3012}"
  local GITEA_OWNER="${GITEA_OWNER:-michal}"
  local GITEA_REPO="${GITEA_REPO:-mcpctl}"

  if [ -z "$GITEA_TOKEN" ]; then
    echo "WARNING: GITEA_TOKEN not set, skipping package-repo linking."
    return 0
  fi

  # Check if already linked (search all packages, filter by type+name client-side).
  # FIX: filter with jq --arg (jq is already a declared dependency of the
  # package and is used by pr.sh) instead of string-interpolating
  # $PKG_TYPE/$PKG_NAME into an inline python3 program, which misbehaved —
  # or executed attacker-chosen code — when either value contained a quote.
  # `|| true` keeps the best-effort contract under callers' `set -e`.
  local REPO_LINK
  REPO_LINK=$(curl -s -H "Authorization: token ${GITEA_TOKEN}" \
    "${GITEA_URL}/api/v1/packages/${GITEA_OWNER}" \
    | jq -r --arg t "$PKG_TYPE" --arg n "$PKG_NAME" \
        'first(.[] | select(.type == $t and .name == $n) | .repository.full_name // empty)' \
    2>/dev/null) || true

  if [ -n "$REPO_LINK" ]; then
    echo "==> Package ${PKG_TYPE}/${PKG_NAME} already linked to ${REPO_LINK}"
    return 0
  fi

  # Try Gitea 1.24+ link API
  local HTTP_CODE
  HTTP_CODE=$(curl -s -o /dev/null -w "%{http_code}" -X POST \
    -H "Authorization: token ${GITEA_TOKEN}" \
    "${GITEA_URL}/api/v1/packages/${GITEA_OWNER}/${PKG_TYPE}/${PKG_NAME}/-/link/${GITEA_REPO}")

  if [ "$HTTP_CODE" = "201" ] || [ "$HTTP_CODE" = "200" ]; then
    echo "==> Linked ${PKG_TYPE}/${PKG_NAME} to ${GITEA_OWNER}/${GITEA_REPO}"
    return 0
  fi

  # API not available (Gitea < 1.24) — warn with manual instructions
  local PUBLIC_URL="${GITEA_PUBLIC_URL:-${GITEA_URL}}"
  echo ""
  echo "WARNING: Could not auto-link ${PKG_TYPE}/${PKG_NAME} to repository (Gitea < 1.24)."
  echo "Link it manually in the Gitea UI:"
  echo " ${PUBLIC_URL}/${GITEA_OWNER}/-/packages/${PKG_TYPE}/${PKG_NAME}/settings"
  echo " -> Link to repository: ${GITEA_OWNER}/${GITEA_REPO}"
  return 0
}
|
||||
@@ -1,71 +0,0 @@
|
||||
#!/bin/bash
# Publish the built .deb to the Gitea Debian registry (one upload per distro).
set -e

SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
PROJECT_ROOT="$(dirname "$SCRIPT_DIR")"
cd "$PROJECT_ROOT"

# Load .env if present
if [ -f .env ]; then
  set -a; source .env; set +a
fi

GITEA_URL="${GITEA_URL:-http://10.0.0.194:3012}"
GITEA_PUBLIC_URL="${GITEA_PUBLIC_URL:-https://mysources.co.uk}"
GITEA_OWNER="${GITEA_OWNER:-michal}"
GITEA_REPO="${GITEA_REPO:-mcpctl}"

if [ -z "$GITEA_TOKEN" ]; then
  echo "Error: GITEA_TOKEN not set. Add it to .env or export it."
  exit 1
fi

DEB_FILE=$(ls dist/mcpctl*.deb 2>/dev/null | head -1)
if [ -z "$DEB_FILE" ]; then
  echo "Error: No DEB found in dist/. Run scripts/build-deb.sh first."
  exit 1
fi

# Extract version from the deb filename (e.g. mcpctl_0.0.1_amd64.deb)
DEB_VERSION=$(dpkg-deb --field "$DEB_FILE" Version 2>/dev/null || echo "unknown")

echo "==> Publishing $DEB_FILE (version $DEB_VERSION) to ${GITEA_URL}..."

# Gitea Debian registry: PUT /api/packages/{owner}/debian/pool/{distribution}/{component}/upload
# We publish to each supported distribution.
# Debian: trixie (13/stable), forky (14/testing)
# Ubuntu: noble (24.04 LTS), plucky (25.04)
DISTRIBUTIONS="trixie forky noble plucky"

# FIX: capture curl responses in a mktemp file instead of a predictable
# world-writable /tmp path (avoids collisions between concurrent runs and
# symlink attacks); the trap guarantees cleanup even on early exit.
UPLOAD_OUT=$(mktemp)
trap 'rm -f "$UPLOAD_OUT"' EXIT

for DIST in $DISTRIBUTIONS; do
  echo " -> $DIST..."
  HTTP_CODE=$(curl -s -o "$UPLOAD_OUT" -w "%{http_code}" \
    -X PUT \
    -H "Authorization: token ${GITEA_TOKEN}" \
    --upload-file "$DEB_FILE" \
    "${GITEA_URL}/api/packages/${GITEA_OWNER}/debian/pool/${DIST}/main/upload")

  if [ "$HTTP_CODE" = "201" ] || [ "$HTTP_CODE" = "200" ]; then
    echo " Published to $DIST"
  elif [ "$HTTP_CODE" = "409" ]; then
    # 409 = this exact package version already exists in that distribution.
    echo " Already exists in $DIST (skipping)"
  else
    echo " WARNING: Upload to $DIST returned HTTP $HTTP_CODE"
    cat "$UPLOAD_OUT" 2>/dev/null || true
    echo ""
  fi
done

echo ""
echo "==> Published successfully!"

# Ensure package is linked to the repository
source "$SCRIPT_DIR/link-package.sh"
link_package "debian" "mcpctl"

echo ""
echo "Install with:"
echo " echo \"deb ${GITEA_PUBLIC_URL}/api/packages/${GITEA_OWNER}/debian trixie main\" | sudo tee /etc/apt/sources.list.d/mcpctl.list"
echo " curl -fsSL ${GITEA_PUBLIC_URL}/api/packages/${GITEA_OWNER}/debian/repository.key | sudo gpg --dearmor -o /etc/apt/keyrings/mcpctl.gpg"
echo " sudo apt update && sudo apt install mcpctl"
||||
@@ -11,9 +11,7 @@ if [ -f .env ]; then
|
||||
fi
|
||||
|
||||
GITEA_URL="${GITEA_URL:-http://10.0.0.194:3012}"
|
||||
GITEA_PUBLIC_URL="${GITEA_PUBLIC_URL:-https://mysources.co.uk}"
|
||||
GITEA_OWNER="${GITEA_OWNER:-michal}"
|
||||
GITEA_REPO="${GITEA_REPO:-mcpctl}"
|
||||
|
||||
if [ -z "$GITEA_TOKEN" ]; then
|
||||
echo "Error: GITEA_TOKEN not set. Add it to .env or export it."
|
||||
@@ -51,11 +49,7 @@ curl --fail -s -X PUT \
|
||||
|
||||
echo ""
|
||||
echo "==> Published successfully!"
|
||||
|
||||
# Ensure package is linked to the repository
|
||||
source "$SCRIPT_DIR/link-package.sh"
|
||||
link_package "rpm" "mcpctl"
|
||||
|
||||
echo ""
|
||||
echo "Install with:"
|
||||
echo " sudo dnf install mcpctl # if repo already configured"
|
||||
echo " sudo dnf config-manager --add-repo ${GITEA_URL}/api/packages/${GITEA_OWNER}/rpm.repo"
|
||||
echo " sudo dnf install mcpctl"
|
||||
|
||||
@@ -20,7 +20,6 @@ echo ""
|
||||
|
||||
# Publish
|
||||
bash scripts/publish-rpm.sh
|
||||
bash scripts/publish-deb.sh
|
||||
|
||||
echo ""
|
||||
|
||||
@@ -34,30 +33,9 @@ echo "==> Installed:"
|
||||
mcpctl --version
|
||||
echo ""
|
||||
|
||||
# Restart mcplocal so smoke tests run against the new binary
|
||||
echo "==> Restarting mcplocal..."
|
||||
systemctl --user restart mcplocal
|
||||
sleep 2
|
||||
|
||||
# Run smoke tests (requires live mcplocal + mcpd)
|
||||
echo "==> Running smoke tests..."
|
||||
export PATH="$HOME/.npm-global/bin:$PATH"
|
||||
if pnpm test:smoke; then
|
||||
echo "==> Smoke tests passed!"
|
||||
else
|
||||
echo "==> WARNING: Smoke tests failed! Check mcplocal/mcpd are running."
|
||||
echo " Continuing anyway — deployment is complete, but verify manually."
|
||||
fi
|
||||
echo ""
|
||||
|
||||
GITEA_PUBLIC_URL="${GITEA_PUBLIC_URL:-https://mysources.co.uk}"
|
||||
GITEA_URL="${GITEA_URL:-http://10.0.0.194:3012}"
|
||||
GITEA_OWNER="${GITEA_OWNER:-michal}"
|
||||
echo "=== Done! ==="
|
||||
echo "RPM install:"
|
||||
echo " sudo dnf config-manager --add-repo ${GITEA_PUBLIC_URL}/api/packages/${GITEA_OWNER}/rpm.repo"
|
||||
echo "Others can install with:"
|
||||
echo " sudo dnf config-manager --add-repo ${GITEA_URL}/api/packages/${GITEA_OWNER}/rpm.repo"
|
||||
echo " sudo dnf install mcpctl"
|
||||
echo ""
|
||||
echo "DEB install (Debian/Ubuntu):"
|
||||
echo " echo \"deb ${GITEA_PUBLIC_URL}/api/packages/${GITEA_OWNER}/debian trixie main\" | sudo tee /etc/apt/sources.list.d/mcpctl.list"
|
||||
echo " curl -fsSL ${GITEA_PUBLIC_URL}/api/packages/${GITEA_OWNER}/debian/repository.key | sudo gpg --dearmor -o /etc/apt/keyrings/mcpctl.gpg"
|
||||
echo " sudo apt update && sudo apt install mcpctl"
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "@mcpctl/cli",
|
||||
"version": "0.0.1",
|
||||
"version": "0.1.0",
|
||||
"private": true,
|
||||
"type": "module",
|
||||
"bin": {
|
||||
@@ -16,22 +16,16 @@
|
||||
"test:run": "vitest run"
|
||||
},
|
||||
"dependencies": {
|
||||
"@inkjs/ui": "^2.0.0",
|
||||
"@mcpctl/db": "workspace:*",
|
||||
"@mcpctl/shared": "workspace:*",
|
||||
"chalk": "^5.4.0",
|
||||
"commander": "^13.0.0",
|
||||
"diff": "^8.0.3",
|
||||
"ink": "^6.8.0",
|
||||
"inquirer": "^12.0.0",
|
||||
"js-yaml": "^4.1.0",
|
||||
"react": "^19.2.4",
|
||||
"zod": "^3.24.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@types/diff": "^8.0.0",
|
||||
"@types/js-yaml": "^4.0.9",
|
||||
"@types/node": "^25.3.0",
|
||||
"@types/react": "^19.2.14"
|
||||
"@types/node": "^25.3.0"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -24,10 +24,7 @@ export class ApiError extends Error {
|
||||
function request<T>(method: string, url: string, timeout: number, body?: unknown, token?: string): Promise<ApiResponse<T>> {
|
||||
return new Promise((resolve, reject) => {
|
||||
const parsed = new URL(url);
|
||||
const headers: Record<string, string> = {};
|
||||
if (body !== undefined) {
|
||||
headers['Content-Type'] = 'application/json';
|
||||
}
|
||||
const headers: Record<string, string> = { 'Content-Type': 'application/json' };
|
||||
if (token) {
|
||||
headers['Authorization'] = `Bearer ${token}`;
|
||||
}
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
import { Command } from 'commander';
|
||||
import { readFileSync, readSync } from 'node:fs';
|
||||
import { readFileSync } from 'node:fs';
|
||||
import yaml from 'js-yaml';
|
||||
import { z } from 'zod';
|
||||
import type { ApiClient } from '../api-client.js';
|
||||
@@ -24,7 +24,6 @@ const ServerSpecSchema = z.object({
|
||||
name: z.string().min(1),
|
||||
description: z.string().default(''),
|
||||
packageName: z.string().optional(),
|
||||
runtime: z.string().optional(),
|
||||
dockerImage: z.string().optional(),
|
||||
transport: z.enum(['STDIO', 'SSE', 'STREAMABLE_HTTP']).default('STDIO'),
|
||||
repositoryUrl: z.string().url().optional(),
|
||||
@@ -53,7 +52,6 @@ const TemplateSpecSchema = z.object({
|
||||
version: z.string().default('1.0.0'),
|
||||
description: z.string().default(''),
|
||||
packageName: z.string().optional(),
|
||||
runtime: z.string().optional(),
|
||||
dockerImage: z.string().optional(),
|
||||
transport: z.enum(['STDIO', 'SSE', 'STREAMABLE_HTTP']).default('STDIO'),
|
||||
repositoryUrl: z.string().optional(),
|
||||
@@ -78,19 +76,18 @@ const GroupSpecSchema = z.object({
|
||||
});
|
||||
|
||||
const RbacSubjectSchema = z.object({
|
||||
kind: z.enum(['User', 'Group', 'ServiceAccount']),
|
||||
kind: z.enum(['User', 'Group']),
|
||||
name: z.string().min(1),
|
||||
});
|
||||
|
||||
const RESOURCE_ALIASES: Record<string, string> = {
|
||||
server: 'servers', instance: 'instances', secret: 'secrets',
|
||||
project: 'projects', template: 'templates', user: 'users', group: 'groups',
|
||||
prompt: 'prompts', promptrequest: 'promptrequests',
|
||||
};
|
||||
|
||||
const RbacRoleBindingSchema = z.union([
|
||||
z.object({
|
||||
role: z.enum(['edit', 'view', 'create', 'delete', 'run', 'expose']),
|
||||
role: z.enum(['edit', 'view', 'create', 'delete', 'run']),
|
||||
resource: z.string().min(1).transform((r) => RESOURCE_ALIASES[r] ?? r),
|
||||
name: z.string().min(1).optional(),
|
||||
}),
|
||||
@@ -106,30 +103,14 @@ const RbacBindingSpecSchema = z.object({
|
||||
roleBindings: z.array(RbacRoleBindingSchema).default([]),
|
||||
});
|
||||
|
||||
const PromptSpecSchema = z.object({
|
||||
name: z.string().min(1).max(100).regex(/^[a-z0-9-]+$/),
|
||||
content: z.string().min(1).max(50000).optional(),
|
||||
projectId: z.string().optional(),
|
||||
project: z.string().optional(),
|
||||
priority: z.number().int().min(1).max(10).optional(),
|
||||
link: z.string().optional(),
|
||||
linkTarget: z.string().optional(),
|
||||
});
|
||||
|
||||
const ServerAttachmentSpecSchema = z.object({
|
||||
server: z.string().min(1),
|
||||
project: z.string().min(1),
|
||||
});
|
||||
|
||||
const ProjectSpecSchema = z.object({
|
||||
name: z.string().min(1),
|
||||
description: z.string().default(''),
|
||||
prompt: z.string().max(10000).default(''),
|
||||
proxyModel: z.string().optional(),
|
||||
gated: z.boolean().optional(),
|
||||
proxyMode: z.enum(['direct', 'filtered']).default('direct'),
|
||||
llmProvider: z.string().optional(),
|
||||
llmModel: z.string().optional(),
|
||||
servers: z.array(z.string()).default([]),
|
||||
members: z.array(z.string().email()).default([]),
|
||||
});
|
||||
|
||||
const ApplyConfigSchema = z.object({
|
||||
@@ -139,10 +120,8 @@ const ApplyConfigSchema = z.object({
|
||||
groups: z.array(GroupSpecSchema).default([]),
|
||||
projects: z.array(ProjectSpecSchema).default([]),
|
||||
templates: z.array(TemplateSpecSchema).default([]),
|
||||
serverattachments: z.array(ServerAttachmentSpecSchema).default([]),
|
||||
rbacBindings: z.array(RbacBindingSpecSchema).default([]),
|
||||
rbac: z.array(RbacBindingSpecSchema).default([]),
|
||||
prompts: z.array(PromptSpecSchema).default([]),
|
||||
}).transform((data) => ({
|
||||
...data,
|
||||
// Merge rbac into rbacBindings so both keys work
|
||||
@@ -179,9 +158,7 @@ export function createApplyCommand(deps: ApplyCommandDeps): Command {
|
||||
if (config.groups.length > 0) log(` ${config.groups.length} group(s)`);
|
||||
if (config.projects.length > 0) log(` ${config.projects.length} project(s)`);
|
||||
if (config.templates.length > 0) log(` ${config.templates.length} template(s)`);
|
||||
if (config.serverattachments.length > 0) log(` ${config.serverattachments.length} serverattachment(s)`);
|
||||
if (config.rbacBindings.length > 0) log(` ${config.rbacBindings.length} rbacBinding(s)`);
|
||||
if (config.prompts.length > 0) log(` ${config.prompts.length} prompt(s)`);
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -189,78 +166,14 @@ export function createApplyCommand(deps: ApplyCommandDeps): Command {
|
||||
});
|
||||
}
|
||||
|
||||
function readStdin(): string {
|
||||
const chunks: Buffer[] = [];
|
||||
const buf = Buffer.alloc(4096);
|
||||
try {
|
||||
// eslint-disable-next-line no-constant-condition
|
||||
while (true) {
|
||||
const bytesRead = readSync(0, buf, 0, buf.length, null);
|
||||
if (bytesRead === 0) break;
|
||||
chunks.push(buf.subarray(0, bytesRead));
|
||||
}
|
||||
} catch {
|
||||
// EOF or closed pipe
|
||||
}
|
||||
return Buffer.concat(chunks).toString('utf-8');
|
||||
}
|
||||
|
||||
/** Map singular kind → plural resource key used by ApplyConfigSchema */
|
||||
const KIND_TO_RESOURCE: Record<string, string> = {
|
||||
server: 'servers',
|
||||
project: 'projects',
|
||||
secret: 'secrets',
|
||||
template: 'templates',
|
||||
user: 'users',
|
||||
group: 'groups',
|
||||
rbac: 'rbac',
|
||||
prompt: 'prompts',
|
||||
promptrequest: 'promptrequests',
|
||||
serverattachment: 'serverattachments',
|
||||
};
|
||||
|
||||
/**
|
||||
* Convert multi-doc format (array of {kind, ...} items) into the grouped
|
||||
* format that ApplyConfigSchema expects.
|
||||
*/
|
||||
function multiDocToGrouped(docs: Array<Record<string, unknown>>): Record<string, unknown[]> {
|
||||
const grouped: Record<string, unknown[]> = {};
|
||||
for (const doc of docs) {
|
||||
const kind = doc.kind as string;
|
||||
const resource = KIND_TO_RESOURCE[kind] ?? kind;
|
||||
const { kind: _k, ...rest } = doc;
|
||||
if (!grouped[resource]) grouped[resource] = [];
|
||||
grouped[resource].push(rest);
|
||||
}
|
||||
return grouped;
|
||||
}
|
||||
|
||||
function loadConfigFile(path: string): ApplyConfig {
|
||||
const raw = path === '-' ? readStdin() : readFileSync(path, 'utf-8');
|
||||
const raw = readFileSync(path, 'utf-8');
|
||||
let parsed: unknown;
|
||||
|
||||
const isJson = path === '-' ? raw.trimStart().startsWith('{') || raw.trimStart().startsWith('[') : path.endsWith('.json');
|
||||
if (isJson) {
|
||||
if (path.endsWith('.json')) {
|
||||
parsed = JSON.parse(raw);
|
||||
} else {
|
||||
// Try multi-document YAML first
|
||||
const docs: unknown[] = [];
|
||||
yaml.loadAll(raw, (doc) => docs.push(doc));
|
||||
const allDocs = docs.flatMap((d) => Array.isArray(d) ? d : [d]) as Array<Record<string, unknown>>;
|
||||
if (allDocs.length > 0 && allDocs[0] != null && 'kind' in allDocs[0]) {
|
||||
// Multi-doc or single doc with kind field
|
||||
parsed = multiDocToGrouped(allDocs);
|
||||
} else {
|
||||
parsed = docs[0]; // Fall back to single-doc grouped format
|
||||
}
|
||||
}
|
||||
|
||||
// JSON: handle array of {kind, ...} docs
|
||||
if (Array.isArray(parsed)) {
|
||||
const arr = parsed as Array<Record<string, unknown>>;
|
||||
if (arr.length > 0 && arr[0] != null && 'kind' in arr[0]) {
|
||||
parsed = multiDocToGrouped(arr);
|
||||
}
|
||||
parsed = yaml.load(raw);
|
||||
}
|
||||
|
||||
return ApplyConfigSchema.parse(parsed);
|
||||
@@ -269,59 +182,15 @@ function loadConfigFile(path: string): ApplyConfig {
|
||||
async function applyConfig(client: ApiClient, config: ApplyConfig, log: (...args: unknown[]) => void): Promise<void> {
|
||||
// Apply order: secrets, servers, users, groups, projects, templates, rbacBindings
|
||||
|
||||
// Cache for name→record lookups to avoid repeated API calls (rate limit protection)
|
||||
const nameCache = new Map<string, Map<string, { id: string; [key: string]: unknown }>>();
|
||||
|
||||
async function cachedFindByName(resource: string, name: string): Promise<{ id: string; [key: string]: unknown } | null> {
|
||||
if (!nameCache.has(resource)) {
|
||||
try {
|
||||
const items = await client.get<Array<{ id: string; name: string }>>(`/api/v1/${resource}`);
|
||||
const map = new Map<string, { id: string; [key: string]: unknown }>();
|
||||
for (const item of items) {
|
||||
if (item.name) map.set(item.name, item);
|
||||
}
|
||||
nameCache.set(resource, map);
|
||||
} catch {
|
||||
nameCache.set(resource, new Map());
|
||||
}
|
||||
}
|
||||
return nameCache.get(resource)!.get(name) ?? null;
|
||||
}
|
||||
|
||||
/** Invalidate a resource cache after a create/update so subsequent lookups see it */
|
||||
function invalidateCache(resource: string): void {
|
||||
nameCache.delete(resource);
|
||||
}
|
||||
|
||||
/** Retry a function on 429 rate-limit errors with exponential backoff */
|
||||
async function withRetry<T>(fn: () => Promise<T>, maxRetries = 5): Promise<T> {
|
||||
for (let attempt = 0; ; attempt++) {
|
||||
try {
|
||||
return await fn();
|
||||
} catch (err) {
|
||||
const msg = err instanceof Error ? err.message : String(err);
|
||||
if (attempt < maxRetries && msg.includes('429')) {
|
||||
const delay = 2000 * Math.pow(2, attempt); // 2s, 4s, 8s, 16s, 32s
|
||||
process.stderr.write(`\r\x1b[33mRate limited, retrying in ${delay / 1000}s...\x1b[0m`);
|
||||
await new Promise((r) => setTimeout(r, delay));
|
||||
process.stderr.write('\r\x1b[K'); // clear the line
|
||||
continue;
|
||||
}
|
||||
throw err;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Apply secrets
|
||||
for (const secret of config.secrets) {
|
||||
try {
|
||||
const existing = await cachedFindByName('secrets', secret.name);
|
||||
const existing = await findByName(client, 'secrets', secret.name);
|
||||
if (existing) {
|
||||
await withRetry(() => client.put(`/api/v1/secrets/${existing.id}`, { data: secret.data }));
|
||||
await client.put(`/api/v1/secrets/${(existing as { id: string }).id}`, { data: secret.data });
|
||||
log(`Updated secret: ${secret.name}`);
|
||||
} else {
|
||||
await withRetry(() => client.post('/api/v1/secrets', secret));
|
||||
invalidateCache('secrets');
|
||||
await client.post('/api/v1/secrets', secret);
|
||||
log(`Created secret: ${secret.name}`);
|
||||
}
|
||||
} catch (err) {
|
||||
@@ -332,13 +201,12 @@ async function applyConfig(client: ApiClient, config: ApplyConfig, log: (...args
|
||||
// Apply servers
|
||||
for (const server of config.servers) {
|
||||
try {
|
||||
const existing = await cachedFindByName('servers', server.name);
|
||||
const existing = await findByName(client, 'servers', server.name);
|
||||
if (existing) {
|
||||
await withRetry(() => client.put(`/api/v1/servers/${existing.id}`, server));
|
||||
await client.put(`/api/v1/servers/${(existing as { id: string }).id}`, server);
|
||||
log(`Updated server: ${server.name}`);
|
||||
} else {
|
||||
await withRetry(() => client.post('/api/v1/servers', server));
|
||||
invalidateCache('servers');
|
||||
await client.post('/api/v1/servers', server);
|
||||
log(`Created server: ${server.name}`);
|
||||
}
|
||||
} catch (err) {
|
||||
@@ -349,13 +217,12 @@ async function applyConfig(client: ApiClient, config: ApplyConfig, log: (...args
|
||||
// Apply users (matched by email)
|
||||
for (const user of config.users) {
|
||||
try {
|
||||
// Users use email, not name — use uncached findByField
|
||||
const existing = await findByField(client, 'users', 'email', user.email);
|
||||
if (existing) {
|
||||
await withRetry(() => client.put(`/api/v1/users/${(existing as { id: string }).id}`, user));
|
||||
await client.put(`/api/v1/users/${(existing as { id: string }).id}`, user);
|
||||
log(`Updated user: ${user.email}`);
|
||||
} else {
|
||||
await withRetry(() => client.post('/api/v1/users', user));
|
||||
await client.post('/api/v1/users', user);
|
||||
log(`Created user: ${user.email}`);
|
||||
}
|
||||
} catch (err) {
|
||||
@@ -366,13 +233,12 @@ async function applyConfig(client: ApiClient, config: ApplyConfig, log: (...args
|
||||
// Apply groups
|
||||
for (const group of config.groups) {
|
||||
try {
|
||||
const existing = await cachedFindByName('groups', group.name);
|
||||
const existing = await findByName(client, 'groups', group.name);
|
||||
if (existing) {
|
||||
await withRetry(() => client.put(`/api/v1/groups/${existing.id}`, group));
|
||||
await client.put(`/api/v1/groups/${(existing as { id: string }).id}`, group);
|
||||
log(`Updated group: ${group.name}`);
|
||||
} else {
|
||||
await withRetry(() => client.post('/api/v1/groups', group));
|
||||
invalidateCache('groups');
|
||||
await client.post('/api/v1/groups', group);
|
||||
log(`Created group: ${group.name}`);
|
||||
}
|
||||
} catch (err) {
|
||||
@@ -380,16 +246,15 @@ async function applyConfig(client: ApiClient, config: ApplyConfig, log: (...args
|
||||
}
|
||||
}
|
||||
|
||||
// Apply projects (send full spec including servers)
|
||||
// Apply projects (send full spec including servers/members)
|
||||
for (const project of config.projects) {
|
||||
try {
|
||||
const existing = await cachedFindByName('projects', project.name);
|
||||
const existing = await findByName(client, 'projects', project.name);
|
||||
if (existing) {
|
||||
await withRetry(() => client.put(`/api/v1/projects/${existing.id}`, project));
|
||||
await client.put(`/api/v1/projects/${(existing as { id: string }).id}`, project);
|
||||
log(`Updated project: ${project.name}`);
|
||||
} else {
|
||||
await withRetry(() => client.post('/api/v1/projects', project));
|
||||
invalidateCache('projects');
|
||||
await client.post('/api/v1/projects', project);
|
||||
log(`Created project: ${project.name}`);
|
||||
}
|
||||
} catch (err) {
|
||||
@@ -400,13 +265,12 @@ async function applyConfig(client: ApiClient, config: ApplyConfig, log: (...args
|
||||
// Apply templates
|
||||
for (const template of config.templates) {
|
||||
try {
|
||||
const existing = await cachedFindByName('templates', template.name);
|
||||
const existing = await findByName(client, 'templates', template.name);
|
||||
if (existing) {
|
||||
await withRetry(() => client.put(`/api/v1/templates/${existing.id}`, template));
|
||||
await client.put(`/api/v1/templates/${(existing as { id: string }).id}`, template);
|
||||
log(`Updated template: ${template.name}`);
|
||||
} else {
|
||||
await withRetry(() => client.post('/api/v1/templates', template));
|
||||
invalidateCache('templates');
|
||||
await client.post('/api/v1/templates', template);
|
||||
log(`Created template: ${template.name}`);
|
||||
}
|
||||
} catch (err) {
|
||||
@@ -414,120 +278,29 @@ async function applyConfig(client: ApiClient, config: ApplyConfig, log: (...args
|
||||
}
|
||||
}
|
||||
|
||||
// Apply server attachments (after projects and servers exist)
|
||||
for (const sa of config.serverattachments) {
|
||||
try {
|
||||
const project = await cachedFindByName('projects', sa.project);
|
||||
if (!project) {
|
||||
log(`Error applying serverattachment: project '${sa.project}' not found`);
|
||||
continue;
|
||||
}
|
||||
await withRetry(() => client.post(`/api/v1/projects/${project.id}/servers`, { server: sa.server }));
|
||||
log(`Attached server '${sa.server}' to project '${sa.project}'`);
|
||||
} catch (err) {
|
||||
const msg = err instanceof Error ? err.message : String(err);
|
||||
// Ignore "already attached" conflicts silently
|
||||
if (msg.includes('409') || msg.includes('already')) {
|
||||
log(`Server '${sa.server}' already attached to project '${sa.project}'`);
|
||||
} else {
|
||||
log(`Error applying serverattachment '${sa.project}/${sa.server}': ${msg}`);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Apply RBAC bindings
|
||||
for (const rbacBinding of config.rbacBindings) {
|
||||
try {
|
||||
const existing = await cachedFindByName('rbac', rbacBinding.name);
|
||||
const existing = await findByName(client, 'rbac', rbacBinding.name);
|
||||
if (existing) {
|
||||
await withRetry(() => client.put(`/api/v1/rbac/${existing.id}`, rbacBinding));
|
||||
await client.put(`/api/v1/rbac/${(existing as { id: string }).id}`, rbacBinding);
|
||||
log(`Updated rbacBinding: ${rbacBinding.name}`);
|
||||
} else {
|
||||
await withRetry(() => client.post('/api/v1/rbac', rbacBinding));
|
||||
invalidateCache('rbac');
|
||||
await client.post('/api/v1/rbac', rbacBinding);
|
||||
log(`Created rbacBinding: ${rbacBinding.name}`);
|
||||
}
|
||||
} catch (err) {
|
||||
log(`Error applying rbacBinding '${rbacBinding.name}': ${err instanceof Error ? err.message : err}`);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Apply prompts — project-scoped: same name in different projects are distinct resources.
|
||||
// Cache project-scoped prompt lookups separately from global cache.
|
||||
const promptProjectIds = new Map<string, string>();
|
||||
const projectPromptCache = new Map<string, Map<string, { id: string; [key: string]: unknown }>>();
|
||||
|
||||
async function findPromptInProject(name: string, projectId: string | undefined): Promise<{ id: string; [key: string]: unknown } | null> {
|
||||
// Global prompts (no project) — use standard cache
|
||||
if (!projectId) {
|
||||
return cachedFindByName('prompts', name);
|
||||
}
|
||||
// Project-scoped: query prompts filtered by projectId
|
||||
if (!projectPromptCache.has(projectId)) {
|
||||
try {
|
||||
const items = await client.get<Array<{ id: string; name: string; projectId?: string }>>(`/api/v1/prompts?projectId=${projectId}`);
|
||||
const map = new Map<string, { id: string; [key: string]: unknown }>();
|
||||
for (const item of items) {
|
||||
if (item.name) map.set(item.name, item);
|
||||
}
|
||||
projectPromptCache.set(projectId, map);
|
||||
} catch {
|
||||
projectPromptCache.set(projectId, new Map());
|
||||
}
|
||||
}
|
||||
return projectPromptCache.get(projectId)!.get(name) ?? null;
|
||||
}
|
||||
|
||||
for (const prompt of config.prompts) {
|
||||
try {
|
||||
// Resolve project name → projectId if needed
|
||||
let projectId = prompt.projectId;
|
||||
if (!projectId && prompt.project) {
|
||||
if (promptProjectIds.has(prompt.project)) {
|
||||
projectId = promptProjectIds.get(prompt.project)!;
|
||||
} else {
|
||||
const proj = await cachedFindByName('projects', prompt.project);
|
||||
if (!proj) {
|
||||
log(`Error applying prompt '${prompt.name}': project '${prompt.project}' not found`);
|
||||
continue;
|
||||
}
|
||||
projectId = proj.id;
|
||||
promptProjectIds.set(prompt.project, projectId);
|
||||
}
|
||||
}
|
||||
|
||||
// Normalize: accept both `link` and `linkTarget`, prefer `link`
|
||||
const linkTarget = prompt.link ?? prompt.linkTarget;
|
||||
|
||||
// Linked prompts use placeholder content if none provided
|
||||
const content = prompt.content ?? (linkTarget ? `Linked prompt — content fetched from ${linkTarget}` : '');
|
||||
if (!content) {
|
||||
log(`Error applying prompt '${prompt.name}': content is required (or provide link)`);
|
||||
continue;
|
||||
}
|
||||
|
||||
// Build API body (strip the `project` name field, use projectId)
|
||||
const body: Record<string, unknown> = { name: prompt.name, content };
|
||||
if (projectId) body.projectId = projectId;
|
||||
if (prompt.priority !== undefined) body.priority = prompt.priority;
|
||||
if (linkTarget) body.linkTarget = linkTarget;
|
||||
|
||||
const existing = await findPromptInProject(prompt.name, projectId);
|
||||
if (existing) {
|
||||
const updateData: Record<string, unknown> = { content };
|
||||
if (projectId) updateData.projectId = projectId;
|
||||
if (prompt.priority !== undefined) updateData.priority = prompt.priority;
|
||||
if (linkTarget) updateData.linkTarget = linkTarget;
|
||||
await withRetry(() => client.put(`/api/v1/prompts/${existing.id}`, updateData));
|
||||
log(`Updated prompt: ${prompt.name}`);
|
||||
} else {
|
||||
await withRetry(() => client.post('/api/v1/prompts', body));
|
||||
projectPromptCache.delete(projectId ?? '');
|
||||
log(`Created prompt: ${prompt.name}`);
|
||||
}
|
||||
} catch (err) {
|
||||
log(`Error applying prompt '${prompt.name}': ${err instanceof Error ? err.message : err}`);
|
||||
}
|
||||
async function findByName(client: ApiClient, resource: string, name: string): Promise<unknown | null> {
|
||||
try {
|
||||
const items = await client.get<Array<{ name: string }>>(`/api/v1/${resource}`);
|
||||
return items.find((item) => item.name === name) ?? null;
|
||||
} catch {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
import { Command } from 'commander';
|
||||
import fs from 'node:fs';
|
||||
import type { ApiClient } from '../api-client.js';
|
||||
|
||||
export interface BackupDeps {
|
||||
@@ -6,247 +7,74 @@ export interface BackupDeps {
|
||||
log: (...args: unknown[]) => void;
|
||||
}
|
||||
|
||||
interface BackupStatus {
|
||||
enabled: boolean;
|
||||
repoUrl: string | null;
|
||||
publicKey: string | null;
|
||||
gitReachable: boolean;
|
||||
lastSyncAt: string | null;
|
||||
lastPushAt: string | null;
|
||||
lastError: string | null;
|
||||
pendingCount: number;
|
||||
}
|
||||
|
||||
interface LogEntry {
|
||||
hash: string;
|
||||
date: string;
|
||||
author: string;
|
||||
message: string;
|
||||
manual: boolean;
|
||||
}
|
||||
|
||||
export function createBackupCommand(deps: BackupDeps): Command {
|
||||
const cmd = new Command('backup')
|
||||
.description('Git-based backup status and management')
|
||||
.action(async () => {
|
||||
const status = await deps.client.get<BackupStatus>('/api/v1/backup/status');
|
||||
|
||||
if (!status.enabled) {
|
||||
deps.log('Backup: disabled');
|
||||
deps.log('');
|
||||
deps.log('To enable, create a backup-ssh secret:');
|
||||
deps.log(' mcpctl create secret backup-ssh --data repoUrl=ssh://git@host/repo.git');
|
||||
deps.log('');
|
||||
deps.log('After creating the secret, restart mcpd. An SSH keypair will be');
|
||||
deps.log('auto-generated and stored in the secret. Run mcpctl backup to see');
|
||||
deps.log('the public key, then add it as a deploy key in your git host.');
|
||||
return;
|
||||
.description('Backup mcpctl configuration to a JSON file')
|
||||
.option('-o, --output <path>', 'output file path', 'mcpctl-backup.json')
|
||||
.option('-p, --password <password>', 'encrypt sensitive values with password')
|
||||
.option('-r, --resources <types>', 'resource types to backup (comma-separated: servers,profiles,projects)')
|
||||
.action(async (options: { output: string; password?: string; resources?: string }) => {
|
||||
const body: Record<string, unknown> = {};
|
||||
if (options.password) {
|
||||
body.password = options.password;
|
||||
}
|
||||
if (options.resources) {
|
||||
body.resources = options.resources.split(',').map((s) => s.trim());
|
||||
}
|
||||
|
||||
deps.log(`Repo: ${status.repoUrl}`);
|
||||
|
||||
if (status.gitReachable) {
|
||||
if (status.pendingCount === 0) {
|
||||
deps.log('Status: synced');
|
||||
} else {
|
||||
deps.log(`Status: ${status.pendingCount} changes pending`);
|
||||
}
|
||||
} else {
|
||||
deps.log('Status: disconnected');
|
||||
}
|
||||
|
||||
if (status.lastSyncAt) {
|
||||
const ago = timeAgo(status.lastSyncAt);
|
||||
deps.log(`Last sync: ${ago}`);
|
||||
}
|
||||
if (status.lastPushAt) {
|
||||
const ago = timeAgo(status.lastPushAt);
|
||||
deps.log(`Last push: ${ago}`);
|
||||
}
|
||||
if (status.lastError) {
|
||||
deps.log(`Error: ${status.lastError}`);
|
||||
}
|
||||
if (status.publicKey) {
|
||||
deps.log('');
|
||||
deps.log(`SSH key: ${status.publicKey}`);
|
||||
}
|
||||
const bundle = await deps.client.post('/api/v1/backup', body);
|
||||
fs.writeFileSync(options.output, JSON.stringify(bundle, null, 2), 'utf-8');
|
||||
deps.log(`Backup saved to ${options.output}`);
|
||||
});
|
||||
|
||||
cmd
|
||||
.command('log')
|
||||
.description('Show backup commit history')
|
||||
.option('-n, --limit <count>', 'number of commits to show', '20')
|
||||
.action(async (opts: { limit: string }) => {
|
||||
const { entries } = await deps.client.get<{ entries: LogEntry[] }>(
|
||||
`/api/v1/backup/log?limit=${opts.limit}`,
|
||||
);
|
||||
|
||||
if (entries.length === 0) {
|
||||
deps.log('No backup history');
|
||||
return;
|
||||
}
|
||||
|
||||
// Header
|
||||
const hashW = 9;
|
||||
const dateW = 20;
|
||||
const authorW = 15;
|
||||
deps.log(
|
||||
'COMMIT'.padEnd(hashW) +
|
||||
'DATE'.padEnd(dateW) +
|
||||
'AUTHOR'.padEnd(authorW) +
|
||||
'MESSAGE',
|
||||
);
|
||||
|
||||
for (const e of entries) {
|
||||
const hash = e.hash.slice(0, 7);
|
||||
const date = new Date(e.date).toLocaleString('en-GB', {
|
||||
day: '2-digit', month: '2-digit', year: 'numeric',
|
||||
hour: '2-digit', minute: '2-digit',
|
||||
});
|
||||
const author = e.author.replace(/<.*>/, '').trim();
|
||||
const marker = e.manual ? ' [manual]' : '';
|
||||
deps.log(
|
||||
hash.padEnd(hashW) +
|
||||
date.padEnd(dateW) +
|
||||
author.slice(0, authorW - 1).padEnd(authorW) +
|
||||
e.message + marker,
|
||||
);
|
||||
}
|
||||
});
|
||||
|
||||
// ── Restore subcommand group ──
|
||||
const restore = new Command('restore')
|
||||
.description('Restore mcpctl state from backup history');
|
||||
|
||||
restore
|
||||
.command('list')
|
||||
.description('List available restore points')
|
||||
.option('-n, --limit <count>', 'number of entries', '30')
|
||||
.action(async (opts: { limit: string }) => {
|
||||
const { entries } = await deps.client.get<{ entries: LogEntry[] }>(
|
||||
`/api/v1/backup/log?limit=${opts.limit}`,
|
||||
);
|
||||
|
||||
if (entries.length === 0) {
|
||||
deps.log('No restore points available');
|
||||
return;
|
||||
}
|
||||
|
||||
deps.log(
|
||||
'COMMIT'.padEnd(9) +
|
||||
'DATE'.padEnd(20) +
|
||||
'USER'.padEnd(15) +
|
||||
'MESSAGE',
|
||||
);
|
||||
|
||||
for (const e of entries) {
|
||||
const hash = e.hash.slice(0, 7);
|
||||
const date = new Date(e.date).toLocaleString('en-GB', {
|
||||
day: '2-digit', month: '2-digit', year: 'numeric',
|
||||
hour: '2-digit', minute: '2-digit',
|
||||
});
|
||||
const author = e.author.replace(/<.*>/, '').trim();
|
||||
deps.log(
|
||||
hash.padEnd(9) +
|
||||
date.padEnd(20) +
|
||||
author.slice(0, 14).padEnd(15) +
|
||||
e.message,
|
||||
);
|
||||
}
|
||||
});
|
||||
|
||||
restore
|
||||
.command('diff <commit>')
|
||||
.description('Preview what restoring to a commit would change')
|
||||
.action(async (commit: string) => {
|
||||
const preview = await deps.client.post<{
|
||||
targetCommit: string;
|
||||
targetDate: string;
|
||||
targetMessage: string;
|
||||
added: string[];
|
||||
removed: string[];
|
||||
modified: string[];
|
||||
}>('/api/v1/backup/restore/preview', { commit });
|
||||
|
||||
deps.log(`Target: ${preview.targetCommit.slice(0, 7)} — ${preview.targetMessage}`);
|
||||
deps.log(`Date: ${new Date(preview.targetDate).toLocaleString()}`);
|
||||
deps.log('');
|
||||
|
||||
if (preview.added.length === 0 && preview.removed.length === 0 && preview.modified.length === 0) {
|
||||
deps.log('No changes — already at this state.');
|
||||
return;
|
||||
}
|
||||
|
||||
for (const f of preview.added) deps.log(` + ${f}`);
|
||||
for (const f of preview.modified) deps.log(` ~ ${f}`);
|
||||
for (const f of preview.removed) deps.log(` - ${f}`);
|
||||
|
||||
deps.log('');
|
||||
deps.log(`Total: ${preview.added.length} added, ${preview.modified.length} modified, ${preview.removed.length} removed`);
|
||||
});
|
||||
|
||||
restore
|
||||
.command('to <commit>')
|
||||
.description('Restore to a specific commit')
|
||||
.option('--force', 'skip confirmation', false)
|
||||
.action(async (commit: string, opts: { force: boolean }) => {
|
||||
// Show preview first
|
||||
const preview = await deps.client.post<{
|
||||
targetCommit: string;
|
||||
targetDate: string;
|
||||
targetMessage: string;
|
||||
added: string[];
|
||||
removed: string[];
|
||||
modified: string[];
|
||||
}>('/api/v1/backup/restore/preview', { commit });
|
||||
|
||||
const totalChanges = preview.added.length + preview.removed.length + preview.modified.length;
|
||||
|
||||
if (totalChanges === 0) {
|
||||
deps.log('No changes — already at this state.');
|
||||
return;
|
||||
}
|
||||
|
||||
deps.log(`Restoring to ${preview.targetCommit.slice(0, 7)} — ${preview.targetMessage}`);
|
||||
deps.log(` ${preview.added.length} added, ${preview.modified.length} modified, ${preview.removed.length} removed`);
|
||||
|
||||
if (!opts.force) {
|
||||
deps.log('');
|
||||
deps.log('Use --force to proceed. Current state will be saved as a timeline branch.');
|
||||
return;
|
||||
}
|
||||
|
||||
const result = await deps.client.post<{
|
||||
branchName: string;
|
||||
applied: number;
|
||||
deleted: number;
|
||||
errors: string[];
|
||||
}>('/api/v1/backup/restore', { commit });
|
||||
|
||||
deps.log('');
|
||||
deps.log(`Restored: ${result.applied} applied, ${result.deleted} deleted`);
|
||||
deps.log(`Previous state saved as branch '${result.branchName}'`);
|
||||
|
||||
if (result.errors.length > 0) {
|
||||
deps.log('Errors:');
|
||||
for (const err of result.errors) {
|
||||
deps.log(` - ${err}`);
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
cmd.addCommand(restore);
|
||||
|
||||
return cmd;
|
||||
}
|
||||
|
||||
function timeAgo(iso: string): string {
|
||||
const ms = Date.now() - new Date(iso).getTime();
|
||||
const secs = Math.floor(ms / 1000);
|
||||
if (secs < 60) return `${secs}s ago`;
|
||||
const mins = Math.floor(secs / 60);
|
||||
if (mins < 60) return `${mins}m ago`;
|
||||
const hours = Math.floor(mins / 60);
|
||||
if (hours < 24) return `${hours}h ago`;
|
||||
return `${Math.floor(hours / 24)}d ago`;
|
||||
export function createRestoreCommand(deps: BackupDeps): Command {
|
||||
const cmd = new Command('restore')
|
||||
.description('Restore mcpctl configuration from a backup file')
|
||||
.option('-i, --input <path>', 'backup file path', 'mcpctl-backup.json')
|
||||
.option('-p, --password <password>', 'decryption password for encrypted backups')
|
||||
.option('-c, --conflict <strategy>', 'conflict resolution: skip, overwrite, fail', 'skip')
|
||||
.action(async (options: { input: string; password?: string; conflict: string }) => {
|
||||
if (!fs.existsSync(options.input)) {
|
||||
deps.log(`Error: File not found: ${options.input}`);
|
||||
return;
|
||||
}
|
||||
|
||||
const raw = fs.readFileSync(options.input, 'utf-8');
|
||||
const bundle = JSON.parse(raw) as unknown;
|
||||
|
||||
const body: Record<string, unknown> = {
|
||||
bundle,
|
||||
conflictStrategy: options.conflict,
|
||||
};
|
||||
if (options.password) {
|
||||
body.password = options.password;
|
||||
}
|
||||
|
||||
const result = await deps.client.post<{
|
||||
serversCreated: number;
|
||||
serversSkipped: number;
|
||||
profilesCreated: number;
|
||||
profilesSkipped: number;
|
||||
projectsCreated: number;
|
||||
projectsSkipped: number;
|
||||
errors: string[];
|
||||
}>('/api/v1/restore', body);
|
||||
|
||||
deps.log('Restore complete:');
|
||||
deps.log(` Servers: ${result.serversCreated} created, ${result.serversSkipped} skipped`);
|
||||
deps.log(` Profiles: ${result.profilesCreated} created, ${result.profilesSkipped} skipped`);
|
||||
deps.log(` Projects: ${result.projectsCreated} created, ${result.projectsSkipped} skipped`);
|
||||
|
||||
if (result.errors.length > 0) {
|
||||
deps.log(` Errors:`);
|
||||
for (const err of result.errors) {
|
||||
deps.log(` - ${err}`);
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
return cmd;
|
||||
}
|
||||
|
||||
@@ -1,137 +0,0 @@
|
||||
import { Command } from 'commander';
|
||||
import http from 'node:http';
|
||||
|
||||
/** Injected dependencies for the cache command (testable logging + daemon URL). */
export interface CacheCommandDeps {
  /** Output sink for all user-facing messages. */
  log: (...args: string[]) => void;
  /** Base URL of the mcplocal daemon; defaults to http://localhost:3200 when omitted. */
  mcplocalUrl?: string;
}
|
||||
|
||||
/** Per-namespace cache statistics as reported by the mcplocal stats endpoint. */
interface NamespaceStats {
  /** Namespace identifier. */
  name: string;
  /** Number of cached entries in this namespace. */
  entries: number;
  /** Total size in bytes. */
  size: number;
  /** Epoch ms of the oldest entry; 0 is rendered as '-' by formatAge. */
  oldestMs: number;
  /** Epoch ms of the newest entry; 0 is rendered as '-' by formatAge. */
  newestMs: number;
}
|
||||
|
||||
/** Aggregate cache statistics returned by GET /cache/stats. */
interface CacheStats {
  /** Filesystem root directory of the cache. */
  rootDir: string;
  /** Total size in bytes across all namespaces. */
  totalSize: number;
  /** Total entry count across all namespaces. */
  totalEntries: number;
  /** Per-namespace breakdown. */
  namespaces: NamespaceStats[];
}
|
||||
|
||||
/** Result of a DELETE /cache request. */
interface ClearResult {
  /** Number of entries removed. */
  removed: number;
  /** Bytes reclaimed by the removal. */
  freedBytes: number;
}
|
||||
|
||||
function formatBytes(bytes: number): string {
|
||||
if (bytes === 0) return '0 B';
|
||||
const units = ['B', 'KB', 'MB', 'GB'];
|
||||
const i = Math.min(Math.floor(Math.log(bytes) / Math.log(1024)), units.length - 1);
|
||||
const val = bytes / Math.pow(1024, i);
|
||||
return `${val < 10 ? val.toFixed(1) : Math.round(val)} ${units[i]}`;
|
||||
}
|
||||
|
||||
function formatAge(ms: number): string {
|
||||
if (ms === 0) return '-';
|
||||
const age = Date.now() - ms;
|
||||
const days = Math.floor(age / (24 * 60 * 60 * 1000));
|
||||
if (days > 0) return `${days}d ago`;
|
||||
const hours = Math.floor(age / (60 * 60 * 1000));
|
||||
if (hours > 0) return `${hours}h ago`;
|
||||
const mins = Math.floor(age / (60 * 1000));
|
||||
return `${mins}m ago`;
|
||||
}
|
||||
|
||||
function fetchJson<T>(url: string, method = 'GET'): Promise<T> {
|
||||
return new Promise((resolve, reject) => {
|
||||
const req = http.request(url, { method, timeout: 5000 }, (res) => {
|
||||
let data = '';
|
||||
res.on('data', (chunk: Buffer) => { data += chunk.toString(); });
|
||||
res.on('end', () => {
|
||||
try {
|
||||
resolve(JSON.parse(data) as T);
|
||||
} catch {
|
||||
reject(new Error(`Invalid response from mcplocal: ${data.slice(0, 200)}`));
|
||||
}
|
||||
});
|
||||
});
|
||||
req.on('error', () => reject(new Error('Cannot connect to mcplocal. Is it running?')));
|
||||
req.on('timeout', () => { req.destroy(); reject(new Error('mcplocal request timed out')); });
|
||||
req.end();
|
||||
});
|
||||
}
|
||||
|
||||
export function createCacheCommand(deps: CacheCommandDeps): Command {
|
||||
const cache = new Command('cache')
|
||||
.description('Manage ProxyModel pipeline cache');
|
||||
|
||||
const mcplocalUrl = deps.mcplocalUrl ?? 'http://localhost:3200';
|
||||
|
||||
cache
|
||||
.command('stats')
|
||||
.description('Show cache statistics')
|
||||
.action(async () => {
|
||||
const stats = await fetchJson<CacheStats>(`${mcplocalUrl}/cache/stats`);
|
||||
|
||||
if (stats.totalEntries === 0) {
|
||||
deps.log('Cache is empty.');
|
||||
return;
|
||||
}
|
||||
|
||||
deps.log(`Cache: ${formatBytes(stats.totalSize)} total, ${stats.totalEntries} entries`);
|
||||
deps.log(`Path: ${stats.rootDir}`);
|
||||
deps.log('');
|
||||
|
||||
// Table header
|
||||
const pad = (s: string, w: number) => s.padEnd(w);
|
||||
deps.log(
|
||||
`${pad('NAMESPACE', 40)} ${pad('ENTRIES', 8)} ${pad('SIZE', 10)} ${pad('OLDEST', 12)} NEWEST`,
|
||||
);
|
||||
deps.log(
|
||||
`${pad('-'.repeat(40), 40)} ${pad('-'.repeat(8), 8)} ${pad('-'.repeat(10), 10)} ${pad('-'.repeat(12), 12)} ${'-'.repeat(12)}`,
|
||||
);
|
||||
|
||||
for (const ns of stats.namespaces) {
|
||||
deps.log(
|
||||
`${pad(ns.name, 40)} ${pad(String(ns.entries), 8)} ${pad(formatBytes(ns.size), 10)} ${pad(formatAge(ns.oldestMs), 12)} ${formatAge(ns.newestMs)}`,
|
||||
);
|
||||
}
|
||||
});
|
||||
|
||||
cache
|
||||
.command('clear')
|
||||
.description('Clear cache entries')
|
||||
.argument('[namespace]', 'Clear only this namespace')
|
||||
.option('--older-than <days>', 'Clear entries older than N days')
|
||||
.option('-y, --yes', 'Skip confirmation')
|
||||
.action(async (namespace: string | undefined, opts: { olderThan?: string; yes?: boolean }) => {
|
||||
// Show what will be cleared first
|
||||
const stats = await fetchJson<CacheStats>(`${mcplocalUrl}/cache/stats`);
|
||||
if (stats.totalEntries === 0) {
|
||||
deps.log('Cache is already empty.');
|
||||
return;
|
||||
}
|
||||
|
||||
const target = namespace
|
||||
? stats.namespaces.find((ns) => ns.name === namespace)
|
||||
: null;
|
||||
if (namespace && !target) {
|
||||
deps.log(`Namespace '${namespace}' not found.`);
|
||||
deps.log(`Available: ${stats.namespaces.map((ns) => ns.name).join(', ')}`);
|
||||
return;
|
||||
}
|
||||
|
||||
const olderThan = opts.olderThan ? `?olderThan=${opts.olderThan}` : '';
|
||||
const url = namespace
|
||||
? `${mcplocalUrl}/cache/${encodeURIComponent(namespace)}${olderThan}`
|
||||
: `${mcplocalUrl}/cache${olderThan}`;
|
||||
|
||||
const result = await fetchJson<ClearResult>(url, 'DELETE');
|
||||
deps.log(`Cleared ${result.removed} entries, freed ${formatBytes(result.freedBytes)}`);
|
||||
});
|
||||
|
||||
return cache;
|
||||
}
|
||||
@@ -1,592 +0,0 @@
|
||||
import { Command } from 'commander';
|
||||
import http from 'node:http';
|
||||
import https from 'node:https';
|
||||
import { existsSync } from 'node:fs';
|
||||
import { execFile } from 'node:child_process';
|
||||
import { promisify } from 'node:util';
|
||||
import { homedir } from 'node:os';
|
||||
import { loadConfig, saveConfig } from '../config/index.js';
|
||||
import type { ConfigLoaderDeps, McpctlConfig, LlmConfig, LlmProviderName, LlmProviderEntry, LlmTier } from '../config/index.js';
|
||||
import type { SecretStore } from '@mcpctl/shared';
|
||||
import { createSecretStore } from '@mcpctl/shared';
|
||||
|
||||
const execFileAsync = promisify(execFile);
|
||||
|
||||
/** Interactive prompt abstraction so the setup wizard can be driven in tests. */
export interface ConfigSetupPrompt {
  /** Single-choice list; resolves to the selected choice's value. */
  select<T>(message: string, choices: Array<{ name: string; value: T; description?: string }>): Promise<T>;
  /** Free-text input with an optional default value. */
  input(message: string, defaultValue?: string): Promise<string>;
  /** Masked input for secrets (API keys, tokens). */
  password(message: string): Promise<string>;
  /** Yes/no question with an optional default answer. */
  confirm(message: string, defaultValue?: boolean): Promise<boolean>;
}
|
||||
|
||||
/** Injectable dependencies for the setup wizard (all replaceable in tests). */
export interface ConfigSetupDeps {
  /** Overrides for config file loading/saving. */
  configDeps: Partial<ConfigLoaderDeps>;
  /** Where API keys / OAuth tokens are persisted. */
  secretStore: SecretStore;
  /** Output sink for user-facing messages. */
  log: (...args: string[]) => void;
  /** Interactive prompt implementation. */
  prompt: ConfigSetupPrompt;
  /** Lists models from a provider endpoint (base URL + path → model names). */
  fetchModels: (url: string, path: string) => Promise<string[]>;
  /** Resolves a binary name on PATH, or null when absent. */
  whichBinary: (name: string) => Promise<string | null>;
}
|
||||
|
||||
/** One selectable provider in a prompt menu. */
interface ProviderChoice {
  /** Display label shown to the user. */
  name: string;
  /** Machine value stored in the config. */
  value: LlmProviderName;
  /** Short helper text appended to the label. */
  description: string;
}
|
||||
|
||||
/** Provider config fields returned by per-provider setup functions. */
interface ProviderFields {
  /** Model identifier to use with the provider. */
  model?: string;
  /** Base URL for HTTP providers (Ollama, external vLLM, custom OpenAI endpoint). */
  url?: string;
  /** Path to a local CLI binary (gemini-cli). */
  binaryPath?: string;
  /** Python venv expected to contain bin/vllm (vllm-managed). */
  venvPath?: string;
  /** Port the managed vLLM server listens on. */
  port?: number;
  /** Fraction of GPU memory vLLM may use (prompted as 0.1–1.0). */
  gpuMemoryUtilization?: number;
  // NOTE(review): not set by any setup flow in this file — presumably consumed
  // by external config editing; confirm.
  maxModelLen?: number;
  /** Auto-stop the managed server after this many idle minutes. */
  idleTimeoutMinutes?: number;
  // NOTE(review): not set by any setup flow in this file — confirm external use.
  extraArgs?: string[];
}
|
||||
|
||||
// Provider menu for the 'fast' tier (local / cheap / low-latency options).
const FAST_PROVIDER_CHOICES: ProviderChoice[] = [
  { name: 'Run vLLM Instance', value: 'vllm-managed', description: 'Auto-managed local vLLM (starts/stops with mcplocal)' },
  { name: 'vLLM (external)', value: 'vllm', description: 'Self-hosted vLLM (OpenAI-compatible)' },
  { name: 'Ollama', value: 'ollama', description: 'Local models via Ollama' },
  { name: 'Anthropic (Claude)', value: 'anthropic', description: 'Claude Haiku — fast & cheap' },
];

// Provider menu for the 'heavy' tier (cloud / higher-quality options).
const HEAVY_PROVIDER_CHOICES: ProviderChoice[] = [
  { name: 'Gemini CLI', value: 'gemini-cli', description: 'Google Gemini via local CLI (free, no API key)' },
  { name: 'Anthropic (Claude)', value: 'anthropic', description: 'Claude API (requires API key)' },
  { name: 'OpenAI', value: 'openai', description: 'OpenAI API (requires API key)' },
  { name: 'DeepSeek', value: 'deepseek', description: 'DeepSeek API (requires API key)' },
];

// Full menu for simple (single-provider) mode, plus an explicit disable option.
const ALL_PROVIDER_CHOICES: ProviderChoice[] = [
  ...FAST_PROVIDER_CHOICES,
  ...HEAVY_PROVIDER_CHOICES,
  { name: 'None (disable)', value: 'none', description: 'Disable LLM features' },
] as ProviderChoice[];

// Hard-coded model menus for providers without a reachable listing endpoint.
const GEMINI_MODELS = ['gemini-2.5-flash', 'gemini-2.5-pro', 'gemini-2.0-flash'];
const ANTHROPIC_MODELS = ['claude-haiku-3-5-20241022', 'claude-sonnet-4-20250514', 'claude-sonnet-4-5-20250514', 'claude-opus-4-20250514'];
const DEEPSEEK_MODELS = ['deepseek-chat', 'deepseek-reasoner'];
|
||||
|
||||
/**
 * Query a provider's model-listing endpoint and return model names.
 *
 * Understands two response shapes:
 *  - Ollama:       { models: [{ name }] }
 *  - OpenAI/vLLM:  { data: [{ id }] }
 *
 * Best-effort: any network error, timeout, or unparseable body resolves to []
 * (this promise never rejects), so the wizard can fall back to manual entry.
 */
function defaultFetchModels(baseUrl: string, path: string): Promise<string[]> {
  return new Promise((resolve) => {
    const url = new URL(path, baseUrl);
    const isHttps = url.protocol === 'https:';
    const transport = isHttps ? https : http;

    // NOTE(review): only url.pathname is forwarded — a query string in `path`
    // would be dropped. Current callers pass plain paths, so this is benign.
    const req = transport.get({
      hostname: url.hostname,
      port: url.port || (isHttps ? 443 : 80),
      path: url.pathname,
      timeout: 5000,
    }, (res) => {
      const chunks: Buffer[] = [];
      res.on('data', (chunk: Buffer) => chunks.push(chunk));
      res.on('end', () => {
        try {
          const raw = Buffer.concat(chunks).toString('utf-8');
          const data = JSON.parse(raw) as { models?: Array<{ name: string }>; data?: Array<{ id: string }> };
          // Ollama format: { models: [{ name }] }
          if (data.models) {
            resolve(data.models.map((m) => m.name));
            return;
          }
          // OpenAI/vLLM format: { data: [{ id }] }
          if (data.data) {
            resolve(data.data.map((m) => m.id));
            return;
          }
          resolve([]);
        } catch {
          resolve([]);
        }
      });
    });
    req.on('error', () => resolve([]));
    req.on('timeout', () => { req.destroy(); resolve([]); });
  });
}
|
||||
|
||||
async function defaultSelect<T>(message: string, choices: Array<{ name: string; value: T; description?: string }>): Promise<T> {
|
||||
const { default: inquirer } = await import('inquirer');
|
||||
const { answer } = await inquirer.prompt([{
|
||||
type: 'list',
|
||||
name: 'answer',
|
||||
message,
|
||||
choices: choices.map((c) => ({
|
||||
name: c.description ? `${c.name} — ${c.description}` : c.name,
|
||||
value: c.value,
|
||||
short: c.name,
|
||||
})),
|
||||
}]);
|
||||
return answer as T;
|
||||
}
|
||||
|
||||
async function defaultInput(message: string, defaultValue?: string): Promise<string> {
|
||||
const { default: inquirer } = await import('inquirer');
|
||||
const { answer } = await inquirer.prompt([{
|
||||
type: 'input',
|
||||
name: 'answer',
|
||||
message,
|
||||
default: defaultValue,
|
||||
}]);
|
||||
return answer as string;
|
||||
}
|
||||
|
||||
async function defaultPassword(message: string): Promise<string> {
|
||||
const { default: inquirer } = await import('inquirer');
|
||||
const { answer } = await inquirer.prompt([{ type: 'password', name: 'answer', message }]);
|
||||
return answer as string;
|
||||
}
|
||||
|
||||
async function defaultConfirm(message: string, defaultValue?: boolean): Promise<boolean> {
|
||||
const { default: inquirer } = await import('inquirer');
|
||||
const { answer } = await inquirer.prompt([{
|
||||
type: 'confirm',
|
||||
name: 'answer',
|
||||
message,
|
||||
default: defaultValue ?? true,
|
||||
}]);
|
||||
return answer as boolean;
|
||||
}
|
||||
|
||||
// Production ConfigSetupPrompt wired to the inquirer-backed default
// implementations; tests inject their own ConfigSetupPrompt instead.
const defaultPrompt: ConfigSetupPrompt = {
  select: defaultSelect,
  input: defaultInput,
  password: defaultPassword,
  confirm: defaultConfirm,
};
|
||||
|
||||
async function defaultWhichBinary(name: string): Promise<string | null> {
|
||||
try {
|
||||
const { stdout } = await execFileAsync('which', [name], { timeout: 3000 });
|
||||
const path = stdout.trim();
|
||||
return path || null;
|
||||
} catch {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
// --- Per-provider setup functions (return ProviderFields for reuse in both modes) ---
|
||||
|
||||
async function setupGeminiCliFields(
|
||||
prompt: ConfigSetupPrompt,
|
||||
log: (...args: string[]) => void,
|
||||
whichBinary: (name: string) => Promise<string | null>,
|
||||
currentModel?: string,
|
||||
): Promise<ProviderFields> {
|
||||
const model = await prompt.select<string>('Select model:', [
|
||||
...GEMINI_MODELS.map((m) => ({
|
||||
name: m === currentModel ? `${m} (current)` : m,
|
||||
value: m,
|
||||
})),
|
||||
{ name: 'Custom...', value: '__custom__' },
|
||||
]);
|
||||
|
||||
const finalModel = model === '__custom__'
|
||||
? await prompt.input('Model name:', currentModel)
|
||||
: model;
|
||||
|
||||
let binaryPath: string | undefined;
|
||||
const detected = await whichBinary('gemini');
|
||||
if (detected) {
|
||||
log(`Found gemini at: ${detected}`);
|
||||
binaryPath = detected;
|
||||
} else {
|
||||
log('Warning: gemini binary not found in PATH');
|
||||
const manualPath = await prompt.input('Binary path (or install with: npm i -g @google/gemini-cli):');
|
||||
if (manualPath) binaryPath = manualPath;
|
||||
}
|
||||
|
||||
const result: ProviderFields = { model: finalModel };
|
||||
if (binaryPath) result.binaryPath = binaryPath;
|
||||
return result;
|
||||
}
|
||||
|
||||
async function setupOllamaFields(
|
||||
prompt: ConfigSetupPrompt,
|
||||
fetchModels: ConfigSetupDeps['fetchModels'],
|
||||
currentUrl?: string,
|
||||
currentModel?: string,
|
||||
): Promise<ProviderFields> {
|
||||
const url = await prompt.input('Ollama URL:', currentUrl ?? 'http://localhost:11434');
|
||||
const models = await fetchModels(url, '/api/tags');
|
||||
let model: string;
|
||||
|
||||
if (models.length > 0) {
|
||||
const choices = models.map((m) => ({
|
||||
name: m === currentModel ? `${m} (current)` : m,
|
||||
value: m,
|
||||
}));
|
||||
choices.push({ name: 'Custom...', value: '__custom__' });
|
||||
model = await prompt.select<string>('Select model:', choices);
|
||||
if (model === '__custom__') {
|
||||
model = await prompt.input('Model name:', currentModel);
|
||||
}
|
||||
} else {
|
||||
model = await prompt.input('Model name (could not fetch models):', currentModel ?? 'llama3.2');
|
||||
}
|
||||
|
||||
const result: ProviderFields = { model };
|
||||
if (url) result.url = url;
|
||||
return result;
|
||||
}
|
||||
|
||||
async function setupVllmFields(
|
||||
prompt: ConfigSetupPrompt,
|
||||
fetchModels: ConfigSetupDeps['fetchModels'],
|
||||
currentUrl?: string,
|
||||
currentModel?: string,
|
||||
): Promise<ProviderFields> {
|
||||
const url = await prompt.input('vLLM URL:', currentUrl ?? 'http://localhost:8000');
|
||||
const models = await fetchModels(url, '/v1/models');
|
||||
let model: string;
|
||||
|
||||
if (models.length > 0) {
|
||||
const choices = models.map((m) => ({
|
||||
name: m === currentModel ? `${m} (current)` : m,
|
||||
value: m,
|
||||
}));
|
||||
choices.push({ name: 'Custom...', value: '__custom__' });
|
||||
model = await prompt.select<string>('Select model:', choices);
|
||||
if (model === '__custom__') {
|
||||
model = await prompt.input('Model name:', currentModel);
|
||||
}
|
||||
} else {
|
||||
model = await prompt.input('Model name (could not fetch models):', currentModel ?? 'default');
|
||||
}
|
||||
|
||||
const result: ProviderFields = { model };
|
||||
if (url) result.url = url;
|
||||
return result;
|
||||
}
|
||||
|
||||
async function setupVllmManagedFields(
|
||||
prompt: ConfigSetupPrompt,
|
||||
log: (...args: string[]) => void,
|
||||
): Promise<ProviderFields> {
|
||||
const defaultVenv = '~/vllm_env';
|
||||
const venvPath = await prompt.input('vLLM venv path:', defaultVenv);
|
||||
|
||||
// Validate venv exists
|
||||
const expandedPath = venvPath.startsWith('~') ? venvPath.replace('~', homedir()) : venvPath;
|
||||
const vllmBin = `${expandedPath}/bin/vllm`;
|
||||
if (!existsSync(vllmBin)) {
|
||||
log(`Warning: ${vllmBin} not found.`);
|
||||
log(` Create it with: uv venv ${venvPath} --python 3.12 && ${expandedPath}/bin/pip install vllm`);
|
||||
} else {
|
||||
log(`Found vLLM at: ${vllmBin}`);
|
||||
}
|
||||
|
||||
const model = await prompt.input('Model to serve:', 'Qwen/Qwen2.5-7B-Instruct-AWQ');
|
||||
const gpuStr = await prompt.input('GPU memory utilization (0.1–1.0):', '0.75');
|
||||
const gpuMemoryUtilization = parseFloat(gpuStr) || 0.75;
|
||||
const idleStr = await prompt.input('Stop after N minutes idle:', '15');
|
||||
const idleTimeoutMinutes = parseInt(idleStr, 10) || 15;
|
||||
const portStr = await prompt.input('Port:', '8000');
|
||||
const port = parseInt(portStr, 10) || 8000;
|
||||
|
||||
return {
|
||||
model,
|
||||
venvPath,
|
||||
port,
|
||||
gpuMemoryUtilization,
|
||||
idleTimeoutMinutes,
|
||||
};
|
||||
}
|
||||
|
||||
async function setupApiKeyFields(
|
||||
prompt: ConfigSetupPrompt,
|
||||
secretStore: SecretStore,
|
||||
provider: LlmProviderName,
|
||||
secretKey: string,
|
||||
hardcodedModels: string[],
|
||||
currentModel?: string,
|
||||
currentUrl?: string,
|
||||
): Promise<ProviderFields> {
|
||||
const existingKey = await secretStore.get(secretKey);
|
||||
let apiKey: string;
|
||||
|
||||
if (existingKey) {
|
||||
const masked = `****${existingKey.slice(-4)}`;
|
||||
const changeKey = await prompt.confirm(`API key stored (${masked}). Change it?`, false);
|
||||
apiKey = changeKey ? await prompt.password('API key:') : existingKey;
|
||||
} else {
|
||||
apiKey = await prompt.password('API key:');
|
||||
}
|
||||
|
||||
if (apiKey !== existingKey) {
|
||||
await secretStore.set(secretKey, apiKey);
|
||||
}
|
||||
|
||||
let model: string;
|
||||
if (hardcodedModels.length > 0) {
|
||||
const choices = hardcodedModels.map((m) => ({
|
||||
name: m === currentModel ? `${m} (current)` : m,
|
||||
value: m,
|
||||
}));
|
||||
choices.push({ name: 'Custom...', value: '__custom__' });
|
||||
model = await prompt.select<string>('Select model:', choices);
|
||||
if (model === '__custom__') {
|
||||
model = await prompt.input('Model name:', currentModel);
|
||||
}
|
||||
} else {
|
||||
model = await prompt.input('Model name:', currentModel ?? 'gpt-4o');
|
||||
}
|
||||
|
||||
let url: string | undefined;
|
||||
if (provider === 'openai') {
|
||||
const customUrl = await prompt.confirm('Use custom API endpoint?', false);
|
||||
if (customUrl) {
|
||||
url = await prompt.input('API URL:', currentUrl ?? 'https://api.openai.com');
|
||||
}
|
||||
}
|
||||
|
||||
const result: ProviderFields = { model };
|
||||
if (url) result.url = url;
|
||||
return result;
|
||||
}
|
||||
|
||||
async function promptForAnthropicKey(
|
||||
prompt: ConfigSetupPrompt,
|
||||
log: (...args: string[]) => void,
|
||||
whichBinary: (name: string) => Promise<string | null>,
|
||||
): Promise<string> {
|
||||
const claudePath = await whichBinary('claude');
|
||||
|
||||
if (claudePath) {
|
||||
log(`Found Claude CLI at: ${claudePath}`);
|
||||
const useOAuth = await prompt.confirm(
|
||||
'Generate free token via Claude CLI? (requires Pro/Max subscription)', true);
|
||||
if (useOAuth) {
|
||||
log('');
|
||||
log(' Run: claude setup-token');
|
||||
log(' Then paste the token below (starts with sk-ant-oat01-)');
|
||||
log('');
|
||||
return prompt.password('OAuth token:');
|
||||
}
|
||||
} else {
|
||||
log('Tip: Install Claude CLI (npm i -g @anthropic-ai/claude-code) to generate');
|
||||
log(' a free OAuth token with "claude setup-token" (Pro/Max subscription).');
|
||||
log('');
|
||||
}
|
||||
|
||||
return prompt.password('API key (from console.anthropic.com):');
|
||||
}
|
||||
|
||||
async function setupAnthropicFields(
|
||||
prompt: ConfigSetupPrompt,
|
||||
secretStore: SecretStore,
|
||||
log: (...args: string[]) => void,
|
||||
whichBinary: (name: string) => Promise<string | null>,
|
||||
currentModel?: string,
|
||||
): Promise<ProviderFields> {
|
||||
const existingKey = await secretStore.get('anthropic-api-key');
|
||||
let apiKey: string;
|
||||
|
||||
if (existingKey) {
|
||||
const isOAuth = existingKey.startsWith('sk-ant-oat');
|
||||
const masked = `****${existingKey.slice(-4)}`;
|
||||
const label = isOAuth ? `OAuth token stored (${masked})` : `API key stored (${masked})`;
|
||||
const changeKey = await prompt.confirm(`${label}. Change it?`, false);
|
||||
apiKey = changeKey ? await promptForAnthropicKey(prompt, log, whichBinary) : existingKey;
|
||||
} else {
|
||||
apiKey = await promptForAnthropicKey(prompt, log, whichBinary);
|
||||
}
|
||||
|
||||
if (apiKey !== existingKey) {
|
||||
await secretStore.set('anthropic-api-key', apiKey);
|
||||
}
|
||||
|
||||
const choices = ANTHROPIC_MODELS.map((m) => ({
|
||||
name: m === currentModel ? `${m} (current)` : m,
|
||||
value: m,
|
||||
}));
|
||||
choices.push({ name: 'Custom...', value: '__custom__' });
|
||||
let model = await prompt.select<string>('Select model:', choices);
|
||||
if (model === '__custom__') {
|
||||
model = await prompt.input('Model name:', currentModel);
|
||||
}
|
||||
|
||||
return { model };
|
||||
}
|
||||
|
||||
/** Configure a single provider type and return its fields. */
|
||||
async function setupProviderFields(
|
||||
providerType: LlmProviderName,
|
||||
prompt: ConfigSetupPrompt,
|
||||
log: (...args: string[]) => void,
|
||||
fetchModels: ConfigSetupDeps['fetchModels'],
|
||||
whichBinary: (name: string) => Promise<string | null>,
|
||||
secretStore: SecretStore,
|
||||
): Promise<ProviderFields> {
|
||||
switch (providerType) {
|
||||
case 'gemini-cli':
|
||||
return setupGeminiCliFields(prompt, log, whichBinary);
|
||||
case 'ollama':
|
||||
return setupOllamaFields(prompt, fetchModels);
|
||||
case 'vllm':
|
||||
return setupVllmFields(prompt, fetchModels);
|
||||
case 'vllm-managed':
|
||||
return setupVllmManagedFields(prompt, log);
|
||||
case 'anthropic':
|
||||
return setupAnthropicFields(prompt, secretStore, log, whichBinary);
|
||||
case 'openai':
|
||||
return setupApiKeyFields(prompt, secretStore, 'openai', 'openai-api-key', []);
|
||||
case 'deepseek':
|
||||
return setupApiKeyFields(prompt, secretStore, 'deepseek', 'deepseek-api-key', DEEPSEEK_MODELS);
|
||||
default:
|
||||
return {};
|
||||
}
|
||||
}
|
||||
|
||||
/** Build a LlmProviderEntry from type, name, and fields. */
|
||||
function buildEntry(providerType: LlmProviderName, name: string, fields: ProviderFields, tier?: LlmTier): LlmProviderEntry {
|
||||
const entry: LlmProviderEntry = { name, type: providerType };
|
||||
if (fields.model) entry.model = fields.model;
|
||||
if (fields.url) entry.url = fields.url;
|
||||
if (fields.binaryPath) entry.binaryPath = fields.binaryPath;
|
||||
if (fields.venvPath) entry.venvPath = fields.venvPath;
|
||||
if (fields.port !== undefined) entry.port = fields.port;
|
||||
if (fields.gpuMemoryUtilization !== undefined) entry.gpuMemoryUtilization = fields.gpuMemoryUtilization;
|
||||
if (fields.maxModelLen !== undefined) entry.maxModelLen = fields.maxModelLen;
|
||||
if (fields.idleTimeoutMinutes !== undefined) entry.idleTimeoutMinutes = fields.idleTimeoutMinutes;
|
||||
if (fields.extraArgs !== undefined) entry.extraArgs = fields.extraArgs;
|
||||
if (tier) entry.tier = tier;
|
||||
return entry;
|
||||
}
|
||||
|
||||
/**
 * Simple mode: configure one provider for everything and persist it in the
 * legacy single-provider config format ({ provider, ...fields }).
 * Selecting 'none' saves a disabled config and exits early.
 */
async function simpleSetup(
  config: McpctlConfig,
  configDeps: Partial<ConfigLoaderDeps>,
  prompt: ConfigSetupPrompt,
  log: (...args: string[]) => void,
  fetchModels: ConfigSetupDeps['fetchModels'],
  whichBinary: (name: string) => Promise<string | null>,
  secretStore: SecretStore,
): Promise<void> {
  // Only the legacy single-provider shape has a 'provider' key; other shapes
  // are ignored for the "(current)" marker below.
  const currentLlm = config.llm && 'provider' in config.llm ? config.llm : undefined;

  // Mark the currently configured provider in the menu.
  const choices = ALL_PROVIDER_CHOICES.map((c) => {
    if (currentLlm?.provider === c.value) {
      return { ...c, name: `${c.name} (current)` };
    }
    return c;
  });

  const provider = await prompt.select<LlmProviderName>('Select LLM provider:', choices);

  if (provider === 'none') {
    const updated: McpctlConfig = { ...config, llm: { provider: 'none' } };
    saveConfig(updated, configDeps);
    log('LLM disabled. Restart mcplocal: systemctl --user restart mcplocal');
    return;
  }

  // Run the provider-specific flow, then persist the merged config.
  const fields = await setupProviderFields(provider, prompt, log, fetchModels, whichBinary, secretStore);
  const llmConfig: LlmConfig = { provider, ...fields };
  const updated: McpctlConfig = { ...config, llm: llmConfig };
  saveConfig(updated, configDeps);
  log(`\nLLM configured: ${llmConfig.provider}${llmConfig.model ? ` / ${llmConfig.model}` : ''}`);
  log('Restart mcplocal: systemctl --user restart mcplocal');
}
|
||||
|
||||
/** Generate a unique default name given names already in use. */
|
||||
function uniqueDefaultName(baseName: string, usedNames: Set<string>): string {
|
||||
if (!usedNames.has(baseName)) return baseName;
|
||||
let i = 2;
|
||||
while (usedNames.has(`${baseName}-${i}`)) i++;
|
||||
return `${baseName}-${i}`;
|
||||
}
|
||||
|
||||
/**
 * Advanced mode: configure multiple providers, each assigned to a 'fast'
 * (local/cheap) or 'heavy' (cloud/smart) tier, and persist them as
 * { llm: { providers: [...] } }. Exits without saving when no provider is added.
 */
async function advancedSetup(
  config: McpctlConfig,
  configDeps: Partial<ConfigLoaderDeps>,
  prompt: ConfigSetupPrompt,
  log: (...args: string[]) => void,
  fetchModels: ConfigSetupDeps['fetchModels'],
  whichBinary: (name: string) => Promise<string | null>,
  secretStore: SecretStore,
): Promise<void> {
  const entries: LlmProviderEntry[] = [];
  // Tracks names across BOTH tiers so generated default names never collide.
  const usedNames = new Set<string>();

  // Fast providers
  const addFast = await prompt.confirm('Add a FAST provider? (vLLM, Ollama — local, cheap, fast)', true);
  if (addFast) {
    let addMore = true;
    while (addMore) {
      const providerType = await prompt.select<LlmProviderName>('Fast provider type:', FAST_PROVIDER_CHOICES);
      // Both vLLM flavors share the friendlier 'vllm-local' default name.
      const rawDefault = providerType === 'vllm' || providerType === 'vllm-managed' ? 'vllm-local' : providerType;
      const defaultName = uniqueDefaultName(rawDefault, usedNames);
      const name = await prompt.input('Provider name:', defaultName);
      usedNames.add(name);
      const fields = await setupProviderFields(providerType, prompt, log, fetchModels, whichBinary, secretStore);
      entries.push(buildEntry(providerType, name, fields, 'fast'));
      log(` Added: ${name} (${providerType}) → fast tier`);
      addMore = await prompt.confirm('Add another fast provider?', false);
    }
  }

  // Heavy providers
  const addHeavy = await prompt.confirm('Add a HEAVY provider? (Gemini, Anthropic, OpenAI — cloud, smart)', true);
  if (addHeavy) {
    let addMore = true;
    while (addMore) {
      const providerType = await prompt.select<LlmProviderName>('Heavy provider type:', HEAVY_PROVIDER_CHOICES);
      const defaultName = uniqueDefaultName(providerType, usedNames);
      const name = await prompt.input('Provider name:', defaultName);
      usedNames.add(name);
      const fields = await setupProviderFields(providerType, prompt, log, fetchModels, whichBinary, secretStore);
      entries.push(buildEntry(providerType, name, fields, 'heavy'));
      log(` Added: ${name} (${providerType}) → heavy tier`);
      addMore = await prompt.confirm('Add another heavy provider?', false);
    }
  }

  if (entries.length === 0) {
    log('No providers configured.');
    return;
  }

  // Summary
  log('\nProvider configuration:');
  for (const e of entries) {
    log(` ${e.tier ?? 'unassigned'}: ${e.name} (${e.type})${e.model ? ` / ${e.model}` : ''}`);
  }

  const updated: McpctlConfig = { ...config, llm: { providers: entries } };
  saveConfig(updated, configDeps);
  log('\nRestart mcplocal: systemctl --user restart mcplocal');
}
|
||||
|
||||
/**
 * Build the `setup` command: an interactive wizard that configures LLM
 * providers in either simple (single-provider) or advanced (multi-tier) mode.
 *
 * @param deps - Optional overrides for config I/O, prompts, logging, model
 *   discovery, binary lookup, and secret storage; anything omitted falls back
 *   to the production defaults.
 */
export function createConfigSetupCommand(deps?: Partial<ConfigSetupDeps>): Command {
  return new Command('setup')
    .description('Interactive LLM provider setup wizard')
    .action(async () => {
      // Resolve each dependency, falling back to production defaults.
      const configDeps = deps?.configDeps ?? {};
      const log = deps?.log ?? ((...args: string[]) => console.log(...args));
      const prompt = deps?.prompt ?? defaultPrompt;
      const fetchModels = deps?.fetchModels ?? defaultFetchModels;
      const whichBinary = deps?.whichBinary ?? defaultWhichBinary;
      const secretStore = deps?.secretStore ?? await createSecretStore();

      const config = loadConfig(configDeps);

      const mode = await prompt.select<'simple' | 'advanced'>('Setup mode:', [
        { name: 'Simple', value: 'simple', description: 'One provider for everything' },
        { name: 'Advanced', value: 'advanced', description: 'Multiple providers with fast/heavy tiers' },
      ]);

      if (mode === 'simple') {
        await simpleSetup(config, configDeps, prompt, log, fetchModels, whichBinary, secretStore);
      } else {
        await advancedSetup(config, configDeps, prompt, log, fetchModels, whichBinary, secretStore);
      }
    });
}
|
||||
@@ -6,12 +6,11 @@ import { loadConfig, saveConfig, mergeConfig, getConfigPath, DEFAULT_CONFIG } fr
|
||||
import type { McpctlConfig, ConfigLoaderDeps } from '../config/index.js';
|
||||
import { formatJson, formatYaml } from '../formatters/index.js';
|
||||
import { saveCredentials, loadCredentials } from '../auth/index.js';
|
||||
import { createConfigSetupCommand } from './config-setup.js';
|
||||
import type { CredentialsDeps, StoredCredentials } from '../auth/index.js';
|
||||
import type { ApiClient } from '../api-client.js';
|
||||
|
||||
interface McpConfig {
|
||||
mcpServers: Record<string, { command?: string; args?: string[]; url?: string; env?: Record<string, string> }>;
|
||||
mcpServers: Record<string, { command: string; args: string[]; env?: Record<string, string> }>;
|
||||
}
|
||||
|
||||
export interface ConfigCommandDeps {
|
||||
@@ -85,76 +84,45 @@ export function createConfigCommand(deps?: Partial<ConfigCommandDeps>, apiDeps?:
|
||||
log('Configuration reset to defaults');
|
||||
});
|
||||
|
||||
// claude/claude-generate: generate .mcp.json pointing at mcpctl mcp bridge
|
||||
function registerClaudeCommand(name: string, hidden: boolean): void {
|
||||
const cmd = config
|
||||
.command(name)
|
||||
.description(hidden ? '' : 'Generate .mcp.json that connects a project via mcpctl mcp bridge')
|
||||
.option('-p, --project <name>', 'Project name')
|
||||
.option('-o, --output <path>', 'Output file path', '.mcp.json')
|
||||
.option('--inspect', 'Include mcpctl-inspect MCP server for traffic monitoring')
|
||||
.option('--stdout', 'Print to stdout instead of writing a file')
|
||||
.action((opts: { project?: string; output: string; inspect?: boolean; stdout?: boolean }) => {
|
||||
if (!opts.project && !opts.inspect) {
|
||||
log('Error: at least one of --project or --inspect is required');
|
||||
process.exitCode = 1;
|
||||
return;
|
||||
}
|
||||
if (apiDeps) {
|
||||
const { client, credentialsDeps, log: apiLog } = apiDeps;
|
||||
|
||||
const servers: McpConfig['mcpServers'] = {};
|
||||
if (opts.project) {
|
||||
servers[opts.project] = {
|
||||
command: 'mcpctl',
|
||||
args: ['mcp', '-p', opts.project],
|
||||
};
|
||||
}
|
||||
if (opts.inspect) {
|
||||
servers['mcpctl-inspect'] = {
|
||||
command: 'mcpctl',
|
||||
args: ['console', '--stdin-mcp'],
|
||||
};
|
||||
}
|
||||
config
|
||||
.command('claude-generate')
|
||||
.description('Generate .mcp.json from a project configuration')
|
||||
.requiredOption('--project <name>', 'Project name')
|
||||
.option('-o, --output <path>', 'Output file path', '.mcp.json')
|
||||
.option('--merge', 'Merge with existing .mcp.json instead of overwriting')
|
||||
.option('--stdout', 'Print to stdout instead of writing a file')
|
||||
.action(async (opts: { project: string; output: string; merge?: boolean; stdout?: boolean }) => {
|
||||
const mcpConfig = await client.get<McpConfig>(`/api/v1/projects/${opts.project}/mcp-config`);
|
||||
|
||||
if (opts.stdout) {
|
||||
log(JSON.stringify({ mcpServers: servers }, null, 2));
|
||||
apiLog(JSON.stringify(mcpConfig, null, 2));
|
||||
return;
|
||||
}
|
||||
|
||||
const outputPath = resolve(opts.output);
|
||||
let finalConfig: McpConfig = { mcpServers: servers };
|
||||
let finalConfig = mcpConfig;
|
||||
|
||||
// Always merge with existing .mcp.json — never overwrite other servers
|
||||
if (existsSync(outputPath)) {
|
||||
if (opts.merge && existsSync(outputPath)) {
|
||||
try {
|
||||
const existing = JSON.parse(readFileSync(outputPath, 'utf-8')) as McpConfig;
|
||||
finalConfig = {
|
||||
mcpServers: {
|
||||
...existing.mcpServers,
|
||||
...servers,
|
||||
...mcpConfig.mcpServers,
|
||||
},
|
||||
};
|
||||
} catch {
|
||||
// If existing file is invalid, start fresh
|
||||
// If existing file is invalid, just overwrite
|
||||
}
|
||||
}
|
||||
|
||||
writeFileSync(outputPath, JSON.stringify(finalConfig, null, 2) + '\n');
|
||||
const serverCount = Object.keys(finalConfig.mcpServers).length;
|
||||
log(`Wrote ${outputPath} (${serverCount} server(s))`);
|
||||
apiLog(`Wrote ${outputPath} (${serverCount} server(s))`);
|
||||
});
|
||||
if (hidden) {
|
||||
// Commander shows empty-description commands but they won't clutter help output
|
||||
void cmd; // suppress unused lint
|
||||
}
|
||||
}
|
||||
|
||||
registerClaudeCommand('claude', false);
|
||||
registerClaudeCommand('claude-generate', true); // backward compat
|
||||
|
||||
config.addCommand(createConfigSetupCommand({ configDeps }));
|
||||
|
||||
if (apiDeps) {
|
||||
const { client, credentialsDeps, log: apiLog } = apiDeps;
|
||||
|
||||
config
|
||||
.command('impersonate')
|
||||
|
||||
@@ -1,647 +0,0 @@
|
||||
/**
|
||||
* AuditConsoleApp — TUI for browsing audit events from mcpd.
|
||||
*
|
||||
* Navigation follows the same patterns as the main unified console:
|
||||
* - Sidebar open: arrows navigate sessions, Enter selects, Escape closes
|
||||
* - Sidebar closed: arrows navigate timeline, Escape reopens sidebar
|
||||
*
|
||||
* Sidebar groups sessions by project → user.
|
||||
* `d` key cycles through date filter presets.
|
||||
*/
|
||||
|
||||
import { useState, useEffect, useCallback, useRef } from 'react';
|
||||
import { render, Box, Text, useInput, useApp, useStdout } from 'ink';
|
||||
import type { AuditSession, AuditEvent, AuditConsoleState, DateFilterPreset } from './audit-types.js';
|
||||
import { EVENT_KIND_COLORS, EVENT_KIND_LABELS, DATE_FILTER_CYCLE, DATE_FILTER_LABELS, dateFilterToFrom } from './audit-types.js';
|
||||
import http from 'node:http';
|
||||
|
||||
// How often the console re-polls mcpd for sessions and events (ms).
const POLL_INTERVAL_MS = 3_000;
// Upper bound on events fetched per poll (passed as the API `limit` param).
const MAX_EVENTS = 500;
|
||||
|
||||
// ── HTTP helpers ──
|
||||
|
||||
function fetchJson<T>(url: string, token?: string): Promise<T> {
|
||||
return new Promise((resolve, reject) => {
|
||||
const parsed = new URL(url);
|
||||
const headers: Record<string, string> = { 'Accept': 'application/json' };
|
||||
if (token) headers['Authorization'] = `Bearer ${token}`;
|
||||
|
||||
const req = http.get({ hostname: parsed.hostname, port: parsed.port, path: parsed.pathname + parsed.search, headers, timeout: 5000 }, (res) => {
|
||||
let data = '';
|
||||
res.on('data', (chunk: Buffer) => { data += chunk.toString(); });
|
||||
res.on('end', () => {
|
||||
try {
|
||||
resolve(JSON.parse(data) as T);
|
||||
} catch {
|
||||
reject(new Error(`Invalid JSON from ${url}`));
|
||||
}
|
||||
});
|
||||
});
|
||||
req.on('error', (err) => reject(err));
|
||||
req.on('timeout', () => { req.destroy(); reject(new Error('Request timed out')); });
|
||||
});
|
||||
}
|
||||
|
||||
// ── Format helpers ──
|
||||
|
||||
function formatTime(ts: string): string {
|
||||
const d = new Date(ts);
|
||||
return d.toLocaleTimeString('en-GB', { hour: '2-digit', minute: '2-digit', second: '2-digit' });
|
||||
}
|
||||
|
||||
function trunc(s: string, max: number): string {
|
||||
return s.length > max ? s.slice(0, max - 1) + '\u2026' : s;
|
||||
}
|
||||
|
||||
function formatPayload(payload: Record<string, unknown>): string {
|
||||
const parts: string[] = [];
|
||||
for (const [k, v] of Object.entries(payload)) {
|
||||
if (v === null || v === undefined) continue;
|
||||
if (typeof v === 'string') {
|
||||
parts.push(`${k}=${trunc(v, 30)}`);
|
||||
} else if (typeof v === 'number' || typeof v === 'boolean') {
|
||||
parts.push(`${k}=${String(v)}`);
|
||||
}
|
||||
}
|
||||
return parts.join(' ');
|
||||
}
|
||||
|
||||
function formatDetailPayload(payload: Record<string, unknown>): string[] {
|
||||
const lines: string[] = [];
|
||||
for (const [k, v] of Object.entries(payload)) {
|
||||
if (v === null || v === undefined) {
|
||||
lines.push(` ${k}: null`);
|
||||
} else if (typeof v === 'object') {
|
||||
lines.push(` ${k}: ${JSON.stringify(v, null, 2).split('\n').join('\n ')}`);
|
||||
} else {
|
||||
lines.push(` ${k}: ${String(v)}`);
|
||||
}
|
||||
}
|
||||
return lines;
|
||||
}
|
||||
|
||||
// ── Sidebar grouping ──
|
||||
|
||||
/**
 * One rendered row in the sidebar: a project header, a user header, or a
 * selectable session entry.
 */
interface SidebarLine {
  type: 'project-header' | 'user-header' | 'session';
  // Text rendered for this row (already formatted/abbreviated for sessions).
  label: string;
  sessionIdx?: number; // flat index into sessions array (only for type=session)
}
|
||||
|
||||
function buildGroupedLines(sessions: AuditSession[]): SidebarLine[] {
|
||||
// Group by project → user
|
||||
const projectMap = new Map<string, Map<string, number[]>>();
|
||||
const projectOrder: string[] = [];
|
||||
|
||||
for (let i = 0; i < sessions.length; i++) {
|
||||
const s = sessions[i]!;
|
||||
let userMap = projectMap.get(s.projectName);
|
||||
if (!userMap) {
|
||||
userMap = new Map();
|
||||
projectMap.set(s.projectName, userMap);
|
||||
projectOrder.push(s.projectName);
|
||||
}
|
||||
const userName = s.userName ?? '(unknown)';
|
||||
let indices = userMap.get(userName);
|
||||
if (!indices) {
|
||||
indices = [];
|
||||
userMap.set(userName, indices);
|
||||
}
|
||||
indices.push(i);
|
||||
}
|
||||
|
||||
const lines: SidebarLine[] = [];
|
||||
for (const proj of projectOrder) {
|
||||
lines.push({ type: 'project-header', label: proj });
|
||||
const userMap = projectMap.get(proj)!;
|
||||
for (const [user, indices] of userMap) {
|
||||
lines.push({ type: 'user-header', label: user });
|
||||
for (const idx of indices) {
|
||||
const s = sessions[idx]!;
|
||||
const time = formatTime(s.lastSeen);
|
||||
lines.push({
|
||||
type: 'session',
|
||||
label: `${s.sessionId.slice(0, 8)} \u00B7 ${s.eventCount} ev \u00B7 ${time}`,
|
||||
sessionIdx: idx,
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
return lines;
|
||||
}
|
||||
|
||||
/** Extract session indices in visual (grouped) order. */
|
||||
function visualSessionOrder(sessions: AuditSession[]): number[] {
|
||||
return buildGroupedLines(sessions)
|
||||
.filter((l) => l.type === 'session')
|
||||
.map((l) => l.sessionIdx!);
|
||||
}
|
||||
|
||||
// ── Session Sidebar ──
|
||||
|
||||
/**
 * Left-hand sidebar listing sessions grouped by project → user.
 *
 * Renders a fixed header (title, active filters, an "All" pseudo-row) and a
 * scrollable body; the scroll window is derived each render so the selected
 * session stays visible. `selectedIdx` of -1 means the "All" row is selected.
 */
function AuditSidebar({ sessions, selectedIdx, projectFilter, dateFilter, height }: {
  sessions: AuditSession[];
  selectedIdx: number;          // flat index into `sessions`; -1 = "All"
  projectFilter: string | null; // shown in the header; filtering happens upstream
  dateFilter: DateFilterPreset;
  height: number;               // total rows available to the sidebar
}) {
  const grouped = buildGroupedLines(sessions);

  const headerLines = 4; // title + filter info + blank + "All" row
  const footerLines = 0;
  const bodyHeight = Math.max(1, height - headerLines - footerLines);

  // Find which render line corresponds to the selected session
  let selectedLineIdx = -1;
  if (selectedIdx >= 0) {
    selectedLineIdx = grouped.findIndex((l) => l.sessionIdx === selectedIdx);
  }

  // Scroll to keep selected visible
  let scrollStart = 0;
  if (selectedLineIdx >= 0) {
    if (selectedLineIdx >= scrollStart + bodyHeight) {
      scrollStart = selectedLineIdx - bodyHeight + 1;
    }
    if (selectedLineIdx < scrollStart) {
      scrollStart = selectedLineIdx;
    }
  }
  scrollStart = Math.max(0, scrollStart);

  const visibleLines = grouped.slice(scrollStart, scrollStart + bodyHeight);

  return (
    <Box flexDirection="column" width={34} height={height} borderStyle="single" borderColor="gray" paddingX={1}>
      <Text bold>Sessions ({sessions.length})</Text>
      <Text dimColor>
        {projectFilter ? `project: ${projectFilter}` : 'all projects'}
        {dateFilter !== 'all' ? ` \u00B7 ${DATE_FILTER_LABELS[dateFilter]}` : ''}
      </Text>
      <Text> </Text>
      {/* "All" pseudo-row: aggregate event count across every session. */}
      <Text color={selectedIdx === -1 ? 'cyan' : undefined} bold={selectedIdx === -1}>
        {selectedIdx === -1 ? '\u25B8 ' : ' '}All ({sessions.reduce((s, x) => s + x.eventCount, 0)} events)
      </Text>

      {visibleLines.map((line, vi) => {
        if (line.type === 'project-header') {
          return (
            <Text key={`p-${line.label}-${vi}`} bold wrap="truncate">
              {' '}{trunc(line.label, 28)}
            </Text>
          );
        }
        if (line.type === 'user-header') {
          return (
            <Text key={`u-${line.label}-${vi}`} dimColor wrap="truncate">
              {' '}{trunc(line.label, 26)}
            </Text>
          );
        }
        // session
        const isSel = line.sessionIdx === selectedIdx;
        return (
          <Text key={`s-${line.sessionIdx}-${vi}`} color={isSel ? 'cyan' : undefined} bold={isSel} wrap="truncate">
            {isSel ? ' \u25B8 ' : ' '}{trunc(line.label, 24)}
          </Text>
        );
      })}
      {sessions.length === 0 && <Text dimColor> No sessions</Text>}
    </Box>
  );
}
|
||||
|
||||
// ── Event Timeline ──
|
||||
|
||||
/**
 * Main event timeline. With no focused row (`focusedIdx` -1) it follows the
 * tail of the stream; with a focused row it centers the viewport on it.
 * Each row shows time, a verification mark, a colored kind label, optional
 * server name, and a one-line payload summary.
 */
function AuditTimeline({ events, height, focusedIdx }: { events: AuditEvent[]; height: number; focusedIdx: number }) {
  const maxVisible = Math.max(1, height - 2);
  let startIdx: number;
  if (focusedIdx >= 0) {
    // Center the focused event, clamped to the list bounds.
    startIdx = Math.max(0, Math.min(focusedIdx - Math.floor(maxVisible / 2), events.length - maxVisible));
  } else {
    // Follow mode: pin the viewport to the newest events.
    startIdx = Math.max(0, events.length - maxVisible);
  }
  const visible = events.slice(startIdx, startIdx + maxVisible);

  return (
    <Box flexDirection="column" flexGrow={1} paddingLeft={1}>
      <Text bold>
        Events <Text dimColor>({events.length}{focusedIdx >= 0 ? ` \u00B7 #${focusedIdx + 1}` : ' \u00B7 following'})</Text>
      </Text>
      {visible.length === 0 && (
        <Box marginTop={1}>
          <Text dimColor>{' No audit events yet\u2026'}</Text>
        </Box>
      )}
      {visible.map((event, vi) => {
        const absIdx = startIdx + vi;
        const isFocused = absIdx === focusedIdx;
        // Unknown kinds fall back to white/uppercased raw kind string.
        const kindColor = EVENT_KIND_COLORS[event.eventKind] ?? 'white';
        const kindLabel = EVENT_KIND_LABELS[event.eventKind] ?? event.eventKind.toUpperCase();
        const verified = event.verified ? '\u2713' : '\u2717';
        const verifiedColor = event.verified ? 'green' : 'red';
        const summary = formatPayload(event.payload);

        return (
          <Text key={event.id} wrap="truncate">
            <Text color={isFocused ? 'cyan' : undefined}>{isFocused ? '\u25B8' : ' '}</Text>
            <Text dimColor>{formatTime(event.timestamp)} </Text>
            <Text color={verifiedColor}>{verified}</Text>
            <Text> </Text>
            <Text color={kindColor} bold>{trunc(kindLabel, 9).padEnd(9)}</Text>
            {event.serverName && <Text color="gray"> [{trunc(event.serverName, 14)}]</Text>}
            <Text dimColor> {trunc(summary, 60)}</Text>
          </Text>
        );
      })}
    </Box>
  );
}
||||
|
||||
// ── Detail View ──
|
||||
|
||||
/**
 * Full-screen detail view for a single event: fixed metadata fields followed
 * by the pretty-printed payload. The body is a scrollable window over the
 * pre-rendered line list (`scrollOffset` = first visible line).
 */
function AuditDetail({ event, scrollOffset, height }: { event: AuditEvent; scrollOffset: number; height: number }) {
  const kindColor = EVENT_KIND_COLORS[event.eventKind] ?? 'white';
  const kindLabel = EVENT_KIND_LABELS[event.eventKind] ?? event.eventKind;
  // Render everything up front as flat strings so scrolling is a simple slice.
  const lines = [
    `Kind: ${kindLabel}`,
    `Session: ${event.sessionId}`,
    `Project: ${event.projectName}`,
    `Source: ${event.source}`,
    `Verified: ${event.verified ? 'yes' : 'no'}`,
    `Server: ${event.serverName ?? '-'}`,
    `Time: ${new Date(event.timestamp).toLocaleString()}`,
    `ID: ${event.id}`,
    '',
    'Payload:',
    ...formatDetailPayload(event.payload),
  ];

  const maxVisible = Math.max(1, height - 2);
  const visible = lines.slice(scrollOffset, scrollOffset + maxVisible);

  return (
    <Box flexDirection="column" flexGrow={1} paddingLeft={1}>
      <Text bold color={kindColor}>
        {kindLabel} Detail <Text dimColor>(line {scrollOffset + 1}/{lines.length})</Text>
      </Text>
      {visible.map((line, i) => (
        <Text key={i} wrap="truncate">{line}</Text>
      ))}
    </Box>
  );
}
||||
|
||||
// ── Main App ──
|
||||
|
||||
/** Props for the root AuditApp component. */
interface AuditAppProps {
  mcpdUrl: string;        // base URL of the mcpd HTTP API
  token?: string;         // optional bearer token for API requests
  projectFilter?: string; // restrict sessions/events to one project
}
|
||||
|
||||
/**
 * Root Ink component for the audit console.
 *
 * Polls mcpd every POLL_INTERVAL_MS for sessions and events, and routes
 * keyboard input through three mutually exclusive modes, checked in order:
 * detail view → sidebar → timeline. All UI state lives in one state object;
 * the latest state is mirrored into `stateRef` so the stable polling
 * callbacks can read current filters without re-subscribing.
 */
function AuditApp({ mcpdUrl, token, projectFilter }: AuditAppProps) {
  const { exit } = useApp();
  const { stdout } = useStdout();

  const [state, setState] = useState<AuditConsoleState>({
    phase: 'loading',
    error: null,
    sessions: [],
    selectedSessionIdx: -1,
    showSidebar: true,
    events: [],
    focusedEventIdx: -1,
    totalEvents: 0,
    detailEvent: null,
    detailScrollOffset: 0,
    projectFilter: projectFilter ?? null,
    kindFilter: null,
    dateFilter: 'all',
  });

  // Use refs for polling to avoid re-creating intervals on every state change
  const stateRef = useRef(state);
  stateRef.current = state;

  // Fetch sessions (stable — no state deps)
  const fetchSessions = useCallback(async () => {
    try {
      const params = new URLSearchParams();
      const s = stateRef.current;
      if (s.projectFilter) params.set('projectName', s.projectFilter);
      const from = dateFilterToFrom(s.dateFilter);
      if (from) params.set('from', from);
      params.set('limit', '50');
      const url = `${mcpdUrl}/api/v1/audit/sessions?${params.toString()}`;
      const data = await fetchJson<{ sessions?: AuditSession[]; total?: number }>(url, token);
      if (data.sessions && Array.isArray(data.sessions)) {
        setState((prev) => ({ ...prev, sessions: data.sessions!, phase: 'ready' }));
      }
    } catch (err) {
      setState((prev) => {
        // Only show error if we haven't loaded anything yet
        if (prev.phase === 'loading') {
          return { ...prev, phase: 'error', error: err instanceof Error ? err.message : String(err) };
        }
        return prev; // Keep existing data on transient errors
      });
    }
  }, [mcpdUrl, token]);

  // Fetch events (stable — no state deps)
  const fetchEvents = useCallback(async () => {
    try {
      const s = stateRef.current;
      const params = new URLSearchParams();
      // A selected session narrows the query more than the project filter.
      const selectedSession = s.selectedSessionIdx >= 0 ? s.sessions[s.selectedSessionIdx] : undefined;
      if (selectedSession) {
        params.set('sessionId', selectedSession.sessionId);
      } else if (s.projectFilter) {
        params.set('projectName', s.projectFilter);
      }
      if (s.kindFilter) params.set('eventKind', s.kindFilter);
      const from = dateFilterToFrom(s.dateFilter);
      if (from) params.set('from', from);
      params.set('limit', String(MAX_EVENTS));
      const url = `${mcpdUrl}/api/v1/audit/events?${params.toString()}`;
      const data = await fetchJson<{ events?: AuditEvent[]; total?: number }>(url, token);
      if (data.events && Array.isArray(data.events)) {
        // API returns newest first — reverse for timeline display
        setState((prev) => ({ ...prev, events: data.events!.reverse(), totalEvents: data.total ?? data.events!.length }));
      }
    } catch {
      // Non-fatal — keep existing events
    }
  }, [mcpdUrl, token]);

  // Initial load + polling (single stable interval)
  useEffect(() => {
    void fetchSessions();
    void fetchEvents();
    const timer = setInterval(() => {
      void fetchSessions();
      void fetchEvents();
    }, POLL_INTERVAL_MS);
    return () => clearInterval(timer);
  }, [fetchSessions, fetchEvents]);

  // Date filter handler — shared between sidebar and timeline
  const handleDateFilter = useCallback(() => {
    setState((prev) => {
      const currentIdx = DATE_FILTER_CYCLE.indexOf(prev.dateFilter);
      const nextIdx = (currentIdx + 1) % DATE_FILTER_CYCLE.length;
      const next = { ...prev, dateFilter: DATE_FILTER_CYCLE[nextIdx]!, focusedEventIdx: -1, selectedSessionIdx: -1 };
      // NOTE(review): stateRef is written inside the updater so the immediate
      // refetch below sees the new filter — relies on the updater running
      // synchronously before fetchSessions reads stateRef; confirm this holds
      // under React concurrent rendering.
      stateRef.current = next;
      return next;
    });
    void fetchSessions();
    void fetchEvents();
  }, [fetchSessions, fetchEvents]);

  // Kind filter handler — shared between sidebar and timeline
  const handleKindFilter = useCallback(() => {
    // null = "no kind filter"; cycling wraps back to it.
    const kinds = [null, 'tool_call_trace', 'gate_decision', 'pipeline_execution', 'stage_execution', 'prompt_delivery', 'session_bind'];
    setState((prev) => {
      const currentIdx = kinds.indexOf(prev.kindFilter);
      const nextIdx = (currentIdx + 1) % kinds.length;
      const next = { ...prev, kindFilter: kinds[nextIdx] ?? null, focusedEventIdx: -1 };
      stateRef.current = next;
      return next;
    });
    void fetchEvents();
  }, [fetchEvents]);

  // Keyboard input
  useInput((input, key) => {
    const s = stateRef.current;

    // Quit
    if (input === 'q') {
      exit();
      return;
    }

    // ── Detail view navigation ──
    if (s.detailEvent) {
      if (key.escape) {
        setState((prev) => ({ ...prev, detailEvent: null, detailScrollOffset: 0 }));
        return;
      }
      if (key.downArrow) {
        setState((prev) => ({ ...prev, detailScrollOffset: prev.detailScrollOffset + 1 }));
        return;
      }
      if (key.upArrow) {
        setState((prev) => ({ ...prev, detailScrollOffset: Math.max(0, prev.detailScrollOffset - 1) }));
        return;
      }
      if (key.pageDown) {
        const pageSize = Math.max(1, Math.floor(stdout.rows * 0.5));
        setState((prev) => ({ ...prev, detailScrollOffset: prev.detailScrollOffset + pageSize }));
        return;
      }
      if (key.pageUp) {
        const pageSize = Math.max(1, Math.floor(stdout.rows * 0.5));
        setState((prev) => ({ ...prev, detailScrollOffset: Math.max(0, prev.detailScrollOffset - pageSize) }));
        return;
      }
      return;
    }

    // ── Sidebar navigation (arrows = sessions, Enter = select, Escape = close) ──
    if (s.showSidebar) {
      // Move the selection by `direction * step` positions in visual order;
      // moving above the first session lands on the "All" row (-1).
      const navigateSidebar = (direction: number, step: number = 1) => {
        setState((prev) => {
          const order = visualSessionOrder(prev.sessions);
          if (order.length === 0) return prev;
          const curPos = prev.selectedSessionIdx === -1 ? -1 : order.indexOf(prev.selectedSessionIdx);
          let newPos = curPos + direction * step;
          let newIdx: number;
          if (newPos < 0) {
            newIdx = -1; // "All" selection
          } else {
            newPos = Math.min(order.length - 1, Math.max(0, newPos));
            newIdx = order[newPos]!;
          }
          if (newIdx === prev.selectedSessionIdx) return prev;
          const next = { ...prev, selectedSessionIdx: newIdx, focusedEventIdx: -1 };
          stateRef.current = next;
          return next;
        });
        void fetchEvents();
      };

      if (key.downArrow) { navigateSidebar(1); return; }
      if (key.upArrow) { navigateSidebar(-1); return; }
      if (key.pageDown) { navigateSidebar(1, Math.max(1, Math.floor(stdout.rows * 0.5))); return; }
      if (key.pageUp) { navigateSidebar(-1, Math.max(1, Math.floor(stdout.rows * 0.5))); return; }

      if (key.return) {
        // Enter closes sidebar, keeping the selected session
        setState((prev) => ({ ...prev, showSidebar: false, focusedEventIdx: -1 }));
        return;
      }
      if (key.escape) {
        setState((prev) => ({ ...prev, showSidebar: false }));
        return;
      }

      if (input === 'k') { handleKindFilter(); return; }
      if (input === 'd') { handleDateFilter(); return; }

      return; // Absorb all other input when sidebar is open
    }

    // ── Timeline navigation (sidebar closed) ──

    // Escape reopens sidebar
    if (key.escape) {
      setState((prev) => ({ ...prev, showSidebar: true, focusedEventIdx: -1 }));
      return;
    }

    // Auto-scroll resume
    if (input === 'a') {
      setState((prev) => ({ ...prev, focusedEventIdx: -1 }));
      return;
    }

    if (input === 'k') { handleKindFilter(); return; }
    if (input === 'd') { handleDateFilter(); return; }

    // Enter: detail view
    if (key.return) {
      setState((prev) => {
        // In follow mode, open the newest event.
        const idx = prev.focusedEventIdx === -1 ? prev.events.length - 1 : prev.focusedEventIdx;
        const event = prev.events[idx];
        if (!event) return prev;
        return { ...prev, detailEvent: event, detailScrollOffset: 0 };
      });
      return;
    }

    // Arrow navigation
    if (key.downArrow) {
      setState((prev) => {
        if (prev.focusedEventIdx === -1) return prev;
        return { ...prev, focusedEventIdx: Math.min(prev.events.length - 1, prev.focusedEventIdx + 1) };
      });
      return;
    }
    if (key.upArrow) {
      setState((prev) => {
        if (prev.focusedEventIdx === -1) {
          // Leave follow mode by focusing the newest event first.
          return prev.events.length > 0 ? { ...prev, focusedEventIdx: prev.events.length - 1 } : prev;
        }
        // Moving above the first event returns to follow mode (-1).
        return { ...prev, focusedEventIdx: prev.focusedEventIdx <= 0 ? -1 : prev.focusedEventIdx - 1 };
      });
      return;
    }
    if (key.pageDown) {
      const pageSize = Math.max(1, stdout.rows - 8);
      setState((prev) => {
        if (prev.focusedEventIdx === -1) return prev;
        return { ...prev, focusedEventIdx: Math.min(prev.events.length - 1, prev.focusedEventIdx + pageSize) };
      });
      return;
    }
    if (key.pageUp) {
      const pageSize = Math.max(1, stdout.rows - 8);
      setState((prev) => {
        const current = prev.focusedEventIdx === -1 ? prev.events.length - 1 : prev.focusedEventIdx;
        return { ...prev, focusedEventIdx: Math.max(0, current - pageSize) };
      });
      return;
    }
  });

  const height = stdout.rows - 3; // header + footer

  if (state.phase === 'loading') {
    return (
      <Box flexDirection="column">
        <Text bold color="cyan">Audit Console</Text>
        <Text dimColor>Connecting to mcpd{'\u2026'}</Text>
      </Box>
    );
  }

  if (state.phase === 'error') {
    return (
      <Box flexDirection="column">
        <Text bold color="red">Audit Console — Error</Text>
        <Text color="red">{state.error}</Text>
        <Text dimColor>Check mcpd is running and accessible at {mcpdUrl}</Text>
      </Box>
    );
  }

  // Detail view
  if (state.detailEvent) {
    return (
      <Box flexDirection="column" height={stdout.rows}>
        <Box flexGrow={1}>
          <AuditDetail event={state.detailEvent} scrollOffset={state.detailScrollOffset} height={height} />
        </Box>
        <Box borderStyle="single" borderColor="gray" paddingX={1}>
          <Text dimColor>[{'\u2191\u2193'}] scroll [PgUp/Dn] page [Esc] back [q] quit</Text>
        </Box>
      </Box>
    );
  }

  // Main view
  const sidebarHint = state.showSidebar
    ? '[\u2191\u2193] session [Enter] select [k] kind [d] date [Esc] close [q] quit'
    : state.focusedEventIdx === -1
      ? '[\u2191] nav [k] kind [d] date [Enter] detail [Esc] sidebar [q] quit'
      : '[\u2191\u2193] nav [PgUp/Dn] page [a] follow [k] kind [d] date [Enter] detail [Esc] sidebar [q] quit';

  return (
    <Box flexDirection="column" height={stdout.rows}>
      {/* Header */}
      <Box paddingX={1}>
        <Text bold color="cyan">Audit Console</Text>
        <Text dimColor> {state.totalEvents} total events</Text>
        {state.kindFilter && <Text color="yellow"> kind: {EVENT_KIND_LABELS[state.kindFilter] ?? state.kindFilter}</Text>}
        {state.dateFilter !== 'all' && <Text color="magenta"> date: {DATE_FILTER_LABELS[state.dateFilter]}</Text>}
      </Box>

      {/* Body */}
      <Box flexGrow={1}>
        {state.showSidebar && (
          <AuditSidebar
            sessions={state.sessions}
            selectedIdx={state.selectedSessionIdx}
            projectFilter={state.projectFilter}
            dateFilter={state.dateFilter}
            height={height}
          />
        )}
        <AuditTimeline events={state.events} height={height} focusedIdx={state.focusedEventIdx} />
      </Box>

      {/* Footer */}
      <Box borderStyle="single" borderColor="gray" paddingX={1}>
        <Text dimColor>{sidebarHint}</Text>
      </Box>
    </Box>
  );
}
|
||||
|
||||
// ── Render entry point ──
|
||||
|
||||
/** Options for renderAuditConsole (mirrors AuditAppProps). */
export interface AuditRenderOptions {
  mcpdUrl: string;        // base URL of the mcpd HTTP API
  token?: string;         // optional bearer token for API requests
  projectFilter?: string; // restrict sessions/events to one project
}
|
||||
|
||||
/**
 * Mount the audit console TUI and resolve once the user exits (q key).
 */
export async function renderAuditConsole(opts: AuditRenderOptions): Promise<void> {
  const { mcpdUrl, token, projectFilter } = opts;
  const app = render(<AuditApp mcpdUrl={mcpdUrl} token={token} projectFilter={projectFilter} />);
  await app.waitUntilExit();
}
|
||||
@@ -1,98 +0,0 @@
|
||||
/**
|
||||
* Types for the audit console — views audit events from mcpd.
|
||||
*/
|
||||
|
||||
/** One client session as aggregated by mcpd's audit API. */
export interface AuditSession {
  sessionId: string;
  projectName: string;
  userName: string | null; // null when no user is associated with the session
  firstSeen: string;       // ISO timestamp
  lastSeen: string;        // ISO timestamp
  eventCount: number;
  eventKinds: string[];    // event kinds observed in this session
}

/** A single audit event as returned by mcpd's audit API. */
export interface AuditEvent {
  id: string;
  timestamp: string;       // ISO timestamp
  sessionId: string;
  projectName: string;
  eventKind: string;       // e.g. 'tool_call_trace' — see EVENT_KIND_LABELS
  source: string;
  // NOTE(review): presumably indicates the event passed an integrity/
  // provenance check server-side — confirm against mcpd's audit docs.
  verified: boolean;
  serverName: string | null;
  correlationId: string | null;
  parentEventId: string | null;
  payload: Record<string, unknown>; // kind-specific fields
}

/** Complete UI state for the audit console (single source of truth). */
export interface AuditConsoleState {
  phase: 'loading' | 'ready' | 'error';
  error: string | null; // populated only when phase === 'error'

  // Sessions
  sessions: AuditSession[];
  selectedSessionIdx: number; // -1 = all sessions, 0+ = specific session
  showSidebar: boolean;

  // Events
  events: AuditEvent[];
  focusedEventIdx: number; // -1 = auto-scroll
  totalEvents: number;     // server-reported total (may exceed events.length)

  // Detail view
  detailEvent: AuditEvent | null; // non-null while the detail view is open
  detailScrollOffset: number;

  // Filters
  projectFilter: string | null;
  kindFilter: string | null;
  dateFilter: 'all' | '1h' | '24h' | '7d' | 'today';
}

/** Preset choices for the date filter. */
export type DateFilterPreset = 'all' | '1h' | '24h' | '7d' | 'today';

// Order in which the `d` key cycles through date filter presets.
export const DATE_FILTER_CYCLE: DateFilterPreset[] = ['all', 'today', '1h', '24h', '7d'];

// Human-readable labels shown in the header and sidebar.
export const DATE_FILTER_LABELS: Record<DateFilterPreset, string> = {
  'all': 'all time',
  'today': 'today',
  '1h': 'last hour',
  '24h': 'last 24h',
  '7d': 'last 7 days',
};
|
||||
|
||||
export function dateFilterToFrom(preset: DateFilterPreset): string | undefined {
|
||||
if (preset === 'all') return undefined;
|
||||
const now = new Date();
|
||||
switch (preset) {
|
||||
case '1h': return new Date(now.getTime() - 60 * 60 * 1000).toISOString();
|
||||
case '24h': return new Date(now.getTime() - 24 * 60 * 60 * 1000).toISOString();
|
||||
case '7d': return new Date(now.getTime() - 7 * 24 * 60 * 60 * 1000).toISOString();
|
||||
case 'today': {
|
||||
const start = new Date(now);
|
||||
start.setHours(0, 0, 0, 0);
|
||||
return start.toISOString();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Ink color name per event kind, used by the timeline and detail views.
// Unknown kinds fall back to 'white' at the call sites.
export const EVENT_KIND_COLORS: Record<string, string> = {
  'pipeline_execution': 'blue',
  'stage_execution': 'cyan',
  'gate_decision': 'yellow',
  'prompt_delivery': 'magenta',
  'tool_call_trace': 'green',
  'rbac_decision': 'red',
  'session_bind': 'gray',
};

// Short display label per event kind (timeline pads these to 9 chars).
// Unknown kinds fall back to the uppercased raw kind at the call sites.
export const EVENT_KIND_LABELS: Record<string, string> = {
  'pipeline_execution': 'PIPELINE',
  'stage_execution': 'STAGE',
  'gate_decision': 'GATE',
  'prompt_delivery': 'PROMPT',
  'tool_call_trace': 'TOOL',
  'rbac_decision': 'RBAC',
  'session_bind': 'BIND',
};
|
||||
@@ -1,229 +0,0 @@
|
||||
/**
|
||||
* ActionArea — context-sensitive bottom panel in the unified console.
|
||||
*
|
||||
* Renders the appropriate sub-view based on the current action state.
|
||||
* Only one action at a time — Esc always returns to { type: 'none' }.
|
||||
*/
|
||||
|
||||
import { Box, Text } from 'ink';
|
||||
import type { ActionState, TimelineEvent } from '../unified-types.js';
|
||||
import type { McpTool, McpSession, McpResource, McpPrompt } from '../mcp-session.js';
|
||||
import { formatTime, formatEventSummary, formatBodyDetail } from '../format-event.js';
|
||||
import { ProvenanceView } from './provenance-view.js';
|
||||
import { ToolDetailView } from './tool-detail.js';
|
||||
import { ToolListView } from './tool-list.js';
|
||||
import { ResourceListView } from './resource-list.js';
|
||||
import { PromptListView } from './prompt-list.js';
|
||||
import { RawJsonRpcView } from './raw-jsonrpc.js';
|
||||
|
||||
interface ActionAreaProps {
  action: ActionState;          // which sub-view is active (discriminated by `type`)
  events: TimelineEvent[];      // timeline events, indexed via action.eventIdx / clientEventIdx
  session: McpSession;          // live MCP session passed through to sub-views
  tools: McpTool[];
  resources: McpResource[];
  prompts: McpPrompt[];
  availableModels: string[];    // forwarded to the provenance model picker
  height: number;               // rows available to the action panel
  onSetAction: (action: ActionState) => void; // replace the current action state
  onError: (msg: string) => void;             // surface an error message to the parent
}
|
||||
|
||||
// Dispatches on action.type and renders the matching sub-view.
// Each branch wires its own "close" callback back to { type: 'none' }.
export function ActionArea({
  action,
  events,
  session,
  tools,
  resources,
  prompts,
  availableModels,
  height,
  onSetAction,
  onError,
}: ActionAreaProps) {
  if (action.type === 'none') return null;

  // Single-event detail view with scrolling and in-body search.
  if (action.type === 'detail') {
    const event = events[action.eventIdx];
    if (!event) return null; // index may be stale after a refresh
    return <DetailView event={event} maxLines={height} scrollOffset={action.scrollOffset} horizontalOffset={action.horizontalOffset} searchQuery={action.searchQuery} searchMatches={action.searchMatches} searchMatchIdx={action.searchMatchIdx} searchMode={action.searchMode} />;
  }

  // Provenance quad-view: forwards the full replay/picker state from the action.
  if (action.type === 'provenance') {
    const clientEvent = events[action.clientEventIdx];
    if (!clientEvent) return null;
    return (
      <ProvenanceView
        clientEvent={clientEvent}
        upstreamEvent={action.upstreamEvent}
        height={height}
        scrollOffset={action.scrollOffset}
        horizontalOffset={action.horizontalOffset}
        focusedPanel={action.focusedPanel}
        parameterIdx={action.parameterIdx}
        replayConfig={action.replayConfig}
        replayResult={action.replayResult}
        replayRunning={action.replayRunning}
        editingUpstream={action.editingUpstream}
        editedContent={action.editedContent}
        onEditContent={(text) => onSetAction({ ...action, editedContent: text })}
        proxyModelDetails={action.proxyModelDetails}
        liveOverride={action.liveOverride}
        serverList={action.serverList}
        serverOverrides={action.serverOverrides}
        selectedServerIdx={action.selectedServerIdx}
        serverPickerOpen={action.serverPickerOpen}
        modelPickerOpen={action.modelPickerOpen}
        modelPickerIdx={action.modelPickerIdx}
        availableModels={availableModels}
        searchMode={action.searchMode}
        searchQuery={action.searchQuery}
        searchMatches={action.searchMatches}
        searchMatchIdx={action.searchMatchIdx}
      />
    );
  }

  // Tool argument form for a single selected tool.
  if (action.type === 'tool-input') {
    return (
      <Box flexDirection="column" height={height} borderStyle="round" borderColor="gray" paddingX={1}>
        <ToolDetailView
          tool={action.tool}
          session={session}
          onResult={() => onSetAction({ type: 'none' })}
          onError={onError}
          onBack={() => onSetAction({ type: 'none' })}
          onLoadingChange={(loading) => onSetAction({ ...action, loading })}
        />
      </Box>
    );
  }

  // Tool list → selecting a tool transitions into 'tool-input'.
  if (action.type === 'tool-browser') {
    return (
      <Box flexDirection="column" height={height} borderStyle="round" borderColor="gray" paddingX={1}>
        <ToolListView
          tools={tools}
          onSelect={(tool) => onSetAction({ type: 'tool-input', tool, loading: false })}
          onBack={() => onSetAction({ type: 'none' })}
        />
      </Box>
    );
  }

  if (action.type === 'resource-browser') {
    return (
      <Box flexDirection="column" height={height} borderStyle="round" borderColor="gray" paddingX={1}>
        <ResourceListView
          resources={resources}
          session={session}
          onResult={() => {}}
          onError={onError}
          onBack={() => onSetAction({ type: 'none' })}
        />
      </Box>
    );
  }

  if (action.type === 'prompt-browser') {
    return (
      <Box flexDirection="column" height={height} borderStyle="round" borderColor="gray" paddingX={1}>
        <PromptListView
          prompts={prompts}
          session={session}
          onResult={() => {}}
          onError={onError}
          onBack={() => onSetAction({ type: 'none' })}
        />
      </Box>
    );
  }

  if (action.type === 'raw-jsonrpc') {
    return (
      <Box flexDirection="column" height={height} borderStyle="round" borderColor="gray" paddingX={1}>
        <RawJsonRpcView
          session={session}
          onBack={() => onSetAction({ type: 'none' })}
        />
      </Box>
    );
  }

  // Unreachable if ActionState is exhaustive, but keeps the return type total.
  return null;
}
|
||||
|
||||
// ── Detail View ──
|
||||
|
||||
// Full-screen detail for one timeline event: colored summary header, a key
// hint line, a scrollable/pannable body, and an optional search status row.
function DetailView({ event, maxLines, scrollOffset, horizontalOffset, searchQuery, searchMatches, searchMatchIdx, searchMode }: {
  event: TimelineEvent;
  maxLines: number;          // total rows available to the panel
  scrollOffset: number;      // first visible body line (absolute index)
  horizontalOffset: number;  // columns panned to the right
  searchQuery: string;
  searchMatches: number[];   // absolute body-line indices that match the query
  searchMatchIdx: number;    // index into searchMatches of the current match
  searchMode: boolean;       // true while the user is typing a query
}) {
  const { arrow, color, label } = formatEventSummary(
    event.eventType,
    event.method,
    event.body,
    event.upstreamName,
    event.durationMs,
  );
  const allLines = formatBodyDetail(event.eventType, event.method ?? '', event.body);
  const hasSearch = searchQuery.length > 0 || searchMode;
  // 3 rows reserved: border top/bottom + key-hint line; one more for the search row.
  const bodyHeight = maxLines - 3 - (hasSearch ? 1 : 0);
  const visibleLines = allLines.slice(scrollOffset, scrollOffset + bodyHeight);
  const totalLines = allLines.length;
  const canScroll = totalLines > bodyHeight;
  const atEnd = scrollOffset + bodyHeight >= totalLines;

  // Which absolute line indices are in the visible window?
  const matchSet = new Set(searchMatches);

  return (
    <Box flexDirection="column" borderStyle="round" borderColor="gray" paddingX={1} height={maxLines}>
      <Text bold>
        <Text color={color}>{arrow} {label}</Text>
        <Text dimColor> {formatTime(event.timestamp)} {event.projectName}/{event.sessionId.slice(0, 8)}</Text>
        {event.correlationId && <Text dimColor>{' \u26D3'}</Text>}
        {canScroll ? (
          <Text dimColor> [{scrollOffset + 1}-{Math.min(scrollOffset + bodyHeight, totalLines)}/{totalLines}]</Text>
        ) : null}
        {horizontalOffset > 0 && <Text dimColor> col:{horizontalOffset}</Text>}
      </Text>
      <Text dimColor>{'\u2191\u2193:scroll \u2190\u2192:pan p:provenance /:search PgDn/PgUp:next/prev Esc:close'}</Text>
      {visibleLines.map((line, i) => {
        const absIdx = scrollOffset + i;
        const isMatch = matchSet.has(absIdx);
        const isCurrent = searchMatches[searchMatchIdx] === absIdx;
        const displayLine = horizontalOffset > 0 ? line.slice(horizontalOffset) : line;
        return (
          <Text key={i} wrap="truncate" dimColor={!isMatch && line.startsWith(' ')}
            backgroundColor={isCurrent ? 'yellow' : isMatch ? 'gray' : undefined}
            color={isCurrent ? 'black' : isMatch ? 'white' : undefined}
          >
            {displayLine}
          </Text>
        );
      })}
      {canScroll && !atEnd && (
        <Text dimColor>{'\u2026 +'}{totalLines - scrollOffset - bodyHeight}{' more lines \u2193'}</Text>
      )}
      {hasSearch && (
        <Text>
          <Text color="cyan">/{searchQuery}</Text>
          {searchMatches.length > 0 && (
            <Text dimColor> [{searchMatchIdx + 1}/{searchMatches.length}] n:next N:prev Esc:clear</Text>
          )}
          {searchQuery.length > 0 && searchMatches.length === 0 && (
            <Text dimColor> (no matches)</Text>
          )}
          {searchMode && <Text color="cyan">_</Text>}
        </Text>
      )}
    </Box>
  );
}
|
||||
@@ -1,151 +0,0 @@
|
||||
import { useState } from 'react';
|
||||
import { Box, Text } from 'ink';
|
||||
import { TextInput, Spinner } from '@inkjs/ui';
|
||||
import type { McpTool, McpSession } from '../mcp-session.js';
|
||||
|
||||
interface BeginSessionViewProps {
  tool: McpTool;        // the begin_session tool; its inputSchema drives the form
  session: McpSession;  // live MCP session used to issue the call
  onDone: (result: unknown) => void; // called with the tool result on success
  onError: (msg: string) => void;    // called with a human-readable failure message
  onBack: () => void;   // NOTE(review): declared but not destructured by BeginSessionView
  onLoadingChange?: (loading: boolean) => void; // mirrors the internal loading flag
}

// Subset of JSON-Schema property fields this form inspects.
interface SchemaProperty {
  type?: string;            // e.g. 'string' | 'array'
  description?: string;     // used as the field label when present
  items?: { type?: string };
  maxItems?: number;
}
|
||||
|
||||
/**
 * Dynamically renders a form for the begin_session tool based on its
 * inputSchema from the MCP protocol. Adapts to whatever the server sends:
 * - string properties → text input
 * - array of strings → comma-separated text input
 * - multiple/unknown properties → raw JSON input
 *
 * Reports success via onDone, failures via onError, and mirrors its
 * loading state to onLoadingChange when provided.
 */
export function BeginSessionView({ tool, session, onDone, onError, onLoadingChange }: BeginSessionViewProps) {
  const [loading, _setLoading] = useState(false);
  // Keep the parent informed whenever the local loading flag changes.
  const setLoading = (v: boolean) => { _setLoading(v); onLoadingChange?.(v); };
  const [input, setInput] = useState('');

  const schema = tool.inputSchema as {
    properties?: Record<string, SchemaProperty>;
    required?: string[];
  } | undefined;

  const properties = schema?.properties ?? {};
  const propEntries = Object.entries(properties);

  // Determine mode: focused single-property or generic JSON
  const singleProp = propEntries.length === 1 ? propEntries[0]! : null;
  const propName = singleProp?.[0];
  const propDef = singleProp?.[1];
  const isArray = propDef?.type === 'array';

  /**
   * Validate and convert the raw text input into tool-call arguments.
   * Returns null (after reporting via onError) when the input is invalid.
   */
  const buildArgs = (): Record<string, unknown> | null => {
    if (!singleProp) {
      // JSON mode. The TextInput displays a "{}" default, but `input` only
      // updates via onChange — so a submit with nothing typed would parse ''
      // and fail. Treat empty input as the displayed "{}" default instead.
      const raw = input.trim() === '' ? '{}' : input;
      try {
        return JSON.parse(raw) as Record<string, unknown>;
      } catch {
        onError('Invalid JSON');
        return null;
      }
    }

    const trimmed = input.trim();
    if (trimmed.length === 0) {
      onError(`${propName} is required`);
      return null;
    }

    if (isArray) {
      // Comma-separated list; drop empty segments like "a,,b".
      const items = trimmed
        .split(',')
        .map((t) => t.trim())
        .filter((t) => t.length > 0);
      if (items.length === 0) {
        onError(`Enter at least one value for ${propName}`);
        return null;
      }
      return { [propName!]: items };
    }

    return { [propName!]: trimmed };
  };

  // Submit the tool call. On failure the spinner is dismissed so the user
  // can retry; on success loading stays set (presumably the parent swaps
  // views via onDone — TODO confirm).
  const handleSubmit = async () => {
    const args = buildArgs();
    if (!args) return;

    setLoading(true);
    try {
      const result = await session.callTool(tool.name, args);
      onDone(result);
    } catch (err) {
      onError(`${tool.name} failed: ${err instanceof Error ? err.message : String(err)}`);
      setLoading(false);
    }
  };

  if (loading) {
    return (
      <Box gap={1}>
        <Spinner label={`Calling ${tool.name}...`} />
      </Box>
    );
  }

  // Focused single-property mode
  if (singleProp) {
    const label = propDef?.description ?? propName!;
    const hint = isArray ? 'comma-separated values' : 'text';

    return (
      <Box flexDirection="column">
        <Text bold>{tool.description ?? tool.name}</Text>
        <Text dimColor>{label}</Text>
        <Box marginTop={1}>
          <Text color="cyan">{propName}: </Text>
          <TextInput
            placeholder={hint}
            onChange={setInput}
            onSubmit={handleSubmit}
          />
        </Box>
      </Box>
    );
  }

  // Multi-property / unknown schema → JSON input
  return (
    <Box flexDirection="column">
      <Text bold>{tool.description ?? tool.name}</Text>
      {propEntries.length > 0 && (
        <Box flexDirection="column" marginTop={1}>
          <Text bold>Schema:</Text>
          {propEntries.map(([name, def]) => (
            <Text key={name} dimColor>
              {name}: {def.type ?? 'any'}{def.description ? ` — ${def.description}` : ''}
            </Text>
          ))}
        </Box>
      )}
      <Box flexDirection="column" marginTop={1}>
        <Text bold>Arguments (JSON):</Text>
        <Box>
          <Text color="cyan">> </Text>
          <TextInput
            placeholder="{}"
            defaultValue="{}"
            onChange={setInput}
            onSubmit={handleSubmit}
          />
        </Box>
      </Box>
    </Box>
  );
}
|
||||
@@ -1,11 +0,0 @@
|
||||
import { Box, Text } from 'ink';
|
||||
import { Spinner } from '@inkjs/ui';
|
||||
|
||||
/** Transient splash shown while the initialize request is in flight. */
export function ConnectingView() {
  const detail = 'Sending initialize request';
  return (
    <Box gap={1}>
      <Spinner label="Connecting..." />
      <Text dimColor>{detail}</Text>
    </Box>
  );
}
|
||||
@@ -1,185 +0,0 @@
|
||||
/**
|
||||
* Diff computation and rendering for the Provenance view.
|
||||
*
|
||||
* Uses the `diff` package for line-level diffs with:
|
||||
* - 3-line context around changes
|
||||
* - Collapsed unchanged regions (GitKraken style)
|
||||
* - vimdiff-style coloring (red=removed, green=added)
|
||||
*/
|
||||
|
||||
import { Text } from 'ink';
|
||||
import { diffLines } from 'diff';
|
||||
|
||||
// ── Types ──
|
||||
|
||||
// Classification of a single rendered diff line.
export type DiffLineKind = 'added' | 'removed' | 'context' | 'collapsed';

// One display line of a computed diff.
export interface DiffLine {
  kind: DiffLineKind;
  text: string;             // line content; for 'collapsed', a summary like "12 unchanged lines"
  collapsedCount?: number;  // only for 'collapsed' kind
}

// Summary counters for a diff, shown in panel headers.
export interface DiffStats {
  added: number;      // number of added lines
  removed: number;    // number of removed lines
  pctChanged: number; // rounded percentage of changed lines
}

export interface DiffResult {
  lines: DiffLine[];
  stats: DiffStats;
}

// ── Compute diff with context and collapsing ──

// Lines of unchanged context kept around each change (vimdiff-style default).
const DEFAULT_CONTEXT = 3;
|
||||
|
||||
export function computeDiffLines(
|
||||
upstream: string,
|
||||
transformed: string,
|
||||
contextLines = DEFAULT_CONTEXT,
|
||||
): DiffResult {
|
||||
if (upstream === transformed) {
|
||||
// Identical — show single collapsed block
|
||||
const lineCount = upstream.split('\n').length;
|
||||
return {
|
||||
lines: [{ kind: 'collapsed', text: `${lineCount} unchanged lines`, collapsedCount: lineCount }],
|
||||
stats: { added: 0, removed: 0, pctChanged: 0 },
|
||||
};
|
||||
}
|
||||
|
||||
const changes = diffLines(upstream, transformed);
|
||||
|
||||
// Step 1: Flatten changes into individual tagged lines
|
||||
interface TaggedLine { kind: 'added' | 'removed' | 'unchanged'; text: string }
|
||||
const tagged: TaggedLine[] = [];
|
||||
|
||||
for (const change of changes) {
|
||||
const lines = change.value.replace(/\n$/, '').split('\n');
|
||||
const kind: TaggedLine['kind'] = change.added ? 'added' : change.removed ? 'removed' : 'unchanged';
|
||||
for (const line of lines) {
|
||||
tagged.push({ kind, text: line });
|
||||
}
|
||||
}
|
||||
|
||||
// Step 2: Mark which unchanged lines are within context range of a change
|
||||
const inContext = new Set<number>();
|
||||
for (let i = 0; i < tagged.length; i++) {
|
||||
if (tagged[i]!.kind !== 'unchanged') {
|
||||
// Mark contextLines before and after
|
||||
for (let j = Math.max(0, i - contextLines); j <= Math.min(tagged.length - 1, i + contextLines); j++) {
|
||||
if (tagged[j]!.kind === 'unchanged') {
|
||||
inContext.add(j);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Step 3: Build output with collapsed regions
|
||||
const result: DiffLine[] = [];
|
||||
let collapsedRun = 0;
|
||||
|
||||
for (let i = 0; i < tagged.length; i++) {
|
||||
const line = tagged[i]!;
|
||||
if (line.kind !== 'unchanged') {
|
||||
// Flush collapsed
|
||||
if (collapsedRun > 0) {
|
||||
result.push({ kind: 'collapsed', text: `${collapsedRun} unchanged lines`, collapsedCount: collapsedRun });
|
||||
collapsedRun = 0;
|
||||
}
|
||||
result.push({ kind: line.kind, text: line.text });
|
||||
} else if (inContext.has(i)) {
|
||||
// Context line
|
||||
if (collapsedRun > 0) {
|
||||
result.push({ kind: 'collapsed', text: `${collapsedRun} unchanged lines`, collapsedCount: collapsedRun });
|
||||
collapsedRun = 0;
|
||||
}
|
||||
result.push({ kind: 'context', text: line.text });
|
||||
} else {
|
||||
collapsedRun++;
|
||||
}
|
||||
}
|
||||
|
||||
// Flush trailing collapsed
|
||||
if (collapsedRun > 0) {
|
||||
result.push({ kind: 'collapsed', text: `${collapsedRun} unchanged lines`, collapsedCount: collapsedRun });
|
||||
}
|
||||
|
||||
// Stats
|
||||
let added = 0;
|
||||
let removed = 0;
|
||||
for (const t of tagged) {
|
||||
if (t.kind === 'added') added++;
|
||||
if (t.kind === 'removed') removed++;
|
||||
}
|
||||
const total = Math.max(1, tagged.length - added); // original line count approximation
|
||||
const pctChanged = Math.round(((added + removed) / (total + added)) * 100);
|
||||
|
||||
return { lines: result, stats: { added, removed, pctChanged } };
|
||||
}
|
||||
|
||||
// ── Format header stats ──
|
||||
|
||||
export function formatDiffStats(stats: DiffStats): string {
|
||||
if (stats.added === 0 && stats.removed === 0) return 'no changes';
|
||||
const parts: string[] = [];
|
||||
if (stats.added > 0) parts.push(`+${stats.added}`);
|
||||
if (stats.removed > 0) parts.push(`-${stats.removed}`);
|
||||
parts.push(`${stats.pctChanged}% chg`);
|
||||
return parts.join(' ');
|
||||
}
|
||||
|
||||
// ── Rendering component ──
|
||||
|
||||
interface DiffPanelProps {
  lines: DiffLine[];         // precomputed diff lines (see computeDiffLines)
  scrollOffset: number;      // index of the first visible line
  height: number;            // max lines to render
  horizontalOffset?: number; // columns panned to the right (default 0)
}
|
||||
|
||||
function hSlice(text: string, offset: number): string {
|
||||
return offset > 0 ? text.slice(offset) : text;
|
||||
}
|
||||
|
||||
// Renders a scroll window over precomputed diff lines with vimdiff-style
// coloring (green=added, red=removed, dim=context, gray=collapsed marker),
// plus a trailing "+N more" indicator when lines remain below the window.
export function DiffPanel({ lines, scrollOffset, height, horizontalOffset = 0 }: DiffPanelProps) {
  const visible = lines.slice(scrollOffset, scrollOffset + height);
  const hasMore = lines.length > scrollOffset + height;

  return (
    <>
      {visible.map((line, i) => {
        switch (line.kind) {
          case 'added':
            return (
              <Text key={i} wrap="truncate" color="green">
                {'+ '}{hSlice(line.text, horizontalOffset)}
              </Text>
            );
          case 'removed':
            return (
              <Text key={i} wrap="truncate" color="red">
                {'- '}{hSlice(line.text, horizontalOffset)}
              </Text>
            );
          case 'context':
            return (
              <Text key={i} wrap="truncate" dimColor>
                {' '}{hSlice(line.text, horizontalOffset)}
              </Text>
            );
          case 'collapsed':
            // Collapsed summaries are never panned horizontally.
            return (
              <Text key={i} wrap="truncate" color="gray">
                {'\u2504\u2504\u2504 '}{line.text}{' \u2504\u2504\u2504'}
              </Text>
            );
        }
      })}
      {hasMore && (
        <Text dimColor>{'\u2026'} +{lines.length - scrollOffset - height} more</Text>
      )}
    </>
  );
}
|
||||
@@ -1,26 +0,0 @@
|
||||
import { Box, Text } from 'ink';
|
||||
|
||||
interface HeaderProps {
  projectName: string;
  sessionId?: string;   // rendered truncated to 8 chars when present
  gated: boolean;       // true → [GATED] badge, false → [OPEN]
  reconnecting: boolean; // shows a "reconnecting..." indicator
}
|
||||
|
||||
/** Console top bar: app badge, project name, session id, gate state, reconnect hint. */
export function Header({ projectName, sessionId, gated, reconnecting }: HeaderProps) {
  // Gate badge reflects whether the session still requires begin_session.
  const gateBadge = gated
    ? <Text color="yellow" bold>[GATED]</Text>
    : <Text color="green" bold>[OPEN]</Text>;

  return (
    <Box flexDirection="column" borderStyle="single" borderBottom={true} borderTop={false} borderLeft={false} borderRight={false} paddingX={1}>
      <Box gap={2}>
        <Text bold color="white" backgroundColor="blue"> mcpctl console </Text>
        <Text bold>{projectName}</Text>
        {sessionId && <Text dimColor>session: {sessionId.slice(0, 8)}</Text>}
        {gateBadge}
        {reconnecting && <Text color="cyan">reconnecting...</Text>}
      </Box>
    </Box>
  );
}
|
||||
@@ -1,39 +0,0 @@
|
||||
import { Box, Text } from 'ink';
|
||||
import { Select } from '@inkjs/ui';
|
||||
|
||||
// Identifiers for the top-level console menu entries.
type MenuAction = 'begin-session' | 'tools' | 'resources' | 'prompts' | 'raw' | 'session-info';

interface MainMenuProps {
  gated: boolean;       // gated sessions get a reduced menu (no tools/resources/prompts)
  toolCount: number;    // shown in the Tools entry label
  resourceCount: number;
  promptCount: number;
  onSelect: (action: MenuAction) => void;
}
|
||||
|
||||
/**
 * Top-level console menu. Gated sessions see only begin_session plus the
 * always-available entries; ungated sessions see the full browser set.
 */
export function MainMenu({ gated, toolCount, resourceCount, promptCount, onSelect }: MainMenuProps) {
  // Entries available regardless of gate state.
  const commonTail = [
    { label: 'Raw JSON-RPC — send freeform JSON-RPC messages', value: 'raw' as MenuAction },
    { label: 'Session Info — view initialize result and session state', value: 'session-info' as MenuAction },
  ];

  const items = gated
    ? [
        { label: 'Begin Session — call begin_session with tags to ungate', value: 'begin-session' as MenuAction },
        ...commonTail,
      ]
    : [
        { label: `Tools (${toolCount}) — browse and execute MCP tools`, value: 'tools' as MenuAction },
        { label: `Resources (${resourceCount}) — browse and read MCP resources`, value: 'resources' as MenuAction },
        { label: `Prompts (${promptCount}) — browse and get MCP prompts`, value: 'prompts' as MenuAction },
        ...commonTail,
      ];

  return (
    <Box flexDirection="column">
      <Text bold>
        {gated ? 'Session is gated — call begin_session to ungate:' : 'What would you like to explore?'}
      </Text>
      <Box marginTop={1}>
        <Select options={items} onChange={(v) => onSelect(v as MenuAction)} />
      </Box>
    </Box>
  );
}
|
||||
@@ -1,57 +0,0 @@
|
||||
import { useState } from 'react';
|
||||
import { Box, Text } from 'ink';
|
||||
import { Select, Spinner } from '@inkjs/ui';
|
||||
import type { McpPrompt, McpSession } from '../mcp-session.js';
|
||||
|
||||
interface PromptListViewProps {
  prompts: McpPrompt[];
  session: McpSession; // used for prompts/get calls
  onResult: (prompt: McpPrompt, content: unknown) => void; // selected prompt + prompts/get payload
  onError: (msg: string) => void;
  onBack: () => void;  // NOTE(review): declared but not destructured by PromptListView
}
|
||||
|
||||
// Selectable list of MCP prompts; choosing one fetches it via prompts/get
// and reports the payload through onResult.
export function PromptListView({ prompts, session, onResult, onError }: PromptListViewProps) {
  // Holds the name of the prompt currently being fetched (null = idle).
  const [loading, setLoading] = useState<string | null>(null);

  if (prompts.length === 0) {
    return <Text dimColor>No prompts available.</Text>;
  }

  const options = prompts.map((p) => ({
    label: `${p.name}${p.description ? ` — ${p.description.slice(0, 60)}` : ''}`,
    value: p.name,
  }));

  if (loading) {
    return (
      <Box gap={1}>
        <Spinner label={`Getting prompt ${loading}...`} />
      </Box>
    );
  }

  return (
    <Box flexDirection="column">
      <Text bold>Prompts ({prompts.length}):</Text>
      <Box marginTop={1}>
        <Select
          options={options}
          onChange={async (name) => {
            const prompt = prompts.find((p) => p.name === name);
            if (!prompt) return; // stale selection; ignore
            setLoading(name);
            try {
              const result = await session.getPrompt(name);
              onResult(prompt, result);
            } catch (err) {
              onError(`prompts/get failed: ${err instanceof Error ? err.message : String(err)}`);
            } finally {
              // Always return to the list, success or failure.
              setLoading(null);
            }
          }}
        />
      </Box>
    </Box>
  );
}
|
||||
@@ -1,366 +0,0 @@
|
||||
/**
|
||||
* ProvenanceView — 4-quadrant display:
|
||||
* Top-left: Parameters (proxymodel, LLM config, live override, server)
|
||||
* Top-right: Preview (diff from upstream after replay)
|
||||
* Bottom-left: Upstream (raw) — the origin, optionally editable
|
||||
* Bottom-right: Client (diff from upstream)
|
||||
*/
|
||||
|
||||
import { Box, Text } from 'ink';
|
||||
import { Spinner, TextInput } from '@inkjs/ui';
|
||||
import type { TimelineEvent, ReplayConfig, ReplayResult, ProxyModelDetails } from '../unified-types.js';
|
||||
import { computeDiffLines, formatDiffStats, DiffPanel } from './diff-renderer.js';
|
||||
|
||||
// Props for the 4-quadrant provenance view. All interaction state lives in
// the parent action state and is threaded through here.
interface ProvenanceViewProps {
  clientEvent: TimelineEvent;          // downstream (client-facing) event being inspected
  upstreamEvent: TimelineEvent | null; // matching origin event, if one was found
  height: number;                      // total rows available
  scrollOffset: number;                // vertical scroll for the focused panel
  horizontalOffset: number;            // horizontal pan for the focused panel
  focusedPanel: 'client' | 'upstream' | 'parameters' | 'preview';
  parameterIdx: number; // 0=ProxyModel, 1=Provider, 2=Model, 3=Live, 4=Server
  // Replay state
  replayConfig: ReplayConfig;
  replayResult: ReplayResult | null;
  replayRunning: boolean;
  // Upstream editing
  editingUpstream: boolean;
  editedContent: string;
  onEditContent: (text: string) => void;
  // Parameter-panel data
  proxyModelDetails: ProxyModelDetails | null;
  liveOverride: boolean;
  serverList: string[];
  serverOverrides: Record<string, string>; // per-server model override map
  selectedServerIdx: number;               // -1 = project-wide
  serverPickerOpen: boolean;
  modelPickerOpen: boolean;
  modelPickerIdx: number;
  availableModels: string[];
  // In-panel search
  searchMode: boolean;
  searchQuery: string;
  searchMatches: number[];
  searchMatchIdx: number;
}
|
||||
|
||||
export function getContentText(event: TimelineEvent): string {
|
||||
const body = event.body as Record<string, unknown> | null;
|
||||
if (!body) return '(no body)';
|
||||
|
||||
const result = body['result'] as Record<string, unknown> | undefined;
|
||||
if (!result) return JSON.stringify(body, null, 2);
|
||||
|
||||
const content = (result['content'] ?? result['contents'] ?? []) as Array<{ text?: string }>;
|
||||
if (content.length > 0) {
|
||||
return content.map((c) => c.text ?? '').join('\n');
|
||||
}
|
||||
|
||||
return JSON.stringify(result, null, 2);
|
||||
}
|
||||
|
||||
export function ProvenanceView({
|
||||
clientEvent,
|
||||
upstreamEvent,
|
||||
height,
|
||||
scrollOffset,
|
||||
horizontalOffset,
|
||||
focusedPanel,
|
||||
parameterIdx,
|
||||
replayConfig,
|
||||
replayResult,
|
||||
replayRunning,
|
||||
editingUpstream,
|
||||
editedContent,
|
||||
onEditContent,
|
||||
proxyModelDetails,
|
||||
liveOverride,
|
||||
serverList,
|
||||
serverOverrides,
|
||||
selectedServerIdx,
|
||||
serverPickerOpen,
|
||||
modelPickerOpen,
|
||||
modelPickerIdx,
|
||||
availableModels,
|
||||
searchMode,
|
||||
searchQuery,
|
||||
searchMatches,
|
||||
searchMatchIdx,
|
||||
}: ProvenanceViewProps) {
|
||||
// Split height: top half for params+preview, bottom half for upstream+client
|
||||
const topHeight = Math.max(4, Math.floor((height - 2) * 0.35));
|
||||
const bottomHeight = Math.max(4, height - topHeight - 2);
|
||||
|
||||
const upstreamText = editedContent || (upstreamEvent ? getContentText(upstreamEvent) : '(no upstream event found)');
|
||||
const clientText = getContentText(clientEvent);
|
||||
const upstreamChars = upstreamText.length;
|
||||
|
||||
// Upstream raw lines (for the origin panel)
|
||||
const upstreamLines = upstreamText.split('\n');
|
||||
const bottomBodyHeight = Math.max(1, bottomHeight - 3);
|
||||
// Route scrollOffset and horizontalOffset to only the focused panel
|
||||
const upstreamScroll = focusedPanel === 'upstream' ? scrollOffset : 0;
|
||||
const clientScroll = focusedPanel === 'client' ? scrollOffset : 0;
|
||||
const previewScroll = focusedPanel === 'preview' ? scrollOffset : 0;
|
||||
const upstreamHScroll = focusedPanel === 'upstream' ? horizontalOffset : 0;
|
||||
const clientHScroll = focusedPanel === 'client' ? horizontalOffset : 0;
|
||||
const previewHScroll = focusedPanel === 'preview' ? horizontalOffset : 0;
|
||||
const upstreamVisible = upstreamLines.slice(upstreamScroll, upstreamScroll + bottomBodyHeight);
|
||||
|
||||
// Client diff (from upstream)
|
||||
const clientDiff = computeDiffLines(upstreamText, clientText);
|
||||
|
||||
// Preview diff (from upstream, when replay result available)
|
||||
let previewDiff = { lines: [] as ReturnType<typeof computeDiffLines>['lines'], stats: { added: 0, removed: 0, pctChanged: 0 } };
|
||||
let previewError: string | null = null;
|
||||
let previewReady = false;
|
||||
|
||||
if (replayRunning) {
|
||||
// spinner handles this
|
||||
} else if (replayResult?.error) {
|
||||
previewError = replayResult.error;
|
||||
} else if (replayResult) {
|
||||
previewDiff = computeDiffLines(upstreamText, replayResult.content);
|
||||
previewReady = true;
|
||||
}
|
||||
|
||||
const previewBodyHeight = Math.max(1, topHeight - 3);
|
||||
|
||||
// Server display for row 4 — show per-server override if set
|
||||
const selectedServerName = selectedServerIdx >= 0 ? serverList[selectedServerIdx] : undefined;
|
||||
const serverOverrideModel = selectedServerName ? serverOverrides[selectedServerName] : undefined;
|
||||
const serverDisplay = selectedServerIdx < 0
|
||||
? '(project-wide)'
|
||||
: `${selectedServerName ?? '(unknown)'}${serverOverrideModel ? ` [${serverOverrideModel}]` : ''}`;
|
||||
|
||||
// Build parameter rows
|
||||
const paramRows = [
|
||||
{ label: 'ProxyModel', value: replayConfig.proxyModel },
|
||||
{ label: 'Provider ', value: replayConfig.provider ?? '(default)' },
|
||||
{ label: 'Model ', value: replayConfig.llmModel ?? '(default)' },
|
||||
{ label: 'Live ', value: liveOverride ? 'ON' : 'OFF', isLive: true },
|
||||
{ label: 'Server ', value: serverDisplay },
|
||||
];
|
||||
|
||||
// Build preview header
|
||||
let previewHeader = 'Preview';
|
||||
if (replayRunning) {
|
||||
previewHeader = 'Preview (running...)';
|
||||
} else if (previewError) {
|
||||
previewHeader = 'Preview (error)';
|
||||
} else if (previewReady) {
|
||||
previewHeader = `Preview (diff, ${formatDiffStats(previewDiff.stats)})`;
|
||||
}
|
||||
|
||||
// Build client header
|
||||
const clientHeader = `Client (diff, ${formatDiffStats(clientDiff.stats)})`;
|
||||
|
||||
// Show tooltip when ProxyModel row focused
|
||||
const showTooltip = focusedPanel === 'parameters' && parameterIdx === 0 && proxyModelDetails != null;
|
||||
|
||||
return (
|
||||
<Box flexDirection="column" height={height}>
|
||||
{/* Top row: Parameters + Preview */}
|
||||
<Box flexDirection="row" height={topHeight}>
|
||||
{/* Parameters panel */}
|
||||
<Box
|
||||
flexDirection="column"
|
||||
width="50%"
|
||||
borderStyle="single"
|
||||
borderColor={focusedPanel === 'parameters' ? 'cyan' : 'gray'}
|
||||
paddingX={1}
|
||||
>
|
||||
{/* When server picker is open, show ONLY the picker (full panel height) */}
|
||||
{serverPickerOpen && focusedPanel === 'parameters' && parameterIdx === 4 ? (
|
||||
<>
|
||||
<Text bold color="cyan">Select Server</Text>
|
||||
<Text key="project-wide">
|
||||
<Text color={selectedServerIdx === -1 ? 'cyan' : undefined}>
|
||||
{selectedServerIdx === -1 ? '\u25B6 ' : ' '}
|
||||
</Text>
|
||||
<Text bold={selectedServerIdx === -1}>(project-wide)</Text>
|
||||
{serverOverrides['*'] && <Text dimColor> [{serverOverrides['*']}]</Text>}
|
||||
</Text>
|
||||
{serverList.map((name, i) => (
|
||||
<Text key={name}>
|
||||
<Text color={selectedServerIdx === i ? 'cyan' : undefined}>
|
||||
{selectedServerIdx === i ? '\u25B6 ' : ' '}
|
||||
</Text>
|
||||
<Text bold={selectedServerIdx === i}>{name}</Text>
|
||||
{serverOverrides[name] && <Text dimColor> [{serverOverrides[name]}]</Text>}
|
||||
</Text>
|
||||
))}
|
||||
<Text dimColor>{'\u2191\u2193'}:navigate Enter:select Esc:cancel</Text>
|
||||
</>
|
||||
) : modelPickerOpen && focusedPanel === 'parameters' && selectedServerIdx >= 0 ? (
|
||||
<>
|
||||
<Text bold color="cyan">
|
||||
ProxyModel for {serverList[selectedServerIdx] ?? '(unknown)'}
|
||||
</Text>
|
||||
{availableModels.map((name, i) => {
|
||||
const serverName = serverList[selectedServerIdx] ?? '';
|
||||
const isCurrentOverride = serverOverrides[serverName] === name;
|
||||
return (
|
||||
<Text key={name}>
|
||||
<Text color={modelPickerIdx === i ? 'cyan' : undefined}>
|
||||
{modelPickerIdx === i ? '\u25B6 ' : ' '}
|
||||
</Text>
|
||||
<Text bold={modelPickerIdx === i}>{name}</Text>
|
||||
{isCurrentOverride && <Text color="green"> (active)</Text>}
|
||||
</Text>
|
||||
);
|
||||
})}
|
||||
<Text dimColor>{'\u2191\u2193'}:navigate Enter:apply Esc:cancel</Text>
|
||||
</>
|
||||
) : (
|
||||
<>
|
||||
<Text bold color={focusedPanel === 'parameters' ? 'cyan' : 'magenta'}>Parameters</Text>
|
||||
{paramRows.map((row, i) => {
|
||||
const isFocused = focusedPanel === 'parameters' && parameterIdx === i;
|
||||
const isLiveRow = 'isLive' in row;
|
||||
return (
|
||||
<Text key={i}>
|
||||
<Text color={isFocused ? 'cyan' : undefined}>{isFocused ? '\u25C0 ' : ' '}</Text>
|
||||
<Text dimColor={!isFocused}>{row.label}: </Text>
|
||||
{isLiveRow ? (
|
||||
<Text bold={isFocused} color={liveOverride ? 'green' : undefined}>
|
||||
{row.value}
|
||||
</Text>
|
||||
) : (
|
||||
<Text bold={isFocused}>{row.value}</Text>
|
||||
)}
|
||||
<Text color={isFocused ? 'cyan' : undefined}>{isFocused ? ' \u25B6' : ''}</Text>
|
||||
</Text>
|
||||
);
|
||||
})}
|
||||
|
||||
{/* ProxyModel details tooltip */}
|
||||
{showTooltip && proxyModelDetails && (
|
||||
<Box
|
||||
flexDirection="column"
|
||||
borderStyle="round"
|
||||
borderColor="magenta"
|
||||
paddingX={1}
|
||||
marginTop={0}
|
||||
>
|
||||
<Text bold color="magenta">{proxyModelDetails.name}</Text>
|
||||
<Text dimColor>
|
||||
{proxyModelDetails.type === 'plugin' ? 'plugin' : proxyModelDetails.source}
|
||||
{proxyModelDetails.cacheable ? ', cached' : ''}
|
||||
{proxyModelDetails.appliesTo && proxyModelDetails.appliesTo.length > 0 ? ` \u00B7 ${proxyModelDetails.appliesTo.join(', ')}` : ''}
|
||||
</Text>
|
||||
{proxyModelDetails.hooks && proxyModelDetails.hooks.length > 0 && (
|
||||
<Text dimColor>Hooks: {proxyModelDetails.hooks.join(', ')}</Text>
|
||||
)}
|
||||
{(proxyModelDetails.stages ?? []).map((stage, i) => (
|
||||
<Text key={i}>
|
||||
<Text color="yellow">{i + 1}. {stage.type}</Text>
|
||||
{stage.config && Object.keys(stage.config).length > 0 && (
|
||||
<Text dimColor>
|
||||
{' '}{Object.entries(stage.config).map(([k, v]) => `${k}=${String(v)}`).join(' ')}
|
||||
</Text>
|
||||
)}
|
||||
</Text>
|
||||
))}
|
||||
</Box>
|
||||
)}
|
||||
|
||||
{/* Per-server overrides summary */}
|
||||
{Object.keys(serverOverrides).length > 0 && (
|
||||
<Text dimColor wrap="truncate">
|
||||
Overrides: {Object.entries(serverOverrides).map(([s, m]) => `${s}=${m}`).join(', ')}
|
||||
</Text>
|
||||
)}
|
||||
</>
|
||||
)}
|
||||
</Box>
|
||||
|
||||
{/* Preview panel — diff from upstream */}
|
||||
<Box
|
||||
flexDirection="column"
|
||||
width="50%"
|
||||
borderStyle="single"
|
||||
borderColor={focusedPanel === 'preview' ? 'cyan' : 'gray'}
|
||||
paddingX={1}
|
||||
>
|
||||
<Text bold color={focusedPanel === 'preview' ? 'cyan' : 'green'}>
|
||||
{previewHeader}
|
||||
</Text>
|
||||
{replayRunning ? (
|
||||
<Spinner label="Running replay..." />
|
||||
) : previewError ? (
|
||||
<Text color="red" wrap="truncate">Error: {previewError}</Text>
|
||||
) : previewReady ? (
|
||||
<DiffPanel lines={previewDiff.lines} scrollOffset={previewScroll} height={previewBodyHeight} horizontalOffset={previewHScroll} />
|
||||
) : (
|
||||
<Text dimColor>Press Enter to run preview</Text>
|
||||
)}
|
||||
</Box>
|
||||
</Box>
|
||||
|
||||
{/* Bottom row: Upstream (raw) + Client (diff) */}
|
||||
<Box flexDirection="row" height={bottomHeight}>
|
||||
{/* Upstream panel — origin, raw text */}
|
||||
<Box
|
||||
flexDirection="column"
|
||||
width="50%"
|
||||
borderStyle="single"
|
||||
borderColor={focusedPanel === 'upstream' ? 'cyan' : 'gray'}
|
||||
paddingX={1}
|
||||
>
|
||||
<Box>
|
||||
<Text bold color={focusedPanel === 'upstream' ? 'cyan' : 'yellowBright'}>
|
||||
Upstream (raw, {upstreamChars} chars)
|
||||
</Text>
|
||||
{editingUpstream && <Text color="yellow"> [EDITING]</Text>}
|
||||
</Box>
|
||||
{upstreamEvent?.upstreamName && upstreamEvent.upstreamName.includes(',') && (
|
||||
<Text dimColor wrap="truncate">{upstreamEvent.upstreamName}</Text>
|
||||
)}
|
||||
{editingUpstream ? (
|
||||
<Box flexGrow={1}>
|
||||
<TextInput defaultValue={editedContent} onChange={onEditContent} />
|
||||
</Box>
|
||||
) : (
|
||||
<>
|
||||
{upstreamVisible.map((line, i) => (
|
||||
<Text key={i} wrap="truncate">{upstreamHScroll > 0 ? (line || ' ').slice(upstreamHScroll) : (line || ' ')}</Text>
|
||||
))}
|
||||
{upstreamLines.length > upstreamScroll + bottomBodyHeight && (
|
||||
<Text dimColor>{'\u2026'} +{upstreamLines.length - upstreamScroll - bottomBodyHeight} more</Text>
|
||||
)}
|
||||
</>
|
||||
)}
|
||||
</Box>
|
||||
|
||||
{/* Client panel — diff from upstream */}
|
||||
<Box
|
||||
flexDirection="column"
|
||||
width="50%"
|
||||
borderStyle="single"
|
||||
borderColor={focusedPanel === 'client' ? 'cyan' : 'gray'}
|
||||
paddingX={1}
|
||||
>
|
||||
<Text bold color={focusedPanel === 'client' ? 'cyan' : 'blue'}>
|
||||
{clientHeader}
|
||||
</Text>
|
||||
<DiffPanel lines={clientDiff.lines} scrollOffset={clientScroll} height={bottomBodyHeight} horizontalOffset={clientHScroll} />
|
||||
</Box>
|
||||
</Box>
|
||||
|
||||
{/* Footer */}
|
||||
<Box paddingX={1}>
|
||||
{searchMode || searchQuery.length > 0 ? (
|
||||
<Text>
|
||||
<Text color="cyan">/{searchQuery}</Text>
|
||||
{searchMatches.length > 0 && (
|
||||
<Text dimColor> [{searchMatchIdx + 1}/{searchMatches.length}] n:next N:prev Esc:clear</Text>
|
||||
)}
|
||||
{searchQuery.length > 0 && searchMatches.length === 0 && (
|
||||
<Text dimColor> (no matches)</Text>
|
||||
)}
|
||||
{searchMode && <Text color="cyan">_</Text>}
|
||||
</Text>
|
||||
) : (
|
||||
<Text dimColor>Tab:panel {'\u2191\u2193'}:scroll {'\u2190\u2192'}:pan/param /:search Enter:run/toggle e:edit Esc:close</Text>
|
||||
)}
|
||||
</Box>
|
||||
</Box>
|
||||
);
|
||||
}
|
||||
@@ -1,71 +0,0 @@
|
||||
import { useState } from 'react';
|
||||
import { Box, Text } from 'ink';
|
||||
import { TextInput, Spinner } from '@inkjs/ui';
|
||||
import type { McpSession } from '../mcp-session.js';
|
||||
|
||||
interface RawJsonRpcViewProps {
|
||||
session: McpSession;
|
||||
onBack: () => void;
|
||||
}
|
||||
|
||||
/**
 * Free-form JSON-RPC console: the user types a complete JSON-RPC message,
 * it is sent verbatim over the session, and the response is shown
 * (pretty-printed when it parses as JSON, raw otherwise).
 *
 * NOTE(review): `onBack` from RawJsonRpcViewProps is accepted by the props
 * type but not destructured here — presumably handled by a parent; confirm.
 */
export function RawJsonRpcView({ session }: RawJsonRpcViewProps) {
  const [loading, setLoading] = useState(false);
  const [result, setResult] = useState<string | null>(null);
  const [error, setError] = useState<string | null>(null);
  const [input, setInput] = useState('');

  const handleSubmit = async () => {
    if (!input.trim()) return; // ignore empty/whitespace-only submissions
    setLoading(true);
    // Clear the previous outcome so stale output never shows next to a new request.
    setResult(null);
    setError(null);

    try {
      const response = await session.sendRaw(input);
      try {
        // Pretty-print when the response body is valid JSON...
        setResult(JSON.stringify(JSON.parse(response), null, 2));
      } catch {
        // ...otherwise fall back to showing the raw response text.
        setResult(response);
      }
    } catch (err) {
      setError(err instanceof Error ? err.message : String(err));
    } finally {
      setLoading(false);
    }
  };

  return (
    <Box flexDirection="column">
      <Text bold>Raw JSON-RPC</Text>
      <Text dimColor>Enter a full JSON-RPC message and press Enter to send:</Text>

      <Box marginTop={1}>
        <Text color="cyan">> </Text>
        <TextInput
          placeholder='{"jsonrpc":"2.0","id":1,"method":"tools/list","params":{}}'
          onChange={setInput}
          onSubmit={handleSubmit}
        />
      </Box>

      {loading && (
        <Box marginTop={1}>
          <Spinner label="Sending..." />
        </Box>
      )}

      {error && (
        <Box marginTop={1}>
          <Text color="red">Error: {error}</Text>
        </Box>
      )}

      {result && (
        <Box flexDirection="column" marginTop={1}>
          <Text bold>Response:</Text>
          <Text>{result}</Text>
        </Box>
      )}
    </Box>
  );
}
|
||||
@@ -1,60 +0,0 @@
|
||||
import { useState } from 'react';
|
||||
import { Box, Text } from 'ink';
|
||||
import { Select, Spinner } from '@inkjs/ui';
|
||||
import type { McpResource, McpSession } from '../mcp-session.js';
|
||||
|
||||
interface ResourceListViewProps {
|
||||
resources: McpResource[];
|
||||
session: McpSession;
|
||||
onResult: (resource: McpResource, content: string) => void;
|
||||
onError: (msg: string) => void;
|
||||
onBack: () => void;
|
||||
}
|
||||
|
||||
/**
 * Selectable list of MCP resources. Choosing one issues `resources/read`
 * and reports the joined text content via onResult (onError on failure).
 *
 * NOTE(review): `onBack` exists on the props type but is not destructured —
 * presumably handled by a parent view; confirm.
 */
export function ResourceListView({ resources, session, onResult, onError }: ResourceListViewProps) {
  // URI of the resource currently being read, or null when idle.
  const [loading, setLoading] = useState<string | null>(null);

  if (resources.length === 0) {
    return <Text dimColor>No resources available.</Text>;
  }

  // Option label: URI, plus the optional name and a truncated description.
  const options = resources.map((r) => ({
    label: `${r.uri}${r.name ? ` (${r.name})` : ''}${r.description ? ` — ${r.description.slice(0, 50)}` : ''}`,
    value: r.uri,
  }));

  // While a read is in flight, the whole view collapses to a spinner.
  if (loading) {
    return (
      <Box gap={1}>
        <Spinner label={`Reading ${loading}...`} />
      </Box>
    );
  }

  return (
    <Box flexDirection="column">
      <Text bold>Resources ({resources.length}):</Text>
      <Box marginTop={1}>
        <Select
          options={options}
          onChange={async (uri) => {
            const resource = resources.find((r) => r.uri === uri);
            if (!resource) return;
            setLoading(uri);
            try {
              const result = await session.readResource(uri);
              // Non-text contents are rendered as a "[mimeType]" placeholder.
              const content = result.contents
                .map((c) => c.text ?? `[${c.mimeType ?? 'binary'}]`)
                .join('\n');
              onResult(resource, content);
            } catch (err) {
              onError(`resources/read failed: ${err instanceof Error ? err.message : String(err)}`);
            } finally {
              setLoading(null);
            }
          }}
        />
      </Box>
    </Box>
  );
}
|
||||
@@ -1,27 +0,0 @@
|
||||
import { Box, Text } from 'ink';
|
||||
|
||||
interface ResultViewProps {
|
||||
title: string;
|
||||
data: unknown;
|
||||
}
|
||||
|
||||
function formatJson(data: unknown): string {
|
||||
try {
|
||||
return JSON.stringify(data, null, 2);
|
||||
} catch {
|
||||
return String(data);
|
||||
}
|
||||
}
|
||||
|
||||
export function ResultView({ title, data }: ResultViewProps) {
|
||||
const formatted = formatJson(data);
|
||||
|
||||
return (
|
||||
<Box flexDirection="column">
|
||||
<Text bold color="cyan">{title}</Text>
|
||||
<Box marginTop={1}>
|
||||
<Text>{formatted}</Text>
|
||||
</Box>
|
||||
</Box>
|
||||
);
|
||||
}
|
||||
@@ -1,321 +0,0 @@
|
||||
/**
|
||||
* SessionSidebar — project-grouped session list with "New Session" entry
|
||||
* and project picker mode.
|
||||
*
|
||||
* Sessions are grouped by project name. Each project appears once as a header,
|
||||
* with its sessions listed below. Discovers sessions from both the SSE snapshot
|
||||
* AND traffic events so closed sessions still appear.
|
||||
*
|
||||
* selectedIdx: -2 = "New Session", -1 = all sessions, 0+ = individual sessions
|
||||
*/
|
||||
|
||||
import { Box, Text } from 'ink';
|
||||
import type { ActiveSession, TimelineEvent } from '../unified-types.js';
|
||||
|
||||
interface SessionSidebarProps {
|
||||
interactiveSessionId: string | undefined;
|
||||
observedSessions: ActiveSession[];
|
||||
events: TimelineEvent[];
|
||||
selectedIdx: number; // -2 = new session, -1 = all, 0+ = session
|
||||
height: number;
|
||||
projectName: string;
|
||||
mode: 'sessions' | 'project-picker';
|
||||
availableProjects: string[];
|
||||
projectPickerIdx: number;
|
||||
}
|
||||
|
||||
interface SessionEntry {
|
||||
sessionId: string;
|
||||
projectName: string;
|
||||
}
|
||||
|
||||
interface ProjectGroup {
|
||||
projectName: string;
|
||||
sessions: SessionEntry[];
|
||||
}
|
||||
|
||||
/**
 * Sidebar body. Renders either the project picker (mode === 'project-picker')
 * or the grouped session list with fixed "New Session" / "all sessions" rows,
 * a scrolling windowed body, and a static keybinding help footer.
 */
export function SessionSidebar({
  interactiveSessionId,
  observedSessions,
  events,
  selectedIdx,
  height,
  projectName,
  mode,
  availableProjects,
  projectPickerIdx,
}: SessionSidebarProps) {
  // Picker mode replaces the entire sidebar with the project list.
  if (mode === 'project-picker') {
    return (
      <ProjectPicker
        projects={availableProjects}
        selectedIdx={projectPickerIdx}
        height={height}
      />
    );
  }

  const sessions = buildSessionList(interactiveSessionId, observedSessions, events, projectName);
  const groups = groupByProject(sessions);

  // Count events per session (for the "N ev" suffix on each row)
  const counts = new Map<string, number>();
  for (const e of events) {
    counts.set(e.sessionId, (counts.get(e.sessionId) ?? 0) + 1);
  }

  const headerLines = 3; // "Sessions (N)" + "New Session" + "all sessions"
  const footerLines = 5; // keybinding help box
  const bodyHeight = Math.max(1, height - headerLines - footerLines);

  // Build flat render lines for scrolling: one line per project header,
  // one per session. flatSessionIdx mirrors the caller's selectedIdx space.
  interface RenderLine {
    type: 'project-header' | 'session';
    projectName: string;
    sessionId?: string;
    flatSessionIdx?: number;
  }

  const lines: RenderLine[] = [];
  let flatIdx = 0;
  for (const group of groups) {
    lines.push({ type: 'project-header', projectName: group.projectName });
    for (const s of group.sessions) {
      lines.push({ type: 'session', projectName: group.projectName, sessionId: s.sessionId, flatSessionIdx: flatIdx });
      flatIdx++;
    }
  }

  // Find which render line corresponds to the selected session
  // (-1 when a fixed row, i.e. "New Session" or "all sessions", is selected)
  let selectedLineIdx = -1;
  if (selectedIdx >= 0) {
    selectedLineIdx = lines.findIndex((l) => l.flatSessionIdx === selectedIdx);
  }

  // Scroll to keep selected visible. NOTE(review): scrollStart is recomputed
  // from 0 on every render rather than persisted, so the window always snaps
  // to the selection — confirm that is the intended UX.
  let scrollStart = 0;
  if (selectedLineIdx >= 0) {
    if (selectedLineIdx >= scrollStart + bodyHeight) {
      scrollStart = selectedLineIdx - bodyHeight + 1;
    }
    if (selectedLineIdx < scrollStart) {
      scrollStart = selectedLineIdx;
    }
  }
  scrollStart = Math.max(0, scrollStart);

  const visibleLines = lines.slice(scrollStart, scrollStart + bodyHeight);
  const hasMore = scrollStart + bodyHeight < lines.length;

  return (
    <Box
      flexDirection="column"
      width={32}
      borderStyle="round"
      borderColor="gray"
      paddingX={1}
      height={height}
    >
      <Text bold color="cyan">
        {' Sessions '}
        <Text dimColor>({sessions.length})</Text>
      </Text>

      {/* "New Session" row */}
      <Text color={selectedIdx === -2 ? 'cyan' : 'green'} bold={selectedIdx === -2}>
        {selectedIdx === -2 ? ' \u25b8 ' : '   '}
        {'+ New Session'}
      </Text>

      {/* "All sessions" row */}
      <Text color={selectedIdx === -1 ? 'cyan' : undefined} bold={selectedIdx === -1}>
        {selectedIdx === -1 ? ' \u25b8 ' : '   '}
        {'all sessions'}
      </Text>

      {/* Grouped session list */}
      {sessions.length === 0 && (
        <Box marginTop={1}>
          <Text dimColor>{' waiting for connections\u2026'}</Text>
        </Box>
      )}

      {visibleLines.map((line, vi) => {
        if (line.type === 'project-header') {
          return (
            <Text key={`proj-${line.projectName}-${vi}`} bold wrap="truncate">
              {' '}{line.projectName}
            </Text>
          );
        }

        // Session line: short id, event count, '*' marks the interactive session
        const isSelected = line.flatSessionIdx === selectedIdx;
        const count = counts.get(line.sessionId!) ?? 0;
        const isInteractive = line.sessionId === interactiveSessionId;

        return (
          <Text key={line.sessionId!} wrap="truncate">
            <Text color={isSelected ? 'cyan' : undefined} bold={isSelected}>
              {isSelected ? ' \u25b8 ' : '   '}
              {line.sessionId!.slice(0, 8)}
            </Text>
            {count > 0 && <Text dimColor>{` \u00b7 ${count} ev`}</Text>}
            {isInteractive && <Text color="green">{' *'}</Text>}
          </Text>
        );
      })}

      {hasMore && (
        <Text dimColor>{' \u2026 more'}</Text>
      )}

      {/* Spacer */}
      <Box flexGrow={1} />

      {/* Help */}
      <Box borderStyle="single" borderTop borderColor="gray" paddingTop={0}>
        <Text dimColor>
          {'[\u2191\u2193] session [a] all\n[\u23ce] select [Esc] close\n[x] clear   [q] quit'}
        </Text>
      </Box>
    </Box>
  );
}
|
||||
|
||||
/**
 * Project picker sub-view: a scrolling, windowed list of project names with
 * the same sidebar dimensions (width 32, rounded border) as the session list.
 */
function ProjectPicker({
  projects,
  selectedIdx,
  height,
}: {
  projects: string[];
  selectedIdx: number;
  height: number;
}) {
  const headerLines = 2;
  const footerLines = 4;
  const bodyHeight = Math.max(1, height - headerLines - footerLines);

  // Keep the selected row inside the visible window.
  let scrollStart = 0;
  if (selectedIdx >= scrollStart + bodyHeight) {
    scrollStart = selectedIdx - bodyHeight + 1;
  }
  if (selectedIdx < scrollStart) {
    scrollStart = selectedIdx;
  }
  scrollStart = Math.max(0, scrollStart);

  const visibleProjects = projects.slice(scrollStart, scrollStart + bodyHeight);
  const hasMore = scrollStart + bodyHeight < projects.length;

  return (
    <Box
      flexDirection="column"
      width={32}
      borderStyle="round"
      borderColor="cyan"
      paddingX={1}
      height={height}
    >
      <Text bold color="cyan">
        {' Select Project '}
      </Text>

      {projects.length === 0 ? (
        <Box marginTop={1}>
          <Text dimColor>{' no projects found'}</Text>
        </Box>
      ) : (
        visibleProjects.map((name, vi) => {
          // vi is the window-relative index; realIdx is the absolute one.
          const realIdx = scrollStart + vi;
          const isSelected = realIdx === selectedIdx;
          return (
            <Text key={name} wrap="truncate">
              <Text color={isSelected ? 'cyan' : undefined} bold={isSelected}>
                {isSelected ? ' \u25b8 ' : '   '}
                {name}
              </Text>
            </Text>
          );
        })
      )}

      {hasMore && (
        <Text dimColor>{' \u2026 more'}</Text>
      )}

      {/* Spacer */}
      <Box flexGrow={1} />

      {/* Help */}
      <Box borderStyle="single" borderTop borderColor="gray" paddingTop={0}>
        <Text dimColor>
          {'[\u2191\u2193] pick [\u23ce] select\n[Esc] back'}
        </Text>
      </Box>
    </Box>
  );
}
|
||||
|
||||
/** Total session count across all groups */
|
||||
export function getSessionCount(
|
||||
interactiveSessionId: string | undefined,
|
||||
observedSessions: ActiveSession[],
|
||||
events: TimelineEvent[],
|
||||
projectName: string,
|
||||
): number {
|
||||
return buildSessionList(interactiveSessionId, observedSessions, events, projectName).length;
|
||||
}
|
||||
|
||||
function buildSessionList(
|
||||
interactiveSessionId: string | undefined,
|
||||
observedSessions: ActiveSession[],
|
||||
events: TimelineEvent[],
|
||||
projectName: string,
|
||||
): SessionEntry[] {
|
||||
const result: SessionEntry[] = [];
|
||||
const seen = new Set<string>();
|
||||
|
||||
// Interactive session first
|
||||
if (interactiveSessionId) {
|
||||
result.push({ sessionId: interactiveSessionId, projectName });
|
||||
seen.add(interactiveSessionId);
|
||||
}
|
||||
|
||||
// Then observed sessions from SSE snapshot
|
||||
for (const s of observedSessions) {
|
||||
if (!seen.has(s.sessionId)) {
|
||||
result.push({ sessionId: s.sessionId, projectName: s.projectName });
|
||||
seen.add(s.sessionId);
|
||||
}
|
||||
}
|
||||
|
||||
// Also discover sessions from traffic events (covers sessions that
|
||||
// were already closed before the SSE connected)
|
||||
for (const e of events) {
|
||||
if (!seen.has(e.sessionId)) {
|
||||
result.push({ sessionId: e.sessionId, projectName: e.projectName });
|
||||
seen.add(e.sessionId);
|
||||
}
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
function groupByProject(sessions: SessionEntry[]): ProjectGroup[] {
|
||||
const map = new Map<string, SessionEntry[]>();
|
||||
const order: string[] = [];
|
||||
|
||||
for (const s of sessions) {
|
||||
let group = map.get(s.projectName);
|
||||
if (!group) {
|
||||
group = [];
|
||||
map.set(s.projectName, group);
|
||||
order.push(s.projectName);
|
||||
}
|
||||
group.push(s);
|
||||
}
|
||||
|
||||
return order.map((name) => ({ projectName: name, sessions: map.get(name)! }));
|
||||
}
|
||||
@@ -1,95 +0,0 @@
|
||||
/**
|
||||
* Unified timeline — renders all events (interactive, observed)
|
||||
* with a lane-colored gutter, windowed rendering, and auto-scroll.
|
||||
*/
|
||||
|
||||
import { Box, Text } from 'ink';
|
||||
import type { TimelineEvent, EventLane } from '../unified-types.js';
|
||||
import { formatTime, formatEventSummary, trunc } from '../format-event.js';
|
||||
|
||||
const LANE_COLORS: Record<EventLane, string> = {
|
||||
interactive: 'green',
|
||||
observed: 'yellow',
|
||||
};
|
||||
|
||||
const LANE_MARKERS: Record<EventLane, string> = {
|
||||
interactive: '\u2502',
|
||||
observed: '\u2502',
|
||||
};
|
||||
|
||||
interface TimelineProps {
|
||||
events: TimelineEvent[];
|
||||
height: number;
|
||||
focusedIdx: number; // -1 = auto-scroll to bottom
|
||||
showProject: boolean;
|
||||
}
|
||||
|
||||
/**
 * Windowed timeline renderer.
 *
 * When focusedIdx >= 0 the window is centred on the focused event (clamped
 * to the list bounds); otherwise it pins to the tail so new events
 * auto-scroll into view ("following" mode).
 */
export function Timeline({ events, height, focusedIdx, showProject }: TimelineProps) {
  const maxVisible = Math.max(1, height - 2); // header + spacing
  let startIdx: number;
  if (focusedIdx >= 0) {
    // Centre the focused event, clamped so the window never runs past the end.
    startIdx = Math.max(0, Math.min(focusedIdx - Math.floor(maxVisible / 2), events.length - maxVisible));
  } else {
    // Auto-scroll: show the newest maxVisible events.
    startIdx = Math.max(0, events.length - maxVisible);
  }
  const visible = events.slice(startIdx, startIdx + maxVisible);

  return (
    <Box flexDirection="column" flexGrow={1} paddingLeft={1}>
      <Text bold>
        Timeline <Text dimColor>({events.length} events{focusedIdx >= 0 ? ` \u00B7 #${focusedIdx + 1}` : ' \u00B7 following'})</Text>
      </Text>
      {visible.length === 0 && (
        <Box marginTop={1}>
          <Text dimColor>{' waiting for traffic\u2026'}</Text>
        </Box>
      )}
      {visible.map((event, vi) => {
        const absIdx = startIdx + vi;
        const isFocused = absIdx === focusedIdx;
        const { arrow, color, label, detail, detailColor } = formatEventSummary(
          event.eventType,
          event.method,
          event.body,
          event.upstreamName,
          event.durationMs,
        );
        // Lifecycle events (session open/close) get a compact single-style row.
        const isLifecycle = event.eventType === 'session_created' || event.eventType === 'session_closed';
        const laneColor = LANE_COLORS[event.lane];
        const laneMarker = LANE_MARKERS[event.lane];
        const focusMarker = isFocused ? '\u25B8' : ' ';
        const hasCorrelation = event.correlationId !== undefined;

        if (isLifecycle) {
          return (
            <Text key={event.id} wrap="truncate">
              <Text color={laneColor}>{laneMarker}</Text>
              <Text color={isFocused ? 'cyan' : undefined}>{focusMarker}</Text>
              <Text dimColor>{formatTime(event.timestamp)} </Text>
              <Text color={color} bold>{arrow} {label}</Text>
              {showProject && <Text color="gray"> [{trunc(event.projectName, 12)}]</Text>}
              <Text dimColor> {event.sessionId.slice(0, 8)}</Text>
            </Text>
          );
        }

        // Upstream hops are rendered unbolded to visually recede behind
        // the client-facing request/response rows.
        const isUpstream = event.eventType.startsWith('upstream_');

        return (
          <Text key={event.id} wrap="truncate">
            <Text color={laneColor}>{laneMarker}</Text>
            <Text color={isFocused ? 'cyan' : undefined}>{focusMarker}</Text>
            <Text dimColor>{formatTime(event.timestamp)} </Text>
            {showProject && <Text color="gray">[{trunc(event.projectName, 12)}] </Text>}
            <Text color={color}>{arrow} </Text>
            <Text bold={!isUpstream} color={color}>{label}</Text>
            {detail ? (
              <Text color={detailColor} dimColor={!detailColor}> {detail}</Text>
            ) : null}
            {/* chain glyph marks events linked by a correlation id */}
            {hasCorrelation && <Text dimColor>{' \u26D3'}</Text>}
          </Text>
        );
      })}
    </Box>
  );
}
|
||||
@@ -1,94 +0,0 @@
|
||||
import { useState } from 'react';
|
||||
import { Box, Text } from 'ink';
|
||||
import { TextInput, Spinner } from '@inkjs/ui';
|
||||
import type { McpTool, McpSession } from '../mcp-session.js';
|
||||
|
||||
interface ToolDetailViewProps {
|
||||
tool: McpTool;
|
||||
session: McpSession;
|
||||
onResult: (data: unknown) => void;
|
||||
onError: (msg: string) => void;
|
||||
onBack: () => void;
|
||||
onLoadingChange?: (loading: boolean) => void;
|
||||
}
|
||||
|
||||
interface SchemaProperty {
|
||||
type?: string;
|
||||
description?: string;
|
||||
}
|
||||
|
||||
/**
 * Detail view for one MCP tool: shows its description and input schema, takes
 * arguments as a JSON string, and executes `tools/call` on submit.
 * Results/errors are delivered to the parent via onResult/onError; the
 * loading state is mirrored outward through the optional onLoadingChange.
 */
export function ToolDetailView({ tool, session, onResult, onError, onLoadingChange }: ToolDetailViewProps) {
  const [loading, _setLoading] = useState(false);
  // Wrap the state setter so the parent is notified of every loading change.
  const setLoading = (v: boolean) => { _setLoading(v); onLoadingChange?.(v); };
  const [argsJson, setArgsJson] = useState('{}');

  // Extract properties from input schema (JSON Schema object shape assumed)
  const schema = tool.inputSchema as { properties?: Record<string, SchemaProperty>; required?: string[] } | undefined;
  const properties = schema?.properties ?? {};
  const required = new Set(schema?.required ?? []);
  const propNames = Object.keys(properties);

  const handleExecute = async () => {
    setLoading(true);
    try {
      let args: Record<string, unknown>;
      try {
        args = JSON.parse(argsJson) as Record<string, unknown>;
      } catch {
        // Bad user input is reported, not thrown; reset loading before bailing.
        onError('Invalid JSON for arguments');
        setLoading(false);
        return;
      }
      const result = await session.callTool(tool.name, args);
      onResult(result);
    } catch (err) {
      onError(`tools/call failed: ${err instanceof Error ? err.message : String(err)}`);
    } finally {
      setLoading(false);
    }
  };

  // While the call is in flight the whole view collapses to a spinner.
  if (loading) {
    return (
      <Box gap={1}>
        <Spinner label={`Calling ${tool.name}...`} />
      </Box>
    );
  }

  return (
    <Box flexDirection="column">
      <Text bold color="cyan">{tool.name}</Text>
      {tool.description && <Text>{tool.description}</Text>}

      {propNames.length > 0 && (
        <Box flexDirection="column" marginTop={1}>
          <Text bold>Schema:</Text>
          {propNames.map((name) => {
            const prop = properties[name]!;
            const req = required.has(name) ? ' (required)' : '';
            return (
              <Text key={name} dimColor>
                {name}: {prop.type ?? 'any'}{req}{prop.description ? ` — ${prop.description}` : ''}
              </Text>
            );
          })}
        </Box>
      )}

      <Box flexDirection="column" marginTop={1}>
        <Text bold>Arguments (JSON):</Text>
        <Box>
          <Text color="cyan">> </Text>
          <TextInput
            placeholder="{}"
            defaultValue="{}"
            onChange={setArgsJson}
            onSubmit={handleExecute}
          />
        </Box>
        <Text dimColor>Press Enter to execute</Text>
      </Box>
    </Box>
  );
}
|
||||
@@ -1,35 +0,0 @@
|
||||
import { Box, Text } from 'ink';
|
||||
import { Select } from '@inkjs/ui';
|
||||
import type { McpTool } from '../mcp-session.js';
|
||||
|
||||
interface ToolListViewProps {
|
||||
tools: McpTool[];
|
||||
onSelect: (tool: McpTool) => void;
|
||||
onBack: () => void;
|
||||
}
|
||||
|
||||
export function ToolListView({ tools, onSelect }: ToolListViewProps) {
|
||||
if (tools.length === 0) {
|
||||
return <Text dimColor>No tools available.</Text>;
|
||||
}
|
||||
|
||||
const options = tools.map((t) => ({
|
||||
label: `${t.name}${t.description ? ` — ${t.description.slice(0, 60)}` : ''}`,
|
||||
value: t.name,
|
||||
}));
|
||||
|
||||
return (
|
||||
<Box flexDirection="column">
|
||||
<Text bold>Tools ({tools.length}):</Text>
|
||||
<Box marginTop={1}>
|
||||
<Select
|
||||
options={options}
|
||||
onChange={(value) => {
|
||||
const tool = tools.find((t) => t.name === value);
|
||||
if (tool) onSelect(tool);
|
||||
}}
|
||||
/>
|
||||
</Box>
|
||||
</Box>
|
||||
);
|
||||
}
|
||||
@@ -1,46 +0,0 @@
|
||||
/**
|
||||
* Toolbar — compact 1-line bar showing Tools / Resources / Prompts / Raw JSON-RPC.
|
||||
*
|
||||
* Shown between the header and timeline when an interactive session is ungated.
|
||||
* Items are selectable via Tab (focus on/off), ←/→ (cycle), Enter (open).
|
||||
*/
|
||||
|
||||
import { Box, Text } from 'ink';
|
||||
|
||||
interface ToolbarProps {
|
||||
toolCount: number;
|
||||
resourceCount: number;
|
||||
promptCount: number;
|
||||
focusedItem: number; // -1 = not focused, 0-3 = which item
|
||||
}
|
||||
|
||||
const ITEMS = [
|
||||
{ label: 'Tools', key: 'tools' },
|
||||
{ label: 'Resources', key: 'resources' },
|
||||
{ label: 'Prompts', key: 'prompts' },
|
||||
{ label: 'Raw JSON-RPC', key: 'raw' },
|
||||
] as const;
|
||||
|
||||
/**
 * One-line toolbar: " Tools (N) | Resources (N) | Prompts (N) | Raw JSON-RPC".
 * The focused item is highlighted cyan/bold; the rest are dimmed.
 */
export function Toolbar({ toolCount, resourceCount, promptCount, focusedItem }: ToolbarProps) {
  // Parallel to ITEMS; -1 suppresses the count suffix (Raw JSON-RPC has none).
  const counts = [toolCount, resourceCount, promptCount, -1];

  return (
    <Box paddingX={1} height={1}>
      {ITEMS.map((item, idx) => {
        const isFocused = focusedItem === idx;
        const itemCount = counts[idx]!;
        const isLast = idx === ITEMS.length - 1;

        return (
          <Text key={item.key}>
            <Text color={isFocused ? 'cyan' : undefined} bold={isFocused} dimColor={!isFocused}>
              {` ${item.label}`}
              {itemCount >= 0 && <Text>{` (${itemCount})`}</Text>}
            </Text>
            {!isLast && <Text dimColor>{' | '}</Text>}
          </Text>
        );
      })}
    </Box>
  );
}
|
||||
@@ -1,310 +0,0 @@
|
||||
/**
|
||||
* Shared formatting functions for MCP traffic events.
|
||||
*
|
||||
* Extracted from inspect-app.tsx so they can be reused by
|
||||
* the unified timeline, action area, and provenance views.
|
||||
*/
|
||||
|
||||
import type { TrafficEventType } from './unified-types.js';
|
||||
|
||||
/** Safely dig into unknown objects */
|
||||
export function dig(obj: unknown, ...keys: string[]): unknown {
|
||||
let cur = obj;
|
||||
for (const k of keys) {
|
||||
if (cur === null || cur === undefined || typeof cur !== 'object') return undefined;
|
||||
cur = (cur as Record<string, unknown>)[k];
|
||||
}
|
||||
return cur;
|
||||
}
|
||||
|
||||
export function trunc(s: string, maxLen: number): string {
|
||||
return s.length > maxLen ? s.slice(0, maxLen - 1) + '\u2026' : s;
|
||||
}
|
||||
|
||||
export function nameList(items: unknown[], key: string, max: number): string {
|
||||
if (items.length === 0) return '(none)';
|
||||
const names = items.map((it) => dig(it, key) as string).filter(Boolean);
|
||||
const shown = names.slice(0, max);
|
||||
const rest = names.length - shown.length;
|
||||
return shown.join(', ') + (rest > 0 ? ` +${rest} more` : '');
|
||||
}
|
||||
|
||||
export function formatTime(ts: Date | string): string {
|
||||
try {
|
||||
const d = typeof ts === 'string' ? new Date(ts) : ts;
|
||||
return d.toLocaleTimeString('en-GB', { hour12: false, hour: '2-digit', minute: '2-digit', second: '2-digit' });
|
||||
} catch {
|
||||
return '??:??:??';
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Extract a meaningful one-line summary from request params (strips
 * jsonrpc/id boilerplate). Per-method formats:
 * - initialize:      "client=<name> v<ver> proto=<ver>"
 * - tools/call:      "tool(arg: value, ...)" with per-arg and overall truncation
 * - resources/read:  the uri
 * - prompts/get:     the prompt name
 * - list/initialized notifications: '' (nothing useful to show)
 * - anything else:   truncated JSON of params
 */
export function summarizeRequest(method: string, body: unknown): string {
  const params = dig(body, 'params') as Record<string, unknown> | undefined;

  switch (method) {
    case 'initialize': {
      const name = dig(params, 'clientInfo', 'name') ?? '?';
      const ver = dig(params, 'clientInfo', 'version') ?? '';
      const proto = dig(params, 'protocolVersion') ?? '';
      return `client=${name}${ver ? ` v${ver}` : ''} proto=${proto}`;
    }
    case 'tools/call': {
      const toolName = dig(params, 'name') as string ?? '?';
      const args = dig(params, 'arguments') as Record<string, unknown> | undefined;
      if (!args || Object.keys(args).length === 0) return `${toolName}()`;
      // Each argument value is truncated to 40 chars, then the whole
      // argument list to 80, so one long value can't drown the rest.
      const pairs = Object.entries(args).map(([k, v]) => {
        const vs = typeof v === 'string' ? v : JSON.stringify(v);
        return `${k}: ${trunc(vs, 40)}`;
      });
      return `${toolName}(${trunc(pairs.join(', '), 80)})`;
    }
    case 'resources/read': {
      const uri = dig(params, 'uri') as string ?? '';
      return uri;
    }
    case 'prompts/get': {
      const name = dig(params, 'name') as string ?? '';
      return name;
    }
    case 'tools/list':
    case 'resources/list':
    case 'prompts/list':
    case 'notifications/initialized':
      return '';
    default: {
      if (!params || Object.keys(params).length === 0) return '';
      const s = JSON.stringify(params);
      return trunc(s, 80);
    }
  }
}
|
||||
|
||||
/** Extract meaningful summary from response result */
|
||||
export function summarizeResponse(method: string, body: unknown, durationMs?: number): string {
|
||||
const error = dig(body, 'error') as { message?: string; code?: number } | undefined;
|
||||
if (error) {
|
||||
return `ERROR ${error.code ?? ''}: ${error.message ?? 'unknown'}`;
|
||||
}
|
||||
|
||||
const result = dig(body, 'result') as Record<string, unknown> | undefined;
|
||||
if (!result) return '';
|
||||
|
||||
let summary: string;
|
||||
switch (method) {
|
||||
case 'initialize': {
|
||||
const name = dig(result, 'serverInfo', 'name') ?? '?';
|
||||
const ver = dig(result, 'serverInfo', 'version') ?? '';
|
||||
const caps = dig(result, 'capabilities') as Record<string, unknown> | undefined;
|
||||
const capList = caps ? Object.keys(caps).filter((k) => caps[k] && Object.keys(caps[k] as object).length > 0) : [];
|
||||
summary = `server=${name}${ver ? ` v${ver}` : ''}${capList.length ? ` caps=[${capList.join(',')}]` : ''}`;
|
||||
break;
|
||||
}
|
||||
case 'tools/list': {
|
||||
const tools = (result.tools ?? []) as unknown[];
|
||||
summary = `${tools.length} tools: ${nameList(tools, 'name', 6)}`;
|
||||
break;
|
||||
}
|
||||
case 'resources/list': {
|
||||
const resources = (result.resources ?? []) as unknown[];
|
||||
summary = `${resources.length} resources: ${nameList(resources, 'name', 6)}`;
|
||||
break;
|
||||
}
|
||||
case 'prompts/list': {
|
||||
const prompts = (result.prompts ?? []) as unknown[];
|
||||
if (prompts.length === 0) { summary = '0 prompts'; break; }
|
||||
summary = `${prompts.length} prompts: ${nameList(prompts, 'name', 6)}`;
|
||||
break;
|
||||
}
|
||||
case 'tools/call': {
|
||||
const content = (result.content ?? []) as unknown[];
|
||||
const isError = result.isError;
|
||||
const first = content[0];
|
||||
const text = (dig(first, 'text') as string) ?? '';
|
||||
const prefix = isError ? 'ERROR: ' : '';
|
||||
if (text) { summary = prefix + trunc(text.replace(/\n/g, ' '), 100); break; }
|
||||
summary = prefix + `${content.length} content block(s)`;
|
||||
break;
|
||||
}
|
||||
case 'resources/read': {
|
||||
const contents = (result.contents ?? []) as unknown[];
|
||||
const first = contents[0];
|
||||
const text = (dig(first, 'text') as string) ?? '';
|
||||
if (text) { summary = trunc(text.replace(/\n/g, ' '), 80); break; }
|
||||
summary = `${contents.length} content block(s)`;
|
||||
break;
|
||||
}
|
||||
case 'notifications/initialized':
|
||||
summary = 'ok';
|
||||
break;
|
||||
default: {
|
||||
if (Object.keys(result).length === 0) { summary = 'ok'; break; }
|
||||
const s = JSON.stringify(result);
|
||||
summary = trunc(s, 80);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (durationMs !== undefined) {
|
||||
return `[${durationMs}ms] ${summary}`;
|
||||
}
|
||||
return summary;
|
||||
}
|
||||
|
||||
/** Format full event body for expanded detail view (multi-line, readable) */
|
||||
export function formatBodyDetail(eventType: string, method: string, body: unknown): string[] {
|
||||
const bodyObj = body as Record<string, unknown> | null;
|
||||
if (!bodyObj) return ['(no body)'];
|
||||
|
||||
const lines: string[] = [];
|
||||
|
||||
if (eventType.includes('request') || eventType === 'client_notification') {
|
||||
const params = bodyObj['params'] as Record<string, unknown> | undefined;
|
||||
if (method === 'tools/call' && params) {
|
||||
lines.push(`Tool: ${params['name'] as string}`);
|
||||
const args = params['arguments'] as Record<string, unknown> | undefined;
|
||||
if (args && Object.keys(args).length > 0) {
|
||||
lines.push('Arguments:');
|
||||
for (const [k, v] of Object.entries(args)) {
|
||||
const vs = typeof v === 'string' ? v : JSON.stringify(v, null, 2);
|
||||
for (const vl of vs.split('\n')) {
|
||||
lines.push(` ${k}: ${vl}`);
|
||||
}
|
||||
}
|
||||
}
|
||||
} else if (method === 'initialize' && params) {
|
||||
const ci = params['clientInfo'] as Record<string, unknown> | undefined;
|
||||
lines.push(`Client: ${ci?.['name'] ?? '?'} v${ci?.['version'] ?? '?'}`);
|
||||
lines.push(`Protocol: ${params['protocolVersion'] ?? '?'}`);
|
||||
const caps = params['capabilities'] as Record<string, unknown> | undefined;
|
||||
if (caps) lines.push(`Capabilities: ${JSON.stringify(caps)}`);
|
||||
} else if (params && Object.keys(params).length > 0) {
|
||||
for (const l of JSON.stringify(params, null, 2).split('\n')) {
|
||||
lines.push(l);
|
||||
}
|
||||
} else {
|
||||
lines.push('(empty params)');
|
||||
}
|
||||
} else if (eventType.includes('response')) {
|
||||
const error = bodyObj['error'] as Record<string, unknown> | undefined;
|
||||
if (error) {
|
||||
lines.push(`Error ${error['code']}: ${error['message']}`);
|
||||
if (error['data']) {
|
||||
for (const l of JSON.stringify(error['data'], null, 2).split('\n')) {
|
||||
lines.push(` ${l}`);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
const result = bodyObj['result'] as Record<string, unknown> | undefined;
|
||||
if (!result) {
|
||||
lines.push('(empty result)');
|
||||
} else if (method === 'tools/list') {
|
||||
const tools = (result['tools'] ?? []) as Array<{ name: string; description?: string }>;
|
||||
lines.push(`${tools.length} tools:`);
|
||||
for (const t of tools) {
|
||||
lines.push(` ${t.name}${t.description ? ` \u2014 ${trunc(t.description, 60)}` : ''}`);
|
||||
}
|
||||
} else if (method === 'resources/list') {
|
||||
const resources = (result['resources'] ?? []) as Array<{ name: string; uri?: string; description?: string }>;
|
||||
lines.push(`${resources.length} resources:`);
|
||||
for (const r of resources) {
|
||||
lines.push(` ${r.name}${r.uri ? ` (${r.uri})` : ''}${r.description ? ` \u2014 ${trunc(r.description, 50)}` : ''}`);
|
||||
}
|
||||
} else if (method === 'prompts/list') {
|
||||
const prompts = (result['prompts'] ?? []) as Array<{ name: string; description?: string }>;
|
||||
lines.push(`${prompts.length} prompts:`);
|
||||
for (const p of prompts) {
|
||||
lines.push(` ${p.name}${p.description ? ` \u2014 ${trunc(p.description, 60)}` : ''}`);
|
||||
}
|
||||
} else if (method === 'tools/call') {
|
||||
const isErr = result['isError'];
|
||||
const content = (result['content'] ?? []) as Array<{ type?: string; text?: string }>;
|
||||
if (isErr) lines.push('(error response)');
|
||||
for (const c of content) {
|
||||
if (c.text) {
|
||||
for (const l of c.text.split('\n')) {
|
||||
lines.push(l);
|
||||
}
|
||||
} else {
|
||||
lines.push(`[${c.type ?? 'unknown'} content]`);
|
||||
}
|
||||
}
|
||||
} else if (method === 'initialize') {
|
||||
const si = result['serverInfo'] as Record<string, unknown> | undefined;
|
||||
lines.push(`Server: ${si?.['name'] ?? '?'} v${si?.['version'] ?? '?'}`);
|
||||
lines.push(`Protocol: ${result['protocolVersion'] ?? '?'}`);
|
||||
const caps = result['capabilities'] as Record<string, unknown> | undefined;
|
||||
if (caps) {
|
||||
lines.push('Capabilities:');
|
||||
for (const [k, v] of Object.entries(caps)) {
|
||||
if (v && typeof v === 'object' && Object.keys(v).length > 0) {
|
||||
lines.push(` ${k}: ${JSON.stringify(v)}`);
|
||||
}
|
||||
}
|
||||
}
|
||||
const instructions = result['instructions'] as string | undefined;
|
||||
if (instructions) {
|
||||
lines.push('');
|
||||
lines.push('Instructions:');
|
||||
for (const l of instructions.split('\n')) {
|
||||
lines.push(` ${l}`);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
for (const l of JSON.stringify(result, null, 2).split('\n')) {
|
||||
lines.push(l);
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Lifecycle events
|
||||
for (const l of JSON.stringify(bodyObj, null, 2).split('\n')) {
|
||||
lines.push(l);
|
||||
}
|
||||
}
|
||||
|
||||
return lines;
|
||||
}
|
||||
|
||||
/** Presentation-ready rendering of a traffic event for the timeline UI. */
export interface FormattedEvent {
  // Direction glyph (e.g. '\u2192' for a client request, '\u25CF' for session start).
  arrow: string;
  // Color name for the arrow/label (consumed by the terminal UI renderer).
  color: string;
  // Short label, usually the JSON-RPC method, optionally prefixed by upstream name.
  label: string;
  // One-line human-readable summary of the event body.
  detail: string;
  // Optional color override for `detail` — set to 'red' for error responses.
  detailColor?: string | undefined;
}
|
||||
|
||||
export function formatEventSummary(
|
||||
eventType: TrafficEventType,
|
||||
method: string | undefined,
|
||||
body: unknown,
|
||||
upstreamName?: string,
|
||||
durationMs?: number,
|
||||
): FormattedEvent {
|
||||
const m = method ?? '';
|
||||
|
||||
switch (eventType) {
|
||||
case 'client_request':
|
||||
return { arrow: '\u2192', color: 'green', label: m, detail: summarizeRequest(m, body) };
|
||||
case 'client_response': {
|
||||
const detail = summarizeResponse(m, body, durationMs);
|
||||
const hasError = detail.startsWith('ERROR');
|
||||
return { arrow: '\u2190', color: 'blue', label: m, detail, detailColor: hasError ? 'red' : undefined };
|
||||
}
|
||||
case 'client_notification':
|
||||
return { arrow: '\u25C2', color: 'magenta', label: m, detail: summarizeRequest(m, body) };
|
||||
case 'upstream_request':
|
||||
return { arrow: ' \u21E2', color: 'yellowBright', label: `${upstreamName ?? '?'}/${m}`, detail: summarizeRequest(m, body) };
|
||||
case 'upstream_response': {
|
||||
const detail = summarizeResponse(m, body, durationMs);
|
||||
const hasError = detail.startsWith('ERROR');
|
||||
return { arrow: ' \u21E0', color: 'yellowBright', label: `${upstreamName ?? '?'}/${m}`, detail, detailColor: hasError ? 'red' : undefined };
|
||||
}
|
||||
case 'session_created':
|
||||
return { arrow: '\u25CF', color: 'cyan', label: 'session', detail: '' };
|
||||
case 'session_closed':
|
||||
return { arrow: '\u25CB', color: 'red', label: 'session', detail: 'closed' };
|
||||
default:
|
||||
return { arrow: '?', color: 'white', label: eventType, detail: '' };
|
||||
}
|
||||
}
|
||||
@@ -1,113 +0,0 @@
|
||||
import { Command } from 'commander';
|
||||
|
||||
/** Injectable dependencies for the `console` command (overridable for testing). */
export interface ConsoleCommandDeps {
  // Returns the currently selected project name, if any.
  // NOTE(review): not referenced in the visible action handler — confirm callers rely on it.
  getProject: () => string | undefined;
  // Optional config source; when absent, ../../config/index.js is dynamically imported.
  configLoader?: () => { mcplocalUrl: string };
  // Optional credentials source; when absent, ../../auth/index.js is dynamically imported.
  credentialsLoader?: () => { token: string } | null;
}
|
||||
|
||||
/**
 * Build the `console` commander subcommand.
 *
 * Modes:
 *  - default: launches the unified TUI console (observe-only without a project);
 *  - --stdin-mcp: runs the inspector as an MCP server on stdin/stdout;
 *  - --audit: browses audit events from mcpd.
 *
 * Exits the process with code 1 when a project preflight fails.
 */
export function createConsoleCommand(deps: ConsoleCommandDeps): Command {
  const cmd = new Command('console')
    .description('Interactive MCP console — unified timeline with tools, provenance, and lab replay')
    .argument('[project]', 'Project name to connect to')
    .option('--stdin-mcp', 'Run inspector as MCP server over stdin/stdout (for Claude)')
    .option('--audit', 'Browse audit events from mcpd')
    .action(async (projectName: string | undefined, opts: { stdinMcp?: boolean; audit?: boolean }) => {
      // Resolve the mcplocal base URL: injected loader first, then the real
      // config module, falling back to the default on any load failure.
      let mcplocalUrl = 'http://localhost:3200';
      if (deps.configLoader) {
        mcplocalUrl = deps.configLoader().mcplocalUrl;
      } else {
        try {
          const { loadConfig } = await import('../../config/index.js');
          mcplocalUrl = loadConfig().mcplocalUrl;
        } catch {
          // Use default
        }
      }

      // --stdin-mcp: MCP server for Claude (unchanged)
      if (opts.stdinMcp) {
        const { runInspectMcp } = await import('./inspect-mcp.js');
        await runInspectMcp(mcplocalUrl);
        return;
      }

      // Resolve an auth token the same way: injected loader, then auth module.
      let token: string | undefined;
      if (deps.credentialsLoader) {
        token = deps.credentialsLoader()?.token;
      } else {
        try {
          const { loadCredentials } = await import('../../auth/index.js');
          token = loadCredentials()?.token;
        } catch {
          // No credentials
        }
      }

      // --audit: browse audit events from mcpd
      if (opts.audit) {
        let mcpdUrl = 'http://localhost:3100';
        try {
          const { loadConfig } = await import('../../config/index.js');
          mcpdUrl = loadConfig().mcpdUrl;
        } catch {
          // Use default
        }
        const { renderAuditConsole } = await import('./audit-app.js');
        await renderAuditConsole({ mcpdUrl, token, projectFilter: projectName });
        return;
      }

      // Build endpoint URL only if project specified
      let endpointUrl: string | undefined;
      if (projectName) {
        endpointUrl = `${mcplocalUrl.replace(/\/$/, '')}/projects/${encodeURIComponent(projectName)}/mcp`;

        // Preflight check: verify the project exists before launching the TUI
        const { postJsonRpc, sendDelete } = await import('../mcp.js');
        try {
          // Send a throwaway MCP initialize; a 4xx/5xx here means the project
          // is missing or the daemon is unhealthy.
          const initResult = await postJsonRpc(
            endpointUrl,
            JSON.stringify({
              jsonrpc: '2.0',
              id: 0,
              method: 'initialize',
              params: {
                protocolVersion: '2024-11-05',
                capabilities: {},
                clientInfo: { name: 'mcpctl-preflight', version: '0.0.1' },
              },
            }),
            undefined,
            token,
          );

          if (initResult.status >= 400) {
            // Prefer the server's structured error message when the body is JSON.
            try {
              const body = JSON.parse(initResult.body) as { error?: string };
              console.error(`Error: ${body.error ?? `HTTP ${initResult.status}`}`);
            } catch {
              console.error(`Error: HTTP ${initResult.status} — ${initResult.body}`);
            }
            process.exit(1);
          }

          // Clean up the preflight session
          const sid = initResult.headers['mcp-session-id'];
          if (typeof sid === 'string') {
            await sendDelete(endpointUrl, sid, token);
          }
        } catch (err) {
          console.error(`Error: cannot connect to mcplocal at ${mcplocalUrl}`);
          console.error(err instanceof Error ? err.message : String(err));
          process.exit(1);
        }
      }

      // Launch unified console (observe-only if no project, interactive available if project given)
      const { renderUnifiedConsole } = await import('./unified-app.js');
      await renderUnifiedConsole({ projectName, endpointUrl, mcplocalUrl, token });
    });

  return cmd;
}
|
||||
@@ -1,624 +0,0 @@
|
||||
/**
|
||||
* MCP server over stdin/stdout for the traffic inspector.
|
||||
*
|
||||
* Claude adds this to .mcp.json as:
|
||||
* { "mcpctl-inspect": { "command": "mcpctl", "args": ["console", "--stdin-mcp"] } }
|
||||
*
|
||||
* Subscribes to mcplocal's /inspect SSE endpoint and exposes traffic
|
||||
* data via MCP tools: list_sessions, get_traffic, get_session_info.
|
||||
*/
|
||||
|
||||
import { createInterface } from 'node:readline';
|
||||
import { request as httpRequest } from 'node:http';
|
||||
import type { IncomingMessage } from 'node:http';
|
||||
|
||||
// ── Types ──
|
||||
|
||||
/** One captured MCP traffic event as delivered by mcplocal's /inspect SSE feed. */
interface TrafficEvent {
  // ISO-8601 capture timestamp (split on 'T' elsewhere for display).
  timestamp: string;
  projectName: string;
  sessionId: string;
  // e.g. client_request, client_response, upstream_request, session_created.
  eventType: string;
  // JSON-RPC method, absent for pure lifecycle events.
  method?: string;
  // Set for upstream_* events: which upstream server was involved.
  upstreamName?: string;
  // Raw JSON-RPC message body.
  body: unknown;
  // Round-trip time, present on response events.
  durationMs?: number;
}
|
||||
|
||||
/** Bookkeeping entry for a live MCP session tracked from the traffic stream. */
interface ActiveSession {
  sessionId: string;
  projectName: string;
  // ISO-8601 time the session was first observed.
  startedAt: string;
  // Incremented for every traffic event attributed to this session.
  eventCount: number;
}
|
||||
|
||||
/** Minimal JSON-RPC 2.0 request shape read from stdin. */
interface JsonRpcRequest {
  // Expected to be the literal '2.0'; not validated here.
  jsonrpc: string;
  id: string | number;
  method: string;
  params?: Record<string, unknown>;
}
|
||||
|
||||
// ── State ──
|
||||
|
||||
// Live sessions keyed by full session ID.
const sessions = new Map<string, ActiveSession>();
// Ring buffer of captured traffic; oldest entries are dropped beyond MAX_EVENTS.
const events: TrafficEvent[] = [];
const MAX_EVENTS = 10000;
// Base URL of the local mcplocal daemon.
// NOTE(review): mutable — presumably reassigned at startup from CLI config; confirm.
let mcplocalBaseUrl = 'http://localhost:3200';
|
||||
|
||||
// ── SSE Client ──
|
||||
|
||||
/**
 * Subscribe to mcplocal's Server-Sent-Events traffic feed and keep the local
 * session map and event buffer up to date. Reconnects 2s after any
 * end/error, at both the request and response level.
 */
function connectSSE(url: string): void {
  const parsed = new URL(url);

  const req = httpRequest(
    {
      hostname: parsed.hostname,
      port: parsed.port,
      path: parsed.pathname + parsed.search,
      headers: { Accept: 'text/event-stream' },
    },
    (res: IncomingMessage) => {
      // SSE parse state: partial line carried across chunks, and the
      // `event:` name that applies to the next `data:` line.
      let buffer = '';
      let currentEventType = 'message';

      res.setEncoding('utf-8');
      res.on('data', (chunk: string) => {
        buffer += chunk;
        const lines = buffer.split('\n');
        // The last element may be an incomplete line — keep it for next chunk.
        buffer = lines.pop()!;

        for (const line of lines) {
          if (line.startsWith('event: ')) {
            currentEventType = line.slice(7).trim();
          } else if (line.startsWith('data: ')) {
            try {
              const data = JSON.parse(line.slice(6));
              if (currentEventType === 'sessions') {
                // Initial snapshot of already-active sessions.
                for (const s of data as Array<{ sessionId: string; projectName: string; startedAt: string }>) {
                  sessions.set(s.sessionId, { ...s, eventCount: 0 });
                }
              } else if (currentEventType !== 'live') {
                // Everything except the 'live' heartbeat is a traffic event.
                handleEvent(data as TrafficEvent);
              }
            } catch {
              // ignore
            }
            // Each data line consumes the pending event name.
            currentEventType = 'message';
          }
        }
      });

      res.on('end', () => {
        // Reconnect after 2s
        setTimeout(() => connectSSE(url), 2000);
      });

      res.on('error', () => {
        setTimeout(() => connectSSE(url), 2000);
      });
    },
  );

  req.on('error', () => {
    setTimeout(() => connectSSE(url), 2000);
  });

  req.end();
}
|
||||
|
||||
function handleEvent(event: TrafficEvent): void {
|
||||
events.push(event);
|
||||
if (events.length > MAX_EVENTS) {
|
||||
events.splice(0, events.length - MAX_EVENTS);
|
||||
}
|
||||
|
||||
// Track sessions
|
||||
if (event.eventType === 'session_created') {
|
||||
sessions.set(event.sessionId, {
|
||||
sessionId: event.sessionId,
|
||||
projectName: event.projectName,
|
||||
startedAt: event.timestamp,
|
||||
eventCount: 0,
|
||||
});
|
||||
} else if (event.eventType === 'session_closed') {
|
||||
sessions.delete(event.sessionId);
|
||||
}
|
||||
|
||||
// Increment event count
|
||||
const session = sessions.get(event.sessionId);
|
||||
if (session) {
|
||||
session.eventCount++;
|
||||
}
|
||||
}
|
||||
|
||||
// ── MCP Protocol Handlers ──
|
||||
|
||||
/**
 * Static MCP tool manifest advertised via tools/list.
 * Inspection tools read local state; Studio tools call back into mcplocal's HTTP API.
 */
const TOOLS = [
  {
    name: 'list_sessions',
    description: 'List all active MCP sessions with their project name, start time, and event count.',
    inputSchema: {
      type: 'object' as const,
      properties: {
        project: { type: 'string' as const, description: 'Filter by project name' },
      },
    },
  },
  {
    name: 'get_traffic',
    description: 'Get captured MCP traffic events. Returns recent events, optionally filtered by session, method, or event type.',
    inputSchema: {
      type: 'object' as const,
      properties: {
        sessionId: { type: 'string' as const, description: 'Filter by session ID (first 8 chars is enough)' },
        method: { type: 'string' as const, description: 'Filter by JSON-RPC method (e.g. "tools/call", "initialize")' },
        eventType: { type: 'string' as const, description: 'Filter by event type: client_request, client_response, client_notification, upstream_request, upstream_response' },
        limit: { type: 'number' as const, description: 'Max events to return (default: 50)' },
        offset: { type: 'number' as const, description: 'Skip first N matching events' },
      },
    },
  },
  {
    name: 'get_session_info',
    description: 'Get detailed information about a specific session including its recent traffic summary.',
    inputSchema: {
      type: 'object' as const,
      properties: {
        sessionId: { type: 'string' as const, description: 'Session ID (first 8 chars is enough)' },
      },
      required: ['sessionId'] as const,
    },
  },
  // ── Studio tools (task 109) ──
  {
    name: 'list_models',
    description: 'List all available proxymodels (YAML pipelines and TypeScript plugins).',
    inputSchema: { type: 'object' as const, properties: {} },
  },
  {
    name: 'list_stages',
    description: 'List all available pipeline stages (built-in and custom).',
    inputSchema: { type: 'object' as const, properties: {} },
  },
  {
    name: 'switch_model',
    description: 'Hot-swap the active proxymodel on a running project. Optionally target a specific server.',
    inputSchema: {
      type: 'object' as const,
      properties: {
        project: { type: 'string' as const, description: 'Project name' },
        proxyModel: { type: 'string' as const, description: 'ProxyModel name to switch to' },
        serverName: { type: 'string' as const, description: 'Optional: target a specific server instead of project-wide' },
      },
      required: ['project', 'proxyModel'] as const,
    },
  },
  {
    name: 'get_model_info',
    description: 'Get detailed info about a specific proxymodel (stages, hooks, config).',
    inputSchema: {
      type: 'object' as const,
      properties: {
        name: { type: 'string' as const, description: 'ProxyModel name' },
      },
      required: ['name'] as const,
    },
  },
  {
    name: 'reload_stages',
    description: 'Force reload all custom stages from ~/.mcpctl/stages/. Use after editing stage files.',
    inputSchema: { type: 'object' as const, properties: {} },
  },
  {
    name: 'pause',
    description: 'Toggle pause mode. When paused, pipeline results are held in a queue for inspection/editing before being sent to the client.',
    inputSchema: {
      type: 'object' as const,
      properties: {
        paused: { type: 'boolean' as const, description: 'true to pause, false to resume (releases all queued items)' },
      },
      required: ['paused'] as const,
    },
  },
  {
    name: 'get_pause_queue',
    description: 'List all items currently held in the pause queue. Each item shows original and transformed content.',
    inputSchema: { type: 'object' as const, properties: {} },
  },
  {
    name: 'release_paused',
    description: 'Release a paused item (send transformed content to client), edit it (send custom content), or drop it (send empty).',
    inputSchema: {
      type: 'object' as const,
      properties: {
        id: { type: 'string' as const, description: 'Item ID from pause queue' },
        action: { type: 'string' as const, description: 'Action: "release", "edit", or "drop"' },
        content: { type: 'string' as const, description: 'Required for "edit" action: the modified content to send' },
      },
      required: ['id', 'action'] as const,
    },
  },
];
|
||||
|
||||
function handleInitialize(id: string | number): void {
|
||||
send({
|
||||
jsonrpc: '2.0',
|
||||
id,
|
||||
result: {
|
||||
protocolVersion: '2024-11-05',
|
||||
serverInfo: { name: 'mcpctl-inspector', version: '1.0.0' },
|
||||
capabilities: { tools: {} },
|
||||
},
|
||||
});
|
||||
}
|
||||
|
||||
function handleToolsList(id: string | number): void {
|
||||
send({ jsonrpc: '2.0', id, result: { tools: TOOLS } });
|
||||
}
|
||||
|
||||
// ── HTTP helpers for mcplocal API calls ──
|
||||
|
||||
/**
 * Call mcplocal's HTTP API and parse the response body as JSON.
 *
 * @param path   - Path appended to mcplocalBaseUrl (should start with '/').
 * @param method - HTTP method, default GET.
 * @param body   - Optional request body, JSON-serialized when provided.
 * @returns Parsed JSON response.
 *          Rejects on connection error, 10s timeout, or non-JSON response.
 *          NOTE(review): HTTP error statuses are not rejected — a 4xx/5xx JSON
 *          body resolves normally; confirm callers expect that.
 */
function fetchApi<T>(path: string, method = 'GET', body?: unknown): Promise<T> {
  return new Promise((resolve, reject) => {
    const url = new URL(`${mcplocalBaseUrl}${path}`);
    const payload = body !== undefined ? JSON.stringify(body) : undefined;
    const req = httpRequest(
      {
        hostname: url.hostname,
        port: url.port,
        path: url.pathname + url.search,
        method,
        // Content headers only when a body is actually sent.
        headers: payload ? { 'Content-Type': 'application/json', 'Content-Length': Buffer.byteLength(payload) } : {},
        timeout: 10_000,
      },
      (res) => {
        // Accumulate the full body before parsing.
        const chunks: Buffer[] = [];
        res.on('data', (chunk: Buffer) => chunks.push(chunk));
        res.on('end', () => {
          try {
            resolve(JSON.parse(Buffer.concat(chunks).toString()) as T);
          } catch {
            reject(new Error(`Invalid JSON from ${path}`));
          }
        });
      },
    );
    req.on('error', (err) => reject(err));
    // Socket timeout: tear the request down so the process can exit cleanly.
    req.on('timeout', () => { req.destroy(); reject(new Error(`Timeout: ${path}`)); });
    if (payload) req.write(payload);
    req.end();
  });
}
|
||||
|
||||
function sendText(id: string | number, text: string): void {
|
||||
send({ jsonrpc: '2.0', id, result: { content: [{ type: 'text', text }] } });
|
||||
}
|
||||
|
||||
function sendError(id: string | number, message: string): void {
|
||||
send({ jsonrpc: '2.0', id, result: { content: [{ type: 'text', text: message }], isError: true } });
|
||||
}
|
||||
|
||||
async function handleToolsCall(id: string | number, params: { name: string; arguments?: Record<string, unknown> }): Promise<void> {
|
||||
const args = params.arguments ?? {};
|
||||
|
||||
switch (params.name) {
|
||||
case 'list_sessions': {
|
||||
let result = [...sessions.values()];
|
||||
const project = args['project'] as string | undefined;
|
||||
if (project) {
|
||||
result = result.filter((s) => s.projectName === project);
|
||||
}
|
||||
sendText(id, JSON.stringify(result, null, 2));
|
||||
break;
|
||||
}
|
||||
|
||||
case 'get_traffic': {
|
||||
const sessionFilter = args['sessionId'] as string | undefined;
|
||||
const methodFilter = args['method'] as string | undefined;
|
||||
const typeFilter = args['eventType'] as string | undefined;
|
||||
const limit = (args['limit'] as number | undefined) ?? 50;
|
||||
const offset = (args['offset'] as number | undefined) ?? 0;
|
||||
|
||||
let filtered = events;
|
||||
if (sessionFilter) {
|
||||
filtered = filtered.filter((e) => e.sessionId.startsWith(sessionFilter));
|
||||
}
|
||||
if (methodFilter) {
|
||||
filtered = filtered.filter((e) => e.method === methodFilter);
|
||||
}
|
||||
if (typeFilter) {
|
||||
filtered = filtered.filter((e) => e.eventType === typeFilter);
|
||||
}
|
||||
|
||||
const sliced = filtered.slice(offset, offset + limit);
|
||||
|
||||
const lines = sliced.map((e) => {
|
||||
const arrow = e.eventType === 'client_request' ? '→'
|
||||
: e.eventType === 'client_response' ? '←'
|
||||
: e.eventType === 'client_notification' ? '◂'
|
||||
: e.eventType === 'upstream_request' ? '⇢'
|
||||
: e.eventType === 'upstream_response' ? '⇠'
|
||||
: e.eventType === 'session_created' ? '●'
|
||||
: e.eventType === 'session_closed' ? '○'
|
||||
: '?';
|
||||
const layer = e.eventType.startsWith('upstream') ? 'internal' : 'client';
|
||||
const ms = e.durationMs !== undefined ? ` (${e.durationMs}ms)` : '';
|
||||
const upstream = e.upstreamName ? `${e.upstreamName}/` : '';
|
||||
const time = e.timestamp.split('T')[1]?.replace('Z', '') ?? e.timestamp;
|
||||
|
||||
const body = e.body as Record<string, unknown> | null;
|
||||
let content = '';
|
||||
if (body) {
|
||||
if (e.eventType.includes('request') || e.eventType === 'client_notification') {
|
||||
const p = body['params'] as Record<string, unknown> | undefined;
|
||||
if (e.method === 'tools/call' && p) {
|
||||
const toolArgs = p['arguments'] as Record<string, unknown> | undefined;
|
||||
content = `tool=${p['name']}${toolArgs ? ` args=${JSON.stringify(toolArgs)}` : ''}`;
|
||||
} else if (e.method === 'resources/read' && p) {
|
||||
content = `uri=${p['uri']}`;
|
||||
} else if (e.method === 'initialize' && p) {
|
||||
const ci = p['clientInfo'] as Record<string, unknown> | undefined;
|
||||
content = ci ? `client=${ci['name']} v${ci['version']}` : '';
|
||||
} else if (p && Object.keys(p).length > 0) {
|
||||
content = JSON.stringify(p);
|
||||
}
|
||||
} else if (e.eventType.includes('response')) {
|
||||
const result = body['result'] as Record<string, unknown> | undefined;
|
||||
const error = body['error'] as Record<string, unknown> | undefined;
|
||||
if (error) {
|
||||
content = `ERROR ${error['code']}: ${error['message']}`;
|
||||
} else if (result) {
|
||||
if (e.method === 'tools/list') {
|
||||
const tools = (result['tools'] ?? []) as Array<{ name: string }>;
|
||||
content = `${tools.length} tools: ${tools.map((t) => t.name).join(', ')}`;
|
||||
} else if (e.method === 'resources/list') {
|
||||
const res = (result['resources'] ?? []) as Array<{ name: string }>;
|
||||
content = `${res.length} resources: ${res.map((r) => r.name).join(', ')}`;
|
||||
} else if (e.method === 'tools/call') {
|
||||
const c = (result['content'] ?? []) as Array<{ text?: string }>;
|
||||
const text = c[0]?.text ?? '';
|
||||
content = text.length > 200 ? text.slice(0, 200) + '…' : text;
|
||||
} else if (e.method === 'initialize') {
|
||||
const si = result['serverInfo'] as Record<string, unknown> | undefined;
|
||||
content = si ? `server=${si['name']} v${si['version']}` : '';
|
||||
} else if (Object.keys(result).length > 0) {
|
||||
const s = JSON.stringify(result);
|
||||
content = s.length > 200 ? s.slice(0, 200) + '…' : s;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return `${time} ${arrow} [${layer}] ${upstream}${e.method ?? e.eventType}${ms}${content ? ' ' + content : ''}`;
|
||||
});
|
||||
|
||||
sendText(id, `${filtered.length} total events (showing ${offset + 1}-${offset + sliced.length})\n\n${lines.join('\n')}`);
|
||||
break;
|
||||
}
|
||||
|
||||
case 'get_session_info': {
|
||||
const sid = args['sessionId'] as string;
|
||||
const session = [...sessions.values()].find((s) => s.sessionId.startsWith(sid));
|
||||
if (!session) {
|
||||
sendError(id, `Session not found: ${sid}`);
|
||||
return;
|
||||
}
|
||||
|
||||
const sessionEvents = events.filter((e) => e.sessionId === session.sessionId);
|
||||
const methods = new Map<string, number>();
|
||||
for (const e of sessionEvents) {
|
||||
if (e.method) {
|
||||
methods.set(e.method, (methods.get(e.method) ?? 0) + 1);
|
||||
}
|
||||
}
|
||||
|
||||
const info = {
|
||||
...session,
|
||||
totalEvents: sessionEvents.length,
|
||||
methodCounts: Object.fromEntries(methods),
|
||||
lastEvent: sessionEvents.length > 0
|
||||
? sessionEvents[sessionEvents.length - 1]!.timestamp
|
||||
: null,
|
||||
};
|
||||
|
||||
sendText(id, JSON.stringify(info, null, 2));
|
||||
break;
|
||||
}
|
||||
|
||||
// ── Studio tools ──
|
||||
|
||||
case 'list_models': {
|
||||
try {
|
||||
const models = await fetchApi<unknown[]>('/proxymodels');
|
||||
sendText(id, JSON.stringify(models, null, 2));
|
||||
} catch (err) {
|
||||
sendError(id, `Failed to list models: ${err instanceof Error ? err.message : String(err)}`);
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
case 'list_stages': {
|
||||
try {
|
||||
const stages = await fetchApi<unknown[]>('/proxymodels/stages');
|
||||
sendText(id, JSON.stringify(stages, null, 2));
|
||||
} catch {
|
||||
// Fallback: stages endpoint may not exist yet, list from models
|
||||
sendError(id, 'Stages endpoint not available. Check mcplocal version.');
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
case 'switch_model': {
|
||||
const project = args['project'] as string;
|
||||
const proxyModel = args['proxyModel'] as string;
|
||||
const serverName = args['serverName'] as string | undefined;
|
||||
if (!project || !proxyModel) {
|
||||
sendError(id, 'project and proxyModel are required');
|
||||
return;
|
||||
}
|
||||
try {
|
||||
const body: Record<string, string> = serverName
|
||||
? { serverName, serverProxyModel: proxyModel }
|
||||
: { proxyModel };
|
||||
const result = await fetchApi<unknown>(`/projects/${encodeURIComponent(project)}/override`, 'PUT', body);
|
||||
sendText(id, `Switched to ${proxyModel}${serverName ? ` on ${serverName}` : ' (project-wide)'}.\n\n${JSON.stringify(result, null, 2)}`);
|
||||
} catch (err) {
|
||||
sendError(id, `Failed to switch model: ${err instanceof Error ? err.message : String(err)}`);
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
case 'get_model_info': {
|
||||
const name = args['name'] as string;
|
||||
if (!name) {
|
||||
sendError(id, 'name is required');
|
||||
return;
|
||||
}
|
||||
try {
|
||||
const info = await fetchApi<unknown>(`/proxymodels/${encodeURIComponent(name)}`);
|
||||
sendText(id, JSON.stringify(info, null, 2));
|
||||
} catch (err) {
|
||||
sendError(id, `Failed to get model info: ${err instanceof Error ? err.message : String(err)}`);
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
case 'reload_stages': {
|
||||
try {
|
||||
const result = await fetchApi<unknown>('/proxymodels/reload', 'POST');
|
||||
sendText(id, `Stages reloaded.\n\n${JSON.stringify(result, null, 2)}`);
|
||||
} catch {
|
||||
sendError(id, 'Reload endpoint not available. Check mcplocal version.');
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
case 'pause': {
|
||||
const paused = args['paused'] as boolean;
|
||||
if (typeof paused !== 'boolean') {
|
||||
sendError(id, 'paused must be a boolean');
|
||||
return;
|
||||
}
|
||||
try {
|
||||
const result = await fetchApi<{ paused: boolean; queueSize: number }>('/pause', 'PUT', { paused });
|
||||
sendText(id, paused
|
||||
? `Paused. Pipeline results will be held for inspection. Queue size: ${result.queueSize}`
|
||||
: `Resumed. Released ${result.queueSize} queued items.`);
|
||||
} catch (err) {
|
||||
sendError(id, `Failed to toggle pause: ${err instanceof Error ? err.message : String(err)}`);
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
case 'get_pause_queue': {
|
||||
try {
|
||||
const result = await fetchApi<{ paused: boolean; items: Array<{ id: string; sourceName: string; contentType: string; original: string; transformed: string; timestamp: number }> }>('/pause/queue');
|
||||
if (result.items.length === 0) {
|
||||
sendText(id, `Pause mode: ${result.paused ? 'ON' : 'OFF'}. Queue is empty.`);
|
||||
} else {
|
||||
const lines = result.items.map((item, i) => {
|
||||
const age = Math.round((Date.now() - item.timestamp) / 1000);
|
||||
const origLen = item.original.length;
|
||||
const transLen = item.transformed.length;
|
||||
const preview = item.transformed.length > 200 ? item.transformed.slice(0, 200) + '...' : item.transformed;
|
||||
return `[${i + 1}] id=${item.id}\n source: ${item.sourceName} (${item.contentType})\n original: ${origLen} chars → transformed: ${transLen} chars (${age}s ago)\n preview: ${preview}`;
|
||||
});
|
||||
sendText(id, `Pause mode: ${result.paused ? 'ON' : 'OFF'}. ${result.items.length} item(s) queued:\n\n${lines.join('\n\n')}`);
|
||||
}
|
||||
} catch (err) {
|
||||
sendError(id, `Failed to get pause queue: ${err instanceof Error ? err.message : String(err)}`);
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
case 'release_paused': {
|
||||
const itemId = args['id'] as string;
|
||||
const action = args['action'] as string;
|
||||
if (!itemId || !action) {
|
||||
sendError(id, 'id and action are required');
|
||||
return;
|
||||
}
|
||||
try {
|
||||
if (action === 'release') {
|
||||
await fetchApi<unknown>(`/pause/queue/${encodeURIComponent(itemId)}/release`, 'POST');
|
||||
sendText(id, `Released item ${itemId} with transformed content.`);
|
||||
} else if (action === 'edit') {
|
||||
const content = args['content'] as string;
|
||||
if (typeof content !== 'string') {
|
||||
sendError(id, 'content is required for edit action');
|
||||
return;
|
||||
}
|
||||
await fetchApi<unknown>(`/pause/queue/${encodeURIComponent(itemId)}/edit`, 'POST', { content });
|
||||
sendText(id, `Edited and released item ${itemId} with custom content (${content.length} chars).`);
|
||||
} else if (action === 'drop') {
|
||||
await fetchApi<unknown>(`/pause/queue/${encodeURIComponent(itemId)}/drop`, 'POST');
|
||||
sendText(id, `Dropped item ${itemId}. Empty content sent to client.`);
|
||||
} else {
|
||||
sendError(id, `Unknown action: ${action}. Use "release", "edit", or "drop".`);
|
||||
}
|
||||
} catch (err) {
|
||||
sendError(id, `Failed to ${action} item: ${err instanceof Error ? err.message : String(err)}`);
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
default:
|
||||
send({
|
||||
jsonrpc: '2.0',
|
||||
id,
|
||||
error: { code: -32601, message: `Unknown tool: ${params.name}` },
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
async function handleRequest(request: JsonRpcRequest): Promise<void> {
|
||||
switch (request.method) {
|
||||
case 'initialize':
|
||||
handleInitialize(request.id);
|
||||
break;
|
||||
case 'notifications/initialized':
|
||||
// Notification — no response
|
||||
break;
|
||||
case 'tools/list':
|
||||
handleToolsList(request.id);
|
||||
break;
|
||||
case 'tools/call':
|
||||
await handleToolsCall(request.id, request.params as { name: string; arguments?: Record<string, unknown> });
|
||||
break;
|
||||
default:
|
||||
if (request.id !== undefined) {
|
||||
send({
|
||||
jsonrpc: '2.0',
|
||||
id: request.id,
|
||||
error: { code: -32601, message: `Method not supported: ${request.method}` },
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
function send(message: unknown): void {
|
||||
process.stdout.write(JSON.stringify(message) + '\n');
|
||||
}
|
||||
|
||||
// ── Entrypoint ──
|
||||
|
||||
export async function runInspectMcp(mcplocalUrl: string): Promise<void> {
|
||||
mcplocalBaseUrl = mcplocalUrl.replace(/\/$/, '');
|
||||
const inspectUrl = `${mcplocalBaseUrl}/inspect`;
|
||||
connectSSE(inspectUrl);
|
||||
|
||||
const rl = createInterface({ input: process.stdin });
|
||||
|
||||
for await (const line of rl) {
|
||||
const trimmed = line.trim();
|
||||
if (!trimmed) continue;
|
||||
|
||||
try {
|
||||
const request = JSON.parse(trimmed) as JsonRpcRequest;
|
||||
await handleRequest(request);
|
||||
} catch {
|
||||
// Ignore unparseable lines
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,238 +0,0 @@
|
||||
/**
|
||||
* MCP protocol session — wraps HTTP transport with typed methods.
|
||||
*
|
||||
* Every request/response is logged via the onLog callback so
|
||||
* the console UI can display raw JSON-RPC traffic.
|
||||
*/
|
||||
|
||||
import { postJsonRpc, sendDelete, extractJsonRpcMessages } from '../mcp.js';
|
||||
|
||||
/** One JSON-RPC traffic record surfaced to the console UI via onLog. */
export interface LogEntry {
  // When the entry was recorded.
  timestamp: Date;
  // Outgoing request, incoming response, or a transport-level error.
  direction: 'request' | 'response' | 'error';
  // JSON-RPC method name when known; raw traffic may omit it.
  method?: string;
  // Full message payload — parsed JSON when possible, raw text otherwise.
  body: unknown;
}
|
||||
|
||||
/** Tool descriptor as returned by the server's `tools/list`. */
export interface McpTool {
  name: string;
  description?: string;
  // Schema describing the tool's input arguments, if the server provides one.
  inputSchema?: Record<string, unknown>;
}
|
||||
|
||||
/** Resource descriptor as returned by the server's `resources/list`. */
export interface McpResource {
  uri: string;
  name?: string;
  description?: string;
  mimeType?: string;
}
|
||||
|
||||
/** Prompt descriptor as returned by the server's `prompts/list`. */
export interface McpPrompt {
  name: string;
  description?: string;
  // Argument metadata the prompt accepts when fetched via prompts/get.
  arguments?: Array<{ name: string; description?: string; required?: boolean }>;
}
|
||||
|
||||
/** Result payload of the MCP `initialize` handshake. */
export interface InitializeResult {
  protocolVersion: string;
  serverInfo: { name: string; version: string };
  capabilities: Record<string, unknown>;
  // Optional free-text usage instructions supplied by the server.
  instructions?: string;
}
|
||||
|
||||
/** Result payload of a `tools/call` invocation. */
export interface CallToolResult {
  // Content parts; text parts carry their payload in `text`.
  content: Array<{ type: string; text?: string }>;
  // True when the tool reported a failure rather than a normal result.
  isError?: boolean;
}
|
||||
|
||||
/** Result payload of a `resources/read` request. */
export interface ReadResourceResult {
  contents: Array<{ uri: string; mimeType?: string; text?: string }>;
}
|
||||
|
||||
export class McpSession {
|
||||
private sessionId?: string;
|
||||
private nextId = 1;
|
||||
private log: LogEntry[] = [];
|
||||
|
||||
onLog?: (entry: LogEntry) => void;
|
||||
|
||||
constructor(
|
||||
private readonly endpointUrl: string,
|
||||
private readonly token?: string,
|
||||
) {}
|
||||
|
||||
getSessionId(): string | undefined {
|
||||
return this.sessionId;
|
||||
}
|
||||
|
||||
getLog(): LogEntry[] {
|
||||
return this.log;
|
||||
}
|
||||
|
||||
async initialize(): Promise<InitializeResult> {
|
||||
const request = {
|
||||
jsonrpc: '2.0',
|
||||
id: this.nextId++,
|
||||
method: 'initialize',
|
||||
params: {
|
||||
protocolVersion: '2024-11-05',
|
||||
capabilities: {},
|
||||
clientInfo: { name: 'mcpctl-console', version: '1.0.0' },
|
||||
},
|
||||
};
|
||||
|
||||
const result = await this.send(request);
|
||||
|
||||
// Send initialized notification
|
||||
const notification = {
|
||||
jsonrpc: '2.0',
|
||||
method: 'notifications/initialized',
|
||||
};
|
||||
await this.sendNotification(notification);
|
||||
|
||||
return result as InitializeResult;
|
||||
}
|
||||
|
||||
async listTools(): Promise<McpTool[]> {
|
||||
const result = await this.send({
|
||||
jsonrpc: '2.0',
|
||||
id: this.nextId++,
|
||||
method: 'tools/list',
|
||||
params: {},
|
||||
}) as { tools: McpTool[] };
|
||||
return result.tools ?? [];
|
||||
}
|
||||
|
||||
async callTool(name: string, args: Record<string, unknown>): Promise<CallToolResult> {
|
||||
return await this.send({
|
||||
jsonrpc: '2.0',
|
||||
id: this.nextId++,
|
||||
method: 'tools/call',
|
||||
params: { name, arguments: args },
|
||||
}) as CallToolResult;
|
||||
}
|
||||
|
||||
async listResources(): Promise<McpResource[]> {
|
||||
const result = await this.send({
|
||||
jsonrpc: '2.0',
|
||||
id: this.nextId++,
|
||||
method: 'resources/list',
|
||||
params: {},
|
||||
}) as { resources: McpResource[] };
|
||||
return result.resources ?? [];
|
||||
}
|
||||
|
||||
async readResource(uri: string): Promise<ReadResourceResult> {
|
||||
return await this.send({
|
||||
jsonrpc: '2.0',
|
||||
id: this.nextId++,
|
||||
method: 'resources/read',
|
||||
params: { uri },
|
||||
}) as ReadResourceResult;
|
||||
}
|
||||
|
||||
async listPrompts(): Promise<McpPrompt[]> {
|
||||
const result = await this.send({
|
||||
jsonrpc: '2.0',
|
||||
id: this.nextId++,
|
||||
method: 'prompts/list',
|
||||
params: {},
|
||||
}) as { prompts: McpPrompt[] };
|
||||
return result.prompts ?? [];
|
||||
}
|
||||
|
||||
async getPrompt(name: string, args?: Record<string, unknown>): Promise<unknown> {
|
||||
return await this.send({
|
||||
jsonrpc: '2.0',
|
||||
id: this.nextId++,
|
||||
method: 'prompts/get',
|
||||
params: { name, arguments: args ?? {} },
|
||||
});
|
||||
}
|
||||
|
||||
async sendRaw(json: string): Promise<string> {
|
||||
this.addLog('request', undefined, JSON.parse(json));
|
||||
|
||||
const result = await postJsonRpc(this.endpointUrl, json, this.sessionId, this.token);
|
||||
|
||||
if (!this.sessionId) {
|
||||
const sid = result.headers['mcp-session-id'];
|
||||
if (typeof sid === 'string') {
|
||||
this.sessionId = sid;
|
||||
}
|
||||
}
|
||||
|
||||
const messages = extractJsonRpcMessages(result.headers['content-type'], result.body);
|
||||
const combined = messages.join('\n');
|
||||
|
||||
for (const msg of messages) {
|
||||
try {
|
||||
this.addLog('response', undefined, JSON.parse(msg));
|
||||
} catch {
|
||||
this.addLog('response', undefined, msg);
|
||||
}
|
||||
}
|
||||
|
||||
return combined;
|
||||
}
|
||||
|
||||
async close(): Promise<void> {
|
||||
if (this.sessionId) {
|
||||
await sendDelete(this.endpointUrl, this.sessionId, this.token);
|
||||
this.sessionId = undefined;
|
||||
}
|
||||
}
|
||||
|
||||
private async send(request: Record<string, unknown>): Promise<unknown> {
|
||||
const method = request.method as string;
|
||||
this.addLog('request', method, request);
|
||||
|
||||
const body = JSON.stringify(request);
|
||||
let result;
|
||||
try {
|
||||
result = await postJsonRpc(this.endpointUrl, body, this.sessionId, this.token);
|
||||
} catch (err) {
|
||||
this.addLog('error', method, { error: err instanceof Error ? err.message : String(err) });
|
||||
throw err;
|
||||
}
|
||||
|
||||
// Capture session ID
|
||||
if (!this.sessionId) {
|
||||
const sid = result.headers['mcp-session-id'];
|
||||
if (typeof sid === 'string') {
|
||||
this.sessionId = sid;
|
||||
}
|
||||
}
|
||||
|
||||
const messages = extractJsonRpcMessages(result.headers['content-type'], result.body);
|
||||
const firstMsg = messages[0];
|
||||
if (!firstMsg) {
|
||||
throw new Error(`Empty response for ${method}`);
|
||||
}
|
||||
|
||||
const parsed = JSON.parse(firstMsg) as { result?: unknown; error?: { code: number; message: string } };
|
||||
this.addLog('response', method, parsed);
|
||||
|
||||
if (parsed.error) {
|
||||
throw new Error(`MCP error ${parsed.error.code}: ${parsed.error.message}`);
|
||||
}
|
||||
|
||||
return parsed.result;
|
||||
}
|
||||
|
||||
private async sendNotification(notification: Record<string, unknown>): Promise<void> {
|
||||
const body = JSON.stringify(notification);
|
||||
this.addLog('request', notification.method as string, notification);
|
||||
try {
|
||||
await postJsonRpc(this.endpointUrl, body, this.sessionId, this.token);
|
||||
} catch {
|
||||
// Notifications are fire-and-forget
|
||||
}
|
||||
}
|
||||
|
||||
private addLog(direction: LogEntry['direction'], method: string | undefined, body: unknown): void {
|
||||
const entry: LogEntry = { timestamp: new Date(), direction, method, body };
|
||||
this.log.push(entry);
|
||||
this.onLog?.(entry);
|
||||
}
|
||||
}
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,157 +0,0 @@
|
||||
/**
|
||||
* Shared types for the unified MCP console.
|
||||
*/
|
||||
|
||||
import type { McpTool, McpResource, McpPrompt, InitializeResult, McpSession } from './mcp-session.js';
|
||||
|
||||
// ── Traffic event types (mirrors mcplocal's TrafficEvent) ──
|
||||
|
||||
/** Kinds of traffic events (mirrors mcplocal's TrafficEvent). */
export type TrafficEventType =
  | 'client_request'
  | 'client_response'
  | 'client_notification'
  | 'upstream_request'
  | 'upstream_response'
  | 'session_created'
  | 'session_closed';
|
||||
|
||||
/** A session currently visible in the observed (SSE) traffic feed. */
export interface ActiveSession {
  sessionId: string;
  projectName: string;
  // Session start time as reported by the server (string form).
  startedAt: string;
}
|
||||
|
||||
// ── Timeline ──
|
||||
|
||||
/** Timeline lane an event belongs to: the interactive session or observed (SSE) traffic. */
export type EventLane = 'interactive' | 'observed';
|
||||
|
||||
/** One entry in the console's traffic timeline. */
export interface TimelineEvent {
  // Numeric event id (see UnifiedConsoleState.nextEventId).
  id: number;
  timestamp: Date;
  lane: EventLane;
  eventType: TrafficEventType;
  // JSON-RPC method name, when the event carries one.
  method?: string | undefined;
  projectName: string;
  sessionId: string;
  upstreamName?: string | undefined;
  // Raw message payload for display.
  body: unknown;
  durationMs?: number | undefined;
  // Links related request/response events together, when present.
  correlationId?: string | undefined;
}
|
||||
|
||||
// ── Lane filter ──
|
||||
|
||||
/** Timeline filter: show all events or restrict to one lane. */
export type LaneFilter = 'all' | 'interactive' | 'observed';
|
||||
|
||||
// ── Action area ──
|
||||
|
||||
/** Parameters selected for a replay in the provenance view. */
export interface ReplayConfig {
  proxyModel: string;
  // Provider name, or null when none is selected.
  provider: string | null;
  // LLM model name, or null when none is selected.
  llmModel: string | null;
}
|
||||
|
||||
/** Outcome of a replay run. */
export interface ReplayResult {
  // Replayed output content.
  content: string;
  // Duration of the replay in milliseconds.
  durationMs: number;
  // Error message when the replay failed.
  error?: string | undefined;
}
|
||||
|
||||
/** Detailed description of a proxy model, for the provenance parameter panel. */
export interface ProxyModelDetails {
  name: string;
  // Whether the model ships with the server or was defined locally.
  source: 'built-in' | 'local';
  type?: 'pipeline' | 'plugin' | undefined;
  controller?: string | undefined;
  controllerConfig?: Record<string, unknown> | undefined;
  // Pipeline stages with their per-stage configuration, when type is 'pipeline'.
  stages?: Array<{ type: string; config?: Record<string, unknown> }> | undefined;
  appliesTo?: string[] | undefined;
  cacheable?: boolean | undefined;
  hooks?: string[] | undefined;
  // Names of models this one extends, if any.
  extends?: string[] | undefined;
  description?: string | undefined;
}
|
||||
|
||||
/** Substate shared by views that support in-pane text search. */
export interface SearchState {
  // True while the search input is active.
  searchMode: boolean;
  searchQuery: string;
  searchMatches: number[]; // line indices matching query
  searchMatchIdx: number; // current match index, -1 = none
}
|
||||
|
||||
/**
 * Discriminated union (on `type`) describing what the console's action
 * area is currently showing. The scrollable detail and provenance views
 * additionally carry SearchState for in-pane search.
 */
export type ActionState =
  | { type: 'none' }
  | { type: 'detail'; eventIdx: number; scrollOffset: number; horizontalOffset: number } & SearchState
  | {
      type: 'provenance';
      clientEventIdx: number;
      upstreamEvent: TimelineEvent | null;
      scrollOffset: number;
      horizontalOffset: number;
      focusedPanel: 'client' | 'upstream' | 'parameters' | 'preview';
      replayConfig: ReplayConfig;
      replayResult: ReplayResult | null;
      replayRunning: boolean;
      editingUpstream: boolean;
      editedContent: string;
      parameterIdx: number; // 0=ProxyModel, 1=Provider, 2=Model, 3=Live, 4=Server
      proxyModelDetails: ProxyModelDetails | null;
      liveOverride: boolean;
      serverList: string[];
      serverOverrides: Record<string, string>;
      selectedServerIdx: number; // -1 = project-wide, 0+ = specific server
      serverPickerOpen: boolean;
      modelPickerOpen: boolean;
      modelPickerIdx: number;
    } & SearchState
  | { type: 'tool-input'; tool: McpTool; loading: boolean }
  | { type: 'tool-browser' }
  | { type: 'resource-browser' }
  | { type: 'prompt-browser' }
  | { type: 'raw-jsonrpc' };
|
||||
|
||||
// ── Console state ──
|
||||
|
||||
/** Top-level state for the unified MCP console UI. */
export interface UnifiedConsoleState {
  // Connection
  phase: 'connecting' | 'ready' | 'error';
  error: string | null;

  // Interactive session
  session: McpSession | null;
  gated: boolean;
  initResult: InitializeResult | null;
  tools: McpTool[];
  resources: McpResource[];
  prompts: McpPrompt[];

  // Observed traffic (SSE)
  sseConnected: boolean;
  observedSessions: ActiveSession[];

  // Session sidebar
  showSidebar: boolean;
  selectedSessionIdx: number; // -2 = "New Session", -1 = all sessions, 0+ = sessions
  sidebarMode: 'sessions' | 'project-picker';
  availableProjects: string[];
  activeProjectName: string | null;

  // Toolbar
  toolbarFocusIdx: number; // -1 = not focused, 0-3 = which item

  // Timeline
  events: TimelineEvent[];
  focusedEventIdx: number; // -1 = auto-scroll
  nextEventId: number;
  laneFilter: LaneFilter;

  // Action area
  action: ActionState;

  // ProxyModel / LLM options (for provenance preview)
  availableModels: string[];
  availableProviders: string[];
  availableLlms: string[];

}
|
||||
|
||||
// Upper bound on timeline events (presumably a retention cap for
// UnifiedConsoleState.events — confirm at the usage site).
export const MAX_TIMELINE_EVENTS = 10_000;
|
||||
@@ -1,6 +1,5 @@
|
||||
import { Command } from 'commander';
|
||||
import { type ApiClient, ApiError } from '../api-client.js';
|
||||
import { resolveNameOrId } from './shared.js';
|
||||
export interface CreateCommandDeps {
|
||||
client: ApiClient;
|
||||
log: (...args: unknown[]) => void;
|
||||
@@ -56,15 +55,14 @@ export function createCreateCommand(deps: CreateCommandDeps): Command {
|
||||
const { client, log } = deps;
|
||||
|
||||
const cmd = new Command('create')
|
||||
.description('Create a resource (server, secret, project, user, group, rbac, serverattachment, prompt)');
|
||||
.description('Create a resource (server, secret, project, user, group, rbac)');
|
||||
|
||||
// --- create server ---
|
||||
cmd.command('server')
|
||||
.description('Create an MCP server definition')
|
||||
.argument('<name>', 'Server name (lowercase, hyphens allowed)')
|
||||
.option('-d, --description <text>', 'Server description')
|
||||
.option('--package-name <name>', 'Package name (npm, PyPI, Go module, etc.)')
|
||||
.option('--runtime <type>', 'Package runtime (node, python, go — default: node)')
|
||||
.option('--package-name <name>', 'NPM package name')
|
||||
.option('--docker-image <image>', 'Docker image')
|
||||
.option('--transport <type>', 'Transport type (STDIO, SSE, STREAMABLE_HTTP)')
|
||||
.option('--repository-url <url>', 'Source repository URL')
|
||||
@@ -74,7 +72,6 @@ export function createCreateCommand(deps: CreateCommandDeps): Command {
|
||||
.option('--replicas <count>', 'Number of replicas')
|
||||
.option('--env <entry>', 'Env var: KEY=value (inline) or KEY=secretRef:SECRET:KEY (secret ref, repeat for multiple)', collect, [])
|
||||
.option('--from-template <name>', 'Create from template (name or name:version)')
|
||||
.option('--env-from-secret <secret>', 'Map template env vars from a secret')
|
||||
.option('--force', 'Update if already exists')
|
||||
.action(async (name: string, opts) => {
|
||||
let base: Record<string, unknown> = {};
|
||||
@@ -106,33 +103,7 @@ export function createCreateCommand(deps: CreateCommandDeps): Command {
|
||||
// Convert template env (description/required) to server env (name/value/valueFrom)
|
||||
const tplEnv = template.env as Array<{ name: string; description?: string; required?: boolean; defaultValue?: string }> | undefined;
|
||||
if (tplEnv && tplEnv.length > 0) {
|
||||
if (opts.envFromSecret) {
|
||||
// --env-from-secret: map all template env vars from the specified secret
|
||||
const secretName = opts.envFromSecret as string;
|
||||
const secrets = await client.get<Array<{ name: string; data: Record<string, string> }>>('/api/v1/secrets');
|
||||
const secret = secrets.find((s) => s.name === secretName);
|
||||
if (!secret) throw new Error(`Secret '${secretName}' not found`);
|
||||
|
||||
const missing = tplEnv
|
||||
.filter((e) => e.required !== false && !(e.name in secret.data))
|
||||
.map((e) => e.name);
|
||||
if (missing.length > 0) {
|
||||
throw new Error(
|
||||
`Secret '${secretName}' is missing required keys: ${missing.join(', ')}\n` +
|
||||
`Secret has: ${Object.keys(secret.data).join(', ')}`,
|
||||
);
|
||||
}
|
||||
|
||||
base.env = tplEnv.map((e) => {
|
||||
if (e.name in secret.data) {
|
||||
return { name: e.name, valueFrom: { secretRef: { name: secretName, key: e.name } } };
|
||||
}
|
||||
return { name: e.name, value: e.defaultValue ?? '' };
|
||||
});
|
||||
log(`Mapped ${tplEnv.filter((e) => e.name in secret.data).length} env var(s) from secret '${secretName}'`);
|
||||
} else {
|
||||
base.env = tplEnv.map((e) => ({ name: e.name, value: e.defaultValue ?? '' }));
|
||||
}
|
||||
base.env = tplEnv.map((e) => ({ name: e.name, value: e.defaultValue ?? '' }));
|
||||
}
|
||||
|
||||
// Track template origin
|
||||
@@ -149,7 +120,6 @@ export function createCreateCommand(deps: CreateCommandDeps): Command {
|
||||
if (opts.transport) body.transport = opts.transport;
|
||||
if (opts.replicas) body.replicas = parseInt(opts.replicas, 10);
|
||||
if (opts.packageName) body.packageName = opts.packageName;
|
||||
if (opts.runtime) body.runtime = opts.runtime;
|
||||
if (opts.dockerImage) body.dockerImage = opts.dockerImage;
|
||||
if (opts.repositoryUrl) body.repositoryUrl = opts.repositoryUrl;
|
||||
if (opts.externalUrl) body.externalUrl = opts.externalUrl;
|
||||
@@ -225,27 +195,22 @@ export function createCreateCommand(deps: CreateCommandDeps): Command {
|
||||
.description('Create a project')
|
||||
.argument('<name>', 'Project name')
|
||||
.option('-d, --description <text>', 'Project description', '')
|
||||
.option('--proxy-model <name>', 'Plugin name (default, content-pipeline, gate, none)')
|
||||
.option('--prompt <text>', 'Project-level prompt / instructions for the LLM')
|
||||
.option('--gated', '[deprecated: use --proxy-model default]')
|
||||
.option('--no-gated', '[deprecated: use --proxy-model content-pipeline]')
|
||||
.option('--proxy-mode <mode>', 'Proxy mode (direct, filtered)')
|
||||
.option('--llm-provider <name>', 'LLM provider name')
|
||||
.option('--llm-model <name>', 'LLM model name')
|
||||
.option('--server <name>', 'Server name (repeat for multiple)', collect, [])
|
||||
.option('--member <email>', 'Member email (repeat for multiple)', collect, [])
|
||||
.option('--force', 'Update if already exists')
|
||||
.action(async (name: string, opts) => {
|
||||
const body: Record<string, unknown> = {
|
||||
name,
|
||||
description: opts.description,
|
||||
proxyMode: opts.proxyMode ?? 'direct',
|
||||
};
|
||||
if (opts.prompt) body.prompt = opts.prompt;
|
||||
if (opts.proxyModel) {
|
||||
body.proxyModel = opts.proxyModel;
|
||||
} else if (opts.gated === false) {
|
||||
// Backward compat: --no-gated → proxyModel: content-pipeline
|
||||
body.proxyModel = 'content-pipeline';
|
||||
}
|
||||
// Pass gated for backward compat with older mcpd
|
||||
if (opts.gated !== undefined) body.gated = opts.gated as boolean;
|
||||
if (opts.llmProvider) body.llmProvider = opts.llmProvider;
|
||||
if (opts.llmModel) body.llmModel = opts.llmModel;
|
||||
if (opts.server.length > 0) body.servers = opts.server;
|
||||
if (opts.member.length > 0) body.members = opts.member;
|
||||
|
||||
try {
|
||||
const project = await client.post<{ id: string; name: string }>('/api/v1/projects', body);
|
||||
@@ -384,105 +349,5 @@ export function createCreateCommand(deps: CreateCommandDeps): Command {
|
||||
}
|
||||
});
|
||||
|
||||
// --- create prompt ---
|
||||
cmd.command('prompt')
|
||||
.description('Create an approved prompt')
|
||||
.argument('<name>', 'Prompt name (lowercase alphanumeric with hyphens)')
|
||||
.option('-p, --project <name>', 'Project name to scope the prompt to')
|
||||
.option('--content <text>', 'Prompt content text')
|
||||
.option('--content-file <path>', 'Read prompt content from file')
|
||||
.option('--priority <number>', 'Priority 1-10 (default: 5, higher = more important)')
|
||||
.option('--link <target>', 'Link to MCP resource (format: project/server:uri)')
|
||||
.action(async (name: string, opts) => {
|
||||
let content = opts.content as string | undefined;
|
||||
if (opts.contentFile) {
|
||||
const fs = await import('node:fs/promises');
|
||||
content = await fs.readFile(opts.contentFile as string, 'utf-8');
|
||||
}
|
||||
// For linked prompts, auto-generate placeholder content if none provided
|
||||
if (!content && opts.link) {
|
||||
content = `Linked prompt — content fetched from ${opts.link as string}`;
|
||||
}
|
||||
if (!content) {
|
||||
throw new Error('--content or --content-file is required');
|
||||
}
|
||||
|
||||
const body: Record<string, unknown> = { name, content };
|
||||
if (opts.project) {
|
||||
// Resolve project name to ID
|
||||
const projects = await client.get<Array<{ id: string; name: string }>>('/api/v1/projects');
|
||||
const project = projects.find((p) => p.name === opts.project);
|
||||
if (!project) throw new Error(`Project '${opts.project as string}' not found`);
|
||||
body.projectId = project.id;
|
||||
}
|
||||
if (opts.priority) {
|
||||
const priority = Number(opts.priority);
|
||||
if (isNaN(priority) || priority < 1 || priority > 10) {
|
||||
throw new Error('--priority must be a number between 1 and 10');
|
||||
}
|
||||
body.priority = priority;
|
||||
}
|
||||
if (opts.link) {
|
||||
body.linkTarget = opts.link;
|
||||
}
|
||||
|
||||
const prompt = await client.post<{ id: string; name: string }>('/api/v1/prompts', body);
|
||||
log(`prompt '${prompt.name}' created (id: ${prompt.id})`);
|
||||
});
|
||||
|
||||
// --- create serverattachment ---
|
||||
cmd.command('serverattachment')
|
||||
.alias('sa')
|
||||
.description('Attach a server to a project')
|
||||
.argument('<server>', 'Server name')
|
||||
.option('-p, --project <name>', 'Project name')
|
||||
.action(async (serverName: string, opts) => {
|
||||
const projectName = opts.project as string | undefined;
|
||||
if (!projectName) {
|
||||
throw new Error('--project is required. Usage: mcpctl create serverattachment <server> --project <name>');
|
||||
}
|
||||
const projectId = await resolveNameOrId(client, 'projects', projectName);
|
||||
await client.post(`/api/v1/projects/${projectId}/servers`, { server: serverName });
|
||||
log(`server '${serverName}' attached to project '${projectName}'`);
|
||||
});
|
||||
|
||||
// --- create promptrequest ---
|
||||
cmd.command('promptrequest')
|
||||
.description('Create a prompt request (pending proposal that needs approval)')
|
||||
.argument('<name>', 'Prompt request name (lowercase alphanumeric with hyphens)')
|
||||
.option('-p, --project <name>', 'Project name to scope the prompt request to')
|
||||
.option('--content <text>', 'Prompt content text')
|
||||
.option('--content-file <path>', 'Read prompt content from file')
|
||||
.option('--priority <number>', 'Priority 1-10 (default: 5, higher = more important)')
|
||||
.action(async (name: string, opts) => {
|
||||
let content = opts.content as string | undefined;
|
||||
if (opts.contentFile) {
|
||||
const fs = await import('node:fs/promises');
|
||||
content = await fs.readFile(opts.contentFile as string, 'utf-8');
|
||||
}
|
||||
if (!content) {
|
||||
throw new Error('--content or --content-file is required');
|
||||
}
|
||||
|
||||
const body: Record<string, unknown> = { name, content };
|
||||
if (opts.project) {
|
||||
body.project = opts.project;
|
||||
}
|
||||
if (opts.priority) {
|
||||
const priority = Number(opts.priority);
|
||||
if (isNaN(priority) || priority < 1 || priority > 10) {
|
||||
throw new Error('--priority must be a number between 1 and 10');
|
||||
}
|
||||
body.priority = priority;
|
||||
}
|
||||
|
||||
const pr = await client.post<{ id: string; name: string }>(
|
||||
'/api/v1/promptrequests',
|
||||
body,
|
||||
);
|
||||
log(`prompt request '${pr.name}' created (id: ${pr.id})`);
|
||||
log(` approve with: mcpctl approve promptrequest ${pr.name}`);
|
||||
});
|
||||
|
||||
return cmd;
|
||||
}
|
||||
|
||||
@@ -14,21 +14,9 @@ export function createDeleteCommand(deps: DeleteCommandDeps): Command {
|
||||
.description('Delete a resource (server, instance, secret, project, user, group, rbac)')
|
||||
.argument('<resource>', 'resource type')
|
||||
.argument('<id>', 'resource ID or name')
|
||||
.option('-p, --project <name>', 'Project name (for serverattachment)')
|
||||
.action(async (resourceArg: string, idOrName: string, opts: { project?: string }) => {
|
||||
.action(async (resourceArg: string, idOrName: string) => {
|
||||
const resource = resolveResource(resourceArg);
|
||||
|
||||
// Serverattachments: delete serverattachment <server> --project <project>
|
||||
if (resource === 'serverattachments') {
|
||||
if (!opts.project) {
|
||||
throw new Error('--project is required. Usage: mcpctl delete serverattachment <server> --project <name>');
|
||||
}
|
||||
const projectId = await resolveNameOrId(client, 'projects', opts.project);
|
||||
await client.delete(`/api/v1/projects/${projectId}/servers/${idOrName}`);
|
||||
log(`server '${idOrName}' detached from project '${opts.project}'`);
|
||||
return;
|
||||
}
|
||||
|
||||
// Resolve name → ID for any resource type
|
||||
let id: string;
|
||||
try {
|
||||
|
||||
@@ -8,7 +8,6 @@ export interface DescribeCommandDeps {
|
||||
fetchResource: (resource: string, id: string) => Promise<unknown>;
|
||||
fetchInspect?: (id: string) => Promise<unknown>;
|
||||
log: (...args: string[]) => void;
|
||||
mcplocalUrl?: string;
|
||||
}
|
||||
|
||||
function pad(label: string, width = 18): string {
|
||||
@@ -134,25 +133,23 @@ function formatInstanceDetail(instance: Record<string, unknown>, inspect?: Recor
|
||||
return lines.join('\n');
|
||||
}
|
||||
|
||||
function formatProjectDetail(
|
||||
project: Record<string, unknown>,
|
||||
prompts: Array<{ name: string; priority: number; linkTarget: string | null }> = [],
|
||||
): string {
|
||||
function formatProjectDetail(project: Record<string, unknown>): string {
|
||||
const lines: string[] = [];
|
||||
lines.push(`=== Project: ${project.name} ===`);
|
||||
lines.push(`${pad('Name:')}${project.name}`);
|
||||
if (project.description) lines.push(`${pad('Description:')}${project.description}`);
|
||||
|
||||
// Plugin config
|
||||
const proxyModel = (project.proxyModel as string | undefined) || 'default';
|
||||
// Proxy config section
|
||||
const proxyMode = project.proxyMode as string | undefined;
|
||||
const llmProvider = project.llmProvider as string | undefined;
|
||||
const llmModel = project.llmModel as string | undefined;
|
||||
|
||||
lines.push('');
|
||||
lines.push('Plugin Config:');
|
||||
lines.push(` ${pad('Plugin:', 18)}${proxyModel}`);
|
||||
if (llmProvider) lines.push(` ${pad('LLM Provider:', 18)}${llmProvider}`);
|
||||
if (llmModel) lines.push(` ${pad('LLM Model:', 18)}${llmModel}`);
|
||||
if (proxyMode || llmProvider || llmModel) {
|
||||
lines.push('');
|
||||
lines.push('Proxy Config:');
|
||||
lines.push(` ${pad('Mode:', 18)}${proxyMode ?? 'direct'}`);
|
||||
if (llmProvider) lines.push(` ${pad('LLM Provider:', 18)}${llmProvider}`);
|
||||
if (llmModel) lines.push(` ${pad('LLM Model:', 18)}${llmModel}`);
|
||||
}
|
||||
|
||||
// Servers section
|
||||
const servers = project.servers as Array<{ server: { name: string } }> | undefined;
|
||||
@@ -165,15 +162,14 @@ function formatProjectDetail(
|
||||
}
|
||||
}
|
||||
|
||||
// Prompts section
|
||||
if (prompts.length > 0) {
|
||||
// Members section (no role — all permissions are in RBAC)
|
||||
const members = project.members as Array<{ user: { email: string } }> | undefined;
|
||||
if (members && members.length > 0) {
|
||||
lines.push('');
|
||||
lines.push('Prompts:');
|
||||
const nameW = Math.max(4, ...prompts.map((p) => p.name.length)) + 2;
|
||||
lines.push(` ${'NAME'.padEnd(nameW)}${'PRI'.padEnd(6)}TYPE`);
|
||||
for (const p of prompts) {
|
||||
const type = p.linkTarget ? 'link' : 'local';
|
||||
lines.push(` ${p.name.padEnd(nameW)}${String(p.priority).padEnd(6)}${type}`);
|
||||
lines.push('Members:');
|
||||
lines.push(' EMAIL');
|
||||
for (const m of members) {
|
||||
lines.push(` ${m.user.email}`);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -503,156 +499,6 @@ function formatRbacDetail(rbac: Record<string, unknown>): string {
|
||||
return lines.join('\n');
|
||||
}
|
||||
|
||||
async function formatPromptDetail(prompt: Record<string, unknown>, client?: ApiClient): Promise<string> {
|
||||
const lines: string[] = [];
|
||||
lines.push(`=== Prompt: ${prompt.name} ===`);
|
||||
lines.push(`${pad('Name:')}${prompt.name}`);
|
||||
|
||||
const proj = prompt.project as { name: string } | null | undefined;
|
||||
lines.push(`${pad('Project:')}${proj?.name ?? (prompt.projectId ? String(prompt.projectId) : '(global)')}`);
|
||||
lines.push(`${pad('Priority:')}${prompt.priority ?? 5}`);
|
||||
|
||||
// Link info
|
||||
const link = prompt.linkTarget as string | null | undefined;
|
||||
if (link) {
|
||||
lines.push('');
|
||||
lines.push('Link:');
|
||||
lines.push(` ${pad('Target:', 12)}${link}`);
|
||||
const status = prompt.linkStatus as string | null | undefined;
|
||||
if (status) lines.push(` ${pad('Status:', 12)}${status}`);
|
||||
}
|
||||
|
||||
// Content — resolve linked content if possible
|
||||
let content = prompt.content as string | undefined;
|
||||
if (link && client) {
|
||||
const resolved = await resolveLink(link, client);
|
||||
if (resolved) content = resolved;
|
||||
}
|
||||
|
||||
lines.push('');
|
||||
lines.push('Content:');
|
||||
if (content) {
|
||||
// Indent content with 2 spaces for readability
|
||||
for (const line of content.split('\n')) {
|
||||
lines.push(` ${line}`);
|
||||
}
|
||||
} else {
|
||||
lines.push(' (no content)');
|
||||
}
|
||||
|
||||
lines.push('');
|
||||
lines.push('Metadata:');
|
||||
lines.push(` ${pad('ID:', 12)}${prompt.id}`);
|
||||
if (prompt.version) lines.push(` ${pad('Version:', 12)}${prompt.version}`);
|
||||
if (prompt.createdAt) lines.push(` ${pad('Created:', 12)}${prompt.createdAt}`);
|
||||
if (prompt.updatedAt) lines.push(` ${pad('Updated:', 12)}${prompt.updatedAt}`);
|
||||
|
||||
return lines.join('\n');
|
||||
}
|
||||
|
||||
/**
|
||||
* Resolve a prompt link target via mcpd proxy's resources/read.
|
||||
* Returns resolved content string or null on failure.
|
||||
*/
|
||||
async function resolveLink(linkTarget: string, client: ApiClient): Promise<string | null> {
|
||||
try {
|
||||
// Parse link: project/server:uri
|
||||
const slashIdx = linkTarget.indexOf('/');
|
||||
if (slashIdx < 1) return null;
|
||||
const project = linkTarget.slice(0, slashIdx);
|
||||
const rest = linkTarget.slice(slashIdx + 1);
|
||||
const colonIdx = rest.indexOf(':');
|
||||
if (colonIdx < 1) return null;
|
||||
const serverName = rest.slice(0, colonIdx);
|
||||
const uri = rest.slice(colonIdx + 1);
|
||||
|
||||
// Resolve server name → ID
|
||||
const servers = await client.get<Array<{ id: string; name: string }>>(
|
||||
`/api/v1/projects/${encodeURIComponent(project)}/servers`,
|
||||
);
|
||||
const target = servers.find((s) => s.name === serverName);
|
||||
if (!target) return null;
|
||||
|
||||
// Call resources/read via proxy
|
||||
const proxyResponse = await client.post<{
|
||||
result?: { contents?: Array<{ text?: string }> };
|
||||
error?: { code: number; message: string };
|
||||
}>('/api/v1/mcp/proxy', {
|
||||
serverId: target.id,
|
||||
method: 'resources/read',
|
||||
params: { uri },
|
||||
});
|
||||
|
||||
if (proxyResponse.error) return null;
|
||||
const contents = proxyResponse.result?.contents;
|
||||
if (!contents || contents.length === 0) return null;
|
||||
return contents.map((c) => c.text ?? '').join('\n');
|
||||
} catch {
|
||||
return null; // Silently fall back to stored content
|
||||
}
|
||||
}
|
||||
|
||||
function formatProxymodelDetail(model: Record<string, unknown>): string {
|
||||
const lines: string[] = [];
|
||||
const modelType = (model.type as string | undefined) ?? 'pipeline';
|
||||
lines.push(`=== ProxyModel: ${model.name} ===`);
|
||||
lines.push(`${pad('Name:')}${model.name}`);
|
||||
lines.push(`${pad('Source:')}${model.source ?? 'unknown'}`);
|
||||
lines.push(`${pad('Type:')}${modelType}`);
|
||||
|
||||
if (modelType === 'plugin') {
|
||||
if (model.description) lines.push(`${pad('Description:')}${model.description}`);
|
||||
const extendsArr = model.extends as readonly string[] | undefined;
|
||||
if (extendsArr && extendsArr.length > 0) {
|
||||
lines.push(`${pad('Extends:')}${[...extendsArr].join(', ')}`);
|
||||
}
|
||||
const hooks = model.hooks as string[] | undefined;
|
||||
if (hooks && hooks.length > 0) {
|
||||
lines.push('');
|
||||
lines.push('Hooks:');
|
||||
for (const h of hooks) {
|
||||
lines.push(` - ${h}`);
|
||||
}
|
||||
}
|
||||
return lines.join('\n');
|
||||
}
|
||||
|
||||
// Pipeline type
|
||||
lines.push(`${pad('Controller:')}${model.controller ?? '-'}`);
|
||||
lines.push(`${pad('Cacheable:')}${model.cacheable ? 'yes' : 'no'}`);
|
||||
|
||||
const appliesTo = model.appliesTo as string[] | undefined;
|
||||
if (appliesTo && appliesTo.length > 0) {
|
||||
lines.push(`${pad('Applies To:')}${appliesTo.join(', ')}`);
|
||||
}
|
||||
|
||||
const controllerConfig = model.controllerConfig as Record<string, unknown> | undefined;
|
||||
if (controllerConfig && Object.keys(controllerConfig).length > 0) {
|
||||
lines.push('');
|
||||
lines.push('Controller Config:');
|
||||
for (const [key, value] of Object.entries(controllerConfig)) {
|
||||
lines.push(` ${pad(key + ':', 20)}${String(value)}`);
|
||||
}
|
||||
}
|
||||
|
||||
const stages = model.stages as Array<{ type: string; config?: Record<string, unknown> }> | undefined;
|
||||
if (stages && stages.length > 0) {
|
||||
lines.push('');
|
||||
lines.push('Stages:');
|
||||
for (let i = 0; i < stages.length; i++) {
|
||||
const s = stages[i]!;
|
||||
lines.push(` ${i + 1}. ${s.type}`);
|
||||
if (s.config && Object.keys(s.config).length > 0) {
|
||||
for (const [key, value] of Object.entries(s.config)) {
|
||||
lines.push(` ${pad(key + ':', 20)}${String(value)}`);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return lines.join('\n');
|
||||
}
|
||||
|
||||
function formatGenericDetail(obj: Record<string, unknown>): string {
|
||||
const lines: string[] = [];
|
||||
for (const [key, value] of Object.entries(obj)) {
|
||||
@@ -689,20 +535,6 @@ export function createDescribeCommand(deps: DescribeCommandDeps): Command {
|
||||
.action(async (resourceArg: string, idOrName: string, opts: { output: string; showValues?: boolean }) => {
|
||||
const resource = resolveResource(resourceArg);
|
||||
|
||||
// ProxyModels are served by mcplocal, not mcpd
|
||||
if (resource === 'proxymodels') {
|
||||
const mcplocalUrl = deps.mcplocalUrl ?? 'http://localhost:3200';
|
||||
const item = await fetchProxymodelFromMcplocal(mcplocalUrl, idOrName);
|
||||
if (opts.output === 'json') {
|
||||
deps.log(formatJson(item));
|
||||
} else if (opts.output === 'yaml') {
|
||||
deps.log(formatYaml(item));
|
||||
} else {
|
||||
deps.log(formatProxymodelDetail(item));
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
// Resolve name → ID
|
||||
let id: string;
|
||||
if (resource === 'instances') {
|
||||
@@ -726,15 +558,10 @@ export function createDescribeCommand(deps: DescribeCommandDeps): Command {
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Prompts/promptrequests: let fetchResource handle scoping (it respects --project)
|
||||
if (resource === 'prompts' || resource === 'promptrequests') {
|
||||
try {
|
||||
id = await resolveNameOrId(deps.client, resource, idOrName);
|
||||
} catch {
|
||||
id = idOrName;
|
||||
} else {
|
||||
try {
|
||||
id = await resolveNameOrId(deps.client, resource, idOrName);
|
||||
} catch {
|
||||
id = idOrName;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -770,13 +597,9 @@ export function createDescribeCommand(deps: DescribeCommandDeps): Command {
|
||||
case 'templates':
|
||||
deps.log(formatTemplateDetail(item));
|
||||
break;
|
||||
case 'projects': {
|
||||
const projectPrompts = await deps.client
|
||||
.get<Array<{ name: string; priority: number; linkTarget: string | null }>>(`/api/v1/prompts?projectId=${item.id as string}`)
|
||||
.catch(() => []);
|
||||
deps.log(formatProjectDetail(item, projectPrompts));
|
||||
case 'projects':
|
||||
deps.log(formatProjectDetail(item));
|
||||
break;
|
||||
}
|
||||
case 'users': {
|
||||
// Fetch RBAC definitions and groups to show permissions
|
||||
const [rbacDefsForUser, allGroupsForUser] = await Promise.all([
|
||||
@@ -798,37 +621,9 @@ export function createDescribeCommand(deps: DescribeCommandDeps): Command {
|
||||
case 'rbac':
|
||||
deps.log(formatRbacDetail(item));
|
||||
break;
|
||||
case 'prompts':
|
||||
deps.log(await formatPromptDetail(item, deps.client));
|
||||
break;
|
||||
default:
|
||||
deps.log(formatGenericDetail(item));
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
async function fetchProxymodelFromMcplocal(mcplocalUrl: string, name: string): Promise<Record<string, unknown>> {
|
||||
const http = await import('node:http');
|
||||
const url = `${mcplocalUrl}/proxymodels/${encodeURIComponent(name)}`;
|
||||
|
||||
return new Promise<Record<string, unknown>>((resolve, reject) => {
|
||||
const req = http.get(url, { timeout: 5000 }, (res) => {
|
||||
let data = '';
|
||||
res.on('data', (chunk: Buffer) => { data += chunk.toString(); });
|
||||
res.on('end', () => {
|
||||
try {
|
||||
if (res.statusCode === 404) {
|
||||
reject(new Error(`ProxyModel '${name}' not found`));
|
||||
return;
|
||||
}
|
||||
resolve(JSON.parse(data) as Record<string, unknown>);
|
||||
} catch {
|
||||
reject(new Error('Invalid response from mcplocal'));
|
||||
}
|
||||
});
|
||||
});
|
||||
req.on('error', () => reject(new Error(`Cannot connect to mcplocal at ${mcplocalUrl}`)));
|
||||
req.on('timeout', () => { req.destroy(); reject(new Error('mcplocal request timed out')); });
|
||||
});
|
||||
}
|
||||
|
||||
@@ -6,7 +6,6 @@ import { execSync } from 'node:child_process';
|
||||
import yaml from 'js-yaml';
|
||||
import type { ApiClient } from '../api-client.js';
|
||||
import { resolveResource, resolveNameOrId, stripInternalFields } from './shared.js';
|
||||
import { reorderKeys } from '../formatters/output.js';
|
||||
|
||||
export interface EditCommandDeps {
|
||||
client: ApiClient;
|
||||
@@ -48,7 +47,7 @@ export function createEditCommand(deps: EditCommandDeps): Command {
|
||||
return;
|
||||
}
|
||||
|
||||
const validResources = ['servers', 'secrets', 'projects', 'groups', 'rbac', 'prompts', 'promptrequests'];
|
||||
const validResources = ['servers', 'secrets', 'projects', 'groups', 'rbac'];
|
||||
if (!validResources.includes(resource)) {
|
||||
log(`Error: unknown resource type '${resourceArg}'`);
|
||||
process.exitCode = 1;
|
||||
@@ -62,7 +61,7 @@ export function createEditCommand(deps: EditCommandDeps): Command {
|
||||
const current = await client.get<Record<string, unknown>>(`/api/v1/${resource}/${id}`);
|
||||
|
||||
// Strip read-only fields for editor
|
||||
const editable = reorderKeys(stripInternalFields(current)) as Record<string, unknown>;
|
||||
const editable = stripInternalFields(current);
|
||||
|
||||
// Serialize to YAML
|
||||
const singular = resource.replace(/s$/, '');
|
||||
|
||||
@@ -1,14 +1,12 @@
|
||||
import { Command } from 'commander';
|
||||
import { formatTable } from '../formatters/table.js';
|
||||
import { formatJson, formatYamlMultiDoc } from '../formatters/output.js';
|
||||
import { formatJson, formatYaml } from '../formatters/output.js';
|
||||
import type { Column } from '../formatters/table.js';
|
||||
import { resolveResource, stripInternalFields } from './shared.js';
|
||||
|
||||
export interface GetCommandDeps {
|
||||
fetchResource: (resource: string, id?: string, opts?: { project?: string; all?: boolean }) => Promise<unknown[]>;
|
||||
fetchResource: (resource: string, id?: string) => Promise<unknown[]>;
|
||||
log: (...args: string[]) => void;
|
||||
getProject?: () => string | undefined;
|
||||
mcplocalUrl?: string;
|
||||
}
|
||||
|
||||
interface ServerRow {
|
||||
@@ -23,10 +21,10 @@ interface ProjectRow {
|
||||
id: string;
|
||||
name: string;
|
||||
description: string;
|
||||
proxyModel: string;
|
||||
gated?: boolean;
|
||||
proxyMode: string;
|
||||
ownerId: string;
|
||||
servers?: Array<{ server: { name: string } }>;
|
||||
members?: Array<{ user: { email: string }; role: string }>;
|
||||
}
|
||||
|
||||
interface SecretRow {
|
||||
@@ -85,8 +83,9 @@ interface RbacRow {
|
||||
|
||||
const projectColumns: Column<ProjectRow>[] = [
|
||||
{ header: 'NAME', key: 'name' },
|
||||
{ header: 'PLUGIN', key: (r) => r.proxyModel || 'default', width: 18 },
|
||||
{ header: 'MODE', key: (r) => r.proxyMode ?? 'direct', width: 10 },
|
||||
{ header: 'SERVERS', key: (r) => r.servers ? String(r.servers.length) : '0', width: 8 },
|
||||
{ header: 'MEMBERS', key: (r) => r.members ? String(r.members.length) : '0', width: 8 },
|
||||
{ header: 'DESCRIPTION', key: 'description', width: 30 },
|
||||
{ header: 'ID', key: 'id' },
|
||||
];
|
||||
@@ -133,44 +132,6 @@ const templateColumns: Column<TemplateRow>[] = [
|
||||
{ header: 'DESCRIPTION', key: 'description', width: 50 },
|
||||
];
|
||||
|
||||
interface PromptRow {
|
||||
id: string;
|
||||
name: string;
|
||||
projectId: string | null;
|
||||
project?: { name: string } | null;
|
||||
priority: number;
|
||||
linkTarget: string | null;
|
||||
linkStatus: 'alive' | 'dead' | null;
|
||||
createdAt: string;
|
||||
}
|
||||
|
||||
interface PromptRequestRow {
|
||||
id: string;
|
||||
name: string;
|
||||
projectId: string | null;
|
||||
project?: { name: string } | null;
|
||||
createdBySession: string | null;
|
||||
createdAt: string;
|
||||
}
|
||||
|
||||
const promptColumns: Column<PromptRow>[] = [
|
||||
{ header: 'NAME', key: 'name' },
|
||||
{ header: 'PROJECT', key: (r) => r.project?.name ?? (r.projectId ? r.projectId : '(global)'), width: 20 },
|
||||
{ header: 'PRI', key: (r) => String(r.priority), width: 4 },
|
||||
{ header: 'LINK', key: (r) => r.linkTarget ? r.linkTarget.split(':')[0]! : '-', width: 20 },
|
||||
{ header: 'STATUS', key: (r) => r.linkStatus ?? '-', width: 6 },
|
||||
{ header: 'CREATED', key: (r) => new Date(r.createdAt).toLocaleString(), width: 20 },
|
||||
{ header: 'ID', key: 'id' },
|
||||
];
|
||||
|
||||
const promptRequestColumns: Column<PromptRequestRow>[] = [
|
||||
{ header: 'NAME', key: 'name' },
|
||||
{ header: 'PROJECT', key: (r) => r.project?.name ?? (r.projectId ? r.projectId : '(global)'), width: 20 },
|
||||
{ header: 'SESSION', key: (r) => r.createdBySession ? r.createdBySession.slice(0, 12) : '-', width: 14 },
|
||||
{ header: 'CREATED', key: (r) => new Date(r.createdAt).toLocaleString(), width: 20 },
|
||||
{ header: 'ID', key: 'id' },
|
||||
];
|
||||
|
||||
const instanceColumns: Column<InstanceRow>[] = [
|
||||
{ header: 'NAME', key: (r) => r.server?.name ?? '-', width: 20 },
|
||||
{ header: 'STATUS', key: 'status', width: 10 },
|
||||
@@ -180,42 +141,6 @@ const instanceColumns: Column<InstanceRow>[] = [
|
||||
{ header: 'ID', key: 'id' },
|
||||
];
|
||||
|
||||
interface ServerAttachmentRow {
|
||||
project: string;
|
||||
server: string;
|
||||
}
|
||||
|
||||
const serverAttachmentColumns: Column<ServerAttachmentRow>[] = [
|
||||
{ header: 'SERVER', key: 'server', width: 25 },
|
||||
{ header: 'PROJECT', key: 'project', width: 25 },
|
||||
];
|
||||
|
||||
interface ProxymodelRow {
|
||||
name: string;
|
||||
source: string;
|
||||
type?: string;
|
||||
controller?: string;
|
||||
stages?: string[];
|
||||
cacheable?: boolean;
|
||||
extends?: readonly string[];
|
||||
hooks?: string[];
|
||||
description?: string;
|
||||
}
|
||||
|
||||
const proxymodelColumns: Column<ProxymodelRow>[] = [
|
||||
{ header: 'NAME', key: 'name' },
|
||||
{ header: 'TYPE', key: (r) => r.type ?? 'pipeline', width: 10 },
|
||||
{ header: 'SOURCE', key: 'source', width: 10 },
|
||||
{ header: 'DETAIL', key: (r) => {
|
||||
if (r.type === 'plugin') {
|
||||
const ext = r.extends?.length ? `extends: ${[...r.extends].join(', ')}` : '';
|
||||
const hooks = r.hooks?.length ? `hooks: ${r.hooks.length}` : '';
|
||||
return [ext, hooks].filter(Boolean).join(' | ') || '-';
|
||||
}
|
||||
return r.stages?.join(', ') ?? '-';
|
||||
}, width: 45 },
|
||||
];
|
||||
|
||||
function getColumnsForResource(resource: string): Column<Record<string, unknown>>[] {
|
||||
switch (resource) {
|
||||
case 'servers':
|
||||
@@ -234,14 +159,6 @@ function getColumnsForResource(resource: string): Column<Record<string, unknown>
|
||||
return groupColumns as unknown as Column<Record<string, unknown>>[];
|
||||
case 'rbac':
|
||||
return rbacColumns as unknown as Column<Record<string, unknown>>[];
|
||||
case 'prompts':
|
||||
return promptColumns as unknown as Column<Record<string, unknown>>[];
|
||||
case 'promptrequests':
|
||||
return promptRequestColumns as unknown as Column<Record<string, unknown>>[];
|
||||
case 'serverattachments':
|
||||
return serverAttachmentColumns as unknown as Column<Record<string, unknown>>[];
|
||||
case 'proxymodels':
|
||||
return proxymodelColumns as unknown as Column<Record<string, unknown>>[];
|
||||
default:
|
||||
return [
|
||||
{ header: 'ID', key: 'id' as keyof Record<string, unknown> },
|
||||
@@ -250,80 +167,33 @@ function getColumnsForResource(resource: string): Column<Record<string, unknown>
|
||||
}
|
||||
}
|
||||
|
||||
/** Map plural resource name → singular kind for YAML documents */
|
||||
const RESOURCE_KIND: Record<string, string> = {
|
||||
servers: 'server',
|
||||
projects: 'project',
|
||||
secrets: 'secret',
|
||||
templates: 'template',
|
||||
instances: 'instance',
|
||||
users: 'user',
|
||||
groups: 'group',
|
||||
rbac: 'rbac',
|
||||
prompts: 'prompt',
|
||||
promptrequests: 'promptrequest',
|
||||
serverattachments: 'serverattachment',
|
||||
};
|
||||
|
||||
/**
|
||||
* Transform API response items into apply-compatible multi-doc format.
|
||||
* Each item gets a `kind` field and internal fields stripped.
|
||||
* Transform API response items into apply-compatible format.
|
||||
* Strips internal fields and wraps in the resource key.
|
||||
*/
|
||||
function toApplyDocs(resource: string, items: unknown[]): Array<{ kind: string } & Record<string, unknown>> {
|
||||
const kind = RESOURCE_KIND[resource] ?? resource;
|
||||
return items.map((item) => {
|
||||
const cleaned = stripInternalFields(item as Record<string, unknown>);
|
||||
return { kind, ...cleaned };
|
||||
function toApplyFormat(resource: string, items: unknown[]): Record<string, unknown[]> {
|
||||
const cleaned = items.map((item) => {
|
||||
return stripInternalFields(item as Record<string, unknown>);
|
||||
});
|
||||
return { [resource]: cleaned };
|
||||
}
|
||||
|
||||
export function createGetCommand(deps: GetCommandDeps): Command {
|
||||
return new Command('get')
|
||||
.description('List resources (servers, projects, instances, all)')
|
||||
.argument('<resource>', 'resource type (servers, projects, instances, all)')
|
||||
.description('List resources (servers, projects, instances)')
|
||||
.argument('<resource>', 'resource type (servers, projects, instances)')
|
||||
.argument('[id]', 'specific resource ID or name')
|
||||
.option('-o, --output <format>', 'output format (table, json, yaml)', 'table')
|
||||
.option('-p, --project <name>', 'Filter by project')
|
||||
.option('-A, --all', 'Show all (including project-scoped) resources')
|
||||
.action(async (resourceArg: string, id: string | undefined, opts: { output: string; project?: string; all?: true }) => {
|
||||
.action(async (resourceArg: string, id: string | undefined, opts: { output: string }) => {
|
||||
const resource = resolveResource(resourceArg);
|
||||
// Merge parent --project with local --project
|
||||
const project = opts.project ?? deps.getProject?.();
|
||||
|
||||
// Handle `get all --project X` composite export
|
||||
if (resource === 'all') {
|
||||
await handleGetAll(deps, { ...opts, project });
|
||||
return;
|
||||
}
|
||||
|
||||
// ProxyModels are served by mcplocal, not mcpd
|
||||
if (resource === 'proxymodels') {
|
||||
const mcplocalUrl = deps.mcplocalUrl ?? 'http://localhost:3200';
|
||||
const items = await fetchProxymodels(mcplocalUrl, id);
|
||||
if (opts.output === 'json') {
|
||||
deps.log(formatJson(items));
|
||||
} else if (opts.output === 'yaml') {
|
||||
deps.log(formatYamlMultiDoc(items.map((i) => ({ kind: 'proxymodel', ...(i as Record<string, unknown>) }))));
|
||||
} else {
|
||||
if (items.length === 0) {
|
||||
deps.log('No proxymodels found.');
|
||||
return;
|
||||
}
|
||||
const columns = getColumnsForResource(resource);
|
||||
deps.log(formatTable(items as Record<string, unknown>[], columns));
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
const fetchOpts: { project?: string; all?: boolean } = {};
|
||||
if (project) fetchOpts.project = project;
|
||||
if (opts.all) fetchOpts.all = true;
|
||||
const items = await deps.fetchResource(resource, id, Object.keys(fetchOpts).length > 0 ? fetchOpts : undefined);
|
||||
const items = await deps.fetchResource(resource, id);
|
||||
|
||||
if (opts.output === 'json') {
|
||||
deps.log(formatJson(toApplyDocs(resource, items)));
|
||||
// Apply-compatible JSON wrapped in resource key
|
||||
deps.log(formatJson(toApplyFormat(resource, items)));
|
||||
} else if (opts.output === 'yaml') {
|
||||
deps.log(formatYamlMultiDoc(toApplyDocs(resource, items)));
|
||||
// Apply-compatible YAML wrapped in resource key
|
||||
deps.log(formatYaml(toApplyFormat(resource, items)));
|
||||
} else {
|
||||
if (items.length === 0) {
|
||||
deps.log(`No ${resource} found.`);
|
||||
@@ -334,83 +204,3 @@ export function createGetCommand(deps: GetCommandDeps): Command {
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
async function handleGetAll(
|
||||
deps: GetCommandDeps,
|
||||
opts: { output: string; project?: string },
|
||||
): Promise<void> {
|
||||
if (!opts.project) {
|
||||
throw new Error('--project is required with "get all". Usage: mcpctl get all --project <name>');
|
||||
}
|
||||
|
||||
const docs: Array<{ kind: string } & Record<string, unknown>> = [];
|
||||
|
||||
// 1. Fetch the project
|
||||
const projects = await deps.fetchResource('projects', opts.project);
|
||||
if (projects.length === 0) {
|
||||
deps.log(`Project '${opts.project}' not found.`);
|
||||
return;
|
||||
}
|
||||
|
||||
// 2. Add the project itself
|
||||
for (const p of projects) {
|
||||
docs.push({ kind: 'project', ...stripInternalFields(p as Record<string, unknown>) });
|
||||
}
|
||||
|
||||
// 3. Extract serverattachments from project's server list
|
||||
const project = projects[0] as ProjectRow;
|
||||
let attachmentCount = 0;
|
||||
if (project.servers && project.servers.length > 0) {
|
||||
for (const ps of project.servers) {
|
||||
docs.push({
|
||||
kind: 'serverattachment',
|
||||
server: typeof ps === 'string' ? ps : ps.server.name,
|
||||
project: project.name,
|
||||
});
|
||||
attachmentCount++;
|
||||
}
|
||||
}
|
||||
|
||||
// 4. Fetch prompts owned by this project (exclude global prompts)
|
||||
const prompts = await deps.fetchResource('prompts', undefined, { project: opts.project });
|
||||
const projectPrompts = prompts.filter((p) => (p as { projectId?: string }).projectId != null);
|
||||
for (const p of projectPrompts) {
|
||||
docs.push({ kind: 'prompt', ...stripInternalFields(p as Record<string, unknown>) });
|
||||
}
|
||||
|
||||
if (opts.output === 'json') {
|
||||
deps.log(formatJson(docs));
|
||||
} else if (opts.output === 'yaml') {
|
||||
deps.log(formatYamlMultiDoc(docs));
|
||||
} else {
|
||||
// Table output: show summary
|
||||
deps.log(`Project: ${opts.project}`);
|
||||
deps.log(` Server Attachments: ${attachmentCount}`);
|
||||
deps.log(` Prompts: ${projectPrompts.length}`);
|
||||
deps.log(`\nUse -o yaml or -o json for apply-compatible output.`);
|
||||
}
|
||||
}
|
||||
|
||||
async function fetchProxymodels(mcplocalUrl: string, name?: string): Promise<unknown[]> {
|
||||
const http = await import('node:http');
|
||||
const url = name
|
||||
? `${mcplocalUrl}/proxymodels/${encodeURIComponent(name)}`
|
||||
: `${mcplocalUrl}/proxymodels`;
|
||||
|
||||
return new Promise<unknown[]>((resolve, reject) => {
|
||||
const req = http.get(url, { timeout: 5000 }, (res) => {
|
||||
let data = '';
|
||||
res.on('data', (chunk: Buffer) => { data += chunk.toString(); });
|
||||
res.on('end', () => {
|
||||
try {
|
||||
const parsed = JSON.parse(data) as unknown;
|
||||
resolve(Array.isArray(parsed) ? parsed : [parsed]);
|
||||
} catch {
|
||||
reject(new Error('Invalid response from mcplocal'));
|
||||
}
|
||||
});
|
||||
});
|
||||
req.on('error', () => reject(new Error(`Cannot connect to mcplocal at ${mcplocalUrl}`)));
|
||||
req.on('timeout', () => { req.destroy(); reject(new Error('mcplocal request timed out')); });
|
||||
});
|
||||
}
|
||||
|
||||
@@ -1,224 +0,0 @@
|
||||
import { Command } from 'commander';
|
||||
import http from 'node:http';
|
||||
import { createInterface } from 'node:readline';
|
||||
|
||||
export interface McpBridgeOptions {
|
||||
projectName: string;
|
||||
mcplocalUrl: string;
|
||||
token?: string | undefined;
|
||||
stdin: NodeJS.ReadableStream;
|
||||
stdout: NodeJS.WritableStream;
|
||||
stderr: NodeJS.WritableStream;
|
||||
}
|
||||
|
||||
export function postJsonRpc(
|
||||
url: string,
|
||||
body: string,
|
||||
sessionId: string | undefined,
|
||||
token: string | undefined,
|
||||
): Promise<{ status: number; headers: http.IncomingHttpHeaders; body: string }> {
|
||||
return new Promise((resolve, reject) => {
|
||||
const parsed = new URL(url);
|
||||
const headers: Record<string, string> = {
|
||||
'Content-Type': 'application/json',
|
||||
'Accept': 'application/json, text/event-stream',
|
||||
};
|
||||
if (sessionId) {
|
||||
headers['mcp-session-id'] = sessionId;
|
||||
}
|
||||
if (token) {
|
||||
headers['Authorization'] = `Bearer ${token}`;
|
||||
}
|
||||
|
||||
const req = http.request(
|
||||
{
|
||||
hostname: parsed.hostname,
|
||||
port: parsed.port,
|
||||
path: parsed.pathname,
|
||||
method: 'POST',
|
||||
headers,
|
||||
timeout: 30_000,
|
||||
},
|
||||
(res) => {
|
||||
const chunks: Buffer[] = [];
|
||||
res.on('data', (chunk: Buffer) => chunks.push(chunk));
|
||||
res.on('end', () => {
|
||||
resolve({
|
||||
status: res.statusCode ?? 0,
|
||||
headers: res.headers,
|
||||
body: Buffer.concat(chunks).toString('utf-8'),
|
||||
});
|
||||
});
|
||||
},
|
||||
);
|
||||
req.on('error', reject);
|
||||
req.on('timeout', () => {
|
||||
req.destroy();
|
||||
reject(new Error('Request timed out'));
|
||||
});
|
||||
req.write(body);
|
||||
req.end();
|
||||
});
|
||||
}
|
||||
|
||||
export function sendDelete(
|
||||
url: string,
|
||||
sessionId: string,
|
||||
token: string | undefined,
|
||||
): Promise<void> {
|
||||
return new Promise((resolve) => {
|
||||
const parsed = new URL(url);
|
||||
const headers: Record<string, string> = {
|
||||
'mcp-session-id': sessionId,
|
||||
};
|
||||
if (token) {
|
||||
headers['Authorization'] = `Bearer ${token}`;
|
||||
}
|
||||
|
||||
const req = http.request(
|
||||
{
|
||||
hostname: parsed.hostname,
|
||||
port: parsed.port,
|
||||
path: parsed.pathname,
|
||||
method: 'DELETE',
|
||||
headers,
|
||||
timeout: 5_000,
|
||||
},
|
||||
() => resolve(),
|
||||
);
|
||||
req.on('error', () => resolve()); // Best effort cleanup
|
||||
req.on('timeout', () => {
|
||||
req.destroy();
|
||||
resolve();
|
||||
});
|
||||
req.end();
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Extract JSON-RPC messages from an HTTP response body.
|
||||
* Handles both plain JSON and SSE (text/event-stream) formats.
|
||||
*/
|
||||
export function extractJsonRpcMessages(contentType: string | undefined, body: string): string[] {
|
||||
if (contentType?.includes('text/event-stream')) {
|
||||
// Parse SSE: extract data: lines
|
||||
const messages: string[] = [];
|
||||
for (const line of body.split('\n')) {
|
||||
if (line.startsWith('data: ')) {
|
||||
messages.push(line.slice(6));
|
||||
}
|
||||
}
|
||||
return messages;
|
||||
}
|
||||
// Plain JSON response
|
||||
return [body];
|
||||
}
|
||||
|
||||
/**
|
||||
* STDIO-to-Streamable-HTTP MCP bridge.
|
||||
*
|
||||
* Reads JSON-RPC messages line-by-line from stdin, POSTs them to
|
||||
* mcplocal's project endpoint, and writes responses to stdout.
|
||||
*/
|
||||
export async function runMcpBridge(opts: McpBridgeOptions): Promise<void> {
|
||||
const { projectName, mcplocalUrl, token, stdin, stdout, stderr } = opts;
|
||||
const endpointUrl = `${mcplocalUrl.replace(/\/$/, '')}/projects/${encodeURIComponent(projectName)}/mcp`;
|
||||
|
||||
let sessionId: string | undefined;
|
||||
|
||||
const rl = createInterface({ input: stdin, crlfDelay: Infinity });
|
||||
|
||||
for await (const line of rl) {
|
||||
const trimmed = line.trim();
|
||||
if (!trimmed) continue;
|
||||
|
||||
try {
|
||||
const result = await postJsonRpc(endpointUrl, trimmed, sessionId, token);
|
||||
|
||||
// Capture session ID from first response
|
||||
if (!sessionId) {
|
||||
const sid = result.headers['mcp-session-id'];
|
||||
if (typeof sid === 'string') {
|
||||
sessionId = sid;
|
||||
}
|
||||
}
|
||||
|
||||
if (result.status >= 400) {
|
||||
stderr.write(`MCP bridge error: HTTP ${result.status}: ${result.body}\n`);
|
||||
}
|
||||
|
||||
// Handle both plain JSON and SSE responses
|
||||
const messages = extractJsonRpcMessages(result.headers['content-type'], result.body);
|
||||
for (const msg of messages) {
|
||||
const trimmedMsg = msg.trim();
|
||||
if (trimmedMsg) {
|
||||
stdout.write(trimmedMsg + '\n');
|
||||
}
|
||||
}
|
||||
} catch (err) {
|
||||
stderr.write(`MCP bridge error: ${err instanceof Error ? err.message : String(err)}\n`);
|
||||
}
|
||||
}
|
||||
|
||||
// stdin closed — cleanup session
|
||||
if (sessionId) {
|
||||
await sendDelete(endpointUrl, sessionId, token);
|
||||
}
|
||||
}
|
||||
|
||||
export interface McpCommandDeps {
|
||||
getProject: () => string | undefined;
|
||||
configLoader?: () => { mcplocalUrl: string };
|
||||
credentialsLoader?: () => { token: string } | null;
|
||||
}
|
||||
|
||||
export function createMcpCommand(deps: McpCommandDeps): Command {
|
||||
const cmd = new Command('mcp')
|
||||
.description('MCP STDIO transport bridge — connects stdin/stdout to a project MCP endpoint')
|
||||
.passThroughOptions()
|
||||
.option('-p, --project <name>', 'Project name')
|
||||
.action(async (opts: { project?: string }) => {
|
||||
// Accept -p/--project on the command itself, or fall back to global --project
|
||||
const projectName = opts.project ?? deps.getProject();
|
||||
if (!projectName) {
|
||||
process.stderr.write('Error: --project is required for the mcp command\n');
|
||||
process.exitCode = 1;
|
||||
return;
|
||||
}
|
||||
|
||||
let mcplocalUrl = 'http://localhost:3200';
|
||||
if (deps.configLoader) {
|
||||
mcplocalUrl = deps.configLoader().mcplocalUrl;
|
||||
} else {
|
||||
try {
|
||||
const { loadConfig } = await import('../config/index.js');
|
||||
mcplocalUrl = loadConfig().mcplocalUrl;
|
||||
} catch {
|
||||
// Use default
|
||||
}
|
||||
}
|
||||
|
||||
let token: string | undefined;
|
||||
if (deps.credentialsLoader) {
|
||||
token = deps.credentialsLoader()?.token;
|
||||
} else {
|
||||
try {
|
||||
const { loadCredentials } = await import('../auth/index.js');
|
||||
token = loadCredentials()?.token;
|
||||
} catch {
|
||||
// No credentials
|
||||
}
|
||||
}
|
||||
|
||||
await runMcpBridge({
|
||||
projectName,
|
||||
mcplocalUrl,
|
||||
token,
|
||||
stdin: process.stdin,
|
||||
stdout: process.stdout,
|
||||
stderr: process.stderr,
|
||||
});
|
||||
});
|
||||
|
||||
return cmd;
|
||||
}
|
||||
@@ -1,58 +0,0 @@
|
||||
import { Command } from 'commander';
|
||||
import type { ApiClient } from '../api-client.js';
|
||||
import { resolveResource, resolveNameOrId } from './shared.js';
|
||||
|
||||
export interface PatchCommandDeps {
|
||||
client: ApiClient;
|
||||
log: (...args: string[]) => void;
|
||||
}
|
||||
|
||||
/**
|
||||
* Parse "key=value" pairs into a partial update object.
|
||||
* Supports: key=value, key=null (sets null), key=123 (number if parseable).
|
||||
*/
|
||||
function parsePatches(pairs: string[]): Record<string, unknown> {
|
||||
const result: Record<string, unknown> = {};
|
||||
for (const pair of pairs) {
|
||||
const eqIdx = pair.indexOf('=');
|
||||
if (eqIdx === -1) {
|
||||
throw new Error(`Invalid patch format '${pair}'. Expected key=value`);
|
||||
}
|
||||
const key = pair.slice(0, eqIdx);
|
||||
const raw = pair.slice(eqIdx + 1);
|
||||
|
||||
if (raw === 'null') {
|
||||
result[key] = null;
|
||||
} else if (raw === 'true') {
|
||||
result[key] = true;
|
||||
} else if (raw === 'false') {
|
||||
result[key] = false;
|
||||
} else if (/^\d+$/.test(raw)) {
|
||||
result[key] = parseInt(raw, 10);
|
||||
} else {
|
||||
result[key] = raw;
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
export function createPatchCommand(deps: PatchCommandDeps): Command {
|
||||
const { client, log } = deps;
|
||||
|
||||
return new Command('patch')
|
||||
.description('Patch a resource field (e.g. mcpctl patch project myproj llmProvider=none)')
|
||||
.argument('<resource>', 'resource type (server, project, secret, ...)')
|
||||
.argument('<name>', 'resource name or ID')
|
||||
.argument('<patches...>', 'key=value pairs to patch')
|
||||
.action(async (resourceArg: string, nameOrId: string, patches: string[]) => {
|
||||
const resource = resolveResource(resourceArg);
|
||||
const id = await resolveNameOrId(client, resource, nameOrId);
|
||||
const body = parsePatches(patches);
|
||||
|
||||
await client.put(`/api/v1/${resource}/${id}`, body);
|
||||
const fields = Object.entries(body)
|
||||
.map(([k, v]) => `${k}=${v === null ? 'null' : String(v)}`)
|
||||
.join(', ');
|
||||
log(`patched ${resource.replace(/s$/, '')} '${nameOrId}': ${fields}`);
|
||||
});
|
||||
}
|
||||
@@ -1,65 +0,0 @@
|
||||
import { Command } from 'commander';
|
||||
import type { ApiClient } from '../api-client.js';
|
||||
import { resolveNameOrId, resolveResource } from './shared.js';
|
||||
|
||||
export interface ProjectOpsDeps {
|
||||
client: ApiClient;
|
||||
log: (...args: string[]) => void;
|
||||
getProject: () => string | undefined;
|
||||
}
|
||||
|
||||
function requireProject(deps: ProjectOpsDeps): string {
|
||||
const project = deps.getProject();
|
||||
if (!project) {
|
||||
deps.log('Error: --project <name> is required for this command.');
|
||||
process.exitCode = 1;
|
||||
throw new Error('--project required');
|
||||
}
|
||||
return project;
|
||||
}
|
||||
|
||||
export function createAttachServerCommand(deps: ProjectOpsDeps): Command {
|
||||
const { client, log } = deps;
|
||||
|
||||
return new Command('attach-server')
|
||||
.description('Attach a server to a project (requires --project)')
|
||||
.argument('<server-name>', 'Server name to attach')
|
||||
.action(async (serverName: string) => {
|
||||
const projectName = requireProject(deps);
|
||||
const projectId = await resolveNameOrId(client, 'projects', projectName);
|
||||
await client.post(`/api/v1/projects/${projectId}/servers`, { server: serverName });
|
||||
log(`server '${serverName}' attached to project '${projectName}'`);
|
||||
});
|
||||
}
|
||||
|
||||
export function createDetachServerCommand(deps: ProjectOpsDeps): Command {
|
||||
const { client, log } = deps;
|
||||
|
||||
return new Command('detach-server')
|
||||
.description('Detach a server from a project (requires --project)')
|
||||
.argument('<server-name>', 'Server name to detach')
|
||||
.action(async (serverName: string) => {
|
||||
const projectName = requireProject(deps);
|
||||
const projectId = await resolveNameOrId(client, 'projects', projectName);
|
||||
await client.delete(`/api/v1/projects/${projectId}/servers/${serverName}`);
|
||||
log(`server '${serverName}' detached from project '${projectName}'`);
|
||||
});
|
||||
}
|
||||
|
||||
export function createApproveCommand(deps: ProjectOpsDeps): Command {
|
||||
const { client, log } = deps;
|
||||
|
||||
return new Command('approve')
|
||||
.description('Approve a pending prompt request (atomic: delete request, create prompt)')
|
||||
.argument('<resource>', 'Resource type (promptrequest)')
|
||||
.argument('<name>', 'Resource name or ID')
|
||||
.action(async (resourceArg: string, nameOrId: string) => {
|
||||
const resource = resolveResource(resourceArg);
|
||||
if (resource !== 'promptrequests') {
|
||||
throw new Error(`approve is only supported for 'promptrequest', got '${resourceArg}'`);
|
||||
}
|
||||
const id = await resolveNameOrId(client, 'promptrequests', nameOrId);
|
||||
const prompt = await client.post<{ id: string; name: string }>(`/api/v1/promptrequests/${id}/approve`, {});
|
||||
log(`prompt request approved → prompt '${prompt.name}' created (id: ${prompt.id})`);
|
||||
});
|
||||
}
|
||||
@@ -16,18 +16,6 @@ export const RESOURCE_ALIASES: Record<string, string> = {
|
||||
rbac: 'rbac',
|
||||
'rbac-definition': 'rbac',
|
||||
'rbac-binding': 'rbac',
|
||||
prompt: 'prompts',
|
||||
prompts: 'prompts',
|
||||
promptrequest: 'promptrequests',
|
||||
promptrequests: 'promptrequests',
|
||||
pr: 'promptrequests',
|
||||
serverattachment: 'serverattachments',
|
||||
serverattachments: 'serverattachments',
|
||||
sa: 'serverattachments',
|
||||
proxymodel: 'proxymodels',
|
||||
proxymodels: 'proxymodels',
|
||||
pm: 'proxymodels',
|
||||
all: 'all',
|
||||
};
|
||||
|
||||
export function resolveResource(name: string): string {
|
||||
@@ -68,61 +56,8 @@ export async function resolveNameOrId(
|
||||
/** Strip internal/read-only fields from an API response to make it apply-compatible. */
|
||||
export function stripInternalFields(obj: Record<string, unknown>): Record<string, unknown> {
|
||||
const result = { ...obj };
|
||||
for (const key of ['id', 'createdAt', 'updatedAt', 'version', 'ownerId', 'summary', 'chapters', 'linkStatus', 'serverId']) {
|
||||
for (const key of ['id', 'createdAt', 'updatedAt', 'version', 'ownerId']) {
|
||||
delete result[key];
|
||||
}
|
||||
|
||||
// Rename linkTarget → link for cleaner YAML
|
||||
if ('linkTarget' in result) {
|
||||
result.link = result.linkTarget;
|
||||
delete result.linkTarget;
|
||||
// Linked prompts: strip content (it's fetched from the link source, not static)
|
||||
if (result.link) {
|
||||
delete result.content;
|
||||
}
|
||||
}
|
||||
|
||||
// Convert project servers join array → string[] of server names
|
||||
if ('servers' in result && Array.isArray(result.servers)) {
|
||||
const entries = result.servers as Array<{ server?: { name: string } }>;
|
||||
if (entries.length > 0 && entries[0]?.server) {
|
||||
result.servers = entries.map((e) => e.server!.name);
|
||||
} else if (entries.length === 0) {
|
||||
result.servers = [];
|
||||
} else {
|
||||
delete result.servers;
|
||||
}
|
||||
}
|
||||
|
||||
// Convert prompt projectId CUID → project name string
|
||||
if ('project' in result && typeof result.project === 'object' && result.project !== null) {
|
||||
const proj = result.project as { name: string };
|
||||
result.project = proj.name;
|
||||
delete result.projectId;
|
||||
}
|
||||
|
||||
// Strip remaining relationship objects
|
||||
if ('owner' in result && typeof result.owner === 'object') {
|
||||
delete result.owner;
|
||||
}
|
||||
if ('members' in result && Array.isArray(result.members)) {
|
||||
delete result.members;
|
||||
}
|
||||
|
||||
// Normalize proxyModel: resolve from gated when empty, then drop deprecated gated field
|
||||
if ('gated' in result || 'proxyModel' in result) {
|
||||
if (!result.proxyModel) {
|
||||
result.proxyModel = result.gated === false ? 'content-pipeline' : 'default';
|
||||
}
|
||||
delete result.gated;
|
||||
}
|
||||
|
||||
// Strip null values last (null = unset, omitting from YAML is cleaner and equivalent)
|
||||
for (const key of Object.keys(result)) {
|
||||
if (result[key] === null) {
|
||||
delete result[key];
|
||||
}
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
@@ -7,40 +7,11 @@ import type { CredentialsDeps } from '../auth/index.js';
|
||||
import { formatJson, formatYaml } from '../formatters/index.js';
|
||||
import { APP_VERSION } from '@mcpctl/shared';
|
||||
|
||||
// ANSI helpers
|
||||
const GREEN = '\x1b[32m';
|
||||
const RED = '\x1b[31m';
|
||||
const YELLOW = '\x1b[33m';
|
||||
const DIM = '\x1b[2m';
|
||||
const RESET = '\x1b[0m';
|
||||
const CLEAR_LINE = '\x1b[2K\r';
|
||||
|
||||
interface ProviderDetail {
|
||||
managed: boolean;
|
||||
state?: string;
|
||||
lastError?: string;
|
||||
}
|
||||
|
||||
interface ProvidersInfo {
|
||||
providers: string[];
|
||||
tiers: { fast: string[]; heavy: string[] };
|
||||
health: Record<string, boolean>;
|
||||
details?: Record<string, ProviderDetail>;
|
||||
}
|
||||
|
||||
export interface StatusCommandDeps {
|
||||
configDeps: Partial<ConfigLoaderDeps>;
|
||||
credentialsDeps: Partial<CredentialsDeps>;
|
||||
log: (...args: string[]) => void;
|
||||
write: (text: string) => void;
|
||||
checkHealth: (url: string) => Promise<boolean>;
|
||||
/** Check LLM health via mcplocal's /llm/health endpoint */
|
||||
checkLlm: (mcplocalUrl: string) => Promise<string>;
|
||||
/** Fetch available models from mcplocal's /llm/models endpoint */
|
||||
fetchModels: (mcplocalUrl: string) => Promise<string[]>;
|
||||
/** Fetch provider tier info from mcplocal's /llm/providers endpoint */
|
||||
fetchProviders: (mcplocalUrl: string) => Promise<ProvidersInfo | null>;
|
||||
isTTY: boolean;
|
||||
}
|
||||
|
||||
function defaultCheckHealth(url: string): Promise<boolean> {
|
||||
@@ -57,148 +28,15 @@ function defaultCheckHealth(url: string): Promise<boolean> {
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Check LLM health by querying mcplocal's /llm/health endpoint.
|
||||
* This tests the actual provider running inside the daemon (uses persistent ACP for gemini, etc.)
|
||||
*/
|
||||
function defaultCheckLlm(mcplocalUrl: string): Promise<string> {
|
||||
return new Promise((resolve) => {
|
||||
const req = http.get(`${mcplocalUrl}/llm/health`, { timeout: 45000 }, (res) => {
|
||||
const chunks: Buffer[] = [];
|
||||
res.on('data', (chunk: Buffer) => chunks.push(chunk));
|
||||
res.on('end', () => {
|
||||
try {
|
||||
const body = JSON.parse(Buffer.concat(chunks).toString('utf-8')) as { status: string; error?: string };
|
||||
if (body.status === 'ok') {
|
||||
resolve('ok');
|
||||
} else if (body.status === 'not configured') {
|
||||
resolve('not configured');
|
||||
} else if (body.error) {
|
||||
resolve(body.error.slice(0, 80));
|
||||
} else {
|
||||
resolve(body.status);
|
||||
}
|
||||
} catch {
|
||||
resolve('invalid response');
|
||||
}
|
||||
});
|
||||
});
|
||||
req.on('error', () => resolve('mcplocal unreachable'));
|
||||
req.on('timeout', () => { req.destroy(); resolve('timeout'); });
|
||||
});
|
||||
}
|
||||
|
||||
function defaultFetchModels(mcplocalUrl: string): Promise<string[]> {
|
||||
return new Promise((resolve) => {
|
||||
const req = http.get(`${mcplocalUrl}/llm/models`, { timeout: 5000 }, (res) => {
|
||||
const chunks: Buffer[] = [];
|
||||
res.on('data', (chunk: Buffer) => chunks.push(chunk));
|
||||
res.on('end', () => {
|
||||
try {
|
||||
const body = JSON.parse(Buffer.concat(chunks).toString('utf-8')) as { models?: string[] };
|
||||
resolve(body.models ?? []);
|
||||
} catch {
|
||||
resolve([]);
|
||||
}
|
||||
});
|
||||
});
|
||||
req.on('error', () => resolve([]));
|
||||
req.on('timeout', () => { req.destroy(); resolve([]); });
|
||||
});
|
||||
}
|
||||
|
||||
function defaultFetchProviders(mcplocalUrl: string): Promise<ProvidersInfo | null> {
|
||||
return new Promise((resolve) => {
|
||||
const req = http.get(`${mcplocalUrl}/llm/providers`, { timeout: 5000 }, (res) => {
|
||||
const chunks: Buffer[] = [];
|
||||
res.on('data', (chunk: Buffer) => chunks.push(chunk));
|
||||
res.on('end', () => {
|
||||
try {
|
||||
const body = JSON.parse(Buffer.concat(chunks).toString('utf-8')) as ProvidersInfo;
|
||||
resolve(body);
|
||||
} catch {
|
||||
resolve(null);
|
||||
}
|
||||
});
|
||||
});
|
||||
req.on('error', () => resolve(null));
|
||||
req.on('timeout', () => { req.destroy(); resolve(null); });
|
||||
});
|
||||
}
|
||||
|
||||
const SPINNER_FRAMES = ['⠋', '⠙', '⠹', '⠸', '⠼', '⠴', '⠦', '⠧', '⠇', '⠏'];
|
||||
|
||||
const defaultDeps: StatusCommandDeps = {
|
||||
configDeps: {},
|
||||
credentialsDeps: {},
|
||||
log: (...args) => console.log(...args),
|
||||
write: (text) => process.stdout.write(text),
|
||||
checkHealth: defaultCheckHealth,
|
||||
checkLlm: defaultCheckLlm,
|
||||
fetchModels: defaultFetchModels,
|
||||
fetchProviders: defaultFetchProviders,
|
||||
isTTY: process.stdout.isTTY ?? false,
|
||||
};
|
||||
|
||||
/** Determine LLM label from config (handles both legacy and multi-provider formats). */
|
||||
function getLlmLabel(llm: unknown): string | null {
|
||||
if (!llm || typeof llm !== 'object') return null;
|
||||
// Legacy format: { provider, model }
|
||||
if ('provider' in llm) {
|
||||
const legacy = llm as { provider: string; model?: string };
|
||||
if (legacy.provider === 'none') return null;
|
||||
return `${legacy.provider}${legacy.model ? ` / ${legacy.model}` : ''}`;
|
||||
}
|
||||
// Multi-provider format: { providers: [...] }
|
||||
if ('providers' in llm) {
|
||||
const multi = llm as { providers: Array<{ name: string; type: string; tier?: string }> };
|
||||
if (multi.providers.length === 0) return null;
|
||||
return multi.providers.map((p) => `${p.name}${p.tier ? ` (${p.tier})` : ''}`).join(', ');
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
/** Check if config uses multi-provider format. */
|
||||
function isMultiProvider(llm: unknown): boolean {
|
||||
return !!llm && typeof llm === 'object' && 'providers' in llm;
|
||||
}
|
||||
|
||||
/**
|
||||
* Format a single provider's status string for display.
|
||||
* Managed providers show lifecycle state; regular providers show health check result.
|
||||
*/
|
||||
function formatProviderStatus(name: string, info: ProvidersInfo, ansi: boolean): string {
|
||||
const detail = info.details?.[name];
|
||||
if (detail?.managed) {
|
||||
switch (detail.state) {
|
||||
case 'running':
|
||||
return ansi ? `${name} ${GREEN}✓ running${RESET}` : `${name} ✓ running`;
|
||||
case 'stopped':
|
||||
return ansi
|
||||
? `${name} ${DIM}○ stopped (auto-starts on demand)${RESET}`
|
||||
: `${name} ○ stopped (auto-starts on demand)`;
|
||||
case 'starting':
|
||||
return ansi ? `${name} ${YELLOW}⟳ starting...${RESET}` : `${name} ⟳ starting...`;
|
||||
case 'error':
|
||||
return ansi
|
||||
? `${name} ${RED}✗ error: ${detail.lastError ?? 'unknown'}${RESET}`
|
||||
: `${name} ✗ error: ${detail.lastError ?? 'unknown'}`;
|
||||
default: {
|
||||
const ok = info.health[name];
|
||||
return ansi
|
||||
? ok ? `${name} ${GREEN}✓${RESET}` : `${name} ${RED}✗${RESET}`
|
||||
: ok ? `${name} ✓` : `${name} ✗`;
|
||||
}
|
||||
}
|
||||
}
|
||||
const ok = info.health[name];
|
||||
return ansi
|
||||
? ok ? `${name} ${GREEN}✓${RESET}` : `${name} ${RED}✗${RESET}`
|
||||
: ok ? `${name} ✓` : `${name} ✗`;
|
||||
}
|
||||
|
||||
export function createStatusCommand(deps?: Partial<StatusCommandDeps>): Command {
|
||||
const { configDeps, credentialsDeps, log, write, checkHealth, checkLlm, fetchModels, fetchProviders, isTTY } = { ...defaultDeps, ...deps };
|
||||
const { configDeps, credentialsDeps, log, checkHealth } = { ...defaultDeps, ...deps };
|
||||
|
||||
return new Command('status')
|
||||
.description('Show mcpctl status and connectivity')
|
||||
@@ -207,118 +45,33 @@ export function createStatusCommand(deps?: Partial<StatusCommandDeps>): Command
|
||||
const config = loadConfig(configDeps);
|
||||
const creds = loadCredentials(credentialsDeps);
|
||||
|
||||
const llmLabel = getLlmLabel(config.llm);
|
||||
const multiProvider = isMultiProvider(config.llm);
|
||||
|
||||
if (opts.output !== 'table') {
|
||||
// JSON/YAML: run everything in parallel, wait, output at once
|
||||
const [mcplocalReachable, mcpdReachable, llmStatus, providersInfo] = await Promise.all([
|
||||
checkHealth(config.mcplocalUrl),
|
||||
checkHealth(config.mcpdUrl),
|
||||
llmLabel ? checkLlm(config.mcplocalUrl) : Promise.resolve(null),
|
||||
multiProvider ? fetchProviders(config.mcplocalUrl) : Promise.resolve(null),
|
||||
]);
|
||||
|
||||
const llm = llmLabel
|
||||
? llmStatus === 'ok' ? llmLabel : `${llmLabel} (${llmStatus})`
|
||||
: null;
|
||||
|
||||
const status = {
|
||||
version: APP_VERSION,
|
||||
mcplocalUrl: config.mcplocalUrl,
|
||||
mcplocalReachable,
|
||||
mcpdUrl: config.mcpdUrl,
|
||||
mcpdReachable,
|
||||
auth: creds ? { user: creds.user } : null,
|
||||
registries: config.registries,
|
||||
outputFormat: config.outputFormat,
|
||||
llm,
|
||||
llmStatus,
|
||||
...(providersInfo ? { providers: providersInfo } : {}),
|
||||
};
|
||||
|
||||
log(opts.output === 'json' ? formatJson(status) : formatYaml(status));
|
||||
return;
|
||||
}
|
||||
|
||||
// Table format: print lines progressively, LLM last with spinner
|
||||
|
||||
// Fast health checks first
|
||||
const [mcplocalReachable, mcpdReachable] = await Promise.all([
|
||||
checkHealth(config.mcplocalUrl),
|
||||
checkHealth(config.mcpdUrl),
|
||||
]);
|
||||
|
||||
log(`mcpctl v${APP_VERSION}`);
|
||||
log(`mcplocal: ${config.mcplocalUrl} (${mcplocalReachable ? 'connected' : 'unreachable'})`);
|
||||
log(`mcpd: ${config.mcpdUrl} (${mcpdReachable ? 'connected' : 'unreachable'})`);
|
||||
log(`Auth: ${creds ? `logged in as ${creds.user}` : 'not logged in'}`);
|
||||
log(`Registries: ${config.registries.join(', ')}`);
|
||||
log(`Output: ${config.outputFormat}`);
|
||||
const status = {
|
||||
version: APP_VERSION,
|
||||
mcplocalUrl: config.mcplocalUrl,
|
||||
mcplocalReachable,
|
||||
mcpdUrl: config.mcpdUrl,
|
||||
mcpdReachable,
|
||||
auth: creds ? { user: creds.user } : null,
|
||||
registries: config.registries,
|
||||
outputFormat: config.outputFormat,
|
||||
};
|
||||
|
||||
if (!llmLabel) {
|
||||
log(`LLM: not configured (run 'mcpctl config setup')`);
|
||||
return;
|
||||
}
|
||||
|
||||
// LLM check + models + providers fetch in parallel
|
||||
const llmPromise = checkLlm(config.mcplocalUrl);
|
||||
const modelsPromise = fetchModels(config.mcplocalUrl);
|
||||
const providersPromise = multiProvider ? fetchProviders(config.mcplocalUrl) : Promise.resolve(null);
|
||||
|
||||
if (isTTY) {
|
||||
let frame = 0;
|
||||
const interval = setInterval(() => {
|
||||
write(`${CLEAR_LINE}LLM: ${DIM}${SPINNER_FRAMES[frame % SPINNER_FRAMES.length]} checking...${RESET}`);
|
||||
frame++;
|
||||
}, 80);
|
||||
|
||||
const [llmStatus, models, providersInfo] = await Promise.all([llmPromise, modelsPromise, providersPromise]);
|
||||
clearInterval(interval);
|
||||
|
||||
if (providersInfo && (providersInfo.tiers.fast.length > 0 || providersInfo.tiers.heavy.length > 0)) {
|
||||
// Tiered display with per-provider health
|
||||
write(`${CLEAR_LINE}`);
|
||||
for (const tier of ['fast', 'heavy'] as const) {
|
||||
const names = providersInfo.tiers[tier];
|
||||
if (names.length === 0) continue;
|
||||
const label = tier === 'fast' ? 'LLM (fast): ' : 'LLM (heavy):';
|
||||
const parts = names.map((n) => formatProviderStatus(n, providersInfo, true));
|
||||
log(`${label} ${parts.join(', ')}`);
|
||||
}
|
||||
} else {
|
||||
// Legacy single provider display
|
||||
if (llmStatus === 'ok' || llmStatus === 'ok (key stored)') {
|
||||
write(`${CLEAR_LINE}LLM: ${llmLabel} ${GREEN}✓ ${llmStatus}${RESET}\n`);
|
||||
} else {
|
||||
write(`${CLEAR_LINE}LLM: ${llmLabel} ${RED}✗ ${llmStatus}${RESET}\n`);
|
||||
}
|
||||
}
|
||||
if (models.length > 0) {
|
||||
log(`${DIM} Available: ${models.join(', ')}${RESET}`);
|
||||
}
|
||||
if (opts.output === 'json') {
|
||||
log(formatJson(status));
|
||||
} else if (opts.output === 'yaml') {
|
||||
log(formatYaml(status));
|
||||
} else {
|
||||
// Non-TTY: no spinner, just wait and print
|
||||
const [llmStatus, models, providersInfo] = await Promise.all([llmPromise, modelsPromise, providersPromise]);
|
||||
|
||||
if (providersInfo && (providersInfo.tiers.fast.length > 0 || providersInfo.tiers.heavy.length > 0)) {
|
||||
for (const tier of ['fast', 'heavy'] as const) {
|
||||
const names = providersInfo.tiers[tier];
|
||||
if (names.length === 0) continue;
|
||||
const label = tier === 'fast' ? 'LLM (fast): ' : 'LLM (heavy):';
|
||||
const parts = names.map((n) => formatProviderStatus(n, providersInfo, false));
|
||||
log(`${label} ${parts.join(', ')}`);
|
||||
}
|
||||
} else {
|
||||
if (llmStatus === 'ok' || llmStatus === 'ok (key stored)') {
|
||||
log(`LLM: ${llmLabel} ✓ ${llmStatus}`);
|
||||
} else {
|
||||
log(`LLM: ${llmLabel} ✗ ${llmStatus}`);
|
||||
}
|
||||
}
|
||||
if (models.length > 0) {
|
||||
log(`${DIM} Available: ${models.join(', ')}${RESET}`);
|
||||
}
|
||||
log(`mcpctl v${status.version}`);
|
||||
log(`mcplocal: ${status.mcplocalUrl} (${mcplocalReachable ? 'connected' : 'unreachable'})`);
|
||||
log(`mcpd: ${status.mcpdUrl} (${mcpdReachable ? 'connected' : 'unreachable'})`);
|
||||
log(`Auth: ${creds ? `logged in as ${creds.user}` : 'not logged in'}`);
|
||||
log(`Registries: ${status.registries.join(', ')}`);
|
||||
log(`Output: ${status.outputFormat}`);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
export { McpctlConfigSchema, LlmConfigSchema, LlmProviderEntrySchema, LlmMultiConfigSchema, LLM_PROVIDERS, LLM_TIERS, DEFAULT_CONFIG } from './schema.js';
|
||||
export type { McpctlConfig, LlmConfig, LlmProviderEntry, LlmMultiConfig, LlmProviderName, LlmTier } from './schema.js';
|
||||
export { McpctlConfigSchema, DEFAULT_CONFIG } from './schema.js';
|
||||
export type { McpctlConfig } from './schema.js';
|
||||
export { loadConfig, saveConfig, mergeConfig, getConfigPath } from './loader.js';
|
||||
export type { ConfigLoaderDeps } from './loader.js';
|
||||
|
||||
@@ -1,62 +1,5 @@
|
||||
import { z } from 'zod';
|
||||
|
||||
export const LLM_PROVIDERS = ['gemini-cli', 'ollama', 'anthropic', 'openai', 'deepseek', 'vllm', 'vllm-managed', 'none'] as const;
|
||||
export type LlmProviderName = typeof LLM_PROVIDERS[number];
|
||||
|
||||
export const LLM_TIERS = ['fast', 'heavy'] as const;
|
||||
export type LlmTier = typeof LLM_TIERS[number];
|
||||
|
||||
/** Legacy single-provider format. */
|
||||
export const LlmConfigSchema = z.object({
|
||||
/** LLM provider name */
|
||||
provider: z.enum(LLM_PROVIDERS),
|
||||
/** Model name */
|
||||
model: z.string().optional(),
|
||||
/** Provider URL (for ollama, vllm, openai with custom endpoint) */
|
||||
url: z.string().optional(),
|
||||
/** Binary path override (for gemini-cli) */
|
||||
binaryPath: z.string().optional(),
|
||||
}).strict();
|
||||
|
||||
export type LlmConfig = z.infer<typeof LlmConfigSchema>;
|
||||
|
||||
/** Multi-provider entry (advanced mode). */
|
||||
export const LlmProviderEntrySchema = z.object({
|
||||
/** User-chosen name for this provider instance (e.g. "vllm-local") */
|
||||
name: z.string(),
|
||||
/** Provider type */
|
||||
type: z.enum(LLM_PROVIDERS),
|
||||
/** Model name */
|
||||
model: z.string().optional(),
|
||||
/** Provider URL (for ollama, vllm, openai with custom endpoint) */
|
||||
url: z.string().optional(),
|
||||
/** Binary path override (for gemini-cli) */
|
||||
binaryPath: z.string().optional(),
|
||||
/** Tier assignment */
|
||||
tier: z.enum(LLM_TIERS).optional(),
|
||||
/** vllm-managed: path to Python venv (e.g. "~/vllm_env") */
|
||||
venvPath: z.string().optional(),
|
||||
/** vllm-managed: port for vLLM HTTP server */
|
||||
port: z.number().int().positive().optional(),
|
||||
/** vllm-managed: GPU memory utilization fraction */
|
||||
gpuMemoryUtilization: z.number().min(0.1).max(1.0).optional(),
|
||||
/** vllm-managed: max model context length */
|
||||
maxModelLen: z.number().int().positive().optional(),
|
||||
/** vllm-managed: minutes of idle before stopping vLLM */
|
||||
idleTimeoutMinutes: z.number().int().positive().optional(),
|
||||
/** vllm-managed: extra args for `vllm serve` */
|
||||
extraArgs: z.array(z.string()).optional(),
|
||||
}).strict();
|
||||
|
||||
export type LlmProviderEntry = z.infer<typeof LlmProviderEntrySchema>;
|
||||
|
||||
/** Multi-provider format with providers array. */
|
||||
export const LlmMultiConfigSchema = z.object({
|
||||
providers: z.array(LlmProviderEntrySchema).min(1),
|
||||
}).strict();
|
||||
|
||||
export type LlmMultiConfig = z.infer<typeof LlmMultiConfigSchema>;
|
||||
|
||||
export const McpctlConfigSchema = z.object({
|
||||
/** mcplocal daemon endpoint (local LLM pre-processing proxy) */
|
||||
mcplocalUrl: z.string().default('http://localhost:3200'),
|
||||
@@ -76,8 +19,6 @@ export const McpctlConfigSchema = z.object({
|
||||
outputFormat: z.enum(['table', 'json', 'yaml']).default('table'),
|
||||
/** Smithery API key */
|
||||
smitheryApiKey: z.string().optional(),
|
||||
/** LLM provider configuration — accepts legacy single-provider or multi-provider format */
|
||||
llm: z.union([LlmConfigSchema, LlmMultiConfigSchema]).optional(),
|
||||
}).transform((cfg) => {
|
||||
// Backward compatibility: if old daemonUrl is set but mcplocalUrl wasn't explicitly changed,
|
||||
// use daemonUrl as mcplocalUrl
|
||||
|
||||
@@ -6,46 +6,6 @@ export function formatJson(data: unknown): string {
|
||||
return JSON.stringify(data, null, 2);
|
||||
}
|
||||
|
||||
/**
|
||||
* Reorder object keys so that long text fields (like `content`, `prompt`)
|
||||
* come last. This makes YAML output more readable when content spans
|
||||
* multiple lines.
|
||||
*/
|
||||
export function reorderKeys(obj: unknown): unknown {
|
||||
if (Array.isArray(obj)) return obj.map(reorderKeys);
|
||||
if (obj !== null && typeof obj === 'object') {
|
||||
const rec = obj as Record<string, unknown>;
|
||||
const firstKeys = ['kind'];
|
||||
const lastKeys = ['link', 'content', 'prompt'];
|
||||
const ordered: Record<string, unknown> = {};
|
||||
for (const key of firstKeys) {
|
||||
if (key in rec) ordered[key] = rec[key];
|
||||
}
|
||||
for (const key of Object.keys(rec)) {
|
||||
if (!firstKeys.includes(key) && !lastKeys.includes(key)) ordered[key] = reorderKeys(rec[key]);
|
||||
}
|
||||
for (const key of lastKeys) {
|
||||
if (key in rec) ordered[key] = rec[key];
|
||||
}
|
||||
return ordered;
|
||||
}
|
||||
return obj;
|
||||
}
|
||||
|
||||
export function formatYaml(data: unknown): string {
|
||||
const reordered = reorderKeys(data);
|
||||
return yaml.dump(reordered, { lineWidth: 120, noRefs: true }).trimEnd();
|
||||
}
|
||||
|
||||
/**
|
||||
* Format multiple resources as Kubernetes-style multi-document YAML.
|
||||
* Each item gets its own `---` separated document with a `kind` field.
|
||||
*/
|
||||
export function formatYamlMultiDoc(items: Array<{ kind: string } & Record<string, unknown>>): string {
|
||||
return items
|
||||
.map((item) => {
|
||||
const reordered = reorderKeys(item);
|
||||
return '---\n' + yaml.dump(reordered, { lineWidth: 120, noRefs: true }).trimEnd();
|
||||
})
|
||||
.join('\n');
|
||||
return yaml.dump(data, { lineWidth: 120, noRefs: true }).trimEnd();
|
||||
}
|
||||
|
||||
@@ -10,13 +10,8 @@ import { createLogsCommand } from './commands/logs.js';
|
||||
import { createApplyCommand } from './commands/apply.js';
|
||||
import { createCreateCommand } from './commands/create.js';
|
||||
import { createEditCommand } from './commands/edit.js';
|
||||
import { createBackupCommand } from './commands/backup.js';
|
||||
import { createBackupCommand, createRestoreCommand } from './commands/backup.js';
|
||||
import { createLoginCommand, createLogoutCommand } from './commands/auth.js';
|
||||
import { createAttachServerCommand, createDetachServerCommand, createApproveCommand } from './commands/project-ops.js';
|
||||
import { createMcpCommand } from './commands/mcp.js';
|
||||
import { createPatchCommand } from './commands/patch.js';
|
||||
import { createConsoleCommand } from './commands/console/index.js';
|
||||
import { createCacheCommand } from './commands/cache.js';
|
||||
import { ApiClient, ApiError } from './api-client.js';
|
||||
import { loadConfig } from './config/index.js';
|
||||
import { loadCredentials } from './auth/index.js';
|
||||
@@ -29,8 +24,7 @@ export function createProgram(): Command {
|
||||
.version(APP_VERSION, '-v, --version')
|
||||
.enablePositionalOptions()
|
||||
.option('--daemon-url <url>', 'mcplocal daemon URL')
|
||||
.option('--direct', 'bypass mcplocal and connect directly to mcpd')
|
||||
.option('-p, --project <name>', 'Target project for project commands');
|
||||
.option('--direct', 'bypass mcplocal and connect directly to mcpd');
|
||||
|
||||
program.addCommand(createStatusCommand());
|
||||
program.addCommand(createLoginCommand());
|
||||
@@ -57,48 +51,7 @@ export function createProgram(): Command {
|
||||
log: (...args) => console.log(...args),
|
||||
}));
|
||||
|
||||
const fetchResource = async (resource: string, nameOrId?: string, opts?: { project?: string; all?: boolean }): Promise<unknown[]> => {
|
||||
const projectName = opts?.project ?? program.opts().project as string | undefined;
|
||||
|
||||
// Virtual resource: serverattachments (composed from project data)
|
||||
if (resource === 'serverattachments') {
|
||||
type ProjectWithServers = { name: string; id: string; servers?: Array<{ server: { name: string } }> };
|
||||
let projects: ProjectWithServers[];
|
||||
if (projectName) {
|
||||
const projectId = await resolveNameOrId(client, 'projects', projectName);
|
||||
const project = await client.get<ProjectWithServers>(`/api/v1/projects/${projectId}`);
|
||||
projects = [project];
|
||||
} else {
|
||||
projects = await client.get<ProjectWithServers[]>('/api/v1/projects');
|
||||
}
|
||||
const attachments: Array<{ project: string; server: string }> = [];
|
||||
for (const p of projects) {
|
||||
if (p.servers) {
|
||||
for (const ps of p.servers) {
|
||||
attachments.push({ server: ps.server.name, project: p.name });
|
||||
}
|
||||
}
|
||||
}
|
||||
return attachments;
|
||||
}
|
||||
|
||||
// --project scoping for servers: show only attached servers
|
||||
if (!nameOrId && resource === 'servers' && projectName) {
|
||||
const projectId = await resolveNameOrId(client, 'projects', projectName);
|
||||
return client.get<unknown[]>(`/api/v1/projects/${projectId}/servers`);
|
||||
}
|
||||
|
||||
// --project scoping for prompts and promptrequests
|
||||
if (!nameOrId && (resource === 'prompts' || resource === 'promptrequests')) {
|
||||
if (projectName) {
|
||||
return client.get<unknown[]>(`/api/v1/${resource}?project=${encodeURIComponent(projectName)}`);
|
||||
}
|
||||
// Default: global-only. --all (-A) shows everything.
|
||||
if (!opts?.all) {
|
||||
return client.get<unknown[]>(`/api/v1/${resource}?scope=global`);
|
||||
}
|
||||
}
|
||||
|
||||
const fetchResource = async (resource: string, nameOrId?: string): Promise<unknown[]> => {
|
||||
if (nameOrId) {
|
||||
// Glob pattern — use query param filtering
|
||||
if (nameOrId.includes('*')) {
|
||||
@@ -117,21 +70,6 @@ export function createProgram(): Command {
|
||||
};
|
||||
|
||||
const fetchSingleResource = async (resource: string, nameOrId: string): Promise<unknown> => {
|
||||
const projectName = program.opts().project as string | undefined;
|
||||
|
||||
// Prompts: resolve within project scope (or global-only without --project)
|
||||
if (resource === 'prompts' || resource === 'promptrequests') {
|
||||
const scope = projectName
|
||||
? `?project=${encodeURIComponent(projectName)}`
|
||||
: '?scope=global';
|
||||
const items = await client.get<Array<Record<string, unknown>>>(`/api/v1/${resource}${scope}`);
|
||||
const match = items.find((item) => item.name === nameOrId);
|
||||
if (!match) {
|
||||
throw new Error(`${resource.replace(/s$/, '')} '${nameOrId}' not found${projectName ? ` in project '${projectName}'` : ' (global scope). Use --project to specify a project'}`);
|
||||
}
|
||||
return client.get(`/api/v1/${resource}/${match.id as string}`);
|
||||
}
|
||||
|
||||
let id: string;
|
||||
try {
|
||||
id = await resolveNameOrId(client, resource, nameOrId);
|
||||
@@ -144,8 +82,6 @@ export function createProgram(): Command {
|
||||
program.addCommand(createGetCommand({
|
||||
fetchResource,
|
||||
log: (...args) => console.log(...args),
|
||||
getProject: () => program.opts().project as string | undefined,
|
||||
mcplocalUrl: config.mcplocalUrl,
|
||||
}));
|
||||
|
||||
program.addCommand(createDescribeCommand({
|
||||
@@ -153,7 +89,6 @@ export function createProgram(): Command {
|
||||
fetchResource: fetchSingleResource,
|
||||
fetchInspect: async (id: string) => client.get(`/api/v1/instances/${id}/inspect`),
|
||||
log: (...args) => console.log(...args),
|
||||
mcplocalUrl: config.mcplocalUrl,
|
||||
}));
|
||||
|
||||
program.addCommand(createDeleteCommand({
|
||||
@@ -181,35 +116,14 @@ export function createProgram(): Command {
|
||||
log: (...args) => console.log(...args),
|
||||
}));
|
||||
|
||||
program.addCommand(createPatchCommand({
|
||||
client,
|
||||
log: (...args) => console.log(...args),
|
||||
}));
|
||||
|
||||
program.addCommand(createBackupCommand({
|
||||
client,
|
||||
log: (...args) => console.log(...args),
|
||||
}));
|
||||
|
||||
const projectOpsDeps = {
|
||||
program.addCommand(createRestoreCommand({
|
||||
client,
|
||||
log: (...args: string[]) => console.log(...args),
|
||||
getProject: () => program.opts().project as string | undefined,
|
||||
};
|
||||
program.addCommand(createAttachServerCommand(projectOpsDeps), { hidden: true });
|
||||
program.addCommand(createDetachServerCommand(projectOpsDeps), { hidden: true });
|
||||
program.addCommand(createApproveCommand(projectOpsDeps));
|
||||
program.addCommand(createMcpCommand({
|
||||
getProject: () => program.opts().project as string | undefined,
|
||||
}), { hidden: true });
|
||||
|
||||
program.addCommand(createConsoleCommand({
|
||||
getProject: () => program.opts().project as string | undefined,
|
||||
}));
|
||||
|
||||
program.addCommand(createCacheCommand({
|
||||
log: (...args) => console.log(...args),
|
||||
mcplocalUrl: config.mcplocalUrl,
|
||||
}));
|
||||
|
||||
return program;
|
||||
|
||||
2
src/cli/stubs/react-devtools-core/index.js
vendored
2
src/cli/stubs/react-devtools-core/index.js
vendored
@@ -1,2 +0,0 @@
|
||||
// Stub for react-devtools-core — not needed in production builds
|
||||
export default { initialize() {}, connectToDevTools() {} };
|
||||
@@ -1,6 +0,0 @@
|
||||
{
|
||||
"name": "react-devtools-core",
|
||||
"version": "0.0.0",
|
||||
"main": "index.js",
|
||||
"type": "module"
|
||||
}
|
||||
@@ -21,16 +21,6 @@ beforeAll(async () => {
|
||||
res.writeHead(201, { 'Content-Type': 'application/json' });
|
||||
res.end(JSON.stringify({ id: 'srv-new', ...body }));
|
||||
});
|
||||
} else if (req.url === '/api/v1/servers/srv-1' && req.method === 'DELETE') {
|
||||
// Fastify rejects empty body with Content-Type: application/json
|
||||
const ct = req.headers['content-type'] ?? '';
|
||||
if (ct.includes('application/json')) {
|
||||
res.writeHead(400, { 'Content-Type': 'application/json' });
|
||||
res.end(JSON.stringify({ error: "Body cannot be empty when content-type is set to 'application/json'" }));
|
||||
} else {
|
||||
res.writeHead(204);
|
||||
res.end();
|
||||
}
|
||||
} else if (req.url === '/api/v1/missing' && req.method === 'GET') {
|
||||
res.writeHead(404, { 'Content-Type': 'application/json' });
|
||||
res.end(JSON.stringify({ error: 'Not found' }));
|
||||
@@ -85,12 +75,6 @@ describe('ApiClient', () => {
|
||||
await expect(client.get('/anything')).rejects.toThrow();
|
||||
});
|
||||
|
||||
it('performs DELETE without Content-Type header', async () => {
|
||||
const client = new ApiClient({ baseUrl: `http://localhost:${port}` });
|
||||
// Should succeed (204) because no Content-Type is sent on bodyless DELETE
|
||||
await expect(client.delete('/api/v1/servers/srv-1')).resolves.toBeUndefined();
|
||||
});
|
||||
|
||||
it('sends Authorization header when token provided', async () => {
|
||||
// We need a separate server to check the header
|
||||
let receivedAuth = '';
|
||||
|
||||
@@ -9,7 +9,7 @@ describe('createProgram', () => {
|
||||
|
||||
it('has version flag', () => {
|
||||
const program = createProgram();
|
||||
expect(program.version()).toBe('0.0.1');
|
||||
expect(program.version()).toBe('0.1.0');
|
||||
});
|
||||
|
||||
it('has config subcommand', () => {
|
||||
|
||||
@@ -326,17 +326,21 @@ rbacBindings:
|
||||
rmSync(tmpDir, { recursive: true, force: true });
|
||||
});
|
||||
|
||||
it('applies projects with servers', async () => {
|
||||
it('applies projects with servers and members', async () => {
|
||||
const configPath = join(tmpDir, 'config.yaml');
|
||||
writeFileSync(configPath, `
|
||||
projects:
|
||||
- name: smart-home
|
||||
description: Home automation
|
||||
proxyMode: filtered
|
||||
llmProvider: gemini-cli
|
||||
llmModel: gemini-2.0-flash
|
||||
servers:
|
||||
- my-grafana
|
||||
- my-ha
|
||||
members:
|
||||
- alice@test.com
|
||||
- bob@test.com
|
||||
`);
|
||||
|
||||
const cmd = createApplyCommand({ client, log });
|
||||
@@ -344,9 +348,11 @@ projects:
|
||||
|
||||
expect(client.post).toHaveBeenCalledWith('/api/v1/projects', expect.objectContaining({
|
||||
name: 'smart-home',
|
||||
proxyMode: 'filtered',
|
||||
llmProvider: 'gemini-cli',
|
||||
llmModel: 'gemini-2.0-flash',
|
||||
servers: ['my-grafana', 'my-ha'],
|
||||
members: ['alice@test.com', 'bob@test.com'],
|
||||
}));
|
||||
expect(output.join('\n')).toContain('Created project: smart-home');
|
||||
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
import { describe, it, expect, vi, beforeEach } from 'vitest';
|
||||
import { createBackupCommand } from '../../src/commands/backup.js';
|
||||
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
|
||||
import fs from 'node:fs';
|
||||
import { createBackupCommand, createRestoreCommand } from '../../src/commands/backup.js';
|
||||
|
||||
const mockClient = {
|
||||
get: vi.fn(),
|
||||
@@ -10,217 +11,110 @@ const mockClient = {
|
||||
|
||||
const log = vi.fn();
|
||||
|
||||
function makeCmd() {
|
||||
return createBackupCommand({ client: mockClient as never, log });
|
||||
}
|
||||
|
||||
describe('backup command', () => {
|
||||
beforeEach(() => {
|
||||
vi.resetAllMocks();
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
// Clean up any created files
|
||||
try { fs.unlinkSync('test-backup.json'); } catch { /* ignore */ }
|
||||
});
|
||||
|
||||
it('creates backup command', () => {
|
||||
expect(makeCmd().name()).toBe('backup');
|
||||
const cmd = createBackupCommand({ client: mockClient as never, log });
|
||||
expect(cmd.name()).toBe('backup');
|
||||
});
|
||||
|
||||
it('shows status when enabled', async () => {
|
||||
mockClient.get.mockResolvedValue({
|
||||
enabled: true,
|
||||
repoUrl: 'ssh://git@10.0.0.194:2222/michal/mcp-backup.git',
|
||||
gitReachable: true,
|
||||
lastSyncAt: new Date().toISOString(),
|
||||
lastPushAt: null,
|
||||
lastError: null,
|
||||
pendingCount: 0,
|
||||
});
|
||||
it('calls API and writes file', async () => {
|
||||
const bundle = { version: '1', servers: [], profiles: [], projects: [] };
|
||||
mockClient.post.mockResolvedValue(bundle);
|
||||
|
||||
await makeCmd().parseAsync([], { from: 'user' });
|
||||
const cmd = createBackupCommand({ client: mockClient as never, log });
|
||||
await cmd.parseAsync(['-o', 'test-backup.json'], { from: 'user' });
|
||||
|
||||
expect(mockClient.get).toHaveBeenCalledWith('/api/v1/backup/status');
|
||||
expect(log).toHaveBeenCalledWith(expect.stringContaining('ssh://git@10.0.0.194:2222/michal/mcp-backup.git'));
|
||||
expect(log).toHaveBeenCalledWith(expect.stringContaining('synced'));
|
||||
expect(mockClient.post).toHaveBeenCalledWith('/api/v1/backup', {});
|
||||
expect(fs.existsSync('test-backup.json')).toBe(true);
|
||||
expect(log).toHaveBeenCalledWith(expect.stringContaining('test-backup.json'));
|
||||
});
|
||||
|
||||
it('shows disabled when not configured', async () => {
|
||||
mockClient.get.mockResolvedValue({
|
||||
enabled: false,
|
||||
repoUrl: null,
|
||||
gitReachable: false,
|
||||
lastSyncAt: null,
|
||||
lastPushAt: null,
|
||||
lastError: null,
|
||||
pendingCount: 0,
|
||||
});
|
||||
it('passes password when provided', async () => {
|
||||
mockClient.post.mockResolvedValue({ version: '1', servers: [], profiles: [], projects: [] });
|
||||
|
||||
await makeCmd().parseAsync([], { from: 'user' });
|
||||
const cmd = createBackupCommand({ client: mockClient as never, log });
|
||||
await cmd.parseAsync(['-o', 'test-backup.json', '-p', 'secret'], { from: 'user' });
|
||||
|
||||
expect(log).toHaveBeenCalledWith(expect.stringContaining('disabled'));
|
||||
expect(mockClient.post).toHaveBeenCalledWith('/api/v1/backup', { password: 'secret' });
|
||||
});
|
||||
|
||||
it('shows pending count', async () => {
|
||||
mockClient.get.mockResolvedValue({
|
||||
enabled: true,
|
||||
repoUrl: 'ssh://git@host/repo.git',
|
||||
gitReachable: true,
|
||||
lastSyncAt: null,
|
||||
lastPushAt: null,
|
||||
lastError: null,
|
||||
pendingCount: 5,
|
||||
it('passes resource filter', async () => {
|
||||
mockClient.post.mockResolvedValue({ version: '1', servers: [], profiles: [], projects: [] });
|
||||
|
||||
const cmd = createBackupCommand({ client: mockClient as never, log });
|
||||
await cmd.parseAsync(['-o', 'test-backup.json', '-r', 'servers,profiles'], { from: 'user' });
|
||||
|
||||
expect(mockClient.post).toHaveBeenCalledWith('/api/v1/backup', {
|
||||
resources: ['servers', 'profiles'],
|
||||
});
|
||||
|
||||
await makeCmd().parseAsync([], { from: 'user' });
|
||||
|
||||
expect(log).toHaveBeenCalledWith(expect.stringContaining('5 changes pending'));
|
||||
});
|
||||
|
||||
it('shows SSH public key in status when enabled', async () => {
|
||||
mockClient.get.mockResolvedValue({
|
||||
enabled: true,
|
||||
repoUrl: 'ssh://git@host/repo.git',
|
||||
publicKey: 'ssh-ed25519 AAAA... mcpd@mcpctl.local',
|
||||
gitReachable: true,
|
||||
lastSyncAt: null,
|
||||
lastPushAt: null,
|
||||
lastError: null,
|
||||
pendingCount: 0,
|
||||
});
|
||||
|
||||
await makeCmd().parseAsync([], { from: 'user' });
|
||||
|
||||
expect(log).toHaveBeenCalledWith(expect.stringContaining('ssh-ed25519 AAAA... mcpd@mcpctl.local'));
|
||||
});
|
||||
|
||||
it('shows setup instructions when disabled', async () => {
|
||||
mockClient.get.mockResolvedValue({
|
||||
enabled: false,
|
||||
repoUrl: null,
|
||||
publicKey: null,
|
||||
gitReachable: false,
|
||||
lastSyncAt: null,
|
||||
lastPushAt: null,
|
||||
lastError: null,
|
||||
pendingCount: 0,
|
||||
});
|
||||
|
||||
await makeCmd().parseAsync([], { from: 'user' });
|
||||
|
||||
expect(log).toHaveBeenCalledWith(expect.stringContaining('mcpctl create secret backup-ssh'));
|
||||
});
|
||||
|
||||
it('shows commit log', async () => {
|
||||
mockClient.get.mockResolvedValue({
|
||||
entries: [
|
||||
{ hash: 'abc1234567890', date: '2026-03-08T10:00:00Z', author: 'mcpd <mcpd@mcpctl.local>', message: 'Update server grafana', manual: false },
|
||||
{ hash: 'def4567890123', date: '2026-03-07T09:00:00Z', author: 'Michal <michal@test.com>', message: 'Manual fix', manual: true },
|
||||
],
|
||||
});
|
||||
|
||||
await makeCmd().parseAsync(['log'], { from: 'user' });
|
||||
|
||||
expect(mockClient.get).toHaveBeenCalledWith('/api/v1/backup/log?limit=20');
|
||||
expect(log).toHaveBeenCalledWith(expect.stringContaining('COMMIT'));
|
||||
expect(log).toHaveBeenCalledWith(expect.stringContaining('abc1234'));
|
||||
expect(log).toHaveBeenCalledWith(expect.stringContaining('[manual]'));
|
||||
});
|
||||
});
|
||||
|
||||
describe('backup restore subcommands', () => {
|
||||
describe('restore command', () => {
|
||||
const testFile = 'test-restore-input.json';
|
||||
|
||||
beforeEach(() => {
|
||||
vi.resetAllMocks();
|
||||
fs.writeFileSync(testFile, JSON.stringify({
|
||||
version: '1', servers: [], profiles: [], projects: [],
|
||||
}));
|
||||
});
|
||||
|
||||
it('lists restore points', async () => {
|
||||
mockClient.get.mockResolvedValue({
|
||||
entries: [
|
||||
{ hash: 'abc1234567890', date: '2026-03-08T10:00:00Z', author: 'mcpd <mcpd@mcpctl.local>', message: 'Sync' },
|
||||
],
|
||||
});
|
||||
|
||||
await makeCmd().parseAsync(['restore', 'list'], { from: 'user' });
|
||||
|
||||
expect(mockClient.get).toHaveBeenCalledWith('/api/v1/backup/log?limit=30');
|
||||
expect(log).toHaveBeenCalledWith(expect.stringContaining('abc1234'));
|
||||
afterEach(() => {
|
||||
try { fs.unlinkSync(testFile); } catch { /* ignore */ }
|
||||
});
|
||||
|
||||
it('shows restore diff preview', async () => {
|
||||
it('creates restore command', () => {
|
||||
const cmd = createRestoreCommand({ client: mockClient as never, log });
|
||||
expect(cmd.name()).toBe('restore');
|
||||
});
|
||||
|
||||
it('reads file and calls API', async () => {
|
||||
mockClient.post.mockResolvedValue({
|
||||
targetCommit: 'abc1234567890',
|
||||
targetDate: '2026-03-08T10:00:00Z',
|
||||
targetMessage: 'Snapshot',
|
||||
added: ['servers/new.yaml'],
|
||||
removed: ['servers/old.yaml'],
|
||||
modified: ['projects/default.yaml'],
|
||||
serversCreated: 1, serversSkipped: 0,
|
||||
profilesCreated: 0, profilesSkipped: 0,
|
||||
projectsCreated: 0, projectsSkipped: 0,
|
||||
errors: [],
|
||||
});
|
||||
|
||||
await makeCmd().parseAsync(['restore', 'diff', 'abc1234'], { from: 'user' });
|
||||
const cmd = createRestoreCommand({ client: mockClient as never, log });
|
||||
await cmd.parseAsync(['-i', testFile], { from: 'user' });
|
||||
|
||||
expect(mockClient.post).toHaveBeenCalledWith('/api/v1/backup/restore/preview', { commit: 'abc1234' });
|
||||
expect(log).toHaveBeenCalledWith(expect.stringContaining('+ servers/new.yaml'));
|
||||
expect(log).toHaveBeenCalledWith(expect.stringContaining('- servers/old.yaml'));
|
||||
expect(log).toHaveBeenCalledWith(expect.stringContaining('~ projects/default.yaml'));
|
||||
expect(mockClient.post).toHaveBeenCalledWith('/api/v1/restore', expect.objectContaining({
|
||||
bundle: expect.objectContaining({ version: '1' }),
|
||||
conflictStrategy: 'skip',
|
||||
}));
|
||||
expect(log).toHaveBeenCalledWith('Restore complete:');
|
||||
});
|
||||
|
||||
it('requires --force for restore', async () => {
|
||||
it('reports errors from restore', async () => {
|
||||
mockClient.post.mockResolvedValue({
|
||||
targetCommit: 'abc1234567890',
|
||||
targetDate: '2026-03-08T10:00:00Z',
|
||||
targetMessage: 'Snapshot',
|
||||
added: ['servers/new.yaml'],
|
||||
removed: [],
|
||||
modified: [],
|
||||
serversCreated: 0, serversSkipped: 0,
|
||||
profilesCreated: 0, profilesSkipped: 0,
|
||||
projectsCreated: 0, projectsSkipped: 0,
|
||||
errors: ['Server "x" already exists'],
|
||||
});
|
||||
|
||||
await makeCmd().parseAsync(['restore', 'to', 'abc1234'], { from: 'user' });
|
||||
const cmd = createRestoreCommand({ client: mockClient as never, log });
|
||||
await cmd.parseAsync(['-i', testFile], { from: 'user' });
|
||||
|
||||
expect(mockClient.post).toHaveBeenCalledWith('/api/v1/backup/restore/preview', { commit: 'abc1234' });
|
||||
expect(mockClient.post).not.toHaveBeenCalledWith('/api/v1/backup/restore', expect.anything());
|
||||
expect(log).toHaveBeenCalledWith(expect.stringContaining('--force'));
|
||||
expect(log).toHaveBeenCalledWith(expect.stringContaining('Errors'));
|
||||
});
|
||||
|
||||
it('executes restore with --force', async () => {
|
||||
mockClient.post
|
||||
.mockResolvedValueOnce({
|
||||
targetCommit: 'abc1234567890',
|
||||
targetDate: '2026-03-08T10:00:00Z',
|
||||
targetMessage: 'Snapshot',
|
||||
added: ['servers/new.yaml'],
|
||||
removed: [],
|
||||
modified: [],
|
||||
})
|
||||
.mockResolvedValueOnce({
|
||||
branchName: 'timeline/20260308-100000',
|
||||
applied: 1,
|
||||
deleted: 0,
|
||||
errors: [],
|
||||
});
|
||||
it('logs error for missing file', async () => {
|
||||
const cmd = createRestoreCommand({ client: mockClient as never, log });
|
||||
await cmd.parseAsync(['-i', 'nonexistent.json'], { from: 'user' });
|
||||
|
||||
await makeCmd().parseAsync(['restore', 'to', 'abc1234', '--force'], { from: 'user' });
|
||||
|
||||
expect(mockClient.post).toHaveBeenCalledWith('/api/v1/backup/restore', { commit: 'abc1234' });
|
||||
expect(log).toHaveBeenCalledWith(expect.stringContaining('1 applied'));
|
||||
expect(log).toHaveBeenCalledWith(expect.stringContaining('timeline/20260308-100000'));
|
||||
});
|
||||
|
||||
it('reports restore errors', async () => {
|
||||
mockClient.post
|
||||
.mockResolvedValueOnce({
|
||||
targetCommit: 'abc1234567890',
|
||||
targetDate: '2026-03-08T10:00:00Z',
|
||||
targetMessage: 'Snapshot',
|
||||
added: [],
|
||||
removed: [],
|
||||
modified: ['servers/broken.yaml'],
|
||||
})
|
||||
.mockResolvedValueOnce({
|
||||
branchName: 'timeline/20260308-100000',
|
||||
applied: 0,
|
||||
deleted: 0,
|
||||
errors: ['Failed to apply servers/broken.yaml: invalid YAML'],
|
||||
});
|
||||
|
||||
await makeCmd().parseAsync(['restore', 'to', 'abc1234', '--force'], { from: 'user' });
|
||||
|
||||
expect(log).toHaveBeenCalledWith('Errors:');
|
||||
expect(log).toHaveBeenCalledWith(expect.stringContaining('invalid YAML'));
|
||||
expect(log).toHaveBeenCalledWith(expect.stringContaining('not found'));
|
||||
expect(mockClient.post).not.toHaveBeenCalled();
|
||||
});
|
||||
});
|
||||
|
||||
@@ -8,14 +8,19 @@ import { saveCredentials, loadCredentials } from '../../src/auth/index.js';
|
||||
|
||||
function mockClient(): ApiClient {
|
||||
return {
|
||||
get: vi.fn(async () => ({})),
|
||||
get: vi.fn(async () => ({
|
||||
mcpServers: {
|
||||
'slack--default': { command: 'npx', args: ['-y', '@anthropic/slack-mcp'], env: { WORKSPACE: 'test' } },
|
||||
'github--default': { command: 'npx', args: ['-y', '@anthropic/github-mcp'] },
|
||||
},
|
||||
})),
|
||||
post: vi.fn(async () => ({ token: 'impersonated-tok', user: { email: 'other@test.com' } })),
|
||||
put: vi.fn(async () => ({})),
|
||||
delete: vi.fn(async () => {}),
|
||||
} as unknown as ApiClient;
|
||||
}
|
||||
|
||||
describe('config claude', () => {
|
||||
describe('config claude-generate', () => {
|
||||
let client: ReturnType<typeof mockClient>;
|
||||
let output: string[];
|
||||
let tmpDir: string;
|
||||
@@ -31,23 +36,18 @@ describe('config claude', () => {
|
||||
rmSync(tmpDir, { recursive: true, force: true });
|
||||
});
|
||||
|
||||
it('generates .mcp.json with mcpctl mcp bridge entry', async () => {
|
||||
it('generates .mcp.json from project config', async () => {
|
||||
const outPath = join(tmpDir, '.mcp.json');
|
||||
const cmd = createConfigCommand(
|
||||
{ configDeps: { configDir: tmpDir }, log },
|
||||
{ client, credentialsDeps: { configDir: tmpDir }, log },
|
||||
);
|
||||
await cmd.parseAsync(['claude', '--project', 'homeautomation', '-o', outPath], { from: 'user' });
|
||||
|
||||
// No API call should be made
|
||||
expect(client.get).not.toHaveBeenCalled();
|
||||
await cmd.parseAsync(['claude-generate', '--project', 'proj-1', '-o', outPath], { from: 'user' });
|
||||
|
||||
expect(client.get).toHaveBeenCalledWith('/api/v1/projects/proj-1/mcp-config');
|
||||
const written = JSON.parse(readFileSync(outPath, 'utf-8'));
|
||||
expect(written.mcpServers['homeautomation']).toEqual({
|
||||
command: 'mcpctl',
|
||||
args: ['mcp', '-p', 'homeautomation'],
|
||||
});
|
||||
expect(output.join('\n')).toContain('1 server(s)');
|
||||
expect(written.mcpServers['slack--default']).toBeDefined();
|
||||
expect(output.join('\n')).toContain('2 server(s)');
|
||||
});
|
||||
|
||||
it('prints to stdout with --stdout', async () => {
|
||||
@@ -55,16 +55,12 @@ describe('config claude', () => {
|
||||
{ configDeps: { configDir: tmpDir }, log },
|
||||
{ client, credentialsDeps: { configDir: tmpDir }, log },
|
||||
);
|
||||
await cmd.parseAsync(['claude', '--project', 'myproj', '--stdout'], { from: 'user' });
|
||||
await cmd.parseAsync(['claude-generate', '--project', 'proj-1', '--stdout'], { from: 'user' });
|
||||
|
||||
const parsed = JSON.parse(output[0]);
|
||||
expect(parsed.mcpServers['myproj']).toEqual({
|
||||
command: 'mcpctl',
|
||||
args: ['mcp', '-p', 'myproj'],
|
||||
});
|
||||
expect(output[0]).toContain('mcpServers');
|
||||
});
|
||||
|
||||
it('always merges with existing .mcp.json', async () => {
|
||||
it('merges with existing .mcp.json', async () => {
|
||||
const outPath = join(tmpDir, '.mcp.json');
|
||||
writeFileSync(outPath, JSON.stringify({
|
||||
mcpServers: { 'existing--server': { command: 'echo', args: [] } },
|
||||
@@ -74,71 +70,12 @@ describe('config claude', () => {
|
||||
{ configDeps: { configDir: tmpDir }, log },
|
||||
{ client, credentialsDeps: { configDir: tmpDir }, log },
|
||||
);
|
||||
await cmd.parseAsync(['claude', '--project', 'proj-1', '-o', outPath], { from: 'user' });
|
||||
await cmd.parseAsync(['claude-generate', '--project', 'proj-1', '-o', outPath, '--merge'], { from: 'user' });
|
||||
|
||||
const written = JSON.parse(readFileSync(outPath, 'utf-8'));
|
||||
expect(written.mcpServers['existing--server']).toBeDefined();
|
||||
expect(written.mcpServers['proj-1']).toEqual({
|
||||
command: 'mcpctl',
|
||||
args: ['mcp', '-p', 'proj-1'],
|
||||
});
|
||||
expect(output.join('\n')).toContain('2 server(s)');
|
||||
});
|
||||
|
||||
it('adds inspect MCP server with --inspect', async () => {
|
||||
const outPath = join(tmpDir, '.mcp.json');
|
||||
const cmd = createConfigCommand(
|
||||
{ configDeps: { configDir: tmpDir }, log },
|
||||
{ client, credentialsDeps: { configDir: tmpDir }, log },
|
||||
);
|
||||
await cmd.parseAsync(['claude', '--inspect', '-o', outPath], { from: 'user' });
|
||||
|
||||
const written = JSON.parse(readFileSync(outPath, 'utf-8'));
|
||||
expect(written.mcpServers['mcpctl-inspect']).toEqual({
|
||||
command: 'mcpctl',
|
||||
args: ['console', '--stdin-mcp'],
|
||||
});
|
||||
expect(output.join('\n')).toContain('1 server(s)');
|
||||
});
|
||||
|
||||
it('adds both project and inspect with --project --inspect', async () => {
|
||||
const outPath = join(tmpDir, '.mcp.json');
|
||||
const cmd = createConfigCommand(
|
||||
{ configDeps: { configDir: tmpDir }, log },
|
||||
{ client, credentialsDeps: { configDir: tmpDir }, log },
|
||||
);
|
||||
await cmd.parseAsync(['claude', '--project', 'ha', '--inspect', '-o', outPath], { from: 'user' });
|
||||
|
||||
const written = JSON.parse(readFileSync(outPath, 'utf-8'));
|
||||
expect(written.mcpServers['ha']).toBeDefined();
|
||||
expect(written.mcpServers['mcpctl-inspect']).toBeDefined();
|
||||
expect(output.join('\n')).toContain('2 server(s)');
|
||||
});
|
||||
|
||||
it('backward compat: claude-generate still works', async () => {
|
||||
const outPath = join(tmpDir, '.mcp.json');
|
||||
const cmd = createConfigCommand(
|
||||
{ configDeps: { configDir: tmpDir }, log },
|
||||
{ client, credentialsDeps: { configDir: tmpDir }, log },
|
||||
);
|
||||
await cmd.parseAsync(['claude-generate', '--project', 'proj-1', '-o', outPath], { from: 'user' });
|
||||
|
||||
const written = JSON.parse(readFileSync(outPath, 'utf-8'));
|
||||
expect(written.mcpServers['proj-1']).toEqual({
|
||||
command: 'mcpctl',
|
||||
args: ['mcp', '-p', 'proj-1'],
|
||||
});
|
||||
});
|
||||
|
||||
it('uses project name as the server key', async () => {
|
||||
const outPath = join(tmpDir, '.mcp.json');
|
||||
const cmd = createConfigCommand(
|
||||
{ configDeps: { configDir: tmpDir }, log },
|
||||
);
|
||||
await cmd.parseAsync(['claude', '--project', 'my-fancy-project', '-o', outPath], { from: 'user' });
|
||||
|
||||
const written = JSON.parse(readFileSync(outPath, 'utf-8'));
|
||||
expect(Object.keys(written.mcpServers)).toEqual(['my-fancy-project']);
|
||||
expect(written.mcpServers['slack--default']).toBeDefined();
|
||||
expect(output.join('\n')).toContain('3 server(s)');
|
||||
});
|
||||
});
|
||||
|
||||
|
||||
@@ -1,402 +0,0 @@
|
||||
import { describe, it, expect, vi, beforeEach } from 'vitest';
|
||||
import { createConfigSetupCommand } from '../../src/commands/config-setup.js';
|
||||
import type { ConfigSetupDeps, ConfigSetupPrompt } from '../../src/commands/config-setup.js';
|
||||
import type { SecretStore } from '@mcpctl/shared';
|
||||
import { mkdtempSync, rmSync, readFileSync } from 'node:fs';
|
||||
import { join } from 'node:path';
|
||||
import { tmpdir } from 'node:os';
|
||||
|
||||
let tempDir: string;
|
||||
let logs: string[];
|
||||
|
||||
beforeEach(() => {
|
||||
tempDir = mkdtempSync(join(tmpdir(), 'mcpctl-config-setup-test-'));
|
||||
logs = [];
|
||||
});
|
||||
|
||||
function cleanup() {
|
||||
rmSync(tempDir, { recursive: true, force: true });
|
||||
}
|
||||
|
||||
function mockSecretStore(secrets: Record<string, string> = {}): SecretStore {
|
||||
const store: Record<string, string> = { ...secrets };
|
||||
return {
|
||||
get: vi.fn(async (key: string) => store[key] ?? null),
|
||||
set: vi.fn(async (key: string, value: string) => { store[key] = value; }),
|
||||
delete: vi.fn(async () => true),
|
||||
backend: () => 'mock',
|
||||
};
|
||||
}
|
||||
|
||||
function mockPrompt(answers: unknown[]): ConfigSetupPrompt {
|
||||
let callIndex = 0;
|
||||
return {
|
||||
select: vi.fn(async () => answers[callIndex++]),
|
||||
input: vi.fn(async () => answers[callIndex++] as string),
|
||||
password: vi.fn(async () => answers[callIndex++] as string),
|
||||
confirm: vi.fn(async () => answers[callIndex++] as boolean),
|
||||
};
|
||||
}
|
||||
|
||||
function buildDeps(overrides: {
|
||||
secrets?: Record<string, string>;
|
||||
answers?: unknown[];
|
||||
fetchModels?: ConfigSetupDeps['fetchModels'];
|
||||
whichBinary?: ConfigSetupDeps['whichBinary'];
|
||||
} = {}): ConfigSetupDeps {
|
||||
return {
|
||||
configDeps: { configDir: tempDir },
|
||||
secretStore: mockSecretStore(overrides.secrets),
|
||||
log: (...args: string[]) => logs.push(args.join(' ')),
|
||||
prompt: mockPrompt(overrides.answers ?? []),
|
||||
fetchModels: overrides.fetchModels ?? vi.fn(async () => []),
|
||||
whichBinary: overrides.whichBinary ?? vi.fn(async () => '/usr/bin/gemini'),
|
||||
};
|
||||
}
|
||||
|
||||
function readConfig(): Record<string, unknown> {
|
||||
const raw = readFileSync(join(tempDir, 'config.json'), 'utf-8');
|
||||
return JSON.parse(raw) as Record<string, unknown>;
|
||||
}
|
||||
|
||||
async function runSetup(deps: ConfigSetupDeps): Promise<void> {
|
||||
const cmd = createConfigSetupCommand(deps);
|
||||
await cmd.parseAsync([], { from: 'user' });
|
||||
}
|
||||
|
||||
describe('config setup wizard', () => {
|
||||
describe('provider: none', () => {
|
||||
it('disables LLM and saves config', async () => {
|
||||
const deps = buildDeps({ answers: ['simple', 'none'] });
|
||||
await runSetup(deps);
|
||||
|
||||
const config = readConfig();
|
||||
expect(config.llm).toEqual({ provider: 'none' });
|
||||
expect(logs.some((l) => l.includes('LLM disabled'))).toBe(true);
|
||||
cleanup();
|
||||
});
|
||||
});
|
||||
|
||||
describe('provider: gemini-cli', () => {
|
||||
it('auto-detects binary path and saves config', async () => {
|
||||
// Answers: select provider, select model (no binary prompt — auto-detected)
|
||||
const deps = buildDeps({
|
||||
answers: ['simple', 'gemini-cli', 'gemini-2.5-flash'],
|
||||
whichBinary: vi.fn(async () => '/home/user/.npm-global/bin/gemini'),
|
||||
});
|
||||
await runSetup(deps);
|
||||
|
||||
const config = readConfig();
|
||||
const llm = config.llm as Record<string, unknown>;
|
||||
expect(llm.provider).toBe('gemini-cli');
|
||||
expect(llm.model).toBe('gemini-2.5-flash');
|
||||
expect(llm.binaryPath).toBe('/home/user/.npm-global/bin/gemini');
|
||||
expect(logs.some((l) => l.includes('Found gemini at'))).toBe(true);
|
||||
cleanup();
|
||||
});
|
||||
|
||||
it('prompts for manual path when binary not found', async () => {
|
||||
// Answers: select provider, select model, enter manual path
|
||||
const deps = buildDeps({
|
||||
answers: ['simple', 'gemini-cli', 'gemini-2.5-flash', '/opt/gemini'],
|
||||
whichBinary: vi.fn(async () => null),
|
||||
});
|
||||
await runSetup(deps);
|
||||
|
||||
const config = readConfig();
|
||||
const llm = config.llm as Record<string, unknown>;
|
||||
expect(llm.binaryPath).toBe('/opt/gemini');
|
||||
expect(logs.some((l) => l.includes('not found'))).toBe(true);
|
||||
cleanup();
|
||||
});
|
||||
|
||||
it('saves gemini-cli with custom model', async () => {
|
||||
// Answers: select provider, select custom, enter model name
|
||||
const deps = buildDeps({
|
||||
answers: ['simple', 'gemini-cli', '__custom__', 'gemini-3.0-flash'],
|
||||
whichBinary: vi.fn(async () => '/usr/bin/gemini'),
|
||||
});
|
||||
await runSetup(deps);
|
||||
|
||||
const config = readConfig();
|
||||
const llm = config.llm as Record<string, unknown>;
|
||||
expect(llm.model).toBe('gemini-3.0-flash');
|
||||
cleanup();
|
||||
});
|
||||
});
|
||||
|
||||
describe('provider: ollama', () => {
|
||||
it('fetches models and allows selection', async () => {
|
||||
const fetchModels = vi.fn(async () => ['llama3.2', 'codellama', 'mistral']);
|
||||
// Answers: select provider, enter URL, select model
|
||||
const deps = buildDeps({
|
||||
answers: ['simple', 'ollama', 'http://localhost:11434', 'codellama'],
|
||||
fetchModels,
|
||||
});
|
||||
await runSetup(deps);
|
||||
|
||||
expect(fetchModels).toHaveBeenCalledWith('http://localhost:11434', '/api/tags');
|
||||
const config = readConfig();
|
||||
const llm = config.llm as Record<string, unknown>;
|
||||
expect(llm.provider).toBe('ollama');
|
||||
expect(llm.model).toBe('codellama');
|
||||
expect(llm.url).toBe('http://localhost:11434');
|
||||
cleanup();
|
||||
});
|
||||
|
||||
it('falls back to manual input when fetch fails', async () => {
|
||||
const fetchModels = vi.fn(async () => []);
|
||||
// Answers: select provider, enter URL, enter model manually
|
||||
const deps = buildDeps({
|
||||
answers: ['simple', 'ollama', 'http://localhost:11434', 'llama3.2'],
|
||||
fetchModels,
|
||||
});
|
||||
await runSetup(deps);
|
||||
|
||||
const config = readConfig();
|
||||
expect((config.llm as Record<string, unknown>).model).toBe('llama3.2');
|
||||
cleanup();
|
||||
});
|
||||
});
|
||||
|
||||
describe('provider: anthropic', () => {
|
||||
it('prompts for API key and saves to secret store', async () => {
|
||||
// Flow: simple → anthropic → (no existing key) → whichBinary('claude') returns null →
|
||||
// log tip → password prompt → select model
|
||||
const deps = buildDeps({
|
||||
answers: ['simple', 'anthropic', 'sk-ant-new-key', 'claude-haiku-3-5-20241022'],
|
||||
whichBinary: vi.fn(async () => null),
|
||||
});
|
||||
await runSetup(deps);
|
||||
|
||||
expect(deps.secretStore.set).toHaveBeenCalledWith('anthropic-api-key', 'sk-ant-new-key');
|
||||
const config = readConfig();
|
||||
const llm = config.llm as Record<string, unknown>;
|
||||
expect(llm.provider).toBe('anthropic');
|
||||
expect(llm.model).toBe('claude-haiku-3-5-20241022');
|
||||
// API key should NOT be in config file
|
||||
expect(llm).not.toHaveProperty('apiKey');
|
||||
cleanup();
|
||||
});
|
||||
|
||||
it('shows existing key masked and allows keeping it', async () => {
|
||||
// Answers: select provider, confirm change=false, select model
|
||||
const deps = buildDeps({
|
||||
secrets: { 'anthropic-api-key': 'sk-ant-existing-key-1234' },
|
||||
answers: ['simple', 'anthropic', false, 'claude-sonnet-4-20250514'],
|
||||
});
|
||||
await runSetup(deps);
|
||||
|
||||
// Should NOT have called set (kept existing key)
|
||||
expect(deps.secretStore.set).not.toHaveBeenCalled();
|
||||
const config = readConfig();
|
||||
expect((config.llm as Record<string, unknown>).model).toBe('claude-sonnet-4-20250514');
|
||||
cleanup();
|
||||
});
|
||||
|
||||
it('allows replacing existing key', async () => {
|
||||
// Answers: select provider, confirm change=true, enter new key, select model
|
||||
// Change=true → promptForAnthropicKey → whichBinary returns null → password prompt
|
||||
const deps = buildDeps({
|
||||
secrets: { 'anthropic-api-key': 'sk-ant-old' },
|
||||
answers: ['simple', 'anthropic', true, 'sk-ant-new', 'claude-haiku-3-5-20241022'],
|
||||
whichBinary: vi.fn(async () => null),
|
||||
});
|
||||
await runSetup(deps);
|
||||
|
||||
expect(deps.secretStore.set).toHaveBeenCalledWith('anthropic-api-key', 'sk-ant-new');
|
||||
cleanup();
|
||||
});
|
||||
|
||||
it('detects claude binary and prompts for OAuth token', async () => {
|
||||
// Flow: simple → anthropic → (no existing key) → whichBinary finds claude →
|
||||
// confirm OAuth=true → password prompt → select model
|
||||
const deps = buildDeps({
|
||||
answers: ['simple', 'anthropic', true, 'sk-ant-oat01-test-token', 'claude-haiku-3-5-20241022'],
|
||||
whichBinary: vi.fn(async () => '/usr/bin/claude'),
|
||||
});
|
||||
await runSetup(deps);
|
||||
|
||||
expect(deps.secretStore.set).toHaveBeenCalledWith('anthropic-api-key', 'sk-ant-oat01-test-token');
|
||||
expect(logs.some((l) => l.includes('Found Claude CLI at'))).toBe(true);
|
||||
expect(logs.some((l) => l.includes('claude setup-token'))).toBe(true);
|
||||
const config = readConfig();
|
||||
const llm = config.llm as Record<string, unknown>;
|
||||
expect(llm.provider).toBe('anthropic');
|
||||
expect(llm.model).toBe('claude-haiku-3-5-20241022');
|
||||
cleanup();
|
||||
});
|
||||
|
||||
it('falls back to API key when claude binary not found', async () => {
|
||||
// Flow: simple → anthropic → (no existing key) → whichBinary returns null →
|
||||
// password prompt (API key) → select model
|
||||
const deps = buildDeps({
|
||||
answers: ['simple', 'anthropic', 'sk-ant-api03-test', 'claude-sonnet-4-20250514'],
|
||||
whichBinary: vi.fn(async () => null),
|
||||
});
|
||||
await runSetup(deps);
|
||||
|
||||
expect(deps.secretStore.set).toHaveBeenCalledWith('anthropic-api-key', 'sk-ant-api03-test');
|
||||
expect(logs.some((l) => l.includes('Tip: Install Claude CLI'))).toBe(true);
|
||||
const config = readConfig();
|
||||
const llm = config.llm as Record<string, unknown>;
|
||||
expect(llm.model).toBe('claude-sonnet-4-20250514');
|
||||
cleanup();
|
||||
});
|
||||
|
||||
it('shows OAuth label when existing token is OAuth', async () => {
|
||||
// Flow: simple → anthropic → existing OAuth key → confirm change=false → select model
|
||||
const deps = buildDeps({
|
||||
secrets: { 'anthropic-api-key': 'sk-ant-oat01-existing-token' },
|
||||
answers: ['simple', 'anthropic', false, 'claude-haiku-3-5-20241022'],
|
||||
});
|
||||
await runSetup(deps);
|
||||
|
||||
// Should NOT have called set (kept existing key)
|
||||
expect(deps.secretStore.set).not.toHaveBeenCalled();
|
||||
// Confirm prompt should have received an OAuth label
|
||||
expect(deps.prompt.confirm).toHaveBeenCalledWith(
|
||||
expect.stringContaining('OAuth token stored'),
|
||||
false,
|
||||
);
|
||||
cleanup();
|
||||
});
|
||||
|
||||
it('declines OAuth and enters API key instead', async () => {
|
||||
// Flow: simple → anthropic → (no existing key) → whichBinary finds claude →
|
||||
// confirm OAuth=false → password prompt (API key) → select model
|
||||
const deps = buildDeps({
|
||||
answers: ['simple', 'anthropic', false, 'sk-ant-api03-manual', 'claude-sonnet-4-20250514'],
|
||||
whichBinary: vi.fn(async () => '/usr/bin/claude'),
|
||||
});
|
||||
await runSetup(deps);
|
||||
|
||||
expect(deps.secretStore.set).toHaveBeenCalledWith('anthropic-api-key', 'sk-ant-api03-manual');
|
||||
cleanup();
|
||||
});
|
||||
});
|
||||
|
||||
describe('provider: vllm', () => {
|
||||
it('fetches models from vLLM and allows selection', async () => {
|
||||
const fetchModels = vi.fn(async () => ['my-model', 'llama-70b']);
|
||||
// Answers: select provider, enter URL, select model
|
||||
const deps = buildDeps({
|
||||
answers: ['simple', 'vllm', 'http://gpu:8000', 'llama-70b'],
|
||||
fetchModels,
|
||||
});
|
||||
await runSetup(deps);
|
||||
|
||||
expect(fetchModels).toHaveBeenCalledWith('http://gpu:8000', '/v1/models');
|
||||
const config = readConfig();
|
||||
const llm = config.llm as Record<string, unknown>;
|
||||
expect(llm.provider).toBe('vllm');
|
||||
expect(llm.url).toBe('http://gpu:8000');
|
||||
expect(llm.model).toBe('llama-70b');
|
||||
cleanup();
|
||||
});
|
||||
});
|
||||
|
||||
describe('provider: openai', () => {
|
||||
it('prompts for key, model, and optional custom endpoint', async () => {
|
||||
// Answers: select provider, enter key, enter model, confirm custom URL=true, enter URL
|
||||
const deps = buildDeps({
|
||||
answers: ['simple', 'openai', 'sk-openai-key', 'gpt-4o', true, 'https://custom.api.com'],
|
||||
});
|
||||
await runSetup(deps);
|
||||
|
||||
expect(deps.secretStore.set).toHaveBeenCalledWith('openai-api-key', 'sk-openai-key');
|
||||
const config = readConfig();
|
||||
const llm = config.llm as Record<string, unknown>;
|
||||
expect(llm.provider).toBe('openai');
|
||||
expect(llm.model).toBe('gpt-4o');
|
||||
expect(llm.url).toBe('https://custom.api.com');
|
||||
cleanup();
|
||||
});
|
||||
|
||||
it('skips custom URL when not requested', async () => {
|
||||
// Answers: select provider, enter key, enter model, confirm custom URL=false
|
||||
const deps = buildDeps({
|
||||
answers: ['simple', 'openai', 'sk-openai-key', 'gpt-4o-mini', false],
|
||||
});
|
||||
await runSetup(deps);
|
||||
|
||||
const config = readConfig();
|
||||
const llm = config.llm as Record<string, unknown>;
|
||||
expect(llm.url).toBeUndefined();
|
||||
cleanup();
|
||||
});
|
||||
});
|
||||
|
||||
describe('provider: deepseek', () => {
|
||||
it('prompts for key and model', async () => {
|
||||
// Answers: select provider, enter key, select model
|
||||
const deps = buildDeps({
|
||||
answers: ['simple', 'deepseek', 'sk-ds-key', 'deepseek-chat'],
|
||||
});
|
||||
await runSetup(deps);
|
||||
|
||||
expect(deps.secretStore.set).toHaveBeenCalledWith('deepseek-api-key', 'sk-ds-key');
|
||||
const config = readConfig();
|
||||
const llm = config.llm as Record<string, unknown>;
|
||||
expect(llm.provider).toBe('deepseek');
|
||||
expect(llm.model).toBe('deepseek-chat');
|
||||
cleanup();
|
||||
});
|
||||
});
|
||||
|
||||
describe('advanced mode: duplicate names', () => {
|
||||
it('generates unique default name when same provider added to both tiers', async () => {
|
||||
// Flow: advanced →
|
||||
// add fast? yes → anthropic → name "anthropic" (default) → whichBinary null → key → model → add more? no →
|
||||
// add heavy? yes → anthropic → name "anthropic-2" (deduped default) → existing key, keep → model → add more? no
|
||||
const deps = buildDeps({
|
||||
answers: [
|
||||
'advanced',
|
||||
// fast tier
|
||||
true, // add fast?
|
||||
'anthropic', // fast provider type
|
||||
'anthropic', // provider name (default)
|
||||
'sk-ant-oat01-token', // API key (whichBinary returns null → password prompt)
|
||||
'claude-haiku-3-5-20241022', // model
|
||||
false, // add another fast?
|
||||
// heavy tier
|
||||
true, // add heavy?
|
||||
'anthropic', // heavy provider type
|
||||
'anthropic-2', // provider name (deduped default)
|
||||
false, // keep existing key
|
||||
'claude-opus-4-20250514', // model
|
||||
false, // add another heavy?
|
||||
],
|
||||
whichBinary: vi.fn(async () => null),
|
||||
});
|
||||
await runSetup(deps);
|
||||
|
||||
const config = readConfig();
|
||||
const llm = config.llm as { providers: Array<{ name: string; type: string; model: string; tier: string }> };
|
||||
expect(llm.providers).toHaveLength(2);
|
||||
expect(llm.providers[0].name).toBe('anthropic');
|
||||
expect(llm.providers[0].tier).toBe('fast');
|
||||
expect(llm.providers[1].name).toBe('anthropic-2');
|
||||
expect(llm.providers[1].tier).toBe('heavy');
|
||||
cleanup();
|
||||
});
|
||||
});
|
||||
|
||||
describe('output messages', () => {
|
||||
it('shows restart instruction', async () => {
|
||||
const deps = buildDeps({ answers: ['simple', 'gemini-cli', 'gemini-2.5-flash'] });
|
||||
await runSetup(deps);
|
||||
|
||||
expect(logs.some((l) => l.includes('systemctl --user restart mcplocal'))).toBe(true);
|
||||
cleanup();
|
||||
});
|
||||
|
||||
it('shows configured provider and model', async () => {
|
||||
const deps = buildDeps({ answers: ['simple', 'gemini-cli', 'gemini-2.5-flash'] });
|
||||
await runSetup(deps);
|
||||
|
||||
expect(logs.some((l) => l.includes('gemini-cli') && l.includes('gemini-2.5-flash'))).toBe(true);
|
||||
cleanup();
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -1,464 +0,0 @@
|
||||
import { describe, it, expect, vi, beforeAll, afterAll, beforeEach } from 'vitest';
|
||||
import http from 'node:http';
|
||||
import { McpSession } from '../../src/commands/console/mcp-session.js';
|
||||
import type { LogEntry } from '../../src/commands/console/mcp-session.js';
|
||||
|
||||
// ---- Mock MCP server ----
|
||||
|
||||
let mockServer: http.Server;
|
||||
let mockPort: number;
|
||||
let sessionCounter = 0;
|
||||
|
||||
interface RecordedRequest {
|
||||
method: string;
|
||||
url: string;
|
||||
headers: http.IncomingHttpHeaders;
|
||||
body: string;
|
||||
}
|
||||
|
||||
const recorded: RecordedRequest[] = [];
|
||||
|
||||
function makeJsonRpcResponse(id: number | string | null, result: unknown) {
|
||||
return JSON.stringify({ jsonrpc: '2.0', id, result });
|
||||
}
|
||||
|
||||
function makeJsonRpcError(id: number | string, code: number, message: string) {
|
||||
return JSON.stringify({ jsonrpc: '2.0', id, error: { code, message } });
|
||||
}
|
||||
|
||||
beforeAll(async () => {
|
||||
mockServer = http.createServer((req, res) => {
|
||||
const chunks: Buffer[] = [];
|
||||
req.on('data', (c: Buffer) => chunks.push(c));
|
||||
req.on('end', () => {
|
||||
const body = Buffer.concat(chunks).toString('utf-8');
|
||||
recorded.push({ method: req.method ?? '', url: req.url ?? '', headers: req.headers, body });
|
||||
|
||||
if (req.method === 'DELETE') {
|
||||
res.writeHead(200);
|
||||
res.end();
|
||||
return;
|
||||
}
|
||||
|
||||
// Assign session ID on first request
|
||||
const sid = req.headers['mcp-session-id'] ?? `session-${++sessionCounter}`;
|
||||
res.setHeader('mcp-session-id', sid);
|
||||
res.setHeader('content-type', 'application/json');
|
||||
|
||||
let parsed: { method?: string; id?: number | string };
|
||||
try {
|
||||
parsed = JSON.parse(body);
|
||||
} catch {
|
||||
res.writeHead(400);
|
||||
res.end(JSON.stringify({ error: 'Invalid JSON' }));
|
||||
return;
|
||||
}
|
||||
|
||||
const method = parsed.method;
|
||||
const id = parsed.id;
|
||||
|
||||
switch (method) {
|
||||
case 'initialize':
|
||||
res.writeHead(200);
|
||||
res.end(makeJsonRpcResponse(id!, {
|
||||
protocolVersion: '2024-11-05',
|
||||
capabilities: { tools: {} },
|
||||
serverInfo: { name: 'test-server', version: '1.0.0' },
|
||||
}));
|
||||
break;
|
||||
case 'notifications/initialized':
|
||||
res.writeHead(200);
|
||||
res.end();
|
||||
break;
|
||||
case 'tools/list':
|
||||
res.writeHead(200);
|
||||
res.end(makeJsonRpcResponse(id!, {
|
||||
tools: [
|
||||
{ name: 'begin_session', description: 'Begin a session', inputSchema: { type: 'object' } },
|
||||
{ name: 'query_grafana', description: 'Query Grafana', inputSchema: { type: 'object', properties: { query: { type: 'string' } } } },
|
||||
],
|
||||
}));
|
||||
break;
|
||||
case 'tools/call':
|
||||
res.writeHead(200);
|
||||
res.end(makeJsonRpcResponse(id!, {
|
||||
content: [{ type: 'text', text: 'tool result' }],
|
||||
}));
|
||||
break;
|
||||
case 'resources/list':
|
||||
res.writeHead(200);
|
||||
res.end(makeJsonRpcResponse(id!, {
|
||||
resources: [
|
||||
{ uri: 'config://main', name: 'Main Config', mimeType: 'application/json' },
|
||||
],
|
||||
}));
|
||||
break;
|
||||
case 'resources/read':
|
||||
res.writeHead(200);
|
||||
res.end(makeJsonRpcResponse(id!, {
|
||||
contents: [{ uri: 'config://main', mimeType: 'application/json', text: '{"key": "value"}' }],
|
||||
}));
|
||||
break;
|
||||
case 'prompts/list':
|
||||
res.writeHead(200);
|
||||
res.end(makeJsonRpcResponse(id!, {
|
||||
prompts: [
|
||||
{ name: 'system-prompt', description: 'System prompt' },
|
||||
],
|
||||
}));
|
||||
break;
|
||||
case 'prompts/get':
|
||||
res.writeHead(200);
|
||||
res.end(makeJsonRpcResponse(id!, {
|
||||
messages: [{ role: 'user', content: { type: 'text', text: 'Hello' } }],
|
||||
}));
|
||||
break;
|
||||
case 'error-method':
|
||||
res.writeHead(200);
|
||||
res.end(makeJsonRpcError(id!, -32601, 'Method not found'));
|
||||
break;
|
||||
default:
|
||||
// Raw/unknown method
|
||||
res.writeHead(200);
|
||||
res.end(makeJsonRpcResponse(id ?? null, { echo: method }));
|
||||
break;
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
await new Promise<void>((resolve) => {
|
||||
mockServer.listen(0, '127.0.0.1', () => {
|
||||
const addr = mockServer.address();
|
||||
if (addr && typeof addr === 'object') {
|
||||
mockPort = addr.port;
|
||||
}
|
||||
resolve();
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
afterAll(() => {
|
||||
mockServer.close();
|
||||
});
|
||||
|
||||
beforeEach(() => {
|
||||
recorded.length = 0;
|
||||
sessionCounter = 0;
|
||||
});
|
||||
|
||||
function makeSession(token?: string) {
|
||||
return new McpSession(`http://127.0.0.1:${mockPort}/projects/test/mcp`, token);
|
||||
}
|
||||
|
||||
describe('McpSession', () => {
|
||||
describe('initialize', () => {
|
||||
it('sends initialize and notifications/initialized', async () => {
|
||||
const session = makeSession();
|
||||
const result = await session.initialize();
|
||||
|
||||
expect(result.protocolVersion).toBe('2024-11-05');
|
||||
expect(result.serverInfo.name).toBe('test-server');
|
||||
expect(result.capabilities).toHaveProperty('tools');
|
||||
|
||||
// Should have sent 2 requests: initialize + notifications/initialized
|
||||
expect(recorded.length).toBe(2);
|
||||
expect(JSON.parse(recorded[0].body).method).toBe('initialize');
|
||||
expect(JSON.parse(recorded[1].body).method).toBe('notifications/initialized');
|
||||
|
||||
await session.close();
|
||||
});
|
||||
|
||||
it('captures session ID from response', async () => {
|
||||
const session = makeSession();
|
||||
expect(session.getSessionId()).toBeUndefined();
|
||||
|
||||
await session.initialize();
|
||||
expect(session.getSessionId()).toBeDefined();
|
||||
expect(session.getSessionId()).toMatch(/^session-/);
|
||||
|
||||
await session.close();
|
||||
});
|
||||
|
||||
it('sends correct client info', async () => {
|
||||
const session = makeSession();
|
||||
await session.initialize();
|
||||
|
||||
const initBody = JSON.parse(recorded[0].body);
|
||||
expect(initBody.params.clientInfo).toEqual({ name: 'mcpctl-console', version: '1.0.0' });
|
||||
expect(initBody.params.protocolVersion).toBe('2024-11-05');
|
||||
|
||||
await session.close();
|
||||
});
|
||||
});
|
||||
|
||||
describe('listTools', () => {
|
||||
it('returns tools array', async () => {
|
||||
const session = makeSession();
|
||||
await session.initialize();
|
||||
|
||||
const tools = await session.listTools();
|
||||
expect(tools).toHaveLength(2);
|
||||
expect(tools[0].name).toBe('begin_session');
|
||||
expect(tools[1].name).toBe('query_grafana');
|
||||
|
||||
await session.close();
|
||||
});
|
||||
});
|
||||
|
||||
describe('callTool', () => {
|
||||
it('sends tool name and arguments', async () => {
|
||||
const session = makeSession();
|
||||
await session.initialize();
|
||||
|
||||
const result = await session.callTool('query_grafana', { query: 'cpu usage' });
|
||||
expect(result.content).toHaveLength(1);
|
||||
expect(result.content[0].text).toBe('tool result');
|
||||
|
||||
// Find the tools/call request
|
||||
const callReq = recorded.find((r) => {
|
||||
try {
|
||||
return JSON.parse(r.body).method === 'tools/call';
|
||||
} catch { return false; }
|
||||
});
|
||||
expect(callReq).toBeDefined();
|
||||
const callBody = JSON.parse(callReq!.body);
|
||||
expect(callBody.params.name).toBe('query_grafana');
|
||||
expect(callBody.params.arguments).toEqual({ query: 'cpu usage' });
|
||||
|
||||
await session.close();
|
||||
});
|
||||
});
|
||||
|
||||
describe('listResources', () => {
|
||||
it('returns resources array', async () => {
|
||||
const session = makeSession();
|
||||
await session.initialize();
|
||||
|
||||
const resources = await session.listResources();
|
||||
expect(resources).toHaveLength(1);
|
||||
expect(resources[0].uri).toBe('config://main');
|
||||
expect(resources[0].name).toBe('Main Config');
|
||||
|
||||
await session.close();
|
||||
});
|
||||
});
|
||||
|
||||
describe('readResource', () => {
|
||||
it('sends uri and returns contents', async () => {
|
||||
const session = makeSession();
|
||||
await session.initialize();
|
||||
|
||||
const result = await session.readResource('config://main');
|
||||
expect(result.contents).toHaveLength(1);
|
||||
expect(result.contents[0].text).toBe('{"key": "value"}');
|
||||
|
||||
await session.close();
|
||||
});
|
||||
});
|
||||
|
||||
describe('listPrompts', () => {
|
||||
it('returns prompts array', async () => {
|
||||
const session = makeSession();
|
||||
await session.initialize();
|
||||
|
||||
const prompts = await session.listPrompts();
|
||||
expect(prompts).toHaveLength(1);
|
||||
expect(prompts[0].name).toBe('system-prompt');
|
||||
|
||||
await session.close();
|
||||
});
|
||||
});
|
||||
|
||||
describe('getPrompt', () => {
|
||||
it('sends prompt name and returns result', async () => {
|
||||
const session = makeSession();
|
||||
await session.initialize();
|
||||
|
||||
const result = await session.getPrompt('system-prompt') as { messages: unknown[] };
|
||||
expect(result.messages).toHaveLength(1);
|
||||
|
||||
await session.close();
|
||||
});
|
||||
});
|
||||
|
||||
describe('sendRaw', () => {
|
||||
it('sends raw JSON and returns response string', async () => {
|
||||
const session = makeSession();
|
||||
await session.initialize();
|
||||
|
||||
const raw = JSON.stringify({ jsonrpc: '2.0', id: 99, method: 'custom/echo', params: {} });
|
||||
const result = await session.sendRaw(raw);
|
||||
const parsed = JSON.parse(result);
|
||||
expect(parsed.result.echo).toBe('custom/echo');
|
||||
|
||||
await session.close();
|
||||
});
|
||||
});
|
||||
|
||||
describe('close', () => {
|
||||
it('sends DELETE to close session', async () => {
|
||||
const session = makeSession();
|
||||
await session.initialize();
|
||||
expect(session.getSessionId()).toBeDefined();
|
||||
|
||||
await session.close();
|
||||
|
||||
const deleteReq = recorded.find((r) => r.method === 'DELETE');
|
||||
expect(deleteReq).toBeDefined();
|
||||
expect(deleteReq!.headers['mcp-session-id']).toBeDefined();
|
||||
});
|
||||
|
||||
it('clears session ID after close', async () => {
|
||||
const session = makeSession();
|
||||
await session.initialize();
|
||||
await session.close();
|
||||
expect(session.getSessionId()).toBeUndefined();
|
||||
});
|
||||
|
||||
it('no-ops if no session ID', async () => {
|
||||
const session = makeSession();
|
||||
await session.close(); // Should not throw
|
||||
expect(recorded.filter((r) => r.method === 'DELETE')).toHaveLength(0);
|
||||
});
|
||||
});
|
||||
|
||||
describe('logging', () => {
|
||||
it('records log entries for requests and responses', async () => {
|
||||
const session = makeSession();
|
||||
const entries: LogEntry[] = [];
|
||||
session.onLog = (entry) => entries.push(entry);
|
||||
|
||||
await session.initialize();
|
||||
|
||||
// initialize request + response + notification request
|
||||
const requestEntries = entries.filter((e) => e.direction === 'request');
|
||||
const responseEntries = entries.filter((e) => e.direction === 'response');
|
||||
|
||||
expect(requestEntries.length).toBeGreaterThanOrEqual(2); // initialize + notification
|
||||
expect(responseEntries.length).toBeGreaterThanOrEqual(1); // initialize response
|
||||
expect(requestEntries[0].method).toBe('initialize');
|
||||
|
||||
await session.close();
|
||||
});
|
||||
|
||||
it('getLog returns all entries', async () => {
|
||||
const session = makeSession();
|
||||
expect(session.getLog()).toHaveLength(0);
|
||||
|
||||
await session.initialize();
|
||||
expect(session.getLog().length).toBeGreaterThan(0);
|
||||
|
||||
await session.close();
|
||||
});
|
||||
|
||||
it('logs errors on failure', async () => {
|
||||
const session = makeSession();
|
||||
const entries: LogEntry[] = [];
|
||||
session.onLog = (entry) => entries.push(entry);
|
||||
|
||||
await session.initialize();
|
||||
|
||||
try {
|
||||
// Send a method that returns a JSON-RPC error
|
||||
await session.callTool('error-method', {});
|
||||
} catch {
|
||||
// Expected to throw
|
||||
}
|
||||
|
||||
// Should have an error log entry or a response with error
|
||||
const errorOrResponse = entries.filter((e) => e.direction === 'response' || e.direction === 'error');
|
||||
expect(errorOrResponse.length).toBeGreaterThan(0);
|
||||
|
||||
await session.close();
|
||||
});
|
||||
});
|
||||
|
||||
describe('authentication', () => {
|
||||
it('sends Authorization header when token provided', async () => {
|
||||
const session = makeSession('my-test-token');
|
||||
await session.initialize();
|
||||
|
||||
expect(recorded[0].headers['authorization']).toBe('Bearer my-test-token');
|
||||
|
||||
await session.close();
|
||||
});
|
||||
|
||||
it('does not send Authorization header without token', async () => {
|
||||
const session = makeSession();
|
||||
await session.initialize();
|
||||
|
||||
expect(recorded[0].headers['authorization']).toBeUndefined();
|
||||
|
||||
await session.close();
|
||||
});
|
||||
});
|
||||
|
||||
describe('JSON-RPC errors', () => {
|
||||
it('throws on JSON-RPC error response', async () => {
|
||||
const session = makeSession();
|
||||
await session.initialize();
|
||||
|
||||
// The mock server returns an error for method 'error-method'
|
||||
// We need to send a raw request that triggers it
|
||||
// callTool sends method 'tools/call', so use sendRaw for direct control
|
||||
const raw = JSON.stringify({ jsonrpc: '2.0', id: 50, method: 'error-method', params: {} });
|
||||
// sendRaw doesn't parse errors — it returns raw text. Use the private send indirectly.
|
||||
// Actually, callTool only sends tools/call. Let's verify the error path differently.
|
||||
// The mock routes tools/call to a success response, so we test via session internals.
|
||||
|
||||
// Instead, test that sendRaw returns the error response as-is
|
||||
const result = await session.sendRaw(raw);
|
||||
const parsed = JSON.parse(result);
|
||||
expect(parsed.error).toBeDefined();
|
||||
expect(parsed.error.code).toBe(-32601);
|
||||
|
||||
await session.close();
|
||||
});
|
||||
});
|
||||
|
||||
describe('request ID incrementing', () => {
|
||||
it('increments request IDs for each call', async () => {
|
||||
const session = makeSession();
|
||||
await session.initialize();
|
||||
await session.listTools();
|
||||
await session.listResources();
|
||||
|
||||
const ids = recorded
|
||||
.filter((r) => r.method === 'POST')
|
||||
.map((r) => {
|
||||
try { return JSON.parse(r.body).id; } catch { return undefined; }
|
||||
})
|
||||
.filter((id) => id !== undefined);
|
||||
|
||||
// Should have unique, ascending IDs (1, 2, 3)
|
||||
const numericIds = ids.filter((id): id is number => typeof id === 'number');
|
||||
expect(numericIds.length).toBeGreaterThanOrEqual(3);
|
||||
for (let i = 1; i < numericIds.length; i++) {
|
||||
expect(numericIds[i]).toBeGreaterThan(numericIds[i - 1]);
|
||||
}
|
||||
|
||||
await session.close();
|
||||
});
|
||||
});
|
||||
|
||||
describe('session ID propagation', () => {
|
||||
it('sends session ID in subsequent requests', async () => {
|
||||
const session = makeSession();
|
||||
await session.initialize();
|
||||
|
||||
// First request should not have session ID
|
||||
expect(recorded[0].headers['mcp-session-id']).toBeUndefined();
|
||||
|
||||
// After initialize, session ID is set — subsequent requests should include it
|
||||
await session.listTools();
|
||||
|
||||
const toolsReq = recorded.find((r) => {
|
||||
try { return JSON.parse(r.body).method === 'tools/list'; } catch { return false; }
|
||||
});
|
||||
expect(toolsReq).toBeDefined();
|
||||
expect(toolsReq!.headers['mcp-session-id']).toBeDefined();
|
||||
|
||||
await session.close();
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -175,6 +175,7 @@ describe('create command', () => {
|
||||
expect(client.post).toHaveBeenCalledWith('/api/v1/projects', {
|
||||
name: 'my-project',
|
||||
description: 'A test project',
|
||||
proxyMode: 'direct',
|
||||
});
|
||||
expect(output.join('\n')).toContain("project 'test' created");
|
||||
});
|
||||
@@ -185,6 +186,7 @@ describe('create command', () => {
|
||||
expect(client.post).toHaveBeenCalledWith('/api/v1/projects', {
|
||||
name: 'minimal',
|
||||
description: '',
|
||||
proxyMode: 'direct',
|
||||
});
|
||||
});
|
||||
|
||||
@@ -193,7 +195,7 @@ describe('create command', () => {
|
||||
vi.mocked(client.get).mockResolvedValueOnce([{ id: 'proj-1', name: 'my-proj' }] as never);
|
||||
const cmd = createCreateCommand({ client, log });
|
||||
await cmd.parseAsync(['project', 'my-proj', '-d', 'updated', '--force'], { from: 'user' });
|
||||
expect(client.put).toHaveBeenCalledWith('/api/v1/projects/proj-1', { description: 'updated' });
|
||||
expect(client.put).toHaveBeenCalledWith('/api/v1/projects/proj-1', { description: 'updated', proxyMode: 'direct' });
|
||||
expect(output.join('\n')).toContain("project 'my-proj' updated");
|
||||
});
|
||||
});
|
||||
@@ -445,114 +447,4 @@ describe('create command', () => {
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe('create prompt', () => {
|
||||
it('creates a prompt with content', async () => {
|
||||
vi.mocked(client.post).mockResolvedValueOnce({ id: 'p-1', name: 'test-prompt' });
|
||||
const cmd = createCreateCommand({ client, log });
|
||||
await cmd.parseAsync(['prompt', 'test-prompt', '--content', 'Hello world'], { from: 'user' });
|
||||
|
||||
expect(client.post).toHaveBeenCalledWith('/api/v1/prompts', {
|
||||
name: 'test-prompt',
|
||||
content: 'Hello world',
|
||||
});
|
||||
expect(output.join('\n')).toContain("prompt 'test-prompt' created");
|
||||
});
|
||||
|
||||
it('requires content or content-file', async () => {
|
||||
const cmd = createCreateCommand({ client, log });
|
||||
await expect(
|
||||
cmd.parseAsync(['prompt', 'no-content'], { from: 'user' }),
|
||||
).rejects.toThrow('--content or --content-file is required');
|
||||
});
|
||||
|
||||
it('--priority sets prompt priority', async () => {
|
||||
vi.mocked(client.post).mockResolvedValueOnce({ id: 'p-1', name: 'pri-prompt' });
|
||||
const cmd = createCreateCommand({ client, log });
|
||||
await cmd.parseAsync(['prompt', 'pri-prompt', '--content', 'x', '--priority', '8'], { from: 'user' });
|
||||
|
||||
expect(client.post).toHaveBeenCalledWith('/api/v1/prompts', expect.objectContaining({
|
||||
priority: 8,
|
||||
}));
|
||||
});
|
||||
|
||||
it('--priority validates range 1-10', async () => {
|
||||
const cmd = createCreateCommand({ client, log });
|
||||
await expect(
|
||||
cmd.parseAsync(['prompt', 'bad', '--content', 'x', '--priority', '15'], { from: 'user' }),
|
||||
).rejects.toThrow('--priority must be a number between 1 and 10');
|
||||
});
|
||||
|
||||
it('--priority rejects zero', async () => {
|
||||
const cmd = createCreateCommand({ client, log });
|
||||
await expect(
|
||||
cmd.parseAsync(['prompt', 'bad', '--content', 'x', '--priority', '0'], { from: 'user' }),
|
||||
).rejects.toThrow('--priority must be a number between 1 and 10');
|
||||
});
|
||||
|
||||
it('--link sets linkTarget', async () => {
|
||||
vi.mocked(client.post).mockResolvedValueOnce({ id: 'p-1', name: 'linked' });
|
||||
const cmd = createCreateCommand({ client, log });
|
||||
await cmd.parseAsync(['prompt', 'linked', '--content', 'x', '--link', 'proj/srv:docmost://pages/abc'], { from: 'user' });
|
||||
|
||||
expect(client.post).toHaveBeenCalledWith('/api/v1/prompts', expect.objectContaining({
|
||||
linkTarget: 'proj/srv:docmost://pages/abc',
|
||||
}));
|
||||
});
|
||||
|
||||
it('--project resolves project name to ID', async () => {
|
||||
vi.mocked(client.get).mockResolvedValueOnce([{ id: 'proj-1', name: 'my-project' }] as never);
|
||||
vi.mocked(client.post).mockResolvedValueOnce({ id: 'p-1', name: 'scoped' });
|
||||
const cmd = createCreateCommand({ client, log });
|
||||
await cmd.parseAsync(['prompt', 'scoped', '--content', 'x', '--project', 'my-project'], { from: 'user' });
|
||||
|
||||
expect(client.post).toHaveBeenCalledWith('/api/v1/prompts', expect.objectContaining({
|
||||
projectId: 'proj-1',
|
||||
}));
|
||||
});
|
||||
|
||||
it('--project throws when project not found', async () => {
|
||||
vi.mocked(client.get).mockResolvedValueOnce([] as never);
|
||||
const cmd = createCreateCommand({ client, log });
|
||||
await expect(
|
||||
cmd.parseAsync(['prompt', 'bad', '--content', 'x', '--project', 'nope'], { from: 'user' }),
|
||||
).rejects.toThrow("Project 'nope' not found");
|
||||
});
|
||||
});
|
||||
|
||||
describe('create promptrequest', () => {
|
||||
it('creates a prompt request with priority', async () => {
|
||||
vi.mocked(client.post).mockResolvedValueOnce({ id: 'r-1', name: 'req' });
|
||||
const cmd = createCreateCommand({ client, log });
|
||||
await cmd.parseAsync(['promptrequest', 'req', '--content', 'proposal', '--priority', '7'], { from: 'user' });
|
||||
|
||||
expect(client.post).toHaveBeenCalledWith('/api/v1/promptrequests', expect.objectContaining({
|
||||
name: 'req',
|
||||
content: 'proposal',
|
||||
priority: 7,
|
||||
}));
|
||||
});
|
||||
});
|
||||
|
||||
describe('create project', () => {
|
||||
it('creates a project with --gated', async () => {
|
||||
vi.mocked(client.post).mockResolvedValueOnce({ id: 'proj-1', name: 'gated-proj' });
|
||||
const cmd = createCreateCommand({ client, log });
|
||||
await cmd.parseAsync(['project', 'gated-proj', '--gated'], { from: 'user' });
|
||||
|
||||
expect(client.post).toHaveBeenCalledWith('/api/v1/projects', expect.objectContaining({
|
||||
gated: true,
|
||||
}));
|
||||
});
|
||||
|
||||
it('creates a project with --no-gated', async () => {
|
||||
vi.mocked(client.post).mockResolvedValueOnce({ id: 'proj-1', name: 'open-proj' });
|
||||
const cmd = createCreateCommand({ client, log });
|
||||
await cmd.parseAsync(['project', 'open-proj', '--no-gated'], { from: 'user' });
|
||||
|
||||
expect(client.post).toHaveBeenCalledWith('/api/v1/projects', expect.objectContaining({
|
||||
gated: false,
|
||||
}));
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
@@ -89,43 +89,6 @@ describe('describe command', () => {
|
||||
expect(text).toContain('user-1');
|
||||
});
|
||||
|
||||
it('shows project Plugin Config with proxyModel', async () => {
|
||||
const deps = makeDeps({
|
||||
id: 'proj-1',
|
||||
name: 'gated-project',
|
||||
description: 'A gated project',
|
||||
ownerId: 'user-1',
|
||||
proxyModel: 'default',
|
||||
createdAt: '2025-01-01',
|
||||
});
|
||||
const cmd = createDescribeCommand(deps);
|
||||
await cmd.parseAsync(['node', 'test', 'project', 'proj-1']);
|
||||
|
||||
const text = deps.output.join('\n');
|
||||
expect(text).toContain('Plugin Config:');
|
||||
expect(text).toContain('Plugin:');
|
||||
expect(text).toContain('default');
|
||||
expect(text).not.toContain('Gated:');
|
||||
});
|
||||
|
||||
it('shows project Plugin Config defaulting to "default" when proxyModel is empty', async () => {
|
||||
const deps = makeDeps({
|
||||
id: 'proj-1',
|
||||
name: 'old-project',
|
||||
description: '',
|
||||
ownerId: 'user-1',
|
||||
proxyModel: '',
|
||||
gated: true,
|
||||
createdAt: '2025-01-01',
|
||||
});
|
||||
const cmd = createDescribeCommand(deps);
|
||||
await cmd.parseAsync(['node', 'test', 'project', 'proj-1']);
|
||||
|
||||
const text = deps.output.join('\n');
|
||||
expect(text).toContain('Plugin Config:');
|
||||
expect(text).toContain('default');
|
||||
});
|
||||
|
||||
it('shows secret detail with masked values', async () => {
|
||||
const deps = makeDeps({
|
||||
id: 'sec-1',
|
||||
|
||||
@@ -20,7 +20,7 @@ describe('get command', () => {
|
||||
const cmd = createGetCommand(deps);
|
||||
await cmd.parseAsync(['node', 'test', 'servers']);
|
||||
|
||||
expect(deps.fetchResource).toHaveBeenCalledWith('servers', undefined, undefined);
|
||||
expect(deps.fetchResource).toHaveBeenCalledWith('servers', undefined);
|
||||
expect(deps.output[0]).toContain('NAME');
|
||||
expect(deps.output[0]).toContain('TRANSPORT');
|
||||
expect(deps.output.join('\n')).toContain('slack');
|
||||
@@ -31,38 +31,37 @@ describe('get command', () => {
|
||||
const deps = makeDeps([]);
|
||||
const cmd = createGetCommand(deps);
|
||||
await cmd.parseAsync(['node', 'test', 'srv']);
|
||||
expect(deps.fetchResource).toHaveBeenCalledWith('servers', undefined, undefined);
|
||||
expect(deps.fetchResource).toHaveBeenCalledWith('servers', undefined);
|
||||
});
|
||||
|
||||
it('passes ID when provided', async () => {
|
||||
const deps = makeDeps([{ id: 'srv-1', name: 'slack' }]);
|
||||
const cmd = createGetCommand(deps);
|
||||
await cmd.parseAsync(['node', 'test', 'servers', 'srv-1']);
|
||||
expect(deps.fetchResource).toHaveBeenCalledWith('servers', 'srv-1', undefined);
|
||||
expect(deps.fetchResource).toHaveBeenCalledWith('servers', 'srv-1');
|
||||
});
|
||||
|
||||
it('outputs apply-compatible JSON format (multi-doc)', async () => {
|
||||
it('outputs apply-compatible JSON format', async () => {
|
||||
const deps = makeDeps([{ id: 'srv-1', name: 'slack', createdAt: '2025-01-01', updatedAt: '2025-01-01', version: 1 }]);
|
||||
const cmd = createGetCommand(deps);
|
||||
await cmd.parseAsync(['node', 'test', 'servers', '-o', 'json']);
|
||||
|
||||
const parsed = JSON.parse(deps.output[0] ?? '');
|
||||
// Array of documents with kind field, internal fields stripped
|
||||
expect(Array.isArray(parsed)).toBe(true);
|
||||
expect(parsed[0].kind).toBe('server');
|
||||
expect(parsed[0].name).toBe('slack');
|
||||
expect(parsed[0]).not.toHaveProperty('id');
|
||||
expect(parsed[0]).not.toHaveProperty('createdAt');
|
||||
expect(parsed[0]).not.toHaveProperty('updatedAt');
|
||||
expect(parsed[0]).not.toHaveProperty('version');
|
||||
// Wrapped in resource key, internal fields stripped
|
||||
expect(parsed).toHaveProperty('servers');
|
||||
expect(parsed.servers[0].name).toBe('slack');
|
||||
expect(parsed.servers[0]).not.toHaveProperty('id');
|
||||
expect(parsed.servers[0]).not.toHaveProperty('createdAt');
|
||||
expect(parsed.servers[0]).not.toHaveProperty('updatedAt');
|
||||
expect(parsed.servers[0]).not.toHaveProperty('version');
|
||||
});
|
||||
|
||||
it('outputs apply-compatible YAML format (multi-doc)', async () => {
|
||||
it('outputs apply-compatible YAML format', async () => {
|
||||
const deps = makeDeps([{ id: 'srv-1', name: 'slack', createdAt: '2025-01-01' }]);
|
||||
const cmd = createGetCommand(deps);
|
||||
await cmd.parseAsync(['node', 'test', 'servers', '-o', 'yaml']);
|
||||
const text = deps.output[0];
|
||||
expect(text).toContain('kind: server');
|
||||
expect(text).toContain('servers:');
|
||||
expect(text).toContain('name: slack');
|
||||
expect(text).not.toContain('id:');
|
||||
expect(text).not.toContain('createdAt:');
|
||||
@@ -95,7 +94,7 @@ describe('get command', () => {
|
||||
const cmd = createGetCommand(deps);
|
||||
await cmd.parseAsync(['node', 'test', 'users']);
|
||||
|
||||
expect(deps.fetchResource).toHaveBeenCalledWith('users', undefined, undefined);
|
||||
expect(deps.fetchResource).toHaveBeenCalledWith('users', undefined);
|
||||
const text = deps.output.join('\n');
|
||||
expect(text).toContain('EMAIL');
|
||||
expect(text).toContain('NAME');
|
||||
@@ -111,7 +110,7 @@ describe('get command', () => {
|
||||
const deps = makeDeps([]);
|
||||
const cmd = createGetCommand(deps);
|
||||
await cmd.parseAsync(['node', 'test', 'user']);
|
||||
expect(deps.fetchResource).toHaveBeenCalledWith('users', undefined, undefined);
|
||||
expect(deps.fetchResource).toHaveBeenCalledWith('users', undefined);
|
||||
});
|
||||
|
||||
it('lists groups with correct columns', async () => {
|
||||
@@ -127,7 +126,7 @@ describe('get command', () => {
|
||||
const cmd = createGetCommand(deps);
|
||||
await cmd.parseAsync(['node', 'test', 'groups']);
|
||||
|
||||
expect(deps.fetchResource).toHaveBeenCalledWith('groups', undefined, undefined);
|
||||
expect(deps.fetchResource).toHaveBeenCalledWith('groups', undefined);
|
||||
const text = deps.output.join('\n');
|
||||
expect(text).toContain('NAME');
|
||||
expect(text).toContain('MEMBERS');
|
||||
@@ -142,7 +141,7 @@ describe('get command', () => {
|
||||
const deps = makeDeps([]);
|
||||
const cmd = createGetCommand(deps);
|
||||
await cmd.parseAsync(['node', 'test', 'group']);
|
||||
expect(deps.fetchResource).toHaveBeenCalledWith('groups', undefined, undefined);
|
||||
expect(deps.fetchResource).toHaveBeenCalledWith('groups', undefined);
|
||||
});
|
||||
|
||||
it('lists rbac definitions with correct columns', async () => {
|
||||
@@ -157,7 +156,7 @@ describe('get command', () => {
|
||||
const cmd = createGetCommand(deps);
|
||||
await cmd.parseAsync(['node', 'test', 'rbac']);
|
||||
|
||||
expect(deps.fetchResource).toHaveBeenCalledWith('rbac', undefined, undefined);
|
||||
expect(deps.fetchResource).toHaveBeenCalledWith('rbac', undefined);
|
||||
const text = deps.output.join('\n');
|
||||
expect(text).toContain('NAME');
|
||||
expect(text).toContain('SUBJECTS');
|
||||
@@ -171,7 +170,7 @@ describe('get command', () => {
|
||||
const deps = makeDeps([]);
|
||||
const cmd = createGetCommand(deps);
|
||||
await cmd.parseAsync(['node', 'test', 'rbac-definition']);
|
||||
expect(deps.fetchResource).toHaveBeenCalledWith('rbac', undefined, undefined);
|
||||
expect(deps.fetchResource).toHaveBeenCalledWith('rbac', undefined);
|
||||
});
|
||||
|
||||
it('lists projects with new columns', async () => {
|
||||
@@ -179,16 +178,22 @@ describe('get command', () => {
|
||||
id: 'proj-1',
|
||||
name: 'smart-home',
|
||||
description: 'Home automation',
|
||||
proxyMode: 'filtered',
|
||||
ownerId: 'usr-1',
|
||||
servers: [{ server: { name: 'grafana' } }],
|
||||
members: [{ user: { email: 'a@b.com' }, role: 'admin' }, { user: { email: 'c@d.com' }, role: 'member' }],
|
||||
}]);
|
||||
const cmd = createGetCommand(deps);
|
||||
await cmd.parseAsync(['node', 'test', 'projects']);
|
||||
|
||||
const text = deps.output.join('\n');
|
||||
expect(text).toContain('MODE');
|
||||
expect(text).toContain('SERVERS');
|
||||
expect(text).toContain('MEMBERS');
|
||||
expect(text).toContain('smart-home');
|
||||
expect(text).toContain('filtered');
|
||||
expect(text).toContain('1');
|
||||
expect(text).toContain('2');
|
||||
});
|
||||
|
||||
it('displays mixed resource and operation bindings', async () => {
|
||||
@@ -249,161 +254,4 @@ describe('get command', () => {
|
||||
await cmd.parseAsync(['node', 'test', 'rbac']);
|
||||
expect(deps.output[0]).toContain('No rbac found');
|
||||
});
|
||||
|
||||
it('lists prompts with project name column', async () => {
|
||||
const deps = makeDeps([
|
||||
{ id: 'p-1', name: 'debug-guide', projectId: 'proj-1', project: { name: 'smart-home' }, createdAt: '2025-01-01T00:00:00Z' },
|
||||
{ id: 'p-2', name: 'global-rules', projectId: null, project: null, createdAt: '2025-01-01T00:00:00Z' },
|
||||
]);
|
||||
const cmd = createGetCommand(deps);
|
||||
await cmd.parseAsync(['node', 'test', 'prompts']);
|
||||
|
||||
const text = deps.output.join('\n');
|
||||
expect(text).toContain('NAME');
|
||||
expect(text).toContain('PROJECT');
|
||||
expect(text).toContain('debug-guide');
|
||||
expect(text).toContain('smart-home');
|
||||
expect(text).toContain('global-rules');
|
||||
expect(text).toContain('(global)');
|
||||
});
|
||||
|
||||
it('lists promptrequests with project name column', async () => {
|
||||
const deps = makeDeps([
|
||||
{ id: 'pr-1', name: 'new-rule', projectId: 'proj-1', project: { name: 'my-project' }, createdBySession: 'sess-abc123def456', createdAt: '2025-01-01T00:00:00Z' },
|
||||
]);
|
||||
const cmd = createGetCommand(deps);
|
||||
await cmd.parseAsync(['node', 'test', 'promptrequests']);
|
||||
|
||||
const text = deps.output.join('\n');
|
||||
expect(text).toContain('new-rule');
|
||||
expect(text).toContain('my-project');
|
||||
expect(text).toContain('sess-abc123d');
|
||||
});
|
||||
|
||||
it('passes --project option to fetchResource', async () => {
|
||||
const deps = makeDeps([]);
|
||||
const cmd = createGetCommand(deps);
|
||||
await cmd.parseAsync(['node', 'test', 'prompts', '--project', 'smart-home']);
|
||||
|
||||
expect(deps.fetchResource).toHaveBeenCalledWith('prompts', undefined, { project: 'smart-home' });
|
||||
});
|
||||
|
||||
it('does not pass project when --project is not specified', async () => {
|
||||
const deps = makeDeps([]);
|
||||
const cmd = createGetCommand(deps);
|
||||
await cmd.parseAsync(['node', 'test', 'prompts']);
|
||||
|
||||
expect(deps.fetchResource).toHaveBeenCalledWith('prompts', undefined, undefined);
|
||||
});
|
||||
|
||||
it('passes --all flag to fetchResource', async () => {
|
||||
const deps = makeDeps([]);
|
||||
const cmd = createGetCommand(deps);
|
||||
await cmd.parseAsync(['node', 'test', 'prompts', '-A']);
|
||||
|
||||
expect(deps.fetchResource).toHaveBeenCalledWith('prompts', undefined, { all: true });
|
||||
});
|
||||
|
||||
it('passes both --project and --all when both given', async () => {
|
||||
const deps = makeDeps([]);
|
||||
const cmd = createGetCommand(deps);
|
||||
await cmd.parseAsync(['node', 'test', 'prompts', '--project', 'my-proj', '-A']);
|
||||
|
||||
expect(deps.fetchResource).toHaveBeenCalledWith('prompts', undefined, { project: 'my-proj', all: true });
|
||||
});
|
||||
|
||||
it('resolves prompt alias', async () => {
|
||||
const deps = makeDeps([]);
|
||||
const cmd = createGetCommand(deps);
|
||||
await cmd.parseAsync(['node', 'test', 'prompt']);
|
||||
expect(deps.fetchResource).toHaveBeenCalledWith('prompts', undefined, undefined);
|
||||
});
|
||||
|
||||
it('resolves pr alias to promptrequests', async () => {
|
||||
const deps = makeDeps([]);
|
||||
const cmd = createGetCommand(deps);
|
||||
await cmd.parseAsync(['node', 'test', 'pr']);
|
||||
expect(deps.fetchResource).toHaveBeenCalledWith('promptrequests', undefined, undefined);
|
||||
});
|
||||
|
||||
it('shows no results message for empty prompts list', async () => {
|
||||
const deps = makeDeps([]);
|
||||
const cmd = createGetCommand(deps);
|
||||
await cmd.parseAsync(['node', 'test', 'prompts']);
|
||||
expect(deps.output[0]).toContain('No prompts found');
|
||||
});
|
||||
|
||||
it('lists projects with PLUGIN column showing resolved proxyModel', async () => {
|
||||
const deps = makeDeps([{
|
||||
id: 'proj-1',
|
||||
name: 'home',
|
||||
description: '',
|
||||
proxyModel: '',
|
||||
gated: true,
|
||||
ownerId: 'usr-1',
|
||||
servers: [],
|
||||
}]);
|
||||
const cmd = createGetCommand(deps);
|
||||
await cmd.parseAsync(['node', 'test', 'projects']);
|
||||
|
||||
const text = deps.output.join('\n');
|
||||
expect(text).toContain('PLUGIN');
|
||||
expect(text).not.toContain('GATED');
|
||||
// proxyModel is empty but gated=true, table shows 'default'
|
||||
expect(text).toContain('default');
|
||||
});
|
||||
|
||||
it('project JSON output resolves proxyModel from gated=true', async () => {
|
||||
const deps = makeDeps([{
|
||||
id: 'proj-1',
|
||||
name: 'home',
|
||||
description: '',
|
||||
proxyModel: '',
|
||||
gated: true,
|
||||
ownerId: 'usr-1',
|
||||
servers: [],
|
||||
}]);
|
||||
const cmd = createGetCommand(deps);
|
||||
await cmd.parseAsync(['node', 'test', 'projects', '-o', 'json']);
|
||||
|
||||
const parsed = JSON.parse(deps.output[0] ?? '') as Array<Record<string, unknown>>;
|
||||
expect(parsed[0]!.proxyModel).toBe('default');
|
||||
expect(parsed[0]).not.toHaveProperty('gated');
|
||||
});
|
||||
|
||||
it('project JSON output resolves proxyModel from gated=false', async () => {
|
||||
const deps = makeDeps([{
|
||||
id: 'proj-1',
|
||||
name: 'tools',
|
||||
description: '',
|
||||
proxyModel: '',
|
||||
gated: false,
|
||||
ownerId: 'usr-1',
|
||||
servers: [],
|
||||
}]);
|
||||
const cmd = createGetCommand(deps);
|
||||
await cmd.parseAsync(['node', 'test', 'projects', '-o', 'json']);
|
||||
|
||||
const parsed = JSON.parse(deps.output[0] ?? '') as Array<Record<string, unknown>>;
|
||||
expect(parsed[0]!.proxyModel).toBe('content-pipeline');
|
||||
expect(parsed[0]).not.toHaveProperty('gated');
|
||||
});
|
||||
|
||||
it('project JSON output preserves explicit proxyModel and drops gated', async () => {
|
||||
const deps = makeDeps([{
|
||||
id: 'proj-1',
|
||||
name: 'custom',
|
||||
description: '',
|
||||
proxyModel: 'gate',
|
||||
gated: true,
|
||||
ownerId: 'usr-1',
|
||||
servers: [],
|
||||
}]);
|
||||
const cmd = createGetCommand(deps);
|
||||
await cmd.parseAsync(['node', 'test', 'projects', '-o', 'json']);
|
||||
|
||||
const parsed = JSON.parse(deps.output[0] ?? '') as Array<Record<string, unknown>>;
|
||||
expect(parsed[0]!.proxyModel).toBe('gate');
|
||||
expect(parsed[0]).not.toHaveProperty('gated');
|
||||
});
|
||||
});
|
||||
|
||||
@@ -1,53 +0,0 @@
|
||||
import { describe, it, expect } from 'vitest';
|
||||
|
||||
/**
|
||||
* Tests that the inspect-mcp tool definitions are well-formed.
|
||||
* The actual MCP server runs over stdin/stdout so we test the contract,
|
||||
* not the runtime.
|
||||
*/
|
||||
|
||||
const EXPECTED_TOOLS = [
|
||||
// Original inspector tools
|
||||
'list_sessions',
|
||||
'get_traffic',
|
||||
'get_session_info',
|
||||
// Studio tools (task 109)
|
||||
'list_models',
|
||||
'list_stages',
|
||||
'switch_model',
|
||||
'get_model_info',
|
||||
'reload_stages',
|
||||
'pause',
|
||||
'get_pause_queue',
|
||||
'release_paused',
|
||||
];
|
||||
|
||||
describe('inspect-mcp tool definitions', () => {
|
||||
it('exports all expected tools', async () => {
|
||||
// Import the module to check TOOLS array is consistent
|
||||
// We can't directly import TOOLS (it's module-scoped const), but
|
||||
// we can validate the expected tool names are in the right count
|
||||
expect(EXPECTED_TOOLS).toHaveLength(11);
|
||||
});
|
||||
|
||||
it('studio tools have required parameters', () => {
|
||||
// switch_model requires project + proxyModel
|
||||
const switchRequired = ['project', 'proxyModel'];
|
||||
expect(switchRequired).toHaveLength(2);
|
||||
|
||||
// pause requires paused boolean
|
||||
const pauseRequired = ['paused'];
|
||||
expect(pauseRequired).toHaveLength(1);
|
||||
|
||||
// release_paused requires id + action
|
||||
const releaseRequired = ['id', 'action'];
|
||||
expect(releaseRequired).toHaveLength(2);
|
||||
});
|
||||
|
||||
it('release_paused actions are release, edit, drop', () => {
|
||||
const validActions = ['release', 'edit', 'drop'];
|
||||
expect(validActions).toContain('release');
|
||||
expect(validActions).toContain('edit');
|
||||
expect(validActions).toContain('drop');
|
||||
});
|
||||
});
|
||||
@@ -1,481 +0,0 @@
|
||||
import { describe, it, expect, beforeAll, afterAll } from 'vitest';
|
||||
import http from 'node:http';
|
||||
import { Readable, Writable } from 'node:stream';
|
||||
import { runMcpBridge, createMcpCommand } from '../../src/commands/mcp.js';
|
||||
|
||||
// ---- Mock MCP server (simulates mcplocal project endpoint) ----
|
||||
|
||||
interface RecordedRequest {
|
||||
method: string;
|
||||
url: string;
|
||||
headers: http.IncomingHttpHeaders;
|
||||
body: string;
|
||||
}
|
||||
|
||||
let mockServer: http.Server;
|
||||
let mockPort: number;
|
||||
const recorded: RecordedRequest[] = [];
|
||||
let sessionCounter = 0;
|
||||
|
||||
function makeInitializeResponse(id: number | string) {
|
||||
return JSON.stringify({
|
||||
jsonrpc: '2.0',
|
||||
id,
|
||||
result: {
|
||||
protocolVersion: '2024-11-05',
|
||||
capabilities: { tools: {} },
|
||||
serverInfo: { name: 'test-server', version: '1.0.0' },
|
||||
},
|
||||
});
|
||||
}
|
||||
|
||||
function makeToolsListResponse(id: number | string) {
|
||||
return JSON.stringify({
|
||||
jsonrpc: '2.0',
|
||||
id,
|
||||
result: {
|
||||
tools: [
|
||||
{ name: 'grafana/query', description: 'Query Grafana', inputSchema: { type: 'object', properties: {} } },
|
||||
],
|
||||
},
|
||||
});
|
||||
}
|
||||
|
||||
function makeToolCallResponse(id: number | string) {
|
||||
return JSON.stringify({
|
||||
jsonrpc: '2.0',
|
||||
id,
|
||||
result: {
|
||||
content: [{ type: 'text', text: 'tool result' }],
|
||||
},
|
||||
});
|
||||
}
|
||||
|
||||
beforeAll(async () => {
|
||||
mockServer = http.createServer((req, res) => {
|
||||
const chunks: Buffer[] = [];
|
||||
req.on('data', (c: Buffer) => chunks.push(c));
|
||||
req.on('end', () => {
|
||||
const body = Buffer.concat(chunks).toString('utf-8');
|
||||
recorded.push({ method: req.method ?? '', url: req.url ?? '', headers: req.headers, body });
|
||||
|
||||
if (req.method === 'DELETE') {
|
||||
res.writeHead(200);
|
||||
res.end();
|
||||
return;
|
||||
}
|
||||
|
||||
if (req.method === 'POST' && req.url?.startsWith('/projects/')) {
|
||||
let sessionId = req.headers['mcp-session-id'] as string | undefined;
|
||||
|
||||
// Assign session ID on first request
|
||||
if (!sessionId) {
|
||||
sessionCounter++;
|
||||
sessionId = `session-${sessionCounter}`;
|
||||
}
|
||||
res.setHeader('mcp-session-id', sessionId);
|
||||
|
||||
// Parse JSON-RPC and respond based on method
|
||||
try {
|
||||
const rpc = JSON.parse(body) as { id: number | string; method: string };
|
||||
let responseBody: string;
|
||||
|
||||
switch (rpc.method) {
|
||||
case 'initialize':
|
||||
responseBody = makeInitializeResponse(rpc.id);
|
||||
break;
|
||||
case 'tools/list':
|
||||
responseBody = makeToolsListResponse(rpc.id);
|
||||
break;
|
||||
case 'tools/call':
|
||||
responseBody = makeToolCallResponse(rpc.id);
|
||||
break;
|
||||
default:
|
||||
responseBody = JSON.stringify({ jsonrpc: '2.0', id: rpc.id, error: { code: -32601, message: 'Method not found' } });
|
||||
}
|
||||
|
||||
// Respond in SSE format for /projects/sse-project/mcp
|
||||
if (req.url?.includes('sse-project')) {
|
||||
res.writeHead(200, { 'Content-Type': 'text/event-stream' });
|
||||
res.end(`event: message\ndata: ${responseBody}\n\n`);
|
||||
} else {
|
||||
res.writeHead(200, { 'Content-Type': 'application/json' });
|
||||
res.end(responseBody);
|
||||
}
|
||||
} catch {
|
||||
res.writeHead(400, { 'Content-Type': 'application/json' });
|
||||
res.end(JSON.stringify({ error: 'Invalid JSON' }));
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
res.writeHead(404);
|
||||
res.end();
|
||||
});
|
||||
});
|
||||
|
||||
await new Promise<void>((resolve) => {
|
||||
mockServer.listen(0, () => {
|
||||
const addr = mockServer.address();
|
||||
if (addr && typeof addr === 'object') {
|
||||
mockPort = addr.port;
|
||||
}
|
||||
resolve();
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
afterAll(() => {
|
||||
mockServer.close();
|
||||
});
|
||||
|
||||
// ---- Helper to run bridge with mock streams ----
|
||||
|
||||
function createMockStreams() {
|
||||
const stdoutChunks: string[] = [];
|
||||
const stderrChunks: string[] = [];
|
||||
|
||||
const stdout = new Writable({
|
||||
write(chunk: Buffer, _encoding, callback) {
|
||||
stdoutChunks.push(chunk.toString());
|
||||
callback();
|
||||
},
|
||||
});
|
||||
|
||||
const stderr = new Writable({
|
||||
write(chunk: Buffer, _encoding, callback) {
|
||||
stderrChunks.push(chunk.toString());
|
||||
callback();
|
||||
},
|
||||
});
|
||||
|
||||
return { stdout, stderr, stdoutChunks, stderrChunks };
|
||||
}
|
||||
|
||||
function pushAndEnd(stdin: Readable, lines: string[]) {
|
||||
for (const line of lines) {
|
||||
stdin.push(line + '\n');
|
||||
}
|
||||
stdin.push(null); // EOF
|
||||
}
|
||||
|
||||
// ---- Tests ----
|
||||
|
||||
describe('MCP STDIO Bridge', () => {
|
||||
beforeAll(() => {
|
||||
recorded.length = 0;
|
||||
sessionCounter = 0;
|
||||
});
|
||||
|
||||
it('forwards initialize request and returns response', async () => {
|
||||
recorded.length = 0;
|
||||
const stdin = new Readable({ read() {} });
|
||||
const { stdout, stdoutChunks } = createMockStreams();
|
||||
|
||||
const initMsg = JSON.stringify({
|
||||
jsonrpc: '2.0', id: 1, method: 'initialize',
|
||||
params: { protocolVersion: '2024-11-05', capabilities: {}, clientInfo: { name: 'test', version: '1.0' } },
|
||||
});
|
||||
|
||||
pushAndEnd(stdin, [initMsg]);
|
||||
|
||||
await runMcpBridge({
|
||||
projectName: 'test-project',
|
||||
mcplocalUrl: `http://localhost:${mockPort}`,
|
||||
stdin, stdout, stderr: new Writable({ write(_, __, cb) { cb(); } }),
|
||||
});
|
||||
|
||||
// Verify request was made to correct URL
|
||||
expect(recorded.some((r) => r.url === '/projects/test-project/mcp' && r.method === 'POST')).toBe(true);
|
||||
|
||||
// Verify response on stdout
|
||||
const output = stdoutChunks.join('');
|
||||
const parsed = JSON.parse(output.trim());
|
||||
expect(parsed.result.serverInfo.name).toBe('test-server');
|
||||
expect(parsed.result.protocolVersion).toBe('2024-11-05');
|
||||
});
|
||||
|
||||
it('sends session ID on subsequent requests', async () => {
|
||||
recorded.length = 0;
|
||||
const stdin = new Readable({ read() {} });
|
||||
const { stdout, stdoutChunks } = createMockStreams();
|
||||
|
||||
const initMsg = JSON.stringify({
|
||||
jsonrpc: '2.0', id: 1, method: 'initialize',
|
||||
params: { protocolVersion: '2024-11-05', capabilities: {}, clientInfo: { name: 'test', version: '1.0' } },
|
||||
});
|
||||
const toolsListMsg = JSON.stringify({ jsonrpc: '2.0', id: 2, method: 'tools/list', params: {} });
|
||||
|
||||
pushAndEnd(stdin, [initMsg, toolsListMsg]);
|
||||
|
||||
await runMcpBridge({
|
||||
projectName: 'test-project',
|
||||
mcplocalUrl: `http://localhost:${mockPort}`,
|
||||
stdin, stdout, stderr: new Writable({ write(_, __, cb) { cb(); } }),
|
||||
});
|
||||
|
||||
// First POST should NOT have mcp-session-id header
|
||||
const firstPost = recorded.find((r) => r.method === 'POST' && r.body.includes('initialize'));
|
||||
expect(firstPost).toBeDefined();
|
||||
expect(firstPost!.headers['mcp-session-id']).toBeUndefined();
|
||||
|
||||
// Second POST SHOULD have mcp-session-id header
|
||||
const secondPost = recorded.find((r) => r.method === 'POST' && r.body.includes('tools/list'));
|
||||
expect(secondPost).toBeDefined();
|
||||
expect(secondPost!.headers['mcp-session-id']).toMatch(/^session-/);
|
||||
|
||||
// Verify tools/list response
|
||||
const lines = stdoutChunks.join('').trim().split('\n');
|
||||
expect(lines.length).toBe(2);
|
||||
const toolsResponse = JSON.parse(lines[1]);
|
||||
expect(toolsResponse.result.tools[0].name).toBe('grafana/query');
|
||||
});
|
||||
|
||||
it('forwards tools/call and returns result', async () => {
|
||||
recorded.length = 0;
|
||||
const stdin = new Readable({ read() {} });
|
||||
const { stdout, stdoutChunks } = createMockStreams();
|
||||
|
||||
const initMsg = JSON.stringify({
|
||||
jsonrpc: '2.0', id: 1, method: 'initialize',
|
||||
params: { protocolVersion: '2024-11-05', capabilities: {}, clientInfo: { name: 'test', version: '1.0' } },
|
||||
});
|
||||
const callMsg = JSON.stringify({
|
||||
jsonrpc: '2.0', id: 2, method: 'tools/call',
|
||||
params: { name: 'grafana/query', arguments: { query: 'test' } },
|
||||
});
|
||||
|
||||
pushAndEnd(stdin, [initMsg, callMsg]);
|
||||
|
||||
await runMcpBridge({
|
||||
projectName: 'test-project',
|
||||
mcplocalUrl: `http://localhost:${mockPort}`,
|
||||
stdin, stdout, stderr: new Writable({ write(_, __, cb) { cb(); } }),
|
||||
});
|
||||
|
||||
const lines = stdoutChunks.join('').trim().split('\n');
|
||||
expect(lines.length).toBe(2);
|
||||
const callResponse = JSON.parse(lines[1]);
|
||||
expect(callResponse.result.content[0].text).toBe('tool result');
|
||||
});
|
||||
|
||||
it('forwards Authorization header when token provided', async () => {
|
||||
recorded.length = 0;
|
||||
const stdin = new Readable({ read() {} });
|
||||
const { stdout } = createMockStreams();
|
||||
|
||||
const initMsg = JSON.stringify({
|
||||
jsonrpc: '2.0', id: 1, method: 'initialize',
|
||||
params: { protocolVersion: '2024-11-05', capabilities: {}, clientInfo: { name: 'test', version: '1.0' } },
|
||||
});
|
||||
|
||||
pushAndEnd(stdin, [initMsg]);
|
||||
|
||||
await runMcpBridge({
|
||||
projectName: 'test-project',
|
||||
mcplocalUrl: `http://localhost:${mockPort}`,
|
||||
token: 'my-secret-token',
|
||||
stdin, stdout, stderr: new Writable({ write(_, __, cb) { cb(); } }),
|
||||
});
|
||||
|
||||
const post = recorded.find((r) => r.method === 'POST');
|
||||
expect(post).toBeDefined();
|
||||
expect(post!.headers['authorization']).toBe('Bearer my-secret-token');
|
||||
});
|
||||
|
||||
it('does not send Authorization header when no token', async () => {
|
||||
recorded.length = 0;
|
||||
const stdin = new Readable({ read() {} });
|
||||
const { stdout } = createMockStreams();
|
||||
|
||||
const initMsg = JSON.stringify({
|
||||
jsonrpc: '2.0', id: 1, method: 'initialize',
|
||||
params: { protocolVersion: '2024-11-05', capabilities: {}, clientInfo: { name: 'test', version: '1.0' } },
|
||||
});
|
||||
|
||||
pushAndEnd(stdin, [initMsg]);
|
||||
|
||||
await runMcpBridge({
|
||||
projectName: 'test-project',
|
||||
mcplocalUrl: `http://localhost:${mockPort}`,
|
||||
stdin, stdout, stderr: new Writable({ write(_, __, cb) { cb(); } }),
|
||||
});
|
||||
|
||||
const post = recorded.find((r) => r.method === 'POST');
|
||||
expect(post).toBeDefined();
|
||||
expect(post!.headers['authorization']).toBeUndefined();
|
||||
});
|
||||
|
||||
it('sends DELETE to clean up session on stdin EOF', async () => {
|
||||
recorded.length = 0;
|
||||
const stdin = new Readable({ read() {} });
|
||||
const { stdout } = createMockStreams();
|
||||
|
||||
const initMsg = JSON.stringify({
|
||||
jsonrpc: '2.0', id: 1, method: 'initialize',
|
||||
params: { protocolVersion: '2024-11-05', capabilities: {}, clientInfo: { name: 'test', version: '1.0' } },
|
||||
});
|
||||
|
||||
pushAndEnd(stdin, [initMsg]);
|
||||
|
||||
await runMcpBridge({
|
||||
projectName: 'test-project',
|
||||
mcplocalUrl: `http://localhost:${mockPort}`,
|
||||
stdin, stdout, stderr: new Writable({ write(_, __, cb) { cb(); } }),
|
||||
});
|
||||
|
||||
// Should have a DELETE request for session cleanup
|
||||
const deleteReq = recorded.find((r) => r.method === 'DELETE');
|
||||
expect(deleteReq).toBeDefined();
|
||||
expect(deleteReq!.headers['mcp-session-id']).toMatch(/^session-/);
|
||||
});
|
||||
|
||||
it('does not send DELETE if no session was established', async () => {
|
||||
recorded.length = 0;
|
||||
const stdin = new Readable({ read() {} });
|
||||
const { stdout } = createMockStreams();
|
||||
|
||||
// Push EOF immediately with no messages
|
||||
stdin.push(null);
|
||||
|
||||
await runMcpBridge({
|
||||
projectName: 'test-project',
|
||||
mcplocalUrl: `http://localhost:${mockPort}`,
|
||||
stdin, stdout, stderr: new Writable({ write(_, __, cb) { cb(); } }),
|
||||
});
|
||||
|
||||
expect(recorded.filter((r) => r.method === 'DELETE')).toHaveLength(0);
|
||||
});
|
||||
|
||||
it('writes errors to stderr, not stdout', async () => {
|
||||
recorded.length = 0;
|
||||
const stdin = new Readable({ read() {} });
|
||||
const { stdout, stdoutChunks, stderr, stderrChunks } = createMockStreams();
|
||||
|
||||
// Send to a non-existent port to trigger connection error
|
||||
const badMsg = JSON.stringify({ jsonrpc: '2.0', id: 1, method: 'initialize', params: {} });
|
||||
pushAndEnd(stdin, [badMsg]);
|
||||
|
||||
await runMcpBridge({
|
||||
projectName: 'test-project',
|
||||
mcplocalUrl: 'http://localhost:1', // will fail to connect
|
||||
stdin, stdout, stderr,
|
||||
});
|
||||
|
||||
// Error should be on stderr
|
||||
expect(stderrChunks.join('')).toContain('MCP bridge error');
|
||||
// stdout should be empty (no corrupted output)
|
||||
expect(stdoutChunks.join('')).toBe('');
|
||||
});
|
||||
|
||||
it('skips blank lines in stdin', async () => {
|
||||
recorded.length = 0;
|
||||
const stdin = new Readable({ read() {} });
|
||||
const { stdout, stdoutChunks } = createMockStreams();
|
||||
|
||||
const initMsg = JSON.stringify({
|
||||
jsonrpc: '2.0', id: 1, method: 'initialize',
|
||||
params: { protocolVersion: '2024-11-05', capabilities: {}, clientInfo: { name: 'test', version: '1.0' } },
|
||||
});
|
||||
|
||||
pushAndEnd(stdin, ['', ' ', initMsg, '']);
|
||||
|
||||
await runMcpBridge({
|
||||
projectName: 'test-project',
|
||||
mcplocalUrl: `http://localhost:${mockPort}`,
|
||||
stdin, stdout, stderr: new Writable({ write(_, __, cb) { cb(); } }),
|
||||
});
|
||||
|
||||
// Only one POST (for the actual message)
|
||||
const posts = recorded.filter((r) => r.method === 'POST');
|
||||
expect(posts).toHaveLength(1);
|
||||
|
||||
// One response line
|
||||
const lines = stdoutChunks.join('').trim().split('\n');
|
||||
expect(lines).toHaveLength(1);
|
||||
});
|
||||
|
||||
it('handles SSE (text/event-stream) responses', async () => {
|
||||
recorded.length = 0;
|
||||
const stdin = new Readable({ read() {} });
|
||||
const { stdout, stdoutChunks } = createMockStreams();
|
||||
|
||||
const initMsg = JSON.stringify({
|
||||
jsonrpc: '2.0', id: 1, method: 'initialize',
|
||||
params: { protocolVersion: '2024-11-05', capabilities: {}, clientInfo: { name: 'test', version: '1.0' } },
|
||||
});
|
||||
|
||||
pushAndEnd(stdin, [initMsg]);
|
||||
|
||||
await runMcpBridge({
|
||||
projectName: 'sse-project', // triggers SSE response from mock server
|
||||
mcplocalUrl: `http://localhost:${mockPort}`,
|
||||
stdin, stdout, stderr: new Writable({ write(_, __, cb) { cb(); } }),
|
||||
});
|
||||
|
||||
// Should extract JSON from SSE data: lines
|
||||
const output = stdoutChunks.join('').trim();
|
||||
const parsed = JSON.parse(output);
|
||||
expect(parsed.result.serverInfo.name).toBe('test-server');
|
||||
});
|
||||
|
||||
it('URL-encodes project name', async () => {
|
||||
recorded.length = 0;
|
||||
const stdin = new Readable({ read() {} });
|
||||
const { stdout } = createMockStreams();
|
||||
const { stderr } = createMockStreams();
|
||||
|
||||
const initMsg = JSON.stringify({
|
||||
jsonrpc: '2.0', id: 1, method: 'initialize',
|
||||
params: { protocolVersion: '2024-11-05', capabilities: {}, clientInfo: { name: 'test', version: '1.0' } },
|
||||
});
|
||||
|
||||
pushAndEnd(stdin, [initMsg]);
|
||||
|
||||
await runMcpBridge({
|
||||
projectName: 'my project',
|
||||
mcplocalUrl: `http://localhost:${mockPort}`,
|
||||
stdin, stdout, stderr,
|
||||
});
|
||||
|
||||
const post = recorded.find((r) => r.method === 'POST');
|
||||
expect(post?.url).toBe('/projects/my%20project/mcp');
|
||||
});
|
||||
});
|
||||
|
||||
describe('createMcpCommand', () => {
|
||||
it('accepts --project option directly', () => {
|
||||
const cmd = createMcpCommand({
|
||||
getProject: () => undefined,
|
||||
configLoader: () => ({ mcplocalUrl: 'http://localhost:3200' }),
|
||||
credentialsLoader: () => null,
|
||||
});
|
||||
const opt = cmd.options.find((o) => o.long === '--project');
|
||||
expect(opt).toBeDefined();
|
||||
expect(opt!.short).toBe('-p');
|
||||
});
|
||||
|
||||
it('parses --project from command args', async () => {
|
||||
let capturedProject: string | undefined;
|
||||
const cmd = createMcpCommand({
|
||||
getProject: () => undefined,
|
||||
configLoader: () => ({ mcplocalUrl: `http://localhost:${mockPort}` }),
|
||||
credentialsLoader: () => null,
|
||||
});
|
||||
// Override the action to capture what project was parsed
|
||||
// We test by checking the option parsing works, not by running the full bridge
|
||||
const parsed = cmd.parse(['--project', 'test-proj'], { from: 'user' });
|
||||
capturedProject = parsed.opts().project;
|
||||
expect(capturedProject).toBe('test-proj');
|
||||
});
|
||||
|
||||
it('parses -p shorthand from command args', () => {
|
||||
const cmd = createMcpCommand({
|
||||
getProject: () => undefined,
|
||||
configLoader: () => ({ mcplocalUrl: `http://localhost:${mockPort}` }),
|
||||
credentialsLoader: () => null,
|
||||
});
|
||||
const parsed = cmd.parse(['-p', 'my-project'], { from: 'user' });
|
||||
expect(parsed.opts().project).toBe('my-project');
|
||||
});
|
||||
});
|
||||
@@ -24,11 +24,14 @@ describe('project with new fields', () => {
|
||||
});
|
||||
|
||||
describe('create project with enhanced options', () => {
|
||||
it('creates project with servers', async () => {
|
||||
it('creates project with proxy mode and servers', async () => {
|
||||
const cmd = createCreateCommand({ client, log });
|
||||
await cmd.parseAsync([
|
||||
'project', 'smart-home',
|
||||
'-d', 'Smart home project',
|
||||
'--proxy-mode', 'filtered',
|
||||
'--llm-provider', 'gemini-cli',
|
||||
'--llm-model', 'gemini-2.0-flash',
|
||||
'--server', 'my-grafana',
|
||||
'--server', 'my-ha',
|
||||
], { from: 'user' });
|
||||
@@ -36,21 +39,49 @@ describe('project with new fields', () => {
|
||||
expect(client.post).toHaveBeenCalledWith('/api/v1/projects', expect.objectContaining({
|
||||
name: 'smart-home',
|
||||
description: 'Smart home project',
|
||||
proxyMode: 'filtered',
|
||||
llmProvider: 'gemini-cli',
|
||||
llmModel: 'gemini-2.0-flash',
|
||||
servers: ['my-grafana', 'my-ha'],
|
||||
}));
|
||||
});
|
||||
|
||||
it('creates project with members', async () => {
|
||||
const cmd = createCreateCommand({ client, log });
|
||||
await cmd.parseAsync([
|
||||
'project', 'team-project',
|
||||
'--member', 'alice@test.com',
|
||||
'--member', 'bob@test.com',
|
||||
], { from: 'user' });
|
||||
|
||||
expect(client.post).toHaveBeenCalledWith('/api/v1/projects', expect.objectContaining({
|
||||
name: 'team-project',
|
||||
members: ['alice@test.com', 'bob@test.com'],
|
||||
}));
|
||||
});
|
||||
|
||||
it('defaults proxy mode to direct', async () => {
|
||||
const cmd = createCreateCommand({ client, log });
|
||||
await cmd.parseAsync(['project', 'basic'], { from: 'user' });
|
||||
|
||||
expect(client.post).toHaveBeenCalledWith('/api/v1/projects', expect.objectContaining({
|
||||
proxyMode: 'direct',
|
||||
}));
|
||||
});
|
||||
});
|
||||
|
||||
describe('get projects shows new columns', () => {
|
||||
it('shows SERVERS column', async () => {
|
||||
it('shows MODE, SERVERS, MEMBERS columns', async () => {
|
||||
const deps = {
|
||||
output: [] as string[],
|
||||
fetchResource: vi.fn(async () => [{
|
||||
id: 'proj-1',
|
||||
name: 'smart-home',
|
||||
description: 'Test',
|
||||
proxyMode: 'filtered',
|
||||
ownerId: 'user-1',
|
||||
servers: [{ server: { name: 'grafana' } }, { server: { name: 'ha' } }],
|
||||
members: [{ user: { email: 'alice@test.com' } }],
|
||||
}]),
|
||||
log: (...args: string[]) => deps.output.push(args.join(' ')),
|
||||
};
|
||||
@@ -58,13 +89,15 @@ describe('project with new fields', () => {
|
||||
await cmd.parseAsync(['node', 'test', 'projects']);
|
||||
|
||||
const text = deps.output.join('\n');
|
||||
expect(text).toContain('MODE');
|
||||
expect(text).toContain('SERVERS');
|
||||
expect(text).toContain('MEMBERS');
|
||||
expect(text).toContain('smart-home');
|
||||
});
|
||||
});
|
||||
|
||||
describe('describe project shows full detail', () => {
|
||||
it('shows servers and LLM config', async () => {
|
||||
it('shows servers and members', async () => {
|
||||
const deps = {
|
||||
output: [] as string[],
|
||||
client: mockClient(),
|
||||
@@ -72,6 +105,7 @@ describe('project with new fields', () => {
|
||||
id: 'proj-1',
|
||||
name: 'smart-home',
|
||||
description: 'Smart home',
|
||||
proxyMode: 'filtered',
|
||||
llmProvider: 'gemini-cli',
|
||||
llmModel: 'gemini-2.0-flash',
|
||||
ownerId: 'user-1',
|
||||
@@ -79,6 +113,10 @@ describe('project with new fields', () => {
|
||||
{ server: { name: 'my-grafana' } },
|
||||
{ server: { name: 'my-ha' } },
|
||||
],
|
||||
members: [
|
||||
{ user: { email: 'alice@test.com' } },
|
||||
{ user: { email: 'bob@test.com' } },
|
||||
],
|
||||
createdAt: '2025-01-01',
|
||||
updatedAt: '2025-01-01',
|
||||
})),
|
||||
@@ -89,9 +127,12 @@ describe('project with new fields', () => {
|
||||
|
||||
const text = deps.output.join('\n');
|
||||
expect(text).toContain('=== Project: smart-home ===');
|
||||
expect(text).toContain('filtered');
|
||||
expect(text).toContain('gemini-cli');
|
||||
expect(text).toContain('my-grafana');
|
||||
expect(text).toContain('my-ha');
|
||||
expect(text).toContain('alice@test.com');
|
||||
expect(text).toContain('bob@test.com');
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
@@ -1,86 +0,0 @@
|
||||
import { describe, it, expect } from 'vitest';
|
||||
import type { ProxyModelDetails } from '../../src/commands/console/unified-types.js';
|
||||
|
||||
/**
|
||||
* Tests that ProxyModelDetails handles both pipeline and plugin types.
|
||||
* The ProvenanceView component renders these — a plugin has hooks but no stages/appliesTo.
|
||||
* This validates the type contract so rendering won't crash.
|
||||
*/
|
||||
|
||||
describe('ProxyModelDetails type contract', () => {
|
||||
it('pipeline-type has stages and appliesTo', () => {
|
||||
const details: ProxyModelDetails = {
|
||||
name: 'default',
|
||||
source: 'built-in',
|
||||
type: 'pipeline',
|
||||
controller: 'gate',
|
||||
stages: [
|
||||
{ type: 'passthrough' },
|
||||
{ type: 'paginate', config: { maxPageSize: 8000 } },
|
||||
],
|
||||
appliesTo: ['toolResult', 'prompt'],
|
||||
cacheable: true,
|
||||
};
|
||||
|
||||
expect(details.stages).toHaveLength(2);
|
||||
expect(details.appliesTo).toHaveLength(2);
|
||||
expect(details.cacheable).toBe(true);
|
||||
});
|
||||
|
||||
it('plugin-type has hooks but no stages/appliesTo', () => {
|
||||
const details: ProxyModelDetails = {
|
||||
name: 'gate',
|
||||
source: 'built-in',
|
||||
type: 'plugin',
|
||||
hooks: ['onInitialize', 'onToolsList', 'onToolCallBefore'],
|
||||
extends: [],
|
||||
description: 'Gate-only plugin',
|
||||
};
|
||||
|
||||
// These fields are undefined for plugins
|
||||
expect(details.stages).toBeUndefined();
|
||||
expect(details.appliesTo).toBeUndefined();
|
||||
expect(details.cacheable).toBeUndefined();
|
||||
expect(details.hooks).toHaveLength(3);
|
||||
});
|
||||
|
||||
it('safe access patterns for optional fields (what ProvenanceView does)', () => {
|
||||
const pluginDetails: ProxyModelDetails = {
|
||||
name: 'gate',
|
||||
source: 'built-in',
|
||||
type: 'plugin',
|
||||
hooks: ['onInitialize', 'onToolsList'],
|
||||
};
|
||||
|
||||
// These are the patterns used in ProvenanceView — must not crash
|
||||
const cacheLine = pluginDetails.cacheable ? ', cached' : '';
|
||||
expect(cacheLine).toBe('');
|
||||
|
||||
const appliesLine = pluginDetails.appliesTo && pluginDetails.appliesTo.length > 0
|
||||
? pluginDetails.appliesTo.join(', ')
|
||||
: '';
|
||||
expect(appliesLine).toBe('');
|
||||
|
||||
const stages = (pluginDetails.stages ?? []).map((s) => s.type);
|
||||
expect(stages).toEqual([]);
|
||||
|
||||
const hooks = pluginDetails.hooks && pluginDetails.hooks.length > 0
|
||||
? pluginDetails.hooks.join(', ')
|
||||
: '';
|
||||
expect(hooks).toBe('onInitialize, onToolsList');
|
||||
});
|
||||
|
||||
it('default plugin extends gate and content-pipeline', () => {
|
||||
const details: ProxyModelDetails = {
|
||||
name: 'default',
|
||||
source: 'built-in',
|
||||
type: 'plugin',
|
||||
hooks: ['onSessionCreate', 'onInitialize', 'onToolsList', 'onToolCallBefore', 'onToolCallAfter'],
|
||||
extends: ['gate', 'content-pipeline'],
|
||||
description: 'Default plugin with gating and content pipeline',
|
||||
};
|
||||
|
||||
expect(details.extends).toContain('gate');
|
||||
expect(details.extends).toContain('content-pipeline');
|
||||
});
|
||||
});
|
||||
@@ -3,39 +3,19 @@ import { mkdtempSync, rmSync } from 'node:fs';
|
||||
import { join } from 'node:path';
|
||||
import { tmpdir } from 'node:os';
|
||||
import { createStatusCommand } from '../../src/commands/status.js';
|
||||
import type { StatusCommandDeps } from '../../src/commands/status.js';
|
||||
import { saveConfig, DEFAULT_CONFIG } from '../../src/config/index.js';
|
||||
import { saveCredentials } from '../../src/auth/index.js';
|
||||
|
||||
let tempDir: string;
|
||||
let output: string[];
|
||||
let written: string[];
|
||||
|
||||
function log(...args: string[]) {
|
||||
output.push(args.join(' '));
|
||||
}
|
||||
|
||||
function write(text: string) {
|
||||
written.push(text);
|
||||
}
|
||||
|
||||
function baseDeps(overrides?: Partial<StatusCommandDeps>): Partial<StatusCommandDeps> {
|
||||
return {
|
||||
configDeps: { configDir: tempDir },
|
||||
credentialsDeps: { configDir: tempDir },
|
||||
log,
|
||||
write,
|
||||
checkHealth: async () => true,
|
||||
fetchProviders: async () => null,
|
||||
isTTY: false,
|
||||
...overrides,
|
||||
};
|
||||
}
|
||||
|
||||
beforeEach(() => {
|
||||
tempDir = mkdtempSync(join(tmpdir(), 'mcpctl-status-test-'));
|
||||
output = [];
|
||||
written = [];
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
@@ -44,7 +24,12 @@ afterEach(() => {
|
||||
|
||||
describe('status command', () => {
|
||||
it('shows status in table format', async () => {
|
||||
const cmd = createStatusCommand(baseDeps());
|
||||
const cmd = createStatusCommand({
|
||||
configDeps: { configDir: tempDir },
|
||||
credentialsDeps: { configDir: tempDir },
|
||||
log,
|
||||
checkHealth: async () => true,
|
||||
});
|
||||
await cmd.parseAsync([], { from: 'user' });
|
||||
const out = output.join('\n');
|
||||
expect(out).toContain('mcpctl v');
|
||||
@@ -54,35 +39,60 @@ describe('status command', () => {
|
||||
});
|
||||
|
||||
it('shows unreachable when daemons are down', async () => {
|
||||
const cmd = createStatusCommand(baseDeps({ checkHealth: async () => false }));
|
||||
const cmd = createStatusCommand({
|
||||
configDeps: { configDir: tempDir },
|
||||
credentialsDeps: { configDir: tempDir },
|
||||
log,
|
||||
checkHealth: async () => false,
|
||||
});
|
||||
await cmd.parseAsync([], { from: 'user' });
|
||||
expect(output.join('\n')).toContain('unreachable');
|
||||
});
|
||||
|
||||
it('shows not logged in when no credentials', async () => {
|
||||
const cmd = createStatusCommand(baseDeps());
|
||||
const cmd = createStatusCommand({
|
||||
configDeps: { configDir: tempDir },
|
||||
credentialsDeps: { configDir: tempDir },
|
||||
log,
|
||||
checkHealth: async () => true,
|
||||
});
|
||||
await cmd.parseAsync([], { from: 'user' });
|
||||
expect(output.join('\n')).toContain('not logged in');
|
||||
});
|
||||
|
||||
it('shows logged in user when credentials exist', async () => {
|
||||
saveCredentials({ token: 'tok', mcpdUrl: 'http://x:3100', user: 'alice@example.com' }, { configDir: tempDir });
|
||||
const cmd = createStatusCommand(baseDeps());
|
||||
const cmd = createStatusCommand({
|
||||
configDeps: { configDir: tempDir },
|
||||
credentialsDeps: { configDir: tempDir },
|
||||
log,
|
||||
checkHealth: async () => true,
|
||||
});
|
||||
await cmd.parseAsync([], { from: 'user' });
|
||||
expect(output.join('\n')).toContain('logged in as alice@example.com');
|
||||
});
|
||||
|
||||
it('shows status in JSON format', async () => {
|
||||
const cmd = createStatusCommand(baseDeps());
|
||||
const cmd = createStatusCommand({
|
||||
configDeps: { configDir: tempDir },
|
||||
credentialsDeps: { configDir: tempDir },
|
||||
log,
|
||||
checkHealth: async () => true,
|
||||
});
|
||||
await cmd.parseAsync(['-o', 'json'], { from: 'user' });
|
||||
const parsed = JSON.parse(output[0]) as Record<string, unknown>;
|
||||
expect(parsed['version']).toBe('0.0.1');
|
||||
expect(parsed['version']).toBe('0.1.0');
|
||||
expect(parsed['mcplocalReachable']).toBe(true);
|
||||
expect(parsed['mcpdReachable']).toBe(true);
|
||||
});
|
||||
|
||||
it('shows status in YAML format', async () => {
|
||||
const cmd = createStatusCommand(baseDeps({ checkHealth: async () => false }));
|
||||
const cmd = createStatusCommand({
|
||||
configDeps: { configDir: tempDir },
|
||||
credentialsDeps: { configDir: tempDir },
|
||||
log,
|
||||
checkHealth: async () => false,
|
||||
});
|
||||
await cmd.parseAsync(['-o', 'yaml'], { from: 'user' });
|
||||
expect(output[0]).toContain('mcplocalReachable: false');
|
||||
});
|
||||
@@ -90,12 +100,15 @@ describe('status command', () => {
|
||||
it('checks correct URLs from config', async () => {
|
||||
saveConfig({ ...DEFAULT_CONFIG, mcplocalUrl: 'http://local:3200', mcpdUrl: 'http://remote:3100' }, { configDir: tempDir });
|
||||
const checkedUrls: string[] = [];
|
||||
const cmd = createStatusCommand(baseDeps({
|
||||
const cmd = createStatusCommand({
|
||||
configDeps: { configDir: tempDir },
|
||||
credentialsDeps: { configDir: tempDir },
|
||||
log,
|
||||
checkHealth: async (url) => {
|
||||
checkedUrls.push(url);
|
||||
return false;
|
||||
},
|
||||
}));
|
||||
});
|
||||
await cmd.parseAsync([], { from: 'user' });
|
||||
expect(checkedUrls).toContain('http://local:3200');
|
||||
expect(checkedUrls).toContain('http://remote:3100');
|
||||
@@ -103,100 +116,14 @@ describe('status command', () => {
|
||||
|
||||
it('shows registries from config', async () => {
|
||||
saveConfig({ ...DEFAULT_CONFIG, registries: ['official'] }, { configDir: tempDir });
|
||||
const cmd = createStatusCommand(baseDeps());
|
||||
const cmd = createStatusCommand({
|
||||
configDeps: { configDir: tempDir },
|
||||
credentialsDeps: { configDir: tempDir },
|
||||
log,
|
||||
checkHealth: async () => true,
|
||||
});
|
||||
await cmd.parseAsync([], { from: 'user' });
|
||||
expect(output.join('\n')).toContain('official');
|
||||
expect(output.join('\n')).not.toContain('glama');
|
||||
});
|
||||
|
||||
it('shows LLM not configured hint when no LLM is set', async () => {
|
||||
const cmd = createStatusCommand(baseDeps());
|
||||
await cmd.parseAsync([], { from: 'user' });
|
||||
const out = output.join('\n');
|
||||
expect(out).toContain('LLM:');
|
||||
expect(out).toContain('not configured');
|
||||
expect(out).toContain('mcpctl config setup');
|
||||
});
|
||||
|
||||
it('shows green check when LLM is healthy (non-TTY)', async () => {
|
||||
saveConfig({ ...DEFAULT_CONFIG, llm: { provider: 'anthropic', model: 'claude-haiku-3-5-20241022' } }, { configDir: tempDir });
|
||||
const cmd = createStatusCommand(baseDeps({ checkLlm: async () => 'ok' }));
|
||||
await cmd.parseAsync([], { from: 'user' });
|
||||
const out = output.join('\n');
|
||||
expect(out).toContain('anthropic / claude-haiku-3-5-20241022');
|
||||
expect(out).toContain('✓ ok');
|
||||
});
|
||||
|
||||
it('shows red cross when LLM check fails (non-TTY)', async () => {
|
||||
saveConfig({ ...DEFAULT_CONFIG, llm: { provider: 'gemini-cli', model: 'gemini-2.5-flash' } }, { configDir: tempDir });
|
||||
const cmd = createStatusCommand(baseDeps({ checkLlm: async () => 'not authenticated' }));
|
||||
await cmd.parseAsync([], { from: 'user' });
|
||||
const out = output.join('\n');
|
||||
expect(out).toContain('✗ not authenticated');
|
||||
});
|
||||
|
||||
it('shows error message from mcplocal', async () => {
|
||||
saveConfig({ ...DEFAULT_CONFIG, llm: { provider: 'gemini-cli', model: 'gemini-2.5-flash' } }, { configDir: tempDir });
|
||||
const cmd = createStatusCommand(baseDeps({ checkLlm: async () => 'binary not found' }));
|
||||
await cmd.parseAsync([], { from: 'user' });
|
||||
expect(output.join('\n')).toContain('✗ binary not found');
|
||||
});
|
||||
|
||||
it('queries mcplocal URL for LLM health', async () => {
|
||||
saveConfig({ ...DEFAULT_CONFIG, mcplocalUrl: 'http://custom:9999', llm: { provider: 'gemini-cli', model: 'gemini-2.5-flash' } }, { configDir: tempDir });
|
||||
let queriedUrl = '';
|
||||
const cmd = createStatusCommand(baseDeps({
|
||||
checkLlm: async (url) => { queriedUrl = url; return 'ok'; },
|
||||
}));
|
||||
await cmd.parseAsync([], { from: 'user' });
|
||||
expect(queriedUrl).toBe('http://custom:9999');
|
||||
});
|
||||
|
||||
it('uses spinner on TTY and writes final result', async () => {
|
||||
saveConfig({ ...DEFAULT_CONFIG, llm: { provider: 'gemini-cli', model: 'gemini-2.5-flash' } }, { configDir: tempDir });
|
||||
const cmd = createStatusCommand(baseDeps({
|
||||
isTTY: true,
|
||||
checkLlm: async () => 'ok',
|
||||
}));
|
||||
await cmd.parseAsync([], { from: 'user' });
|
||||
// On TTY, the final LLM line goes through write(), not log()
|
||||
const finalWrite = written[written.length - 1];
|
||||
expect(finalWrite).toContain('gemini-cli / gemini-2.5-flash');
|
||||
expect(finalWrite).toContain('✓ ok');
|
||||
});
|
||||
|
||||
it('uses spinner on TTY and shows failure', async () => {
|
||||
saveConfig({ ...DEFAULT_CONFIG, llm: { provider: 'gemini-cli', model: 'gemini-2.5-flash' } }, { configDir: tempDir });
|
||||
const cmd = createStatusCommand(baseDeps({
|
||||
isTTY: true,
|
||||
checkLlm: async () => 'not authenticated',
|
||||
}));
|
||||
await cmd.parseAsync([], { from: 'user' });
|
||||
const finalWrite = written[written.length - 1];
|
||||
expect(finalWrite).toContain('✗ not authenticated');
|
||||
});
|
||||
|
||||
it('shows not configured when LLM provider is none', async () => {
|
||||
saveConfig({ ...DEFAULT_CONFIG, llm: { provider: 'none' } }, { configDir: tempDir });
|
||||
const cmd = createStatusCommand(baseDeps());
|
||||
await cmd.parseAsync([], { from: 'user' });
|
||||
expect(output.join('\n')).toContain('not configured');
|
||||
});
|
||||
|
||||
it('includes llm and llmStatus in JSON output', async () => {
|
||||
saveConfig({ ...DEFAULT_CONFIG, llm: { provider: 'gemini-cli', model: 'gemini-2.5-flash' } }, { configDir: tempDir });
|
||||
const cmd = createStatusCommand(baseDeps({ checkLlm: async () => 'ok' }));
|
||||
await cmd.parseAsync(['-o', 'json'], { from: 'user' });
|
||||
const parsed = JSON.parse(output[0]) as Record<string, unknown>;
|
||||
expect(parsed['llm']).toBe('gemini-cli / gemini-2.5-flash');
|
||||
expect(parsed['llmStatus']).toBe('ok');
|
||||
});
|
||||
|
||||
it('includes null llm in JSON output when not configured', async () => {
|
||||
const cmd = createStatusCommand(baseDeps());
|
||||
await cmd.parseAsync(['-o', 'json'], { from: 'user' });
|
||||
const parsed = JSON.parse(output[0]) as Record<string, unknown>;
|
||||
expect(parsed['llm']).toBeNull();
|
||||
expect(parsed['llmStatus']).toBeNull();
|
||||
});
|
||||
});
|
||||
|
||||
@@ -1,185 +0,0 @@
|
||||
import { describe, it, expect } from 'vitest';
|
||||
import { readFileSync, existsSync } from 'node:fs';
|
||||
import { join, dirname } from 'node:path';
|
||||
import { fileURLToPath } from 'node:url';
|
||||
import { execSync } from 'node:child_process';
|
||||
|
||||
const root = join(dirname(fileURLToPath(import.meta.url)), '..', '..', '..');
|
||||
const fishFile = readFileSync(join(root, 'completions', 'mcpctl.fish'), 'utf-8');
|
||||
const bashFile = readFileSync(join(root, 'completions', 'mcpctl.bash'), 'utf-8');
|
||||
|
||||
describe('freshness', () => {
|
||||
it('committed completions match generator output', () => {
|
||||
const generatorPath = join(root, 'scripts', 'generate-completions.ts');
|
||||
expect(existsSync(generatorPath), 'generator script must exist').toBe(true);
|
||||
// Run the generator in --check mode; exit 0 means files are up to date
|
||||
execSync(`npx tsx ${generatorPath} --check`, { cwd: root, stdio: 'pipe' });
|
||||
});
|
||||
});
|
||||
|
||||
describe('fish completions', () => {
|
||||
it('erases stale completions at the top', () => {
|
||||
const lines = fishFile.split('\n');
|
||||
const firstComplete = lines.findIndex((l) => l.startsWith('complete '));
|
||||
expect(lines[firstComplete]).toContain('-e');
|
||||
});
|
||||
|
||||
it('does not offer resource types without __mcpctl_needs_resource_type guard', () => {
|
||||
const resourceTypes = ['servers', 'instances', 'secrets', 'templates', 'projects', 'users', 'groups', 'rbac', 'prompts', 'promptrequests'];
|
||||
const lines = fishFile.split('\n').filter((l) => l.startsWith('complete '));
|
||||
|
||||
for (const line of lines) {
|
||||
// Find lines that offer resource types as positional args
|
||||
const offersResourceType = resourceTypes.some((r) => {
|
||||
// Match `-a "...servers..."` or `-a 'servers projects'`
|
||||
const aMatch = line.match(/-a\s+['"]([^'"]+)['"]/);
|
||||
if (!aMatch) return false;
|
||||
return aMatch[1].split(/\s+/).includes(r);
|
||||
});
|
||||
|
||||
if (!offersResourceType) continue;
|
||||
|
||||
// Skip the help completions line and the -e line
|
||||
if (line.includes('__fish_seen_subcommand_from help')) continue;
|
||||
// Skip project-scoped command offerings (those offer commands, not resource types)
|
||||
if (line.includes('attach-server') || line.includes('detach-server')) continue;
|
||||
// Skip lines that offer commands (not resource types)
|
||||
if (line.includes("-d 'Show") || line.includes("-d 'Manage") || line.includes("-d 'Authenticate") ||
|
||||
line.includes("-d 'Log out'") || line.includes("-d 'Get instance") || line.includes("-d 'Create a resource'") ||
|
||||
line.includes("-d 'Edit a resource'") || line.includes("-d 'Apply") || line.includes("-d 'Backup") ||
|
||||
line.includes("-d 'Restore") || line.includes("-d 'List resources") || line.includes("-d 'Delete a resource'")) continue;
|
||||
|
||||
// Lines offering resource types MUST have __mcpctl_needs_resource_type in their condition
|
||||
expect(line, `Resource type completion missing guard: ${line}`).toContain('__mcpctl_needs_resource_type');
|
||||
}
|
||||
});
|
||||
|
||||
it('resource name completions require resource type to be selected', () => {
|
||||
const lines = fishFile.split('\n').filter((l) => l.startsWith('complete') && l.includes('__mcpctl_resource_names'));
|
||||
expect(lines.length).toBeGreaterThan(0);
|
||||
for (const line of lines) {
|
||||
expect(line).toContain('not __mcpctl_needs_resource_type');
|
||||
}
|
||||
});
|
||||
|
||||
it('defines --project option with -p shorthand', () => {
|
||||
expect(fishFile).toContain("-s p -l project");
|
||||
});
|
||||
|
||||
it('attach-server command only shows with --project', () => {
|
||||
// Only check lines that OFFER attach-server as a command (via -a attach-server), not argument completions
|
||||
const lines = fishFile.split('\n').filter((l) =>
|
||||
l.startsWith('complete') && l.includes("-a attach-server"));
|
||||
expect(lines.length).toBeGreaterThan(0);
|
||||
for (const line of lines) {
|
||||
expect(line).toContain('__mcpctl_has_project');
|
||||
}
|
||||
});
|
||||
|
||||
it('detach-server command only shows with --project', () => {
|
||||
const lines = fishFile.split('\n').filter((l) =>
|
||||
l.startsWith('complete') && l.includes("-a detach-server"));
|
||||
expect(lines.length).toBeGreaterThan(0);
|
||||
for (const line of lines) {
|
||||
expect(line).toContain('__mcpctl_has_project');
|
||||
}
|
||||
});
|
||||
|
||||
it('resource name functions use jq to extract names and avoid nested matches', () => {
|
||||
const resourceNamesFn = fishFile.match(/function __mcpctl_resource_names[\s\S]*?^end/m)?.[0] ?? '';
|
||||
const projectNamesFn = fishFile.match(/function __mcpctl_project_names[\s\S]*?^end/m)?.[0] ?? '';
|
||||
|
||||
// Resource names: uses .[].name for most resources, .[][].server.name for instances
|
||||
expect(resourceNamesFn, '__mcpctl_resource_names must use jq for name extraction').toContain("jq -r");
|
||||
expect(resourceNamesFn, '__mcpctl_resource_names must not use string match on name').not.toMatch(/string match.*"name"/);
|
||||
|
||||
expect(projectNamesFn, '__mcpctl_project_names must use jq for name extraction').toContain("jq -r");
|
||||
expect(projectNamesFn, '__mcpctl_project_names must not use string match on name').not.toMatch(/string match.*"name"/);
|
||||
});
|
||||
|
||||
it('instances use server.name instead of name', () => {
|
||||
const resourceNamesFn = fishFile.match(/function __mcpctl_resource_names[\s\S]*?^end/m)?.[0] ?? '';
|
||||
expect(resourceNamesFn, 'must handle instances via server.name').toContain('.server.name');
|
||||
});
|
||||
|
||||
it('attach-server completes with available (unattached) servers and guards against repeat', () => {
|
||||
const attachLine = fishFile.split('\n').find((l) =>
|
||||
l.startsWith('complete') && l.includes('__fish_seen_subcommand_from attach-server'));
|
||||
expect(attachLine, 'attach-server argument completion must exist').toBeDefined();
|
||||
expect(attachLine, 'attach-server must use __mcpctl_available_servers').toContain('__mcpctl_available_servers');
|
||||
expect(attachLine, 'attach-server must guard with __mcpctl_needs_server_arg').toContain('__mcpctl_needs_server_arg');
|
||||
});
|
||||
|
||||
it('detach-server completes with project servers and guards against repeat', () => {
|
||||
const detachLine = fishFile.split('\n').find((l) =>
|
||||
l.startsWith('complete') && l.includes('__fish_seen_subcommand_from detach-server'));
|
||||
expect(detachLine, 'detach-server argument completion must exist').toBeDefined();
|
||||
expect(detachLine, 'detach-server must use __mcpctl_project_servers').toContain('__mcpctl_project_servers');
|
||||
expect(detachLine, 'detach-server must guard with __mcpctl_needs_server_arg').toContain('__mcpctl_needs_server_arg');
|
||||
});
|
||||
|
||||
it('non-project commands do not show with --project', () => {
|
||||
const nonProjectCmds = ['status', 'login', 'logout', 'config', 'apply', 'backup'];
|
||||
const lines = fishFile.split('\n').filter((l) => l.startsWith('complete') && l.includes('-a '));
|
||||
|
||||
for (const cmd of nonProjectCmds) {
|
||||
const cmdLines = lines.filter((l) => {
|
||||
const aMatch = l.match(/-a\s+(\S+)/);
|
||||
return aMatch && aMatch[1].replace(/['"]/g, '') === cmd;
|
||||
});
|
||||
for (const line of cmdLines) {
|
||||
expect(line, `${cmd} should require 'not __mcpctl_has_project'`).toContain('not __mcpctl_has_project');
|
||||
}
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
describe('bash completions', () => {
|
||||
it('separates project commands from regular commands', () => {
|
||||
expect(bashFile).toContain('project_commands=');
|
||||
expect(bashFile).toContain('attach-server detach-server');
|
||||
});
|
||||
|
||||
it('checks has_project before offering project commands', () => {
|
||||
expect(bashFile).toContain('if $has_project');
|
||||
expect(bashFile).toContain('$project_commands');
|
||||
});
|
||||
|
||||
it('fetches resource names dynamically after resource type', () => {
|
||||
expect(bashFile).toContain('_mcpctl_resource_names');
|
||||
// get, describe, and delete should each use resource_names when resource_type is set
|
||||
for (const cmd of ['get', 'describe', 'delete']) {
|
||||
const block = bashFile.match(new RegExp(`${cmd}\\)[\\s\\S]*?return ;;`))?.[0] ?? '';
|
||||
expect(block, `${cmd} case must use _mcpctl_resource_names`).toContain('_mcpctl_resource_names');
|
||||
}
|
||||
});
|
||||
|
||||
it('attach-server filters out already-attached servers and guards against repeat', () => {
|
||||
const attachBlock = bashFile.match(/attach-server\)[\s\S]*?return ;;/)?.[0] ?? '';
|
||||
expect(attachBlock, 'attach-server must use _mcpctl_get_project_value').toContain('_mcpctl_get_project_value');
|
||||
expect(attachBlock, 'attach-server must query project servers to exclude').toContain('--project');
|
||||
expect(attachBlock, 'attach-server must check position to prevent repeat').toContain('cword - subcmd_pos');
|
||||
});
|
||||
|
||||
it('detach-server shows only project servers and guards against repeat', () => {
|
||||
const detachBlock = bashFile.match(/detach-server\)[\s\S]*?return ;;/)?.[0] ?? '';
|
||||
expect(detachBlock, 'detach-server must use _mcpctl_get_project_value').toContain('_mcpctl_get_project_value');
|
||||
expect(detachBlock, 'detach-server must query project servers').toContain('--project');
|
||||
expect(detachBlock, 'detach-server must check position to prevent repeat').toContain('cword - subcmd_pos');
|
||||
});
|
||||
|
||||
it('instances use server.name instead of name', () => {
|
||||
const fnMatch = bashFile.match(/_mcpctl_resource_names\(\)[\s\S]*?\n\s*\}/)?.[0] ?? '';
|
||||
expect(fnMatch, 'must handle instances via .server.name').toContain('.server.name');
|
||||
});
|
||||
|
||||
it('defines --project option', () => {
|
||||
expect(bashFile).toContain('--project');
|
||||
});
|
||||
|
||||
it('resource name function uses jq to extract names and avoid nested matches', () => {
|
||||
const fnMatch = bashFile.match(/_mcpctl_resource_names\(\)[\s\S]*?\n\s*\}/)?.[0] ?? '';
|
||||
expect(fnMatch, '_mcpctl_resource_names must use jq for name extraction').toContain("jq -r");
|
||||
expect(fnMatch, '_mcpctl_resource_names must not use grep on name').not.toMatch(/grep.*"name"/);
|
||||
});
|
||||
});
|
||||
@@ -22,6 +22,7 @@ describe('CLI command registration (e2e)', () => {
|
||||
expect(commandNames).toContain('create');
|
||||
expect(commandNames).toContain('edit');
|
||||
expect(commandNames).toContain('backup');
|
||||
expect(commandNames).toContain('restore');
|
||||
});
|
||||
|
||||
it('old project and claude top-level commands are removed', () => {
|
||||
@@ -46,7 +47,7 @@ describe('CLI command registration (e2e)', () => {
|
||||
expect(subcommands).toContain('reset');
|
||||
});
|
||||
|
||||
it('create command has user, group, rbac, prompt, promptrequest subcommands', () => {
|
||||
it('create command has user, group, rbac subcommands', () => {
|
||||
const program = createProgram();
|
||||
const create = program.commands.find((c) => c.name() === 'create');
|
||||
expect(create).toBeDefined();
|
||||
@@ -58,24 +59,6 @@ describe('CLI command registration (e2e)', () => {
|
||||
expect(subcommands).toContain('user');
|
||||
expect(subcommands).toContain('group');
|
||||
expect(subcommands).toContain('rbac');
|
||||
expect(subcommands).toContain('prompt');
|
||||
expect(subcommands).toContain('promptrequest');
|
||||
});
|
||||
|
||||
it('get command accepts --project option', () => {
|
||||
const program = createProgram();
|
||||
const get = program.commands.find((c) => c.name() === 'get');
|
||||
expect(get).toBeDefined();
|
||||
|
||||
const projectOpt = get!.options.find((o) => o.long === '--project');
|
||||
expect(projectOpt).toBeDefined();
|
||||
expect(projectOpt!.description).toContain('project');
|
||||
});
|
||||
|
||||
it('program-level --project option is defined', () => {
|
||||
const program = createProgram();
|
||||
const projectOpt = program.options.find((o) => o.long === '--project');
|
||||
expect(projectOpt).toBeDefined();
|
||||
});
|
||||
|
||||
it('displays version', () => {
|
||||
|
||||
@@ -3,11 +3,9 @@
|
||||
"compilerOptions": {
|
||||
"rootDir": "src",
|
||||
"outDir": "dist",
|
||||
"types": ["node"],
|
||||
"jsx": "react-jsx",
|
||||
"exactOptionalPropertyTypes": false
|
||||
"types": ["node"]
|
||||
},
|
||||
"include": ["src/**/*.ts", "src/**/*.tsx"],
|
||||
"include": ["src/**/*.ts"],
|
||||
"references": [
|
||||
{ "path": "../shared" },
|
||||
{ "path": "../db" }
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "@mcpctl/db",
|
||||
"version": "0.0.1",
|
||||
"version": "0.1.0",
|
||||
"private": true,
|
||||
"type": "module",
|
||||
"main": "./dist/index.js",
|
||||
|
||||
@@ -1,8 +0,0 @@
|
||||
-- DropForeignKey
|
||||
ALTER TABLE "ProjectMember" DROP CONSTRAINT IF EXISTS "ProjectMember_projectId_fkey";
|
||||
|
||||
-- DropForeignKey
|
||||
ALTER TABLE "ProjectMember" DROP CONSTRAINT IF EXISTS "ProjectMember_userId_fkey";
|
||||
|
||||
-- DropTable
|
||||
DROP TABLE IF EXISTS "ProjectMember";
|
||||
@@ -1,11 +0,0 @@
|
||||
-- AlterTable: Add gated flag to Project
|
||||
ALTER TABLE "Project" ADD COLUMN "gated" BOOLEAN NOT NULL DEFAULT true;
|
||||
|
||||
-- AlterTable: Add priority, summary, chapters, linkTarget to Prompt
|
||||
ALTER TABLE "Prompt" ADD COLUMN "priority" INTEGER NOT NULL DEFAULT 5;
|
||||
ALTER TABLE "Prompt" ADD COLUMN "summary" TEXT;
|
||||
ALTER TABLE "Prompt" ADD COLUMN "chapters" JSONB;
|
||||
ALTER TABLE "Prompt" ADD COLUMN "linkTarget" TEXT;
|
||||
|
||||
-- AlterTable: Add priority to PromptRequest
|
||||
ALTER TABLE "PromptRequest" ADD COLUMN "priority" INTEGER NOT NULL DEFAULT 5;
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user