feat(agents): mcpctl chat REPL + agent CRUD + completions (Stage 5)

This is the moment the user can actually talk to an agent end-to-end:

  mcpctl create llm qwen3-thinking --type openai --model qwen3-thinking \
    --url http://litellm.nvidia-nim.svc.cluster.local:4000/v1 \
    --api-key-ref litellm-key/API_KEY
  mcpctl create agent reviewer --llm qwen3-thinking --project mcpctl-dev \
    --description "I review security design — ask me after each major change."
  mcpctl chat reviewer

Pieces:

* src/cli/src/commands/chat.ts (new) — REPL + one-shot. Streams the SSE
  endpoint and prints text deltas to stdout as they arrive; tool_call /
  tool_result events go to stderr in dim-style brackets so the chat
  output stays clean. LiteLLM-style flags (--temperature / --top-p /
  --top-k / --max-tokens / --seed / --stop / --allow-tool / --extra)
  layer over agent.defaultParams. In-REPL slash-commands: /set KEY VAL,
  /system <text>, /tools (list project's MCP servers), /clear (new
  thread), /save (PATCH agent.defaultParams = current overrides),
  /quit.

* src/cli/src/commands/create.ts — `create agent` mirroring the llm
  pattern. Every yaml-applyable field has a corresponding flag (memory
  rule); --default-temperature / --default-top-p / --default-top-k /
  --default-max-tokens / --default-seed / --default-stop /
  --default-extra / --default-params-file all populate agent.defaultParams.

* src/cli/src/commands/apply.ts — AgentSpecSchema accepts both `llm:
  qwen3-thinking` shorthand and `llm: { name: ... }` long form; runs
  after llms in the apply order so apiKey/llm references resolve. Round-
  trips with `get agent foo -o yaml | apply -f -` (memory rule).

* src/cli/src/commands/get.ts — agentColumns (NAME, LLM, PROJECT,
  DESCRIPTION, ID); RESOURCE_KIND mapping for yaml export.

* src/cli/src/commands/shared.ts — `agent`/`agents`/`thread`/`threads`
  added to RESOURCE_ALIASES.

* src/cli/src/index.ts — wires createChatCommand into the program; passes
  the resolved baseUrl + token so chat can stream SSE without going
  through ApiClient (which only does buffered request/response).

* completions/mcpctl.{fish,bash} regenerated. scripts/generate-completions.ts
  knows about agents (canonical + aliases) and emits a special-case
  `chat)` block that completes the first arg with `mcpctl get agents`
  names. tests/completions.test.ts: +9 new assertions covering agents in
  the resource list, chat in the commands list, --llm flag for create
  agent, agent-name completion for chat, etc.

CLI suite: 430/430 (was 421). Completions --check is clean.

Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
This commit is contained in:
Michal
2026-04-25 17:02:38 +01:00
parent 285be11dd5
commit 727e7d628c
10 changed files with 701 additions and 13 deletions

View File

@@ -5,11 +5,11 @@ _mcpctl() {
local cur prev words cword
_init_completion || return
local commands="status login logout config get describe delete logs create edit apply patch backup approve console cache test migrate rotate"
local commands="status login logout config get describe delete logs create edit apply chat patch backup approve console cache test migrate rotate"
local project_commands="get describe delete logs create edit attach-server detach-server"
local global_opts="-v --version --daemon-url --direct -p --project -h --help"
local resources="servers instances secrets secretbackends llms templates projects users groups rbac prompts promptrequests serverattachments proxymodels all"
local resource_aliases="servers instances secrets secretbackends llms templates projects users groups rbac prompts promptrequests serverattachments proxymodels all server srv instance inst secret sec secretbackend sb llm template tpl project proj user group rbac-definition rbac-binding prompt promptrequest pr serverattachment sa proxymodel pm"
local resources="servers instances secrets secretbackends llms agents templates projects users groups rbac prompts promptrequests serverattachments proxymodels all"
local resource_aliases="servers instances secrets secretbackends llms agents templates projects users groups rbac prompts promptrequests serverattachments proxymodels all server srv instance inst secret sec secretbackend sb llm agent template tpl project proj user group rbac-definition rbac-binding prompt promptrequest pr serverattachment sa proxymodel pm"
# Check if --project/-p was given
local has_project=false
@@ -175,7 +175,7 @@ _mcpctl() {
create)
local create_sub=$(_mcpctl_get_subcmd $subcmd_pos)
if [[ -z "$create_sub" ]]; then
COMPREPLY=($(compgen -W "server secret llm secretbackend project user group rbac mcptoken prompt serverattachment promptrequest help" -- "$cur"))
COMPREPLY=($(compgen -W "server secret llm agent secretbackend project user group rbac mcptoken prompt serverattachment promptrequest help" -- "$cur"))
else
case "$create_sub" in
server)
@@ -187,6 +187,9 @@ _mcpctl() {
llm)
COMPREPLY=($(compgen -W "--type --model --url --tier --description --api-key-ref --extra --force -h --help" -- "$cur"))
;;
agent)
COMPREPLY=($(compgen -W "--llm --project --description --system-prompt --system-prompt-file --proxy-model --default-temperature --default-top-p --default-top-k --default-max-tokens --default-seed --default-stop --default-extra --default-params-file --force -h --help" -- "$cur"))
;;
secretbackend)
COMPREPLY=($(compgen -W "--type --description --default --url --namespace --mount --path-prefix --auth --token-secret --role --auth-mount --sa-token-path --config --wizard --setup-token --policy-name --token-role --no-promote-default --force -h --help" -- "$cur"))
;;
@@ -232,6 +235,15 @@ _mcpctl() {
apply)
COMPREPLY=($(compgen -f -W "-f --file --dry-run -h --help" -- "$cur"))
return ;;
chat)
if [[ $((cword - subcmd_pos)) -eq 1 ]]; then
local names
names=$(_mcpctl_resource_names "agents")
COMPREPLY=($(compgen -W "$names -m --message --thread --system --system-file --system-append --temperature --top-p --top-k --max-tokens --seed --stop --allow-tool --extra --no-stream -h --help" -- "$cur"))
else
COMPREPLY=($(compgen -W "-m --message --thread --system --system-file --system-append --temperature --top-p --top-k --max-tokens --seed --stop --allow-tool --extra --no-stream -h --help" -- "$cur"))
fi
return ;;
patch)
if [[ -z "$resource_type" ]]; then
COMPREPLY=($(compgen -W "$resources -h --help" -- "$cur"))

View File

@@ -4,7 +4,7 @@
# Erase any stale completions from previous versions
complete -c mcpctl -e
set -l commands status login logout config get describe delete logs create edit apply patch backup approve console cache test migrate rotate
set -l commands status login logout config get describe delete logs create edit apply chat patch backup approve console cache test migrate rotate
set -l project_commands get describe delete logs create edit attach-server detach-server
# Disable file completions by default
@@ -31,10 +31,10 @@ function __mcpctl_has_project
end
# Resource type detection
set -l resources servers instances secrets secretbackends llms templates projects users groups rbac prompts promptrequests serverattachments proxymodels all
set -l resources servers instances secrets secretbackends llms agents templates projects users groups rbac prompts promptrequests serverattachments proxymodels all
function __mcpctl_needs_resource_type
set -l resource_aliases servers instances secrets secretbackends llms templates projects users groups rbac prompts promptrequests serverattachments proxymodels all server srv instance inst secret sec secretbackend sb llm template tpl project proj user group rbac-definition rbac-binding prompt promptrequest pr serverattachment sa proxymodel pm
set -l resource_aliases servers instances secrets secretbackends llms agents templates projects users groups rbac prompts promptrequests serverattachments proxymodels all server srv instance inst secret sec secretbackend sb llm agent template tpl project proj user group rbac-definition rbac-binding prompt promptrequest pr serverattachment sa proxymodel pm
set -l tokens (commandline -opc)
set -l found_cmd false
for tok in $tokens
@@ -61,6 +61,7 @@ function __mcpctl_resolve_resource
case secret sec secrets; echo secrets
case secretbackend sb secretbackends; echo secretbackends
case llm llms; echo llms
case agent agents; echo agents
case template tpl templates; echo templates
case project proj projects; echo projects
case user users; echo users
@@ -76,7 +77,7 @@ function __mcpctl_resolve_resource
end
function __mcpctl_get_resource_type
set -l resource_aliases servers instances secrets secretbackends llms templates projects users groups rbac prompts promptrequests serverattachments proxymodels all server srv instance inst secret sec secretbackend sb llm template tpl project proj user group rbac-definition rbac-binding prompt promptrequest pr serverattachment sa proxymodel pm
set -l resource_aliases servers instances secrets secretbackends llms agents templates projects users groups rbac prompts promptrequests serverattachments proxymodels all server srv instance inst secret sec secretbackend sb llm agent template tpl project proj user group rbac-definition rbac-binding prompt promptrequest pr serverattachment sa proxymodel pm
set -l tokens (commandline -opc)
set -l found_cmd false
for tok in $tokens
@@ -225,9 +226,10 @@ complete -c mcpctl -n "not __mcpctl_has_project; and not __fish_seen_subcommand_
complete -c mcpctl -n "not __mcpctl_has_project; and not __fish_seen_subcommand_from $commands" -a describe -d 'Show detailed information about a resource'
complete -c mcpctl -n "not __mcpctl_has_project; and not __fish_seen_subcommand_from $commands" -a delete -d 'Delete a resource (server, instance, secret, project, user, group, rbac)'
complete -c mcpctl -n "not __mcpctl_has_project; and not __fish_seen_subcommand_from $commands" -a logs -d 'Get logs from an MCP server instance'
complete -c mcpctl -n "not __mcpctl_has_project; and not __fish_seen_subcommand_from $commands" -a create -d 'Create a resource (server, secret, secretbackend, llm, project, user, group, rbac, serverattachment, prompt)'
complete -c mcpctl -n "not __mcpctl_has_project; and not __fish_seen_subcommand_from $commands" -a create -d 'Create a resource (server, secret, secretbackend, llm, agent, project, user, group, rbac, serverattachment, prompt)'
complete -c mcpctl -n "not __mcpctl_has_project; and not __fish_seen_subcommand_from $commands" -a edit -d 'Edit a resource in your default editor (server, project)'
complete -c mcpctl -n "not __mcpctl_has_project; and not __fish_seen_subcommand_from $commands" -a apply -d 'Apply declarative configuration from a YAML or JSON file'
complete -c mcpctl -n "not __mcpctl_has_project; and not __fish_seen_subcommand_from $commands" -a chat -d 'Open an interactive chat session with an agent (REPL or one-shot).'
complete -c mcpctl -n "not __mcpctl_has_project; and not __fish_seen_subcommand_from $commands" -a patch -d 'Patch a resource field (e.g. mcpctl patch project myproj llmProvider=none)'
complete -c mcpctl -n "not __mcpctl_has_project; and not __fish_seen_subcommand_from $commands" -a backup -d 'Git-based backup status and management'
complete -c mcpctl -n "not __mcpctl_has_project; and not __fish_seen_subcommand_from $commands" -a approve -d 'Approve a pending prompt request (atomic: delete request, create prompt)'
@@ -242,7 +244,7 @@ complete -c mcpctl -n "__mcpctl_has_project; and not __fish_seen_subcommand_from
complete -c mcpctl -n "__mcpctl_has_project; and not __fish_seen_subcommand_from $project_commands" -a describe -d 'Show detailed information about a resource'
complete -c mcpctl -n "__mcpctl_has_project; and not __fish_seen_subcommand_from $project_commands" -a delete -d 'Delete a resource (server, instance, secret, project, user, group, rbac)'
complete -c mcpctl -n "__mcpctl_has_project; and not __fish_seen_subcommand_from $project_commands" -a logs -d 'Get logs from an MCP server instance'
complete -c mcpctl -n "__mcpctl_has_project; and not __fish_seen_subcommand_from $project_commands" -a create -d 'Create a resource (server, secret, secretbackend, llm, project, user, group, rbac, serverattachment, prompt)'
complete -c mcpctl -n "__mcpctl_has_project; and not __fish_seen_subcommand_from $project_commands" -a create -d 'Create a resource (server, secret, secretbackend, llm, agent, project, user, group, rbac, serverattachment, prompt)'
complete -c mcpctl -n "__mcpctl_has_project; and not __fish_seen_subcommand_from $project_commands" -a edit -d 'Edit a resource in your default editor (server, project)'
complete -c mcpctl -n "__mcpctl_has_project; and not __fish_seen_subcommand_from $project_commands" -a attach-server -d 'Attach a server to a project (requires --project)'
complete -c mcpctl -n "__mcpctl_has_project; and not __fish_seen_subcommand_from $project_commands" -a detach-server -d 'Detach a server from a project (requires --project)'
@@ -285,10 +287,11 @@ complete -c mcpctl -n "__mcpctl_subcmd_active config claude-generate" -l stdout
complete -c mcpctl -n "__mcpctl_subcmd_active config impersonate" -l quit -d 'Stop impersonating and return to original identity'
# create subcommands
set -l create_cmds server secret llm secretbackend project user group rbac mcptoken prompt serverattachment promptrequest
set -l create_cmds server secret llm agent secretbackend project user group rbac mcptoken prompt serverattachment promptrequest
complete -c mcpctl -n "__fish_seen_subcommand_from create; and not __fish_seen_subcommand_from $create_cmds" -a server -d 'Create an MCP server definition'
complete -c mcpctl -n "__fish_seen_subcommand_from create; and not __fish_seen_subcommand_from $create_cmds" -a secret -d 'Create a secret'
complete -c mcpctl -n "__fish_seen_subcommand_from create; and not __fish_seen_subcommand_from $create_cmds" -a llm -d 'Register a server-managed LLM (anthropic, openai, vllm, ollama, deepseek, gemini-cli)'
complete -c mcpctl -n "__fish_seen_subcommand_from create; and not __fish_seen_subcommand_from $create_cmds" -a agent -d 'Create an Agent (LLM persona pinned to an Llm, optionally attached to a Project)'
complete -c mcpctl -n "__fish_seen_subcommand_from create; and not __fish_seen_subcommand_from $create_cmds" -a secretbackend -d 'Create a secret backend (plaintext, openbao)'
complete -c mcpctl -n "__fish_seen_subcommand_from create; and not __fish_seen_subcommand_from $create_cmds" -a project -d 'Create a project'
complete -c mcpctl -n "__fish_seen_subcommand_from create; and not __fish_seen_subcommand_from $create_cmds" -a user -d 'Create a user'
@@ -329,6 +332,23 @@ complete -c mcpctl -n "__mcpctl_subcmd_active create llm" -l api-key-ref -d 'API
complete -c mcpctl -n "__mcpctl_subcmd_active create llm" -l extra -d 'Extra config key=value (repeat)' -x
complete -c mcpctl -n "__mcpctl_subcmd_active create llm" -l force -d 'Update if already exists'
# create agent options
complete -c mcpctl -n "__mcpctl_subcmd_active create agent" -l llm -d 'Pinned Llm (see `mcpctl get llms`)' -x
complete -c mcpctl -n "__mcpctl_subcmd_active create agent" -l project -d 'Attach to this Project (optional)' -xa '(__mcpctl_project_names)'
complete -c mcpctl -n "__mcpctl_subcmd_active create agent" -l description -d 'Description (shown in MCP tools/list)' -x
complete -c mcpctl -n "__mcpctl_subcmd_active create agent" -l system-prompt -d 'System prompt (persona)' -x
complete -c mcpctl -n "__mcpctl_subcmd_active create agent" -l system-prompt-file -d 'Read system prompt from a file' -x
complete -c mcpctl -n "__mcpctl_subcmd_active create agent" -l proxy-model -d 'Optional proxyModel name override (informational)' -x
complete -c mcpctl -n "__mcpctl_subcmd_active create agent" -l default-temperature -d 'Default sampling temperature' -x
complete -c mcpctl -n "__mcpctl_subcmd_active create agent" -l default-top-p -d 'Default top_p' -x
complete -c mcpctl -n "__mcpctl_subcmd_active create agent" -l default-top-k -d 'Default top_k' -x
complete -c mcpctl -n "__mcpctl_subcmd_active create agent" -l default-max-tokens -d 'Default max_tokens' -x
complete -c mcpctl -n "__mcpctl_subcmd_active create agent" -l default-seed -d 'Default seed' -x
complete -c mcpctl -n "__mcpctl_subcmd_active create agent" -l default-stop -d 'Default stop sequence (repeat for multiple)' -x
complete -c mcpctl -n "__mcpctl_subcmd_active create agent" -l default-extra -d 'Default provider-specific knob k=v (repeat)' -x
complete -c mcpctl -n "__mcpctl_subcmd_active create agent" -l default-params-file -d 'Read defaultParams from a JSON file' -x
complete -c mcpctl -n "__mcpctl_subcmd_active create agent" -l force -d 'Update if already exists'
# create secretbackend options
complete -c mcpctl -n "__mcpctl_subcmd_active create secretbackend" -l type -d 'Backend type (plaintext, openbao)' -x
complete -c mcpctl -n "__mcpctl_subcmd_active create secretbackend" -l description -d 'Description' -x
@@ -471,6 +491,22 @@ complete -c mcpctl -n "__fish_seen_subcommand_from logs" -s i -l instance -d 'In
complete -c mcpctl -n "__fish_seen_subcommand_from apply" -s f -l file -d 'Path to config file (alternative to positional arg)' -rF
complete -c mcpctl -n "__fish_seen_subcommand_from apply" -l dry-run -d 'Validate and show changes without applying'
# chat options
complete -c mcpctl -n "__fish_seen_subcommand_from chat" -s m -l message -d 'One-shot: send a single message and exit (no REPL)' -x
complete -c mcpctl -n "__fish_seen_subcommand_from chat" -l thread -d 'Resume an existing thread' -x
complete -c mcpctl -n "__fish_seen_subcommand_from chat" -l system -d 'Replace agent.systemPrompt for this session' -x
complete -c mcpctl -n "__fish_seen_subcommand_from chat" -l system-file -d 'Read --system text from a file' -x
complete -c mcpctl -n "__fish_seen_subcommand_from chat" -l system-append -d 'Append to the agent system block for this session' -x
complete -c mcpctl -n "__fish_seen_subcommand_from chat" -l temperature -d 'Sampling temperature (0..2)' -x
complete -c mcpctl -n "__fish_seen_subcommand_from chat" -l top-p -d 'Nucleus sampling cutoff (0..1)' -x
complete -c mcpctl -n "__fish_seen_subcommand_from chat" -l top-k -d 'Top-K sampling (Anthropic; OpenAI ignores)' -x
complete -c mcpctl -n "__fish_seen_subcommand_from chat" -l max-tokens -d 'Maximum tokens in the assistant reply' -x
complete -c mcpctl -n "__fish_seen_subcommand_from chat" -l seed -d 'Reproducibility seed (provider-dependent)' -x
complete -c mcpctl -n "__fish_seen_subcommand_from chat" -l stop -d 'Stop sequence (repeatable)' -x
complete -c mcpctl -n "__fish_seen_subcommand_from chat" -l allow-tool -d 'Restrict to this tool only (repeatable)' -x
complete -c mcpctl -n "__fish_seen_subcommand_from chat" -l extra -d 'Provider-specific knob k=v (repeatable)' -x
complete -c mcpctl -n "__fish_seen_subcommand_from chat" -l no-stream -d 'Disable SSE streaming (single JSON response)'
# console options
complete -c mcpctl -n "__fish_seen_subcommand_from console" -l stdin-mcp -d 'Run inspector as MCP server over stdin/stdout (for Claude)'
complete -c mcpctl -n "__fish_seen_subcommand_from console" -l audit -d 'Browse audit events from mcpd'

View File

@@ -184,7 +184,7 @@ async function extractTree(): Promise<CmdInfo> {
// ============================================================
const CANONICAL_RESOURCES = [
'servers', 'instances', 'secrets', 'secretbackends', 'llms', 'templates', 'projects',
'servers', 'instances', 'secrets', 'secretbackends', 'llms', 'agents', 'templates', 'projects',
'users', 'groups', 'rbac', 'prompts', 'promptrequests',
'serverattachments', 'proxymodels', 'all',
];
@@ -195,6 +195,7 @@ const ALIAS_ENTRIES: [string, string][] = [
['secret', 'secrets'], ['sec', 'secrets'],
['secretbackend', 'secretbackends'], ['sb', 'secretbackends'],
['llm', 'llms'], ['llms', 'llms'],
['agent', 'agents'], ['agents', 'agents'],
['template', 'templates'], ['tpl', 'templates'],
['project', 'projects'], ['proj', 'projects'],
['user', 'users'],
@@ -904,6 +905,20 @@ function emitBashCase(emit: (s: string) => void, cmd: CmdInfo, root: CmdInfo): v
return;
}
// chat: first arg is agent name
if (name === 'chat') {
emit(` ${name})`);
emit(' if [[ $((cword - subcmd_pos)) -eq 1 ]]; then');
emit(' local names');
emit(' names=$(_mcpctl_resource_names "agents")');
emit(` COMPREPLY=($(compgen -W "$names ${optFlags}" -- "$cur"))`);
emit(' else');
emit(` COMPREPLY=($(compgen -W "${optFlags}" -- "$cur"))`);
emit(' fi');
emit(' return ;;');
return;
}
// console: first arg is project name
if (name === 'console') {
emit(` ${name})`);

View File

@@ -63,6 +63,42 @@ const LlmSpecSchema = z.object({
extraConfig: z.record(z.unknown()).default({}),
});
// Per-turn sampling / tool parameters an agent can store as defaultParams
// and that a chat session may override. Keys use provider wire-format
// snake_case (top_p, max_tokens, …). `.strict()` rejects unknown keys so a
// typo in applied YAML fails validation instead of being silently dropped.
const AgentChatParamsAppliedSchema = z.object({
  temperature: z.number().optional(),
  top_p: z.number().optional(),
  top_k: z.number().int().optional(),
  max_tokens: z.number().int().optional(),
  // A single stop sequence or a list of them.
  stop: z.union([z.string(), z.array(z.string())]).optional(),
  presence_penalty: z.number().optional(),
  frequency_penalty: z.number().optional(),
  seed: z.number().int().optional(),
  response_format: z.record(z.unknown()).optional(),
  tool_choice: z.unknown().optional(),
  // Restrict the agent to these tools only.
  tools_allowlist: z.array(z.string()).optional(),
  // Session-level system-prompt replacement / addition.
  systemOverride: z.string().optional(),
  systemAppend: z.string().optional(),
  // Provider-specific escape hatch for knobs not modelled above.
  extra: z.record(z.unknown()).optional(),
}).strict();
// Declarative Agent spec accepted by `mcpctl apply`. Both `llm` and
// `project` accept a string shorthand (`llm: qwen3-thinking`) which is
// normalised into the `{ name: ... }` long form via `.transform`, so
// round-tripping `get agent -o yaml | apply -f -` works either way.
const AgentSpecSchema = z.object({
  name: z.string().min(1).max(100).regex(/^[a-z0-9-]+$/),
  description: z.string().max(500).default(''),
  systemPrompt: z.string().default(''),
  llm: z.union([
    z.object({ name: z.string().min(1) }),
    z.object({ id: z.string().min(1) }),
    // Allow string shorthand: `llm: qwen3-thinking` → `{ name: 'qwen3-thinking' }`
    z.string().min(1).transform((name) => ({ name })),
  ]),
  project: z.union([
    z.object({ name: z.string().min(1) }),
    z.string().min(1).transform((name) => ({ name })),
  ]).optional(),
  proxyModelName: z.string().optional(),
  defaultParams: AgentChatParamsAppliedSchema.default({}),
  extras: z.record(z.unknown()).default({}),
});
const TemplateEnvEntrySchema = z.object({
name: z.string().min(1),
description: z.string().optional(),
@@ -172,6 +208,7 @@ const ApplyConfigSchema = z.object({
secretbackends: z.array(SecretBackendSpecSchema).default([]),
secrets: z.array(SecretSpecSchema).default([]),
llms: z.array(LlmSpecSchema).default([]),
agents: z.array(AgentSpecSchema).default([]),
servers: z.array(ServerSpecSchema).default([]),
users: z.array(UserSpecSchema).default([]),
groups: z.array(GroupSpecSchema).default([]),
@@ -215,6 +252,7 @@ export function createApplyCommand(deps: ApplyCommandDeps): Command {
if (config.secretbackends.length > 0) log(` ${config.secretbackends.length} secretbackend(s)`);
if (config.secrets.length > 0) log(` ${config.secrets.length} secret(s)`);
if (config.llms.length > 0) log(` ${config.llms.length} llm(s)`);
if (config.agents.length > 0) log(` ${config.agents.length} agent(s)`);
if (config.servers.length > 0) log(` ${config.servers.length} server(s)`);
if (config.users.length > 0) log(` ${config.users.length} user(s)`);
if (config.groups.length > 0) log(` ${config.groups.length} group(s)`);
@@ -262,6 +300,7 @@ const KIND_TO_RESOURCE: Record<string, string> = {
mcptoken: 'mcptokens',
secretbackend: 'secretbackends',
llm: 'llms',
agent: 'agents',
};
/**
@@ -434,6 +473,24 @@ async function applyConfig(client: ApiClient, config: ApplyConfig, log: (...args
}
}
// Apply agents (after llms — agent.llm references an existing Llm by name)
for (const agent of config.agents) {
try {
const existing = await cachedFindByName('agents', agent.name);
if (existing) {
const { name: _n, ...updateBody } = agent;
await withRetry(() => client.put(`/api/v1/agents/${existing.id}`, updateBody));
log(`Updated agent: ${agent.name}`);
} else {
await withRetry(() => client.post('/api/v1/agents', agent));
invalidateCache('agents');
log(`Created agent: ${agent.name}`);
}
} catch (err) {
log(`Error applying agent '${agent.name}': ${err instanceof Error ? err.message : err}`);
}
}
// Apply users (matched by email)
for (const user of config.users) {
try {

View File

@@ -0,0 +1,409 @@
/**
* `mcpctl chat <agent>` — interactive REPL + one-shot mode.
*
* Streams the agent's response over SSE so the user sees text appear as it's
* generated. Tool calls and tool results print to stderr in dim style so the
* REPL output stays clean. LiteLLM-style flags (--temperature, --max-tokens,
* --system, etc.) override the agent's defaultParams for this session only;
* use the in-REPL `/save` slash-command to persist them back to the agent.
*
* Modes:
* mcpctl chat <agent> # REPL, new thread
* mcpctl chat <agent> --thread <id> # REPL, resume thread
* mcpctl chat <agent> -m "hi" # one-shot, prints reply, no REPL
*
* Slash-commands inside the REPL:
* /set KEY VALUE # adjust an override (temperature 0.2)
* /system <text> # set systemAppend for this turn onward
* /tools # list tools the agent can call
* /clear # start a fresh thread (same agent)
* /save # PATCH agent.defaultParams = current overrides
* /quit # exit
*/
import { Command } from 'commander';
import http from 'node:http';
import https from 'node:https';
import readline from 'node:readline';
import { promises as fs } from 'node:fs';
import type { ApiClient } from '../api-client.js';
const STREAM_TIMEOUT_MS = 600_000; // 10 minutes — agent turns can include long tool calls
/** Dependencies injected by index.ts so the chat command is testable in isolation. */
export interface ChatCommandDeps {
  client: ApiClient; // buffered request/response client (non-streaming calls)
  baseUrl: string; // daemon base URL — used for the raw SSE stream, bypassing ApiClient
  token?: string | undefined; // bearer token for the SSE request, when logged in
  log: (...args: unknown[]) => void; // general-purpose logger
}
/**
 * Build the `mcpctl chat <agent>` command.
 *
 * With `-m/--message` it runs one-shot (send one message, print the reply,
 * exit); otherwise it opens the interactive REPL. All sampling flags become
 * session-level overrides layered over the agent's stored defaultParams.
 * Note: commander maps `--no-stream` onto `opts.stream === false`.
 */
export function createChatCommand(deps: ChatCommandDeps): Command {
  return new Command('chat')
    .description('Open an interactive chat session with an agent (REPL or one-shot).')
    .argument('<agent>', 'Agent name (see `mcpctl get agents`)')
    .option('-m, --message <text>', 'One-shot: send a single message and exit (no REPL)')
    .option('--thread <id>', 'Resume an existing thread')
    .option('--system <text>', 'Replace agent.systemPrompt for this session')
    .option('--system-file <path>', 'Read --system text from a file')
    .option('--system-append <text>', 'Append to the agent system block for this session')
    .option('--temperature <n>', 'Sampling temperature (0..2)', parseFloat)
    .option('--top-p <n>', 'Nucleus sampling cutoff (0..1)', parseFloat)
    .option('--top-k <n>', 'Top-K sampling (Anthropic; OpenAI ignores)', parseFloatInt)
    .option('--max-tokens <n>', 'Maximum tokens in the assistant reply', parseFloatInt)
    .option('--seed <n>', 'Reproducibility seed (provider-dependent)', parseFloatInt)
    .option('--stop <text>', 'Stop sequence (repeatable)', collect, [])
    .option('--allow-tool <name>', 'Restrict to this tool only (repeatable)', collect, [])
    .option('--extra <kv>', 'Provider-specific knob k=v (repeatable)', collect, [])
    .option('--no-stream', 'Disable SSE streaming (single JSON response)')
    .action(async (agent: string, opts: ChatOpts) => {
      // Flags → session overrides (may read --system-file from disk).
      const overrides = await buildInitialOverrides(opts);
      if (opts.message !== undefined) {
        await runOneShot(deps, agent, opts.message, opts.thread, overrides, opts.stream);
        return;
      }
      await runRepl(deps, agent, opts.thread, overrides, opts.stream);
    });
}
/** Parsed commander options for `mcpctl chat` (camelCase per commander convention). */
interface ChatOpts {
  message?: string; // -m/--message: one-shot mode when set
  thread?: string; // --thread: resume this thread id
  system?: string; // --system: replace the agent's system prompt
  systemFile?: string; // --system-file: read the replacement from a file
  systemAppend?: string; // --system-append: append to the system block
  temperature?: number;
  topP?: number;
  topK?: number;
  maxTokens?: number;
  seed?: number;
  stop?: string[]; // repeatable --stop
  allowTool?: string[]; // repeatable --allow-tool
  extra?: string[]; // repeatable --extra k=v strings (unparsed)
  stream?: boolean; // false when --no-stream was given
}
/**
 * Session-level parameter overrides sent with each chat request. Field names
 * use wire-format snake_case so the object can be spread straight into the
 * request body (mirrors AgentChatParamsAppliedSchema on the apply side).
 */
interface Overrides {
  systemOverride?: string; // replaces agent.systemPrompt for this session
  systemAppend?: string; // appended after the agent system block
  temperature?: number;
  top_p?: number;
  top_k?: number;
  max_tokens?: number;
  seed?: number;
  stop?: string[];
  tools_allowlist?: string[]; // restrict callable tools
  extra?: Record<string, unknown>; // provider-specific knobs
}
/**
 * Translate parsed CLI flags into a wire-format Overrides object.
 *
 * Only flags the user actually supplied appear in the result, so the server
 * can distinguish "not set" from an explicit value. `--system` wins over
 * `--system-file`; the file's contents are trimmed. Repeatable `--extra k=v`
 * flags are parsed into a typed record (throws if a pair lacks `key=`).
 */
async function buildInitialOverrides(opts: ChatOpts): Promise<Overrides> {
  const result: Overrides = {};

  // Resolve the system override: inline text beats the file variant.
  let systemText = opts.system;
  if (systemText === undefined && opts.systemFile !== undefined) {
    const fileText = await fs.readFile(opts.systemFile, 'utf-8');
    systemText = fileText.trim();
  }
  if (systemText !== undefined) result.systemOverride = systemText;
  if (opts.systemAppend !== undefined) result.systemAppend = opts.systemAppend;

  // Copy sampling knobs only when explicitly provided.
  if (opts.temperature !== undefined) result.temperature = opts.temperature;
  if (opts.topP !== undefined) result.top_p = opts.topP;
  if (opts.topK !== undefined) result.top_k = opts.topK;
  if (opts.maxTokens !== undefined) result.max_tokens = opts.maxTokens;
  if (opts.seed !== undefined) result.seed = opts.seed;
  if (opts.stop !== undefined && opts.stop.length > 0) result.stop = opts.stop;
  if (opts.allowTool !== undefined && opts.allowTool.length > 0) {
    result.tools_allowlist = opts.allowTool;
  }

  const pairs = opts.extra ?? [];
  if (pairs.length > 0) {
    const parsed: Record<string, unknown> = {};
    for (const pair of pairs) {
      const sep = pair.indexOf('=');
      // sep < 1 covers both "no equals sign" and "empty key" (=value).
      if (sep < 1) throw new Error(`--extra '${pair}' must be key=value`);
      parsed[pair.slice(0, sep)] = parseExtraValue(pair.slice(sep + 1));
    }
    result.extra = parsed;
  }
  return result;
}
/**
 * Coerce a raw `--extra` value string into a typed JSON-ish value:
 * the literals true/false/null, a number when the whole string is numeric,
 * otherwise the string itself unchanged.
 */
function parseExtraValue(raw: string): unknown {
  switch (raw) {
    case 'true':
      return true;
    case 'false':
      return false;
    case 'null':
      return null;
    default:
      // Integer or simple decimal (optionally negative) → number.
      return /^-?\d+(\.\d+)?$/.test(raw) ? Number(raw) : raw;
  }
}
/**
 * One-shot mode: send a single message, print the assistant reply to stdout
 * and the thread id to stderr, then return.
 *
 * Streaming (the default) goes through `streamOnce`; `--no-stream`
 * (stream === false) does a single buffered POST instead.
 */
async function runOneShot(
  deps: ChatCommandDeps,
  agent: string,
  message: string,
  threadId: string | undefined,
  overrides: Overrides,
  stream: boolean | undefined,
): Promise<void> {
  if (stream !== false) {
    // Default path: SSE stream, text deltas already printed as they arrived.
    const finalThread = await streamOnce(deps, agent, message, threadId, overrides);
    process.stderr.write(`\n(thread: ${finalThread})\n`);
    return;
  }
  // --no-stream: buffered request/response through ApiClient.
  const payload: Record<string, unknown> = { message, ...overrides };
  if (threadId !== undefined) payload.threadId = threadId;
  const res = await deps.client.post<{ assistant: string; threadId: string; turnIndex: number }>(
    `/api/v1/agents/${encodeURIComponent(agent)}/chat`,
    payload,
  );
  process.stdout.write(`${res.assistant}\n`);
  process.stderr.write(`(thread: ${res.threadId})\n`);
}
/**
 * Interactive REPL loop: prompt, send each non-slash line as a chat message,
 * stream (or print) the reply, repeat. Slash-commands are dispatched to
 * handleSlash; `/clear` resets threadId so the next turn opens a new thread.
 * Overrides are session-local — a copy of the initial flags, mutated by
 * /set and /system, never written back unless the user runs /save.
 */
async function runRepl(
  deps: ChatCommandDeps,
  agent: string,
  initialThread: string | undefined,
  initialOverrides: Overrides,
  stream: boolean | undefined,
): Promise<void> {
  // Copy so slash-commands can mutate freely without touching the caller's object.
  const overrides: Overrides = { ...initialOverrides };
  let threadId = initialThread;
  const rl = readline.createInterface({ input: process.stdin, output: process.stdout });
  const ask = (q: string): Promise<string> => new Promise((resolve) => rl.question(q, resolve));
  // Banner and status go to stderr so piped stdout stays pure chat output.
  process.stderr.write(`Chat with agent '${agent}'. Slash commands: /set /system /tools /clear /save /quit. Ctrl-D to exit.\n`);
  if (threadId !== undefined) {
    process.stderr.write(`(resuming thread ${threadId})\n`);
  }
  while (true) {
    let line: string;
    try {
      line = await ask('> ');
    } catch {
      // NOTE(review): rl.question does not reject on EOF (Ctrl-D) — it emits
      // 'close' and the promise stays pending. Confirm Ctrl-D actually
      // reaches this break rather than hanging the loop.
      break;
    }
    if (line === '') continue;
    if (line.startsWith('/')) {
      const handled = await handleSlash(line, deps, agent, overrides, () => { threadId = undefined; });
      if (handled === 'quit') break;
      continue;
    }
    try {
      if (stream === false) {
        // --no-stream: single buffered POST; reply printed in one piece.
        const body: Record<string, unknown> = { message: line, ...overrides };
        if (threadId !== undefined) body.threadId = threadId;
        const res = await deps.client.post<{ assistant: string; threadId: string }>(
          `/api/v1/agents/${encodeURIComponent(agent)}/chat`,
          body,
        );
        threadId = res.threadId;
        process.stdout.write(`${res.assistant}\n`);
      } else {
        // Default: SSE stream; streamOnce prints deltas and returns the thread id.
        threadId = await streamOnce(deps, agent, line, threadId, overrides);
        process.stdout.write('\n');
      }
    } catch (err) {
      // A failed turn should not kill the REPL — report and re-prompt.
      process.stderr.write(`error: ${(err as Error).message}\n`);
    }
  }
  rl.close();
}
/**
 * Dispatch one REPL slash-command.
 *
 * Mutates `overrides` in place (/set, /system), calls `resetThread` for
 * /clear, and talks to the API for /tools and /save. All feedback goes to
 * stderr so stdout remains pure chat output.
 *
 * @param raw         The full input line including the leading '/'.
 * @param deps        Injected client/logging dependencies.
 * @param agent       Agent name (used for /tools and /save API calls).
 * @param overrides   Session overrides, mutated in place.
 * @param resetThread Callback that clears the current thread id (/clear).
 * @returns 'quit' when the REPL should exit, otherwise 'continue'.
 */
async function handleSlash(
  raw: string,
  deps: ChatCommandDeps,
  agent: string,
  overrides: Overrides,
  resetThread: () => void,
): Promise<'quit' | 'continue'> {
  const [cmd, ...rest] = raw.slice(1).split(/\s+/);
  switch (cmd) {
    case 'quit':
    case 'exit':
      return 'quit';
    case 'clear':
      resetThread();
      // Fixed: newline was inside the closing paren ('(new thread\n)'),
      // which printed a dangling ')' on its own line and left the prompt
      // without a trailing newline.
      process.stderr.write('(new thread)\n');
      return 'continue';
    case 'system': {
      // /system with no text clears the session append; with text it sets it.
      const text = rest.join(' ');
      if (text === '') {
        delete overrides.systemAppend;
        process.stderr.write('(systemAppend cleared)\n');
      } else {
        overrides.systemAppend = text;
        process.stderr.write('(systemAppend set)\n');
      }
      return 'continue';
    }
    case 'set': {
      const [key, ...vparts] = rest;
      if (key === undefined || vparts.length === 0) {
        process.stderr.write('usage: /set KEY VALUE\n');
        return 'continue';
      }
      applySetCommand(overrides, key, vparts.join(' '));
      process.stderr.write(`(${key}=${vparts.join(' ')})\n`);
      return 'continue';
    }
    case 'tools': {
      // List the MCP servers attached to the agent's project (tool sources).
      try {
        const a = await deps.client.get<{ project: { name: string } | null }>(
          `/api/v1/agents/${encodeURIComponent(agent)}`,
        );
        if (a.project === null) {
          process.stderr.write('(agent has no project — no tools available)\n');
          return 'continue';
        }
        const servers = await deps.client.get<Array<{ server: { name: string } }>>(
          `/api/v1/projects/${encodeURIComponent(a.project.name)}/servers`,
        );
        if (servers.length === 0) {
          process.stderr.write('(project has no MCP servers attached)\n');
        } else {
          for (const s of servers) {
            process.stderr.write(`  ${s.server.name}\n`);
          }
        }
      } catch (err) {
        process.stderr.write(`error listing tools: ${(err as Error).message}\n`);
      }
      return 'continue';
    }
    case 'save': {
      // Persist the sampling overrides (not session system text) to the agent.
      try {
        await deps.client.put(`/api/v1/agents/${encodeURIComponent(agent)}`, {
          defaultParams: stripSession(overrides),
        });
        process.stderr.write('(saved current overrides as agent.defaultParams)\n');
      } catch (err) {
        process.stderr.write(`error saving: ${(err as Error).message}\n`);
      }
      return 'continue';
    }
    default:
      process.stderr.write(`unknown command: /${cmd ?? ''}\n`);
      return 'continue';
  }
}
/**
 * Project the overrides down to what /save should persist: sampling
 * defaults only, never the per-session systemOverride / systemAppend.
 */
function stripSession(o: Overrides): Record<string, unknown> {
  const persisted: Record<string, unknown> = {};
  for (const [key, value] of Object.entries(o)) {
    if (key === 'systemOverride' || key === 'systemAppend') continue;
    persisted[key] = value;
  }
  return persisted;
}
/**
 * Apply an in-REPL `/set KEY VALUE` to the overrides object.
 * Known sampling keys (with both snake- and kebab-case spellings) coerce to
 * number / truncated int; anything else lands in `extra`.
 */
function applySetCommand(o: Overrides, key: string, valueRaw: string): void {
  const asNumber = Number(valueRaw);
  const asInt = Math.trunc(asNumber);
  if (key === 'temperature') {
    o.temperature = asNumber;
  } else if (key === 'top_p' || key === 'top-p') {
    o.top_p = asNumber;
  } else if (key === 'top_k' || key === 'top-k') {
    o.top_k = asInt;
  } else if (key === 'max_tokens' || key === 'max-tokens') {
    o.max_tokens = asInt;
  } else if (key === 'seed') {
    o.seed = asInt;
  } else if (key === 'stop') {
    o.stop = [valueRaw];
  } else {
    // Anything unknown drops into `extra` so the user can still pass it.
    o.extra = { ...(o.extra ?? {}), [key]: parseExtraValue(valueRaw) };
  }
}
/**
 * Stream a single chat call over SSE and print it as it arrives.
 *
 * Text deltas go to stdout; tool_call / tool_result / error frames go to
 * stderr so the transcript on stdout stays clean.
 *
 * Returns the resolved threadId: the server's `final` frame wins, falling
 * back to the caller-supplied threadId (or '' for a brand-new thread when
 * no `final` frame arrives).
 */
async function streamOnce(
deps: ChatCommandDeps,
agent: string,
message: string,
threadId: string | undefined,
overrides: Overrides,
): Promise<string> {
const url = new URL(`${deps.baseUrl}/api/v1/agents/${encodeURIComponent(agent)}/chat`);
const body = JSON.stringify({ message, threadId, stream: true, ...overrides });
return new Promise<string>((resolve, reject) => {
// Raw http/https request (not deps.client) because we need the response
// as a byte stream rather than a parsed JSON body.
const driver = url.protocol === 'https:' ? https : http;
const req = driver.request({
hostname: url.hostname,
port: url.port || (url.protocol === 'https:' ? 443 : 80),
path: url.pathname + url.search,
method: 'POST',
// NB: Node's `timeout` option is a socket *idle* timeout, not a cap on
// total duration — it fires only if no bytes flow for STREAM_TIMEOUT_MS.
timeout: STREAM_TIMEOUT_MS,
headers: {
'Content-Type': 'application/json',
...(deps.token !== undefined ? { Authorization: `Bearer ${deps.token}` } : {}),
},
}, (res) => {
const status = res.statusCode ?? 0;
// Error responses are buffered in full and surfaced as a rejection.
if (status >= 400) {
const chunks: Buffer[] = [];
res.on('data', (c: Buffer) => chunks.push(c));
res.on('end', () => reject(new Error(`HTTP ${String(status)}: ${Buffer.concat(chunks).toString('utf-8')}`)));
return;
}
// Minimal SSE parser: frames are separated by a blank line ('\n\n');
// only `data: ` lines are consumed; '[DONE]' is the end sentinel.
let buf = '';
let resolvedThread = threadId ?? '';
res.setEncoding('utf-8');
res.on('data', (chunk: string) => {
buf += chunk;
let nl: number;
while ((nl = buf.indexOf('\n\n')) !== -1) {
const frame = buf.slice(0, nl);
buf = buf.slice(nl + 2);
for (const line of frame.split('\n')) {
if (!line.startsWith('data: ')) continue;
const data = line.slice(6);
if (data === '[DONE]') continue;
try {
const evt = JSON.parse(data) as ChatStreamFrame;
switch (evt.type) {
case 'text':
if (typeof evt.delta === 'string') process.stdout.write(evt.delta);
break;
case 'tool_call':
process.stderr.write(`\n[tool_call: ${evt.toolName ?? ''}]\n`);
break;
case 'tool_result':
process.stderr.write(`[tool_result: ${evt.toolName ?? ''} ${evt.ok === false ? 'FAIL' : 'ok'}]\n`);
break;
case 'final':
// The server's final frame carries the authoritative threadId.
if (evt.threadId !== undefined) resolvedThread = evt.threadId;
break;
case 'error':
process.stderr.write(`\n[error: ${evt.message ?? ''}]\n`);
break;
}
} catch {
// ignore malformed frames
}
}
}
});
// NOTE(review): any trailing bytes not terminated by '\n\n' are silently
// discarded at stream end — fine if the server always closes frames with
// a blank line; confirm against the server's SSE writer.
res.on('end', () => resolve(resolvedThread));
res.on('error', reject);
});
req.on('error', reject);
req.on('timeout', () => {
req.destroy();
reject(new Error('chat stream timed out'));
});
req.write(body);
req.end();
});
}
/**
 * One decoded SSE frame from the agent chat endpoint.
 * `type` discriminates which optional fields are populated.
 */
interface ChatStreamFrame {
type: 'text' | 'tool_call' | 'tool_result' | 'final' | 'error';
delta?: string; // 'text': incremental assistant output written to stdout
toolName?: string; // 'tool_call' / 'tool_result': the tool being invoked
ok?: boolean; // 'tool_result': false is rendered as FAIL
threadId?: string; // 'final': authoritative thread id for resuming
turnIndex?: number; // not read in this file — presumably turn ordering; confirm
message?: string; // 'error': human-readable error text
}
/** Commander repeat-flag accumulator: returns a new array, never mutates `prev`. */
function collect(value: string, prev: string[]): string[] {
  const next = prev.slice();
  next.push(value);
  return next;
}
/**
 * Commander value parser: accept only integer-valued input.
 *
 * @throws Error when the value is empty/whitespace or not an integer.
 */
function parseFloatInt(value: string): number {
  // BUG FIX: Number('') and Number('   ') coerce to 0, so an empty flag
  // value used to silently parse as integer 0. Reject it explicitly.
  if (value.trim() === '') throw new Error(`expected integer, got '${value}'`);
  const n = Number(value);
  if (!Number.isInteger(n)) throw new Error(`expected integer, got '${value}'`);
  return n;
}

View File

@@ -88,7 +88,7 @@ export function createCreateCommand(deps: CreateCommandDeps): Command {
const { client, log } = deps;
const cmd = new Command('create')
.description('Create a resource (server, secret, secretbackend, llm, agent, project, user, group, rbac, serverattachment, prompt)');
// --- create server ---
cmd.command('server')
@@ -307,6 +307,81 @@ export function createCreateCommand(deps: CreateCommandDeps): Command {
}
});
// --- create agent ---
// Mirrors the `create llm` pattern: every yaml-applyable Agent field has a
// corresponding flag; the --default-* family populates agent.defaultParams.
cmd.command('agent')
.description('Create an Agent (LLM persona pinned to an Llm, optionally attached to a Project)')
.argument('<name>', 'Agent name (lowercase alphanumeric with hyphens)')
.requiredOption('--llm <name>', 'Pinned Llm (see `mcpctl get llms`)')
.option('--project <name>', 'Attach to this Project (optional)')
.option('--description <text>', 'Description (shown in MCP tools/list)')
.option('--system-prompt <text>', 'System prompt (persona)')
.option('--system-prompt-file <path>', 'Read system prompt from a file')
.option('--proxy-model <name>', 'Optional proxyModel name override (informational)')
.option('--default-temperature <n>', 'Default sampling temperature', parseFloat)
.option('--default-top-p <n>', 'Default top_p', parseFloat)
.option('--default-top-k <n>', 'Default top_k', (s: string) => parseInt(s, 10))
.option('--default-max-tokens <n>', 'Default max_tokens', (s: string) => parseInt(s, 10))
.option('--default-seed <n>', 'Default seed', (s: string) => parseInt(s, 10))
.option('--default-stop <text>', 'Default stop sequence (repeat for multiple)', collect, [])
.option('--default-extra <kv>', 'Default provider-specific knob k=v (repeat)', collect, [])
.option('--default-params-file <path>', 'Read defaultParams from a JSON file')
.option('--force', 'Update if already exists')
.action(async (name: string, opts) => {
// The llm reference is sent in long form ({ name }) for the server to resolve.
const body: Record<string, unknown> = {
name,
llm: { name: opts.llm },
};
// NOTE(review): truthiness check (unlike the `!== undefined` used below)
// also skips an explicit empty --project '' — confirm that's intentional.
if (opts.project) body.project = { name: opts.project };
if (opts.description !== undefined) body.description = opts.description;
// Inline --system-prompt wins; the file is only read when the flag is absent.
let systemPrompt = opts.systemPrompt as string | undefined;
if (systemPrompt === undefined && opts.systemPromptFile !== undefined) {
const fs = await import('node:fs/promises');
systemPrompt = (await fs.readFile(opts.systemPromptFile as string, 'utf-8')).trim();
}
if (systemPrompt !== undefined) body.systemPrompt = systemPrompt;
if (opts.proxyModel !== undefined) body.proxyModelName = opts.proxyModel;
// defaultParams precedence: --default-params-file is the base, then each
// explicit --default-* flag overrides its individual key.
let defaults: Record<string, unknown> = {};
if (opts.defaultParamsFile !== undefined) {
const fs = await import('node:fs/promises');
defaults = JSON.parse(await fs.readFile(opts.defaultParamsFile as string, 'utf-8')) as Record<string, unknown>;
}
if (opts.defaultTemperature !== undefined) defaults.temperature = opts.defaultTemperature;
if (opts.defaultTopP !== undefined) defaults.top_p = opts.defaultTopP;
if (opts.defaultTopK !== undefined) defaults.top_k = opts.defaultTopK;
if (opts.defaultMaxTokens !== undefined) defaults.max_tokens = opts.defaultMaxTokens;
if (opts.defaultSeed !== undefined) defaults.seed = opts.defaultSeed;
// A single --default-stop collapses to a scalar; multiple stay an array.
if (opts.defaultStop && (opts.defaultStop as string[]).length > 0) {
defaults.stop = (opts.defaultStop as string[]).length === 1 ? (opts.defaultStop as string[])[0] : opts.defaultStop;
}
if (opts.defaultExtra && (opts.defaultExtra as string[]).length > 0) {
const extra: Record<string, unknown> = (defaults.extra as Record<string, unknown> | undefined) ?? {};
for (const kv of opts.defaultExtra as string[]) {
// Split on the first '=' only, so values may themselves contain '='.
const eq = (kv as string).indexOf('=');
if (eq < 1) throw new Error(`--default-extra '${kv}' must be key=value`);
extra[(kv as string).slice(0, eq)] = (kv as string).slice(eq + 1);
}
defaults.extra = extra;
}
if (Object.keys(defaults).length > 0) body.defaultParams = defaults;
try {
const row = await client.post<{ id: string; name: string }>('/api/v1/agents', body);
log(`agent '${row.name}' created (id: ${row.id})`);
} catch (err) {
// --force path: on 409, resolve the existing agent's id by name, then PUT
// the same body (minus `name`) as an update.
if (err instanceof ApiError && err.status === 409 && opts.force) {
const existing = (await client.get<Array<{ id: string; name: string }>>('/api/v1/agents')).find((a) => a.name === name);
if (!existing) throw err;
const { name: _n, ...updateBody } = body;
await client.put(`/api/v1/agents/${existing.id}`, updateBody);
log(`agent '${name}' updated (id: ${existing.id})`);
} else {
throw err;
}
}
});
// --- create secretbackend ---
cmd.command('secretbackend')
.alias('sb')

View File

@@ -143,6 +143,27 @@ const llmColumns: Column<LlmRow>[] = [
{ header: 'ID', key: 'id' },
];
/**
 * Row shape rendered by `mcpctl get agents`.
 * Presumably mirrors the server's agent list response — confirm against the
 * API schema.
 */
interface AgentRow {
id: string; // shown in the ID column
name: string;
description: string;
llm: { id: string; name: string }; // the pinned Llm reference
project: { id: string; name: string } | null; // null when not attached to a project
}
// Table layout for `mcpctl get agents`: PROJECT falls back to '-' when the
// agent is unattached, and DESCRIPTION is clipped to the column width.
const agentColumns: Column<AgentRow>[] = [
  { header: 'NAME', key: 'name' },
  { header: 'LLM', key: (row) => row.llm.name, width: 24 },
  { header: 'PROJECT', key: (row) => (row.project === null ? '-' : row.project.name), width: 20 },
  { header: 'DESCRIPTION', key: (row) => truncate(row.description, 50) || '-', width: 50 },
  { header: 'ID', key: 'id' },
];
/**
 * Clip `s` to at most `max` characters, replacing the tail with a single
 * ellipsis when clipping occurs.
 *
 * BUG FIX: for max <= 0 the old `s.slice(0, max - 1) + '…'` produced a string
 * *longer* than max (e.g. truncate('abc', 0) returned 'ab…'); guard it.
 */
function truncate(s: string, max: number): string {
  if (max <= 0) return '';
  if (s.length <= max) return s;
  return s.slice(0, max - 1) + '…';
}
interface SecretBackendRow {
id: string;
name: string;
@@ -322,6 +343,8 @@ function getColumnsForResource(resource: string): Column<Record<string, unknown>
return secretBackendColumns as unknown as Column<Record<string, unknown>>[];
case 'llms':
return llmColumns as unknown as Column<Record<string, unknown>>[];
case 'agents':
return agentColumns as unknown as Column<Record<string, unknown>>[];
default:
return [
{ header: 'ID', key: 'id' as keyof Record<string, unknown> },
@@ -346,6 +369,7 @@ const RESOURCE_KIND: Record<string, string> = {
mcptokens: 'mcptoken',
secretbackends: 'secretbackend',
llms: 'llm',
agents: 'agent',
};
/**

View File

@@ -36,6 +36,10 @@ export const RESOURCE_ALIASES: Record<string, string> = {
sb: 'secretbackends',
llm: 'llms',
llms: 'llms',
agent: 'agents',
agents: 'agents',
thread: 'threads',
threads: 'threads',
all: 'all',
};

View File

@@ -18,6 +18,7 @@ import { createMcpCommand } from './commands/mcp.js';
import { createPatchCommand } from './commands/patch.js';
import { createConsoleCommand } from './commands/console/index.js';
import { createCacheCommand } from './commands/cache.js';
import { createChatCommand } from './commands/chat.js';
import { createMigrateCommand } from './commands/migrate.js';
import { createRotateCommand } from './commands/rotate.js';
import { ApiClient, ApiError } from './api-client.js';
@@ -216,6 +217,13 @@ export function createProgram(): Command {
log: (...args) => console.log(...args),
}));
// `chat` streams SSE over raw http/https, so unlike the other commands it
// receives the resolved baseUrl and (when logged in) the bearer token in
// addition to the shared ApiClient.
program.addCommand(createChatCommand({
client,
baseUrl,
...(creds?.token !== undefined ? { token: creds.token } : {}),
log: (...args) => console.log(...args),
}));
program.addCommand(createPatchCommand({
client,
log: (...args) => console.log(...args),

View File

@@ -183,3 +183,51 @@ describe('bash completions', () => {
expect(fnMatch, '_mcpctl_resource_names must not use grep on name').not.toMatch(/grep.*"name"/);
});
});
// Stage 5 completions coverage: `agents` as a resource, `chat` as a top-level
// command, and the `create agent` flags — checked in both the fish and bash
// completion scripts.
describe('agent + chat completions', () => {
it('fish lists agents as a resource type', () => {
expect(fishFile).toMatch(/set -l resources [^\n]*\bagents\b/);
});
it('fish accepts both `agent` and `agents` aliases', () => {
const aliasLine = fishFile.split('\n').find((l) => l.startsWith(' set -l resource_aliases'));
expect(aliasLine).toMatch(/\bagent\b/);
expect(aliasLine).toMatch(/\bagents\b/);
});
it('fish offers `chat` as a top-level command', () => {
expect(fishFile).toMatch(/set -l commands [^\n]*\bchat\b/);
});
it('fish offers `agent` under `mcpctl create`', () => {
expect(fishFile).toMatch(/-a agent\b[^\n]*Create an Agent/);
});
it('fish wires --llm flag for create agent', () => {
expect(fishFile).toMatch(/__mcpctl_subcmd_active create agent[^\n]*-l llm\b/);
});
it('bash lists agents in resources and resource_aliases', () => {
expect(bashFile).toMatch(/local resources="[^"]*\bagents\b[^"]*"/);
expect(bashFile).toMatch(/local resource_aliases="[^"]*\bagent\b[^"]*"/);
});
it('bash includes `chat` in the commands list', () => {
expect(bashFile).toMatch(/local commands="[^"]*\bchat\b[^"]*"/);
});
it('bash dispatches a `chat)` case that completes with agent names + LiteLLM-style flags', () => {
// Non-greedy match grabs the first case arm from `chat)` to its `return ;;`.
const chatBlock = bashFile.match(/chat\)[\s\S]*?return ;;/)?.[0] ?? '';
expect(chatBlock, 'chat must call _mcpctl_resource_names with "agents"').toContain('"agents"');
expect(chatBlock, 'chat must offer --temperature').toContain('--temperature');
expect(chatBlock, 'chat must offer --thread').toContain('--thread');
expect(chatBlock, 'chat must offer --no-stream').toContain('--no-stream');
});
it('bash dispatches `create agent` with the correct flags', () => {
// NOTE(review): /agent\)/ matches the *first* `agent)` in the file — confirm
// no other case arm (e.g. inside `chat)`) shadows the create-agent arm.
const createBlock = bashFile.match(/agent\)[\s\S]*?;;/)?.[0] ?? '';
expect(createBlock).toContain('--llm');
expect(createBlock).toContain('--system-prompt');
expect(createBlock).toContain('--default-temperature');
});
});