Merge pull request 'feat: Kubernetes operator for MCP server management' (#47) from feat/k8s-operator into main
Some checks failed
CI/CD / lint (push) Successful in 1m46s
CI/CD / typecheck (push) Successful in 50s
CI/CD / test (push) Successful in 2m34s
CI/CD / build (push) Successful in 1m58s
CI/CD / smoke (push) Successful in 4m42s
CI/CD / publish (push) Failing after 7m20s

Reviewed-on: #47
This commit was merged in pull request #47.
This commit is contained in:
2026-04-09 22:46:22 +00:00
20 changed files with 2421 additions and 271 deletions

View File

@@ -3,3 +3,23 @@
## Task Master AI Instructions
**Import Task Master's development workflow commands and guidelines, treat as if import is in the main CLAUDE.md file.**
@./.taskmaster/CLAUDE.md
## Skill routing
When the user's request matches an available skill, ALWAYS invoke it using the Skill
tool as your FIRST action. Do NOT answer directly, do NOT use other tools first.
The skill has specialized workflows that produce better results than ad-hoc answers.
Key routing rules:
- Product ideas, "is this worth building", brainstorming → invoke office-hours
- Bugs, errors, "why is this broken", 500 errors → invoke investigate
- Ship, deploy, push, create PR → invoke ship
- QA, test the site, find bugs → invoke qa
- Code review, check my diff → invoke review
- Update docs after shipping → invoke document-release
- Weekly retro → invoke retro
- Design system, brand → invoke design-consultation
- Visual audit, design polish → invoke design-review
- Architecture review → invoke plan-eng-review
- Save progress, checkpoint, resume → invoke checkpoint
- Code quality, health check → invoke health

1048
docs/project-summary.md Normal file

File diff suppressed because it is too large Load Diff

390
pnpm-lock.yaml generated
View File

@@ -112,6 +112,9 @@ importers:
'@fastify/rate-limit':
specifier: ^10.0.0
version: 10.3.0
'@kubernetes/client-node':
specifier: ^1.4.0
version: 1.4.0
'@mcpctl/db':
specifier: workspace:*
version: link:../db
@@ -610,6 +613,21 @@ packages:
'@js-sdsl/ordered-map@4.4.2':
resolution: {integrity: sha512-iUKgm52T8HOE/makSxjqoWhe95ZJA1/G1sYsGev2JDKUSS14KAgg1LHb+Ba+IPow0xflbnSkOsZcO08C7w1gYw==}
'@jsep-plugin/assignment@1.3.0':
resolution: {integrity: sha512-VVgV+CXrhbMI3aSusQyclHkenWSAm95WaiKrMxRFam3JSUiIaQjoMIw2sEs/OX4XifnqeQUN4DYbJjlA8EfktQ==}
engines: {node: '>= 10.16.0'}
peerDependencies:
jsep: ^0.4.0||^1.0.0
'@jsep-plugin/regex@1.0.4':
resolution: {integrity: sha512-q7qL4Mgjs1vByCaTnDFcBnV9HS7GVPJX5vyVoCgZHNSC9rjwIlmbXG5sUuorR5ndfHAIlJ8pVStxvjXHbNvtUg==}
engines: {node: '>= 10.16.0'}
peerDependencies:
jsep: ^0.4.0||^1.0.0
'@kubernetes/client-node@1.4.0':
resolution: {integrity: sha512-Zge3YvF7DJi264dU1b3wb/GmzR99JhUpqTvp+VGHfwZT+g7EOOYNScDJNZwXy9cszyIGPIs0VHr+kk8e95qqrA==}
'@lukeed/ms@2.0.2':
resolution: {integrity: sha512-9I2Zn6+NJLfaGoz9jN3lpwDgAYvfGeNYdbAIjJOqzs4Tpc+VU3Jqq4IofSUBKajiDS8k9fZIg18/z13mpk1bsA==}
engines: {node: '>=8'}
@@ -850,9 +868,15 @@ packages:
'@types/json-schema@7.0.15':
resolution: {integrity: sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==}
'@types/node-fetch@2.6.13':
resolution: {integrity: sha512-QGpRVpzSaUs30JBSGPjOg4Uveu384erbHBoT1zeONvyCfwQxIkUshLAOqN/k9EjGviPRmWTTe6aH2qySWKTVSw==}
'@types/node@18.19.130':
resolution: {integrity: sha512-GRaXQx6jGfL8sKfaIDD6OupbIHBr9jv7Jnaml9tB7l4v068PAOXqfcujMMo5PhbIs6ggR1XODELqahT2R8v0fg==}
'@types/node@24.12.2':
resolution: {integrity: sha512-A1sre26ke7HDIuY/M23nd9gfB+nrmhtYyMINbjI1zHJxYteKR6qSMX56FsmjMcDb3SMcjJg5BiRRgOCC/yBD0g==}
'@types/node@25.3.0':
resolution: {integrity: sha512-4K3bqJpXpqfg2XKGK9bpDTc6xO/xoUP/RBWS7AtRMug6zZFaRekiLzjVtAoZMquxoAbzBvy5nxQ7veS5eYzf8A==}
@@ -862,6 +886,9 @@ packages:
'@types/ssh2@1.15.5':
resolution: {integrity: sha512-N1ASjp/nXH3ovBHddRJpli4ozpk6UdDYIX4RJWFa9L1YKnzdhTlVmiGHm4DZnj/jLbqZpes4aeR30EFGQtvhQQ==}
'@types/stream-buffers@3.0.8':
resolution: {integrity: sha512-J+7VaHKNvlNPJPEJXX/fKa9DZtR/xPMwuIbe+yNOwp1YB+ApUOBv2aUpEoBJEi8nJgbgs1x8e73ttg0r1rSUdw==}
'@typescript-eslint/eslint-plugin@8.56.0':
resolution: {integrity: sha512-lRyPDLzNCuae71A3t9NEINBiTn7swyOhvUj3MyUOxb8x6g6vPEFoOU+ZRmGMusNC3X3YMhqMIX7i8ShqhT74Pw==}
engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0}
@@ -983,6 +1010,10 @@ packages:
resolution: {integrity: sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ==}
engines: {node: '>= 6.0.0'}
agent-base@7.1.4:
resolution: {integrity: sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ==}
engines: {node: '>= 14'}
ajv-formats@3.0.1:
resolution: {integrity: sha512-8iUql50EUR+uUcdRQ3HDqa6EVyo3docL8g5WJ3FNcWmu62IbkGUue/pEyLBW8VGKKucTPgqeks4fIU1DA4yowQ==}
peerDependencies:
@@ -1038,6 +1069,9 @@ packages:
ast-v8-to-istanbul@0.3.11:
resolution: {integrity: sha512-Qya9fkoofMjCBNVdWINMjB5KZvkYfaO9/anwkWnjxibpWUxo5iHl2sOdP7/uAqaRuUYuoo8rDwnbaaKVFxoUvw==}
asynckit@0.4.0:
resolution: {integrity: sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==}
atomic-sleep@1.0.0:
resolution: {integrity: sha512-kNOjDqAh7px0XWNI+4QbzoiR/nTkHAWNud2uvnJquD1/x5a7EQZMJT0AczqK0Qn67oY/TTQ1LbUKajZpp3I9tQ==}
engines: {node: '>=8.0.0'}
@@ -1049,6 +1083,14 @@ packages:
avvio@9.2.0:
resolution: {integrity: sha512-2t/sy01ArdHHE0vRH5Hsay+RtCZt3dLPji7W7/MMOCEgze5b7SNDC4j5H6FnVgPkI1MTNFGzHdHrVXDDl7QSSQ==}
b4a@1.8.0:
resolution: {integrity: sha512-qRuSmNSkGQaHwNbM7J78Wwy+ghLEYF1zNrSeMxj4Kgw6y33O3mXcQ6Ie9fRvfU/YnxWkOchPXbaLb73TkIsfdg==}
peerDependencies:
react-native-b4a: '*'
peerDependenciesMeta:
react-native-b4a:
optional: true
balanced-match@1.0.2:
resolution: {integrity: sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==}
@@ -1056,6 +1098,47 @@ packages:
resolution: {integrity: sha512-1pHv8LX9CpKut1Zp4EXey7Z8OfH11ONNH6Dhi2WDUt31VVZFXZzKwXcysBgqSumFCmR+0dqjMK5v5JiFHzi0+g==}
engines: {node: 20 || >=22}
bare-events@2.8.2:
resolution: {integrity: sha512-riJjyv1/mHLIPX4RwiK+oW9/4c3TEUeORHKefKAKnZ5kyslbN+HXowtbaVEqt4IMUB7OXlfixcs6gsFeo/jhiQ==}
peerDependencies:
bare-abort-controller: '*'
peerDependenciesMeta:
bare-abort-controller:
optional: true
bare-fs@4.6.0:
resolution: {integrity: sha512-2YkS7NuiJceSEbyEOdSNLE9tsGd+f4+f7C+Nik/MCk27SYdwIMPT/yRKvg++FZhQXgk0KWJKJyXX9RhVV0RGqA==}
engines: {bare: '>=1.16.0'}
peerDependencies:
bare-buffer: '*'
peerDependenciesMeta:
bare-buffer:
optional: true
bare-os@3.8.7:
resolution: {integrity: sha512-G4Gr1UsGeEy2qtDTZwL7JFLo2wapUarz7iTMcYcMFdS89AIQuBoyjgXZz0Utv7uHs3xA9LckhVbeBi8lEQrC+w==}
engines: {bare: '>=1.14.0'}
bare-path@3.0.0:
resolution: {integrity: sha512-tyfW2cQcB5NN8Saijrhqn0Zh7AnFNsnczRcuWODH0eYAXBsJ5gVxAUuNr7tsHSC6IZ77cA0SitzT+s47kot8Mw==}
bare-stream@2.12.0:
resolution: {integrity: sha512-w28i8lkBgREV3rPXGbgK+BO66q+ZpKqRWrZLiCdmmUlLPrQ45CzkvRhN+7lnv00Gpi2zy5naRxnUFAxCECDm9g==}
peerDependencies:
bare-abort-controller: '*'
bare-buffer: '*'
bare-events: '*'
peerDependenciesMeta:
bare-abort-controller:
optional: true
bare-buffer:
optional: true
bare-events:
optional: true
bare-url@2.4.0:
resolution: {integrity: sha512-NSTU5WN+fy/L0DDenfE8SXQna4voXuW0FHM7wH8i3/q9khUSchfPbPezO4zSFMnDGIf9YE+mt/RWhZgNRKRIXA==}
base64-js@1.5.1:
resolution: {integrity: sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==}
@@ -1177,6 +1260,10 @@ packages:
resolution: {integrity: sha512-qiBjkpbMLO/HL68y+lh4q0/O1MZFj2RX6X/KmMa3+gJD3z+WwI1ZzDHysvqHGS3mP6mznPckpXmw1nI9cJjyRg==}
hasBin: true
combined-stream@1.0.8:
resolution: {integrity: sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==}
engines: {node: '>= 0.8'}
commander@13.1.0:
resolution: {integrity: sha512-/rFeCpNJQbhSZjGVwO9RFV3xPqbnERS8MmIQzCtD/zl6gpJuV/bMLuN92oG3F7d8oDEHHRrujSXNUr8fpjntKw==}
engines: {node: '>=18'}
@@ -1256,6 +1343,10 @@ packages:
defu@6.1.4:
resolution: {integrity: sha512-mEQCMmwJu317oSz8CwdIOdwf3xMif1ttiM8LTufzc3g6kR+9Pe236twL8j3IYT1F7GfRgGcW6MWxzZjLIkuHIg==}
delayed-stream@1.0.0:
resolution: {integrity: sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==}
engines: {node: '>=0.4.0'}
delegates@1.0.0:
resolution: {integrity: sha512-bd2L678uiWATM6m5Z1VzNCErI3jiGzt6HGY8OVICs40JQq/HALfbyNJmp0UDakEY4pMMaN0Ly5om/B1VI/+xfQ==}
@@ -1336,6 +1427,10 @@ packages:
resolution: {integrity: sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==}
engines: {node: '>= 0.4'}
es-set-tostringtag@2.1.0:
resolution: {integrity: sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==}
engines: {node: '>= 0.4'}
es-toolkit@1.44.0:
resolution: {integrity: sha512-6penXeZalaV88MM3cGkFZZfOoLGWshWWfdy0tWw/RlVVyhvMaWSBTOvXNeiW3e5FwdS5ePW0LGEu17zT139ktg==}
@@ -1414,6 +1509,9 @@ packages:
resolution: {integrity: sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg==}
engines: {node: '>= 0.6'}
events-universal@1.0.1:
resolution: {integrity: sha512-LUd5euvbMLpwOF8m6ivPCbhQeSiYVNb8Vs0fQ8QjXo0JTkEHpz8pxdQf0gStltaPpw0Cca8b39KxvK9cfKRiAw==}
eventsource-parser@3.0.6:
resolution: {integrity: sha512-Vo1ab+QXPzZ4tCa8SwIHJFaSzy4R6SHf7BY79rFBDf0idraZWAkYrDjDj8uWaSm3S2TK+hJ7/t1CEmZ7jXw+pg==}
engines: {node: '>=18.0.0'}
@@ -1449,6 +1547,9 @@ packages:
fast-deep-equal@3.1.3:
resolution: {integrity: sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==}
fast-fifo@1.3.2:
resolution: {integrity: sha512-/d9sfos4yxzpwkDkuN7k2SqFKtYNmCTzgfEpz82x34IM9/zc8KGxQoXg1liNC/izpRM/MBdt44Nmx41ZWqk+FQ==}
fast-json-stable-stringify@2.1.0:
resolution: {integrity: sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==}
@@ -1509,6 +1610,10 @@ packages:
flatted@3.3.3:
resolution: {integrity: sha512-GX+ysw4PBCz0PzosHDepZGANEuFCMLrnRTiEy9McGjmkCQYwRq4A/X786G/fjM/+OjsWSU1ZrY5qyARZmO/uwg==}
form-data@4.0.5:
resolution: {integrity: sha512-8RipRLol37bNs2bhoV67fiTEvdTrbMUYcFTiy3+wuuOnUog2QBHCZWXDRijWQfAkhBj2Uf5UnVaiWwA5vdd82w==}
engines: {node: '>= 6'}
forwarded@0.2.0:
resolution: {integrity: sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow==}
engines: {node: '>= 0.6'}
@@ -1587,6 +1692,10 @@ packages:
resolution: {integrity: sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==}
engines: {node: '>= 0.4'}
has-tostringtag@1.0.2:
resolution: {integrity: sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==}
engines: {node: '>= 0.4'}
has-unicode@2.0.1:
resolution: {integrity: sha512-8Rf9Y83NBReMnx0gFzA8JImQACstCYWUplepDa9xprwwtmgEZUF0h/i5xSA625zB/I37EtrswSST6OXxwaaIJQ==}
@@ -1602,6 +1711,10 @@ packages:
resolution: {integrity: sha512-NekXntS5M94pUfiVZ8oXXK/kkri+5WpX2/Ik+LVsl+uvw+soj4roXIsPqO+XsWrAw20mOzaXOZf3Q7PfB9A/IA==}
engines: {node: '>=16.9.0'}
hpagent@1.2.0:
resolution: {integrity: sha512-A91dYTeIB6NoXG+PxTQpCCDDnfHsW9kc06Lvpu1TEe9gnd6ZFeiBoRO9JvzEv6xK7EX97/dUE8g/vBMTqTS3CA==}
engines: {node: '>=14'}
html-escaper@2.0.2:
resolution: {integrity: sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg==}
@@ -1708,6 +1821,11 @@ packages:
isexe@2.0.0:
resolution: {integrity: sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==}
isomorphic-ws@5.0.0:
resolution: {integrity: sha512-muId7Zzn9ywDsyXgTIafTry2sV3nySZeUDe6YedVd1Hvuuep5AsIlqK+XefWpYTyJG5e503F2xIuT2lcU6rCSw==}
peerDependencies:
ws: '*'
istanbul-lib-coverage@3.2.2:
resolution: {integrity: sha512-O8dpsF+r0WV/8MNRKfnmrtCWhuKjxrq2w+jpzBL5UZKTi2LeVWnWOmWRxFlesJONmc+wLAGvKQZEOanko0LFTg==}
engines: {node: '>=8'}
@@ -1734,6 +1852,10 @@ packages:
resolution: {integrity: sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==}
hasBin: true
jsep@1.4.0:
resolution: {integrity: sha512-B7qPcEVE3NVkmSJbaYxvv4cHkVW7DQsZz13pUMrfS8z8Q/BuShN+gcTXrUlPiGqM2/t/EEaI030bpxMqY8gMlw==}
engines: {node: '>= 10.16.0'}
json-buffer@3.0.1:
resolution: {integrity: sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==}
@@ -1752,6 +1874,11 @@ packages:
json-stable-stringify-without-jsonify@1.0.1:
resolution: {integrity: sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==}
jsonpath-plus@10.4.0:
resolution: {integrity: sha512-T92WWatJXmhBbKsgH/0hl+jxjdXrifi5IKeMY02DWggRxX0UElcbVzPlmgLTbvsPeW1PasQ6xE2Q75stkhGbsA==}
engines: {node: '>=18.0.0'}
hasBin: true
keyv@4.5.4:
resolution: {integrity: sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==}
@@ -1802,10 +1929,18 @@ packages:
resolution: {integrity: sha512-Snk314V5ayFLhp3fkUREub6WtjBfPdCPY1Ln8/8munuLuiYhsABgBVWsozAG+MWMbVEvcdcpbi9R7ww22l9Q3g==}
engines: {node: '>=18'}
mime-db@1.52.0:
resolution: {integrity: sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==}
engines: {node: '>= 0.6'}
mime-db@1.54.0:
resolution: {integrity: sha512-aU5EJuIN2WDemCcAp2vFBfp/m4EAhWJnUNSSw0ixs7/kXbd6Pg64EmwJkNdFhB8aWt1sH2CTXrLxo/iAGV3oPQ==}
engines: {node: '>= 0.6'}
mime-types@2.1.35:
resolution: {integrity: sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==}
engines: {node: '>= 0.6'}
mime-types@3.0.2:
resolution: {integrity: sha512-Lbgzdk0h4juoQ9fCKXW4by0UJqj+nOOrI9MJ1sSj4nI8aI2eo1qmvQEie4VD1glsS250n15LsWsYtCugiStS5A==}
engines: {node: '>=18'}
@@ -1903,6 +2038,9 @@ packages:
engines: {node: '>=18'}
hasBin: true
oauth4webapi@3.8.5:
resolution: {integrity: sha512-A8jmyUckVhRJj5lspguklcl90Ydqk61H3dcU0oLhH3Yv13KpAliKTt5hknpGGPZSSfOwGyraNEFmofDYH+1kSg==}
object-assign@4.1.1:
resolution: {integrity: sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==}
engines: {node: '>=0.10.0'}
@@ -1935,6 +2073,9 @@ packages:
resolution: {integrity: sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==}
engines: {node: '>=6'}
openid-client@6.8.2:
resolution: {integrity: sha512-uOvTCndr4udZsKihJ68H9bUICrriHdUVJ6Az+4Ns6cW55rwM5h0bjVIzDz2SxgOI84LKjFyjOFvERLzdTUROGA==}
optionator@0.9.4:
resolution: {integrity: sha512-6IpQ7mKUxRcZNLIObR0hz7lxsapSSIYNZJwXPGeF0mTVqGKFIXj1DQcMoT22S3ROcLyY/rz0PWaWZ9ayWmad9g==}
engines: {node: '>= 0.8.0'}
@@ -2112,6 +2253,9 @@ packages:
resolution: {integrity: sha512-g6QUff04oZpHs0eG5p83rFLhHeV00ug/Yf9nZM6fLeUrPguBTkTQOdpAWWspMh55TZfVQDPaN3NQJfbVRAxdIw==}
engines: {iojs: '>=1.0.0', node: '>=0.10.0'}
rfc4648@1.5.4:
resolution: {integrity: sha512-rRg/6Lb+IGfJqO05HZkN50UtY7K/JhxJag1kP23+zyMfrvoB0B7RWv06MbOzoc79RgCdNTiUaNsTT1AJZ7Z+cg==}
rfdc@1.4.1:
resolution: {integrity: sha512-q1b3N5QkRUWUl7iyylaaj3kOpIT0N2i9MqIEQXP73GVsN9cw3fdx8X63cEmWhJGi2PPCF23Ijp7ktmd39rawIA==}
@@ -2228,6 +2372,18 @@ packages:
resolution: {integrity: sha512-stxByr12oeeOyY2BlviTNQlYV5xOj47GirPr4yA1hE9JCtxfQN0+tVbkxwCtYDQWhEKWFHsEK48ORg5jrouCAg==}
engines: {node: '>=20'}
smart-buffer@4.2.0:
resolution: {integrity: sha512-94hK0Hh8rPqQl2xXc3HsaBoOXKV20MToPkcXvwbISWLEs+64sBq5kFgn2kJDHb1Pry9yrP0dxrCI9RRci7RXKg==}
engines: {node: '>= 6.0.0', npm: '>= 3.0.0'}
socks-proxy-agent@8.0.5:
resolution: {integrity: sha512-HehCEsotFqbPW9sJ8WVYB6UbmIMv7kUUORIF2Nncq4VQvBfNBLibW9YZR5dlYCSUhwcD628pRllm7n+E+YTzJw==}
engines: {node: '>= 14'}
socks@2.8.7:
resolution: {integrity: sha512-HLpt+uLy/pxB+bum/9DzAgiKS8CX1EvbWxI4zlmgGCExImLdiad2iCwXT5Z4c9c3Eq8rP2318mPW2c+QbtjK8A==}
engines: {node: '>= 10.0.0', npm: '>= 3.0.0'}
sonic-boom@4.2.1:
resolution: {integrity: sha512-w6AxtubXa2wTXAUsZMMWERrsIRAdrK0Sc+FUytWvYAhBJLyuI4llrMIC1DtlNSdI99EI86KZum2MMq3EAZlF9Q==}
@@ -2260,6 +2416,13 @@ packages:
std-env@3.10.0:
resolution: {integrity: sha512-5GS12FdOZNliM5mAOxFRg7Ir0pWz8MdpYm6AY6VPkGpbA7ZzmbzNcBJQ0GPvvyWgcY7QAhCgf9Uy89I03faLkg==}
stream-buffers@3.0.3:
resolution: {integrity: sha512-pqMqwQCso0PBJt2PQmDO0cFj0lyqmiwOMiMSkVtRokl7e+ZTRYgDHKnuZNbqjiJXgsg4nuqtD/zxuo9KqTp0Yw==}
engines: {node: '>= 0.10.0'}
streamx@2.25.0:
resolution: {integrity: sha512-0nQuG6jf1w+wddNEEXCF4nTg3LtufWINB5eFEN+5TNZW7KWJp6x87+JFL43vaAUPyCfH1wID+mNVyW6OHtFamg==}
string-width@4.2.3:
resolution: {integrity: sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==}
engines: {node: '>=8'}
@@ -2294,19 +2457,31 @@ packages:
tar-fs@2.1.4:
resolution: {integrity: sha512-mDAjwmZdh7LTT6pNleZ05Yt65HC3E+NiQzl672vQG38jIrehtJk/J3mNwIg+vShQPcLF/LV7CMnDW6vjj6sfYQ==}
tar-fs@3.1.2:
resolution: {integrity: sha512-QGxxTxxyleAdyM3kpFs14ymbYmNFrfY+pHj7Z8FgtbZ7w2//VAgLMac7sT6nRpIHjppXO2AwwEOg0bPFVRcmXw==}
tar-stream@2.2.0:
resolution: {integrity: sha512-ujeqbceABgwMZxEJnk2HDY2DlnUZ+9oEcb1KzTVfYHio0UE6dG71n60d8D2I4qNvleWrrXpmjpt7vZeF1LnMZQ==}
engines: {node: '>=6'}
tar-stream@3.1.8:
resolution: {integrity: sha512-U6QpVRyCGHva435KoNWy9PRoi2IFYCgtEhq9nmrPPpbRacPs9IH4aJ3gbrFC8dPcXvdSZ4XXfXT5Fshbp2MtlQ==}
tar@6.2.1:
resolution: {integrity: sha512-DZ4yORTwrbTj/7MZYq2w+/ZFdI6OZ/f9SFHR+71gIVUZhOQPHzVCLpvRnPgyaMpfWxxk/4ONva3GQSyNIKRv6A==}
engines: {node: '>=10'}
deprecated: Old versions of tar are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. Support for old versions may be purchased (at exorbitant rates) by contacting i@izs.me
teex@1.0.1:
resolution: {integrity: sha512-eYE6iEI62Ni1H8oIa7KlDU6uQBtqr4Eajni3wX7rpfXD8ysFx8z0+dri+KWEPWpBsxXfxu58x/0jvTVT1ekOSg==}
terminal-size@4.0.1:
resolution: {integrity: sha512-avMLDQpUI9I5XFrklECw1ZEUPJhqzcwSWsyyI8blhRLT+8N1jLJWLWWYQpB2q2xthq8xDvjZPISVh53T/+CLYQ==}
engines: {node: '>=18'}
text-decoder@1.2.7:
resolution: {integrity: sha512-vlLytXkeP4xvEq2otHeJfSQIRyWxo/oZGEbXrtEEF9Hnmrdly59sUbzZ/QgyWuLYHctCHxFF4tRQZNQ9k60ExQ==}
thread-stream@4.0.0:
resolution: {integrity: sha512-4iMVL6HAINXWf1ZKZjIPcz5wYaOdPhtO8ATvZ+Xqp3BTdaqtAwQkNmKORqcIo5YkQqGXq5cwfswDwMqqQNrpJA==}
engines: {node: '>=20'}
@@ -2374,6 +2549,9 @@ packages:
undici-types@5.26.5:
resolution: {integrity: sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==}
undici-types@7.16.0:
resolution: {integrity: sha512-Zz+aZWSj8LE6zoxD+xrjh4VfkIG8Ya6LvYkZqtUQGJPZjYl53ypCaUwWqo7eI0x66KBGeRo+mlBEkMSeSZ38Nw==}
undici-types@7.18.2:
resolution: {integrity: sha512-AsuCzffGHJybSaRrmr5eHr81mwJU3kjw6M+uprWvCXiNeN9SOGwQ3Jn8jb8m3Z6izVgknn1R0FTCEAP2QrLY/w==}
@@ -2911,6 +3089,41 @@ snapshots:
'@js-sdsl/ordered-map@4.4.2': {}
'@jsep-plugin/assignment@1.3.0(jsep@1.4.0)':
dependencies:
jsep: 1.4.0
'@jsep-plugin/regex@1.0.4(jsep@1.4.0)':
dependencies:
jsep: 1.4.0
'@kubernetes/client-node@1.4.0':
dependencies:
'@types/js-yaml': 4.0.9
'@types/node': 24.12.2
'@types/node-fetch': 2.6.13
'@types/stream-buffers': 3.0.8
form-data: 4.0.5
hpagent: 1.2.0
isomorphic-ws: 5.0.0(ws@8.19.0)
js-yaml: 4.1.1
jsonpath-plus: 10.4.0
node-fetch: 2.7.0
openid-client: 6.8.2
rfc4648: 1.5.4
socks-proxy-agent: 8.0.5
stream-buffers: 3.0.3
tar-fs: 3.1.2
ws: 8.19.0
transitivePeerDependencies:
- bare-abort-controller
- bare-buffer
- bufferutil
- encoding
- react-native-b4a
- supports-color
- utf-8-validate
'@lukeed/ms@2.0.2': {}
'@mapbox/node-pre-gyp@1.0.11':
@@ -3121,10 +3334,19 @@ snapshots:
'@types/json-schema@7.0.15': {}
'@types/node-fetch@2.6.13':
dependencies:
'@types/node': 25.3.0
form-data: 4.0.5
'@types/node@18.19.130':
dependencies:
undici-types: 5.26.5
'@types/node@24.12.2':
dependencies:
undici-types: 7.16.0
'@types/node@25.3.0':
dependencies:
undici-types: 7.18.2
@@ -3137,6 +3359,10 @@ snapshots:
dependencies:
'@types/node': 18.19.130
'@types/stream-buffers@3.0.8':
dependencies:
'@types/node': 25.3.0
'@typescript-eslint/eslint-plugin@8.56.0(@typescript-eslint/parser@8.56.0(eslint@10.0.1(jiti@2.6.1))(typescript@5.9.3))(eslint@10.0.1(jiti@2.6.1))(typescript@5.9.3)':
dependencies:
'@eslint-community/regexpp': 4.12.2
@@ -3302,6 +3528,8 @@ snapshots:
transitivePeerDependencies:
- supports-color
agent-base@7.1.4: {}
ajv-formats@3.0.1(ajv@8.18.0):
optionalDependencies:
ajv: 8.18.0
@@ -3355,6 +3583,8 @@ snapshots:
estree-walker: 3.0.3
js-tokens: 10.0.0
asynckit@0.4.0: {}
atomic-sleep@1.0.0: {}
auto-bind@5.0.1: {}
@@ -3364,10 +3594,44 @@ snapshots:
'@fastify/error': 4.2.0
fastq: 1.20.1
b4a@1.8.0: {}
balanced-match@1.0.2: {}
balanced-match@4.0.3: {}
bare-events@2.8.2: {}
bare-fs@4.6.0:
dependencies:
bare-events: 2.8.2
bare-path: 3.0.0
bare-stream: 2.12.0(bare-events@2.8.2)
bare-url: 2.4.0
fast-fifo: 1.3.2
transitivePeerDependencies:
- bare-abort-controller
- react-native-b4a
bare-os@3.8.7: {}
bare-path@3.0.0:
dependencies:
bare-os: 3.8.7
bare-stream@2.12.0(bare-events@2.8.2):
dependencies:
streamx: 2.25.0
teex: 1.0.1
optionalDependencies:
bare-events: 2.8.2
transitivePeerDependencies:
- react-native-b4a
bare-url@2.4.0:
dependencies:
bare-path: 3.0.0
base64-js@1.5.1: {}
bcrypt-pbkdf@1.0.2:
@@ -3503,6 +3767,10 @@ snapshots:
color-support@1.1.3: {}
combined-stream@1.0.8:
dependencies:
delayed-stream: 1.0.0
commander@13.1.0: {}
concat-map@0.0.1: {}
@@ -3556,6 +3824,8 @@ snapshots:
defu@6.1.4: {}
delayed-stream@1.0.0: {}
delegates@1.0.0: {}
depd@2.0.0: {}
@@ -3628,6 +3898,13 @@ snapshots:
dependencies:
es-errors: 1.3.0
es-set-tostringtag@2.1.0:
dependencies:
es-errors: 1.3.0
get-intrinsic: 1.3.0
has-tostringtag: 1.0.2
hasown: 2.0.2
es-toolkit@1.44.0: {}
esbuild@0.27.3:
@@ -3743,6 +4020,12 @@ snapshots:
etag@1.8.1: {}
events-universal@1.0.1:
dependencies:
bare-events: 2.8.2
transitivePeerDependencies:
- bare-abort-controller
eventsource-parser@3.0.6: {}
eventsource@3.0.7:
@@ -3799,6 +4082,8 @@ snapshots:
fast-deep-equal@3.1.3: {}
fast-fifo@1.3.2: {}
fast-json-stable-stringify@2.1.0: {}
fast-json-stringify@6.3.0:
@@ -3883,6 +4168,14 @@ snapshots:
flatted@3.3.3: {}
form-data@4.0.5:
dependencies:
asynckit: 0.4.0
combined-stream: 1.0.8
es-set-tostringtag: 2.1.0
hasown: 2.0.2
mime-types: 2.1.35
forwarded@0.2.0: {}
fresh@2.0.0: {}
@@ -3972,6 +4265,10 @@ snapshots:
has-symbols@1.1.0: {}
has-tostringtag@1.0.2:
dependencies:
has-symbols: 1.1.0
has-unicode@2.0.1: {}
hasown@2.0.2:
@@ -3982,6 +4279,8 @@ snapshots:
hono@4.12.0: {}
hpagent@1.2.0: {}
html-escaper@2.0.2: {}
http-errors@2.0.1:
@@ -4092,6 +4391,10 @@ snapshots:
isexe@2.0.0: {}
isomorphic-ws@5.0.0(ws@8.19.0):
dependencies:
ws: 8.19.0
istanbul-lib-coverage@3.2.2: {}
istanbul-lib-report@3.0.1:
@@ -4115,6 +4418,8 @@ snapshots:
dependencies:
argparse: 2.0.1
jsep@1.4.0: {}
json-buffer@3.0.1: {}
json-schema-ref-resolver@3.0.0:
@@ -4129,6 +4434,12 @@ snapshots:
json-stable-stringify-without-jsonify@1.0.1: {}
jsonpath-plus@10.4.0:
dependencies:
'@jsep-plugin/assignment': 1.3.0(jsep@1.4.0)
'@jsep-plugin/regex': 1.0.4(jsep@1.4.0)
jsep: 1.4.0
keyv@4.5.4:
dependencies:
json-buffer: 3.0.1
@@ -4178,8 +4489,14 @@ snapshots:
merge-descriptors@2.0.0: {}
mime-db@1.52.0: {}
mime-db@1.54.0: {}
mime-types@2.1.35:
dependencies:
mime-db: 1.52.0
mime-types@3.0.2:
dependencies:
mime-db: 1.54.0
@@ -4257,6 +4574,8 @@ snapshots:
pathe: 2.0.3
tinyexec: 1.0.2
oauth4webapi@3.8.5: {}
object-assign@4.1.1: {}
object-inspect@1.13.4: {}
@@ -4281,6 +4600,11 @@ snapshots:
dependencies:
mimic-fn: 2.1.0
openid-client@6.8.2:
dependencies:
jose: 6.1.3
oauth4webapi: 3.8.5
optionator@0.9.4:
dependencies:
deep-is: 0.1.4
@@ -4455,6 +4779,8 @@ snapshots:
reusify@1.1.0: {}
rfc4648@1.5.4: {}
rfdc@1.4.1: {}
rimraf@3.0.2:
@@ -4612,6 +4938,21 @@ snapshots:
ansi-styles: 6.2.3
is-fullwidth-code-point: 5.1.0
smart-buffer@4.2.0: {}
socks-proxy-agent@8.0.5:
dependencies:
agent-base: 7.1.4
debug: 4.4.3
socks: 2.8.7
transitivePeerDependencies:
- supports-color
socks@2.8.7:
dependencies:
ip-address: 10.0.1
smart-buffer: 4.2.0
sonic-boom@4.2.1:
dependencies:
atomic-sleep: 1.0.0
@@ -4640,6 +4981,17 @@ snapshots:
std-env@3.10.0: {}
stream-buffers@3.0.3: {}
streamx@2.25.0:
dependencies:
events-universal: 1.0.1
fast-fifo: 1.3.2
text-decoder: 1.2.7
transitivePeerDependencies:
- bare-abort-controller
- react-native-b4a
string-width@4.2.3:
dependencies:
emoji-regex: 8.0.0
@@ -4682,6 +5034,18 @@ snapshots:
pump: 3.0.3
tar-stream: 2.2.0
tar-fs@3.1.2:
dependencies:
pump: 3.0.3
tar-stream: 3.1.8
optionalDependencies:
bare-fs: 4.6.0
bare-path: 3.0.0
transitivePeerDependencies:
- bare-abort-controller
- bare-buffer
- react-native-b4a
tar-stream@2.2.0:
dependencies:
bl: 4.1.0
@@ -4690,6 +5054,17 @@ snapshots:
inherits: 2.0.4
readable-stream: 3.6.2
tar-stream@3.1.8:
dependencies:
b4a: 1.8.0
bare-fs: 4.6.0
fast-fifo: 1.3.2
streamx: 2.25.0
transitivePeerDependencies:
- bare-abort-controller
- bare-buffer
- react-native-b4a
tar@6.2.1:
dependencies:
chownr: 2.0.0
@@ -4699,8 +5074,21 @@ snapshots:
mkdirp: 1.0.4
yallist: 4.0.0
teex@1.0.1:
dependencies:
streamx: 2.25.0
transitivePeerDependencies:
- bare-abort-controller
- react-native-b4a
terminal-size@4.0.1: {}
text-decoder@1.2.7:
dependencies:
b4a: 1.8.0
transitivePeerDependencies:
- react-native-b4a
thread-stream@4.0.0:
dependencies:
real-require: 0.2.0
@@ -4755,6 +5143,8 @@ snapshots:
undici-types@5.26.5: {}
undici-types@7.16.0: {}
undici-types@7.18.2: {}
unpipe@1.0.0: {}

View File

@@ -17,6 +17,7 @@
"@fastify/cors": "^10.0.0",
"@fastify/helmet": "^12.0.0",
"@fastify/rate-limit": "^10.0.0",
"@kubernetes/client-node": "^1.4.0",
"@mcpctl/db": "workspace:*",
"@mcpctl/shared": "workspace:*",
"@prisma/client": "^6.0.0",

View File

@@ -29,6 +29,7 @@ import {
ProjectService,
AuditLogService,
DockerContainerManager,
KubernetesOrchestrator,
MetricsCollector,
HealthAggregator,
BackupService,
@@ -271,8 +272,10 @@ async function main(): Promise<void> {
// Migrate legacy 'admin' role → granular roles
await migrateAdminRole(rbacDefinitionRepo);
// Orchestrator
const orchestrator = new DockerContainerManager();
// Orchestrator — select backend via MCPD_ORCHESTRATOR env var
const orchestrator = process.env['MCPD_ORCHESTRATOR'] === 'kubernetes'
? new KubernetesOrchestrator()
: new DockerContainerManager();
// Services
const serverService = new McpServerService(serverRepo);
@@ -284,8 +287,6 @@ async function main(): Promise<void> {
const auditEventService = new AuditEventService(auditEventRepo);
const metricsCollector = new MetricsCollector();
const healthAggregator = new HealthAggregator(metricsCollector, orchestrator);
const backupService = new BackupService(serverRepo, projectRepo, secretRepo, userRepo, groupRepo, rbacDefinitionRepo);
const restoreService = new RestoreService(serverRepo, projectRepo, secretRepo, userRepo, groupRepo, rbacDefinitionRepo);
const authService = new AuthService(prisma);
const templateService = new TemplateService(templateRepo);
const mcpProxyService = new McpProxyService(instanceRepo, serverRepo, orchestrator);
@@ -298,6 +299,8 @@ async function main(): Promise<void> {
const promptRuleRegistry = new ResourceRuleRegistry();
promptRuleRegistry.register(systemPromptVarsRule);
const promptService = new PromptService(promptRepo, promptRequestRepo, projectRepo, promptRuleRegistry);
const backupService = new BackupService(serverRepo, projectRepo, secretRepo, userRepo, groupRepo, rbacDefinitionRepo, promptRepo, templateRepo);
const restoreService = new RestoreService(serverRepo, projectRepo, secretRepo, userRepo, groupRepo, rbacDefinitionRepo, promptRepo, templateRepo);
// Auth middleware for global hooks
const authMiddleware = createAuthMiddleware({
@@ -484,15 +487,23 @@ async function main(): Promise<void> {
await app.listen({ port: config.port, host: config.host });
app.log.info(`mcpd listening on ${config.host}:${config.port}`);
// Periodic container liveness sync — detect crashed containers
const SYNC_INTERVAL_MS = 30_000; // 30s
const syncTimer = setInterval(async () => {
// Periodic reconciliation loop — the operator's heartbeat.
// Detects crashed/missing containers, cleans up ERROR instances,
// and starts replacements to match desired replica counts.
const RECONCILE_INTERVAL_MS = 30_000; // 30s
const reconcileTimer = setInterval(async () => {
try {
await instanceService.syncStatus();
const { reconciled, errors } = await instanceService.reconcileAll();
if (reconciled > 0) {
app.log.info(`[reconcile] ${reconciled} server(s) reconciled`);
}
for (const err of errors) {
app.log.error(`[reconcile] ${err}`);
}
} catch (err) {
app.log.error({ err }, 'Container status sync failed');
app.log.error({ err }, 'Reconciliation loop failed');
}
}, SYNC_INTERVAL_MS);
}, RECONCILE_INTERVAL_MS);
// Health probe runner — periodic MCP tool-call probes (like k8s livenessProbe)
const healthProbeRunner = new HealthProbeRunner(
@@ -506,7 +517,7 @@ async function main(): Promise<void> {
// Graceful shutdown
setupGracefulShutdown(app, {
disconnectDb: async () => {
clearInterval(syncTimer);
clearInterval(reconcileTimer);
healthProbeRunner.stop();
gitBackup.stop();
await prisma.$disconnect();

View File

@@ -3,6 +3,8 @@ import type { IProjectRepository } from '../../repositories/project.repository.j
import type { IUserRepository } from '../../repositories/user.repository.js';
import type { IGroupRepository } from '../../repositories/group.repository.js';
import type { IRbacDefinitionRepository } from '../../repositories/rbac-definition.repository.js';
import type { IPromptRepository } from '../../repositories/prompt.repository.js';
import type { ITemplateRepository } from '../../repositories/template.repository.js';
import { encrypt, isSensitiveKey } from './crypto.js';
import type { EncryptedPayload } from './crypto.js';
import { APP_VERSION } from '@mcpctl/shared';
@@ -18,6 +20,8 @@ export interface BackupBundle {
users?: BackupUser[];
groups?: BackupGroup[];
rbacBindings?: BackupRbacBinding[];
prompts?: BackupPrompt[];
templates?: BackupTemplate[];
encryptedSecrets?: EncryptedPayload;
}
@@ -25,10 +29,16 @@ export interface BackupServer {
name: string;
description: string;
packageName: string | null;
runtime: string | null;
dockerImage: string | null;
transport: string;
repositoryUrl: string | null;
externalUrl: string | null;
command: unknown;
containerPort: number | null;
replicas: number;
env: unknown;
healthCheck: unknown;
}
export interface BackupSecret {
@@ -65,9 +75,31 @@ export interface BackupRbacBinding {
roleBindings: unknown;
}
export interface BackupPrompt {
name: string;
content: string;
projectName: string | null;
priority: number;
summary: string | null;
chapters: unknown;
linkTarget: string | null;
}
export interface BackupTemplate {
name: string;
description: string;
packageName: string | null;
dockerImage: string | null;
transport: string;
command: unknown;
containerPort: number | null;
env: unknown;
healthCheck: unknown;
}
export interface BackupOptions {
password?: string;
resources?: Array<'servers' | 'secrets' | 'projects' | 'users' | 'groups' | 'rbac'>;
resources?: Array<'servers' | 'secrets' | 'projects' | 'users' | 'groups' | 'rbac' | 'prompts' | 'templates'>;
}
export class BackupService {
@@ -78,10 +110,12 @@ export class BackupService {
private userRepo?: IUserRepository,
private groupRepo?: IGroupRepository,
private rbacRepo?: IRbacDefinitionRepository,
private promptRepo?: IPromptRepository,
private templateRepo?: ITemplateRepository,
) {}
async createBackup(options?: BackupOptions): Promise<BackupBundle> {
const resources = options?.resources ?? ['servers', 'secrets', 'projects', 'users', 'groups', 'rbac'];
const resources = options?.resources ?? ['servers', 'secrets', 'projects', 'users', 'groups', 'rbac', 'prompts', 'templates'];
let servers: BackupServer[] = [];
let secrets: BackupSecret[] = [];
@@ -96,10 +130,16 @@ export class BackupService {
name: s.name,
description: s.description,
packageName: s.packageName,
runtime: s.runtime,
dockerImage: s.dockerImage,
transport: s.transport,
repositoryUrl: s.repositoryUrl,
externalUrl: s.externalUrl,
command: s.command,
containerPort: s.containerPort,
replicas: s.replicas,
env: s.env,
healthCheck: s.healthCheck,
}));
}
@@ -151,6 +191,37 @@ export class BackupService {
}));
}
let prompts: BackupPrompt[] = [];
let templates: BackupTemplate[] = [];
if (resources.includes('prompts') && this.promptRepo) {
const allPrompts = await this.promptRepo.findAll();
prompts = allPrompts.map((p) => ({
name: p.name,
content: p.content,
projectName: (p as unknown as { project?: { name: string } }).project?.name ?? null,
priority: p.priority,
summary: p.summary,
chapters: p.chapters,
linkTarget: p.linkTarget,
}));
}
if (resources.includes('templates') && this.templateRepo) {
const allTemplates = await this.templateRepo.findAll();
templates = allTemplates.map((t) => ({
name: t.name,
description: t.description,
packageName: t.packageName,
dockerImage: t.dockerImage,
transport: t.transport,
command: t.command,
containerPort: t.containerPort,
env: t.env,
healthCheck: t.healthCheck,
}));
}
const bundle: BackupBundle = {
version: '1',
mcpctlVersion: APP_VERSION,
@@ -162,6 +233,8 @@ export class BackupService {
users,
groups,
rbacBindings,
prompts,
templates,
};
if (options?.password && secrets.length > 0) {

View File

@@ -3,6 +3,8 @@ import type { IProjectRepository } from '../../repositories/project.repository.j
import type { IUserRepository } from '../../repositories/user.repository.js';
import type { IGroupRepository } from '../../repositories/group.repository.js';
import type { IRbacDefinitionRepository } from '../../repositories/rbac-definition.repository.js';
import type { IPromptRepository } from '../../repositories/prompt.repository.js';
import type { ITemplateRepository } from '../../repositories/template.repository.js';
import type { RbacRoleBinding } from '../../validation/rbac-definition.schema.js';
import { decrypt } from './crypto.js';
import type { BackupBundle } from './backup-service.js';
@@ -27,6 +29,10 @@ export interface RestoreResult {
groupsSkipped: number;
rbacCreated: number;
rbacSkipped: number;
promptsCreated: number;
promptsSkipped: number;
templatesCreated: number;
templatesSkipped: number;
errors: string[];
}
@@ -38,6 +44,8 @@ export class RestoreService {
private userRepo?: IUserRepository,
private groupRepo?: IGroupRepository,
private rbacRepo?: IRbacDefinitionRepository,
private promptRepo?: IPromptRepository,
private templateRepo?: ITemplateRepository,
) {}
validateBundle(bundle: unknown): bundle is BackupBundle {
@@ -67,6 +75,10 @@ export class RestoreService {
groupsSkipped: 0,
rbacCreated: 0,
rbacSkipped: 0,
promptsCreated: 0,
promptsSkipped: 0,
templatesCreated: 0,
templatesSkipped: 0,
errors: [],
};
@@ -159,12 +171,17 @@ export class RestoreService {
name: server.name,
description: server.description,
transport: server.transport as 'STDIO' | 'SSE' | 'STREAMABLE_HTTP',
replicas: (server as { replicas?: number }).replicas ?? 1,
replicas: server.replicas ?? 1,
env: (server.env ?? []) as Array<{ name: string; value?: string; valueFrom?: { secretRef: { name: string; key: string } } }>,
};
if (server.packageName) createData.packageName = server.packageName;
if (server.runtime) createData.runtime = server.runtime;
if (server.dockerImage) createData.dockerImage = server.dockerImage;
if (server.repositoryUrl) createData.repositoryUrl = server.repositoryUrl;
if (server.externalUrl) createData.externalUrl = server.externalUrl;
if (server.command) createData.command = server.command as string[];
if (server.containerPort) createData.containerPort = server.containerPort;
if (server.healthCheck) createData.healthCheck = server.healthCheck as Parameters<IMcpServerRepository['create']>[0]['healthCheck'];
await this.serverRepo.create(createData);
result.serversCreated++;
} catch (err) {
@@ -270,10 +287,20 @@ export class RestoreService {
continue;
}
// Resolve a valid owner — prefer system user, fall back to first user
let ownerId = '';
if (this.userRepo) {
const allUsers = await this.userRepo.findAll();
for (const u of allUsers) {
if (u.email === 'system@mcpctl.local') { ownerId = u.id; break; }
if (!ownerId) ownerId = u.id;
}
}
const projectCreateData: { name: string; description: string; ownerId: string; proxyModel?: string; llmProvider?: string; llmModel?: string } = {
name: project.name,
description: project.description,
ownerId: 'system',
ownerId,
};
if (project.proxyModel) projectCreateData.proxyModel = project.proxyModel;
if (project.llmProvider != null) projectCreateData.llmProvider = project.llmProvider;
@@ -327,6 +354,87 @@ export class RestoreService {
}
}
// Restore prompts (after projects, so projectId can be resolved)
if (bundle.prompts && this.promptRepo) {
for (const prompt of bundle.prompts) {
try {
// Resolve project by name
let projectId: string | undefined;
if (prompt.projectName) {
const project = await this.projectRepo.findByName(prompt.projectName);
if (project) projectId = project.id;
}
const existing = await this.promptRepo.findByNameAndProject(prompt.name, projectId ?? null);
if (existing) {
if (strategy === 'fail') {
result.errors.push(`Prompt "${prompt.name}" already exists`);
return result;
}
if (strategy === 'skip') {
result.promptsSkipped++;
continue;
}
// overwrite
const updateData: { content: string; priority: number; summary?: string } = {
content: prompt.content,
priority: prompt.priority,
};
if (prompt.summary) updateData.summary = prompt.summary;
await this.promptRepo.update(existing.id, updateData);
result.promptsCreated++;
continue;
}
const createData: { name: string; content: string; projectId?: string; priority?: number; linkTarget?: string } = {
name: prompt.name,
content: prompt.content,
};
if (projectId) createData.projectId = projectId;
if (prompt.priority !== 5) createData.priority = prompt.priority;
if (prompt.linkTarget) createData.linkTarget = prompt.linkTarget;
await this.promptRepo.create(createData);
result.promptsCreated++;
} catch (err) {
result.errors.push(`Failed to restore prompt "${prompt.name}": ${err instanceof Error ? err.message : String(err)}`);
}
}
}
// Restore templates
if (bundle.templates && this.templateRepo) {
for (const tmpl of bundle.templates) {
try {
const existing = await this.templateRepo.findByName(tmpl.name);
if (existing) {
if (strategy === 'skip') {
result.templatesSkipped++;
continue;
}
// NOTE(review): no upsert is performed here — both 'overwrite' and 'fail' strategies currently skip existing templates; confirm this is intended
result.templatesSkipped++;
continue;
}
const tmplData: Record<string, unknown> = {
name: tmpl.name,
description: tmpl.description,
transport: tmpl.transport as 'STDIO' | 'SSE' | 'STREAMABLE_HTTP',
};
if (tmpl.packageName) tmplData.packageName = tmpl.packageName;
if (tmpl.dockerImage) tmplData.dockerImage = tmpl.dockerImage;
if (tmpl.command) tmplData.command = tmpl.command;
if (tmpl.containerPort) tmplData.containerPort = tmpl.containerPort;
if (tmpl.env) tmplData.env = tmpl.env;
if (tmpl.healthCheck) tmplData.healthCheck = tmpl.healthCheck;
await this.templateRepo.create(tmplData as Parameters<typeof this.templateRepo.create>[0]);
result.templatesCreated++;
} catch (err) {
result.errors.push(`Failed to restore template "${tmpl.name}": ${err instanceof Error ? err.message : String(err)}`);
}
}
}
return result;
}

View File

@@ -49,6 +49,7 @@ export class InstanceService {
if ((inst.status === 'RUNNING' || inst.status === 'STARTING') && inst.containerId) {
try {
const info = await this.orchestrator.inspectContainer(inst.containerId);
if (info.state === 'stopped' || info.state === 'error') {
// Container died — get last logs for error context
let errorMsg = `Container ${info.state}`;
@@ -60,6 +61,12 @@ export class InstanceService {
await this.instanceRepo.updateStatus(inst.id, 'ERROR', {
metadata: { error: errorMsg },
});
} else if (info.state === 'starting' && inst.status === 'RUNNING') {
// Pod went back to starting (e.g. CrashLoopBackOff restart)
await this.instanceRepo.updateStatus(inst.id, 'STARTING', {});
} else if (info.state === 'running' && inst.status === 'STARTING') {
// Pod became ready — promote to RUNNING
await this.instanceRepo.updateStatus(inst.id, 'RUNNING', {});
}
} catch {
// Container gone entirely
@@ -107,6 +114,49 @@ export class InstanceService {
return this.instanceRepo.findAll(serverId);
}
/**
* Reconcile ALL servers — the operator loop.
*
* For every server with replicas > 0, ensures the correct number of
* healthy instances exist. Cleans up ERROR instances and starts
* replacements. This is the core self-healing mechanism.
*/
async reconcileAll(): Promise<{ reconciled: number; errors: string[] }> {
await this.syncStatus();
const servers = await this.serverRepo.findAll();
let reconciled = 0;
const errors: string[] = [];
for (const server of servers) {
if (server.replicas <= 0) continue;
try {
const instances = await this.instanceRepo.findAll(server.id);
const active = instances.filter((i) => i.status === 'RUNNING' || i.status === 'STARTING');
const errored = instances.filter((i) => i.status === 'ERROR');
// Clean up ERROR instances so they don't accumulate
for (const inst of errored) {
await this.removeOne(inst);
}
// Scale up if needed
const toStart = server.replicas - active.length;
if (toStart > 0) {
for (let i = 0; i < toStart; i++) {
await this.startOne(server.id);
}
reconciled++;
}
} catch (err) {
errors.push(`${server.name}: ${err instanceof Error ? err.message : String(err)}`);
}
}
return { reconciled, errors };
}
/**
* Remove an instance (stop container + delete DB record).
* Does NOT reconcile — caller should reconcile after if needed.
@@ -262,7 +312,8 @@ export class InstanceService {
updateFields.port = containerInfo.port;
}
instance = await this.instanceRepo.updateStatus(instance.id, 'RUNNING', updateFields);
// Set STARTING — syncStatus will promote to RUNNING once the container is actually ready
instance = await this.instanceRepo.updateStatus(instance.id, 'STARTING', updateFields);
} catch (err) {
instance = await this.instanceRepo.updateStatus(instance.id, 'ERROR', {
metadata: { error: err instanceof Error ? err.message : String(err) },

View File

@@ -1,4 +1,7 @@
export { KubernetesOrchestrator } from './kubernetes-orchestrator.js';
export { K8sOfficialClient } from './k8s-client-official.js';
export type { K8sOfficialClientConfig } from './k8s-client-official.js';
// Legacy client — kept for backwards compatibility, will be removed
export { K8sClient, loadDefaultConfig, parseKubeconfig } from './k8s-client.js';
export type { K8sClientConfig, K8sResponse, K8sError } from './k8s-client.js';
export {

View File

@@ -0,0 +1,54 @@
/**
* Thin wrapper around @kubernetes/client-node.
*
* Centralises KubeConfig loading (in-cluster or kubeconfig) and exposes
* the typed API clients the KubernetesOrchestrator needs.
*/
import * as k8s from '@kubernetes/client-node';
/** Construction options for {@link K8sOfficialClient}. All fields optional. */
export interface K8sOfficialClientConfig {
/** Override the namespace for MCP server pods. Defaults to 'mcpctl-servers'. Env: MCPD_SERVERS_NAMESPACE. */
serversNamespace?: string;
/**
* Explicit kubeconfig context name. When set, the client switches to this
* context before creating API clients — prevents accidental operations
* against the wrong cluster. Env: MCPD_K8S_CONTEXT.
*/
context?: string;
}
/**
 * Thin facade over the official Kubernetes client.
 *
 * Loads kubeconfig via `loadFromDefault()` (kubeconfig file or, when running
 * inside a cluster, the in-cluster service account) and pre-builds the API
 * clients the orchestrator needs: core v1, exec, attach, and log.
 */
export class K8sOfficialClient {
// Raw kubeconfig — exposed so callers can build additional API clients.
readonly kc: k8s.KubeConfig;
// Core v1 API (pods, namespaces).
readonly core: k8s.CoreV1Api;
// WebSocket-based exec into a container.
readonly exec: k8s.Exec;
// WebSocket-based attach to a container's main process.
readonly attach: k8s.Attach;
// Streaming pod log reader.
readonly log: k8s.Log;
// Namespace where MCP server pods are created.
readonly serversNamespace: string;
constructor(opts?: K8sOfficialClientConfig) {
this.kc = new k8s.KubeConfig();
this.kc.loadFromDefault();
// Enforce explicit context if configured — safety against multi-cluster mishaps
const ctx = opts?.context ?? process.env['MCPD_K8S_CONTEXT'];
if (ctx) {
this.kc.setCurrentContext(ctx);
}
this.core = this.kc.makeApiClient(k8s.CoreV1Api);
this.exec = new k8s.Exec(this.kc);
this.attach = new k8s.Attach(this.kc);
this.log = new k8s.Log(this.kc);
// Precedence: explicit option > env var > default
this.serversNamespace = opts?.serversNamespace
?? process.env['MCPD_SERVERS_NAMESPACE']
?? 'mcpctl-servers';
}
/** Namespace declared by the currently selected kubeconfig context, or 'default' when the context declares none. */
get controlNamespace(): string {
const contexts = this.kc.getContexts();
const current = this.kc.getCurrentContext();
const ctxObj = contexts.find((c) => c.name === current);
return ctxObj?.namespace ?? 'default';
}
}

View File

@@ -1,54 +1,26 @@
import { PassThrough, Writable } from 'node:stream';
import type {
McpOrchestrator,
ContainerSpec,
ContainerInfo,
ContainerLogs,
ExecResult,
InteractiveExec,
} from '../orchestrator.js';
import { K8sClient } from './k8s-client.js';
import type { K8sClientConfig } from './k8s-client.js';
import { generatePodSpec, generateNamespaceSpec } from './manifest-generator.js';
import { K8sOfficialClient } from './k8s-client-official.js';
import type { K8sOfficialClientConfig } from './k8s-client-official.js';
import { generatePodSpec } from './manifest-generator.js';
import type { V1Pod } from '@kubernetes/client-node';
interface K8sPodStatus {
metadata: {
name: string;
namespace: string;
creationTimestamp: string;
labels?: Record<string, string>;
};
status: {
phase: string;
containerStatuses?: Array<{
state: {
running?: Record<string, unknown>;
waiting?: { reason?: string };
terminated?: { reason?: string; exitCode?: number };
};
}>;
};
spec?: {
containers: Array<{
ports?: Array<{ containerPort: number }>;
}>;
};
}
interface K8sPodList {
items: K8sPodStatus[];
}
function mapPhase(phase: string, containerStatuses?: K8sPodStatus['status']['containerStatuses']): ContainerInfo['state'] {
// Check container-level status first for more granularity
if (containerStatuses && containerStatuses.length > 0) {
const cs = containerStatuses[0];
if (cs) {
if (cs.state.running) return 'running';
if (cs.state.waiting) return 'starting';
if (cs.state.terminated) return 'stopped';
}
function mapPodState(pod: V1Pod): ContainerInfo['state'] {
const cs = pod.status?.containerStatuses?.[0];
if (cs) {
if (cs.state?.running) return 'running';
if (cs.state?.waiting) return 'starting';
if (cs.state?.terminated) return 'stopped';
}
switch (phase) {
switch (pod.status?.phase) {
case 'Running':
return 'running';
case 'Pending':
@@ -61,150 +33,306 @@ function mapPhase(phase: string, containerStatuses?: K8sPodStatus['status']['con
}
}
/**
 * Translate a Kubernetes V1Pod into the orchestrator's ContainerInfo shape.
 *
 * The pod name doubles as the container id (the pod is the unit of
 * management here). Falls back to "now" when the API omits the
 * creationTimestamp field.
 */
function podToContainerInfo(pod: V1Pod): ContainerInfo {
  const podName = pod.metadata!.name!;
  const createdRaw = pod.metadata!.creationTimestamp;
  const info: ContainerInfo = {
    containerId: podName,
    name: podName,
    state: mapPodState(pod),
    createdAt: createdRaw ? new Date(createdRaw as unknown as string) : new Date(),
  };
  // Pod IP for internal network communication (replaces Docker container IP)
  const podIP = pod.status?.podIP;
  if (podIP) {
    info.ip = podIP;
  }
  // Surface the first declared container port, when present
  const firstPort = pod.spec?.containers?.[0]?.ports?.[0];
  if (firstPort?.containerPort) {
    info.port = firstPort.containerPort;
  }
  return info;
}
export class KubernetesOrchestrator implements McpOrchestrator {
private client: K8sClient;
private client: K8sOfficialClient;
private namespace: string;
constructor(config: K8sClientConfig) {
this.client = new K8sClient(config);
this.namespace = config.namespace ?? 'default';
constructor(config?: K8sOfficialClientConfig) {
this.client = new K8sOfficialClient(config);
this.namespace = this.client.serversNamespace;
}
async ping(): Promise<boolean> {
try {
const res = await this.client.get('/api/v1');
return res.statusCode === 200;
await this.client.core.listNamespace();
return true;
} catch {
return false;
}
}
async pullImage(_image: string): Promise<void> {
// K8s pulls images on pod scheduling - no pre-pull needed
// K8s pulls images on pod scheduling — no pre-pull needed
}
async createContainer(spec: ContainerSpec): Promise<ContainerInfo> {
await this.ensureNamespace(this.namespace);
const manifest = generatePodSpec(spec, this.namespace);
const res = await this.client.post<K8sPodStatus>(
`/api/v1/namespaces/${this.namespace}/pods`,
manifest,
);
if (res.statusCode >= 400) {
const err = res.body as unknown as { message?: string };
throw new Error(`Failed to create pod: ${err.message ?? `HTTP ${res.statusCode}`}`);
}
const pod = await this.client.core.createNamespacedPod({
namespace: this.namespace,
body: manifest as V1Pod,
});
// Wait briefly for pod to start scheduling
await new Promise((resolve) => setTimeout(resolve, 500));
return this.inspectContainer(res.body.metadata.name);
return this.inspectContainer(pod.metadata!.name!);
}
async stopContainer(containerId: string): Promise<void> {
// In K8s, "stopping" a pod means deleting it
await this.removeContainer(containerId);
}
async removeContainer(containerId: string, _force?: boolean): Promise<void> {
const res = await this.client.delete(
`/api/v1/namespaces/${this.namespace}/pods/${containerId}`,
);
if (res.statusCode >= 400 && res.statusCode !== 404) {
const err = res.body as { message?: string };
throw new Error(`Failed to delete pod: ${err.message ?? `HTTP ${res.statusCode}`}`);
try {
await this.client.core.deleteNamespacedPod({
name: containerId,
namespace: this.namespace,
gracePeriodSeconds: 5,
});
} catch (err: unknown) {
const status = (err as { statusCode?: number }).statusCode
?? (err as { response?: { statusCode?: number } }).response?.statusCode;
if (status !== 404) throw err;
}
}
async inspectContainer(containerId: string): Promise<ContainerInfo> {
const res = await this.client.get<K8sPodStatus>(
`/api/v1/namespaces/${this.namespace}/pods/${containerId}`,
);
if (res.statusCode === 404) {
throw new Error(`Pod "${containerId}" not found in namespace "${this.namespace}"`);
}
if (res.statusCode >= 400) {
const err = res.body as unknown as { message?: string };
throw new Error(`Failed to inspect pod: ${err.message ?? `HTTP ${res.statusCode}`}`);
}
const pod = res.body;
const result: ContainerInfo = {
containerId: pod.metadata.name,
name: pod.metadata.name,
state: mapPhase(pod.status.phase, pod.status.containerStatuses),
createdAt: new Date(pod.metadata.creationTimestamp),
};
// Extract port from first container spec if available
const containers = pod.spec?.containers;
if (containers && containers.length > 0) {
const ports = containers[0]?.ports;
if (ports && ports.length > 0 && ports[0]) {
result.port = ports[0].containerPort;
}
}
return result;
const pod = await this.client.core.readNamespacedPod({
name: containerId,
namespace: this.namespace,
});
return podToContainerInfo(pod);
}
async getContainerLogs(
containerId: string,
opts?: { tail?: number; since?: number },
): Promise<ContainerLogs> {
const logOpts: { tail?: number; since?: number } = {
tail: opts?.tail ?? 100,
const stdout = new PassThrough();
const chunks: Buffer[] = [];
stdout.on('data', (chunk: Buffer) => chunks.push(chunk));
const containerName = await this.getContainerName(containerId);
const logOpts: { tailLines?: number; sinceSeconds?: number } = {
tailLines: opts?.tail ?? 100,
};
if (opts?.since !== undefined) {
logOpts.since = opts.since;
logOpts.sinceSeconds = opts.since;
}
const stdout = await this.client.getLogs(this.namespace, containerId, logOpts);
return { stdout, stderr: '' };
await new Promise<void>((resolve, reject) => {
this.client.log
.log(this.namespace, containerId, containerName, stdout, logOpts)
.then(() => {
stdout.on('end', resolve);
})
.catch(reject);
});
return { stdout: Buffer.concat(chunks).toString('utf-8'), stderr: '' };
}
async execInContainer(
_containerId: string,
_cmd: string[],
_opts?: { stdin?: string; timeoutMs?: number },
containerId: string,
cmd: string[],
opts?: { stdin?: string; timeoutMs?: number },
): Promise<ExecResult> {
// K8s exec via API — future implementation
throw new Error('execInContainer not yet implemented for Kubernetes');
const containerName = await this.getContainerName(containerId);
const stdoutChunks: Buffer[] = [];
const stderrChunks: Buffer[] = [];
const stdoutStream = new Writable({
write(chunk: Buffer, _encoding, callback) {
stdoutChunks.push(chunk);
callback();
},
});
const stderrStream = new Writable({
write(chunk: Buffer, _encoding, callback) {
stderrChunks.push(chunk);
callback();
},
});
let stdinStream: PassThrough | null = null;
if (opts?.stdin) {
stdinStream = new PassThrough();
stdinStream.end(opts.stdin);
}
let exitCode = 0;
const timeoutMs = opts?.timeoutMs ?? 30_000;
await Promise.race([
new Promise<void>((resolve, reject) => {
this.client.exec
.exec(
this.namespace,
containerId,
containerName,
cmd,
stdoutStream,
stderrStream,
stdinStream,
false, // tty
(status) => {
if (status.status === 'Failure') {
exitCode = 1;
}
resolve();
},
)
.catch(reject);
}),
new Promise<never>((_, reject) =>
setTimeout(() => reject(new Error(`Exec timed out after ${timeoutMs}ms`)), timeoutMs),
),
]);
return {
exitCode,
stdout: Buffer.concat(stdoutChunks).toString('utf-8'),
stderr: Buffer.concat(stderrChunks).toString('utf-8'),
};
}
/**
 * Open a long-running, bidirectional exec session in a pod
 * (kubectl-exec equivalent). stderr is intentionally discarded to
 * match the Docker orchestrator's behavior for interactive sessions.
 *
 * @param containerId pod name
 * @param cmd command and arguments to run inside the container
 * @returns handles for reading stdout, writing stdin, and tearing down
 */
async execInteractive(
  containerId: string,
  cmd: string[],
): Promise<InteractiveExec> {
  const containerName = await this.getContainerName(containerId);
  const outStream = new PassThrough();
  const inStream = new PassThrough();
  // Discard stderr for interactive sessions (matches Docker behavior)
  const devNull = new Writable({
    write(_chunk: Buffer, _encoding, callback) {
      callback();
    },
  });
  // Await here so the WebSocket connection is established before we
  // hand the stream handles back to the caller.
  const ws = await this.client.exec.exec(
    this.namespace,
    containerId,
    containerName,
    cmd,
    outStream,
    devNull,
    inStream,
    false, // tty
  );
  return {
    stdout: outStream,
    write(data: string) {
      inStream.write(data);
    },
    close() {
      inStream.end();
      outStream.destroy();
      ws.close();
    },
  };
}
/**
 * Attach to a running container's main process (PID 1) stdin/stdout.
 * Used for docker-image STDIO servers where the entrypoint IS the MCP server.
 * stderr is discarded, mirroring execInteractive.
 *
 * @param containerId pod name
 * @returns handles for reading stdout, writing stdin, and tearing down
 */
async attachInteractive(
  containerId: string,
): Promise<InteractiveExec> {
  const containerName = await this.getContainerName(containerId);
  const outStream = new PassThrough();
  const inStream = new PassThrough();
  // Sink for stderr — not part of the interactive protocol
  const devNull = new Writable({
    write(_chunk: Buffer, _encoding, callback) {
      callback();
    },
  });
  const ws = await this.client.attach.attach(
    this.namespace,
    containerId,
    containerName,
    outStream,
    devNull,
    inStream,
    false, // tty
  );
  return {
    stdout: outStream,
    write(data: string) {
      inStream.write(data);
    },
    close() {
      inStream.end();
      outStream.destroy();
      ws.close();
    },
  };
}
async listContainers(namespace?: string): Promise<ContainerInfo[]> {
const ns = namespace ?? this.namespace;
const res = await this.client.get<K8sPodList>(
`/api/v1/namespaces/${ns}/pods?labelSelector=mcpctl.managed%3Dtrue`,
);
if (res.statusCode >= 400) return [];
return res.body.items.map((pod) => {
const info: ContainerInfo = {
containerId: pod.metadata.name,
name: pod.metadata.name,
state: mapPhase(pod.status.phase, pod.status.containerStatuses),
createdAt: new Date(pod.metadata.creationTimestamp),
};
return info;
const podList = await this.client.core.listNamespacedPod({
namespace: ns,
labelSelector: 'mcpctl.managed=true',
});
return podList.items.map(podToContainerInfo);
}
async ensureNamespace(name: string): Promise<void> {
const res = await this.client.get(`/api/v1/namespaces/${name}`);
if (res.statusCode === 200) return;
const nsManifest = generateNamespaceSpec(name);
const createRes = await this.client.post('/api/v1/namespaces', nsManifest);
if (createRes.statusCode >= 400 && createRes.statusCode !== 409) {
const err = createRes.body as { message?: string };
throw new Error(`Failed to create namespace "${name}": ${err.message ?? `HTTP ${createRes.statusCode}`}`);
try {
await this.client.core.readNamespace({ name });
} catch {
try {
await this.client.core.createNamespace({
body: { apiVersion: 'v1', kind: 'Namespace', metadata: { name } },
});
} catch (createErr: unknown) {
const status = (createErr as { statusCode?: number }).statusCode
?? (createErr as { response?: { statusCode?: number } }).response?.statusCode;
if (status !== 409) throw createErr; // Already exists is fine
}
}
}
getNamespace(): string {
return this.namespace;
}
/**
 * Get the first container name in a pod (needed for exec/log APIs).
 * Falls back to the pod name itself when the pod spec is unavailable.
 */
private async getContainerName(podName: string): Promise<string> {
  const pod = await this.client.core.readNamespacedPod({
    name: podName,
    namespace: this.namespace,
  });
  const firstContainer = pod.spec?.containers?.[0];
  return firstContainer?.name ?? podName;
}
}

View File

@@ -15,19 +15,26 @@ export interface K8sPodManifest {
containers: Array<{
name: string;
image: string;
command?: string[];
args?: string[];
env?: Array<{ name: string; value: string }>;
ports?: Array<{ containerPort: number }>;
stdin?: boolean;
resources: {
limits: { memory: string; cpu: string };
requests: { memory: string; cpu: string };
};
securityContext: {
runAsNonRoot: boolean;
readOnlyRootFilesystem: boolean;
runAsNonRoot?: boolean;
readOnlyRootFilesystem?: boolean;
allowPrivilegeEscalation: boolean;
capabilities: { drop: string[] };
seccompProfile: { type: string };
};
}>;
restartPolicy: 'Always' | 'Never' | 'OnFailure';
automountServiceAccountToken: boolean;
nodeSelector?: Record<string, string>;
};
}
@@ -86,14 +93,7 @@ function buildContainerSpec(spec: ContainerSpec) {
const memStr = formatMemory(memoryLimit);
const cpuStr = formatCpu(nanoCpus);
const container: {
name: string;
image: string;
env?: Array<{ name: string; value: string }>;
ports?: Array<{ containerPort: number }>;
resources: { limits: { memory: string; cpu: string }; requests: { memory: string; cpu: string } };
securityContext: { runAsNonRoot: boolean; readOnlyRootFilesystem: boolean; allowPrivilegeEscalation: boolean };
} = {
const container: K8sPodManifest['spec']['containers'][0] = {
name: sanitizeName(spec.name),
image: spec.image,
resources: {
@@ -101,12 +101,25 @@ function buildContainerSpec(spec: ContainerSpec) {
requests: { memory: memStr, cpu: cpuStr },
},
securityContext: {
runAsNonRoot: true,
readOnlyRootFilesystem: true,
// MCP server images (runner images, third-party) may run as root
// Restrict privilege escalation and capabilities but allow root
runAsNonRoot: false,
readOnlyRootFilesystem: false,
allowPrivilegeEscalation: false,
capabilities: { drop: ['ALL'] },
seccompProfile: { type: 'RuntimeDefault' },
},
// Keep stdin open for STDIO MCP servers (matches Docker's OpenStdin)
stdin: true,
};
// In Docker, spec.command maps to Cmd (args to entrypoint).
// In k8s, we use `args` to pass arguments to the image's entrypoint,
// preserving the runner image's entrypoint (uvx, npx -y, etc.)
if (spec.command && spec.command.length > 0) {
container.args = spec.command;
}
if (spec.env && Object.keys(spec.env).length > 0) {
container.env = Object.entries(spec.env).map(([name, value]) => ({ name, value }));
}
@@ -131,6 +144,13 @@ export function generatePodSpec(spec: ContainerSpec, namespace: string): K8sPodM
spec: {
containers: [buildContainerSpec(spec)],
restartPolicy: 'Always',
// MCP server pods don't need k8s API access
automountServiceAccountToken: false,
// On mixed-arch clusters, constrain to the same arch as mcpd
// (runner images are typically single-arch)
...(process.env['MCPD_NODE_SELECTOR']
? { nodeSelector: JSON.parse(process.env['MCPD_NODE_SELECTOR']) as Record<string, string> }
: {}),
},
};
}
@@ -158,6 +178,7 @@ export function generateDeploymentSpec(spec: ContainerSpec, namespace: string, r
spec: {
containers: [buildContainerSpec(spec)],
restartPolicy: 'Always',
automountServiceAccountToken: false,
},
},
},

View File

@@ -140,8 +140,13 @@ export class McpProxyService {
}
const packageName = server.packageName as string | null;
const command = server.command as string[] | null;
if (!packageName && (!command || command.length === 0)) {
throw new InvalidStateError(`Server '${server.id}' has no packageName or command for STDIO transport`);
throw new InvalidStateError(
`Server '${server.name}' (${server.id}) uses STDIO transport with a docker image ` +
`but has no command. Set 'command' to the image's entrypoint ` +
`(e.g. mcpctl edit server ${server.name} --command node --command build/index.js)`
);
}
// Build the spawn command based on runtime

View File

@@ -71,6 +71,9 @@ export interface McpOrchestrator {
/** Start a long-running interactive exec session (bidirectional stdio stream). */
execInteractive?(containerId: string, cmd: string[]): Promise<InteractiveExec>;
/** Attach to a running container's main process stdin/stdout (PID 1). */
attachInteractive?(containerId: string): Promise<InteractiveExec>;
/** Check if the orchestrator runtime is available */
ping(): Promise<boolean>;
}

View File

@@ -294,4 +294,99 @@ describe('InstanceService', () => {
expect(result.stdout).toBe('log output');
});
});
// Operator-loop behavior: scaling up, skipping, cleanup, isolation of errors.
describe('reconcileAll', () => {
it('creates missing instances for servers with replicas > 0', async () => {
const server = makeServer({ id: 'srv-1', name: 'grafana', replicas: 1 });
vi.mocked(serverRepo.findAll).mockResolvedValue([server]);
vi.mocked(serverRepo.findById).mockResolvedValue(server);
// No instances exist
vi.mocked(instanceRepo.findAll).mockResolvedValue([]);
const result = await service.reconcileAll();
expect(result.reconciled).toBe(1);
expect(result.errors).toHaveLength(0);
expect(instanceRepo.create).toHaveBeenCalled();
});
it('skips servers with replicas = 0', async () => {
const server = makeServer({ id: 'srv-1', replicas: 0 });
vi.mocked(serverRepo.findAll).mockResolvedValue([server]);
vi.mocked(instanceRepo.findAll).mockResolvedValue([]);
const result = await service.reconcileAll();
expect(result.reconciled).toBe(0);
expect(instanceRepo.create).not.toHaveBeenCalled();
});
it('does not create instances when already at desired count', async () => {
const server = makeServer({ id: 'srv-1', replicas: 1 });
vi.mocked(serverRepo.findAll).mockResolvedValue([
// One RUNNING instance already satisfies replicas: 1 — wait, this mocks serverRepo
]);
const result = await service.reconcileAll();
expect(result.reconciled).toBe(0);
expect(instanceRepo.create).not.toHaveBeenCalled();
});
it('cleans up ERROR instances and creates replacements', async () => {
const server = makeServer({ id: 'srv-1', replicas: 1 });
vi.mocked(serverRepo.findAll).mockResolvedValue([server]);
vi.mocked(serverRepo.findById).mockResolvedValue(server);
vi.mocked(instanceRepo.findAll).mockResolvedValue([
makeInstance({ id: 'inst-dead', serverId: 'srv-1', status: 'ERROR', containerId: 'ctr-dead' }),
]);
const result = await service.reconcileAll();
// Should delete ERROR instance and create a new one
expect(result.reconciled).toBe(1);
expect(instanceRepo.delete).toHaveBeenCalledWith('inst-dead');
expect(instanceRepo.create).toHaveBeenCalled();
});
it('reconciles multiple servers independently', async () => {
const srv1 = makeServer({ id: 'srv-1', name: 'grafana', replicas: 1, dockerImage: 'grafana:latest' });
const srv2 = makeServer({ id: 'srv-2', name: 'node-red', replicas: 1, dockerImage: 'nodered:latest' });
vi.mocked(serverRepo.findAll).mockResolvedValue([srv1, srv2]);
vi.mocked(serverRepo.findById).mockImplementation(async (id) => {
if (id === 'srv-1') return srv1;
if (id === 'srv-2') return srv2;
return null;
});
// srv-1 has a running instance, srv-2 has none
vi.mocked(instanceRepo.findAll).mockImplementation(async (serverId) => {
if (serverId === 'srv-1') return [makeInstance({ serverId: 'srv-1', status: 'RUNNING' })];
return [];
});
const result = await service.reconcileAll();
// Only srv-2 needed reconciliation
expect(result.reconciled).toBe(1);
});
it('collects errors without stopping other servers', async () => {
const srv1 = makeServer({ id: 'srv-1', name: 'broken', replicas: 1 });
const srv2 = makeServer({ id: 'srv-2', name: 'healthy', replicas: 1, dockerImage: 'img:latest' });
vi.mocked(serverRepo.findAll).mockResolvedValue([srv1, srv2]);
vi.mocked(serverRepo.findById).mockImplementation(async (id) => {
if (id === 'srv-2') return srv2;
return null; // srv-1 can't be found → will error
});
vi.mocked(instanceRepo.findAll).mockResolvedValue([]);
const result = await service.reconcileAll();
// srv-1 errored, srv-2 reconciled
expect(result.errors).toHaveLength(1);
expect(result.errors[0]).toContain('broken');
expect(result.reconciled).toBe(1);
});
});
});

View File

@@ -121,8 +121,8 @@ describe('generatePodSpec', () => {
it('sets security context', () => {
const pod = generatePodSpec(baseSpec, 'default');
const sc = pod.spec.containers[0]!.securityContext;
expect(sc.runAsNonRoot).toBe(true);
expect(sc.readOnlyRootFilesystem).toBe(true);
expect(sc.runAsNonRoot).toBe(false);
expect(sc.readOnlyRootFilesystem).toBe(false);
expect(sc.allowPrivilegeEscalation).toBe(false);
});

View File

@@ -1,86 +1,127 @@
import { describe, it, expect, vi, beforeEach } from 'vitest';
import type { K8sClientConfig } from '../src/services/k8s/k8s-client.js';
import type { ContainerSpec } from '../src/services/orchestrator.js';
// Mock the K8sClient before importing KubernetesOrchestrator
vi.mock('../src/services/k8s/k8s-client.js', () => {
class MockK8sClient {
defaultNamespace: string;
// Store mock handlers so tests can override
_handlers = new Map<string, { statusCode: number; body: unknown }>();
// Mock @kubernetes/client-node before imports
vi.mock('@kubernetes/client-node', () => {
const handlers = new Map<string, { resolve: unknown; reject?: unknown }>();
constructor(config: K8sClientConfig) {
this.defaultNamespace = config.namespace ?? 'default';
}
function setHandler(key: string, resolveVal: unknown, rejectVal?: unknown) {
handlers.set(key, { resolve: resolveVal, reject: rejectVal });
}
_setResponse(key: string, statusCode: number, body: unknown) {
this._handlers.set(key, { statusCode, body });
}
function getHandler(key: string) {
return handlers.get(key);
}
_getResponse(key: string) {
return this._handlers.get(key) ?? { statusCode: 200, body: {} };
}
function clearHandlers() {
handlers.clear();
}
async get(path: string) { return this._getResponse(`GET:${path}`); }
async post(path: string, _body: unknown) { return this._getResponse(`POST:${path}`); }
async delete(path: string) { return this._getResponse(`DELETE:${path}`); }
async patch(path: string, _body: unknown) { return this._getResponse(`PATCH:${path}`); }
async getLogs(_ns: string, _pod: string, _opts?: unknown) {
return this._getResponse('LOGS')?.body ?? '';
}
const mockCore = {
listNamespace: vi.fn(async () => {
const h = getHandler('listNamespace');
if (h?.reject) throw h.reject;
return h?.resolve ?? { items: [] };
}),
createNamespacedPod: vi.fn(async (params: { namespace: string; body: { metadata: { name: string } } }) => {
const h = getHandler('createNamespacedPod');
if (h?.reject) throw h.reject;
return h?.resolve ?? params.body;
}),
readNamespacedPod: vi.fn(async (params: { name: string }) => {
const h = getHandler(`readNamespacedPod:${params.name}`);
if (h?.reject) throw h.reject;
return h?.resolve;
}),
deleteNamespacedPod: vi.fn(async (params: { name: string }) => {
const h = getHandler(`deleteNamespacedPod:${params.name}`);
if (h?.reject) throw h.reject;
return h?.resolve ?? {};
}),
listNamespacedPod: vi.fn(async () => {
const h = getHandler('listNamespacedPod');
if (h?.reject) throw h.reject;
return h?.resolve ?? { items: [] };
}),
readNamespace: vi.fn(async (params: { name: string }) => {
const h = getHandler(`readNamespace:${params.name}`);
if (h?.reject) throw h.reject;
return h?.resolve ?? {};
}),
createNamespace: vi.fn(async () => {
const h = getHandler('createNamespace');
if (h?.reject) throw h.reject;
return h?.resolve ?? {};
}),
};
class MockKubeConfig {
loadFromDefault = vi.fn();
setCurrentContext = vi.fn();
getContexts = vi.fn(() => []);
getCurrentContext = vi.fn(() => 'default');
makeApiClient = vi.fn(() => mockCore);
}
class MockExec {
exec = vi.fn();
}
class MockAttach {
attach = vi.fn();
}
class MockLog {
log = vi.fn();
}
return {
K8sClient: MockK8sClient,
loadDefaultConfig: vi.fn(),
parseKubeconfig: vi.fn(),
KubeConfig: MockKubeConfig,
CoreV1Api: class {},
Exec: MockExec,
Attach: MockAttach,
Log: MockLog,
// Export test helpers
__testHelpers: { setHandler, getHandler, clearHandlers, mockCore },
};
});
// Import after mock
import { KubernetesOrchestrator } from '../src/services/k8s/kubernetes-orchestrator.js';
import type { ContainerSpec } from '../src/services/orchestrator.js';
function getClient(orch: KubernetesOrchestrator): {
_setResponse(key: string, statusCode: number, body: unknown): void;
} {
// Access private client for test setup
return (orch as unknown as { client: { _setResponse(k: string, sc: number, b: unknown): void } }).client;
}
const testConfig: K8sClientConfig = {
apiServer: 'https://localhost:6443',
token: 'test-token',
namespace: 'test-ns',
};
// eslint-disable-next-line @typescript-eslint/no-explicit-any
const k8sMock = await import('@kubernetes/client-node') as any;
const { setHandler, clearHandlers, mockCore } = k8sMock.__testHelpers;
const testSpec: ContainerSpec = {
image: 'mcpctl/server:latest',
image: 'mysources.co.uk/michal/mcpctl-node-runner:latest',
name: 'my-server',
env: { PORT: '3000' },
containerPort: 3000,
};
const podStatusRunning = {
const podRunning = {
metadata: {
name: 'my-server',
namespace: 'test-ns',
namespace: 'mcpctl-servers',
creationTimestamp: '2026-01-01T00:00:00Z',
labels: { 'mcpctl.managed': 'true' },
},
status: {
phase: 'Running',
podIP: '10.42.0.15',
containerStatuses: [{
state: { running: { startedAt: '2026-01-01T00:00:00Z' } },
}],
},
spec: {
containers: [{ ports: [{ containerPort: 3000 }] }],
containers: [{ name: 'my-server', ports: [{ containerPort: 3000 }] }],
},
};
const podStatusPending = {
const podPending = {
metadata: {
name: 'my-server',
namespace: 'test-ns',
namespace: 'mcpctl-servers',
creationTimestamp: '2026-01-01T00:00:00Z',
},
status: {
@@ -89,23 +130,28 @@ const podStatusPending = {
state: { waiting: { reason: 'ContainerCreating' } },
}],
},
spec: {
containers: [{ name: 'my-server' }],
},
};
describe('KubernetesOrchestrator', () => {
let orch: KubernetesOrchestrator;
beforeEach(() => {
orch = new KubernetesOrchestrator(testConfig);
clearHandlers();
vi.clearAllMocks();
orch = new KubernetesOrchestrator({ serversNamespace: 'mcpctl-servers' });
});
describe('ping', () => {
it('returns true on successful API call', async () => {
getClient(orch)._setResponse('GET:/api/v1', 200, { kind: 'APIResourceList' });
setHandler('listNamespace', { items: [] });
expect(await orch.ping()).toBe(true);
});
it('returns false on error', async () => {
getClient(orch)._setResponse('GET:/api/v1', 500, { message: 'internal error' });
setHandler('listNamespace', undefined, new Error('connection refused'));
expect(await orch.ping()).toBe(false);
});
});
@@ -118,113 +164,94 @@ describe('KubernetesOrchestrator', () => {
describe('createContainer', () => {
it('creates a pod and returns container info', async () => {
const client = getClient(orch);
// ensureNamespace check
client._setResponse('GET:/api/v1/namespaces/test-ns', 200, {});
// create pod
client._setResponse('POST:/api/v1/namespaces/test-ns/pods', 201, podStatusRunning);
// inspect after creation
client._setResponse('GET:/api/v1/namespaces/test-ns/pods/my-server', 200, podStatusRunning);
// ensureNamespace
setHandler('readNamespace:mcpctl-servers', {});
// createPod returns the pod
setHandler('createNamespacedPod', podRunning);
// inspectContainer after create
setHandler('readNamespacedPod:my-server', podRunning);
const info = await orch.createContainer(testSpec);
expect(info.containerId).toBe('my-server');
expect(info.state).toBe('running');
expect(info.port).toBe(3000);
expect(info.ip).toBe('10.42.0.15');
});
it('throws on API error', async () => {
const client = getClient(orch);
client._setResponse('GET:/api/v1/namespaces/test-ns', 200, {});
client._setResponse('POST:/api/v1/namespaces/test-ns/pods', 422, {
message: 'pod already exists',
});
setHandler('readNamespace:mcpctl-servers', {});
setHandler('createNamespacedPod', undefined, new Error('pod already exists'));
await expect(orch.createContainer(testSpec)).rejects.toThrow('Failed to create pod');
await expect(orch.createContainer(testSpec)).rejects.toThrow('pod already exists');
});
});
describe('inspectContainer', () => {
it('returns running container info', async () => {
getClient(orch)._setResponse('GET:/api/v1/namespaces/test-ns/pods/my-server', 200, podStatusRunning);
it('returns running container info with pod IP', async () => {
setHandler('readNamespacedPod:my-server', podRunning);
const info = await orch.inspectContainer('my-server');
expect(info.state).toBe('running');
expect(info.name).toBe('my-server');
expect(info.ip).toBe('10.42.0.15');
expect(info.port).toBe(3000);
});
it('maps pending state correctly', async () => {
getClient(orch)._setResponse('GET:/api/v1/namespaces/test-ns/pods/my-server', 200, podStatusPending);
setHandler('readNamespacedPod:my-server', podPending);
const info = await orch.inspectContainer('my-server');
expect(info.state).toBe('starting');
});
it('throws on 404', async () => {
getClient(orch)._setResponse('GET:/api/v1/namespaces/test-ns/pods/missing', 404, {
message: 'pods "missing" not found',
});
it('throws when pod not found', async () => {
setHandler('readNamespacedPod:missing', undefined, { statusCode: 404, message: 'not found' });
await expect(orch.inspectContainer('missing')).rejects.toThrow('not found');
await expect(orch.inspectContainer('missing')).rejects.toBeDefined();
});
});
describe('stopContainer', () => {
it('deletes the pod', async () => {
getClient(orch)._setResponse('DELETE:/api/v1/namespaces/test-ns/pods/my-server', 200, {});
setHandler('deleteNamespacedPod:my-server', {});
await expect(orch.stopContainer('my-server')).resolves.toBeUndefined();
});
});
describe('removeContainer', () => {
it('deletes the pod successfully', async () => {
getClient(orch)._setResponse('DELETE:/api/v1/namespaces/test-ns/pods/my-server', 200, {});
setHandler('deleteNamespacedPod:my-server', {});
await expect(orch.removeContainer('my-server')).resolves.toBeUndefined();
});
it('ignores 404 (already deleted)', async () => {
getClient(orch)._setResponse('DELETE:/api/v1/namespaces/test-ns/pods/my-server', 404, {});
setHandler('deleteNamespacedPod:my-server', undefined, { statusCode: 404 });
await expect(orch.removeContainer('my-server')).resolves.toBeUndefined();
});
it('throws on other errors', async () => {
getClient(orch)._setResponse('DELETE:/api/v1/namespaces/test-ns/pods/my-server', 403, {
message: 'forbidden',
});
await expect(orch.removeContainer('my-server')).rejects.toThrow('Failed to delete pod');
});
});
describe('getContainerLogs', () => {
it('returns logs from pod', async () => {
getClient(orch)._setResponse('LOGS', 200, 'log line 1\nlog line 2\n');
const logs = await orch.getContainerLogs('my-server');
expect(logs.stdout).toBe('log line 1\nlog line 2\n');
expect(logs.stderr).toBe('');
setHandler('deleteNamespacedPod:my-server', undefined, { statusCode: 403, message: 'forbidden' });
await expect(orch.removeContainer('my-server')).rejects.toBeDefined();
});
});
describe('listContainers', () => {
it('lists managed pods', async () => {
getClient(orch)._setResponse(
'GET:/api/v1/namespaces/test-ns/pods?labelSelector=mcpctl.managed%3Dtrue',
200,
{ items: [podStatusRunning] },
);
setHandler('listNamespacedPod', { items: [podRunning] });
const containers = await orch.listContainers();
expect(containers).toHaveLength(1);
expect(containers[0]!.containerId).toBe('my-server');
expect(containers[0]!.state).toBe('running');
expect(containers[0]!.ip).toBe('10.42.0.15');
expect(mockCore.listNamespacedPod).toHaveBeenCalledWith(
expect.objectContaining({ labelSelector: 'mcpctl.managed=true' }),
);
});
it('returns empty on API error', async () => {
getClient(orch)._setResponse(
'GET:/api/v1/namespaces/test-ns/pods?labelSelector=mcpctl.managed%3Dtrue',
500,
{},
);
it('returns empty when no pods', async () => {
setHandler('listNamespacedPod', { items: [] });
const containers = await orch.listContainers();
expect(containers).toEqual([]);
});
@@ -232,35 +259,100 @@ describe('KubernetesOrchestrator', () => {
describe('ensureNamespace', () => {
it('does nothing if namespace exists', async () => {
getClient(orch)._setResponse('GET:/api/v1/namespaces/test-ns', 200, {});
setHandler('readNamespace:test-ns', {});
await expect(orch.ensureNamespace('test-ns')).resolves.toBeUndefined();
expect(mockCore.createNamespace).not.toHaveBeenCalled();
});
it('creates namespace if not found', async () => {
const client = getClient(orch);
client._setResponse('GET:/api/v1/namespaces/new-ns', 404, {});
client._setResponse('POST:/api/v1/namespaces', 201, {});
setHandler('readNamespace:new-ns', undefined, { statusCode: 404 });
setHandler('createNamespace', {});
await expect(orch.ensureNamespace('new-ns')).resolves.toBeUndefined();
expect(mockCore.createNamespace).toHaveBeenCalled();
});
it('handles conflict (namespace already created by another process)', async () => {
const client = getClient(orch);
client._setResponse('GET:/api/v1/namespaces/new-ns', 404, {});
client._setResponse('POST:/api/v1/namespaces', 409, { message: 'already exists' });
setHandler('readNamespace:new-ns', undefined, { statusCode: 404 });
setHandler('createNamespace', undefined, { statusCode: 409, message: 'already exists' });
await expect(orch.ensureNamespace('new-ns')).resolves.toBeUndefined();
});
});
describe('getNamespace', () => {
it('returns configured namespace', () => {
expect(orch.getNamespace()).toBe('test-ns');
expect(orch.getNamespace()).toBe('mcpctl-servers');
});
it('defaults to "default"', () => {
const defaultOrch = new KubernetesOrchestrator({
apiServer: 'https://localhost:6443',
});
expect(defaultOrch.getNamespace()).toBe('default');
it('defaults to mcpctl-servers', () => {
const defaultOrch = new KubernetesOrchestrator();
expect(defaultOrch.getNamespace()).toBe('mcpctl-servers');
});
});
// Pod IP handling: inspectContainer should surface status.podIP when
// present and leave ip undefined when the pod has no address yet.
describe('pod IP extraction', () => {
  it('extracts podIP from status', async () => {
    setHandler('readNamespacedPod:my-server', podRunning);
    const info = await orch.inspectContainer('my-server');
    expect(info.ip).toBe('10.42.0.15');
  });

  it('returns undefined ip when no podIP', async () => {
    // Clone the running-pod fixture but strip the assigned address.
    const addressless = {
      ...podRunning,
      status: { ...podRunning.status, podIP: undefined },
    };
    setHandler('readNamespacedPod:my-server', addressless);
    const info = await orch.inspectContainer('my-server');
    expect(info.ip).toBeUndefined();
  });
});
// Asserts the pod manifest handed to the Kubernetes API by createContainer
// carries the expected security-related settings.
describe('manifest security', () => {
// NOTE(review): despite the "hardening" name, runAsNonRoot and
// readOnlyRootFilesystem are asserted false below — confirm this
// relaxation is intentional (matches the same flip in pod-spec tests).
it('creates pods with security hardening', async () => {
setHandler('readNamespace:mcpctl-servers', {});
setHandler('createNamespacedPod', podRunning);
setHandler('readNamespacedPod:my-server', podRunning);
await orch.createContainer(testSpec);
// First call to the mocked createNamespacedPod holds the submitted manifest.
const createCall = mockCore.createNamespacedPod.mock.calls[0]![0];
const container = createCall.body.spec.containers[0];
expect(container.securityContext.runAsNonRoot).toBe(false);
expect(container.securityContext.readOnlyRootFilesystem).toBe(false);
expect(container.securityContext.allowPrivilegeEscalation).toBe(false);
expect(container.securityContext.capabilities.drop).toEqual(['ALL']);
expect(container.securityContext.seccompProfile.type).toBe('RuntimeDefault');
});
it('creates pods with automountServiceAccountToken disabled', async () => {
setHandler('readNamespace:mcpctl-servers', {});
setHandler('createNamespacedPod', podRunning);
setHandler('readNamespacedPod:my-server', podRunning);
await orch.createContainer(testSpec);
const createCall = mockCore.createNamespacedPod.mock.calls[0]![0];
// Managed pods must not receive the default service-account token mount.
expect(createCall.body.spec.automountServiceAccountToken).toBe(false);
});
it('creates pods with stdin enabled for STDIO servers', async () => {
setHandler('readNamespace:mcpctl-servers', {});
setHandler('createNamespacedPod', podRunning);
setHandler('readNamespacedPod:my-server', podRunning);
await orch.createContainer(testSpec);
const createCall = mockCore.createNamespacedPod.mock.calls[0]![0];
// stdin: true keeps the container's stdin open for the STDIO transport.
expect(createCall.body.spec.containers[0].stdin).toBe(true);
});
});
// Constructing with an explicit kube context must not break defaults.
describe('context enforcement', () => {
it('sets context when configured', () => {
const _orch = new KubernetesOrchestrator({ context: 'default' });
// The mock KubeConfig.setCurrentContext should have been called
// This verifies the safety mechanism works
// NOTE(review): nothing here asserts setCurrentContext was actually
// invoked — consider expect(...setCurrentContext).toHaveBeenCalledWith('default').
expect(_orch.getNamespace()).toBe('mcpctl-servers');
});
});
});

View File

@@ -484,7 +484,7 @@ describe('MCP server full flow', () => {
expect(instancesRes.statusCode).toBe(200);
const instances = instancesRes.json<Array<{ id: string; status: string; containerId: string }>>();
expect(instances).toHaveLength(1);
expect(instances[0]!.status).toBe('RUNNING');
expect(instances[0]!.status).toBe('STARTING');
expect(instances[0]!.containerId).toBeTruthy();
// 3. Verify orchestrator was called with correct spec
@@ -564,7 +564,7 @@ describe('MCP server full flow', () => {
expect(listRes.statusCode).toBe(200);
const instances = listRes.json<Array<{ id: string; status: string }>>();
expect(instances).toHaveLength(1);
expect(instances[0]!.status).toBe('RUNNING');
expect(instances[0]!.status).toBe('STARTING');
const instanceId = instances[0]!.id;
// Delete instance → triggers reconcile → new instance auto-created

22
templates/gitea.yaml Normal file
View File

@@ -0,0 +1,22 @@
# Template: Gitea MCP server — custom Docker image, STDIO transport.
name: gitea
version: "1.0.0"
description: Gitea MCP server for repositories, issues, PRs, and code management
# Upstream image published by the Gitea project.
dockerImage: "docker.gitea.com/gitea-mcp-server:latest"
transport: STDIO
repositoryUrl: https://gitea.com/gitea/gitea-mcp
# Entry point inside the image; "-t stdio" selects the STDIO transport.
command:
  - /app/gitea-mcp
  - -t
  - stdio
# Health check disabled: STDIO health probe requires packageName (npm-based servers).
# This server uses a custom dockerImage. Probe support for dockerImage STDIO servers is TODO.
env:
  - name: GITEA_HOST
    description: Gitea instance URL (e.g. https://gitea.example.com)
    required: true
  - name: GITEA_ACCESS_TOKEN
    description: Gitea personal access token
    required: true
  - name: GITEA_INSECURE
    description: Allow self-signed certificates (true/false, default false)
    required: false

View File

@@ -0,0 +1,25 @@
# Template: UniFi Network MCP server — Python runner, STDIO transport.
name: unifi-network
version: "1.0.0"
description: UniFi Network MCP server for managing UniFi network devices, clients, and configuration
# Resolved and executed via the Python runner rather than a dockerImage.
packageName: "unifi-network-mcp"
runtime: python
transport: STDIO
repositoryUrl: https://github.com/sirkirby/unifi-mcp
# Health check disabled: STDIO health probe requires packageName (npm-based servers).
# This server uses the Python runner. Probe support for Python runner STDIO servers is TODO.
env:
  - name: UNIFI_HOST
    description: UniFi controller hostname or IP (e.g. unifi.example.com — without https://)
    required: true
  - name: UNIFI_USERNAME
    description: UniFi local admin username
    required: true
  - name: UNIFI_PASSWORD
    description: UniFi admin password
    required: true
  - name: UNIFI_NETWORK_PORT
    description: UniFi controller port (default 443, use 8443 for standalone UniFi Controller)
    required: false
  - name: UNIFI_NETWORK_VERIFY_SSL
    description: Verify SSL certificate (true/false, default true — set false for self-signed certs)
    required: false