fix: accurate instance status — STARTING until pod is actually running
All checks were successful
CI/CD / typecheck (pull_request) Successful in 52s
CI/CD / lint (pull_request) Successful in 1m53s
CI/CD / test (pull_request) Successful in 1m2s
CI/CD / build (pull_request) Successful in 4m0s
CI/CD / smoke (pull_request) Successful in 8m38s
CI/CD / publish-rpm (pull_request) Has been skipped
CI/CD / publish-deb (pull_request) Has been skipped
Instance status now reflects actual container state:

- startOne() sets STARTING (not RUNNING) after container creation
- syncStatus() promotes STARTING→RUNNING when pod is ready
- syncStatus() demotes RUNNING→STARTING if pod restarts (CrashLoop)
- External servers still get RUNNING immediately (no container)

Previously, CrashLooping pods showed as RUNNING in mcpctl get instances.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
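The lifecycle the commit message describes boils down to a small transition function. The sketch below is a distilled restatement, not code from the patch; the type names are assumptions, while the lowercase container states and uppercase instance statuses are taken from the diff that follows:

```ts
// A sketch of the transitions syncStatus applies on each pass (types are assumed shapes).
type ContainerState = 'starting' | 'running' | 'stopped' | 'error';
type InstanceStatus = 'STARTING' | 'RUNNING' | 'ERROR';

function nextStatus(current: InstanceStatus, container: ContainerState): InstanceStatus {
  if (container === 'stopped' || container === 'error') return 'ERROR';     // container died
  if (container === 'starting' && current === 'RUNNING') return 'STARTING'; // CrashLoop restart
  if (container === 'running' && current === 'STARTING') return 'RUNNING';  // pod became ready
  return current;                                                           // no change
}
```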
@@ -49,6 +49,7 @@ export class InstanceService {
-    if (inst.status === 'RUNNING' && inst.containerId) {
+    if ((inst.status === 'RUNNING' || inst.status === 'STARTING') && inst.containerId) {
       try {
         const info = await this.orchestrator.inspectContainer(inst.containerId);
+
         if (info.state === 'stopped' || info.state === 'error') {
           // Container died — get last logs for error context
           let errorMsg = `Container ${info.state}`;
@@ -60,6 +61,12 @@ export class InstanceService {
           await this.instanceRepo.updateStatus(inst.id, 'ERROR', {
             metadata: { error: errorMsg },
           });
+        } else if (info.state === 'starting' && inst.status === 'RUNNING') {
+          // Pod went back to starting (e.g. CrashLoopBackOff restart)
+          await this.instanceRepo.updateStatus(inst.id, 'STARTING', {});
+        } else if (info.state === 'running' && inst.status === 'STARTING') {
+          // Pod became ready — promote to RUNNING
+          await this.instanceRepo.updateStatus(inst.id, 'RUNNING', {});
         }
       } catch {
         // Container gone entirely
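Since readiness is now reported asynchronously, a client that needs a ready instance should poll rather than trust the creation response. A minimal sketch, assuming the GET /instances endpoint and the response shape used in the test excerpts below; the helper itself is hypothetical:

```ts
// Hypothetical readiness wait for API consumers; baseUrl and the /instances route are assumptions.
async function waitUntilRunning(baseUrl: string, id: string, timeoutMs = 30_000): Promise<void> {
  const deadline = Date.now() + timeoutMs;
  while (Date.now() < deadline) {
    const res = await fetch(`${baseUrl}/instances`);
    const instances: Array<{ id: string; status: string }> = await res.json();
    const inst = instances.find((i) => i.id === id);
    if (inst?.status === 'RUNNING') return;                     // promoted by syncStatus
    if (inst?.status === 'ERROR') throw new Error(`instance ${id} failed to start`);
    await new Promise((resolve) => setTimeout(resolve, 500));   // poll interval
  }
  throw new Error(`timed out waiting for instance ${id} to reach RUNNING`);
}
```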
@@ -305,7 +312,8 @@ export class InstanceService {
         updateFields.port = containerInfo.port;
       }

-      instance = await this.instanceRepo.updateStatus(instance.id, 'RUNNING', updateFields);
+      // Set STARTING — syncStatus will promote to RUNNING once the container is actually ready
+      instance = await this.instanceRepo.updateStatus(instance.id, 'STARTING', updateFields);
     } catch (err) {
       instance = await this.instanceRepo.updateStatus(instance.id, 'ERROR', {
         metadata: { error: err instanceof Error ? err.message : String(err) },
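The startOne change above is the other half of the fix: creation no longer asserts RUNNING optimistically. The instance is persisted as STARTING, and only syncStatus, which observes the real container state, promotes it. Converging status from observation rather than assuming it at creation is what makes the CrashLoop demotion possible.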
@@ -484,7 +484,7 @@ describe('MCP server full flow', () => {
     expect(instancesRes.statusCode).toBe(200);
     const instances = instancesRes.json<Array<{ id: string; status: string; containerId: string }>>();
     expect(instances).toHaveLength(1);
-    expect(instances[0]!.status).toBe('RUNNING');
+    expect(instances[0]!.status).toBe('STARTING');
     expect(instances[0]!.containerId).toBeTruthy();

     // 3. Verify orchestrator was called with correct spec
@@ -564,7 +564,7 @@ describe('MCP server full flow', () => {
     expect(listRes.statusCode).toBe(200);
     const instances = listRes.json<Array<{ id: string; status: string }>>();
     expect(instances).toHaveLength(1);
-    expect(instances[0]!.status).toBe('RUNNING');
+    expect(instances[0]!.status).toBe('STARTING');
     const instanceId = instances[0]!.id;

     // Delete instance → triggers reconcile → new instance auto-created