commit d0aa0c5d631d64b08a7f0b2fe9101316f35b661f Author: Michal Date: Sat Feb 21 03:10:39 2026 +0000 first commit diff --git a/.claude/commands/tm/add-dependency.md b/.claude/commands/tm/add-dependency.md new file mode 100644 index 0000000..f479b5e --- /dev/null +++ b/.claude/commands/tm/add-dependency.md @@ -0,0 +1,58 @@ +Add Dependency + +Arguments: $ARGUMENTS +Add a dependency between tasks. + +Arguments: $ARGUMENTS + +Parse the task IDs to establish dependency relationship. + +## Adding Dependencies + +Creates a dependency where one task must be completed before another can start. + +## Argument Parsing + +Parse natural language or IDs: +- "make 5 depend on 3" → task 5 depends on task 3 +- "5 needs 3" → task 5 depends on task 3 +- "5 3" → task 5 depends on task 3 +- "5 after 3" → task 5 depends on task 3 + +## Execution + +```bash +task-master add-dependency --id= --depends-on= +``` + +## Validation + +Before adding: +1. **Verify both tasks exist** +2. **Check for circular dependencies** +3. **Ensure dependency makes logical sense** +4. **Warn if creating complex chains** + +## Smart Features + +- Detect if dependency already exists +- Suggest related dependencies +- Show impact on task flow +- Update task priorities if needed + +## Post-Addition + +After adding dependency: +1. Show updated dependency graph +2. Identify any newly blocked tasks +3. Suggest task order changes +4. Update project timeline + +## Example Flows + +``` +/taskmaster:add-dependency 5 needs 3 +→ Task #5 now depends on Task #3 +→ Task #5 is now blocked until #3 completes +→ Suggested: Also consider if #5 needs #4 +``` \ No newline at end of file diff --git a/.claude/commands/tm/add-subtask.md b/.claude/commands/tm/add-subtask.md new file mode 100644 index 0000000..731d41a --- /dev/null +++ b/.claude/commands/tm/add-subtask.md @@ -0,0 +1,79 @@ +Add Subtask + +Arguments: $ARGUMENTS +Add a subtask to a parent task. 
+ +Arguments: $ARGUMENTS + +Parse arguments to create a new subtask or convert existing task. + +## Adding Subtasks + +Creates subtasks to break down complex parent tasks into manageable pieces. + +## Argument Parsing + +Flexible natural language: +- "add subtask to 5: implement login form" +- "break down 5 with: setup, implement, test" +- "subtask for 5: handle edge cases" +- "5: validate user input" → adds subtask to task 5 + +## Execution Modes + +### 1. Create New Subtask +```bash +task-master add-subtask --parent= --title="" --description="<desc>" +``` + +### 2. Convert Existing Task +```bash +task-master add-subtask --parent=<id> --task-id=<existing-id> +``` + +## Smart Features + +1. **Automatic Subtask Generation** + - If title contains "and" or commas, create multiple + - Suggest common subtask patterns + - Inherit parent's context + +2. **Intelligent Defaults** + - Priority based on parent + - Appropriate time estimates + - Logical dependencies between subtasks + +3. **Validation** + - Check parent task complexity + - Warn if too many subtasks + - Ensure subtask makes sense + +## Creation Process + +1. Parse parent task context +2. Generate subtask with ID like "5.1" +3. Set appropriate defaults +4. Link to parent task +5. 
Update parent's time estimate + +## Example Flows + +``` +/taskmaster:add-subtask to 5: implement user authentication +→ Created subtask #5.1: "implement user authentication" +→ Parent task #5 now has 1 subtask +→ Suggested next subtasks: tests, documentation + +/taskmaster:add-subtask 5: setup, implement, test +→ Created 3 subtasks: + #5.1: setup + #5.2: implement + #5.3: test +``` + +## Post-Creation + +- Show updated task hierarchy +- Suggest logical next subtasks +- Update complexity estimates +- Recommend subtask order \ No newline at end of file diff --git a/.claude/commands/tm/add-task.md b/.claude/commands/tm/add-task.md new file mode 100644 index 0000000..27ffe61 --- /dev/null +++ b/.claude/commands/tm/add-task.md @@ -0,0 +1,81 @@ +Add Task + +Arguments: $ARGUMENTS +Add new tasks with intelligent parsing and context awareness. + +Arguments: $ARGUMENTS + +## Smart Task Addition + +Parse natural language to create well-structured tasks. + +### 1. **Input Understanding** + +I'll intelligently parse your request: +- Natural language → Structured task +- Detect priority from keywords (urgent, ASAP, important) +- Infer dependencies from context +- Suggest complexity based on description +- Determine task type (feature, bug, refactor, test, docs) + +### 2. **Smart Parsing Examples** + +**"Add urgent task to fix login bug"** +→ Title: Fix login bug +→ Priority: high +→ Type: bug +→ Suggested complexity: medium + +**"Create task for API documentation after task 23 is done"** +→ Title: API documentation +→ Dependencies: [23] +→ Type: documentation +→ Priority: medium + +**"Need to refactor auth module - depends on 12 and 15, high complexity"** +→ Title: Refactor auth module +→ Dependencies: [12, 15] +→ Complexity: high +→ Type: refactor + +### 3. **Context Enhancement** + +Based on current project state: +- Suggest related existing tasks +- Warn about potential conflicts +- Recommend dependencies +- Propose subtasks if complex + +### 4. 
**Interactive Refinement** + +```yaml +Task Preview: +───────────── +Title: [Extracted title] +Priority: [Inferred priority] +Dependencies: [Detected dependencies] +Complexity: [Estimated complexity] + +Suggestions: +- Similar task #34 exists, consider as dependency? +- This seems complex, break into subtasks? +- Tasks #45-47 work on same module +``` + +### 5. **Validation & Creation** + +Before creating: +- Validate dependencies exist +- Check for duplicates +- Ensure logical ordering +- Verify task completeness + +### 6. **Smart Defaults** + +Intelligent defaults based on: +- Task type patterns +- Team conventions +- Historical data +- Current sprint/phase + +Result: High-quality tasks from minimal input. \ No newline at end of file diff --git a/.claude/commands/tm/analyze-complexity.md b/.claude/commands/tm/analyze-complexity.md new file mode 100644 index 0000000..980c468 --- /dev/null +++ b/.claude/commands/tm/analyze-complexity.md @@ -0,0 +1,124 @@ +Analyze Complexity + +Arguments: $ARGUMENTS +Analyze task complexity and generate expansion recommendations. + +Arguments: $ARGUMENTS + +Perform deep analysis of task complexity across the project. + +## Complexity Analysis + +Uses AI to analyze tasks and recommend which ones need breakdown. + +## Execution Options + +```bash +task-master analyze-complexity [--research] [--threshold=5] +``` + +## Analysis Parameters + +- `--research` → Use research AI for deeper analysis +- `--threshold=5` → Only flag tasks above complexity 5 +- Default: Analyze all pending tasks + +## Analysis Process + +### 1. **Task Evaluation** +For each task, AI evaluates: +- Technical complexity +- Time requirements +- Dependency complexity +- Risk factors +- Knowledge requirements + +### 2. **Complexity Scoring** +Assigns score 1-10 based on: +- Implementation difficulty +- Integration challenges +- Testing requirements +- Unknown factors +- Technical debt risk + +### 3. 
**Recommendations** +For complex tasks: +- Suggest expansion approach +- Recommend subtask breakdown +- Identify risk areas +- Propose mitigation strategies + +## Smart Analysis Features + +1. **Pattern Recognition** + - Similar task comparisons + - Historical complexity accuracy + - Team velocity consideration + - Technology stack factors + +2. **Contextual Factors** + - Team expertise + - Available resources + - Timeline constraints + - Business criticality + +3. **Risk Assessment** + - Technical risks + - Timeline risks + - Dependency risks + - Knowledge gaps + +## Output Format + +``` +Task Complexity Analysis Report +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +High Complexity Tasks (>7): +📍 #5 "Implement real-time sync" - Score: 9/10 + Factors: WebSocket complexity, state management, conflict resolution + Recommendation: Expand into 5-7 subtasks + Risks: Performance, data consistency + +📍 #12 "Migrate database schema" - Score: 8/10 + Factors: Data migration, zero downtime, rollback strategy + Recommendation: Expand into 4-5 subtasks + Risks: Data loss, downtime + +Medium Complexity Tasks (5-7): +📍 #23 "Add export functionality" - Score: 6/10 + Consider expansion if timeline tight + +Low Complexity Tasks (<5): +✅ 15 tasks - No expansion needed + +Summary: +- Expand immediately: 2 tasks +- Consider expanding: 5 tasks +- Keep as-is: 15 tasks +``` + +## Actionable Output + +For each high-complexity task: +1. Complexity score with reasoning +2. Specific expansion suggestions +3. Risk mitigation approaches +4. 
Recommended subtask structure + +## Integration + +Results are: +- Saved to `.taskmaster/reports/complexity-analysis.md` +- Used by expand command +- Inform sprint planning +- Guide resource allocation + +## Next Steps + +After analysis: +``` +/taskmaster:expand 5 # Expand specific task +/taskmaster:expand-all # Expand all recommended +/taskmaster:complexity-report # View detailed report +``` \ No newline at end of file diff --git a/.claude/commands/tm/analyze-project.md b/.claude/commands/tm/analyze-project.md new file mode 100644 index 0000000..2b32edc --- /dev/null +++ b/.claude/commands/tm/analyze-project.md @@ -0,0 +1,100 @@ +Analyze Project + +Arguments: $ARGUMENTS +Advanced project analysis with actionable insights and recommendations. + +Arguments: $ARGUMENTS + +## Comprehensive Project Analysis + +Multi-dimensional analysis based on requested focus area. + +### 1. **Analysis Modes** + +Based on $ARGUMENTS: +- "velocity" → Sprint velocity and trends +- "quality" → Code quality metrics +- "risk" → Risk assessment and mitigation +- "dependencies" → Dependency graph analysis +- "team" → Workload and skill distribution +- "architecture" → System design coherence +- Default → Full spectrum analysis + +### 2. **Velocity Analytics** + +``` +📊 Velocity Analysis +━━━━━━━━━━━━━━━━━━━ +Current Sprint: 24 points/week ↗️ +20% +Rolling Average: 20 points/week +Efficiency: 85% (17/20 tasks on time) + +Bottlenecks Detected: +- Code review delays (avg 4h wait) +- Test environment availability +- Dependency on external team + +Recommendations: +1. Implement parallel review process +2. Add staging environment +3. Mock external dependencies +``` + +### 3. 
**Risk Assessment** + +**Technical Risks** +- High complexity tasks without backup assignee +- Single points of failure in architecture +- Insufficient test coverage in critical paths +- Technical debt accumulation rate + +**Project Risks** +- Critical path dependencies +- Resource availability gaps +- Deadline feasibility analysis +- Scope creep indicators + +### 4. **Dependency Intelligence** + +Visual dependency analysis: +``` +Critical Path: +#12 → #15 → #23 → #45 → #50 (20 days) + ↘ #24 → #46 ↗ + +Optimization: Parallelize #15 and #24 +Time Saved: 3 days +``` + +### 5. **Quality Metrics** + +**Code Quality** +- Test coverage trends +- Complexity scores +- Technical debt ratio +- Review feedback patterns + +**Process Quality** +- Rework frequency +- Bug introduction rate +- Time to resolution +- Knowledge distribution + +### 6. **Predictive Insights** + +Based on patterns: +- Completion probability by deadline +- Resource needs projection +- Risk materialization likelihood +- Suggested interventions + +### 7. **Executive Dashboard** + +High-level summary with: +- Health score (0-100) +- Top 3 risks +- Top 3 opportunities +- Recommended actions +- Success probability + +Result: Data-driven decisions with clear action paths. \ No newline at end of file diff --git a/.claude/commands/tm/auto-implement-tasks.md b/.claude/commands/tm/auto-implement-tasks.md new file mode 100644 index 0000000..b44e808 --- /dev/null +++ b/.claude/commands/tm/auto-implement-tasks.md @@ -0,0 +1,100 @@ +Auto Implement Tasks + +Arguments: $ARGUMENTS +Enhanced auto-implementation with intelligent code generation and testing. + +Arguments: $ARGUMENTS + +## Intelligent Auto-Implementation + +Advanced implementation with context awareness and quality checks. + +### 1. 
**Pre-Implementation Analysis** + +Before starting: +- Analyze task complexity and requirements +- Check codebase patterns and conventions +- Identify similar completed tasks +- Assess test coverage needs +- Detect potential risks + +### 2. **Smart Implementation Strategy** + +Based on task type and context: + +**Feature Tasks** +1. Research existing patterns +2. Design component architecture +3. Implement with tests +4. Integrate with system +5. Update documentation + +**Bug Fix Tasks** +1. Reproduce issue +2. Identify root cause +3. Implement minimal fix +4. Add regression tests +5. Verify side effects + +**Refactoring Tasks** +1. Analyze current structure +2. Plan incremental changes +3. Maintain test coverage +4. Refactor step-by-step +5. Verify behavior unchanged + +### 3. **Code Intelligence** + +**Pattern Recognition** +- Learn from existing code +- Follow team conventions +- Use preferred libraries +- Match style guidelines + +**Test-Driven Approach** +- Write tests first when possible +- Ensure comprehensive coverage +- Include edge cases +- Performance considerations + +### 4. **Progressive Implementation** + +Step-by-step with validation: +``` +Step 1/5: Setting up component structure ✓ +Step 2/5: Implementing core logic ✓ +Step 3/5: Adding error handling ⚡ (in progress) +Step 4/5: Writing tests ⏳ +Step 5/5: Integration testing ⏳ + +Current: Adding try-catch blocks and validation... +``` + +### 5. **Quality Assurance** + +Automated checks: +- Linting and formatting +- Test execution +- Type checking +- Dependency validation +- Performance analysis + +### 6. **Smart Recovery** + +If issues arise: +- Diagnostic analysis +- Suggestion generation +- Fallback strategies +- Manual intervention points +- Learning from failures + +### 7. 
**Post-Implementation** + +After completion: +- Generate PR description +- Update documentation +- Log lessons learned +- Suggest follow-up tasks +- Update task relationships + +Result: High-quality, production-ready implementations. \ No newline at end of file diff --git a/.claude/commands/tm/command-pipeline.md b/.claude/commands/tm/command-pipeline.md new file mode 100644 index 0000000..5403a01 --- /dev/null +++ b/.claude/commands/tm/command-pipeline.md @@ -0,0 +1,80 @@ +Command Pipeline + +Arguments: $ARGUMENTS +Execute a pipeline of commands based on a specification. + +Arguments: $ARGUMENTS + +## Command Pipeline Execution + +Parse pipeline specification from arguments. Supported formats: + +### Simple Pipeline +`init → expand-all → sprint-plan` + +### Conditional Pipeline +`status → if:pending>10 → sprint-plan → else → next` + +### Iterative Pipeline +`for:pending-tasks → expand → complexity-check` + +### Smart Pipeline Patterns + +**1. Project Setup Pipeline** +``` +init [prd] → +expand-all → +complexity-report → +sprint-plan → +show first-sprint +``` + +**2. Daily Work Pipeline** +``` +standup → +if:in-progress → continue → +else → next → start +``` + +**3. Task Completion Pipeline** +``` +complete [id] → +git-commit → +if:blocked-tasks-freed → show-freed → +next +``` + +**4. Quality Check Pipeline** +``` +list in-progress → +for:each → check-idle-time → +if:idle>1day → prompt-update +``` + +### Pipeline Features + +**Variables** +- Store results: `status → $count=pending-count` +- Use in conditions: `if:$count>10` +- Pass between commands: `expand $high-priority-tasks` + +**Error Handling** +- On failure: `try:complete → catch:show-blockers` +- Skip on error: `optional:test-run` +- Retry logic: `retry:3:commit` + +**Parallel Execution** +- Parallel branches: `[analyze | test | lint]` +- Join results: `parallel → join:report` + +### Execution Flow + +1. Parse pipeline specification +2. Validate command sequence +3. Execute with state passing +4. 
Handle conditions and loops +5. Aggregate results +6. Show summary + +This enables complex workflows like: +`parse-prd → expand-all → filter:complex>70 → assign:senior → sprint-plan:weighted` \ No newline at end of file diff --git a/.claude/commands/tm/complexity-report.md b/.claude/commands/tm/complexity-report.md new file mode 100644 index 0000000..84dfc5c --- /dev/null +++ b/.claude/commands/tm/complexity-report.md @@ -0,0 +1,120 @@ +Complexity Report + +Arguments: $ARGUMENTS +Display the task complexity analysis report. + +Arguments: $ARGUMENTS + +View the detailed complexity analysis generated by analyze-complexity command. + +## Viewing Complexity Report + +Shows comprehensive task complexity analysis with actionable insights. + +## Execution + +```bash +task-master complexity-report [--file=<path>] +``` + +## Report Location + +Default: `.taskmaster/reports/complexity-analysis.md` +Custom: Specify with --file parameter + +## Report Contents + +### 1. **Executive Summary** +``` +Complexity Analysis Summary +━━━━━━━━━━━━━━━━━━━━━━━━ +Analysis Date: 2024-01-15 +Tasks Analyzed: 32 +High Complexity: 5 (16%) +Medium Complexity: 12 (37%) +Low Complexity: 15 (47%) + +Critical Findings: +- 5 tasks need immediate expansion +- 3 tasks have high technical risk +- 2 tasks block critical path +``` + +### 2. **Detailed Task Analysis** +For each complex task: +- Complexity score breakdown +- Contributing factors +- Specific risks identified +- Expansion recommendations +- Similar completed tasks + +### 3. **Risk Matrix** +Visual representation: +``` +Risk vs Complexity Matrix +━━━━━━━━━━━━━━━━━━━━━━━ +High Risk | #5(9) #12(8) | #23(6) +Med Risk | #34(7) | #45(5) #67(5) +Low Risk | #78(8) | [15 tasks] + | High Complex | Med Complex +``` + +### 4. **Recommendations** + +**Immediate Actions:** +1. Expand task #5 - Critical path + high complexity +2. Expand task #12 - High risk + dependencies +3. 
Review task #34 - Consider splitting + +**Sprint Planning:** +- Don't schedule multiple high-complexity tasks together +- Ensure expertise available for complex tasks +- Build in buffer time for unknowns + +## Interactive Features + +When viewing report: +1. **Quick Actions** + - Press 'e' to expand a task + - Press 'd' for task details + - Press 'r' to refresh analysis + +2. **Filtering** + - View by complexity level + - Filter by risk factors + - Show only actionable items + +3. **Export Options** + - Markdown format + - CSV for spreadsheets + - JSON for tools + +## Report Intelligence + +- Compares with historical data +- Shows complexity trends +- Identifies patterns +- Suggests process improvements + +## Integration + +Use report for: +- Sprint planning sessions +- Resource allocation +- Risk assessment +- Team discussions +- Client updates + +## Example Usage + +``` +/taskmaster:complexity-report +→ Opens latest analysis + +/taskmaster:complexity-report --file=archived/2024-01-01.md +→ View historical analysis + +After viewing: +/taskmaster:expand 5 +→ Expand high-complexity task +``` \ No newline at end of file diff --git a/.claude/commands/tm/convert-task-to-subtask.md b/.claude/commands/tm/convert-task-to-subtask.md new file mode 100644 index 0000000..b0d740a --- /dev/null +++ b/.claude/commands/tm/convert-task-to-subtask.md @@ -0,0 +1,74 @@ +Convert Task To Subtask + +Arguments: $ARGUMENTS +Convert an existing task into a subtask. + +Arguments: $ARGUMENTS + +Parse parent ID and task ID to convert. + +## Task Conversion + +Converts an existing standalone task into a subtask of another task. + +## Argument Parsing + +- "move task 8 under 5" +- "make 8 a subtask of 5" +- "nest 8 in 5" +- "5 8" → make task 8 a subtask of task 5 + +## Execution + +```bash +task-master add-subtask --parent=<parent-id> --task-id=<task-to-convert> +``` + +## Pre-Conversion Checks + +1. 
**Validation** + - Both tasks exist and are valid + - No circular parent relationships + - Task isn't already a subtask + - Logical hierarchy makes sense + +2. **Impact Analysis** + - Dependencies that will be affected + - Tasks that depend on converting task + - Priority alignment needed + - Status compatibility + +## Conversion Process + +1. Change task ID from "8" to "5.1" (next available) +2. Update all dependency references +3. Inherit parent's context where appropriate +4. Adjust priorities if needed +5. Update time estimates + +## Smart Features + +- Preserve task history +- Maintain dependencies +- Update all references +- Create conversion log + +## Example + +``` +/taskmaster:add-subtask/from-task 5 8 +→ Converting: Task #8 becomes subtask #5.1 +→ Updated: 3 dependency references +→ Parent task #5 now has 1 subtask +→ Note: Subtask inherits parent's priority + +Before: #8 "Implement validation" (standalone) +After: #5.1 "Implement validation" (subtask of #5) +``` + +## Post-Conversion + +- Show new task hierarchy +- List updated dependencies +- Verify project integrity +- Suggest related conversions \ No newline at end of file diff --git a/.claude/commands/tm/expand-all-tasks.md b/.claude/commands/tm/expand-all-tasks.md new file mode 100644 index 0000000..675ccb0 --- /dev/null +++ b/.claude/commands/tm/expand-all-tasks.md @@ -0,0 +1,52 @@ +Expand All Tasks +Expand all pending tasks that need subtasks. + +## Bulk Task Expansion + +Intelligently expands all tasks that would benefit from breakdown. + +## Execution + +```bash +task-master expand --all +``` + +## Smart Selection + +Only expands tasks that: +- Are marked as pending +- Have high complexity (>5) +- Lack existing subtasks +- Would benefit from breakdown + +## Expansion Process + +1. **Analysis Phase** + - Identify expansion candidates + - Group related tasks + - Plan expansion strategy + +2. 
**Batch Processing** + - Expand tasks in logical order + - Maintain consistency + - Preserve relationships + - Optimize for parallelism + +3. **Quality Control** + - Ensure subtask quality + - Avoid over-decomposition + - Maintain task coherence + - Update dependencies + +## Options + +- Add `force` to expand all regardless of complexity +- Add `research` for enhanced AI analysis + +## Results + +After bulk expansion: +- Summary of tasks expanded +- New subtask count +- Updated complexity metrics +- Suggested task order \ No newline at end of file diff --git a/.claude/commands/tm/expand-task.md b/.claude/commands/tm/expand-task.md new file mode 100644 index 0000000..4c9313e --- /dev/null +++ b/.claude/commands/tm/expand-task.md @@ -0,0 +1,52 @@ +Expand Task + +Arguments: $ARGUMENTS +Break down a complex task into subtasks. + +Arguments: $ARGUMENTS (task ID) + +## Intelligent Task Expansion + +Analyzes a task and creates detailed subtasks for better manageability. + +## Execution + +```bash +task-master expand --id=$ARGUMENTS +``` + +## Expansion Process + +1. **Task Analysis** + - Review task complexity + - Identify components + - Detect technical challenges + - Estimate time requirements + +2. **Subtask Generation** + - Create 3-7 subtasks typically + - Each subtask 1-4 hours + - Logical implementation order + - Clear acceptance criteria + +3. **Smart Breakdown** + - Setup/configuration tasks + - Core implementation + - Testing components + - Integration steps + - Documentation updates + +## Enhanced Features + +Based on task type: +- **Feature**: Setup → Implement → Test → Integrate +- **Bug Fix**: Reproduce → Diagnose → Fix → Verify +- **Refactor**: Analyze → Plan → Refactor → Validate + +## Post-Expansion + +After expansion: +1. Show subtask hierarchy +2. Update time estimates +3. Suggest implementation order +4. 
Highlight critical path \ No newline at end of file diff --git a/.claude/commands/tm/fix-dependencies.md b/.claude/commands/tm/fix-dependencies.md new file mode 100644 index 0000000..26cd842 --- /dev/null +++ b/.claude/commands/tm/fix-dependencies.md @@ -0,0 +1,82 @@ +Fix Dependencies +Automatically fix dependency issues found during validation. + +## Automatic Dependency Repair + +Intelligently fixes common dependency problems while preserving project logic. + +## Execution + +```bash +task-master fix-dependencies +``` + +## What Gets Fixed + +### 1. **Auto-Fixable Issues** +- Remove references to deleted tasks +- Break simple circular dependencies +- Remove self-dependencies +- Clean up duplicate dependencies + +### 2. **Smart Resolutions** +- Reorder dependencies to maintain logic +- Suggest task merging for over-dependent tasks +- Flatten unnecessary dependency chains +- Remove redundant transitive dependencies + +### 3. **Manual Review Required** +- Complex circular dependencies +- Critical path modifications +- Business logic dependencies +- High-impact changes + +## Fix Process + +1. **Analysis Phase** + - Run validation check + - Categorize issues by type + - Determine fix strategy + +2. **Execution Phase** + - Apply automatic fixes + - Log all changes made + - Preserve task relationships + +3. 
**Verification Phase** + - Re-validate after fixes + - Show before/after comparison + - Highlight manual fixes needed + +## Smart Features + +- Preserves intended task flow +- Minimal disruption approach +- Creates fix history/log +- Suggests manual interventions + +## Output Example + +``` +Dependency Auto-Fix Report +━━━━━━━━━━━━━━━━━━━━━━━━ +Fixed Automatically: +✅ Removed 2 references to deleted tasks +✅ Resolved 1 self-dependency +✅ Cleaned 3 redundant dependencies + +Manual Review Needed: +⚠️ Complex circular dependency: #12 → #15 → #18 → #12 + Suggestion: Make #15 not depend on #12 +⚠️ Task #45 has 8 dependencies + Suggestion: Break into subtasks + +Run '/taskmaster:validate-dependencies' to verify fixes +``` + +## Safety + +- Preview mode available +- Rollback capability +- Change logging +- No data loss \ No newline at end of file diff --git a/.claude/commands/tm/help.md b/.claude/commands/tm/help.md new file mode 100644 index 0000000..90b0c56 --- /dev/null +++ b/.claude/commands/tm/help.md @@ -0,0 +1,101 @@ +Help + +Arguments: $ARGUMENTS +Show help for Task Master AI commands. + +Arguments: $ARGUMENTS + +Display help for Task Master commands and available options. + +## Task Master AI Command Help + +### Quick Navigation + +Type `/taskmaster:` and use tab completion to explore all commands. 
+ +### Command Categories + +#### 🚀 Setup & Installation +- `/taskmaster:install-taskmaster` - Comprehensive installation guide +- `/taskmaster:quick-install-taskmaster` - One-line global install + +#### 📋 Project Setup +- `/taskmaster:init-project` - Initialize new project +- `/taskmaster:init-project-quick` - Quick setup with auto-confirm +- `/taskmaster:view-models` - View AI configuration +- `/taskmaster:setup-models` - Configure AI providers + +#### 🎯 Task Generation +- `/taskmaster:parse-prd` - Generate tasks from PRD +- `/taskmaster:parse-prd-with-research` - Enhanced parsing +- `/taskmaster:generate-tasks` - Create task files + +#### 📝 Task Management +- `/taskmaster:list-tasks` - List all tasks +- `/taskmaster:list-tasks-by-status` - List tasks filtered by status +- `/taskmaster:list-tasks-with-subtasks` - List tasks with subtasks +- `/taskmaster:show-task` - Display task details +- `/taskmaster:add-task` - Create new task +- `/taskmaster:update-task` - Update single task +- `/taskmaster:update-tasks-from-id` - Update multiple tasks +- `/taskmaster:next-task` - Get next task recommendation + +#### 🔄 Status Management +- `/taskmaster:to-pending` - Set task to pending +- `/taskmaster:to-in-progress` - Set task to in-progress +- `/taskmaster:to-done` - Set task to done +- `/taskmaster:to-review` - Set task to review +- `/taskmaster:to-deferred` - Set task to deferred +- `/taskmaster:to-cancelled` - Set task to cancelled + +#### 🔍 Analysis & Breakdown +- `/taskmaster:analyze-complexity` - Analyze task complexity +- `/taskmaster:complexity-report` - View complexity report +- `/taskmaster:expand-task` - Break down complex task +- `/taskmaster:expand-all-tasks` - Expand all eligible tasks + +#### 🔗 Dependencies +- `/taskmaster:add-dependency` - Add task dependency +- `/taskmaster:remove-dependency` - Remove dependency +- `/taskmaster:validate-dependencies` - Check for issues +- `/taskmaster:fix-dependencies` - Auto-fix dependency issues + +#### 📦 Subtasks +- 
`/taskmaster:add-subtask` - Add subtask to task +- `/taskmaster:convert-task-to-subtask` - Convert task to subtask +- `/taskmaster:remove-subtask` - Remove subtask +- `/taskmaster:remove-subtasks` - Clear specific task subtasks +- `/taskmaster:remove-all-subtasks` - Clear all subtasks + +#### 🗑️ Task Removal +- `/taskmaster:remove-task` - Remove task permanently + +#### 🤖 Workflows +- `/taskmaster:smart-workflow` - Intelligent workflows +- `/taskmaster:command-pipeline` - Command chaining +- `/taskmaster:auto-implement-tasks` - Auto-implementation + +#### 📊 Utilities +- `/taskmaster:analyze-project` - Project analysis +- `/taskmaster:project-status` - Project dashboard +- `/taskmaster:sync-readme` - Sync README with tasks +- `/taskmaster:learn` - Interactive learning +- `/taskmaster:tm-main` - Main Task Master interface + +### Quick Start Examples + +``` +/taskmaster:list-tasks +/taskmaster:show-task 1.2 +/taskmaster:add-task +/taskmaster:next-task +``` + +### Getting Started + +1. Install: `/taskmaster:quick-install-taskmaster` +2. Initialize: `/taskmaster:init-project-quick` +3. Learn: `/taskmaster:learn` +4. Work: `/taskmaster:smart-workflow` + +For detailed command info, run the specific command with `--help` or check command documentation. \ No newline at end of file diff --git a/.claude/commands/tm/init-project-quick.md b/.claude/commands/tm/init-project-quick.md new file mode 100644 index 0000000..020907d --- /dev/null +++ b/.claude/commands/tm/init-project-quick.md @@ -0,0 +1,49 @@ +Init Project Quick + +Arguments: $ARGUMENTS +Quick initialization with auto-confirmation. + +Arguments: $ARGUMENTS + +Initialize a Task Master project without prompts, accepting all defaults. + +## Quick Setup + +```bash +task-master init -y +``` + +## What It Does + +1. Creates `.taskmaster/` directory structure +2. Initializes empty `tasks.json` +3. Sets up default configuration +4. Uses directory name as project name +5. 
Skips all confirmation prompts + +## Smart Defaults + +- Project name: Current directory name +- Description: "Task Master Project" +- Model config: Existing environment vars +- Task structure: Standard format + +## Next Steps + +After quick init: +1. Configure AI models if needed: + ``` + /taskmaster:models/setup + ``` + +2. Parse PRD if available: + ``` + /taskmaster:parse-prd <file> + ``` + +3. Or create first task: + ``` + /taskmaster:add-task create initial setup + ``` + +Perfect for rapid project setup! \ No newline at end of file diff --git a/.claude/commands/tm/init-project.md b/.claude/commands/tm/init-project.md new file mode 100644 index 0000000..5d6019b --- /dev/null +++ b/.claude/commands/tm/init-project.md @@ -0,0 +1,53 @@ +Init Project + +Arguments: $ARGUMENTS +Initialize a new Task Master project. + +Arguments: $ARGUMENTS + +Parse arguments to determine initialization preferences. + +## Initialization Process + +1. **Parse Arguments** + - PRD file path (if provided) + - Project name + - Auto-confirm flag (-y) + +2. **Project Setup** + ```bash + task-master init + ``` + +3. **Smart Initialization** + - Detect existing project files + - Suggest project name from directory + - Check for git repository + - Verify AI provider configuration + +## Configuration Options + +Based on arguments: +- `quick` / `-y` → Skip confirmations +- `<file.md>` → Use as PRD after init +- `--name=<name>` → Set project name +- `--description=<desc>` → Set description + +## Post-Initialization + +After successful init: +1. Show project structure created +2. Verify AI models configured +3. 
Suggest next steps: + - Parse PRD if available + - Configure AI providers + - Set up git hooks + - Create first tasks + +## Integration + +If PRD file provided: +``` +/taskmaster:init my-prd.md +→ Automatically runs parse-prd after init +``` \ No newline at end of file diff --git a/.claude/commands/tm/install-taskmaster.md b/.claude/commands/tm/install-taskmaster.md new file mode 100644 index 0000000..65847c3 --- /dev/null +++ b/.claude/commands/tm/install-taskmaster.md @@ -0,0 +1,118 @@ +Install TaskMaster +Check if Task Master is installed and install it if needed. + +This command helps you get Task Master set up globally on your system. + +## Detection and Installation Process + +1. **Check Current Installation** + ```bash + # Check if task-master command exists + which task-master || echo "Task Master not found" + + # Check npm global packages + npm list -g task-master-ai + ``` + +2. **System Requirements Check** + ```bash + # Verify Node.js is installed + node --version + + # Verify npm is installed + npm --version + + # Check Node version (need 16+) + ``` + +3. **Install Task Master Globally** + If not installed, run: + ```bash + npm install -g task-master-ai + ``` + +4. **Verify Installation** + ```bash + # Check version + task-master --version + + # Verify command is available + which task-master + ``` + +5. **Initial Setup** + ```bash + # Initialize in current directory + task-master init + ``` + +6. **Configure AI Provider** + Ensure you have at least one AI provider API key set: + ```bash + # Check current configuration + task-master models --status + + # If no API keys found, guide setup + echo "You'll need at least one API key:" + echo "- ANTHROPIC_API_KEY for Claude" + echo "- OPENAI_API_KEY for GPT models" + echo "- PERPLEXITY_API_KEY for research" + echo "" + echo "Set them in your shell profile or .env file" + ``` + +7. 
**Quick Test** + ```bash + # Create a test PRD + echo "Build a simple hello world API" > test-prd.txt + + # Try parsing it + task-master parse-prd test-prd.txt -n 3 + ``` + +## Troubleshooting + +If installation fails: + +**Permission Errors:** +```bash +# Try with sudo (macOS/Linux) +sudo npm install -g task-master-ai + +# Or fix npm permissions +npm config set prefix ~/.npm-global +export PATH=~/.npm-global/bin:$PATH +``` + +**Network Issues:** +```bash +# Use different registry +npm install -g task-master-ai --registry https://registry.npmjs.org/ +``` + +**Node Version Issues:** +```bash +# Install Node 20+ via nvm +curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.0/install.sh | bash +nvm install 20 +nvm use 20 +``` + +## Success Confirmation + +Once installed, you should see: +``` +✅ Task Master installed +✅ Command 'task-master' available globally +✅ AI provider configured +✅ Ready to use slash commands! + +Try: /taskmaster:init your-prd.md +``` + +## Next Steps + +After installation: +1. Run `/taskmaster:status` to verify setup +2. Configure AI providers with `/taskmaster:setup-models` +3. Start using Task Master commands! \ No newline at end of file diff --git a/.claude/commands/tm/learn.md b/.claude/commands/tm/learn.md new file mode 100644 index 0000000..af4fb08 --- /dev/null +++ b/.claude/commands/tm/learn.md @@ -0,0 +1,106 @@ +Learn + +Arguments: $ARGUMENTS +Learn about Task Master capabilities through interactive exploration. + +Arguments: $ARGUMENTS + +## Interactive Task Master Learning + +Based on your input, I'll help you discover capabilities: + +### 1. **What are you trying to do?** + +If $ARGUMENTS contains: +- "start" / "begin" → Show project initialization workflows +- "manage" / "organize" → Show task management commands +- "automate" / "auto" → Show automation workflows +- "analyze" / "report" → Show analysis tools +- "fix" / "problem" → Show troubleshooting commands +- "fast" / "quick" → Show efficiency shortcuts + +### 2. 
**Intelligent Suggestions** + +Based on your project state: + +**No tasks yet?** +``` +You'll want to start with: +1. /project:task-master:init <prd-file> + → Creates tasks from requirements + +2. /project:task-master:parse-prd <file> + → Alternative task generation + +Try: /project:task-master:init demo-prd.md +``` + +**Have tasks?** +Let me analyze what you might need... +- Many pending tasks? → Learn sprint planning +- Complex tasks? → Learn task expansion +- Daily work? → Learn workflow automation + +### 3. **Command Discovery** + +**By Category:** +- 📋 Task Management: list, show, add, update, complete +- 🔄 Workflows: auto-implement, sprint-plan, daily-standup +- 🛠️ Utilities: check-health, complexity-report, sync-memory +- 🔍 Analysis: validate-deps, show dependencies + +**By Scenario:** +- "I want to see what to work on" → `/project:task-master:next` +- "I need to break this down" → `/project:task-master:expand <id>` +- "Show me everything" → `/project:task-master:status` +- "Just do it for me" → `/project:workflows:auto-implement` + +### 4. **Power User Patterns** + +**Command Chaining:** +``` +/project:task-master:next +/project:task-master:start <id> +/project:workflows:auto-implement +``` + +**Smart Filters:** +``` +/project:task-master:list pending high +/project:task-master:list blocked +/project:task-master:list 1-5 tree +``` + +**Automation:** +``` +/project:workflows:pipeline init → expand-all → sprint-plan +``` + +### 5. **Learning Path** + +Based on your experience level: + +**Beginner Path:** +1. init → Create project +2. status → Understand state +3. next → Find work +4. complete → Finish task + +**Intermediate Path:** +1. expand → Break down complex tasks +2. sprint-plan → Organize work +3. complexity-report → Understand difficulty +4. validate-deps → Ensure consistency + +**Advanced Path:** +1. pipeline → Chain operations +2. smart-flow → Context-aware automation +3. Custom commands → Extend the system + +### 6. 
**Try This Now** + +Based on what you asked about, try: +[Specific command suggestion based on $ARGUMENTS] + +Want to learn more about a specific command? +Type: /project:help <command-name> \ No newline at end of file diff --git a/.claude/commands/tm/list-tasks-by-status.md b/.claude/commands/tm/list-tasks-by-status.md new file mode 100644 index 0000000..61360e8 --- /dev/null +++ b/.claude/commands/tm/list-tasks-by-status.md @@ -0,0 +1,42 @@ +List Tasks By Status + +Arguments: $ARGUMENTS +List tasks filtered by a specific status. + +Arguments: $ARGUMENTS + +Parse the status from arguments and list only tasks matching that status. + +## Status Options +- `pending` - Not yet started +- `in-progress` - Currently being worked on +- `done` - Completed +- `review` - Awaiting review +- `deferred` - Postponed +- `cancelled` - Cancelled + +## Execution + +Based on $ARGUMENTS, run: +```bash +task-master list --status=$ARGUMENTS +``` + +## Enhanced Display + +For the filtered results: +- Group by priority within the status +- Show time in current status +- Highlight tasks approaching deadlines +- Display blockers and dependencies +- Suggest next actions for each status group + +## Intelligent Insights + +Based on the status filter: +- **Pending**: Show recommended start order +- **In-Progress**: Display idle time warnings +- **Done**: Show newly unblocked tasks +- **Review**: Indicate review duration +- **Deferred**: Show reactivation criteria +- **Cancelled**: Display impact analysis \ No newline at end of file diff --git a/.claude/commands/tm/list-tasks-with-subtasks.md b/.claude/commands/tm/list-tasks-with-subtasks.md new file mode 100644 index 0000000..86cfd9e --- /dev/null +++ b/.claude/commands/tm/list-tasks-with-subtasks.md @@ -0,0 +1,30 @@ +List Tasks With Subtasks +List all tasks including their subtasks in a hierarchical view. + +This command shows all tasks with their nested subtasks, providing a complete project overview. 
+ +## Execution + +Run the Task Master list command with subtasks flag: +```bash +task-master list --with-subtasks +``` + +## Enhanced Display + +I'll organize the output to show: +- Parent tasks with clear indicators +- Nested subtasks with proper indentation +- Status badges for quick scanning +- Dependencies and blockers highlighted +- Progress indicators for tasks with subtasks + +## Smart Filtering + +Based on the task hierarchy: +- Show completion percentage for parent tasks +- Highlight blocked subtask chains +- Group by functional areas +- Indicate critical path items + +This gives you a complete tree view of your project structure. \ No newline at end of file diff --git a/.claude/commands/tm/list-tasks.md b/.claude/commands/tm/list-tasks.md new file mode 100644 index 0000000..39c9f83 --- /dev/null +++ b/.claude/commands/tm/list-tasks.md @@ -0,0 +1,46 @@ +List Tasks + +Arguments: $ARGUMENTS +List tasks with intelligent argument parsing. + +Parse arguments to determine filters and display options: +- Status: pending, in-progress, done, review, deferred, cancelled +- Priority: high, medium, low (or priority:high) +- Special: subtasks, tree, dependencies, blocked +- IDs: Direct numbers (e.g., "1,3,5" or "1-5") +- Complex: "pending high" = pending AND high priority + +Arguments: $ARGUMENTS + +Let me parse your request intelligently: + +1. **Detect Filter Intent** + - If arguments contain status keywords → filter by status + - If arguments contain priority → filter by priority + - If arguments contain "subtasks" → include subtasks + - If arguments contain "tree" → hierarchical view + - If arguments contain numbers → show specific tasks + - If arguments contain "blocked" → show blocked tasks only + +2. 
**Smart Combinations** + Examples of what I understand: + - "pending high" → pending tasks with high priority + - "done today" → tasks completed today + - "blocked" → tasks with unmet dependencies + - "1-5" → tasks 1 through 5 + - "subtasks tree" → hierarchical view with subtasks + +3. **Execute Appropriate Query** + Based on parsed intent, run the most specific task-master command + +4. **Enhanced Display** + - Group by relevant criteria + - Show most important information first + - Use visual indicators for quick scanning + - Include relevant metrics + +5. **Intelligent Suggestions** + Based on what you're viewing, suggest next actions: + - Many pending? → Suggest priority order + - Many blocked? → Show dependency resolution + - Looking at specific tasks? → Show related tasks \ No newline at end of file diff --git a/.claude/commands/tm/next-task.md b/.claude/commands/tm/next-task.md new file mode 100644 index 0000000..25e6b16 --- /dev/null +++ b/.claude/commands/tm/next-task.md @@ -0,0 +1,69 @@ +Next Task + +Arguments: $ARGUMENTS +Intelligently determine and prepare the next action based on comprehensive context. + +This enhanced version of 'next' considers: +- Current task states +- Recent activity +- Time constraints +- Dependencies +- Your working patterns + +Arguments: $ARGUMENTS + +## Intelligent Next Action + +### 1. **Context Gathering** +Let me analyze the current situation: +- Active tasks (in-progress) +- Recently completed tasks +- Blocked tasks +- Time since last activity +- Arguments provided: $ARGUMENTS + +### 2. **Smart Decision Tree** + +**If you have an in-progress task:** +- Has it been idle > 2 hours? → Suggest resuming or switching +- Near completion? → Show remaining steps +- Blocked? → Find alternative task + +**If no in-progress tasks:** +- Unblocked high-priority tasks? → Start highest +- Complex tasks need breakdown? → Suggest expansion +- All tasks blocked? 
→ Show dependency resolution + +**Special arguments handling:** +- "quick" → Find task < 2 hours +- "easy" → Find low complexity task +- "important" → Find high priority regardless of complexity +- "continue" → Resume last worked task + +### 3. **Preparation Workflow** + +Based on selected task: +1. Show full context and history +2. Set up development environment +3. Run relevant tests +4. Open related files +5. Show similar completed tasks +6. Estimate completion time + +### 4. **Alternative Suggestions** + +Always provide options: +- Primary recommendation +- Quick alternative (< 1 hour) +- Strategic option (unblocks most tasks) +- Learning option (new technology/skill) + +### 5. **Workflow Integration** + +Seamlessly connect to: +- `/project:task-master:start [selected]` +- `/project:workflows:auto-implement` +- `/project:task-master:expand` (if complex) +- `/project:utils:complexity-report` (if unsure) + +The goal: Zero friction from decision to implementation. \ No newline at end of file diff --git a/.claude/commands/tm/parse-prd-with-research.md b/.claude/commands/tm/parse-prd-with-research.md new file mode 100644 index 0000000..d432597 --- /dev/null +++ b/.claude/commands/tm/parse-prd-with-research.md @@ -0,0 +1,51 @@ +Parse PRD With Research + +Arguments: $ARGUMENTS +Parse PRD with enhanced research mode for better task generation. + +Arguments: $ARGUMENTS (PRD file path) + +## Research-Enhanced Parsing + +Uses the research AI provider (typically Perplexity) for more comprehensive task generation with current best practices. + +## Execution + +```bash +task-master parse-prd --input=$ARGUMENTS --research +``` + +## Research Benefits + +1. **Current Best Practices** + - Latest framework patterns + - Security considerations + - Performance optimizations + - Accessibility requirements + +2. **Technical Deep Dive** + - Implementation approaches + - Library recommendations + - Architecture patterns + - Testing strategies + +3. 
**Comprehensive Coverage** + - Edge cases consideration + - Error handling tasks + - Monitoring setup + - Deployment tasks + +## Enhanced Output + +Research mode typically: +- Generates more detailed tasks +- Includes industry standards +- Adds compliance considerations +- Suggests modern tooling + +## When to Use + +- New technology domains +- Complex requirements +- Regulatory compliance needed +- Best practices crucial \ No newline at end of file diff --git a/.claude/commands/tm/parse-prd.md b/.claude/commands/tm/parse-prd.md new file mode 100644 index 0000000..070f9b9 --- /dev/null +++ b/.claude/commands/tm/parse-prd.md @@ -0,0 +1,52 @@ +Parse PRD + +Arguments: $ARGUMENTS +Parse a PRD document to generate tasks. + +Arguments: $ARGUMENTS (PRD file path) + +## Intelligent PRD Parsing + +Analyzes your requirements document and generates a complete task breakdown. + +## Execution + +```bash +task-master parse-prd --input=$ARGUMENTS +``` + +## Parsing Process + +1. **Document Analysis** + - Extract key requirements + - Identify technical components + - Detect dependencies + - Estimate complexity + +2. **Task Generation** + - Create 10-15 tasks by default + - Include implementation tasks + - Add testing tasks + - Include documentation tasks + - Set logical dependencies + +3. **Smart Enhancements** + - Group related functionality + - Set appropriate priorities + - Add acceptance criteria + - Include test strategies + +## Options + +Parse arguments for modifiers: +- Number after filename → `--num-tasks` +- `research` → Use research mode +- `comprehensive` → Generate more tasks + +## Post-Generation + +After parsing: +1. Display task summary +2. Show dependency graph +3. Suggest task expansion for complex items +4. 
Recommend sprint planning \ No newline at end of file diff --git a/.claude/commands/tm/project-status.md b/.claude/commands/tm/project-status.md new file mode 100644 index 0000000..bf6f3f1 --- /dev/null +++ b/.claude/commands/tm/project-status.md @@ -0,0 +1,67 @@ +Project Status + +Arguments: $ARGUMENTS +Enhanced status command with comprehensive project insights. + +Arguments: $ARGUMENTS + +## Intelligent Status Overview + +### 1. **Executive Summary** +Quick dashboard view: +- 🏃 Active work (in-progress tasks) +- 📊 Progress metrics (% complete, velocity) +- 🚧 Blockers and risks +- ⏱️ Time analysis (estimated vs actual) +- 🎯 Sprint/milestone progress + +### 2. **Contextual Analysis** + +Based on $ARGUMENTS, focus on: +- "sprint" → Current sprint progress and burndown +- "blocked" → Dependency chains and resolution paths +- "team" → Task distribution and workload +- "timeline" → Schedule adherence and projections +- "risk" → High complexity or overdue items + +### 3. **Smart Insights** + +**Workflow Health:** +- Idle tasks (in-progress > 24h without updates) +- Bottlenecks (multiple tasks waiting on same dependency) +- Quick wins (low complexity, high impact) + +**Predictive Analytics:** +- Completion projections based on velocity +- Risk of missing deadlines +- Recommended task order for optimal flow + +### 4. **Visual Intelligence** + +Dynamic visualization based on data: +``` +Sprint Progress: ████████░░ 80% (16/20 tasks) +Velocity Trend: ↗️ +15% this week +Blocked Tasks: 🔴 3 critical path items + +Priority Distribution: +High: ████████ 8 tasks (2 blocked) +Medium: ████░░░░ 4 tasks +Low: ██░░░░░░ 2 tasks +``` + +### 5. **Actionable Recommendations** + +Based on analysis: +1. **Immediate actions** (unblock critical path) +2. **Today's focus** (optimal task sequence) +3. **Process improvements** (recurring patterns) +4. **Resource needs** (skills, time, dependencies) + +### 6. 
**Historical Context** + +Compare to previous periods: +- Velocity changes +- Pattern recognition +- Improvement areas +- Success patterns to repeat \ No newline at end of file diff --git a/.claude/commands/tm/quick-install-taskmaster.md b/.claude/commands/tm/quick-install-taskmaster.md new file mode 100644 index 0000000..f4a94da --- /dev/null +++ b/.claude/commands/tm/quick-install-taskmaster.md @@ -0,0 +1,23 @@ +Quick Install TaskMaster +Quick install Task Master globally if not already installed. + +Execute this streamlined installation: + +```bash +# Check and install in one command +task-master --version 2>/dev/null || npm install -g task-master-ai + +# Verify installation +task-master --version + +# Quick setup check +task-master models --status || echo "Note: You'll need to set up an AI provider API key" +``` + +If you see "command not found" after installation, you may need to: +1. Restart your terminal +2. Or add npm global bin to PATH: `export PATH="$(npm config get prefix)/bin:$PATH"` (the `npm bin -g` subcommand was removed in npm v9) + +Once installed, you can use all the Task Master commands! + +Quick test: Run `/taskmaster:help` to see all available commands. \ No newline at end of file diff --git a/.claude/commands/tm/remove-all-subtasks.md b/.claude/commands/tm/remove-all-subtasks.md new file mode 100644 index 0000000..28bcc4c --- /dev/null +++ b/.claude/commands/tm/remove-all-subtasks.md @@ -0,0 +1,94 @@ +Remove All Subtasks +Clear all subtasks from all tasks globally. + +## Global Subtask Clearing + +Remove all subtasks across the entire project. Use with extreme caution. + +## Execution + +```bash +task-master clear-subtasks --all +``` + +## Pre-Clear Analysis + +1. **Project-Wide Summary** + ``` + Global Subtask Summary + ━━━━━━━━━━━━━━━━━━━━ + Total parent tasks: 12 + Total subtasks: 47 + - Completed: 15 + - In-progress: 8 + - Pending: 24 + + Work at risk: ~120 hours + ``` + +2. 
**Critical Warnings** + - In-progress subtasks that will lose work + - Completed subtasks with valuable history + - Complex dependency chains + - Integration test results + +## Double Confirmation + +``` +⚠️ DESTRUCTIVE OPERATION WARNING ⚠️ +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +This will remove ALL 47 subtasks from your project +Including 8 in-progress and 15 completed subtasks + +This action CANNOT be undone + +Type 'CLEAR ALL SUBTASKS' to confirm: +``` + +## Smart Safeguards + +- Require explicit confirmation phrase +- Create automatic backup +- Log all removed data +- Option to export first + +## Use Cases + +Valid reasons for global clear: +- Project restructuring +- Major pivot in approach +- Starting fresh breakdown +- Switching to different task organization + +## Process + +1. Full project analysis +2. Create backup file +3. Show detailed impact +4. Require confirmation +5. Execute removal +6. Generate summary report + +## Alternative Suggestions + +Before clearing all: +- Export subtasks to file +- Clear only pending subtasks +- Clear by task category +- Archive instead of delete + +## Post-Clear Report + +``` +Global Subtask Clear Complete +━━━━━━━━━━━━━━━━━━━━━━━━━━━ +Removed: 47 subtasks from 12 tasks +Backup saved: .taskmaster/backup/subtasks-20240115.json +Parent tasks updated: 12 +Time estimates adjusted: Yes + +Next steps: +- Review updated task list +- Re-expand complex tasks as needed +- Check project timeline +``` \ No newline at end of file diff --git a/.claude/commands/tm/remove-dependency.md b/.claude/commands/tm/remove-dependency.md new file mode 100644 index 0000000..abef7e7 --- /dev/null +++ b/.claude/commands/tm/remove-dependency.md @@ -0,0 +1,65 @@ +Remove Dependency + +Arguments: $ARGUMENTS +Remove a dependency between tasks. + +Arguments: $ARGUMENTS + +Parse the task IDs to remove dependency relationship. + +## Removing Dependencies + +Removes a dependency relationship, potentially unblocking tasks. 
+ +## Argument Parsing + +Parse natural language or IDs: +- "remove dependency between 5 and 3" +- "5 no longer needs 3" +- "unblock 5 from 3" +- "5 3" → remove dependency of 5 on 3 + +## Execution + +```bash +task-master remove-dependency --id=<task-id> --depends-on=<dependency-id> +``` + +## Pre-Removal Checks + +1. **Verify dependency exists** +2. **Check impact on task flow** +3. **Warn if it breaks logical sequence** +4. **Show what will be unblocked** + +## Smart Analysis + +Before removing: +- Show why dependency might have existed +- Check if removal makes tasks executable +- Verify no critical path disruption +- Suggest alternative dependencies + +## Post-Removal + +After removing: +1. Show updated task status +2. List newly unblocked tasks +3. Update project timeline +4. Suggest next actions + +## Safety Features + +- Confirm if removing critical dependency +- Show tasks that become immediately actionable +- Warn about potential issues +- Keep removal history + +## Example + +``` +/taskmaster:remove-dependency 5 from 3 +→ Removed: Task #5 no longer depends on #3 +→ Task #5 is now UNBLOCKED and ready to start +→ Warning: Consider if #5 still needs #2 completed first +``` \ No newline at end of file diff --git a/.claude/commands/tm/remove-subtask.md b/.claude/commands/tm/remove-subtask.md new file mode 100644 index 0000000..aec002f --- /dev/null +++ b/.claude/commands/tm/remove-subtask.md @@ -0,0 +1,87 @@ +Remove Subtask + +Arguments: $ARGUMENTS +Remove a subtask from its parent task. + +Arguments: $ARGUMENTS + +Parse subtask ID to remove, with option to convert to standalone task. + +## Removing Subtasks + +Remove a subtask and optionally convert it back to a standalone task. + +## Argument Parsing + +- "remove subtask 5.1" +- "delete 5.1" +- "convert 5.1 to task" → remove and convert +- "5.1 standalone" → convert to standalone + +## Execution Options + +### 1. Delete Subtask +```bash +task-master remove-subtask --id=<parentId.subtaskId> +``` + +### 2. 
Convert to Standalone +```bash +task-master remove-subtask --id=<parentId.subtaskId> --convert +``` + +## Pre-Removal Checks + +1. **Validate Subtask** + - Verify subtask exists + - Check completion status + - Review dependencies + +2. **Impact Analysis** + - Other subtasks that depend on it + - Parent task implications + - Data that will be lost + +## Removal Process + +### For Deletion: +1. Confirm if subtask has work done +2. Update parent task estimates +3. Remove subtask and its data +4. Clean up dependencies + +### For Conversion: +1. Assign new standalone task ID +2. Preserve all task data +3. Update dependency references +4. Maintain task history + +## Smart Features + +- Warn if subtask is in-progress +- Show impact on parent task +- Preserve important data +- Update related estimates + +## Example Flows + +``` +/taskmaster:remove-subtask 5.1 +→ Warning: Subtask #5.1 is in-progress +→ This will delete all subtask data +→ Parent task #5 will be updated +Confirm deletion? (y/n) + +/taskmaster:remove-subtask 5.1 convert +→ Converting subtask #5.1 to standalone task #89 +→ Preserved: All task data and history +→ Updated: 2 dependency references +→ New task #89 is now independent +``` + +## Post-Removal + +- Update parent task status +- Recalculate estimates +- Show updated hierarchy +- Suggest next actions \ No newline at end of file diff --git a/.claude/commands/tm/remove-subtasks.md b/.claude/commands/tm/remove-subtasks.md new file mode 100644 index 0000000..27d95d2 --- /dev/null +++ b/.claude/commands/tm/remove-subtasks.md @@ -0,0 +1,89 @@ +Remove Subtasks + +Arguments: $ARGUMENTS +Clear all subtasks from a specific task. + +Arguments: $ARGUMENTS (task ID) + +Remove all subtasks from a parent task at once. + +## Clearing Subtasks + +Bulk removal of all subtasks from a parent task. + +## Execution + +```bash +task-master clear-subtasks --id=$ARGUMENTS +``` + +## Pre-Clear Analysis + +1. 
**Subtask Summary** + - Number of subtasks + - Completion status of each + - Work already done + - Dependencies affected + +2. **Impact Assessment** + - Data that will be lost + - Dependencies to be removed + - Effect on project timeline + - Parent task implications + +## Confirmation Required + +``` +Remove Subtasks Confirmation +━━━━━━━━━━━━━━━━━━━━━━━━━ +Parent Task: #5 "Implement user authentication" +Subtasks to remove: 4 +- #5.1 "Setup auth framework" (done) +- #5.2 "Create login form" (in-progress) +- #5.3 "Add validation" (pending) +- #5.4 "Write tests" (pending) + +⚠️ This will permanently delete all subtask data +Continue? (y/n) +``` + +## Smart Features + +- Option to convert to standalone tasks +- Backup task data before clearing +- Preserve completed work history +- Update parent task appropriately + +## Process + +1. List all subtasks for confirmation +2. Check for in-progress work +3. Remove all subtasks +4. Update parent task +5. Clean up dependencies + +## Alternative Options + +Suggest alternatives: +- Convert important subtasks to tasks +- Keep completed subtasks +- Archive instead of delete +- Export subtask data first + +## Post-Clear + +- Show updated parent task +- Recalculate time estimates +- Update task complexity +- Suggest next steps + +## Example + +``` +/taskmaster:remove-subtasks 5 +→ Found 4 subtasks to remove +→ Warning: Subtask #5.2 is in-progress +→ Cleared all subtasks from task #5 +→ Updated parent task estimates +→ Suggestion: Consider re-expanding with better breakdown +``` \ No newline at end of file diff --git a/.claude/commands/tm/remove-task.md b/.claude/commands/tm/remove-task.md new file mode 100644 index 0000000..0ffb9a9 --- /dev/null +++ b/.claude/commands/tm/remove-task.md @@ -0,0 +1,110 @@ +Remove Task + +Arguments: $ARGUMENTS +Remove a task permanently from the project. + +Arguments: $ARGUMENTS (task ID) + +Delete a task and handle all its relationships properly. 
+ +## Task Removal + +Permanently removes a task while maintaining project integrity. + +## Argument Parsing + +- "remove task 5" +- "delete 5" +- "5" → remove task 5 +- Can include "-y" for auto-confirm + +## Execution + +```bash +task-master remove-task --id=<id> [-y] +``` + +## Pre-Removal Analysis + +1. **Task Details** + - Current status + - Work completed + - Time invested + - Associated data + +2. **Relationship Check** + - Tasks that depend on this + - Dependencies this task has + - Subtasks that will be removed + - Blocking implications + +3. **Impact Assessment** + ``` + Task Removal Impact + ━━━━━━━━━━━━━━━━━━ + Task: #5 "Implement authentication" (in-progress) + Status: 60% complete (~8 hours work) + + Will affect: + - 3 tasks depend on this (will be blocked) + - Has 4 subtasks (will be deleted) + - Part of critical path + + ⚠️ This action cannot be undone + ``` + +## Smart Warnings + +- Warn if task is in-progress +- Show dependent tasks that will be blocked +- Highlight if part of critical path +- Note any completed work being lost + +## Removal Process + +1. Show comprehensive impact +2. Require confirmation (unless -y) +3. Update dependent task references +4. Remove task and subtasks +5. Clean up orphaned dependencies +6. Log removal with timestamp + +## Alternative Actions + +Suggest before deletion: +- Mark as cancelled instead +- Convert to documentation +- Archive task data +- Transfer work to another task + +## Post-Removal + +- List affected tasks +- Show broken dependencies +- Update project statistics +- Suggest dependency fixes +- Recalculate timeline + +## Example Flows + +``` +/taskmaster:remove-task 5 +→ Task #5 is in-progress with 8 hours logged +→ 3 other tasks depend on this +→ Suggestion: Mark as cancelled instead? +Remove anyway? 
(y/n) + +/taskmaster:remove-task 5 -y +→ Removed: Task #5 and 4 subtasks +→ Updated: 3 task dependencies +→ Warning: Tasks #7, #8, #9 now have missing dependency +→ Run /taskmaster:fix-dependencies to resolve +``` + +## Safety Features + +- Confirmation required +- Impact preview +- Removal logging +- Suggest alternatives +- No cascade delete of dependents \ No newline at end of file diff --git a/.claude/commands/tm/setup-models.md b/.claude/commands/tm/setup-models.md new file mode 100644 index 0000000..be5c017 --- /dev/null +++ b/.claude/commands/tm/setup-models.md @@ -0,0 +1,52 @@ +Setup Models +Run interactive setup to configure AI models. + +## Interactive Model Configuration + +Guides you through setting up AI providers for Task Master. + +## Execution + +```bash +task-master models --setup +``` + +## Setup Process + +1. **Environment Check** + - Detect existing API keys + - Show current configuration + - Identify missing providers + +2. **Provider Selection** + - Choose main provider (required) + - Select research provider (recommended) + - Configure fallback (optional) + +3. **API Key Configuration** + - Prompt for missing keys + - Validate key format + - Test connectivity + - Save configuration + +## Smart Recommendations + +Based on your needs: +- **For best results**: Claude + Perplexity +- **Budget conscious**: GPT-3.5 + Perplexity +- **Maximum capability**: GPT-4 + Perplexity + Claude fallback + +## Configuration Storage + +Keys can be stored in: +1. Environment variables (recommended) +2. `.env` file in project +3. 
Global `.taskmaster/config` + +## Post-Setup + +After configuration: +- Test each provider +- Show usage examples +- Suggest next steps +- Verify parse-prd works \ No newline at end of file diff --git a/.claude/commands/tm/show-task.md b/.claude/commands/tm/show-task.md new file mode 100644 index 0000000..8af00f8 --- /dev/null +++ b/.claude/commands/tm/show-task.md @@ -0,0 +1,85 @@ +Show Task + +Arguments: $ARGUMENTS +Show detailed task information with rich context and insights. + +Arguments: $ARGUMENTS + +## Enhanced Task Display + +Parse arguments to determine what to show and how. + +### 1. **Smart Task Selection** + +Based on $ARGUMENTS: +- Number → Show specific task with full context +- "current" → Show active in-progress task(s) +- "next" → Show recommended next task +- "blocked" → Show all blocked tasks with reasons +- "critical" → Show critical path tasks +- Multiple IDs → Comparative view + +### 2. **Contextual Information** + +For each task, intelligently include: + +**Core Details** +- Full task information (id, title, description, details) +- Current status with history +- Test strategy and acceptance criteria +- Priority and complexity analysis + +**Relationships** +- Dependencies (what it needs) +- Dependents (what needs it) +- Parent/subtask hierarchy +- Related tasks (similar work) + +**Time Intelligence** +- Created/updated timestamps +- Time in current status +- Estimated vs actual time +- Historical completion patterns + +### 3. **Visual Enhancements** + +``` +📋 Task #45: Implement User Authentication +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +Status: 🟡 in-progress (2 hours) +Priority: 🔴 High | Complexity: 73/100 + +Dependencies: ✅ #41, ✅ #42, ⏳ #43 (blocked) +Blocks: #46, #47, #52 + +Progress: ████████░░ 80% complete + +Recent Activity: +- 2h ago: Status changed to in-progress +- 4h ago: Dependency #42 completed +- Yesterday: Task expanded with 3 subtasks +``` + +### 4. 
**Intelligent Insights** + +Based on task analysis: +- **Risk Assessment**: Complexity vs time remaining +- **Bottleneck Analysis**: Is this blocking critical work? +- **Recommendation**: Suggested approach or concerns +- **Similar Tasks**: How others completed similar work + +### 5. **Action Suggestions** + +Context-aware next steps: +- If blocked → Show how to unblock +- If complex → Suggest expansion +- If in-progress → Show completion checklist +- If done → Show dependent tasks ready to start + +### 6. **Multi-Task View** + +When showing multiple tasks: +- Common dependencies +- Optimal completion order +- Parallel work opportunities +- Combined complexity analysis \ No newline at end of file diff --git a/.claude/commands/tm/smart-workflow.md b/.claude/commands/tm/smart-workflow.md new file mode 100644 index 0000000..82383ff --- /dev/null +++ b/.claude/commands/tm/smart-workflow.md @@ -0,0 +1,58 @@ +Smart Workflow + +Arguments: $ARGUMENTS +Execute an intelligent workflow based on current project state and recent commands. + +This command analyzes: +1. Recent commands you've run +2. Current project state +3. Time of day / day of week +4. Your working patterns + +Arguments: $ARGUMENTS + +## Intelligent Workflow Selection + +Based on context, I'll determine the best workflow: + +### Context Analysis +- Previous command executed +- Current task states +- Unfinished work from last session +- Your typical patterns + +### Smart Execution + +If last command was: +- `status` → Likely starting work → Run daily standup +- `complete` → Task finished → Find next task +- `list pending` → Planning → Suggest sprint planning +- `expand` → Breaking down work → Show complexity analysis +- `init` → New project → Show onboarding workflow + +If no recent commands: +- Morning? → Daily standup workflow +- Many pending tasks? → Sprint planning +- Tasks blocked? → Dependency resolution +- Friday? → Weekly review + +### Workflow Composition + +I'll chain appropriate commands: +1. 
Analyze current state +2. Execute primary workflow +3. Suggest follow-up actions +4. Prepare environment for coding + +### Learning Mode + +This command learns from your patterns: +- Track command sequences +- Note time preferences +- Remember common workflows +- Adapt to your style + +Example flows detected: +- Morning: standup → next → start +- After lunch: status → continue task +- End of day: complete → commit → status \ No newline at end of file diff --git a/.claude/commands/tm/sync-readme.md b/.claude/commands/tm/sync-readme.md new file mode 100644 index 0000000..7cd1f2a --- /dev/null +++ b/.claude/commands/tm/sync-readme.md @@ -0,0 +1,120 @@ +Sync README + +Arguments: $ARGUMENTS +Export tasks to README.md with professional formatting. + +Arguments: $ARGUMENTS + +Generate a well-formatted README with current task information. + +## README Synchronization + +Creates or updates README.md with beautifully formatted task information. + +## Argument Parsing + +Optional filters: +- "pending" → Only pending tasks +- "with-subtasks" → Include subtask details +- "by-priority" → Group by priority +- "sprint" → Current sprint only + +## Execution + +```bash +task-master sync-readme [--with-subtasks] [--status=<status>] +``` + +## README Generation + +### 1. **Project Header** +```markdown +# Project Name + +## 📋 Task Progress + +Last Updated: 2024-01-15 10:30 AM + +### Summary +- Total Tasks: 45 +- Completed: 15 (33%) +- In Progress: 5 (11%) +- Pending: 25 (56%) +``` + +### 2. **Task Sections** +Organized by status or priority: +- Progress indicators +- Task descriptions +- Dependencies noted +- Time estimates + +### 3. **Visual Elements** +- Progress bars +- Status badges +- Priority indicators +- Completion checkmarks + +## Smart Features + +1. **Intelligent Grouping** + - By feature area + - By sprint/milestone + - By assigned developer + - By priority + +2. 
**Progress Tracking** + - Overall completion + - Sprint velocity + - Burndown indication + - Time tracking + +3. **Formatting Options** + - GitHub-flavored markdown + - Task checkboxes + - Collapsible sections + - Table format available + +## Example Output + +```markdown +## 🚀 Current Sprint + +### In Progress +- [ ] 🔄 #5 **Implement user authentication** (60% complete) + - Dependencies: API design (#3 ✅) + - Subtasks: 4 (2 completed) + - Est: 8h / Spent: 5h + +### Pending (High Priority) +- [ ] ⚡ #8 **Create dashboard UI** + - Blocked by: #5 + - Complexity: High + - Est: 12h +``` + +## Customization + +Based on arguments: +- Include/exclude sections +- Detail level control +- Custom grouping +- Filter by criteria + +## Post-Sync + +After generation: +1. Show diff preview +2. Backup existing README +3. Write new content +4. Commit reminder +5. Update timestamp + +## Integration + +Works well with: +- Git workflows +- CI/CD pipelines +- Project documentation +- Team updates +- Client reports \ No newline at end of file diff --git a/.claude/commands/tm/tm-main.md b/.claude/commands/tm/tm-main.md new file mode 100644 index 0000000..57d6877 --- /dev/null +++ b/.claude/commands/tm/tm-main.md @@ -0,0 +1,147 @@ +Task Master Main +# Task Master Command Reference + +Comprehensive command structure for Task Master integration with Claude Code. + +## Command Organization + +Commands are organized hierarchically to match Task Master's CLI structure while providing enhanced Claude Code integration. 
+ +## Project Setup & Configuration + +### `/taskmaster:init` +- `init-project` - Initialize new project (handles PRD files intelligently) +- `init-project-quick` - Quick setup with auto-confirmation (-y flag) + +### `/taskmaster:models` +- `view-models` - View current AI model configuration +- `setup-models` - Interactive model configuration +- `set-main` - Set primary generation model +- `set-research` - Set research model +- `set-fallback` - Set fallback model + +## Task Generation + +### `/taskmaster:parse-prd` +- `parse-prd` - Generate tasks from PRD document +- `parse-prd-with-research` - Enhanced parsing with research mode + +### `/taskmaster:generate` +- `generate-tasks` - Create individual task files from tasks.json + +## Task Management + +### `/taskmaster:list` +- `list-tasks` - Smart listing with natural language filters +- `list-tasks-with-subtasks` - Include subtasks in hierarchical view +- `list-tasks-by-status` - Filter by specific status + +### `/taskmaster:set-status` +- `to-pending` - Reset task to pending +- `to-in-progress` - Start working on task +- `to-done` - Mark task complete +- `to-review` - Submit for review +- `to-deferred` - Defer task +- `to-cancelled` - Cancel task + +### `/taskmaster:sync-readme` +- `sync-readme` - Export tasks to README.md with formatting + +### `/taskmaster:update` +- `update-task` - Update tasks with natural language +- `update-tasks-from-id` - Update multiple tasks from a starting point +- `update-single-task` - Update specific task + +### `/taskmaster:add-task` +- `add-task` - Add new task with AI assistance + +### `/taskmaster:remove-task` +- `remove-task` - Remove task with confirmation + +## Subtask Management + +### `/taskmaster:add-subtask` +- `add-subtask` - Add new subtask to parent +- `convert-task-to-subtask` - Convert existing task to subtask + +### `/taskmaster:remove-subtask` +- `remove-subtask` - Remove subtask (with optional conversion) + +### `/taskmaster:clear-subtasks` +- `clear-subtasks` - 
Clear subtasks from specific task +- `clear-all-subtasks` - Clear all subtasks globally + +## Task Analysis & Breakdown + +### `/taskmaster:analyze-complexity` +- `analyze-complexity` - Analyze and generate expansion recommendations + +### `/taskmaster:complexity-report` +- `complexity-report` - Display complexity analysis report + +### `/taskmaster:expand` +- `expand-task` - Break down specific task +- `expand-all-tasks` - Expand all eligible tasks +- `with-research` - Enhanced expansion + +## Task Navigation + +### `/taskmaster:next` +- `next-task` - Intelligent next task recommendation + +### `/taskmaster:show` +- `show-task` - Display detailed task information + +### `/taskmaster:status` +- `project-status` - Comprehensive project dashboard + +## Dependency Management + +### `/taskmaster:add-dependency` +- `add-dependency` - Add task dependency + +### `/taskmaster:remove-dependency` +- `remove-dependency` - Remove task dependency + +### `/taskmaster:validate-dependencies` +- `validate-dependencies` - Check for dependency issues + +### `/taskmaster:fix-dependencies` +- `fix-dependencies` - Automatically fix dependency problems + +## Workflows & Automation + +### `/taskmaster:workflows` +- `smart-workflow` - Context-aware intelligent workflow execution +- `command-pipeline` - Chain multiple commands together +- `auto-implement-tasks` - Advanced auto-implementation with code generation + +## Utilities + +### `/taskmaster:utils` +- `analyze-project` - Deep project analysis and insights + +### `/taskmaster:setup` +- `install-taskmaster` - Comprehensive installation guide +- `quick-install-taskmaster` - One-line global installation + +## Usage Patterns + +### Natural Language +Most commands accept natural language arguments: +``` +/taskmaster:add-task create user authentication system +/taskmaster:update mark all API tasks as high priority +/taskmaster:list show blocked tasks +``` + +### ID-Based Commands +Commands requiring IDs intelligently parse from $ARGUMENTS: 
+``` +/taskmaster:show 45 +/taskmaster:expand 23 +/taskmaster:set-status/to-done 67 +``` + +### Smart Defaults +Commands provide intelligent defaults and suggestions based on context. \ No newline at end of file diff --git a/.claude/commands/tm/to-cancelled.md b/.claude/commands/tm/to-cancelled.md new file mode 100644 index 0000000..bce2410 --- /dev/null +++ b/.claude/commands/tm/to-cancelled.md @@ -0,0 +1,58 @@ +To Cancelled + +Arguments: $ARGUMENTS +Cancel a task permanently. + +Arguments: $ARGUMENTS (task ID) + +## Cancelling a Task + +This status indicates a task is no longer needed and won't be completed. + +## Valid Reasons for Cancellation + +- Requirements changed +- Feature deprecated +- Duplicate of another task +- Strategic pivot +- Technical approach invalidated + +## Pre-Cancellation Checks + +1. Confirm no critical dependencies +2. Check for partial implementation +3. Verify cancellation rationale +4. Document lessons learned + +## Execution + +```bash +task-master set-status --id=$ARGUMENTS --status=cancelled +``` + +## Cancellation Impact + +When cancelling: +1. **Dependency Updates** + - Notify dependent tasks + - Update project scope + - Recalculate timelines + +2. **Clean-up Actions** + - Remove related branches + - Archive any work done + - Update documentation + - Close related issues + +3. **Learning Capture** + - Document why cancelled + - Note what was learned + - Update estimation models + - Prevent future duplicates + +## Historical Preservation + +- Keep for reference +- Tag with cancellation reason +- Link to replacement if any +- Maintain audit trail \ No newline at end of file diff --git a/.claude/commands/tm/to-deferred.md b/.claude/commands/tm/to-deferred.md new file mode 100644 index 0000000..0169239 --- /dev/null +++ b/.claude/commands/tm/to-deferred.md @@ -0,0 +1,50 @@ +To Deferred + +Arguments: $ARGUMENTS +Defer a task for later consideration. 
+ +Arguments: $ARGUMENTS (task ID) + +## Deferring a Task + +This status indicates a task is valid but not currently actionable or prioritized. + +## Valid Reasons for Deferral + +- Waiting for external dependencies +- Reprioritized for future sprint +- Blocked by technical limitations +- Resource constraints +- Strategic timing considerations + +## Execution + +```bash +task-master set-status --id=$ARGUMENTS --status=deferred +``` + +## Deferral Management + +When deferring: +1. **Document Reason** + - Capture why it's being deferred + - Set reactivation criteria + - Note any partial work completed + +2. **Impact Analysis** + - Check dependent tasks + - Update project timeline + - Notify affected stakeholders + +3. **Future Planning** + - Set review reminders + - Tag for specific milestone + - Preserve context for reactivation + - Link to blocking issues + +## Smart Tracking + +- Monitor deferral duration +- Alert when criteria met +- Prevent scope creep +- Regular review cycles \ No newline at end of file diff --git a/.claude/commands/tm/to-done.md b/.claude/commands/tm/to-done.md new file mode 100644 index 0000000..962a687 --- /dev/null +++ b/.claude/commands/tm/to-done.md @@ -0,0 +1,47 @@ +To Done + +Arguments: $ARGUMENTS +Mark a task as completed. + +Arguments: $ARGUMENTS (task ID) + +## Completing a Task + +This command validates task completion and updates project state intelligently. + +## Pre-Completion Checks + +1. Verify test strategy was followed +2. Check if all subtasks are complete +3. Validate acceptance criteria met +4. Ensure code is committed + +## Execution + +```bash +task-master set-status --id=$ARGUMENTS --status=done +``` + +## Post-Completion Actions + +1. **Update Dependencies** + - Identify newly unblocked tasks + - Update sprint progress + - Recalculate project timeline + +2. **Documentation** + - Generate completion summary + - Update CLAUDE.md with learnings + - Log implementation approach + +3. 
**Next Steps** + - Show newly available tasks + - Suggest logical next task + - Update velocity metrics + +## Celebration & Learning + +- Show impact of completion +- Display unblocked work +- Recognize achievement +- Capture lessons learned \ No newline at end of file diff --git a/.claude/commands/tm/to-in-progress.md b/.claude/commands/tm/to-in-progress.md new file mode 100644 index 0000000..728c502 --- /dev/null +++ b/.claude/commands/tm/to-in-progress.md @@ -0,0 +1,39 @@ +To In Progress + +Arguments: $ARGUMENTS +Start working on a task by setting its status to in-progress. + +Arguments: $ARGUMENTS (task ID) + +## Starting Work on Task + +This command does more than just change status - it prepares your environment for productive work. + +## Pre-Start Checks + +1. Verify dependencies are met +2. Check if another task is already in-progress +3. Ensure task details are complete +4. Validate test strategy exists + +## Execution + +```bash +task-master set-status --id=$ARGUMENTS --status=in-progress +``` + +## Environment Setup + +After setting to in-progress: +1. Create/checkout appropriate git branch +2. Open relevant documentation +3. Set up test watchers if applicable +4. Display task details and acceptance criteria +5. Show similar completed tasks for reference + +## Smart Suggestions + +- Estimated completion time based on complexity +- Related files from similar tasks +- Potential blockers to watch for +- Recommended first steps \ No newline at end of file diff --git a/.claude/commands/tm/to-pending.md b/.claude/commands/tm/to-pending.md new file mode 100644 index 0000000..7f2aeea --- /dev/null +++ b/.claude/commands/tm/to-pending.md @@ -0,0 +1,35 @@ +To Pending + +Arguments: $ARGUMENTS +Set a task's status to pending. 
+ +Arguments: $ARGUMENTS (task ID) + +## Setting Task to Pending + +This moves a task back to the pending state, useful for: +- Resetting erroneously started tasks +- Deferring work that was prematurely begun +- Reorganizing sprint priorities + +## Execution + +```bash +task-master set-status --id=$ARGUMENTS --status=pending +``` + +## Validation + +Before setting to pending: +- Warn if task is currently in-progress +- Check if this will block other tasks +- Suggest documenting why it's being reset +- Preserve any work already done + +## Smart Actions + +After setting to pending: +- Update sprint planning if needed +- Notify about freed resources +- Suggest priority reassessment +- Log the status change with context \ No newline at end of file diff --git a/.claude/commands/tm/to-review.md b/.claude/commands/tm/to-review.md new file mode 100644 index 0000000..efb94ab --- /dev/null +++ b/.claude/commands/tm/to-review.md @@ -0,0 +1,43 @@ +To Review + +Arguments: $ARGUMENTS +Set a task's status to review. + +Arguments: $ARGUMENTS (task ID) + +## Marking Task for Review + +This status indicates work is complete but needs verification before final approval. + +## When to Use Review Status + +- Code complete but needs peer review +- Implementation done but needs testing +- Documentation written but needs proofreading +- Design complete but needs stakeholder approval + +## Execution + +```bash +task-master set-status --id=$ARGUMENTS --status=review +``` + +## Review Preparation + +When setting to review: +1. **Generate Review Checklist** + - Link to PR/MR if applicable + - Highlight key changes + - Note areas needing attention + - Include test results + +2. **Documentation** + - Update task with review notes + - Link relevant artifacts + - Specify reviewers if known + +3. 
**Smart Actions** + - Create review reminders + - Track review duration + - Suggest reviewers based on expertise + - Prepare rollback plan if needed \ No newline at end of file diff --git a/.claude/commands/tm/update-single-task.md b/.claude/commands/tm/update-single-task.md new file mode 100644 index 0000000..d2fdc1a --- /dev/null +++ b/.claude/commands/tm/update-single-task.md @@ -0,0 +1,122 @@ +Update Single Task + +Arguments: $ARGUMENTS +Update a single specific task with new information. + +Arguments: $ARGUMENTS + +Parse task ID and update details. + +## Single Task Update + +Precisely update one task with AI assistance to maintain consistency. + +## Argument Parsing + +Natural language updates: +- "5: add caching requirement" +- "update 5 to include error handling" +- "task 5 needs rate limiting" +- "5 change priority to high" + +## Execution + +```bash +task-master update-task --id=<id> --prompt="<context>" +``` + +## Update Types + +### 1. **Content Updates** +- Enhance description +- Add requirements +- Clarify details +- Update acceptance criteria + +### 2. **Metadata Updates** +- Change priority +- Adjust time estimates +- Update complexity +- Modify dependencies + +### 3. **Strategic Updates** +- Revise approach +- Change test strategy +- Update implementation notes +- Adjust subtask needs + +## AI-Powered Updates + +The AI: +1. **Understands Context** + - Reads current task state + - Identifies update intent + - Maintains consistency + - Preserves important info + +2. **Applies Changes** + - Updates relevant fields + - Keeps style consistent + - Adds without removing + - Enhances clarity + +3. 
**Validates Results** + - Checks coherence + - Verifies completeness + - Maintains relationships + - Suggests related updates + +## Example Updates + +``` +/taskmaster:update/single 5: add rate limiting +→ Updating Task #5: "Implement API endpoints" + +Current: Basic CRUD endpoints +Adding: Rate limiting requirements + +Updated sections: +✓ Description: Added rate limiting mention +✓ Details: Added specific limits (100/min) +✓ Test Strategy: Added rate limit tests +✓ Complexity: Increased from 5 to 6 +✓ Time Estimate: Increased by 2 hours + +Suggestion: Also update task #6 (API Gateway) for consistency? +``` + +## Smart Features + +1. **Incremental Updates** + - Adds without overwriting + - Preserves work history + - Tracks what changed + - Shows diff view + +2. **Consistency Checks** + - Related task alignment + - Subtask compatibility + - Dependency validity + - Timeline impact + +3. **Update History** + - Timestamp changes + - Track who/what updated + - Reason for update + - Previous versions + +## Field-Specific Updates + +Quick syntax for specific fields: +- "5 priority:high" → Update priority only +- "5 add-time:4h" → Add to time estimate +- "5 status:review" → Change status +- "5 depends:3,4" → Add dependencies + +## Post-Update + +- Show updated task +- Highlight changes +- Check related tasks +- Update suggestions +- Timeline adjustments \ No newline at end of file diff --git a/.claude/commands/tm/update-task.md b/.claude/commands/tm/update-task.md new file mode 100644 index 0000000..fb96d63 --- /dev/null +++ b/.claude/commands/tm/update-task.md @@ -0,0 +1,75 @@ +Update Task + +Arguments: $ARGUMENTS +Update tasks with intelligent field detection and bulk operations. + +Arguments: $ARGUMENTS + +## Intelligent Task Updates + +Parse arguments to determine update intent and execute smartly. + +### 1. 
**Natural Language Processing** + +Understand update requests like: +- "mark 23 as done" → Update status to done +- "increase priority of 45" → Set priority to high +- "add dependency on 12 to task 34" → Add dependency +- "tasks 20-25 need review" → Bulk status update +- "all API tasks high priority" → Pattern-based update + +### 2. **Smart Field Detection** + +Automatically detect what to update: +- Status keywords: done, complete, start, pause, review +- Priority changes: urgent, high, low, deprioritize +- Dependency updates: depends on, blocks, after +- Assignment: assign to, owner, responsible +- Time: estimate, spent, deadline + +### 3. **Bulk Operations** + +Support for multiple task updates: +``` +Examples: +- "complete tasks 12, 15, 18" +- "all pending auth tasks to in-progress" +- "increase priority for tasks blocking 45" +- "defer all documentation tasks" +``` + +### 4. **Contextual Validation** + +Before updating, check: +- Status transitions are valid +- Dependencies don't create cycles +- Priority changes make sense +- Bulk updates won't break project flow + +Show preview: +``` +Update Preview: +───────────────── +Tasks to update: #23, #24, #25 +Change: status → in-progress +Impact: Will unblock tasks #30, #31 +Warning: Task #24 has unmet dependencies +``` + +### 5. **Smart Suggestions** + +Based on update: +- Completing task? → Show newly unblocked tasks +- Changing priority? → Show impact on sprint +- Adding dependency? → Check for conflicts +- Bulk update? → Show summary of changes + +### 6. **Workflow Integration** + +After updates: +- Auto-update dependent task states +- Trigger status recalculation +- Update sprint/milestone progress +- Log changes with context + +Result: Flexible, intelligent task updates with safety checks. 
\ No newline at end of file diff --git a/.claude/commands/tm/update-tasks-from-id.md b/.claude/commands/tm/update-tasks-from-id.md new file mode 100644 index 0000000..9440f03 --- /dev/null +++ b/.claude/commands/tm/update-tasks-from-id.md @@ -0,0 +1,111 @@ +Update Tasks From ID + +Arguments: $ARGUMENTS +Update multiple tasks starting from a specific ID. + +Arguments: $ARGUMENTS + +Parse starting task ID and update context. + +## Bulk Task Updates + +Update multiple related tasks based on new requirements or context changes. + +## Argument Parsing + +- "from 5: add security requirements" +- "5 onwards: update API endpoints" +- "starting at 5: change to use new framework" + +## Execution + +```bash +task-master update --from=<id> --prompt="<context>" +``` + +## Update Process + +### 1. **Task Selection** +Starting from specified ID: +- Include the task itself +- Include all dependent tasks +- Include related subtasks +- Smart boundary detection + +### 2. **Context Application** +AI analyzes the update context and: +- Identifies what needs changing +- Maintains consistency +- Preserves completed work +- Updates related information + +### 3. **Intelligent Updates** +- Modify descriptions appropriately +- Update test strategies +- Adjust time estimates +- Revise dependencies if needed + +## Smart Features + +1. **Scope Detection** + - Find natural task groupings + - Identify related features + - Stop at logical boundaries + - Avoid over-updating + +2. **Consistency Maintenance** + - Keep naming conventions + - Preserve relationships + - Update cross-references + - Maintain task flow + +3. **Change Preview** + ``` + Bulk Update Preview + ━━━━━━━━━━━━━━━━━━ + Starting from: Task #5 + Tasks to update: 8 tasks + 12 subtasks + + Context: "add security requirements" + + Changes will include: + - Add security sections to descriptions + - Update test strategies for security + - Add security-related subtasks where needed + - Adjust time estimates (+20% average) + + Continue? 
(y/n) + ``` + +## Example Updates + +``` +/taskmaster:update-tasks-from-id 5: change database to PostgreSQL +→ Analyzing impact starting from task #5 +→ Found 6 related tasks to update +→ Updates will maintain consistency +→ Preview changes? (y/n) + +Applied updates: +✓ Task #5: Updated connection logic references +✓ Task #6: Changed migration approach +✓ Task #7: Updated query syntax notes +✓ Task #8: Revised testing strategy +✓ Task #9: Updated deployment steps +✓ Task #12: Changed backup procedures +``` + +## Safety Features + +- Preview all changes +- Selective confirmation +- Rollback capability +- Change logging +- Validation checks + +## Post-Update + +- Summary of changes +- Consistency verification +- Suggest review tasks +- Update timeline if needed \ No newline at end of file diff --git a/.claude/commands/tm/validate-dependencies.md b/.claude/commands/tm/validate-dependencies.md new file mode 100644 index 0000000..0d0c1b9 --- /dev/null +++ b/.claude/commands/tm/validate-dependencies.md @@ -0,0 +1,72 @@ +Validate Dependencies +Validate all task dependencies for issues. + +## Dependency Validation + +Comprehensive check for dependency problems across the entire project. + +## Execution + +```bash +task-master validate-dependencies +``` + +## Validation Checks + +1. **Circular Dependencies** + - A depends on B, B depends on A + - Complex circular chains + - Self-dependencies + +2. **Missing Dependencies** + - References to non-existent tasks + - Deleted task references + - Invalid task IDs + +3. **Logical Issues** + - Completed tasks depending on pending + - Cancelled tasks in dependency chains + - Impossible sequences + +4. 
**Complexity Warnings** + - Over-complex dependency chains + - Too many dependencies per task + - Bottleneck tasks + +## Smart Analysis + +The validation provides: +- Visual dependency graph +- Critical path analysis +- Bottleneck identification +- Suggested optimizations + +## Report Format + +``` +Dependency Validation Report +━━━━━━━━━━━━━━━━━━━━━━━━━━ +✅ No circular dependencies found +⚠️ 2 warnings found: + - Task #23 has 7 dependencies (consider breaking down) + - Task #45 blocks 5 other tasks (potential bottleneck) +❌ 1 error found: + - Task #67 depends on deleted task #66 + +Critical Path: #1 → #5 → #23 → #45 → #50 (15 days) +``` + +## Actionable Output + +For each issue found: +- Clear description +- Impact assessment +- Suggested fix +- Command to resolve + +## Next Steps + +After validation: +- Run `/taskmaster:fix-dependencies` to auto-fix +- Manually adjust problematic dependencies +- Rerun to verify fixes \ No newline at end of file diff --git a/.claude/commands/tm/view-models.md b/.claude/commands/tm/view-models.md new file mode 100644 index 0000000..68f0463 --- /dev/null +++ b/.claude/commands/tm/view-models.md @@ -0,0 +1,52 @@ +View Models +View current AI model configuration. + +## Model Configuration Display + +Shows the currently configured AI providers and models for Task Master. + +## Execution + +```bash +task-master models +``` + +## Information Displayed + +1. **Main Provider** + - Model ID and name + - API key status (configured/missing) + - Usage: Primary task generation + +2. **Research Provider** + - Model ID and name + - API key status + - Usage: Enhanced research mode + +3. 
**Fallback Provider** + - Model ID and name + - API key status + - Usage: Backup when main fails + +## Visual Status + +``` +Task Master AI Model Configuration +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +Main: ✅ claude-3-5-sonnet (configured) +Research: ✅ perplexity-sonar (configured) +Fallback: ⚠️ Not configured (optional) + +Available Models: +- claude-3-5-sonnet +- gpt-4-turbo +- gpt-3.5-turbo +- perplexity-sonar +``` + +## Next Actions + +Based on configuration: +- If missing API keys → Suggest setup +- If no research model → Explain benefits +- If all configured → Show usage tips \ No newline at end of file diff --git a/.env.example b/.env.example new file mode 100644 index 0000000..60bd23e --- /dev/null +++ b/.env.example @@ -0,0 +1,12 @@ +# API Keys (Required to enable respective provider) +ANTHROPIC_API_KEY="your_anthropic_api_key_here" # Required: Format: sk-ant-api03-... +PERPLEXITY_API_KEY="your_perplexity_api_key_here" # Optional: Format: pplx-... +OPENAI_API_KEY="your_openai_api_key_here" # Optional, for OpenAI models. Format: sk-proj-... +GOOGLE_API_KEY="your_google_api_key_here" # Optional, for Google Gemini models. +MISTRAL_API_KEY="your_mistral_key_here" # Optional, for Mistral AI models. +XAI_API_KEY="YOUR_XAI_KEY_HERE" # Optional, for xAI AI models. +GROQ_API_KEY="YOUR_GROQ_KEY_HERE" # Optional, for Groq models. +OPENROUTER_API_KEY="YOUR_OPENROUTER_KEY_HERE" # Optional, for OpenRouter models. +AZURE_OPENAI_API_KEY="your_azure_key_here" # Optional, for Azure OpenAI models (requires endpoint in .taskmaster/config.json). +OLLAMA_API_KEY="your_ollama_api_key_here" # Optional: For remote Ollama servers that require authentication. +GITHUB_API_KEY="your_github_api_key_here" # Optional: For GitHub import/export features. Format: ghp_... or github_pat_... 
\ No newline at end of file diff --git a/.gemini/commands/tm/add-dependency.toml b/.gemini/commands/tm/add-dependency.toml new file mode 100644 index 0000000..0b03737 --- /dev/null +++ b/.gemini/commands/tm/add-dependency.toml @@ -0,0 +1,58 @@ +description="Add Dependency" +prompt = """ +Add a dependency between tasks. + +Arguments: $ARGUMENTS + +Parse the task IDs to establish dependency relationship. + +## Adding Dependencies + +Creates a dependency where one task must be completed before another can start. + +## Argument Parsing + +Parse natural language or IDs: +- "make 5 depend on 3" → task 5 depends on task 3 +- "5 needs 3" → task 5 depends on task 3 +- "5 3" → task 5 depends on task 3 +- "5 after 3" → task 5 depends on task 3 + +## Execution + +```bash +task-master add-dependency --id=<task-id> --depends-on=<dependency-id> +``` + +## Validation + +Before adding: +1. **Verify both tasks exist** +2. **Check for circular dependencies** +3. **Ensure dependency makes logical sense** +4. **Warn if creating complex chains** + +## Smart Features + +- Detect if dependency already exists +- Suggest related dependencies +- Show impact on task flow +- Update task priorities if needed + +## Post-Addition + +After adding dependency: +1. Show updated dependency graph +2. Identify any newly blocked tasks +3. Suggest task order changes +4. Update project timeline + +## Example Flows + +``` +/taskmaster:add-dependency 5 needs 3 +→ Task #5 now depends on Task #3 +→ Task #5 is now blocked until #3 completes +→ Suggested: Also consider if #5 needs #4 +``` +""" diff --git a/.gemini/commands/tm/add-subtask.toml b/.gemini/commands/tm/add-subtask.toml new file mode 100644 index 0000000..227d1f4 --- /dev/null +++ b/.gemini/commands/tm/add-subtask.toml @@ -0,0 +1,79 @@ +description="Add Subtask" +prompt = """ +Add a subtask to a parent task. + +Arguments: $ARGUMENTS + +Parse arguments to create a new subtask or convert existing task. 
+ +## Adding Subtasks + +Creates subtasks to break down complex parent tasks into manageable pieces. + +## Argument Parsing + +Flexible natural language: +- "add subtask to 5: implement login form" +- "break down 5 with: setup, implement, test" +- "subtask for 5: handle edge cases" +- "5: validate user input" → adds subtask to task 5 + +## Execution Modes + +### 1. Create New Subtask +```bash +task-master add-subtask --parent=<id> --title="<title>" --description="<desc>" +``` + +### 2. Convert Existing Task +```bash +task-master add-subtask --parent=<id> --task-id=<existing-id> +``` + +## Smart Features + +1. **Automatic Subtask Generation** + - If title contains "and" or commas, create multiple + - Suggest common subtask patterns + - Inherit parent's context + +2. **Intelligent Defaults** + - Priority based on parent + - Appropriate time estimates + - Logical dependencies between subtasks + +3. **Validation** + - Check parent task complexity + - Warn if too many subtasks + - Ensure subtask makes sense + +## Creation Process + +1. Parse parent task context +2. Generate subtask with ID like "5.1" +3. Set appropriate defaults +4. Link to parent task +5. 
Update parent's time estimate + +## Example Flows + +``` +/taskmaster:add-subtask to 5: implement user authentication +→ Created subtask #5.1: "implement user authentication" +→ Parent task #5 now has 1 subtask +→ Suggested next subtasks: tests, documentation + +/taskmaster:add-subtask 5: setup, implement, test +→ Created 3 subtasks: + #5.1: setup + #5.2: implement + #5.3: test +``` + +## Post-Creation + +- Show updated task hierarchy +- Suggest logical next subtasks +- Update complexity estimates +- Recommend subtask order +""" diff --git a/.gemini/commands/tm/add-task.toml b/.gemini/commands/tm/add-task.toml new file mode 100644 index 0000000..20bedaf --- /dev/null +++ b/.gemini/commands/tm/add-task.toml @@ -0,0 +1,81 @@ +description="Add Task" +prompt = """ +Add new tasks with intelligent parsing and context awareness. + +Arguments: $ARGUMENTS + +## Smart Task Addition + +Parse natural language to create well-structured tasks. + +### 1. **Input Understanding** + +I'll intelligently parse your request: +- Natural language → Structured task +- Detect priority from keywords (urgent, ASAP, important) +- Infer dependencies from context +- Suggest complexity based on description +- Determine task type (feature, bug, refactor, test, docs) + +### 2. **Smart Parsing Examples** + +**"Add urgent task to fix login bug"** +→ Title: Fix login bug +→ Priority: high +→ Type: bug +→ Suggested complexity: medium + +**"Create task for API documentation after task 23 is done"** +→ Title: API documentation +→ Dependencies: [23] +→ Type: documentation +→ Priority: medium + +**"Need to refactor auth module - depends on 12 and 15, high complexity"** +→ Title: Refactor auth module +→ Dependencies: [12, 15] +→ Complexity: high +→ Type: refactor + +### 3. **Context Enhancement** + +Based on current project state: +- Suggest related existing tasks +- Warn about potential conflicts +- Recommend dependencies +- Propose subtasks if complex + +### 4. 
**Interactive Refinement** + +```yaml +Task Preview: +───────────── +Title: [Extracted title] +Priority: [Inferred priority] +Dependencies: [Detected dependencies] +Complexity: [Estimated complexity] + +Suggestions: +- Similar task #34 exists, consider as dependency? +- This seems complex, break into subtasks? +- Tasks #45-47 work on same module +``` + +### 5. **Validation & Creation** + +Before creating: +- Validate dependencies exist +- Check for duplicates +- Ensure logical ordering +- Verify task completeness + +### 6. **Smart Defaults** + +Intelligent defaults based on: +- Task type patterns +- Team conventions +- Historical data +- Current sprint/phase + +Result: High-quality tasks from minimal input. +""" diff --git a/.gemini/commands/tm/analyze-complexity.toml b/.gemini/commands/tm/analyze-complexity.toml new file mode 100644 index 0000000..5210bff --- /dev/null +++ b/.gemini/commands/tm/analyze-complexity.toml @@ -0,0 +1,124 @@ +description="Analyze Complexity" +prompt = """ +Analyze task complexity and generate expansion recommendations. + +Arguments: $ARGUMENTS + +Perform deep analysis of task complexity across the project. + +## Complexity Analysis + +Uses AI to analyze tasks and recommend which ones need breakdown. + +## Execution Options + +```bash +task-master analyze-complexity [--research] [--threshold=5] +``` + +## Analysis Parameters + +- `--research` → Use research AI for deeper analysis +- `--threshold=5` → Only flag tasks above complexity 5 +- Default: Analyze all pending tasks + +## Analysis Process + +### 1. **Task Evaluation** +For each task, AI evaluates: +- Technical complexity +- Time requirements +- Dependency complexity +- Risk factors +- Knowledge requirements + +### 2. **Complexity Scoring** +Assigns score 1-10 based on: +- Implementation difficulty +- Integration challenges +- Testing requirements +- Unknown factors +- Technical debt risk + +### 3. 
**Recommendations** +For complex tasks: +- Suggest expansion approach +- Recommend subtask breakdown +- Identify risk areas +- Propose mitigation strategies + +## Smart Analysis Features + +1. **Pattern Recognition** + - Similar task comparisons + - Historical complexity accuracy + - Team velocity consideration + - Technology stack factors + +2. **Contextual Factors** + - Team expertise + - Available resources + - Timeline constraints + - Business criticality + +3. **Risk Assessment** + - Technical risks + - Timeline risks + - Dependency risks + - Knowledge gaps + +## Output Format + +``` +Task Complexity Analysis Report +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +High Complexity Tasks (>7): +📍 #5 "Implement real-time sync" - Score: 9/10 + Factors: WebSocket complexity, state management, conflict resolution + Recommendation: Expand into 5-7 subtasks + Risks: Performance, data consistency + +📍 #12 "Migrate database schema" - Score: 8/10 + Factors: Data migration, zero downtime, rollback strategy + Recommendation: Expand into 4-5 subtasks + Risks: Data loss, downtime + +Medium Complexity Tasks (5-7): +📍 #23 "Add export functionality" - Score: 6/10 + Consider expansion if timeline tight + +Low Complexity Tasks (<5): +✅ 15 tasks - No expansion needed + +Summary: +- Expand immediately: 2 tasks +- Consider expanding: 5 tasks +- Keep as-is: 15 tasks +``` + +## Actionable Output + +For each high-complexity task: +1. Complexity score with reasoning +2. Specific expansion suggestions +3. Risk mitigation approaches +4. 
Recommended subtask structure + +## Integration + +Results are: +- Saved to `.taskmaster/reports/complexity-analysis.md` +- Used by expand command +- Inform sprint planning +- Guide resource allocation + +## Next Steps + +After analysis: +``` +/taskmaster:expand 5 # Expand specific task +/taskmaster:expand-all # Expand all recommended +/taskmaster:complexity-report # View detailed report +``` +""" diff --git a/.gemini/commands/tm/analyze-project.toml b/.gemini/commands/tm/analyze-project.toml new file mode 100644 index 0000000..d8ac27d --- /dev/null +++ b/.gemini/commands/tm/analyze-project.toml @@ -0,0 +1,100 @@ +description="Analyze Project" +prompt = """ +Advanced project analysis with actionable insights and recommendations. + +Arguments: $ARGUMENTS + +## Comprehensive Project Analysis + +Multi-dimensional analysis based on requested focus area. + +### 1. **Analysis Modes** + +Based on $ARGUMENTS: +- "velocity" → Sprint velocity and trends +- "quality" → Code quality metrics +- "risk" → Risk assessment and mitigation +- "dependencies" → Dependency graph analysis +- "team" → Workload and skill distribution +- "architecture" → System design coherence +- Default → Full spectrum analysis + +### 2. **Velocity Analytics** + +``` +📊 Velocity Analysis +━━━━━━━━━━━━━━━━━━━ +Current Sprint: 24 points/week ↗️ +20% +Rolling Average: 20 points/week +Efficiency: 85% (17/20 tasks on time) + +Bottlenecks Detected: +- Code review delays (avg 4h wait) +- Test environment availability +- Dependency on external team + +Recommendations: +1. Implement parallel review process +2. Add staging environment +3. Mock external dependencies +``` + +### 3. 
**Risk Assessment** + +**Technical Risks** +- High complexity tasks without backup assignee +- Single points of failure in architecture +- Insufficient test coverage in critical paths +- Technical debt accumulation rate + +**Project Risks** +- Critical path dependencies +- Resource availability gaps +- Deadline feasibility analysis +- Scope creep indicators + +### 4. **Dependency Intelligence** + +Visual dependency analysis: +``` +Critical Path: +#12 → #15 → #23 → #45 → #50 (20 days) + ↘ #24 → #46 ↗ + +Optimization: Parallelize #15 and #24 +Time Saved: 3 days +``` + +### 5. **Quality Metrics** + +**Code Quality** +- Test coverage trends +- Complexity scores +- Technical debt ratio +- Review feedback patterns + +**Process Quality** +- Rework frequency +- Bug introduction rate +- Time to resolution +- Knowledge distribution + +### 6. **Predictive Insights** + +Based on patterns: +- Completion probability by deadline +- Resource needs projection +- Risk materialization likelihood +- Suggested interventions + +### 7. **Executive Dashboard** + +High-level summary with: +- Health score (0-100) +- Top 3 risks +- Top 3 opportunities +- Recommended actions +- Success probability + +Result: Data-driven decisions with clear action paths. +""" diff --git a/.gemini/commands/tm/auto-implement-tasks.toml b/.gemini/commands/tm/auto-implement-tasks.toml new file mode 100644 index 0000000..492165b --- /dev/null +++ b/.gemini/commands/tm/auto-implement-tasks.toml @@ -0,0 +1,100 @@ +description="Auto Implement Tasks" +prompt = """ +Enhanced auto-implementation with intelligent code generation and testing. + +Arguments: $ARGUMENTS + +## Intelligent Auto-Implementation + +Advanced implementation with context awareness and quality checks. + +### 1. **Pre-Implementation Analysis** + +Before starting: +- Analyze task complexity and requirements +- Check codebase patterns and conventions +- Identify similar completed tasks +- Assess test coverage needs +- Detect potential risks + +### 2. 
**Smart Implementation Strategy** + +Based on task type and context: + +**Feature Tasks** +1. Research existing patterns +2. Design component architecture +3. Implement with tests +4. Integrate with system +5. Update documentation + +**Bug Fix Tasks** +1. Reproduce issue +2. Identify root cause +3. Implement minimal fix +4. Add regression tests +5. Verify side effects + +**Refactoring Tasks** +1. Analyze current structure +2. Plan incremental changes +3. Maintain test coverage +4. Refactor step-by-step +5. Verify behavior unchanged + +### 3. **Code Intelligence** + +**Pattern Recognition** +- Learn from existing code +- Follow team conventions +- Use preferred libraries +- Match style guidelines + +**Test-Driven Approach** +- Write tests first when possible +- Ensure comprehensive coverage +- Include edge cases +- Performance considerations + +### 4. **Progressive Implementation** + +Step-by-step with validation: +``` +Step 1/5: Setting up component structure ✓ +Step 2/5: Implementing core logic ✓ +Step 3/5: Adding error handling ⚡ (in progress) +Step 4/5: Writing tests ⏳ +Step 5/5: Integration testing ⏳ + +Current: Adding try-catch blocks and validation... +``` + +### 5. **Quality Assurance** + +Automated checks: +- Linting and formatting +- Test execution +- Type checking +- Dependency validation +- Performance analysis + +### 6. **Smart Recovery** + +If issues arise: +- Diagnostic analysis +- Suggestion generation +- Fallback strategies +- Manual intervention points +- Learning from failures + +### 7. **Post-Implementation** + +After completion: +- Generate PR description +- Update documentation +- Log lessons learned +- Suggest follow-up tasks +- Update task relationships + +Result: High-quality, production-ready implementations. 
+""" diff --git a/.gemini/commands/tm/command-pipeline.toml b/.gemini/commands/tm/command-pipeline.toml new file mode 100644 index 0000000..af0eef2 --- /dev/null +++ b/.gemini/commands/tm/command-pipeline.toml @@ -0,0 +1,80 @@ +description="Command Pipeline" +prompt = """ +Execute a pipeline of commands based on a specification. + +Arguments: $ARGUMENTS + +## Command Pipeline Execution + +Parse pipeline specification from arguments. Supported formats: + +### Simple Pipeline +`init → expand-all → sprint-plan` + +### Conditional Pipeline +`status → if:pending>10 → sprint-plan → else → next` + +### Iterative Pipeline +`for:pending-tasks → expand → complexity-check` + +### Smart Pipeline Patterns + +**1. Project Setup Pipeline** +``` +init [prd] → +expand-all → +complexity-report → +sprint-plan → +show first-sprint +``` + +**2. Daily Work Pipeline** +``` +standup → +if:in-progress → continue → +else → next → start +``` + +**3. Task Completion Pipeline** +``` +complete [id] → +git-commit → +if:blocked-tasks-freed → show-freed → +next +``` + +**4. Quality Check Pipeline** +``` +list in-progress → +for:each → check-idle-time → +if:idle>1day → prompt-update +``` + +### Pipeline Features + +**Variables** +- Store results: `status → $count=pending-count` +- Use in conditions: `if:$count>10` +- Pass between commands: `expand $high-priority-tasks` + +**Error Handling** +- On failure: `try:complete → catch:show-blockers` +- Skip on error: `optional:test-run` +- Retry logic: `retry:3:commit` + +**Parallel Execution** +- Parallel branches: `[analyze | test | lint]` +- Join results: `parallel → join:report` + +### Execution Flow + +1. Parse pipeline specification +2. Validate command sequence +3. Execute with state passing +4. Handle conditions and loops +5. Aggregate results +6. 
Show summary + +This enables complex workflows like: +`parse-prd → expand-all → filter:complex>70 → assign:senior → sprint-plan:weighted` +""" diff --git a/.gemini/commands/tm/complexity-report.toml b/.gemini/commands/tm/complexity-report.toml new file mode 100644 index 0000000..cc76c19 --- /dev/null +++ b/.gemini/commands/tm/complexity-report.toml @@ -0,0 +1,120 @@ +description="Complexity Report" +prompt = """ +Display the task complexity analysis report. + +Arguments: $ARGUMENTS + +View the detailed complexity analysis generated by analyze-complexity command. + +## Viewing Complexity Report + +Shows comprehensive task complexity analysis with actionable insights. + +## Execution + +```bash +task-master complexity-report [--file=<path>] +``` + +## Report Location + +Default: `.taskmaster/reports/complexity-analysis.md` +Custom: Specify with --file parameter + +## Report Contents + +### 1. **Executive Summary** +``` +Complexity Analysis Summary +━━━━━━━━━━━━━━━━━━━━━━━━ +Analysis Date: 2024-01-15 +Tasks Analyzed: 32 +High Complexity: 5 (16%) +Medium Complexity: 12 (37%) +Low Complexity: 15 (47%) + +Critical Findings: +- 5 tasks need immediate expansion +- 3 tasks have high technical risk +- 2 tasks block critical path +``` + +### 2. **Detailed Task Analysis** +For each complex task: +- Complexity score breakdown +- Contributing factors +- Specific risks identified +- Expansion recommendations +- Similar completed tasks + +### 3. **Risk Matrix** +Visual representation: +``` +Risk vs Complexity Matrix +━━━━━━━━━━━━━━━━━━━━━━━ +High Risk | #5(9) #12(8) | #23(6) +Med Risk | #34(7) | #45(5) #67(5) +Low Risk | #78(8) | [15 tasks] + | High Complex | Med Complex +``` + +### 4. **Recommendations** + +**Immediate Actions:** +1. Expand task #5 - Critical path + high complexity +2. Expand task #12 - High risk + dependencies +3. 
Review task #34 - Consider splitting + +**Sprint Planning:** +- Don't schedule multiple high-complexity tasks together +- Ensure expertise available for complex tasks +- Build in buffer time for unknowns + +## Interactive Features + +When viewing report: +1. **Quick Actions** + - Press 'e' to expand a task + - Press 'd' for task details + - Press 'r' to refresh analysis + +2. **Filtering** + - View by complexity level + - Filter by risk factors + - Show only actionable items + +3. **Export Options** + - Markdown format + - CSV for spreadsheets + - JSON for tools + +## Report Intelligence + +- Compares with historical data +- Shows complexity trends +- Identifies patterns +- Suggests process improvements + +## Integration + +Use report for: +- Sprint planning sessions +- Resource allocation +- Risk assessment +- Team discussions +- Client updates + +## Example Usage + +``` +/taskmaster:complexity-report +→ Opens latest analysis + +/taskmaster:complexity-report --file=archived/2024-01-01.md +→ View historical analysis + +After viewing: +/taskmaster:expand 5 +→ Expand high-complexity task +``` +""" diff --git a/.gemini/commands/tm/convert-task-to-subtask.toml b/.gemini/commands/tm/convert-task-to-subtask.toml new file mode 100644 index 0000000..442e2ca --- /dev/null +++ b/.gemini/commands/tm/convert-task-to-subtask.toml @@ -0,0 +1,74 @@ +description="Convert Task To Subtask" +prompt = """ +Convert an existing task into a subtask. + +Arguments: $ARGUMENTS + +Parse parent ID and task ID to convert. + +## Task Conversion + +Converts an existing standalone task into a subtask of another task. + +## Argument Parsing + +- "move task 8 under 5" +- "make 8 a subtask of 5" +- "nest 8 in 5" +- "5 8" → make task 8 a subtask of task 5 + +## Execution + +```bash +task-master add-subtask --parent=<parent-id> --task-id=<task-to-convert> +``` + +## Pre-Conversion Checks + +1. 
**Validation** + - Both tasks exist and are valid + - No circular parent relationships + - Task isn't already a subtask + - Logical hierarchy makes sense + +2. **Impact Analysis** + - Dependencies that will be affected + - Tasks that depend on converting task + - Priority alignment needed + - Status compatibility + +## Conversion Process + +1. Change task ID from "8" to "5.1" (next available) +2. Update all dependency references +3. Inherit parent's context where appropriate +4. Adjust priorities if needed +5. Update time estimates + +## Smart Features + +- Preserve task history +- Maintain dependencies +- Update all references +- Create conversion log + +## Example + +``` +/taskmaster:add-subtask/from-task 5 8 +→ Converting: Task #8 becomes subtask #5.1 +→ Updated: 3 dependency references +→ Parent task #5 now has 1 subtask +→ Note: Subtask inherits parent's priority + +Before: #8 "Implement validation" (standalone) +After: #5.1 "Implement validation" (subtask of #5) +``` + +## Post-Conversion + +- Show new task hierarchy +- List updated dependencies +- Verify project integrity +- Suggest related conversions +""" diff --git a/.gemini/commands/tm/expand-all-tasks.toml b/.gemini/commands/tm/expand-all-tasks.toml new file mode 100644 index 0000000..77f8ff7 --- /dev/null +++ b/.gemini/commands/tm/expand-all-tasks.toml @@ -0,0 +1,54 @@ +description="Expand All Tasks" +prompt = """ +Expand all pending tasks that need subtasks. + +## Bulk Task Expansion + +Intelligently expands all tasks that would benefit from breakdown. + +## Execution + +```bash +task-master expand --all +``` + +## Smart Selection + +Only expands tasks that: +- Are marked as pending +- Have high complexity (>5) +- Lack existing subtasks +- Would benefit from breakdown + +## Expansion Process + +1. **Analysis Phase** + - Identify expansion candidates + - Group related tasks + - Plan expansion strategy + +2. 
**Batch Processing** + - Expand tasks in logical order + - Maintain consistency + - Preserve relationships + - Optimize for parallelism + +3. **Quality Control** + - Ensure subtask quality + - Avoid over-decomposition + - Maintain task coherence + - Update dependencies + +## Options + +- Add `force` to expand all regardless of complexity +- Add `research` for enhanced AI analysis + +## Results + +After bulk expansion: +- Summary of tasks expanded +- New subtask count +- Updated complexity metrics +- Suggested task order +""" diff --git a/.gemini/commands/tm/expand-task.toml b/.gemini/commands/tm/expand-task.toml new file mode 100644 index 0000000..00a242b --- /dev/null +++ b/.gemini/commands/tm/expand-task.toml @@ -0,0 +1,52 @@ +description="Expand Task" +prompt = """ +Break down a complex task into subtasks. + +Arguments: $ARGUMENTS (task ID) + +## Intelligent Task Expansion + +Analyzes a task and creates detailed subtasks for better manageability. + +## Execution + +```bash +task-master expand --id=$ARGUMENTS +``` + +## Expansion Process + +1. **Task Analysis** + - Review task complexity + - Identify components + - Detect technical challenges + - Estimate time requirements + +2. **Subtask Generation** + - Create 3-7 subtasks typically + - Each subtask 1-4 hours + - Logical implementation order + - Clear acceptance criteria + +3. **Smart Breakdown** + - Setup/configuration tasks + - Core implementation + - Testing components + - Integration steps + - Documentation updates + +## Enhanced Features + +Based on task type: +- **Feature**: Setup → Implement → Test → Integrate +- **Bug Fix**: Reproduce → Diagnose → Fix → Verify +- **Refactor**: Analyze → Plan → Refactor → Validate + +## Post-Expansion + +After expansion: +1. Show subtask hierarchy +2. Update time estimates +3. Suggest implementation order +4. 
Highlight critical path +""" diff --git a/.gemini/commands/tm/fix-dependencies.toml b/.gemini/commands/tm/fix-dependencies.toml new file mode 100644 index 0000000..42e18ec --- /dev/null +++ b/.gemini/commands/tm/fix-dependencies.toml @@ -0,0 +1,84 @@ +description="Fix Dependencies" +prompt = """ +Automatically fix dependency issues found during validation. + +## Automatic Dependency Repair + +Intelligently fixes common dependency problems while preserving project logic. + +## Execution + +```bash +task-master fix-dependencies +``` + +## What Gets Fixed + +### 1. **Auto-Fixable Issues** +- Remove references to deleted tasks +- Break simple circular dependencies +- Remove self-dependencies +- Clean up duplicate dependencies + +### 2. **Smart Resolutions** +- Reorder dependencies to maintain logic +- Suggest task merging for over-dependent tasks +- Flatten unnecessary dependency chains +- Remove redundant transitive dependencies + +### 3. **Manual Review Required** +- Complex circular dependencies +- Critical path modifications +- Business logic dependencies +- High-impact changes + +## Fix Process + +1. **Analysis Phase** + - Run validation check + - Categorize issues by type + - Determine fix strategy + +2. **Execution Phase** + - Apply automatic fixes + - Log all changes made + - Preserve task relationships + +3. 
**Verification Phase** + - Re-validate after fixes + - Show before/after comparison + - Highlight manual fixes needed + +## Smart Features + +- Preserves intended task flow +- Minimal disruption approach +- Creates fix history/log +- Suggests manual interventions + +## Output Example + +``` +Dependency Auto-Fix Report +━━━━━━━━━━━━━━━━━━━━━━━━ +Fixed Automatically: +✅ Removed 2 references to deleted tasks +✅ Resolved 1 self-dependency +✅ Cleaned 3 redundant dependencies + +Manual Review Needed: +⚠️ Complex circular dependency: #12 → #15 → #18 → #12 + Suggestion: Make #15 not depend on #12 +⚠️ Task #45 has 8 dependencies + Suggestion: Break into subtasks + +Run '/taskmaster:validate-dependencies' to verify fixes +``` + +## Safety + +- Preview mode available +- Rollback capability +- Change logging +- No data loss +""" diff --git a/.gemini/commands/tm/help.toml b/.gemini/commands/tm/help.toml new file mode 100644 index 0000000..2cf000b --- /dev/null +++ b/.gemini/commands/tm/help.toml @@ -0,0 +1,101 @@ +description="Help" +prompt = """ +Show help for Task Master AI commands. + +Arguments: $ARGUMENTS + +Display help for Task Master commands and available options. + +## Task Master AI Command Help + +### Quick Navigation + +Type `/taskmaster:` and use tab completion to explore all commands. 
+ +### Command Categories + +#### 🚀 Setup & Installation +- `/taskmaster:install-taskmaster` - Comprehensive installation guide +- `/taskmaster:quick-install-taskmaster` - One-line global install + +#### 📋 Project Setup +- `/taskmaster:init-project` - Initialize new project +- `/taskmaster:init-project-quick` - Quick setup with auto-confirm +- `/taskmaster:view-models` - View AI configuration +- `/taskmaster:setup-models` - Configure AI providers + +#### 🎯 Task Generation +- `/taskmaster:parse-prd` - Generate tasks from PRD +- `/taskmaster:parse-prd-with-research` - Enhanced parsing +- `/taskmaster:generate-tasks` - Create task files + +#### 📝 Task Management +- `/taskmaster:list-tasks` - List all tasks +- `/taskmaster:list-tasks-by-status` - List tasks filtered by status +- `/taskmaster:list-tasks-with-subtasks` - List tasks with subtasks +- `/taskmaster:show-task` - Display task details +- `/taskmaster:add-task` - Create new task +- `/taskmaster:update-task` - Update single task +- `/taskmaster:update-tasks-from-id` - Update multiple tasks +- `/taskmaster:next-task` - Get next task recommendation + +#### 🔄 Status Management +- `/taskmaster:to-pending` - Set task to pending +- `/taskmaster:to-in-progress` - Set task to in-progress +- `/taskmaster:to-done` - Set task to done +- `/taskmaster:to-review` - Set task to review +- `/taskmaster:to-deferred` - Set task to deferred +- `/taskmaster:to-cancelled` - Set task to cancelled + +#### 🔍 Analysis & Breakdown +- `/taskmaster:analyze-complexity` - Analyze task complexity +- `/taskmaster:complexity-report` - View complexity report +- `/taskmaster:expand-task` - Break down complex task +- `/taskmaster:expand-all-tasks` - Expand all eligible tasks + +#### 🔗 Dependencies +- `/taskmaster:add-dependency` - Add task dependency +- `/taskmaster:remove-dependency` - Remove dependency +- `/taskmaster:validate-dependencies` - Check for issues +- `/taskmaster:fix-dependencies` - Auto-fix dependency issues + +#### 📦 Subtasks +- 
`/taskmaster:add-subtask` - Add subtask to task +- `/taskmaster:convert-task-to-subtask` - Convert task to subtask +- `/taskmaster:remove-subtask` - Remove subtask +- `/taskmaster:remove-subtasks` - Clear specific task subtasks +- `/taskmaster:remove-all-subtasks` - Clear all subtasks + +#### 🗑️ Task Removal +- `/taskmaster:remove-task` - Remove task permanently + +#### 🤖 Workflows +- `/taskmaster:smart-workflow` - Intelligent workflows +- `/taskmaster:command-pipeline` - Command chaining +- `/taskmaster:auto-implement-tasks` - Auto-implementation + +#### 📊 Utilities +- `/taskmaster:analyze-project` - Project analysis +- `/taskmaster:project-status` - Project dashboard +- `/taskmaster:sync-readme` - Sync README with tasks +- `/taskmaster:learn` - Interactive learning +- `/taskmaster:tm-main` - Main Task Master interface + +### Quick Start Examples + +``` +/taskmaster:list-tasks +/taskmaster:show-task 1.2 +/taskmaster:add-task +/taskmaster:next-task +``` + +### Getting Started + +1. Install: `/taskmaster:quick-install-taskmaster` +2. Initialize: `/taskmaster:init-project-quick` +3. Learn: `/taskmaster:learn` +4. Work: `/taskmaster:smart-workflow` + +For detailed command info, run the specific command with `--help` or check command documentation. +""" diff --git a/.gemini/commands/tm/init-project-quick.toml b/.gemini/commands/tm/init-project-quick.toml new file mode 100644 index 0000000..694d804 --- /dev/null +++ b/.gemini/commands/tm/init-project-quick.toml @@ -0,0 +1,49 @@ +description="Init Project Quick" +prompt = """ +Quick initialization with auto-confirmation. + +Arguments: $ARGUMENTS + +Initialize a Task Master project without prompts, accepting all defaults. + +## Quick Setup + +```bash +task-master init -y +``` + +## What It Does + +1. Creates `.taskmaster/` directory structure +2. Initializes empty `tasks.json` +3. Sets up default configuration +4. Uses directory name as project name +5. 
Skips all confirmation prompts + +## Smart Defaults + +- Project name: Current directory name +- Description: "Task Master Project" +- Model config: Existing environment vars +- Task structure: Standard format + +## Next Steps + +After quick init: +1. Configure AI models if needed: + ``` + /taskmaster:models/setup + ``` + +2. Parse PRD if available: + ``` + /taskmaster:parse-prd <file> + ``` + +3. Or create first task: + ``` + /taskmaster:add-task create initial setup + ``` + +Perfect for rapid project setup! +""" diff --git a/.gemini/commands/tm/init-project.toml b/.gemini/commands/tm/init-project.toml new file mode 100644 index 0000000..8743afa --- /dev/null +++ b/.gemini/commands/tm/init-project.toml @@ -0,0 +1,53 @@ +description="Init Project" +prompt = """ +Initialize a new Task Master project. + +Arguments: $ARGUMENTS + +Parse arguments to determine initialization preferences. + +## Initialization Process + +1. **Parse Arguments** + - PRD file path (if provided) + - Project name + - Auto-confirm flag (-y) + +2. **Project Setup** + ```bash + task-master init + ``` + +3. **Smart Initialization** + - Detect existing project files + - Suggest project name from directory + - Check for git repository + - Verify AI provider configuration + +## Configuration Options + +Based on arguments: +- `quick` / `-y` → Skip confirmations +- `<file.md>` → Use as PRD after init +- `--name=<name>` → Set project name +- `--description=<desc>` → Set description + +## Post-Initialization + +After successful init: +1. Show project structure created +2. Verify AI models configured +3. 
Suggest next steps: + - Parse PRD if available + - Configure AI providers + - Set up git hooks + - Create first tasks + +## Integration + +If PRD file provided: +``` +/taskmaster:init my-prd.md +→ Automatically runs parse-prd after init +``` +""" diff --git a/.gemini/commands/tm/install-taskmaster.toml b/.gemini/commands/tm/install-taskmaster.toml new file mode 100644 index 0000000..d585538 --- /dev/null +++ b/.gemini/commands/tm/install-taskmaster.toml @@ -0,0 +1,120 @@ +description="Install TaskMaster" +prompt = """ +Check if Task Master is installed and install it if needed. + +This command helps you get Task Master set up globally on your system. + +## Detection and Installation Process + +1. **Check Current Installation** + ```bash + # Check if task-master command exists + which task-master || echo "Task Master not found" + + # Check npm global packages + npm list -g task-master-ai + ``` + +2. **System Requirements Check** + ```bash + # Verify Node.js is installed + node --version + + # Verify npm is installed + npm --version + + # Check Node version (need 16+) + ``` + +3. **Install Task Master Globally** + If not installed, run: + ```bash + npm install -g task-master-ai + ``` + +4. **Verify Installation** + ```bash + # Check version + task-master --version + + # Verify command is available + which task-master + ``` + +5. **Initial Setup** + ```bash + # Initialize in current directory + task-master init + ``` + +6. **Configure AI Provider** + Ensure you have at least one AI provider API key set: + ```bash + # Check current configuration + task-master models --status + + # If no API keys found, guide setup + echo "You'll need at least one API key:" + echo "- ANTHROPIC_API_KEY for Claude" + echo "- OPENAI_API_KEY for GPT models" + echo "- PERPLEXITY_API_KEY for research" + echo "" + echo "Set them in your shell profile or .env file" + ``` + +7. 
**Quick Test** + ```bash + # Create a test PRD + echo "Build a simple hello world API" > test-prd.txt + + # Try parsing it + task-master parse-prd test-prd.txt -n 3 + ``` + +## Troubleshooting + +If installation fails: + +**Permission Errors:** +```bash +# Try with sudo (macOS/Linux) +sudo npm install -g task-master-ai + +# Or fix npm permissions +npm config set prefix ~/.npm-global +export PATH=~/.npm-global/bin:$PATH +``` + +**Network Issues:** +```bash +# Use different registry +npm install -g task-master-ai --registry https://registry.npmjs.org/ +``` + +**Node Version Issues:** +```bash +# Install Node 20+ via nvm +curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.0/install.sh | bash +nvm install 20 +nvm use 20 +``` + +## Success Confirmation + +Once installed, you should see: +``` +✅ Task Master installed +✅ Command 'task-master' available globally +✅ AI provider configured +✅ Ready to use slash commands! + +Try: /taskmaster:init your-prd.md +``` + +## Next Steps + +After installation: +1. Run `/taskmaster:status` to verify setup +2. Configure AI providers with `/taskmaster:setup-models` +3. Start using Task Master commands! +""" diff --git a/.gemini/commands/tm/learn.toml b/.gemini/commands/tm/learn.toml new file mode 100644 index 0000000..cdf106c --- /dev/null +++ b/.gemini/commands/tm/learn.toml @@ -0,0 +1,106 @@ +description="Learn" +prompt = """ +Learn about Task Master capabilities through interactive exploration. + +Arguments: $ARGUMENTS + +## Interactive Task Master Learning + +Based on your input, I'll help you discover capabilities: + +### 1. **What are you trying to do?** + +If $ARGUMENTS contains: +- "start" / "begin" → Show project initialization workflows +- "manage" / "organize" → Show task management commands +- "automate" / "auto" → Show automation workflows +- "analyze" / "report" → Show analysis tools +- "fix" / "problem" → Show troubleshooting commands +- "fast" / "quick" → Show efficiency shortcuts + +### 2. 
**Intelligent Suggestions** + +Based on your project state: + +**No tasks yet?** +``` +You'll want to start with: +1. /project:task-master:init <prd-file> + → Creates tasks from requirements + +2. /project:task-master:parse-prd <file> + → Alternative task generation + +Try: /project:task-master:init demo-prd.md +``` + +**Have tasks?** +Let me analyze what you might need... +- Many pending tasks? → Learn sprint planning +- Complex tasks? → Learn task expansion +- Daily work? → Learn workflow automation + +### 3. **Command Discovery** + +**By Category:** +- 📋 Task Management: list, show, add, update, complete +- 🔄 Workflows: auto-implement, sprint-plan, daily-standup +- 🛠️ Utilities: check-health, complexity-report, sync-memory +- 🔍 Analysis: validate-deps, show dependencies + +**By Scenario:** +- "I want to see what to work on" → `/project:task-master:next` +- "I need to break this down" → `/project:task-master:expand <id>` +- "Show me everything" → `/project:task-master:status` +- "Just do it for me" → `/project:workflows:auto-implement` + +### 4. **Power User Patterns** + +**Command Chaining:** +``` +/project:task-master:next +/project:task-master:start <id> +/project:workflows:auto-implement +``` + +**Smart Filters:** +``` +/project:task-master:list pending high +/project:task-master:list blocked +/project:task-master:list 1-5 tree +``` + +**Automation:** +``` +/project:workflows:pipeline init → expand-all → sprint-plan +``` + +### 5. **Learning Path** + +Based on your experience level: + +**Beginner Path:** +1. init → Create project +2. status → Understand state +3. next → Find work +4. complete → Finish task + +**Intermediate Path:** +1. expand → Break down complex tasks +2. sprint-plan → Organize work +3. complexity-report → Understand difficulty +4. validate-deps → Ensure consistency + +**Advanced Path:** +1. pipeline → Chain operations +2. smart-flow → Context-aware automation +3. Custom commands → Extend the system + +### 6. 
**Try This Now** + +Based on what you asked about, try: +[Specific command suggestion based on $ARGUMENTS] + +Want to learn more about a specific command? +Type: /project:help <command-name> +""" diff --git a/.gemini/commands/tm/list-tasks-by-status.toml b/.gemini/commands/tm/list-tasks-by-status.toml new file mode 100644 index 0000000..9b2e063 --- /dev/null +++ b/.gemini/commands/tm/list-tasks-by-status.toml @@ -0,0 +1,42 @@ +description="List Tasks By Status" +prompt = """ +List tasks filtered by a specific status. + +Arguments: $ARGUMENTS + +Parse the status from arguments and list only tasks matching that status. + +## Status Options +- `pending` - Not yet started +- `in-progress` - Currently being worked on +- `done` - Completed +- `review` - Awaiting review +- `deferred` - Postponed +- `cancelled` - Cancelled + +## Execution + +Based on $ARGUMENTS, run: +```bash +task-master list --status=$ARGUMENTS +``` + +## Enhanced Display + +For the filtered results: +- Group by priority within the status +- Show time in current status +- Highlight tasks approaching deadlines +- Display blockers and dependencies +- Suggest next actions for each status group + +## Intelligent Insights + +Based on the status filter: +- **Pending**: Show recommended start order +- **In-Progress**: Display idle time warnings +- **Done**: Show newly unblocked tasks +- **Review**: Indicate review duration +- **Deferred**: Show reactivation criteria +- **Cancelled**: Display impact analysis +""" diff --git a/.gemini/commands/tm/list-tasks-with-subtasks.toml b/.gemini/commands/tm/list-tasks-with-subtasks.toml new file mode 100644 index 0000000..06f38cd --- /dev/null +++ b/.gemini/commands/tm/list-tasks-with-subtasks.toml @@ -0,0 +1,32 @@ +description="List Tasks With Subtasks" +prompt = """ +List all tasks including their subtasks in a hierarchical view. + +This command shows all tasks with their nested subtasks, providing a complete project overview. 
+ +## Execution + +Run the Task Master list command with subtasks flag: +```bash +task-master list --with-subtasks +``` + +## Enhanced Display + +I'll organize the output to show: +- Parent tasks with clear indicators +- Nested subtasks with proper indentation +- Status badges for quick scanning +- Dependencies and blockers highlighted +- Progress indicators for tasks with subtasks + +## Smart Filtering + +Based on the task hierarchy: +- Show completion percentage for parent tasks +- Highlight blocked subtask chains +- Group by functional areas +- Indicate critical path items + +This gives you a complete tree view of your project structure. +""" diff --git a/.gemini/commands/tm/list-tasks.toml b/.gemini/commands/tm/list-tasks.toml new file mode 100644 index 0000000..cac70f7 --- /dev/null +++ b/.gemini/commands/tm/list-tasks.toml @@ -0,0 +1,46 @@ +description="List Tasks" +prompt = """ +List tasks with intelligent argument parsing. + +Parse arguments to determine filters and display options: +- Status: pending, in-progress, done, review, deferred, cancelled +- Priority: high, medium, low (or priority:high) +- Special: subtasks, tree, dependencies, blocked +- IDs: Direct numbers (e.g., "1,3,5" or "1-5") +- Complex: "pending high" = pending AND high priority + +Arguments: $ARGUMENTS + +Let me parse your request intelligently: + +1. **Detect Filter Intent** + - If arguments contain status keywords → filter by status + - If arguments contain priority → filter by priority + - If arguments contain "subtasks" → include subtasks + - If arguments contain "tree" → hierarchical view + - If arguments contain numbers → show specific tasks + - If arguments contain "blocked" → show blocked tasks only + +2. 
**Smart Combinations** + Examples of what I understand: + - "pending high" → pending tasks with high priority + - "done today" → tasks completed today + - "blocked" → tasks with unmet dependencies + - "1-5" → tasks 1 through 5 + - "subtasks tree" → hierarchical view with subtasks + +3. **Execute Appropriate Query** + Based on parsed intent, run the most specific task-master command + +4. **Enhanced Display** + - Group by relevant criteria + - Show most important information first + - Use visual indicators for quick scanning + - Include relevant metrics + +5. **Intelligent Suggestions** + Based on what you're viewing, suggest next actions: + - Many pending? → Suggest priority order + - Many blocked? → Show dependency resolution + - Looking at specific tasks? → Show related tasks +""" diff --git a/.gemini/commands/tm/next-task.toml b/.gemini/commands/tm/next-task.toml new file mode 100644 index 0000000..eeb0d1b --- /dev/null +++ b/.gemini/commands/tm/next-task.toml @@ -0,0 +1,69 @@ +description="Next Task" +prompt = """ +Intelligently determine and prepare the next action based on comprehensive context. + +This enhanced version of 'next' considers: +- Current task states +- Recent activity +- Time constraints +- Dependencies +- Your working patterns + +Arguments: $ARGUMENTS + +## Intelligent Next Action + +### 1. **Context Gathering** +Let me analyze the current situation: +- Active tasks (in-progress) +- Recently completed tasks +- Blocked tasks +- Time since last activity +- Arguments provided: $ARGUMENTS + +### 2. **Smart Decision Tree** + +**If you have an in-progress task:** +- Has it been idle > 2 hours? → Suggest resuming or switching +- Near completion? → Show remaining steps +- Blocked? → Find alternative task + +**If no in-progress tasks:** +- Unblocked high-priority tasks? → Start highest +- Complex tasks need breakdown? → Suggest expansion +- All tasks blocked? 
→ Show dependency resolution + +**Special arguments handling:** +- "quick" → Find task < 2 hours +- "easy" → Find low complexity task +- "important" → Find high priority regardless of complexity +- "continue" → Resume last worked task + +### 3. **Preparation Workflow** + +Based on selected task: +1. Show full context and history +2. Set up development environment +3. Run relevant tests +4. Open related files +5. Show similar completed tasks +6. Estimate completion time + +### 4. **Alternative Suggestions** + +Always provide options: +- Primary recommendation +- Quick alternative (< 1 hour) +- Strategic option (unblocks most tasks) +- Learning option (new technology/skill) + +### 5. **Workflow Integration** + +Seamlessly connect to: +- `/project:task-master:start [selected]` +- `/project:workflows:auto-implement` +- `/project:task-master:expand` (if complex) +- `/project:utils:complexity-report` (if unsure) + +The goal: Zero friction from decision to implementation. +""" diff --git a/.gemini/commands/tm/parse-prd-with-research.toml b/.gemini/commands/tm/parse-prd-with-research.toml new file mode 100644 index 0000000..f3332a7 --- /dev/null +++ b/.gemini/commands/tm/parse-prd-with-research.toml @@ -0,0 +1,51 @@ +description="Parse PRD With Research" +prompt = """ +Parse PRD with enhanced research mode for better task generation. + +Arguments: $ARGUMENTS (PRD file path) + +## Research-Enhanced Parsing + +Uses the research AI provider (typically Perplexity) for more comprehensive task generation with current best practices. + +## Execution + +```bash +task-master parse-prd --input=$ARGUMENTS --research +``` + +## Research Benefits + +1. **Current Best Practices** + - Latest framework patterns + - Security considerations + - Performance optimizations + - Accessibility requirements + +2. **Technical Deep Dive** + - Implementation approaches + - Library recommendations + - Architecture patterns + - Testing strategies + +3. 
**Comprehensive Coverage** + - Edge cases consideration + - Error handling tasks + - Monitoring setup + - Deployment tasks + +## Enhanced Output + +Research mode typically: +- Generates more detailed tasks +- Includes industry standards +- Adds compliance considerations +- Suggests modern tooling + +## When to Use + +- New technology domains +- Complex requirements +- Regulatory compliance needed +- Best practices crucial +""" diff --git a/.gemini/commands/tm/parse-prd.toml b/.gemini/commands/tm/parse-prd.toml new file mode 100644 index 0000000..ef3d4f7 --- /dev/null +++ b/.gemini/commands/tm/parse-prd.toml @@ -0,0 +1,52 @@ +description="Parse PRD" +prompt = """ +Parse a PRD document to generate tasks. + +Arguments: $ARGUMENTS (PRD file path) + +## Intelligent PRD Parsing + +Analyzes your requirements document and generates a complete task breakdown. + +## Execution + +```bash +task-master parse-prd --input=$ARGUMENTS +``` + +## Parsing Process + +1. **Document Analysis** + - Extract key requirements + - Identify technical components + - Detect dependencies + - Estimate complexity + +2. **Task Generation** + - Create 10-15 tasks by default + - Include implementation tasks + - Add testing tasks + - Include documentation tasks + - Set logical dependencies + +3. **Smart Enhancements** + - Group related functionality + - Set appropriate priorities + - Add acceptance criteria + - Include test strategies + +## Options + +Parse arguments for modifiers: +- Number after filename → `--num-tasks` +- `research` → Use research mode +- `comprehensive` → Generate more tasks + +## Post-Generation + +After parsing: +1. Display task summary +2. Show dependency graph +3. Suggest task expansion for complex items +4. 
Recommend sprint planning +""" diff --git a/.gemini/commands/tm/project-status.toml b/.gemini/commands/tm/project-status.toml new file mode 100644 index 0000000..ffa6e36 --- /dev/null +++ b/.gemini/commands/tm/project-status.toml @@ -0,0 +1,67 @@ +description="Project Status" +prompt = """ +Enhanced status command with comprehensive project insights. + +Arguments: $ARGUMENTS + +## Intelligent Status Overview + +### 1. **Executive Summary** +Quick dashboard view: +- 🏃 Active work (in-progress tasks) +- 📊 Progress metrics (% complete, velocity) +- 🚧 Blockers and risks +- ⏱️ Time analysis (estimated vs actual) +- 🎯 Sprint/milestone progress + +### 2. **Contextual Analysis** + +Based on $ARGUMENTS, focus on: +- "sprint" → Current sprint progress and burndown +- "blocked" → Dependency chains and resolution paths +- "team" → Task distribution and workload +- "timeline" → Schedule adherence and projections +- "risk" → High complexity or overdue items + +### 3. **Smart Insights** + +**Workflow Health:** +- Idle tasks (in-progress > 24h without updates) +- Bottlenecks (multiple tasks waiting on same dependency) +- Quick wins (low complexity, high impact) + +**Predictive Analytics:** +- Completion projections based on velocity +- Risk of missing deadlines +- Recommended task order for optimal flow + +### 4. **Visual Intelligence** + +Dynamic visualization based on data: +``` +Sprint Progress: ████████░░ 80% (16/20 tasks) +Velocity Trend: ↗️ +15% this week +Blocked Tasks: 🔴 3 critical path items + +Priority Distribution: +High: ████████ 8 tasks (2 blocked) +Medium: ████░░░░ 4 tasks +Low: ██░░░░░░ 2 tasks +``` + +### 5. **Actionable Recommendations** + +Based on analysis: +1. **Immediate actions** (unblock critical path) +2. **Today's focus** (optimal task sequence) +3. **Process improvements** (recurring patterns) +4. **Resource needs** (skills, time, dependencies) + +### 6. 
**Historical Context** + +Compare to previous periods: +- Velocity changes +- Pattern recognition +- Improvement areas +- Success patterns to repeat +""" diff --git a/.gemini/commands/tm/quick-install-taskmaster.toml b/.gemini/commands/tm/quick-install-taskmaster.toml new file mode 100644 index 0000000..f405f73 --- /dev/null +++ b/.gemini/commands/tm/quick-install-taskmaster.toml @@ -0,0 +1,25 @@ +description="Quick Install TaskMaster" +prompt = """ +Quick install Task Master globally if not already installed. + +Execute this streamlined installation: + +```bash +# Check and install in one command +task-master --version 2>/dev/null || npm install -g task-master-ai + +# Verify installation +task-master --version + +# Quick setup check +task-master models --status || echo "Note: You'll need to set up an AI provider API key" +``` + +If you see "command not found" after installation, you may need to: +1. Restart your terminal +2. Or add npm global bin to PATH: `export PATH=$(npm bin -g):$PATH` + +Once installed, you can use all the Task Master commands! + +Quick test: Run `/taskmaster:help` to see all available commands. +""" diff --git a/.gemini/commands/tm/remove-all-subtasks.toml b/.gemini/commands/tm/remove-all-subtasks.toml new file mode 100644 index 0000000..cedea32 --- /dev/null +++ b/.gemini/commands/tm/remove-all-subtasks.toml @@ -0,0 +1,96 @@ +description="Remove All Subtasks" +prompt = """ +Clear all subtasks from all tasks globally. + +## Global Subtask Clearing + +Remove all subtasks across the entire project. Use with extreme caution. + +## Execution + +```bash +task-master clear-subtasks --all +``` + +## Pre-Clear Analysis + +1. **Project-Wide Summary** + ``` + Global Subtask Summary + ━━━━━━━━━━━━━━━━━━━━ + Total parent tasks: 12 + Total subtasks: 47 + - Completed: 15 + - In-progress: 8 + - Pending: 24 + + Work at risk: ~120 hours + ``` + +2. 
**Critical Warnings** + - In-progress subtasks that will lose work + - Completed subtasks with valuable history + - Complex dependency chains + - Integration test results + +## Double Confirmation + +``` +⚠️ DESTRUCTIVE OPERATION WARNING ⚠️ +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +This will remove ALL 47 subtasks from your project +Including 8 in-progress and 15 completed subtasks + +This action CANNOT be undone + +Type 'CLEAR ALL SUBTASKS' to confirm: +``` + +## Smart Safeguards + +- Require explicit confirmation phrase +- Create automatic backup +- Log all removed data +- Option to export first + +## Use Cases + +Valid reasons for global clear: +- Project restructuring +- Major pivot in approach +- Starting fresh breakdown +- Switching to different task organization + +## Process + +1. Full project analysis +2. Create backup file +3. Show detailed impact +4. Require confirmation +5. Execute removal +6. Generate summary report + +## Alternative Suggestions + +Before clearing all: +- Export subtasks to file +- Clear only pending subtasks +- Clear by task category +- Archive instead of delete + +## Post-Clear Report + +``` +Global Subtask Clear Complete +━━━━━━━━━━━━━━━━━━━━━━━━━━━ +Removed: 47 subtasks from 12 tasks +Backup saved: .taskmaster/backup/subtasks-20240115.json +Parent tasks updated: 12 +Time estimates adjusted: Yes + +Next steps: +- Review updated task list +- Re-expand complex tasks as needed +- Check project timeline +``` +""" diff --git a/.gemini/commands/tm/remove-dependency.toml b/.gemini/commands/tm/remove-dependency.toml new file mode 100644 index 0000000..f52b548 --- /dev/null +++ b/.gemini/commands/tm/remove-dependency.toml @@ -0,0 +1,65 @@ +description="Remove Dependency" +prompt = """ +Remove a dependency between tasks. + +Arguments: $ARGUMENTS + +Parse the task IDs to remove dependency relationship. + +## Removing Dependencies + +Removes a dependency relationship, potentially unblocking tasks. 
+ +## Argument Parsing + +Parse natural language or IDs: +- "remove dependency between 5 and 3" +- "5 no longer needs 3" +- "unblock 5 from 3" +- "5 3" → remove dependency of 5 on 3 + +## Execution + +```bash +task-master remove-dependency --id=<task-id> --depends-on=<dependency-id> +``` + +## Pre-Removal Checks + +1. **Verify dependency exists** +2. **Check impact on task flow** +3. **Warn if it breaks logical sequence** +4. **Show what will be unblocked** + +## Smart Analysis + +Before removing: +- Show why dependency might have existed +- Check if removal makes tasks executable +- Verify no critical path disruption +- Suggest alternative dependencies + +## Post-Removal + +After removing: +1. Show updated task status +2. List newly unblocked tasks +3. Update project timeline +4. Suggest next actions + +## Safety Features + +- Confirm if removing critical dependency +- Show tasks that become immediately actionable +- Warn about potential issues +- Keep removal history + +## Example + +``` +/taskmaster:remove-dependency 5 from 3 +→ Removed: Task #5 no longer depends on #3 +→ Task #5 is now UNBLOCKED and ready to start +→ Warning: Consider if #5 still needs #2 completed first +``` +""" diff --git a/.gemini/commands/tm/remove-subtask.toml b/.gemini/commands/tm/remove-subtask.toml new file mode 100644 index 0000000..8da3d30 --- /dev/null +++ b/.gemini/commands/tm/remove-subtask.toml @@ -0,0 +1,87 @@ +description="Remove Subtask" +prompt = """ +Remove a subtask from its parent task. + +Arguments: $ARGUMENTS + +Parse subtask ID to remove, with option to convert to standalone task. + +## Removing Subtasks + +Remove a subtask and optionally convert it back to a standalone task. + +## Argument Parsing + +- "remove subtask 5.1" +- "delete 5.1" +- "convert 5.1 to task" → remove and convert +- "5.1 standalone" → convert to standalone + +## Execution Options + +### 1. Delete Subtask +```bash +task-master remove-subtask --id=<parentId.subtaskId> +``` + +### 2. 
Convert to Standalone +```bash +task-master remove-subtask --id=<parentId.subtaskId> --convert +``` + +## Pre-Removal Checks + +1. **Validate Subtask** + - Verify subtask exists + - Check completion status + - Review dependencies + +2. **Impact Analysis** + - Other subtasks that depend on it + - Parent task implications + - Data that will be lost + +## Removal Process + +### For Deletion: +1. Confirm if subtask has work done +2. Update parent task estimates +3. Remove subtask and its data +4. Clean up dependencies + +### For Conversion: +1. Assign new standalone task ID +2. Preserve all task data +3. Update dependency references +4. Maintain task history + +## Smart Features + +- Warn if subtask is in-progress +- Show impact on parent task +- Preserve important data +- Update related estimates + +## Example Flows + +``` +/taskmaster:remove-subtask 5.1 +→ Warning: Subtask #5.1 is in-progress +→ This will delete all subtask data +→ Parent task #5 will be updated +Confirm deletion? (y/n) + +/taskmaster:remove-subtask 5.1 convert +→ Converting subtask #5.1 to standalone task #89 +→ Preserved: All task data and history +→ Updated: 2 dependency references +→ New task #89 is now independent +``` + +## Post-Removal + +- Update parent task status +- Recalculate estimates +- Show updated hierarchy +- Suggest next actions +""" diff --git a/.gemini/commands/tm/remove-subtasks.toml b/.gemini/commands/tm/remove-subtasks.toml new file mode 100644 index 0000000..84943ae --- /dev/null +++ b/.gemini/commands/tm/remove-subtasks.toml @@ -0,0 +1,89 @@ +description="Remove Subtasks" +prompt = """ +Clear all subtasks from a specific task. + +Arguments: $ARGUMENTS (task ID) + +Remove all subtasks from a parent task at once. + +## Clearing Subtasks + +Bulk removal of all subtasks from a parent task. + +## Execution + +```bash +task-master remove-subtasks --id=$ARGUMENTS +``` + +## Pre-Clear Analysis + +1. 
**Subtask Summary** + - Number of subtasks + - Completion status of each + - Work already done + - Dependencies affected + +2. **Impact Assessment** + - Data that will be lost + - Dependencies to be removed + - Effect on project timeline + - Parent task implications + +## Confirmation Required + +``` +Remove Subtasks Confirmation +━━━━━━━━━━━━━━━━━━━━━━━━━ +Parent Task: #5 "Implement user authentication" +Subtasks to remove: 4 +- #5.1 "Setup auth framework" (done) +- #5.2 "Create login form" (in-progress) +- #5.3 "Add validation" (pending) +- #5.4 "Write tests" (pending) + +⚠️ This will permanently delete all subtask data +Continue? (y/n) +``` + +## Smart Features + +- Option to convert to standalone tasks +- Backup task data before clearing +- Preserve completed work history +- Update parent task appropriately + +## Process + +1. List all subtasks for confirmation +2. Check for in-progress work +3. Remove all subtasks +4. Update parent task +5. Clean up dependencies + +## Alternative Options + +Suggest alternatives: +- Convert important subtasks to tasks +- Keep completed subtasks +- Archive instead of delete +- Export subtask data first + +## Post-Clear + +- Show updated parent task +- Recalculate time estimates +- Update task complexity +- Suggest next steps + +## Example + +``` +/taskmaster:remove-subtasks 5 +→ Found 4 subtasks to remove +→ Warning: Subtask #5.2 is in-progress +→ Cleared all subtasks from task #5 +→ Updated parent task estimates +→ Suggestion: Consider re-expanding with better breakdown +``` +""" diff --git a/.gemini/commands/tm/remove-task.toml b/.gemini/commands/tm/remove-task.toml new file mode 100644 index 0000000..9059dba --- /dev/null +++ b/.gemini/commands/tm/remove-task.toml @@ -0,0 +1,110 @@ +description="Remove Task" +prompt = """ +Remove a task permanently from the project. + +Arguments: $ARGUMENTS (task ID) + +Delete a task and handle all its relationships properly. 
+ +## Task Removal + +Permanently removes a task while maintaining project integrity. + +## Argument Parsing + +- "remove task 5" +- "delete 5" +- "5" → remove task 5 +- Can include "-y" for auto-confirm + +## Execution + +```bash +task-master remove-task --id=<id> [-y] +``` + +## Pre-Removal Analysis + +1. **Task Details** + - Current status + - Work completed + - Time invested + - Associated data + +2. **Relationship Check** + - Tasks that depend on this + - Dependencies this task has + - Subtasks that will be removed + - Blocking implications + +3. **Impact Assessment** + ``` + Task Removal Impact + ━━━━━━━━━━━━━━━━━━ + Task: #5 "Implement authentication" (in-progress) + Status: 60% complete (~8 hours work) + + Will affect: + - 3 tasks depend on this (will be blocked) + - Has 4 subtasks (will be deleted) + - Part of critical path + + ⚠️ This action cannot be undone + ``` + +## Smart Warnings + +- Warn if task is in-progress +- Show dependent tasks that will be blocked +- Highlight if part of critical path +- Note any completed work being lost + +## Removal Process + +1. Show comprehensive impact +2. Require confirmation (unless -y) +3. Update dependent task references +4. Remove task and subtasks +5. Clean up orphaned dependencies +6. Log removal with timestamp + +## Alternative Actions + +Suggest before deletion: +- Mark as cancelled instead +- Convert to documentation +- Archive task data +- Transfer work to another task + +## Post-Removal + +- List affected tasks +- Show broken dependencies +- Update project statistics +- Suggest dependency fixes +- Recalculate timeline + +## Example Flows + +``` +/taskmaster:remove-task 5 +→ Task #5 is in-progress with 8 hours logged +→ 3 other tasks depend on this +→ Suggestion: Mark as cancelled instead? +Remove anyway? 
(y/n) + +/taskmaster:remove-task 5 -y +→ Removed: Task #5 and 4 subtasks +→ Updated: 3 task dependencies +→ Warning: Tasks #7, #8, #9 now have missing dependency +→ Run /taskmaster:fix-dependencies to resolve +``` + +## Safety Features + +- Confirmation required +- Impact preview +- Removal logging +- Suggest alternatives +- No cascade delete of dependents +""" diff --git a/.gemini/commands/tm/setup-models.toml b/.gemini/commands/tm/setup-models.toml new file mode 100644 index 0000000..efc055c --- /dev/null +++ b/.gemini/commands/tm/setup-models.toml @@ -0,0 +1,54 @@ +description="Setup Models" +prompt = """ +Run interactive setup to configure AI models. + +## Interactive Model Configuration + +Guides you through setting up AI providers for Task Master. + +## Execution + +```bash +task-master models --setup +``` + +## Setup Process + +1. **Environment Check** + - Detect existing API keys + - Show current configuration + - Identify missing providers + +2. **Provider Selection** + - Choose main provider (required) + - Select research provider (recommended) + - Configure fallback (optional) + +3. **API Key Configuration** + - Prompt for missing keys + - Validate key format + - Test connectivity + - Save configuration + +## Smart Recommendations + +Based on your needs: +- **For best results**: Claude + Perplexity +- **Budget conscious**: GPT-3.5 + Perplexity +- **Maximum capability**: GPT-4 + Perplexity + Claude fallback + +## Configuration Storage + +Keys can be stored in: +1. Environment variables (recommended) +2. `.env` file in project +3. 
Global `.taskmaster/config` + +## Post-Setup + +After configuration: +- Test each provider +- Show usage examples +- Suggest next steps +- Verify parse-prd works +""" diff --git a/.gemini/commands/tm/show-task.toml b/.gemini/commands/tm/show-task.toml new file mode 100644 index 0000000..9ff339d --- /dev/null +++ b/.gemini/commands/tm/show-task.toml @@ -0,0 +1,85 @@ +description="Show Task" +prompt = """ +Show detailed task information with rich context and insights. + +Arguments: $ARGUMENTS + +## Enhanced Task Display + +Parse arguments to determine what to show and how. + +### 1. **Smart Task Selection** + +Based on $ARGUMENTS: +- Number → Show specific task with full context +- "current" → Show active in-progress task(s) +- "next" → Show recommended next task +- "blocked" → Show all blocked tasks with reasons +- "critical" → Show critical path tasks +- Multiple IDs → Comparative view + +### 2. **Contextual Information** + +For each task, intelligently include: + +**Core Details** +- Full task information (id, title, description, details) +- Current status with history +- Test strategy and acceptance criteria +- Priority and complexity analysis + +**Relationships** +- Dependencies (what it needs) +- Dependents (what needs it) +- Parent/subtask hierarchy +- Related tasks (similar work) + +**Time Intelligence** +- Created/updated timestamps +- Time in current status +- Estimated vs actual time +- Historical completion patterns + +### 3. **Visual Enhancements** + +``` +📋 Task #45: Implement User Authentication +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +Status: 🟡 in-progress (2 hours) +Priority: 🔴 High | Complexity: 73/100 + +Dependencies: ✅ #41, ✅ #42, ⏳ #43 (blocked) +Blocks: #46, #47, #52 + +Progress: ████████░░ 80% complete + +Recent Activity: +- 2h ago: Status changed to in-progress +- 4h ago: Dependency #42 completed +- Yesterday: Task expanded with 3 subtasks +``` + +### 4. 
**Intelligent Insights** + +Based on task analysis: +- **Risk Assessment**: Complexity vs time remaining +- **Bottleneck Analysis**: Is this blocking critical work? +- **Recommendation**: Suggested approach or concerns +- **Similar Tasks**: How others completed similar work + +### 5. **Action Suggestions** + +Context-aware next steps: +- If blocked → Show how to unblock +- If complex → Suggest expansion +- If in-progress → Show completion checklist +- If done → Show dependent tasks ready to start + +### 6. **Multi-Task View** + +When showing multiple tasks: +- Common dependencies +- Optimal completion order +- Parallel work opportunities +- Combined complexity analysis +""" diff --git a/.gemini/commands/tm/smart-workflow.toml b/.gemini/commands/tm/smart-workflow.toml new file mode 100644 index 0000000..2162259 --- /dev/null +++ b/.gemini/commands/tm/smart-workflow.toml @@ -0,0 +1,58 @@ +description="Smart Workflow" +prompt = """ +Execute an intelligent workflow based on current project state and recent commands. + +This command analyzes: +1. Recent commands you've run +2. Current project state +3. Time of day / day of week +4. Your working patterns + +Arguments: $ARGUMENTS + +## Intelligent Workflow Selection + +Based on context, I'll determine the best workflow: + +### Context Analysis +- Previous command executed +- Current task states +- Unfinished work from last session +- Your typical patterns + +### Smart Execution + +If last command was: +- `status` → Likely starting work → Run daily standup +- `complete` → Task finished → Find next task +- `list pending` → Planning → Suggest sprint planning +- `expand` → Breaking down work → Show complexity analysis +- `init` → New project → Show onboarding workflow + +If no recent commands: +- Morning? → Daily standup workflow +- Many pending tasks? → Sprint planning +- Tasks blocked? → Dependency resolution +- Friday? → Weekly review + +### Workflow Composition + +I'll chain appropriate commands: +1. 
Analyze current state +2. Execute primary workflow +3. Suggest follow-up actions +4. Prepare environment for coding + +### Learning Mode + +This command learns from your patterns: +- Track command sequences +- Note time preferences +- Remember common workflows +- Adapt to your style + +Example flows detected: +- Morning: standup → next → start +- After lunch: status → continue task +- End of day: complete → commit → status +""" diff --git a/.gemini/commands/tm/sync-readme.toml b/.gemini/commands/tm/sync-readme.toml new file mode 100644 index 0000000..32bf503 --- /dev/null +++ b/.gemini/commands/tm/sync-readme.toml @@ -0,0 +1,120 @@ +description="Sync README" +prompt = """ +Export tasks to README.md with professional formatting. + +Arguments: $ARGUMENTS + +Generate a well-formatted README with current task information. + +## README Synchronization + +Creates or updates README.md with beautifully formatted task information. + +## Argument Parsing + +Optional filters: +- "pending" → Only pending tasks +- "with-subtasks" → Include subtask details +- "by-priority" → Group by priority +- "sprint" → Current sprint only + +## Execution + +```bash +task-master sync-readme [--with-subtasks] [--status=<status>] +``` + +## README Generation + +### 1. **Project Header** +```markdown +# Project Name + +## 📋 Task Progress + +Last Updated: 2024-01-15 10:30 AM + +### Summary +- Total Tasks: 45 +- Completed: 15 (33%) +- In Progress: 5 (11%) +- Pending: 25 (56%) +``` + +### 2. **Task Sections** +Organized by status or priority: +- Progress indicators +- Task descriptions +- Dependencies noted +- Time estimates + +### 3. **Visual Elements** +- Progress bars +- Status badges +- Priority indicators +- Completion checkmarks + +## Smart Features + +1. **Intelligent Grouping** + - By feature area + - By sprint/milestone + - By assigned developer + - By priority + +2. **Progress Tracking** + - Overall completion + - Sprint velocity + - Burndown indication + - Time tracking + +3. 
**Formatting Options** + - GitHub-flavored markdown + - Task checkboxes + - Collapsible sections + - Table format available + +## Example Output + +```markdown +## 🚀 Current Sprint + +### In Progress +- [ ] 🔄 #5 **Implement user authentication** (60% complete) + - Dependencies: API design (#3 ✅) + - Subtasks: 4 (2 completed) + - Est: 8h / Spent: 5h + +### Pending (High Priority) +- [ ] ⚡ #8 **Create dashboard UI** + - Blocked by: #5 + - Complexity: High + - Est: 12h +``` + +## Customization + +Based on arguments: +- Include/exclude sections +- Detail level control +- Custom grouping +- Filter by criteria + +## Post-Sync + +After generation: +1. Show diff preview +2. Backup existing README +3. Write new content +4. Commit reminder +5. Update timestamp + +## Integration + +Works well with: +- Git workflows +- CI/CD pipelines +- Project documentation +- Team updates +- Client reports +""" diff --git a/.gemini/commands/tm/tm-main.toml b/.gemini/commands/tm/tm-main.toml new file mode 100644 index 0000000..4e9d922 --- /dev/null +++ b/.gemini/commands/tm/tm-main.toml @@ -0,0 +1,149 @@ +description="Task Master Main" +prompt = """ +# Task Master Command Reference + +Comprehensive command structure for Task Master integration with Claude Code. + +## Command Organization + +Commands are organized hierarchically to match Task Master's CLI structure while providing enhanced Claude Code integration. 
+ +## Project Setup & Configuration + +### `/taskmaster:init` +- `init-project` - Initialize new project (handles PRD files intelligently) +- `init-project-quick` - Quick setup with auto-confirmation (-y flag) + +### `/taskmaster:models` +- `view-models` - View current AI model configuration +- `setup-models` - Interactive model configuration +- `set-main` - Set primary generation model +- `set-research` - Set research model +- `set-fallback` - Set fallback model + +## Task Generation + +### `/taskmaster:parse-prd` +- `parse-prd` - Generate tasks from PRD document +- `parse-prd-with-research` - Enhanced parsing with research mode + +### `/taskmaster:generate` +- `generate-tasks` - Create individual task files from tasks.json + +## Task Management + +### `/taskmaster:list` +- `list-tasks` - Smart listing with natural language filters +- `list-tasks-with-subtasks` - Include subtasks in hierarchical view +- `list-tasks-by-status` - Filter by specific status + +### `/taskmaster:set-status` +- `to-pending` - Reset task to pending +- `to-in-progress` - Start working on task +- `to-done` - Mark task complete +- `to-review` - Submit for review +- `to-deferred` - Defer task +- `to-cancelled` - Cancel task + +### `/taskmaster:sync-readme` +- `sync-readme` - Export tasks to README.md with formatting + +### `/taskmaster:update` +- `update-task` - Update tasks with natural language +- `update-tasks-from-id` - Update multiple tasks from a starting point +- `update-single-task` - Update specific task + +### `/taskmaster:add-task` +- `add-task` - Add new task with AI assistance + +### `/taskmaster:remove-task` +- `remove-task` - Remove task with confirmation + +## Subtask Management + +### `/taskmaster:add-subtask` +- `add-subtask` - Add new subtask to parent +- `convert-task-to-subtask` - Convert existing task to subtask + +### `/taskmaster:remove-subtask` +- `remove-subtask` - Remove subtask (with optional conversion) + +### `/taskmaster:clear-subtasks` +- `clear-subtasks` - 
Clear subtasks from specific task +- `clear-all-subtasks` - Clear all subtasks globally + +## Task Analysis & Breakdown + +### `/taskmaster:analyze-complexity` +- `analyze-complexity` - Analyze and generate expansion recommendations + +### `/taskmaster:complexity-report` +- `complexity-report` - Display complexity analysis report + +### `/taskmaster:expand` +- `expand-task` - Break down specific task +- `expand-all-tasks` - Expand all eligible tasks +- `with-research` - Enhanced expansion + +## Task Navigation + +### `/taskmaster:next` +- `next-task` - Intelligent next task recommendation + +### `/taskmaster:show` +- `show-task` - Display detailed task information + +### `/taskmaster:status` +- `project-status` - Comprehensive project dashboard + +## Dependency Management + +### `/taskmaster:add-dependency` +- `add-dependency` - Add task dependency + +### `/taskmaster:remove-dependency` +- `remove-dependency` - Remove task dependency + +### `/taskmaster:validate-dependencies` +- `validate-dependencies` - Check for dependency issues + +### `/taskmaster:fix-dependencies` +- `fix-dependencies` - Automatically fix dependency problems + +## Workflows & Automation + +### `/taskmaster:workflows` +- `smart-workflow` - Context-aware intelligent workflow execution +- `command-pipeline` - Chain multiple commands together +- `auto-implement-tasks` - Advanced auto-implementation with code generation + +## Utilities + +### `/taskmaster:utils` +- `analyze-project` - Deep project analysis and insights + +### `/taskmaster:setup` +- `install-taskmaster` - Comprehensive installation guide +- `quick-install-taskmaster` - One-line global installation + +## Usage Patterns + +### Natural Language +Most commands accept natural language arguments: +``` +/taskmaster:add-task create user authentication system +/taskmaster:update mark all API tasks as high priority +/taskmaster:list show blocked tasks +``` + +### ID-Based Commands +Commands requiring IDs intelligently parse from $ARGUMENTS: 
+``` +/taskmaster:show 45 +/taskmaster:expand 23 +/taskmaster:set-status/to-done 67 +``` + +### Smart Defaults +Commands provide intelligent defaults and suggestions based on context. +""" diff --git a/.gemini/commands/tm/to-cancelled.toml b/.gemini/commands/tm/to-cancelled.toml new file mode 100644 index 0000000..34cef40 --- /dev/null +++ b/.gemini/commands/tm/to-cancelled.toml @@ -0,0 +1,58 @@ +description="To Cancelled" +prompt = """ +Cancel a task permanently. + +Arguments: $ARGUMENTS (task ID) + +## Cancelling a Task + +This status indicates a task is no longer needed and won't be completed. + +## Valid Reasons for Cancellation + +- Requirements changed +- Feature deprecated +- Duplicate of another task +- Strategic pivot +- Technical approach invalidated + +## Pre-Cancellation Checks + +1. Confirm no critical dependencies +2. Check for partial implementation +3. Verify cancellation rationale +4. Document lessons learned + +## Execution + +```bash +task-master set-status --id=$ARGUMENTS --status=cancelled +``` + +## Cancellation Impact + +When cancelling: +1. **Dependency Updates** + - Notify dependent tasks + - Update project scope + - Recalculate timelines + +2. **Clean-up Actions** + - Remove related branches + - Archive any work done + - Update documentation + - Close related issues + +3. **Learning Capture** + - Document why cancelled + - Note what was learned + - Update estimation models + - Prevent future duplicates + +## Historical Preservation + +- Keep for reference +- Tag with cancellation reason +- Link to replacement if any +- Maintain audit trail +""" diff --git a/.gemini/commands/tm/to-deferred.toml b/.gemini/commands/tm/to-deferred.toml new file mode 100644 index 0000000..1eb5686 --- /dev/null +++ b/.gemini/commands/tm/to-deferred.toml @@ -0,0 +1,50 @@ +description="To Deferred" +prompt = """ +Defer a task for later consideration. 
+ +Arguments: $ARGUMENTS (task ID) + +## Deferring a Task + +This status indicates a task is valid but not currently actionable or prioritized. + +## Valid Reasons for Deferral + +- Waiting for external dependencies +- Reprioritized for future sprint +- Blocked by technical limitations +- Resource constraints +- Strategic timing considerations + +## Execution + +```bash +task-master set-status --id=$ARGUMENTS --status=deferred +``` + +## Deferral Management + +When deferring: +1. **Document Reason** + - Capture why it's being deferred + - Set reactivation criteria + - Note any partial work completed + +2. **Impact Analysis** + - Check dependent tasks + - Update project timeline + - Notify affected stakeholders + +3. **Future Planning** + - Set review reminders + - Tag for specific milestone + - Preserve context for reactivation + - Link to blocking issues + +## Smart Tracking + +- Monitor deferral duration +- Alert when criteria met +- Prevent scope creep +- Regular review cycles +""" diff --git a/.gemini/commands/tm/to-done.toml b/.gemini/commands/tm/to-done.toml new file mode 100644 index 0000000..dc599e3 --- /dev/null +++ b/.gemini/commands/tm/to-done.toml @@ -0,0 +1,47 @@ +description="To Done" +prompt = """ +Mark a task as completed. + +Arguments: $ARGUMENTS (task ID) + +## Completing a Task + +This command validates task completion and updates project state intelligently. + +## Pre-Completion Checks + +1. Verify test strategy was followed +2. Check if all subtasks are complete +3. Validate acceptance criteria met +4. Ensure code is committed + +## Execution + +```bash +task-master set-status --id=$ARGUMENTS --status=done +``` + +## Post-Completion Actions + +1. **Update Dependencies** + - Identify newly unblocked tasks + - Update sprint progress + - Recalculate project timeline + +2. **Documentation** + - Generate completion summary + - Update CLAUDE.md with learnings + - Log implementation approach + +3. 
**Next Steps** + - Show newly available tasks + - Suggest logical next task + - Update velocity metrics + +## Celebration & Learning + +- Show impact of completion +- Display unblocked work +- Recognize achievement +- Capture lessons learned +""" diff --git a/.gemini/commands/tm/to-in-progress.toml b/.gemini/commands/tm/to-in-progress.toml new file mode 100644 index 0000000..e66bbaa --- /dev/null +++ b/.gemini/commands/tm/to-in-progress.toml @@ -0,0 +1,39 @@ +description="To In Progress" +prompt = """ +Start working on a task by setting its status to in-progress. + +Arguments: $ARGUMENTS (task ID) + +## Starting Work on Task + +This command does more than just change status - it prepares your environment for productive work. + +## Pre-Start Checks + +1. Verify dependencies are met +2. Check if another task is already in-progress +3. Ensure task details are complete +4. Validate test strategy exists + +## Execution + +```bash +task-master set-status --id=$ARGUMENTS --status=in-progress +``` + +## Environment Setup + +After setting to in-progress: +1. Create/checkout appropriate git branch +2. Open relevant documentation +3. Set up test watchers if applicable +4. Display task details and acceptance criteria +5. Show similar completed tasks for reference + +## Smart Suggestions + +- Estimated completion time based on complexity +- Related files from similar tasks +- Potential blockers to watch for +- Recommended first steps +""" diff --git a/.gemini/commands/tm/to-pending.toml b/.gemini/commands/tm/to-pending.toml new file mode 100644 index 0000000..49ff78b --- /dev/null +++ b/.gemini/commands/tm/to-pending.toml @@ -0,0 +1,35 @@ +description="To Pending" +prompt = """ +Set a task's status to pending. 
+ +Arguments: $ARGUMENTS (task ID) + +## Setting Task to Pending + +This moves a task back to the pending state, useful for: +- Resetting erroneously started tasks +- Deferring work that was prematurely begun +- Reorganizing sprint priorities + +## Execution + +```bash +task-master set-status --id=$ARGUMENTS --status=pending +``` + +## Validation + +Before setting to pending: +- Warn if task is currently in-progress +- Check if this will block other tasks +- Suggest documenting why it's being reset +- Preserve any work already done + +## Smart Actions + +After setting to pending: +- Update sprint planning if needed +- Notify about freed resources +- Suggest priority reassessment +- Log the status change with context +""" diff --git a/.gemini/commands/tm/to-review.toml b/.gemini/commands/tm/to-review.toml new file mode 100644 index 0000000..b0316c6 --- /dev/null +++ b/.gemini/commands/tm/to-review.toml @@ -0,0 +1,43 @@ +description="To Review" +prompt = """ +Set a task's status to review. + +Arguments: $ARGUMENTS (task ID) + +## Marking Task for Review + +This status indicates work is complete but needs verification before final approval. + +## When to Use Review Status + +- Code complete but needs peer review +- Implementation done but needs testing +- Documentation written but needs proofreading +- Design complete but needs stakeholder approval + +## Execution + +```bash +task-master set-status --id=$ARGUMENTS --status=review +``` + +## Review Preparation + +When setting to review: +1. **Generate Review Checklist** + - Link to PR/MR if applicable + - Highlight key changes + - Note areas needing attention + - Include test results + +2. **Documentation** + - Update task with review notes + - Link relevant artifacts + - Specify reviewers if known + +3. 
**Smart Actions** + - Create review reminders + - Track review duration + - Suggest reviewers based on expertise + - Prepare rollback plan if needed +""" diff --git a/.gemini/commands/tm/update-single-task.toml b/.gemini/commands/tm/update-single-task.toml new file mode 100644 index 0000000..01ad1ed --- /dev/null +++ b/.gemini/commands/tm/update-single-task.toml @@ -0,0 +1,122 @@ +description="Update Single Task" +prompt = """ +Update a single specific task with new information. + +Arguments: $ARGUMENTS + +Parse task ID and update details. + +## Single Task Update + +Precisely update one task with AI assistance to maintain consistency. + +## Argument Parsing + +Natural language updates: +- "5: add caching requirement" +- "update 5 to include error handling" +- "task 5 needs rate limiting" +- "5 change priority to high" + +## Execution + +```bash +task-master update-task --id=<id> --prompt="<context>" +``` + +## Update Types + +### 1. **Content Updates** +- Enhance description +- Add requirements +- Clarify details +- Update acceptance criteria + +### 2. **Metadata Updates** +- Change priority +- Adjust time estimates +- Update complexity +- Modify dependencies + +### 3. **Strategic Updates** +- Revise approach +- Change test strategy +- Update implementation notes +- Adjust subtask needs + +## AI-Powered Updates + +The AI: +1. **Understands Context** + - Reads current task state + - Identifies update intent + - Maintains consistency + - Preserves important info + +2. **Applies Changes** + - Updates relevant fields + - Keeps style consistent + - Adds without removing + - Enhances clarity + +3. 
**Validates Results** + - Checks coherence + - Verifies completeness + - Maintains relationships + - Suggests related updates + +## Example Updates + +``` +/taskmaster:update/single 5: add rate limiting +→ Updating Task #5: "Implement API endpoints" + +Current: Basic CRUD endpoints +Adding: Rate limiting requirements + +Updated sections: +✓ Description: Added rate limiting mention +✓ Details: Added specific limits (100/min) +✓ Test Strategy: Added rate limit tests +✓ Complexity: Increased from 5 to 6 +✓ Time Estimate: Increased by 2 hours + +Suggestion: Also update task #6 (API Gateway) for consistency? +``` + +## Smart Features + +1. **Incremental Updates** + - Adds without overwriting + - Preserves work history + - Tracks what changed + - Shows diff view + +2. **Consistency Checks** + - Related task alignment + - Subtask compatibility + - Dependency validity + - Timeline impact + +3. **Update History** + - Timestamp changes + - Track who/what updated + - Reason for update + - Previous versions + +## Field-Specific Updates + +Quick syntax for specific fields: +- "5 priority:high" → Update priority only +- "5 add-time:4h" → Add to time estimate +- "5 status:review" → Change status +- "5 depends:3,4" → Add dependencies + +## Post-Update + +- Show updated task +- Highlight changes +- Check related tasks +- Update suggestions +- Timeline adjustments +""" diff --git a/.gemini/commands/tm/update-task.toml b/.gemini/commands/tm/update-task.toml new file mode 100644 index 0000000..95f0024 --- /dev/null +++ b/.gemini/commands/tm/update-task.toml @@ -0,0 +1,75 @@ +description="Update Task" +prompt = """ +Update tasks with intelligent field detection and bulk operations. + +Arguments: $ARGUMENTS + +## Intelligent Task Updates + +Parse arguments to determine update intent and execute smartly. + +### 1. 
**Natural Language Processing** + +Understand update requests like: +- "mark 23 as done" → Update status to done +- "increase priority of 45" → Set priority to high +- "add dependency on 12 to task 34" → Add dependency +- "tasks 20-25 need review" → Bulk status update +- "all API tasks high priority" → Pattern-based update + +### 2. **Smart Field Detection** + +Automatically detect what to update: +- Status keywords: done, complete, start, pause, review +- Priority changes: urgent, high, low, deprioritize +- Dependency updates: depends on, blocks, after +- Assignment: assign to, owner, responsible +- Time: estimate, spent, deadline + +### 3. **Bulk Operations** + +Support for multiple task updates: +``` +Examples: +- "complete tasks 12, 15, 18" +- "all pending auth tasks to in-progress" +- "increase priority for tasks blocking 45" +- "defer all documentation tasks" +``` + +### 4. **Contextual Validation** + +Before updating, check: +- Status transitions are valid +- Dependencies don't create cycles +- Priority changes make sense +- Bulk updates won't break project flow + +Show preview: +``` +Update Preview: +───────────────── +Tasks to update: #23, #24, #25 +Change: status → in-progress +Impact: Will unblock tasks #30, #31 +Warning: Task #24 has unmet dependencies +``` + +### 5. **Smart Suggestions** + +Based on update: +- Completing task? → Show newly unblocked tasks +- Changing priority? → Show impact on sprint +- Adding dependency? → Check for conflicts +- Bulk update? → Show summary of changes + +### 6. **Workflow Integration** + +After updates: +- Auto-update dependent task states +- Trigger status recalculation +- Update sprint/milestone progress +- Log changes with context + +Result: Flexible, intelligent task updates with safety checks. 
+""" diff --git a/.gemini/commands/tm/update-tasks-from-id.toml b/.gemini/commands/tm/update-tasks-from-id.toml new file mode 100644 index 0000000..7a1e58e --- /dev/null +++ b/.gemini/commands/tm/update-tasks-from-id.toml @@ -0,0 +1,111 @@ +description="Update Tasks From ID" +prompt = """ +Update multiple tasks starting from a specific ID. + +Arguments: $ARGUMENTS + +Parse starting task ID and update context. + +## Bulk Task Updates + +Update multiple related tasks based on new requirements or context changes. + +## Argument Parsing + +- "from 5: add security requirements" +- "5 onwards: update API endpoints" +- "starting at 5: change to use new framework" + +## Execution + +```bash +task-master update --from=<id> --prompt="<context>" +``` + +## Update Process + +### 1. **Task Selection** +Starting from specified ID: +- Include the task itself +- Include all dependent tasks +- Include related subtasks +- Smart boundary detection + +### 2. **Context Application** +AI analyzes the update context and: +- Identifies what needs changing +- Maintains consistency +- Preserves completed work +- Updates related information + +### 3. **Intelligent Updates** +- Modify descriptions appropriately +- Update test strategies +- Adjust time estimates +- Revise dependencies if needed + +## Smart Features + +1. **Scope Detection** + - Find natural task groupings + - Identify related features + - Stop at logical boundaries + - Avoid over-updating + +2. **Consistency Maintenance** + - Keep naming conventions + - Preserve relationships + - Update cross-references + - Maintain task flow + +3. **Change Preview** + ``` + Bulk Update Preview + ━━━━━━━━━━━━━━━━━━ + Starting from: Task #5 + Tasks to update: 8 tasks + 12 subtasks + + Context: "add security requirements" + + Changes will include: + - Add security sections to descriptions + - Update test strategies for security + - Add security-related subtasks where needed + - Adjust time estimates (+20% average) + + Continue? 
(y/n) + ``` + +## Example Updates + +``` +/taskmaster:update-tasks-from-id 5: change database to PostgreSQL +→ Analyzing impact starting from task #5 +→ Found 6 related tasks to update +→ Updates will maintain consistency +→ Preview changes? (y/n) + +Applied updates: +✓ Task #5: Updated connection logic references +✓ Task #6: Changed migration approach +✓ Task #7: Updated query syntax notes +✓ Task #8: Revised testing strategy +✓ Task #9: Updated deployment steps +✓ Task #12: Changed backup procedures +``` + +## Safety Features + +- Preview all changes +- Selective confirmation +- Rollback capability +- Change logging +- Validation checks + +## Post-Update + +- Summary of changes +- Consistency verification +- Suggest review tasks +- Update timeline if needed +""" diff --git a/.gemini/commands/tm/validate-dependencies.toml b/.gemini/commands/tm/validate-dependencies.toml new file mode 100644 index 0000000..80b2021 --- /dev/null +++ b/.gemini/commands/tm/validate-dependencies.toml @@ -0,0 +1,74 @@ +description="Validate Dependencies" +prompt = """ +Validate all task dependencies for issues. + +## Dependency Validation + +Comprehensive check for dependency problems across the entire project. + +## Execution + +```bash +task-master validate-dependencies +``` + +## Validation Checks + +1. **Circular Dependencies** + - A depends on B, B depends on A + - Complex circular chains + - Self-dependencies + +2. **Missing Dependencies** + - References to non-existent tasks + - Deleted task references + - Invalid task IDs + +3. **Logical Issues** + - Completed tasks depending on pending + - Cancelled tasks in dependency chains + - Impossible sequences + +4. 
**Complexity Warnings** + - Over-complex dependency chains + - Too many dependencies per task + - Bottleneck tasks + +## Smart Analysis + +The validation provides: +- Visual dependency graph +- Critical path analysis +- Bottleneck identification +- Suggested optimizations + +## Report Format + +``` +Dependency Validation Report +━━━━━━━━━━━━━━━━━━━━━━━━━━ +✅ No circular dependencies found +⚠️ 2 warnings found: + - Task #23 has 7 dependencies (consider breaking down) + - Task #45 blocks 5 other tasks (potential bottleneck) +❌ 1 error found: + - Task #67 depends on deleted task #66 + +Critical Path: #1 → #5 → #23 → #45 → #50 (15 days) +``` + +## Actionable Output + +For each issue found: +- Clear description +- Impact assessment +- Suggested fix +- Command to resolve + +## Next Steps + +After validation: +- Run `/taskmaster:fix-dependencies` to auto-fix +- Manually adjust problematic dependencies +- Rerun to verify fixes +""" diff --git a/.gemini/commands/tm/view-models.toml b/.gemini/commands/tm/view-models.toml new file mode 100644 index 0000000..819ff38 --- /dev/null +++ b/.gemini/commands/tm/view-models.toml @@ -0,0 +1,54 @@ +description="View Models" +prompt = """ +View current AI model configuration. + +## Model Configuration Display + +Shows the currently configured AI providers and models for Task Master. + +## Execution + +```bash +task-master models +``` + +## Information Displayed + +1. **Main Provider** + - Model ID and name + - API key status (configured/missing) + - Usage: Primary task generation + +2. **Research Provider** + - Model ID and name + - API key status + - Usage: Enhanced research mode + +3. 
**Fallback Provider** + - Model ID and name + - API key status + - Usage: Backup when main fails + +## Visual Status + +``` +Task Master AI Model Configuration +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +Main: ✅ claude-3-5-sonnet (configured) +Research: ✅ perplexity-sonar (configured) +Fallback: ⚠️ Not configured (optional) + +Available Models: +- claude-3-5-sonnet +- gpt-4-turbo +- gpt-3.5-turbo +- perplexity-sonar +``` + +## Next Actions + +Based on configuration: +- If missing API keys → Suggest setup +- If no research model → Explain benefits +- If all configured → Show usage tips +""" diff --git a/.gemini/settings.json b/.gemini/settings.json new file mode 100644 index 0000000..88f3426 --- /dev/null +++ b/.gemini/settings.json @@ -0,0 +1,20 @@ +{ + "mcpServers": { + "task-master-ai": { + "command": "npx", + "args": ["-y", "task-master-ai"], + "env": { + "TASK_MASTER_TOOLS": "core", + "ANTHROPIC_API_KEY": "YOUR_ANTHROPIC_API_KEY_HERE", + "PERPLEXITY_API_KEY": "YOUR_PERPLEXITY_API_KEY_HERE", + "OPENAI_API_KEY": "YOUR_OPENAI_KEY_HERE", + "GOOGLE_API_KEY": "YOUR_GOOGLE_KEY_HERE", + "XAI_API_KEY": "YOUR_XAI_KEY_HERE", + "OPENROUTER_API_KEY": "YOUR_OPENROUTER_KEY_HERE", + "MISTRAL_API_KEY": "YOUR_MISTRAL_KEY_HERE", + "AZURE_OPENAI_API_KEY": "YOUR_AZURE_KEY_HERE", + "OLLAMA_API_KEY": "YOUR_OLLAMA_API_KEY_HERE" + } + } + } +} diff --git a/.github/instructions/dev_workflow.instructions.md b/.github/instructions/dev_workflow.instructions.md new file mode 100644 index 0000000..2c7325e --- /dev/null +++ b/.github/instructions/dev_workflow.instructions.md @@ -0,0 +1,423 @@ +--- +description: Guide for using Taskmaster to manage task-driven development workflows +applyTo: "**/*" +--- + +# Taskmaster Development Workflow + +This guide outlines the standard process for using Taskmaster to manage software development projects. It is written as a set of instructions for you, the AI agent. 
+ +- **Your Default Stance**: For most projects, the user can work directly within the `master` task context. Your initial actions should operate on this default context unless a clear pattern for multi-context work emerges. +- **Your Goal**: Your role is to elevate the user's workflow by intelligently introducing advanced features like **Tagged Task Lists** when you detect the appropriate context. Do not force tags on the user; suggest them as a helpful solution to a specific need. + +## The Basic Loop +The fundamental development cycle you will facilitate is: +1. **`list`**: Show the user what needs to be done. +2. **`next`**: Help the user decide what to work on. +3. **`show <id>`**: Provide details for a specific task. +4. **`expand <id>`**: Break down a complex task into smaller, manageable subtasks. +5. **Implement**: The user writes the code and tests. +6. **`update-subtask`**: Log progress and findings on behalf of the user. +7. **`set-status`**: Mark tasks and subtasks as `done` as work is completed. +8. **Repeat**. + +All your standard command executions should operate on the user's current task context, which defaults to `master`. 
+ +--- + +## Standard Development Workflow Process + +### Simple Workflow (Default Starting Point) + +For new projects or when users are getting started, operate within the `master` tag context: + +- Start new projects by running `initialize_project` tool / `task-master init` or `parse_prd` / `task-master parse-prd --input='<prd-file.txt>'` (see @`taskmaster.instructions.md`) to generate initial tasks.json with tagged structure +- Configure rule sets during initialization with `--rules` flag (e.g., `task-master init --rules vscode,windsurf`) or manage them later with `task-master rules add/remove` commands +- Begin coding sessions with `get_tasks` / `task-master list` (see @`taskmaster.instructions.md`) to see current tasks, status, and IDs +- Determine the next task to work on using `next_task` / `task-master next` (see @`taskmaster.instructions.md`) +- Analyze task complexity with `analyze_project_complexity` / `task-master analyze-complexity --research` (see @`taskmaster.instructions.md`) before breaking down tasks +- Review complexity report using `complexity_report` / `task-master complexity-report` (see @`taskmaster.instructions.md`) +- Select tasks based on dependencies (all marked 'done'), priority level, and ID order +- View specific task details using `get_task` / `task-master show <id>` (see @`taskmaster.instructions.md`) to understand implementation requirements +- Break down complex tasks using `expand_task` / `task-master expand --id=<id> --force --research` (see @`taskmaster.instructions.md`) with appropriate flags like `--force` (to replace existing subtasks) and `--research` +- Implement code following task details, dependencies, and project standards +- Mark completed tasks with `set_task_status` / `task-master set-status --id=<id> --status=done` (see @`taskmaster.instructions.md`) +- Update dependent tasks when implementation differs from original plan using `update` / `task-master update --from=<id> --prompt="..."` or `update_task` / 
`task-master update-task --id=<id> --prompt="..."` (see @`taskmaster.instructions.md`) + +--- + +## Leveling Up: Agent-Led Multi-Context Workflows + +While the basic workflow is powerful, your primary opportunity to add value is by identifying when to introduce **Tagged Task Lists**. These patterns are your tools for creating a more organized and efficient development environment for the user, especially if you detect agentic or parallel development happening across the same session. + +**Critical Principle**: Most users should never see a difference in their experience. Only introduce advanced workflows when you detect clear indicators that the project has evolved beyond simple task management. + +### When to Introduce Tags: Your Decision Patterns + +Here are the patterns to look for. When you detect one, you should propose the corresponding workflow to the user. + +#### Pattern 1: Simple Git Feature Branching +This is the most common and direct use case for tags. + +- **Trigger**: The user creates a new git branch (e.g., `git checkout -b feature/user-auth`). +- **Your Action**: Propose creating a new tag that mirrors the branch name to isolate the feature's tasks from `master`. +- **Your Suggested Prompt**: *"I see you've created a new branch named 'feature/user-auth'. To keep all related tasks neatly organized and separate from your main list, I can create a corresponding task tag for you. This helps prevent merge conflicts in your `tasks.json` file later. Shall I create the 'feature-user-auth' tag?"* +- **Tool to Use**: `task-master add-tag --from-branch` + +#### Pattern 2: Team Collaboration +- **Trigger**: The user mentions working with teammates (e.g., "My teammate Alice is handling the database schema," or "I need to review Bob's work on the API."). +- **Your Action**: Suggest creating a separate tag for the user's work to prevent conflicts with shared master context. 
+- **Your Suggested Prompt**: *"Since you're working with Alice, I can create a separate task context for your work to avoid conflicts. This way, Alice can continue working with the master list while you have your own isolated context. When you're ready to merge your work, we can coordinate the tasks back to master. Shall I create a tag for your current work?"* +- **Tool to Use**: `task-master add-tag my-work --copy-from-current --description="My tasks while collaborating with Alice"` + +#### Pattern 3: Experiments or Risky Refactors +- **Trigger**: The user wants to try something that might not be kept (e.g., "I want to experiment with switching our state management library," or "Let's refactor the old API module, but I want to keep the current tasks as a reference."). +- **Your Action**: Propose creating a sandboxed tag for the experimental work. +- **Your Suggested Prompt**: *"This sounds like a great experiment. To keep these new tasks separate from our main plan, I can create a temporary 'experiment-zustand' tag for this work. If we decide not to proceed, we can simply delete the tag without affecting the main task list. Sound good?"* +- **Tool to Use**: `task-master add-tag experiment-zustand --description="Exploring Zustand migration"` + +#### Pattern 4: Large Feature Initiatives (PRD-Driven) +This is a more structured approach for significant new features or epics. + +- **Trigger**: The user describes a large, multi-step feature that would benefit from a formal plan. +- **Your Action**: Propose a comprehensive, PRD-driven workflow. +- **Your Suggested Prompt**: *"This sounds like a significant new feature. To manage this effectively, I suggest we create a dedicated task context for it. Here's the plan: I'll create a new tag called 'feature-xyz', then we can draft a Product Requirements Document (PRD) together to scope the work. Once the PRD is ready, I'll automatically generate all the necessary tasks within that new tag. 
How does that sound?"* +- **Your Implementation Flow**: + 1. **Create an empty tag**: `task-master add-tag feature-xyz --description "Tasks for the new XYZ feature"`. You can also start by creating a git branch if applicable, and then create the tag from that branch. + 2. **Collaborate & Create PRD**: Work with the user to create a detailed PRD file (e.g., `.taskmaster/docs/feature-xyz-prd.txt`). + 3. **Parse PRD into the new tag**: `task-master parse-prd .taskmaster/docs/feature-xyz-prd.txt --tag feature-xyz` + 4. **Prepare the new task list**: Follow up by suggesting `analyze-complexity` and `expand-all` for the newly created tasks within the `feature-xyz` tag. + +#### Pattern 5: Version-Based Development +Tailor your approach based on the project maturity indicated by tag names. + +- **Prototype/MVP Tags** (`prototype`, `mvp`, `poc`, `v0.x`): + - **Your Approach**: Focus on speed and functionality over perfection + - **Task Generation**: Create tasks that emphasize "get it working" over "get it perfect" + - **Complexity Level**: Lower complexity, fewer subtasks, more direct implementation paths + - **Research Prompts**: Include context like "This is a prototype - prioritize speed and basic functionality over optimization" + - **Example Prompt Addition**: *"Since this is for the MVP, I'll focus on tasks that get core functionality working quickly rather than over-engineering."* + +- **Production/Mature Tags** (`v1.0+`, `production`, `stable`): + - **Your Approach**: Emphasize robustness, testing, and maintainability + - **Task Generation**: Include comprehensive error handling, testing, documentation, and optimization + - **Complexity Level**: Higher complexity, more detailed subtasks, thorough implementation paths + - **Research Prompts**: Include context like "This is for production - prioritize reliability, performance, and maintainability" + - **Example Prompt Addition**: *"Since this is for production, I'll ensure tasks include proper error handling, 
testing, and documentation."* + +### Advanced Workflow (Tag-Based & PRD-Driven) + +**When to Transition**: Recognize when the project has evolved (or the user has initiated a project with existing code) beyond simple task management. Look for these indicators: +- User mentions teammates or collaboration needs +- Project has grown to 15+ tasks with mixed priorities +- User creates feature branches or mentions major initiatives +- User initializes Taskmaster on an existing, complex codebase +- User describes large features that would benefit from dedicated planning + +**Your Role in Transition**: Guide the user to a more sophisticated workflow that leverages tags for organization and PRDs for comprehensive planning. + +#### Master List Strategy (High-Value Focus) +Once you transition to tag-based workflows, the `master` tag should ideally contain only: +- **High-level deliverables** that provide significant business value +- **Major milestones** and epic-level features +- **Critical infrastructure** work that affects the entire project +- **Release-blocking** items + +**What NOT to put in master**: +- Detailed implementation subtasks (these go in feature-specific tags' parent tasks) +- Refactoring work (create dedicated tags like `refactor-auth`) +- Experimental features (use `experiment-*` tags) +- Team member-specific tasks (use person-specific tags) + +#### PRD-Driven Feature Development + +**For New Major Features**: +1. **Identify the Initiative**: When user describes a significant feature +2. **Create Dedicated Tag**: `add_tag feature-[name] --description="[Feature description]"` +3. **Collaborative PRD Creation**: Work with user to create comprehensive PRD in `.taskmaster/docs/feature-[name]-prd.txt` +4. **Parse & Prepare**: + - `parse_prd .taskmaster/docs/feature-[name]-prd.txt --tag=feature-[name]` + - `analyze_project_complexity --tag=feature-[name] --research` + - `expand_all --tag=feature-[name] --research` +5. 
**Add Master Reference**: Create a high-level task in `master` that references the feature tag + +**For Existing Codebase Analysis**: +When users initialize Taskmaster on existing projects: +1. **Codebase Discovery**: Use your native tools for producing deep context about the code base. You may use `research` tool with `--tree` and `--files` to collect up to date information using the existing architecture as context. +2. **Collaborative Assessment**: Work with user to identify improvement areas, technical debt, or new features +3. **Strategic PRD Creation**: Co-author PRDs that include: + - Current state analysis (based on your codebase research) + - Proposed improvements or new features + - Implementation strategy considering existing code +4. **Tag-Based Organization**: Parse PRDs into appropriate tags (`refactor-api`, `feature-dashboard`, `tech-debt`, etc.) +5. **Master List Curation**: Keep only the most valuable initiatives in master + +The parse-prd's `--append` flag enables the user to parse multiple PRDs within tags or across tags. PRDs should be focused and the number of tasks they are parsed into should be strategically chosen relative to the PRD's complexity and level of detail. + +### Workflow Transition Examples + +**Example 1: Simple → Team-Based** +``` +User: "Alice is going to help with the API work" +Your Response: "Great! To avoid conflicts, I'll create a separate task context for your work. Alice can continue with the master list while you work in your own context. When you're ready to merge, we can coordinate the tasks back together." +Action: add_tag my-api-work --copy-from-current --description="My API tasks while collaborating with Alice" +``` + +**Example 2: Simple → PRD-Driven** +``` +User: "I want to add a complete user dashboard with analytics, user management, and reporting" +Your Response: "This sounds like a major feature that would benefit from detailed planning. 
Let me create a dedicated context for this work and we can draft a PRD together to ensure we capture all requirements." +Actions: +1. add_tag feature-dashboard --description="User dashboard with analytics and management" +2. Collaborate on PRD creation +3. parse_prd dashboard-prd.txt --tag=feature-dashboard +4. Add high-level "User Dashboard" task to master +``` + +**Example 3: Existing Project → Strategic Planning** +``` +User: "I just initialized Taskmaster on my existing React app. It's getting messy and I want to improve it." +Your Response: "Let me research your codebase to understand the current architecture, then we can create a strategic plan for improvements." +Actions: +1. research "Current React app architecture and improvement opportunities" --tree --files=src/ +2. Collaborate on improvement PRD based on findings +3. Create tags for different improvement areas (refactor-components, improve-state-management, etc.) +4. Keep only major improvement initiatives in master +``` + +--- + +## Primary Interaction: MCP Server vs. CLI + +Taskmaster offers two primary ways to interact: + +1. **MCP Server (Recommended for Integrated Tools)**: + - For AI agents and integrated development environments (like VS Code), interacting via the **MCP server is the preferred method**. + - The MCP server exposes Taskmaster functionality through a set of tools (e.g., `get_tasks`, `add_subtask`). + - This method offers better performance, structured data exchange, and richer error handling compared to CLI parsing. + - Refer to @`mcp.instructions.md` for details on the MCP architecture and available tools. + - A comprehensive list and description of MCP tools and their corresponding CLI commands can be found in @`taskmaster.instructions.md`. + - **Restart the MCP server** if core logic in `scripts/modules` or MCP tool/direct function definitions change. + - **Note**: MCP tools fully support tagged task lists with complete tag management capabilities. + +2. 
**`task-master` CLI (For Users & Fallback)**: + - The global `task-master` command provides a user-friendly interface for direct terminal interaction. + - It can also serve as a fallback if the MCP server is inaccessible or a specific function isn't exposed via MCP. + - Install globally with `npm install -g task-master-ai` or use locally via `npx task-master-ai ...`. + - The CLI commands often mirror the MCP tools (e.g., `task-master list` corresponds to `get_tasks`). + - Refer to @`taskmaster.instructions.md` for a detailed command reference. + - **Tagged Task Lists**: CLI fully supports the new tagged system with seamless migration. + +## How the Tag System Works (For Your Reference) + +- **Data Structure**: Tasks are organized into separate contexts (tags) like "master", "feature-branch", or "v2.0". +- **Silent Migration**: Existing projects automatically migrate to use a "master" tag with zero disruption. +- **Context Isolation**: Tasks in different tags are completely separate. Changes in one tag do not affect any other tag. +- **Manual Control**: The user is always in control. There is no automatic switching. You facilitate switching by using `use-tag <name>`. +- **Full CLI & MCP Support**: All tag management commands are available through both the CLI and MCP tools for you to use. Refer to @`taskmaster.instructions.md` for a full command list. + +--- + +## Task Complexity Analysis + +- Run `analyze_project_complexity` / `task-master analyze-complexity --research` (see @`taskmaster.instructions.md`) for comprehensive analysis +- Review complexity report via `complexity_report` / `task-master complexity-report` (see @`taskmaster.instructions.md`) for a formatted, readable version. 
+- Focus on tasks with highest complexity scores (8-10) for detailed breakdown +- Use analysis results to determine appropriate subtask allocation +- Note that reports are automatically used by the `expand_task` tool/command + +## Task Breakdown Process + +- Use `expand_task` / `task-master expand --id=<id>`. It automatically uses the complexity report if found, otherwise generates default number of subtasks. +- Use `--num=<number>` to specify an explicit number of subtasks, overriding defaults or complexity report recommendations. +- Add `--research` flag to leverage Perplexity AI for research-backed expansion. +- Add `--force` flag to clear existing subtasks before generating new ones (default is to append). +- Use `--prompt="<context>"` to provide additional context when needed. +- Review and adjust generated subtasks as necessary. +- Use `expand_all` tool or `task-master expand --all` to expand multiple pending tasks at once, respecting flags like `--force` and `--research`. +- If subtasks need complete replacement (regardless of the `--force` flag on `expand`), clear them first with `clear_subtasks` / `task-master clear-subtasks --id=<id>`. + +## Implementation Drift Handling + +- When implementation differs significantly from planned approach +- When future tasks need modification due to current implementation choices +- When new dependencies or requirements emerge +- Use `update` / `task-master update --from=<futureTaskId> --prompt='<explanation>\nUpdate context...' --research` to update multiple future tasks. +- Use `update_task` / `task-master update-task --id=<taskId> --prompt='<explanation>\nUpdate context...' --research` to update a single specific task. 
+ +## Task Status Management + +- Use 'pending' for tasks ready to be worked on +- Use 'done' for completed and verified tasks +- Use 'deferred' for postponed tasks +- Add custom status values as needed for project-specific workflows + +## Task Structure Fields + +- **id**: Unique identifier for the task (Example: `1`, `1.1`) +- **title**: Brief, descriptive title (Example: `"Initialize Repo"`) +- **description**: Concise summary of what the task involves (Example: `"Create a new repository, set up initial structure."`) +- **status**: Current state of the task (Example: `"pending"`, `"done"`, `"deferred"`) +- **dependencies**: IDs of prerequisite tasks (Example: `[1, 2.1]`) + - Dependencies are displayed with status indicators (✅ for completed, ⏱️ for pending) + - This helps quickly identify which prerequisite tasks are blocking work +- **priority**: Importance level (Example: `"high"`, `"medium"`, `"low"`) +- **details**: In-depth implementation instructions (Example: `"Use GitHub client ID/secret, handle callback, set session token."`) +- **testStrategy**: Verification approach (Example: `"Deploy and call endpoint to confirm 'Hello World' response."`) +- **subtasks**: List of smaller, more specific tasks (Example: `[{"id": 1, "title": "Configure OAuth", ...}]`) +- Refer to task structure details (previously linked to `tasks.instructions.md`). + +## Configuration Management (Updated) + +Taskmaster configuration is managed through two main mechanisms: + +1. **`.taskmaster/config.json` File (Primary):** + * Located in the project root directory. + * Stores most configuration settings: AI model selections (main, research, fallback), parameters (max tokens, temperature), logging level, default subtasks/priority, project name, etc. + * **Tagged System Settings**: Includes `global.defaultTag` (defaults to "master") and `tags` section for tag management configuration. 
+ * **Managed via `task-master models --setup` command.** Do not edit manually unless you know what you are doing. + * **View/Set specific models via `task-master models` command or `models` MCP tool.** + * Created automatically when you run `task-master models --setup` for the first time or during tagged system migration. + +2. **Environment Variables (`.env` / `mcp.json`):** + * Used **only** for sensitive API keys and specific endpoint URLs. + * Place API keys (one per provider) in a `.env` file in the project root for CLI usage. + * For MCP/VS Code integration, configure these keys in the `env` section of `.vscode/mcp.json`. + * Available keys/variables: See `assets/env.example` or the Configuration section in the command reference (previously linked to `taskmaster.instructions.md`). + +3. **`.taskmaster/state.json` File (Tagged System State):** + * Tracks current tag context and migration status. + * Automatically created during tagged system migration. + * Contains: `currentTag`, `lastSwitched`, `migrationNoticeShown`. + +**Important:** Non-API key settings (like model selections, `MAX_TOKENS`, `TASKMASTER_LOG_LEVEL`) are **no longer configured via environment variables**. Use the `task-master models` command (or `--setup` for interactive configuration) or the `models` MCP tool. +**If AI commands FAIL in MCP** verify that the API key for the selected provider is present in the `env` section of `.vscode/mcp.json`. +**If AI commands FAIL in CLI** verify that the API key for the selected provider is present in the `.env` file in the root of the project. 
+ +## Rules Management + +Taskmaster supports multiple AI coding assistant rule sets that can be configured during project initialization or managed afterward: + +- **Available Profiles**: Claude Code, Cline, Codex, VS Code, Roo Code, Trae, Windsurf (claude, cline, codex, vscode, roo, trae, windsurf) +- **During Initialization**: Use `task-master init --rules vscode,windsurf` to specify which rule sets to include +- **After Initialization**: Use `task-master rules add <profiles>` or `task-master rules remove <profiles>` to manage rule sets +- **Interactive Setup**: Use `task-master rules setup` to launch an interactive prompt for selecting rule profiles +- **Default Behavior**: If no `--rules` flag is specified during initialization, all available rule profiles are included +- **Rule Structure**: Each profile creates its own directory (e.g., `.github/instructions`, `.roo/rules`) with appropriate configuration files + +## Determining the Next Task + +- Run `next_task` / `task-master next` to show the next task to work on. +- The command identifies tasks with all dependencies satisfied +- Tasks are prioritized by priority level, dependency count, and ID +- The command shows comprehensive task information including: + - Basic task details and description + - Implementation details + - Subtasks (if they exist) + - Contextual suggested actions +- Recommended before starting any new development work +- Respects your project's dependency structure +- Ensures tasks are completed in the appropriate sequence +- Provides ready-to-use commands for common task actions + +## Viewing Specific Task Details + +- Run `get_task` / `task-master show <id>` to view a specific task. 
+- Use dot notation for subtasks: `task-master show 1.2` (shows subtask 2 of task 1) +- Displays comprehensive information similar to the next command, but for a specific task +- For parent tasks, shows all subtasks and their current status +- For subtasks, shows parent task information and relationship +- Provides contextual suggested actions appropriate for the specific task +- Useful for examining task details before implementation or checking status + +## Managing Task Dependencies + +- Use `add_dependency` / `task-master add-dependency --id=<id> --depends-on=<id>` to add a dependency. +- Use `remove_dependency` / `task-master remove-dependency --id=<id> --depends-on=<id>` to remove a dependency. +- The system prevents circular dependencies and duplicate dependency entries +- Dependencies are checked for existence before being added or removed +- Task files are automatically regenerated after dependency changes +- Dependencies are visualized with status indicators in task listings and files + +## Task Reorganization + +- Use `move_task` / `task-master move --from=<id> --to=<id>` to move tasks or subtasks within the hierarchy +- This command supports several use cases: + - Moving a standalone task to become a subtask (e.g., `--from=5 --to=7`) + - Moving a subtask to become a standalone task (e.g., `--from=5.2 --to=7`) + - Moving a subtask to a different parent (e.g., `--from=5.2 --to=7.3`) + - Reordering subtasks within the same parent (e.g., `--from=5.2 --to=5.4`) + - Moving a task to a new, non-existent ID position (e.g., `--from=5 --to=25`) + - Moving multiple tasks at once using comma-separated IDs (e.g., `--from=10,11,12 --to=16,17,18`) +- The system includes validation to prevent data loss: + - Allows moving to non-existent IDs by creating placeholder tasks + - Prevents moving to existing task IDs that have content (to avoid overwriting) + - Validates source tasks exist before attempting to move them +- The system maintains proper parent-child 
relationships and dependency integrity +- Task files are automatically regenerated after the move operation +- This provides greater flexibility in organizing and refining your task structure as project understanding evolves +- This is especially useful when dealing with potential merge conflicts arising from teams creating tasks on separate branches. Solve these conflicts very easily by moving your tasks and keeping theirs. + +## Iterative Subtask Implementation + +Once a task has been broken down into subtasks using `expand_task` or similar methods, follow this iterative process for implementation: + +1. **Understand the Goal (Preparation):** + * Use `get_task` / `task-master show <subtaskId>` (see @`taskmaster.instructions.md`) to thoroughly understand the specific goals and requirements of the subtask. + +2. **Initial Exploration & Planning (Iteration 1):** + * This is the first attempt at creating a concrete implementation plan. + * Explore the codebase to identify the precise files, functions, and even specific lines of code that will need modification. + * Determine the intended code changes (diffs) and their locations. + * Gather *all* relevant details from this exploration phase. + +3. **Log the Plan:** + * Run `update_subtask` / `task-master update-subtask --id=<subtaskId> --prompt='<detailed plan>'`. + * Provide the *complete and detailed* findings from the exploration phase in the prompt. Include file paths, line numbers, proposed diffs, reasoning, and any potential challenges identified. Do not omit details. The goal is to create a rich, timestamped log within the subtask's `details`. + +4. **Verify the Plan:** + * Run `get_task` / `task-master show <subtaskId>` again to confirm that the detailed implementation plan has been successfully appended to the subtask's details. + +5. **Begin Implementation:** + * Set the subtask status using `set_task_status` / `task-master set-status --id=<subtaskId> --status=in-progress`. 
+ * Start coding based on the logged plan. + +6. **Refine and Log Progress (Iteration 2+):** + * As implementation progresses, you will encounter challenges, discover nuances, or confirm successful approaches. + * **Before appending new information**: Briefly review the *existing* details logged in the subtask (using `get_task` or recalling from context) to ensure the update adds fresh insights and avoids redundancy. + * **Regularly** use `update_subtask` / `task-master update-subtask --id=<subtaskId> --prompt='<update details>\n- What worked...\n- What didn't work...'` to append new findings. + * **Crucially, log:** + * What worked ("fundamental truths" discovered). + * What didn't work and why (to avoid repeating mistakes). + * Specific code snippets or configurations that were successful. + * Decisions made, especially if confirmed with user input. + * Any deviations from the initial plan and the reasoning. + * The objective is to continuously enrich the subtask's details, creating a log of the implementation journey that helps the AI (and human developers) learn, adapt, and avoid repeating errors. + +7. **Review & Update Rules (Post-Implementation):** + * Once the implementation for the subtask is functionally complete, review all code changes and the relevant chat history. + * Identify any new or modified code patterns, conventions, or best practices established during the implementation. + * Create new or update existing rules following internal guidelines (previously linked to `cursor_rules.instructions.md` and `self_improve.instructions.md`). + +8. **Mark Task Complete:** + * After verifying the implementation and updating any necessary rules, mark the subtask as completed: `set_task_status` / `task-master set-status --id=<subtaskId> --status=done`. + +9. **Commit Changes (If using Git):** + * Stage the relevant code changes and any updated/new rule files (`git add .`). 
+ * Craft a comprehensive Git commit message summarizing the work done for the subtask, including both code implementation and any rule adjustments. + * Execute the commit command directly in the terminal (e.g., `git commit -m 'feat(module): Implement feature X for subtask <subtaskId>\n\n- Details about changes...\n- Updated rule Y for pattern Z'`). + * Consider if a Changeset is needed according to internal versioning guidelines (previously linked to `changeset.instructions.md`). If so, run `npm run changeset`, stage the generated file, and amend the commit or create a new one. + +10. **Proceed to Next Subtask:** + * Identify the next subtask (e.g., using `next_task` / `task-master next`). + +## Code Analysis & Refactoring Techniques + +- **Top-Level Function Search**: + - Useful for understanding module structure or planning refactors. + - Use grep/ripgrep to find exported functions/constants: + `rg "export (async function|function|const) \w+"` or similar patterns. + - Can help compare functions between files during migrations or identify potential naming conflicts. + +--- +*This workflow provides a general guideline. Adapt it based on your specific project needs and team practices.* \ No newline at end of file diff --git a/.github/instructions/self_improve.instructions.md b/.github/instructions/self_improve.instructions.md new file mode 100644 index 0000000..cbb625a --- /dev/null +++ b/.github/instructions/self_improve.instructions.md @@ -0,0 +1,71 @@ +--- +description: Guidelines for continuously improving VS Code rules based on emerging code patterns and best practices. 
+applyTo: "**/*" +--- + +- **Rule Improvement Triggers:** + - New code patterns not covered by existing rules + - Repeated similar implementations across files + - Common error patterns that could be prevented + - New libraries or tools being used consistently + - Emerging best practices in the codebase + +- **Analysis Process:** + - Compare new code with existing rules + - Identify patterns that should be standardized + - Look for references to external documentation + - Check for consistent error handling patterns + - Monitor test patterns and coverage + +- **Rule Updates:** + - **Add New Rules When:** + - A new technology/pattern is used in 3+ files + - Common bugs could be prevented by a rule + - Code reviews repeatedly mention the same feedback + - New security or performance patterns emerge + + - **Modify Existing Rules When:** + - Better examples exist in the codebase + - Additional edge cases are discovered + - Related rules have been updated + - Implementation details have changed + +- **Example Pattern Recognition:** + ```typescript + // If you see repeated patterns like: + const data = await prisma.user.findMany({ + select: { id: true, email: true }, + where: { status: 'ACTIVE' } + }); + + // Consider adding to [prisma.instructions.md](.github/instructions/prisma.instructions.md): + // - Standard select fields + // - Common where conditions + // - Performance optimization patterns + ``` + +- **Rule Quality Checks:** + - Rules should be actionable and specific + - Examples should come from actual code + - References should be up to date + - Patterns should be consistently enforced + +- **Continuous Improvement:** + - Monitor code review comments + - Track common development questions + - Update rules after major refactors + - Add links to relevant documentation + - Cross-reference related rules + +- **Rule Deprecation:** + - Mark outdated patterns as deprecated + - Remove rules that no longer apply + - Update references to deprecated rules + - Document 
migration paths for old patterns + +- **Documentation Updates:** + - Keep examples synchronized with code + - Update references to external docs + - Maintain links between related rules + - Document breaking changes +Follow [vscode_rules.instructions.md](.github/instructions/vscode_rules.instructions.md) for proper rule formatting and structure. diff --git a/.github/instructions/taskmaster.instructions.md b/.github/instructions/taskmaster.instructions.md new file mode 100644 index 0000000..c2fd1b3 --- /dev/null +++ b/.github/instructions/taskmaster.instructions.md @@ -0,0 +1,572 @@ +--- +description: Comprehensive reference for Taskmaster MCP tools and CLI commands. +applyTo: "**/*" +--- + +# Taskmaster Tool & Command Reference + +This document provides a detailed reference for interacting with Taskmaster, covering both the recommended MCP tools, suitable for integrations like VS Code, and the corresponding `task-master` CLI commands, designed for direct user interaction or fallback. + +**Note:** For interacting with Taskmaster programmatically or via integrated tools, using the **MCP tools is strongly recommended** due to better performance, structured data, and error handling. The CLI commands serve as a user-friendly alternative and fallback. + +**Important:** Several MCP tools involve AI processing... The AI-powered tools include `parse_prd`, `analyze_project_complexity`, `update_subtask`, `update_task`, `update`, `expand_all`, `expand_task`, and `add_task`. + +**🏷️ Tagged Task Lists System:** Task Master now supports **tagged task lists** for multi-context task management. This allows you to maintain separate, isolated lists of tasks for different features, branches, or experiments. Existing projects are seamlessly migrated to use a default "master" tag. Most commands now support a `--tag <name>` flag to specify which context to operate on. If omitted, commands use the currently active tag. + +--- + +## Initialization & Setup + +### 1. 
Initialize Project (`init`) + +* **MCP Tool:** `initialize_project` +* **CLI Command:** `task-master init [options]` +* **Description:** `Set up the basic Taskmaster file structure and configuration in the current directory for a new project.` +* **Key CLI Options:** + * `--name <name>`: `Set the name for your project in Taskmaster's configuration.` + * `--description <text>`: `Provide a brief description for your project.` + * `--version <version>`: `Set the initial version for your project, e.g., '0.1.0'.` + * `-y, --yes`: `Initialize Taskmaster quickly using default settings without interactive prompts.` +* **Usage:** Run this once at the beginning of a new project. +* **MCP Variant Description:** `Set up the basic Taskmaster file structure and configuration in the current directory for a new project by running the 'task-master init' command.` +* **Key MCP Parameters/Options:** + * `projectName`: `Set the name for your project.` (CLI: `--name <name>`) + * `projectDescription`: `Provide a brief description for your project.` (CLI: `--description <text>`) + * `projectVersion`: `Set the initial version for your project, e.g., '0.1.0'.` (CLI: `--version <version>`) + * `authorName`: `Author name.` (CLI: `--author <author>`) + * `skipInstall`: `Skip installing dependencies. Default is false.` (CLI: `--skip-install`) + * `addAliases`: `Add shell aliases tm, taskmaster, hamster, and ham. Default is false.` (CLI: `--aliases`) + * `yes`: `Skip prompts and use defaults/provided arguments. Default is false.` (CLI: `-y, --yes`) +* **Usage:** Run this once at the beginning of a new project, typically via an integrated tool like VS Code. Operates on the current working directory of the MCP server. +* **Important:** Once complete, you *MUST* parse a prd in order to generate tasks. There will be no tasks files until then. The next step after initializing should be to create a PRD using the example PRD in .taskmaster/templates/example_prd.txt. 
+* **Tagging:** Use the `--tag` option to parse the PRD into a specific, non-default tag context. If the tag doesn't exist, it will be created automatically. Example: `task-master parse-prd spec.txt --tag=new-feature`. + +### 2. Parse PRD (`parse_prd`) + +* **MCP Tool:** `parse_prd` +* **CLI Command:** `task-master parse-prd [file] [options]` +* **Description:** `Parse a Product Requirements Document, PRD, or text file with Taskmaster to automatically generate an initial set of tasks in tasks.json.` +* **Key Parameters/Options:** + * `input`: `Path to your PRD or requirements text file that Taskmaster should parse for tasks.` (CLI: `[file]` positional or `-i, --input <file>`) + * `output`: `Specify where Taskmaster should save the generated 'tasks.json' file. Defaults to '.taskmaster/tasks/tasks.json'.` (CLI: `-o, --output <file>`) + * `numTasks`: `Approximate number of top-level tasks Taskmaster should aim to generate from the document.` (CLI: `-n, --num-tasks <number>`) + * `force`: `Use this to allow Taskmaster to overwrite an existing 'tasks.json' without asking for confirmation.` (CLI: `-f, --force`) +* **Usage:** Useful for bootstrapping a project from an existing requirements document. +* **Notes:** Task Master will strictly adhere to any specific requirements mentioned in the PRD, such as libraries, database schemas, frameworks, tech stacks, etc., while filling in any gaps where the PRD isn't fully specified. Tasks are designed to provide the most direct implementation path while avoiding over-engineering. +* **Important:** This MCP tool makes AI calls and can take up to a minute to complete. Please inform users to hang tight while the operation is in progress. If the user does not have a PRD, suggest discussing their idea and then use the example PRD in `.taskmaster/templates/example_prd.txt` as a template for creating the PRD based on their idea, for use with `parse-prd`. + +--- + +## AI Model Configuration + +### 2. 
Manage Models (`models`) +* **MCP Tool:** `models` +* **CLI Command:** `task-master models [options]` +* **Description:** `View the current AI model configuration or set specific models for different roles (main, research, fallback). Allows setting custom model IDs for Ollama and OpenRouter.` +* **Key MCP Parameters/Options:** + * `setMain <model_id>`: `Set the primary model ID for task generation/updates.` (CLI: `--set-main <model_id>`) + * `setResearch <model_id>`: `Set the model ID for research-backed operations.` (CLI: `--set-research <model_id>`) + * `setFallback <model_id>`: `Set the model ID to use if the primary fails.` (CLI: `--set-fallback <model_id>`) + * `ollama <boolean>`: `Indicates the set model ID is a custom Ollama model.` (CLI: `--ollama`) + * `openrouter <boolean>`: `Indicates the set model ID is a custom OpenRouter model.` (CLI: `--openrouter`) + * `listAvailableModels <boolean>`: `If true, lists available models not currently assigned to a role.` (CLI: No direct equivalent; CLI lists available automatically) + * `projectRoot <string>`: `Optional. Absolute path to the project root directory.` (CLI: Determined automatically) +* **Key CLI Options:** + * `--set-main <model_id>`: `Set the primary model.` + * `--set-research <model_id>`: `Set the research model.` + * `--set-fallback <model_id>`: `Set the fallback model.` + * `--ollama`: `Specify that the provided model ID is for Ollama (use with --set-*).` + * `--openrouter`: `Specify that the provided model ID is for OpenRouter (use with --set-*). Validates against OpenRouter API.` + * `--bedrock`: `Specify that the provided model ID is for AWS Bedrock (use with --set-*).` + * `--setup`: `Run interactive setup to configure models, including custom Ollama/OpenRouter IDs.` +* **Usage (MCP):** Call without set flags to get current config. Use `setMain`, `setResearch`, or `setFallback` with a valid model ID to update the configuration. Use `listAvailableModels: true` to get a list of unassigned models. 
To set a custom model, provide the model ID and set `ollama: true` or `openrouter: true`. +* **Usage (CLI):** Run without flags to view current configuration and available models. Use set flags to update specific roles. Use `--setup` for guided configuration, including custom models. To set a custom model via flags, use `--set-<role>=<model_id>` along with either `--ollama` or `--openrouter`. +* **Notes:** Configuration is stored in `.taskmaster/config.json` in the project root. This command/tool modifies that file. Use `listAvailableModels` or `task-master models` to see internally supported models. OpenRouter custom models are validated against their live API. Ollama custom models are not validated live. +* **API note:** API keys for selected AI providers (based on their model) need to exist in the mcp.json file to be accessible in MCP context. The API keys must be present in the local .env file for the CLI to be able to read them. +* **Model costs:** The costs in supported models are expressed in dollars. An input/output value of 3 is $3.00. A value of 0.8 is $0.80. +* **Warning:** DO NOT MANUALLY EDIT THE .taskmaster/config.json FILE. Use the included commands either in the MCP or CLI format as needed. Always prioritize MCP tools when available and use the CLI as a fallback. + +--- + +## Task Listing & Viewing + +### 3. Get Tasks (`get_tasks`) + +* **MCP Tool:** `get_tasks` +* **CLI Command:** `task-master list [options]` +* **Description:** `List your Taskmaster tasks, optionally filtering by status and showing subtasks.` +* **Key Parameters/Options:** + * `status`: `Show only Taskmaster tasks matching this status (or multiple statuses, comma-separated), e.g., 'pending' or 'done,in-progress'.` (CLI: `-s, --status <status>`) + * `withSubtasks`: `Include subtasks indented under their parent tasks in the list.` (CLI: `--with-subtasks`) + * `tag`: `Specify which tag context to list tasks from. 
Defaults to the current active tag.` (CLI: `--tag <name>`) + * `file`: `Path to your Taskmaster 'tasks.json' file. Default relies on auto-detection.` (CLI: `-f, --file <file>`) + * `watch`: `Watch for changes and auto-refresh the list in real-time. Works with file storage (fs.watch) and API storage (Supabase Realtime).` (CLI: `-w, --watch`) +* **Usage:** Get an overview of the project status, often used at the start of a work session. Use `--watch` to keep the list live-updating as tasks change. + +### 4. Get Next Task (`next_task`) + +* **MCP Tool:** `next_task` +* **CLI Command:** `task-master next [options]` +* **Description:** `Ask Taskmaster to show the next available task you can work on, based on status and completed dependencies.` +* **Key Parameters/Options:** + * `file`: `Path to your Taskmaster 'tasks.json' file. Default relies on auto-detection.` (CLI: `-f, --file <file>`) + * `tag`: `Specify which tag context to use. Defaults to the current active tag.` (CLI: `--tag <name>`) +* **Usage:** Identify what to work on next according to the plan. + +### 5. Get Task Details (`get_task`) + +* **MCP Tool:** `get_task` +* **CLI Command:** `task-master show [id] [options]` +* **Description:** `Display detailed information for one or more specific Taskmaster tasks or subtasks by ID.` +* **Key Parameters/Options:** + * `id`: `Required. The ID of the Taskmaster task (e.g., '15'), subtask (e.g., '15.2'), or a comma-separated list of IDs ('1,5,10.2') you want to view.` (CLI: `[id]` positional or `-i, --id <id>`) + * `tag`: `Specify which tag context to get the task(s) from. Defaults to the current active tag.` (CLI: `--tag <name>`) + * `file`: `Path to your Taskmaster 'tasks.json' file. Default relies on auto-detection.` (CLI: `-f, --file <file>`) +* **Usage:** Understand the full details for a specific task. When multiple IDs are provided, a summary table is shown. 
+* **CRITICAL INFORMATION** If you need to collect information from multiple tasks, use comma-separated IDs (i.e. 1,2,3) to receive an array of tasks. Do not needlessly get tasks one at a time if you need to get many as that is wasteful. + +--- + +## Task Creation & Modification + +### 6. Add Task (`add_task`) + +* **MCP Tool:** `add_task` +* **CLI Command:** `task-master add-task [options]` +* **Description:** `Add a new task to Taskmaster by describing it; AI will structure it.` +* **Key Parameters/Options:** + * `prompt`: `Required. Describe the new task you want Taskmaster to create, e.g., "Implement user authentication using JWT".` (CLI: `-p, --prompt <text>`) + * `dependencies`: `Specify the IDs of any Taskmaster tasks that must be completed before this new one can start, e.g., '12,14'.` (CLI: `-d, --dependencies <ids>`) + * `priority`: `Set the priority for the new task: 'high', 'medium', or 'low'. Default is 'medium'.` (CLI: `--priority <priority>`) + * `research`: `Enable Taskmaster to use the research role for potentially more informed task creation.` (CLI: `-r, --research`) + * `tag`: `Specify which tag context to add the task to. Defaults to the current active tag.` (CLI: `--tag <name>`) + * `file`: `Path to your Taskmaster 'tasks.json' file. Default relies on auto-detection.` (CLI: `-f, --file <file>`) +* **Usage:** Quickly add newly identified tasks during development. +* **Important:** This MCP tool makes AI calls and can take up to a minute to complete. Please inform users to hang tight while the operation is in progress. + +### 7. Add Subtask (`add_subtask`) + +* **MCP Tool:** `add_subtask` +* **CLI Command:** `task-master add-subtask [options]` +* **Description:** `Add a new subtask to a Taskmaster parent task, or convert an existing task into a subtask.` +* **Key Parameters/Options:** + * `id` / `parent`: `Required. 
The ID of the Taskmaster task that will be the parent.` (MCP: `id`, CLI: `-p, --parent <id>`) + * `taskId`: `Use this if you want to convert an existing top-level Taskmaster task into a subtask of the specified parent.` (CLI: `-i, --task-id <id>`) + * `title`: `Required if not using taskId. The title for the new subtask Taskmaster should create.` (CLI: `-t, --title <title>`) + * `description`: `A brief description for the new subtask.` (CLI: `-d, --description <text>`) + * `details`: `Provide implementation notes or details for the new subtask.` (CLI: `--details <text>`) + * `dependencies`: `Specify IDs of other tasks or subtasks, e.g., '15' or '16.1', that must be done before this new subtask.` (CLI: `--dependencies <ids>`) + * `status`: `Set the initial status for the new subtask. Default is 'pending'.` (CLI: `-s, --status <status>`) + * `generate`: `Enable Taskmaster to regenerate markdown task files after adding the subtask.` (CLI: `--generate`) + * `tag`: `Specify which tag context to operate on. Defaults to the current active tag.` (CLI: `--tag <name>`) + * `file`: `Path to your Taskmaster 'tasks.json' file. Default relies on auto-detection.` (CLI: `-f, --file <file>`) +* **Usage:** Break down tasks manually or reorganize existing tasks. + +### 8. Update Tasks (`update`) + +* **MCP Tool:** `update` +* **CLI Command:** `task-master update [options]` +* **Description:** `Update multiple upcoming tasks in Taskmaster based on new context or changes, starting from a specific task ID.` +* **Key Parameters/Options:** + * `from`: `Required. The ID of the first task Taskmaster should update. All tasks with this ID or higher that are not 'done' will be considered.` (CLI: `--from <id>`) + * `prompt`: `Required. 
Explain the change or new context for Taskmaster to apply to the tasks, e.g., "We are now using React Query instead of Redux Toolkit for data fetching".` (CLI: `-p, --prompt <text>`) + * `research`: `Enable Taskmaster to use the research role for more informed updates. Requires appropriate API key.` (CLI: `-r, --research`) + * `tag`: `Specify which tag context to operate on. Defaults to the current active tag.` (CLI: `--tag <name>`) + * `file`: `Path to your Taskmaster 'tasks.json' file. Default relies on auto-detection.` (CLI: `-f, --file <file>`) +* **Usage:** Handle significant implementation changes or pivots that affect multiple future tasks. Example CLI: `task-master update --from='18' --prompt='Switching to React Query.\nNeed to refactor data fetching...'` +* **Important:** This MCP tool makes AI calls and can take up to a minute to complete. Please inform users to hang tight while the operation is in progress. + +### 9. Update Task (`update_task`) + +* **MCP Tool:** `update_task` +* **CLI Command:** `task-master update-task [options]` +* **Description:** `Modify a specific Taskmaster task by ID, incorporating new information or changes. By default, this replaces the existing task details.` +* **Key Parameters/Options:** + * `id`: `Required. The specific ID of the Taskmaster task, e.g., '15', you want to update.` (CLI: `-i, --id <id>`) + * `prompt`: `Required. Explain the specific changes or provide the new information Taskmaster should incorporate into this task.` (CLI: `-p, --prompt <text>`) + * `append`: `If true, appends the prompt content to the task's details with a timestamp, rather than replacing them. Behaves like update-subtask.` (CLI: `--append`) + * `research`: `Enable Taskmaster to use the research role for more informed updates. Requires appropriate API key.` (CLI: `-r, --research`) + * `tag`: `Specify which tag context the task belongs to. 
Defaults to the current active tag.` (CLI: `--tag <name>`) + * `file`: `Path to your Taskmaster 'tasks.json' file. Default relies on auto-detection.` (CLI: `-f, --file <file>`) +* **Usage:** Refine a specific task based on new understanding. Use `--append` to log progress without creating subtasks. +* **Important:** This MCP tool makes AI calls and can take up to a minute to complete. Please inform users to hang tight while the operation is in progress. + +### 10. Update Subtask (`update_subtask`) + +* **MCP Tool:** `update_subtask` +* **CLI Command:** `task-master update-subtask [options]` +* **Description:** `Append timestamped notes or details to a specific Taskmaster subtask without overwriting existing content. Intended for iterative implementation logging.` +* **Key Parameters/Options:** + * `id`: `Required. The ID of the Taskmaster subtask, e.g., '5.2', to update with new information.` (CLI: `-i, --id <id>`) + * `prompt`: `Required. The information, findings, or progress notes to append to the subtask's details with a timestamp.` (CLI: `-p, --prompt <text>`) + * `research`: `Enable Taskmaster to use the research role for more informed updates. Requires appropriate API key.` (CLI: `-r, --research`) + * `tag`: `Specify which tag context the subtask belongs to. Defaults to the current active tag.` (CLI: `--tag <name>`) + * `file`: `Path to your Taskmaster 'tasks.json' file. Default relies on auto-detection.` (CLI: `-f, --file <file>`) +* **Usage:** Log implementation progress, findings, and discoveries during subtask development. Each update is timestamped and appended to preserve the implementation journey. +* **Important:** This MCP tool makes AI calls and can take up to a minute to complete. Please inform users to hang tight while the operation is in progress. + +### 11. 
Set Task Status (`set_task_status`) + +* **MCP Tool:** `set_task_status` +* **CLI Command:** `task-master set-status [options]` +* **Description:** `Update the status of one or more Taskmaster tasks or subtasks, e.g., 'pending', 'in-progress', 'done'.` +* **Key Parameters/Options:** + * `id`: `Required. The ID(s) of the Taskmaster task(s) or subtask(s), e.g., '15', '15.2', or '16,17.1', to update.` (CLI: `-i, --id <id>`) + * `status`: `Required. The new status to set, e.g., 'done', 'pending', 'in-progress', 'review', 'cancelled'.` (CLI: `-s, --status <status>`) + * `tag`: `Specify which tag context to operate on. Defaults to the current active tag.` (CLI: `--tag <name>`) + * `file`: `Path to your Taskmaster 'tasks.json' file. Default relies on auto-detection.` (CLI: `-f, --file <file>`) +* **Usage:** Mark progress as tasks move through the development cycle. + +### 12. Remove Task (`remove_task`) + +* **MCP Tool:** `remove_task` +* **CLI Command:** `task-master remove-task [options]` +* **Description:** `Permanently remove a task or subtask from the Taskmaster tasks list.` +* **Key Parameters/Options:** + * `id`: `Required. The ID of the Taskmaster task, e.g., '5', or subtask, e.g., '5.2', to permanently remove.` (CLI: `-i, --id <id>`) + * `yes`: `Skip the confirmation prompt and immediately delete the task.` (CLI: `-y, --yes`) + * `tag`: `Specify which tag context to operate on. Defaults to the current active tag.` (CLI: `--tag <name>`) + * `file`: `Path to your Taskmaster 'tasks.json' file. Default relies on auto-detection.` (CLI: `-f, --file <file>`) +* **Usage:** Permanently delete tasks or subtasks that are no longer needed in the project. +* **Notes:** Use with caution as this operation cannot be undone. Consider using 'blocked', 'cancelled', or 'deferred' status instead if you just want to exclude a task from active planning but keep it for reference. The command automatically cleans up dependency references in other tasks. 
+ +--- + +## Task Structure & Breakdown + +### 13. Expand Task (`expand_task`) + +* **MCP Tool:** `expand_task` +* **CLI Command:** `task-master expand [options]` +* **Description:** `Use Taskmaster's AI to break down a complex task into smaller, manageable subtasks. Appends subtasks by default.` +* **Key Parameters/Options:** + * `id`: `The ID of the specific Taskmaster task you want to break down into subtasks.` (CLI: `-i, --id <id>`) + * `num`: `Optional: Suggests how many subtasks Taskmaster should aim to create. Uses complexity analysis/defaults otherwise.` (CLI: `-n, --num <number>`) + * `research`: `Enable Taskmaster to use the research role for more informed subtask generation. Requires appropriate API key.` (CLI: `-r, --research`) + * `prompt`: `Optional: Provide extra context or specific instructions to Taskmaster for generating the subtasks.` (CLI: `-p, --prompt <text>`) + * `force`: `Optional: If true, clear existing subtasks before generating new ones. Default is false (append).` (CLI: `--force`) + * `tag`: `Specify which tag context the task belongs to. Defaults to the current active tag.` (CLI: `--tag <name>`) + * `file`: `Path to your Taskmaster 'tasks.json' file. Default relies on auto-detection.` (CLI: `-f, --file <file>`) +* **Usage:** Generate a detailed implementation plan for a complex task before starting coding. Automatically uses complexity report recommendations if available and `num` is not specified. +* **Important:** This MCP tool makes AI calls and can take up to a minute to complete. Please inform users to hang tight while the operation is in progress. + +### 14. Expand All Tasks (`expand_all`) + +* **MCP Tool:** `expand_all` +* **CLI Command:** `task-master expand --all [options]` (Note: CLI uses the `expand` command with the `--all` flag) +* **Description:** `Tell Taskmaster to automatically expand all eligible pending/in-progress tasks based on complexity analysis or defaults. 
Appends subtasks by default.` +* **Key Parameters/Options:** + * `num`: `Optional: Suggests how many subtasks Taskmaster should aim to create per task.` (CLI: `-n, --num <number>`) + * `research`: `Enable research role for more informed subtask generation. Requires appropriate API key.` (CLI: `-r, --research`) + * `prompt`: `Optional: Provide extra context for Taskmaster to apply generally during expansion.` (CLI: `-p, --prompt <text>`) + * `force`: `Optional: If true, clear existing subtasks before generating new ones for each eligible task. Default is false (append).` (CLI: `--force`) + * `tag`: `Specify which tag context to expand. Defaults to the current active tag.` (CLI: `--tag <name>`) + * `file`: `Path to your Taskmaster 'tasks.json' file. Default relies on auto-detection.` (CLI: `-f, --file <file>`) +* **Usage:** Useful after initial task generation or complexity analysis to break down multiple tasks at once. +* **Important:** This MCP tool makes AI calls and can take up to a minute to complete. Please inform users to hang tight while the operation is in progress. + +### 15. Clear Subtasks (`clear_subtasks`) + +* **MCP Tool:** `clear_subtasks` +* **CLI Command:** `task-master clear-subtasks [options]` +* **Description:** `Remove all subtasks from one or more specified Taskmaster parent tasks.` +* **Key Parameters/Options:** + * `id`: `The ID(s) of the Taskmaster parent task(s) whose subtasks you want to remove, e.g., '15' or '16,18'. Required unless using 'all'.` (CLI: `-i, --id <ids>`) + * `all`: `Tell Taskmaster to remove subtasks from all parent tasks.` (CLI: `--all`) + * `tag`: `Specify which tag context to operate on. Defaults to the current active tag.` (CLI: `--tag <name>`) + * `file`: `Path to your Taskmaster 'tasks.json' file. Default relies on auto-detection.` (CLI: `-f, --file <file>`) +* **Usage:** Used before regenerating subtasks with `expand_task` if the previous breakdown needs replacement. + +### 16. 
Remove Subtask (`remove_subtask`) + +* **MCP Tool:** `remove_subtask` +* **CLI Command:** `task-master remove-subtask [options]` +* **Description:** `Remove a subtask from its Taskmaster parent, optionally converting it into a standalone task.` +* **Key Parameters/Options:** + * `id`: `Required. The ID(s) of the Taskmaster subtask(s) to remove, e.g., '15.2' or '16.1,16.3'.` (CLI: `-i, --id <id>`) + * `convert`: `If used, Taskmaster will turn the subtask into a regular top-level task instead of deleting it.` (CLI: `-c, --convert`) + * `generate`: `Enable Taskmaster to regenerate markdown task files after removing the subtask.` (CLI: `--generate`) + * `tag`: `Specify which tag context to operate on. Defaults to the current active tag.` (CLI: `--tag <name>`) + * `file`: `Path to your Taskmaster 'tasks.json' file. Default relies on auto-detection.` (CLI: `-f, --file <file>`) +* **Usage:** Delete unnecessary subtasks or promote a subtask to a top-level task. + +### 17. Move Task (`move_task`) + +* **MCP Tool:** `move_task` +* **CLI Command:** `task-master move [options]` +* **Description:** `Move a task or subtask to a new position within the task hierarchy.` +* **Key Parameters/Options:** + * `from`: `Required. ID of the task/subtask to move (e.g., "5" or "5.2"). Can be comma-separated for multiple tasks.` (CLI: `--from <id>`) + * `to`: `Required. ID of the destination (e.g., "7" or "7.3"). Must match the number of source IDs if comma-separated.` (CLI: `--to <id>`) + * `tag`: `Specify which tag context to operate on. Defaults to the current active tag.` (CLI: `--tag <name>`) + * `file`: `Path to your Taskmaster 'tasks.json' file. Default relies on auto-detection.` (CLI: `-f, --file <file>`) +* **Usage:** Reorganize tasks by moving them within the hierarchy. 
Supports various scenarios like: + * Moving a task to become a subtask + * Moving a subtask to become a standalone task + * Moving a subtask to a different parent + * Reordering subtasks within the same parent + * Moving a task to a new, non-existent ID (automatically creates placeholders) + * Moving multiple tasks at once with comma-separated IDs +* **Validation Features:** + * Allows moving tasks to non-existent destination IDs (creates placeholder tasks) + * Prevents moving to existing task IDs that already have content (to avoid overwriting) + * Validates that source tasks exist before attempting to move them + * Maintains proper parent-child relationships +* **Example CLI:** `task-master move --from=5.2 --to=7.3` to move subtask 5.2 to become subtask 7.3. +* **Example Multi-Move:** `task-master move --from=10,11,12 --to=16,17,18` to move multiple tasks to new positions. +* **Common Use:** Resolving merge conflicts in tasks.json when multiple team members create tasks on different branches. + +--- + +## Dependency Management + +### 18. Add Dependency (`add_dependency`) + +* **MCP Tool:** `add_dependency` +* **CLI Command:** `task-master add-dependency [options]` +* **Description:** `Define a dependency in Taskmaster, making one task a prerequisite for another.` +* **Key Parameters/Options:** + * `id`: `Required. The ID of the Taskmaster task that will depend on another.` (CLI: `-i, --id <id>`) + * `dependsOn`: `Required. The ID of the Taskmaster task that must be completed first, the prerequisite.` (CLI: `-d, --depends-on <id>`) + * `tag`: `Specify which tag context to operate on. Defaults to the current active tag.` (CLI: `--tag <name>`) + * `file`: `Path to your Taskmaster 'tasks.json' file. Default relies on auto-detection.` (CLI: `-f, --file <path>`) +* **Usage:** Establish the correct order of execution between tasks. + +### 19. 
Remove Dependency (`remove_dependency`) + +* **MCP Tool:** `remove_dependency` +* **CLI Command:** `task-master remove-dependency [options]` +* **Description:** `Remove a dependency relationship between two Taskmaster tasks.` +* **Key Parameters/Options:** + * `id`: `Required. The ID of the Taskmaster task you want to remove a prerequisite from.` (CLI: `-i, --id <id>`) + * `dependsOn`: `Required. The ID of the Taskmaster task that should no longer be a prerequisite.` (CLI: `-d, --depends-on <id>`) + * `tag`: `Specify which tag context to operate on. Defaults to the current active tag.` (CLI: `--tag <name>`) + * `file`: `Path to your Taskmaster 'tasks.json' file. Default relies on auto-detection.` (CLI: `-f, --file <file>`) +* **Usage:** Update task relationships when the order of execution changes. + +### 20. Validate Dependencies (`validate_dependencies`) + +* **MCP Tool:** `validate_dependencies` +* **CLI Command:** `task-master validate-dependencies [options]` +* **Description:** `Check your Taskmaster tasks for dependency issues (like circular references or links to non-existent tasks) without making changes.` +* **Key Parameters/Options:** + * `tag`: `Specify which tag context to validate. Defaults to the current active tag.` (CLI: `--tag <name>`) + * `file`: `Path to your Taskmaster 'tasks.json' file. Default relies on auto-detection.` (CLI: `-f, --file <file>`) +* **Usage:** Audit the integrity of your task dependencies. + +### 21. Fix Dependencies (`fix_dependencies`) + +* **MCP Tool:** `fix_dependencies` +* **CLI Command:** `task-master fix-dependencies [options]` +* **Description:** `Automatically fix dependency issues (like circular references or links to non-existent tasks) in your Taskmaster tasks.` +* **Key Parameters/Options:** + * `tag`: `Specify which tag context to fix dependencies in. Defaults to the current active tag.` (CLI: `--tag <name>`) + * `file`: `Path to your Taskmaster 'tasks.json' file. 
Default relies on auto-detection.` (CLI: `-f, --file <file>`) +* **Usage:** Clean up dependency errors automatically. + +--- + +## Analysis & Reporting + +### 22. Analyze Project Complexity (`analyze_project_complexity`) + +* **MCP Tool:** `analyze_project_complexity` +* **CLI Command:** `task-master analyze-complexity [options]` +* **Description:** `Have Taskmaster analyze your tasks to determine their complexity and suggest which ones need to be broken down further.` +* **Key Parameters/Options:** + * `output`: `Where to save the complexity analysis report. Default is '.taskmaster/reports/task-complexity-report.json' (or '..._tagname.json' if a tag is used).` (CLI: `-o, --output <file>`) + * `threshold`: `The minimum complexity score (1-10) that should trigger a recommendation to expand a task.` (CLI: `-t, --threshold <number>`) + * `research`: `Enable research role for more accurate complexity analysis. Requires appropriate API key.` (CLI: `-r, --research`) + * `tag`: `Specify which tag context to analyze. Defaults to the current active tag.` (CLI: `--tag <name>`) + * `file`: `Path to your Taskmaster 'tasks.json' file. Default relies on auto-detection.` (CLI: `-f, --file <file>`) +* **Usage:** Used before breaking down tasks to identify which ones need the most attention. +* **Important:** This MCP tool makes AI calls and can take up to a minute to complete. Please inform users to hang tight while the operation is in progress. + +### 23. View Complexity Report (`complexity_report`) + +* **MCP Tool:** `complexity_report` +* **CLI Command:** `task-master complexity-report [options]` +* **Description:** `Display the task complexity analysis report in a readable format.` +* **Key Parameters/Options:** + * `tag`: `Specify which tag context to show the report for. 
Defaults to the current active tag.` (CLI: `--tag <name>`) + * `file`: `Path to the complexity report (default: '.taskmaster/reports/task-complexity-report.json').` (CLI: `-f, --file <file>`) +* **Usage:** Review and understand the complexity analysis results after running analyze-complexity. + +--- + +## File Management + +### 24. Generate Task Files (`generate`) + +* **MCP Tool:** `generate` +* **CLI Command:** `task-master generate [options]` +* **Description:** `Create or update individual Markdown files for each task based on your tasks.json.` +* **Key Parameters/Options:** + * `output`: `The directory where Taskmaster should save the task files (default: in a 'tasks' directory).` (CLI: `-o, --output <directory>`) + * `tag`: `Specify which tag context to generate files for. Defaults to the current active tag.` (CLI: `--tag <name>`) + * `file`: `Path to your Taskmaster 'tasks.json' file. Default relies on auto-detection.` (CLI: `-f, --file <file>`) +* **Usage:** Run this after making changes to tasks.json to keep individual task files up to date. This command is now manual and no longer runs automatically. + +--- + +## AI-Powered Research + +### 25. Research (`research`) + +* **MCP Tool:** `research` +* **CLI Command:** `task-master research [options]` +* **Description:** `Perform AI-powered research queries with project context to get fresh, up-to-date information beyond the AI's knowledge cutoff.` +* **Key Parameters/Options:** + * `query`: `Required. 
Research query/prompt (e.g., "What are the latest best practices for React Query v5?").` (CLI: `[query]` positional or `-q, --query <text>`) + * `taskIds`: `Comma-separated list of task/subtask IDs from the current tag context (e.g., "15,16.2,17").` (CLI: `-i, --id <ids>`) + * `filePaths`: `Comma-separated list of file paths for context (e.g., "src/api.js,docs/readme.md").` (CLI: `-f, --files <paths>`) + * `customContext`: `Additional custom context text to include in the research.` (CLI: `-c, --context <text>`) + * `includeProjectTree`: `Include project file tree structure in context (default: false).` (CLI: `--tree`) + * `detailLevel`: `Detail level for the research response: 'low', 'medium', 'high' (default: medium).` (CLI: `--detail <level>`) + * `saveTo`: `Task or subtask ID (e.g., "15", "15.2") to automatically save the research conversation to.` (CLI: `--save-to <id>`) + * `saveFile`: `If true, saves the research conversation to a markdown file in '.taskmaster/docs/research/'.` (CLI: `--save-file`) + * `noFollowup`: `Disables the interactive follow-up question menu in the CLI.` (CLI: `--no-followup`) + * `tag`: `Specify which tag context to use for task-based context gathering. Defaults to the current active tag.` (CLI: `--tag <name>`) + * `projectRoot`: `The directory of the project. 
Must be an absolute path.` (CLI: Determined automatically) +* **Usage:** **This is a POWERFUL tool that agents should use FREQUENTLY** to: + * Get fresh information beyond knowledge cutoff dates + * Research latest best practices, library updates, security patches + * Find implementation examples for specific technologies + * Validate approaches against current industry standards + * Get contextual advice based on project files and tasks +* **When to Consider Using Research:** + * **Before implementing any task** - Research current best practices + * **When encountering new technologies** - Get up-to-date implementation guidance (libraries, apis, etc) + * **For security-related tasks** - Find latest security recommendations + * **When updating dependencies** - Research breaking changes and migration guides + * **For performance optimization** - Get current performance best practices + * **When debugging complex issues** - Research known solutions and workarounds +* **Research + Action Pattern:** + * Use `research` to gather fresh information + * Use `update_subtask` to commit findings with timestamps + * Use `update_task` to incorporate research into task details + * Use `add_task` with research flag for informed task creation +* **Important:** This MCP tool makes AI calls and can take up to a minute to complete. The research provides FRESH data beyond the AI's training cutoff, making it invaluable for current best practices and recent developments. + +--- + +## Tag Management + +This new suite of commands allows you to manage different task contexts (tags). + +### 26. List Tags (`tags`) + +* **MCP Tool:** `list_tags` +* **CLI Command:** `task-master tags [options]` +* **Description:** `List all available tags with task counts, completion status, and other metadata.` +* **Key Parameters/Options:** + * `file`: `Path to your Taskmaster 'tasks.json' file. 
Default relies on auto-detection.` (CLI: `-f, --file <file>`) + * `--show-metadata`: `Include detailed metadata in the output (e.g., creation date, description).` (CLI: `--show-metadata`) + +### 27. Add Tag (`add_tag`) + +* **MCP Tool:** `add_tag` +* **CLI Command:** `task-master add-tag <tagName> [options]` +* **Description:** `Create a new, empty tag context, or copy tasks from another tag.` +* **Key Parameters/Options:** + * `tagName`: `Name of the new tag to create (alphanumeric, hyphens, underscores).` (CLI: `<tagName>` positional) + * `--from-branch`: `Creates a tag with a name derived from the current git branch, ignoring the <tagName> argument.` (CLI: `--from-branch`) + * `--copy-from-current`: `Copy tasks from the currently active tag to the new tag.` (CLI: `--copy-from-current`) + * `--copy-from <tag>`: `Copy tasks from a specific source tag to the new tag.` (CLI: `--copy-from <tag>`) + * `--description <text>`: `Provide an optional description for the new tag.` (CLI: `-d, --description <text>`) + * `file`: `Path to your Taskmaster 'tasks.json' file. Default relies on auto-detection.` (CLI: `-f, --file <file>`) + +### 28. Delete Tag (`delete_tag`) + +* **MCP Tool:** `delete_tag` +* **CLI Command:** `task-master delete-tag <tagName> [options]` +* **Description:** `Permanently delete a tag and all of its associated tasks.` +* **Key Parameters/Options:** + * `tagName`: `Name of the tag to delete.` (CLI: `<tagName>` positional) + * `--yes`: `Skip the confirmation prompt.` (CLI: `-y, --yes`) + * `file`: `Path to your Taskmaster 'tasks.json' file. Default relies on auto-detection.` (CLI: `-f, --file <file>`) + +### 29. Use Tag (`use_tag`) + +* **MCP Tool:** `use_tag` +* **CLI Command:** `task-master use-tag <tagName>` +* **Description:** `Switch your active task context to a different tag.` +* **Key Parameters/Options:** + * `tagName`: `Name of the tag to switch to.` (CLI: `<tagName>` positional) + * `file`: `Path to your Taskmaster 'tasks.json' file. 
Default relies on auto-detection.` (CLI: `-f, --file <file>`) + +### 30. Rename Tag (`rename_tag`) + +* **MCP Tool:** `rename_tag` +* **CLI Command:** `task-master rename-tag <oldName> <newName>` +* **Description:** `Rename an existing tag.` +* **Key Parameters/Options:** + * `oldName`: `The current name of the tag.` (CLI: `<oldName>` positional) + * `newName`: `The new name for the tag.` (CLI: `<newName>` positional) + * `file`: `Path to your Taskmaster 'tasks.json' file. Default relies on auto-detection.` (CLI: `-f, --file <file>`) + +### 31. Copy Tag (`copy_tag`) + +* **MCP Tool:** `copy_tag` +* **CLI Command:** `task-master copy-tag <sourceName> <targetName> [options]` +* **Description:** `Copy an entire tag context, including all its tasks and metadata, to a new tag.` +* **Key Parameters/Options:** + * `sourceName`: `Name of the tag to copy from.` (CLI: `<sourceName>` positional) + * `targetName`: `Name of the new tag to create.` (CLI: `<targetName>` positional) + * `--description <text>`: `Optional description for the new tag.` (CLI: `-d, --description <text>`) + +--- + +## Miscellaneous + +### 32. Sync Readme (`sync-readme`) -- experimental + +* **MCP Tool:** N/A +* **CLI Command:** `task-master sync-readme [options]` +* **Description:** `Exports your task list to your project's README.md file, useful for showcasing progress.` +* **Key Parameters/Options:** + * `status`: `Filter tasks by status (e.g., 'pending', 'done').` (CLI: `-s, --status <status>`) + * `withSubtasks`: `Include subtasks in the export.` (CLI: `--with-subtasks`) + * `tag`: `Specify which tag context to export from. Defaults to the current active tag.` (CLI: `--tag <name>`) + +--- + +## Environment Variables Configuration (Updated) + +Taskmaster primarily uses the **`.taskmaster/config.json`** file (in project root) for configuration (models, parameters, logging level, etc.), managed via `task-master models --setup`. 
+ +Environment variables are used **only** for sensitive API keys related to AI providers and specific overrides like the Ollama base URL: + +* **API Keys (Required for corresponding provider):** + * `ANTHROPIC_API_KEY` + * `PERPLEXITY_API_KEY` + * `OPENAI_API_KEY` + * `GOOGLE_API_KEY` + * `MISTRAL_API_KEY` + * `AZURE_OPENAI_API_KEY` (Requires `AZURE_OPENAI_ENDPOINT` too) + * `OPENROUTER_API_KEY` + * `XAI_API_KEY` + * `OLLAMA_API_KEY` (Requires `OLLAMA_BASE_URL` too) +* **Endpoints (Optional/Provider Specific inside .taskmaster/config.json):** + * `AZURE_OPENAI_ENDPOINT` + * `OLLAMA_BASE_URL` (Default: `http://localhost:11434/api`) + +**Set API keys** in your **`.env`** file in the project root (for CLI use) or within the `env` section of your **`.vscode/mcp.json`** file (for MCP/VS Code integration). All other settings (model choice, max tokens, temperature, log level, custom endpoints) are managed in `.taskmaster/config.json` via `task-master models` command or `models` MCP tool. + +--- + +## MCP Tool Tiers + +Default: `core` (7 tools). Set via `TASK_MASTER_TOOLS` env var in MCP config. + +| Tier | Count | Tools | +|------|-------|-------| +| `core` | 7 | `get_tasks`, `next_task`, `get_task`, `set_task_status`, `update_subtask`, `parse_prd`, `expand_task` | +| `standard` | 14 | core + `initialize_project`, `analyze_project_complexity`, `expand_all`, `add_subtask`, `remove_task`, `add_task`, `complexity_report` | +| `all` | 44+ | standard + dependencies, tags, research, autopilot, scoping, models, rules | + +**Upgrade when tool unavailable:** Edit MCP config (`.vscode/mcp.json` or `.mcp.json`), change `TASK_MASTER_TOOLS` from `"core"` to `"standard"` or `"all"`, restart MCP. + +--- + +For details on how these commands fit into the development process, see the [dev_workflow.instructions.md](.github/instructions/dev_workflow.instructions.md). 
\ No newline at end of file diff --git a/.github/instructions/vscode_rules.instructions.md b/.github/instructions/vscode_rules.instructions.md new file mode 100644 index 0000000..8b80630 --- /dev/null +++ b/.github/instructions/vscode_rules.instructions.md @@ -0,0 +1,52 @@ +--- +description: Guidelines for creating and maintaining VS Code rules to ensure consistency and effectiveness. +applyTo: ".github/instructions/*.instructions.md" +--- + +- **Required Rule Structure:** + ```markdown + --- + description: Clear, one-line description of what the rule enforces + globs: path/to/files/*.ext, other/path/**/* + alwaysApply: boolean + --- + + - **Main Points in Bold** + - Sub-points with details + - Examples and explanations + ``` + +- **File References:** + - Use `[filename](mdc:path/to/file)` ([filename](mdc:filename)) to reference files + - Example: [prisma.instructions.md](.github/instructions/prisma.instructions.md) for rule references + - Example: [schema.prisma](mdc:prisma/schema.prisma) for code references + +- **Code Examples:** + - Use language-specific code blocks + ```typescript + // ✅ DO: Show good examples + const goodExample = true; + + // ❌ DON'T: Show anti-patterns + const badExample = false; + ``` + +- **Rule Content Guidelines:** + - Start with high-level overview + - Include specific, actionable requirements + - Show examples of correct implementation + - Reference existing code when possible + - Keep rules DRY by referencing other rules + +- **Rule Maintenance:** + - Update rules when new patterns emerge + - Add examples from actual codebase + - Remove outdated patterns + - Cross-reference related rules + +- **Best Practices:** + - Use bullet points for clarity + - Keep descriptions concise + - Include both DO and DON'T examples + - Reference actual code over theoretical examples + - Use consistent formatting across rules \ No newline at end of file diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..fad7abb --- /dev/null +++ 
b/.gitignore @@ -0,0 +1,37 @@ +# Dependencies +node_modules/ + +# Build output +dist/ +*.tsbuildinfo + +# Environment variables +.env +.env.local +.env.*.local + +# Logs +logs/ +*.log +npm-debug.log* + +# Editor +.idea +*.suo +*.ntvs* +*.njsproj +*.sln +*.sw? + +# OS +.DS_Store +Thumbs.db + +# Test coverage +coverage/ + +# Docker volumes +pgdata/ + +# Prisma +src/db/prisma/migrations/*.sql.backup diff --git a/.mcp.json b/.mcp.json new file mode 100644 index 0000000..c586828 --- /dev/null +++ b/.mcp.json @@ -0,0 +1,24 @@ +{ + "mcpServers": { + "task-master-ai": { + "type": "stdio", + "command": "npx", + "args": [ + "-y", + "task-master-ai" + ], + "env": { + "TASK_MASTER_TOOLS": "core", + "ANTHROPIC_API_KEY": "YOUR_ANTHROPIC_API_KEY_HERE", + "PERPLEXITY_API_KEY": "YOUR_PERPLEXITY_API_KEY_HERE", + "OPENAI_API_KEY": "YOUR_OPENAI_KEY_HERE", + "GOOGLE_API_KEY": "YOUR_GOOGLE_KEY_HERE", + "XAI_API_KEY": "YOUR_XAI_KEY_HERE", + "OPENROUTER_API_KEY": "YOUR_OPENROUTER_KEY_HERE", + "MISTRAL_API_KEY": "YOUR_MISTRAL_KEY_HERE", + "AZURE_OPENAI_API_KEY": "YOUR_AZURE_KEY_HERE", + "OLLAMA_API_KEY": "YOUR_OLLAMA_API_KEY_HERE" + } + } + } +} diff --git a/.taskmaster/CLAUDE.md b/.taskmaster/CLAUDE.md new file mode 100644 index 0000000..e07d72f --- /dev/null +++ b/.taskmaster/CLAUDE.md @@ -0,0 +1,435 @@ +# Task Master AI - Agent Integration Guide + +## Essential Commands + +### Core Workflow Commands + +```bash +# Project Setup +task-master init # Initialize Task Master in current project +task-master parse-prd .taskmaster/docs/prd.md # Generate tasks from PRD document +task-master models --setup # Configure AI models interactively + +# Daily Development Workflow +task-master list # Show all tasks with status +task-master next # Get next available task to work on +task-master show <id> # View detailed task information (e.g., task-master show 1.2) +task-master set-status --id=<id> --status=done # Mark task complete + +# Task Management +task-master add-task --prompt="description" 
--research # Add new task with AI assistance +task-master expand --id=<id> --research --force # Break task into subtasks +task-master update-task --id=<id> --prompt="changes" # Update specific task +task-master update --from=<id> --prompt="changes" # Update multiple tasks from ID onwards +task-master update-subtask --id=<id> --prompt="notes" # Add implementation notes to subtask + +# Analysis & Planning +task-master analyze-complexity --research # Analyze task complexity +task-master complexity-report # View complexity analysis +task-master expand --all --research # Expand all eligible tasks + +# Dependencies & Organization +task-master add-dependency --id=<id> --depends-on=<id> # Add task dependency +task-master move --from=<id> --to=<id> # Reorganize task hierarchy +task-master validate-dependencies # Check for dependency issues +task-master generate # Update task markdown files (usually auto-called) +``` + +## Key Files & Project Structure + +### Core Files + +- `.taskmaster/tasks/tasks.json` - Main task data file (auto-managed) +- `.taskmaster/config.json` - AI model configuration (use `task-master models` to modify) +- `.taskmaster/docs/prd.md` - Product Requirements Document for parsing (`.md` extension recommended for better editor support) +- `.taskmaster/tasks/*.txt` - Individual task files (auto-generated from tasks.json) +- `.env` - API keys for CLI usage + +**PRD File Format:** While both `.txt` and `.md` extensions work, **`.md` is recommended** because: +- Markdown syntax highlighting in editors improves readability +- Proper rendering when previewing in VS Code, GitHub, or other tools +- Better collaboration through formatted documentation + +### Claude Code Integration Files + +- `CLAUDE.md` - Auto-loaded context for Claude Code (this file) +- `.claude/settings.json` - Claude Code tool allowlist and preferences +- `.claude/commands/` - Custom slash commands for repeated workflows +- `.mcp.json` - MCP server configuration (project-specific) + +### 
Directory Structure + +``` +project/ +├── .taskmaster/ +│ ├── tasks/ # Task files directory +│ │ ├── tasks.json # Main task database +│ │ ├── task-1.md # Individual task files +│ │ └── task-2.md +│ ├── docs/ # Documentation directory +│ │ ├── prd.md # Product requirements (.md recommended) +│ ├── reports/ # Analysis reports directory +│ │ └── task-complexity-report.json +│ ├── templates/ # Template files +│ │ └── example_prd.md # Example PRD template (.md recommended) +│ └── config.json # AI models & settings +├── .claude/ +│ ├── settings.json # Claude Code configuration +│ └── commands/ # Custom slash commands +├── .env # API keys +├── .mcp.json # MCP configuration +└── CLAUDE.md # This file - auto-loaded by Claude Code +``` + +## MCP Integration + +Task Master provides an MCP server that Claude Code can connect to. Configure in `.mcp.json`: + +```json +{ + "mcpServers": { + "task-master-ai": { + "command": "npx", + "args": ["-y", "task-master-ai"], + "env": { + "TASK_MASTER_TOOLS": "core", + "ANTHROPIC_API_KEY": "your_key_here", + "PERPLEXITY_API_KEY": "your_key_here", + "OPENAI_API_KEY": "OPENAI_API_KEY_HERE", + "GOOGLE_API_KEY": "GOOGLE_API_KEY_HERE", + "XAI_API_KEY": "XAI_API_KEY_HERE", + "OPENROUTER_API_KEY": "OPENROUTER_API_KEY_HERE", + "MISTRAL_API_KEY": "MISTRAL_API_KEY_HERE", + "AZURE_OPENAI_API_KEY": "AZURE_OPENAI_API_KEY_HERE", + "OLLAMA_API_KEY": "OLLAMA_API_KEY_HERE" + } + } + } +} +``` + +### MCP Tool Tiers + +Default: `core` (7 tools). Set via `TASK_MASTER_TOOLS` env var. 
+ +| Tier | Count | Tools | +|------|-------|-------| +| `core` | 7 | `get_tasks`, `next_task`, `get_task`, `set_task_status`, `update_subtask`, `parse_prd`, `expand_task` | +| `standard` | 14 | core + `initialize_project`, `analyze_project_complexity`, `expand_all`, `add_subtask`, `remove_task`, `add_task`, `complexity_report` | +| `all` | 44+ | standard + dependencies, tags, research, autopilot, scoping, models, rules | + +**Upgrade when tool unavailable:** Edit MCP config, change `TASK_MASTER_TOOLS` from `"core"` to `"standard"` or `"all"`, restart MCP. + +### Essential MCP Tools + +```javascript +help; // = shows available taskmaster commands +// Project setup +initialize_project; // = task-master init +parse_prd; // = task-master parse-prd + +// Daily workflow +get_tasks; // = task-master list +next_task; // = task-master next +get_task; // = task-master show <id> +set_task_status; // = task-master set-status + +// Task management +add_task; // = task-master add-task +expand_task; // = task-master expand +update_task; // = task-master update-task +update_subtask; // = task-master update-subtask +update; // = task-master update + +// Analysis +analyze_project_complexity; // = task-master analyze-complexity +complexity_report; // = task-master complexity-report +``` + +## Claude Code Workflow Integration + +### Standard Development Workflow + +#### 1. Project Initialization + +```bash +# Initialize Task Master +task-master init + +# Create or obtain PRD, then parse it (use .md extension for better editor support) +task-master parse-prd .taskmaster/docs/prd.md + +# Analyze complexity and expand tasks +task-master analyze-complexity --research +task-master expand --all --research +``` + +If tasks already exist, another PRD can be parsed (with new information only!) using parse-prd with --append flag. This will add the generated tasks to the existing list of tasks. + +#### 2. 
Daily Development Loop + +```bash +# Start each session +task-master next # Find next available task +task-master show <id> # Review task details + +# During implementation, check in code context into the tasks and subtasks +task-master update-subtask --id=<id> --prompt="implementation notes..." + +# Complete tasks +task-master set-status --id=<id> --status=done +``` + +#### 3. Multi-Claude Workflows + +For complex projects, use multiple Claude Code sessions: + +```bash +# Terminal 1: Main implementation +cd project && claude + +# Terminal 2: Testing and validation +cd project-test-worktree && claude + +# Terminal 3: Documentation updates +cd project-docs-worktree && claude +``` + +### Custom Slash Commands + +Create `.claude/commands/taskmaster-next.md`: + +```markdown +Find the next available Task Master task and show its details. + +Steps: + +1. Run `task-master next` to get the next task +2. If a task is available, run `task-master show <id>` for full details +3. Provide a summary of what needs to be implemented +4. Suggest the first implementation step +``` + +Create `.claude/commands/taskmaster-complete.md`: + +```markdown +Complete a Task Master task: $ARGUMENTS + +Steps: + +1. Review the current task with `task-master show $ARGUMENTS` +2. Verify all implementation is complete +3. Run any tests related to this task +4. Mark as complete: `task-master set-status --id=$ARGUMENTS --status=done` +5. 
Show the next available task with `task-master next` +``` + +## Tool Allowlist Recommendations + +Add to `.claude/settings.json`: + +```json +{ + "allowedTools": [ + "Edit", + "Bash(task-master *)", + "Bash(git commit:*)", + "Bash(git add:*)", + "Bash(npm run *)", + "mcp__task_master_ai__*" + ] +} +``` + +## Configuration & Setup + +### API Keys Required + +At least **one** of these API keys must be configured: + +- `ANTHROPIC_API_KEY` (Claude models) - **Recommended** +- `PERPLEXITY_API_KEY` (Research features) - **Highly recommended** +- `OPENAI_API_KEY` (GPT models) +- `GOOGLE_API_KEY` (Gemini models) +- `MISTRAL_API_KEY` (Mistral models) +- `OPENROUTER_API_KEY` (Multiple models) +- `XAI_API_KEY` (Grok models) + +An API key is required for any provider used across any of the 3 roles defined in the `models` command. + +### Model Configuration + +```bash +# Interactive setup (recommended) +task-master models --setup + +# Set specific models +task-master models --set-main claude-3-5-sonnet-20241022 +task-master models --set-research perplexity-llama-3.1-sonar-large-128k-online +task-master models --set-fallback gpt-4o-mini +``` + +## Task Structure & IDs + +### Task ID Format + +- Main tasks: `1`, `2`, `3`, etc. +- Subtasks: `1.1`, `1.2`, `2.1`, etc. +- Sub-subtasks: `1.1.1`, `1.1.2`, etc. 
+ +### Task Status Values + +- `pending` - Ready to work on +- `in-progress` - Currently being worked on +- `done` - Completed and verified +- `deferred` - Postponed +- `cancelled` - No longer needed +- `blocked` - Waiting on external factors + +### Task Fields + +```json +{ + "id": "1.2", + "title": "Implement user authentication", + "description": "Set up JWT-based auth system", + "status": "pending", + "priority": "high", + "dependencies": ["1.1"], + "details": "Use bcrypt for hashing, JWT for tokens...", + "testStrategy": "Unit tests for auth functions, integration tests for login flow", + "subtasks": [] +} +``` + +## Claude Code Best Practices with Task Master + +### Context Management + +- Use `/clear` between different tasks to maintain focus +- This CLAUDE.md file is automatically loaded for context +- Use `task-master show <id>` to pull specific task context when needed + +### Iterative Implementation + +1. `task-master show <subtask-id>` - Understand requirements +2. Explore codebase and plan implementation +3. `task-master update-subtask --id=<id> --prompt="detailed plan"` - Log plan +4. `task-master set-status --id=<id> --status=in-progress` - Start work +5. Implement code following logged plan +6. `task-master update-subtask --id=<id> --prompt="what worked/didn't work"` - Log progress +7. `task-master set-status --id=<id> --status=done` - Complete task + +### Complex Workflows with Checklists + +For large migrations or multi-step processes: + +1. Create a markdown PRD file describing the new changes: `touch task-migration-checklist.md` (prds can be .txt or .md) +2. Use Taskmaster to parse the new prd with `task-master parse-prd --append` (also available in MCP) +3. Use Taskmaster to expand the newly generated tasks into subtasks. Consider using `analyze-complexity` with the correct --to and --from IDs (the new ids) to identify the ideal subtask amounts for each task. Then expand them. +4. 
Work through items systematically, checking them off as completed +5. Use `task-master update-subtask` to log progress on each task/subtask and/or updating/researching them before/during implementation if getting stuck + +### Git Integration + +Task Master works well with `gh` CLI: + +```bash +# Create PR for completed task +gh pr create --title "Complete task 1.2: User authentication" --body "Implements JWT auth system as specified in task 1.2" + +# Reference task in commits +git commit -m "feat: implement JWT auth (task 1.2)" +``` + +### Parallel Development with Git Worktrees + +```bash +# Create worktrees for parallel task development +git worktree add ../project-auth feature/auth-system +git worktree add ../project-api feature/api-refactor + +# Run Claude Code in each worktree +cd ../project-auth && claude # Terminal 1: Auth work +cd ../project-api && claude # Terminal 2: API work +``` + +## Troubleshooting + +### AI Commands Failing + +```bash +# Check API keys are configured +cat .env # For CLI usage + +# Verify model configuration +task-master models + +# Test with different model +task-master models --set-fallback gpt-4o-mini +``` + +### MCP Connection Issues + +- Check `.mcp.json` configuration +- Verify Node.js installation +- Use `--mcp-debug` flag when starting Claude Code +- Use CLI as fallback if MCP unavailable + +### Task File Sync Issues + +```bash +# Regenerate task files from tasks.json +task-master generate + +# Fix dependency issues +task-master fix-dependencies +``` + +DO NOT RE-INITIALIZE. That will not do anything beyond re-adding the same Taskmaster core files. 
+ +## Important Notes + +### AI-Powered Operations + +These commands make AI calls and may take up to a minute: + +- `parse_prd` / `task-master parse-prd` +- `analyze_project_complexity` / `task-master analyze-complexity` +- `expand_task` / `task-master expand` +- `expand_all` / `task-master expand --all` +- `add_task` / `task-master add-task` +- `update` / `task-master update` +- `update_task` / `task-master update-task` +- `update_subtask` / `task-master update-subtask` + +### File Management + +- Never manually edit `tasks.json` - use commands instead +- Never manually edit `.taskmaster/config.json` - use `task-master models` +- Task markdown files in `tasks/` are auto-generated +- Run `task-master generate` after manual changes to tasks.json + +### Claude Code Session Management + +- Use `/clear` frequently to maintain focused context +- Create custom slash commands for repeated Task Master workflows +- Configure tool allowlist to streamline permissions +- Use headless mode for automation: `claude -p "task-master next"` + +### Multi-Task Updates + +- Use `update --from=<id>` to update multiple future tasks +- Use `update-task --id=<id>` for single task updates +- Use `update-subtask --id=<id>` for implementation logging + +### Research Mode + +- Add `--research` flag for research-based AI enhancement +- Requires a research model API key like Perplexity (`PERPLEXITY_API_KEY`) in environment +- Provides more informed task creation and updates +- Recommended for complex technical tasks + +--- + +_This guide ensures Claude Code has immediate access to Task Master's essential functionality for agentic development workflows._ diff --git a/.taskmaster/config.json b/.taskmaster/config.json new file mode 100644 index 0000000..5d0d5ec --- /dev/null +++ b/.taskmaster/config.json @@ -0,0 +1,44 @@ +{ + "models": { + "main": { + "provider": "anthropic", + "modelId": "claude-sonnet-4-20250514", + "maxTokens": 64000, + "temperature": 0.2 + }, + "research": { + "provider": 
"claude-code", + "modelId": "opus", + "maxTokens": 32000, + "temperature": 0.1 + }, + "fallback": { + "provider": "claude-code", + "modelId": "opus", + "maxTokens": 32000, + "temperature": 0.2 + } + }, + "global": { + "logLevel": "info", + "debug": false, + "defaultNumTasks": 10, + "defaultSubtasks": 5, + "defaultPriority": "medium", + "projectName": "mcpctl", + "ollamaBaseURL": "http://localhost:11434/api", + "bedrockBaseURL": "https://bedrock.us-east-1.amazonaws.com", + "responseLanguage": "English", + "enableCodebaseAnalysis": true, + "enableProxy": false, + "anonymousTelemetry": true, + "userId": "1234567890" + }, + "claudeCode": {}, + "codexCli": {}, + "grokCli": { + "timeout": 120000, + "workingDirectory": null, + "defaultModel": "grok-4-latest" + } +} \ No newline at end of file diff --git a/.taskmaster/docs/prd-registry.txt b/.taskmaster/docs/prd-registry.txt new file mode 100644 index 0000000..0a92ced --- /dev/null +++ b/.taskmaster/docs/prd-registry.txt @@ -0,0 +1,74 @@ +mcpctl Registry Integration & Auto-Discovery Features +===================================================== + +Context: mcpctl already has Tasks 1-18 covering the core CLI, mcpd server, local LLM proxy, profiles library, and lifecycle management. These 3 new tasks extend mcpctl with automatic MCP server discovery and LLM-assisted installation. 
+ +Research findings: Multiple public MCP server registries exist with open APIs: +- Official MCP Registry (registry.modelcontextprotocol.io) - 6,093 servers, no auth, OpenAPI spec, has env vars/packages/transport metadata +- Glama.ai (glama.ai/api/mcp/v1/servers) - 17,585 servers, no auth, env var JSON schemas +- Smithery.ai (registry.smithery.ai) - 3,567 servers, free API key, semantic search, verified badges, usage analytics +- NPM registry - ~1,989 packages with keyword:mcp-server +- PyPI - ~3,191 packages with mcp+server in name + +Dependencies: These tasks depend on Tasks 7 (CLI framework), 4 (Server Registry), 10 (Setup Wizard), 15 (Profiles Library) + +== Task 19: Implement MCP Registry Client == + +Build a multi-source registry client that queries the Official MCP Registry, Glama.ai, and Smithery.ai APIs to search, discover, and retrieve MCP server metadata. + +Requirements: +- Primary source: Official MCP Registry REST API (GET /v0/servers?search=...&limit=100&cursor=...) - no auth required +- Secondary: Glama.ai API (glama.ai/api/mcp/v1/servers) - no auth, cursor pagination +- Tertiary: Smithery.ai API (registry.smithery.ai/servers?q=...) 
- free API key from config +- Implement registry client with strategy pattern for each source +- Merge and deduplicate results across registries (match by npm package name or GitHub repo URL) +- Rank results by: relevance score, usage/popularity (from Smithery), verified status, last updated +- Cache results locally with configurable TTL (default 1 hour) +- Handle rate limits gracefully with exponential backoff +- Return normalized RegistryServer type with: name, description, packages (npm/pypi/docker), envTemplate (env vars with isSecret, description), transport type, repository URL, popularity score, verified status +- TDD: Write Vitest tests for every client method, cache, deduplication logic BEFORE implementation +- Security: Validate all API responses, sanitize descriptions (prevent XSS in terminal output), never log API keys +- SRE: Expose metrics for registry query latency, cache hit ratio, error rates +- Networking: Support HTTP proxy and custom CA certificates for enterprise environments +- Data Engineer: Include data platform MCP servers in search results (BigQuery, Snowflake, dbt, etc.) + +== Task 20: Implement mcpctl discover Command == + +Create the `mcpctl discover` CLI command that lets users search for MCP servers across all configured registries with rich filtering and display. 
+ +Requirements: +- Command: `mcpctl discover <query>` - free text search (e.g., "slack", "database query tool", "terraform") +- Options: --category <category> (devops, data-platform, analytics, etc.), --verified (only verified servers), --transport <stdio|sse>, --registry <official|glama|smithery|all>, --limit <n>, --output <table|json|yaml> +- Table output columns: NAME, DESCRIPTION (truncated), PACKAGE, TRANSPORT, VERIFIED, POPULARITY +- Show install command hint: "Run 'mcpctl install <name>' to set up this server" +- Support interactive mode: `mcpctl discover --interactive` - uses inquirer to browse results, select server, and immediately trigger install +- Use the registry client from Task 19 +- TDD: Write tests for command parsing, output formatting, interactive mode BEFORE implementation +- SRE: Exit codes for scripting (0=found results, 1=error, 2=no results) +- Data Analyst: Include filtering by tags/categories relevant to BI tools +- Every function must have unit tests + +== Task 21: Implement mcpctl install with LLM-Assisted Auto-Configuration == + +Create the `mcpctl install <server-name>` command that uses a local LLM (Claude Code, Ollama, or other configured provider) to automatically read the MCP server's documentation, generate envTemplate/setup guide/profiles, and walk the user through configuration. + +Requirements: +- Command: `mcpctl install <server-name>` where server-name comes from discover results or direct registry reference +- Step 1: Fetch server metadata from registry (Task 19 client) +- Step 2: If envTemplate already complete in registry metadata, use it directly +- Step 3: If envTemplate incomplete/missing, use LLM to auto-generate it: + a. Fetch the server's README.md from its GitHub repository URL (from registry metadata) + b. Send README to local LLM (Claude Code session, Ollama, or configured provider from Task 12) + c. 
LLM prompt: "Analyze this MCP server README and extract: required environment variables (name, description, isSecret, setupUrl), recommended profiles (name, permissions), and a step-by-step setup guide" + d. Parse LLM response into structured envTemplate + setupGuide + defaultProfiles + e. Validate LLM output against Zod schema before using +- Step 4: Register the MCP server in mcpd (POST /api/mcp-servers) with generated envTemplate +- Step 5: Run the setup wizard (Task 10) to collect credentials from user +- Step 6: Create profile and optionally add to a project +- Options: --non-interactive (use env vars for credentials), --profile-name <name>, --project <name> (auto-add to project), --dry-run (show what would be configured without doing it), --skip-llm (only use registry metadata, no LLM analysis) +- LLM provider selection: Use the configured LLM provider from Task 12 (Ollama, Gemini CLI, DeepSeek, etc.) or use Claude Code session as the LLM +- Support batch install: `mcpctl install slack jira github` - install multiple servers +- TDD: Write Vitest tests for LLM prompt generation, response parsing, schema validation, full install flow BEFORE implementation +- Security: Sanitize LLM outputs (prevent prompt injection from malicious READMEs), validate generated envTemplate, never auto-execute suggested commands without user approval +- Principal Data Engineer: LLM should understand complex data platform auth patterns (service accounts, OAuth, connection strings) from README analysis +- Every function must have unit tests diff --git a/.taskmaster/docs/prd.txt b/.taskmaster/docs/prd.txt new file mode 100644 index 0000000..db4933a --- /dev/null +++ b/.taskmaster/docs/prd.txt @@ -0,0 +1,43 @@ +mcpctl: +We like kubectl, so we want similar syntax to manage MCP servers + +What are we managing? +We manage mcpd which is application that we will make on a backend (we want to deploy it with docker - docker compose) + +What will it do? 
+It will allow us to easily manage (run) and control and audit mcp servers + +What it means run? +Now it means running it on synology nas with portainer using docker-compose, but in a future it might mean scheduling pods in kubernetes with mcp instances + +It should allow me to create "mcp projects", that will allow us to expose to a claude sessions. Similarly like with taskmaster, we want to be able to do "mcpctl claude add-mcp-project weekly_reports" + +While "weekly_reports" contains for example slack and jira mcps + +We want architecture that will allow us to audit what user runs what, but that for later, but we want to keep it in mind designing architecture + +It must be scalable and stateless (outside of DB) and work with multiple instances and nicely scale + +Abstract goal: +making it easy to run mcp servers, making mcpctl both helper in configuration (no need to think about all pesky settings for each mcp - we will maintain profiles for them), so user lets say will want jira mcp, we want to take user by hand, ask them to log in, we want to redirect their browser to page that generates API tokens, and tell them what to do, if we can't do it ourselves + +We want to be the main go to manager for MCPs + +What about profiles and projects? +So some projects might have MCP that we want read only, or with limited endpoints + +Additional core features? +Prefiltering what MCP server returns, before handing it over to the claude instance, maybe using a local instance of gemini with gemini cli doing first filtering + +Let's say I will say: "write me a weekly report, to do so, get from Slack all messages related to my team, me, or security and linux servers" and then instead of wasting Claude-code tokens on such pointless filtering, it will use local LLM (vllm/ollama) or gemini binary or deepseek API tokens to find relevant messages (without processing them) so claude will only get relevant information. 
+ +Or when claude is using via it, a MCP server that gives it some documentation from terraform, we don't want the whole thing, but related information to the query + +We want it to ask Claude to not just pull data like API, but tell our mcp layer what it wants, so it can look for it specifically + +Design: +mcp servers - run by mcpd (our server side of tool) on centralized server, it might contains credentials, so we deploy it but is unavaiable for local users +local - using some other LLM (gemini cli, or other mentioned earlier) to do pre processing +claude - getting final result + +claude asks -> local geminii/others, inteprets question and makes requests to mcpd to get data from mcp-servers it deployed and manage ---> mcp-servers deliver data--> local geminii/others process returning data, and refines it, to deliver to claude the smalless but the most comprehensive info with smallest context window -> claude gets response, without interacting with mcp servers directly diff --git a/.taskmaster/state.json b/.taskmaster/state.json new file mode 100644 index 0000000..2cc88c2 --- /dev/null +++ b/.taskmaster/state.json @@ -0,0 +1,6 @@ +{ + "currentTag": "master", + "lastSwitched": "2026-02-21T01:07:45.630Z", + "branchTagMapping": {}, + "migrationNoticeShown": true +} \ No newline at end of file diff --git a/.taskmaster/tasks/task_001.md b/.taskmaster/tasks/task_001.md new file mode 100644 index 0000000..3e17489 --- /dev/null +++ b/.taskmaster/tasks/task_001.md @@ -0,0 +1,410 @@ +# Task ID: 1 + +**Title:** Initialize Project Structure and Core Dependencies + +**Status:** pending + +**Dependencies:** None + +**Priority:** high + +**Description:** Set up the monorepo structure for mcpctl with CLI client, mcpd server, and shared libraries. Configure TypeScript, ESLint, and build tooling. 
+ +**Details:** + +Create a monorepo using pnpm workspaces or npm workspaces with the following structure: + +``` +mcpctl/ +├── src/ +│ ├── cli/ # mcpctl CLI tool +│ ├── mcpd/ # Backend daemon server +│ ├── shared/ # Shared types, utilities, constants +│ └── local-proxy/ # Local LLM proxy component +├── docker/ +│ └── docker-compose.yml +├── package.json +├── tsconfig.base.json +└── pnpm-workspace.yaml +``` + +Dependencies to install: +- TypeScript 5.x +- Commander.js for CLI +- Express/Fastify for mcpd HTTP server +- Zod for schema validation +- Winston/Pino for logging +- Prisma or Drizzle for database ORM + +Create base tsconfig.json with strict mode, ES2022 target, and module resolution settings. Set up shared ESLint config with TypeScript rules. + +**Test Strategy:** + +Verify project builds successfully with `pnpm build`. Ensure all packages compile without errors. Test workspace linking works correctly between packages. + +## Subtasks + +### 1.1. Initialize pnpm workspace monorepo with future-proof directory structure + +**Status:** pending +**Dependencies:** None + +Create the complete monorepo directory structure using pnpm workspaces that accommodates all 18 planned tasks without requiring future refactoring. + +**Details:** + +Create root package.json with pnpm workspaces configuration. Create pnpm-workspace.yaml defining all workspace packages. 
Initialize the following directory structure: + +``` +mcpctl/ +├── src/ +│ ├── cli/ # mcpctl CLI tool (Task 7-10) +│ │ ├── src/ +│ │ ├── tests/ +│ │ └── package.json +│ ├── mcpd/ # Backend daemon server (Task 3-6, 14, 16) +│ │ ├── src/ +│ │ ├── tests/ +│ │ └── package.json +│ ├── shared/ # Shared types, utils, constants, validation +│ │ ├── src/ +│ │ │ ├── types/ # TypeScript interfaces/types +│ │ │ ├── utils/ # Utility functions +│ │ │ ├── constants/# Shared constants +│ │ │ ├── validation/ # Zod schemas +│ │ │ └── index.ts # Barrel export +│ │ ├── tests/ +│ │ └── package.json +│ ├── local-proxy/ # Local LLM proxy (Task 11-13) +│ │ ├── src/ +│ │ ├── tests/ +│ │ └── package.json +│ └── db/ # Database package (Task 2) +│ ├── src/ +│ ├── prisma/ # Schema and migrations +│ ├── seed/ # Seed data +│ ├── tests/ +│ └── package.json +├── docker/ +│ └── docker-compose.yml # Local dev services (postgres) +├── tests/ +│ ├── e2e/ # End-to-end tests (Task 18) +│ └── integration/ # Integration tests +├── docs/ # Documentation (Task 18) +├── package.json # Root workspace config +├── pnpm-workspace.yaml +└── turbo.json # Optional: Turborepo for build orchestration +``` + +Each package should have: +- Empty src/index.ts with barrel export pattern ready +- Empty tests/ directory +- package.json with correct workspace dependencies (@mcpctl/shared, @mcpctl/db) + +Use dependency injection patterns from the start by creating interfaces in shared/src/types/ for key services. +<info added on 2026-02-21T02:33:52.473Z> +CRITICAL STRUCTURAL CHANGE: The monorepo workspace packages directory has been renamed from `packages/` to `src/`. All path references in this subtask must use `src/` instead of `packages/`. 
+ +Updated directory structure to implement: + +``` +mcpctl/ +├── src/ # All application source code (pnpm workspace packages) +│ ├── cli/ # @mcpctl/cli - CLI tool (Task 7-10) +│ │ ├── src/ +│ │ ├── tests/ +│ │ └── package.json +│ ├── mcpd/ # @mcpctl/mcpd - Backend daemon (Task 3-6, 14, 16) +│ │ ├── src/ +│ │ ├── tests/ +│ │ └── package.json +│ ├── shared/ # @mcpctl/shared - Shared types, utils, constants, validation +│ │ ├── src/ +│ │ │ ├── types/ # TypeScript interfaces/types +│ │ │ ├── utils/ # Utility functions +│ │ │ ├── constants/ # Shared constants +│ │ │ ├── validation/ # Zod schemas +│ │ │ └── index.ts # Barrel export +│ │ ├── tests/ +│ │ └── package.json +│ ├── local-proxy/ # @mcpctl/local-proxy - LLM proxy (Task 11-13) +│ │ ├── src/ +│ │ ├── tests/ +│ │ └── package.json +│ └── db/ # @mcpctl/db - Database/Prisma (Task 2) +│ ├── src/ +│ ├── prisma/ # Schema and migrations +│ ├── seed/ # Seed data +│ ├── tests/ +│ └── package.json +├── deploy/ # Deployment configs (docker-compose, k8s manifests) +│ ├── docker-compose.yml +│ ├── docker-compose.dev.yml +│ └── Dockerfile.* +├── docs/ # Documentation (Task 18) +├── tests/ # E2E and integration tests +│ ├── e2e/ +│ └── integration/ +├── package.json # Root workspace config +├── pnpm-workspace.yaml # Points to src/* +├── tsconfig.base.json +├── eslint.config.js +├── vitest.workspace.ts +└── turbo.json # Optional: Turborepo for build orchestration +``` + +The pnpm-workspace.yaml should contain: `packages: ["src/*"]` + +Key differences from previous structure: +- `packages/` renamed to `src/` for cleaner separation of app source from project management files +- `docker/` renamed to `deploy/` with additional files (docker-compose.dev.yml, Dockerfile.*) +- Added root config files: eslint.config.js, vitest.workspace.ts +- All workspace package references in pnpm-workspace.yaml use `src/*` pattern +</info added on 2026-02-21T02:33:52.473Z> + +### 1.2. 
Configure TypeScript with strict mode and project references + +**Status:** pending +**Dependencies:** 1.1 + +Set up TypeScript configuration with strict mode, ES2022 target, and proper project references for monorepo build orchestration. + +**Details:** + +Create root tsconfig.base.json with shared compiler options: +```json +{ + "compilerOptions": { + "target": "ES2022", + "module": "NodeNext", + "moduleResolution": "NodeNext", + "lib": ["ES2022"], + "strict": true, + "esModuleInterop": true, + "skipLibCheck": true, + "forceConsistentCasingInFileNames": true, + "declaration": true, + "declarationMap": true, + "sourceMap": true, + "composite": true, + "incremental": true, + "noUnusedLocals": true, + "noUnusedParameters": true, + "noImplicitReturns": true, + "noFallthroughCasesInSwitch": true, + "exactOptionalPropertyTypes": true, + "noUncheckedIndexedAccess": true + } +} +``` + +Create package-specific tsconfig.json in each package that extends the base and sets appropriate paths: +- cli/tsconfig.json: outDir: dist, references to shared and db +- mcpd/tsconfig.json: outDir: dist, references to shared and db +- shared/tsconfig.json: outDir: dist (no references, it's the base) +- local-proxy/tsconfig.json: references to shared +- db/tsconfig.json: references to shared + +Create tsconfig.json at root with project references to all packages for unified builds. + +Install TypeScript 5.x as devDependency in root package.json. + +### 1.3. Set up Vitest testing framework with workspace configuration + +**Status:** pending +**Dependencies:** 1.2 + +Configure Vitest as the test framework across all packages with proper workspace setup, coverage reporting, and test-driven development infrastructure. 
+ +**Details:** + +Install Vitest and related packages at root level: +- vitest +- @vitest/coverage-v8 +- @vitest/ui (optional, for visual test running) + +Create root vitest.config.ts: +```typescript +import { defineConfig } from 'vitest/config'; + +export default defineConfig({ + test: { + globals: true, + coverage: { + provider: 'v8', + reporter: ['text', 'json', 'html'], + exclude: ['**/node_modules/**', '**/dist/**', '**/*.config.*'] + }, + include: ['src/*/tests/**/*.test.ts', 'tests/**/*.test.ts'], + testTimeout: 10000 + } +}); +``` + +Create vitest.workspace.ts for workspace-aware testing: +```typescript +import { defineWorkspace } from 'vitest/config'; + +export default defineWorkspace([ + 'src/cli', + 'src/mcpd', + 'src/shared', + 'src/local-proxy', + 'src/db' +]); +``` + +Create per-package vitest.config.ts files that extend root config. + +Add npm scripts to root package.json: +- "test": "vitest" +- "test:run": "vitest run" +- "test:coverage": "vitest run --coverage" +- "test:ui": "vitest --ui" + +Create initial test file in src/shared/tests/index.test.ts to verify setup works: +```typescript +import { describe, it, expect } from 'vitest'; + +describe('shared package', () => { + it('should be configured correctly', () => { + expect(true).toBe(true); + }); +}); +``` + +### 1.4. Configure ESLint with TypeScript rules and docker-compose for local development + +**Status:** pending +**Dependencies:** 1.2 + +Set up shared ESLint configuration with TypeScript-aware rules, Prettier integration, and docker-compose.yml for local PostgreSQL database. 
+ +**Details:** + +Install ESLint and plugins at root: +- eslint +- @typescript-eslint/parser +- @typescript-eslint/eslint-plugin +- eslint-config-prettier +- eslint-plugin-import + +Create eslint.config.js (flat config, ESLint 9+): +```javascript +import tseslint from '@typescript-eslint/eslint-plugin'; +import tsparser from '@typescript-eslint/parser'; + +export default [ + { + files: ['src/*/src/**/*.ts'], + languageOptions: { + parser: tsparser, + parserOptions: { + project: ['./src/*/tsconfig.json'], + tsconfigRootDir: import.meta.dirname + } + }, + plugins: { '@typescript-eslint': tseslint }, + rules: { + '@typescript-eslint/explicit-function-return-type': 'error', + '@typescript-eslint/no-explicit-any': 'error', + '@typescript-eslint/no-unused-vars': 'error', + '@typescript-eslint/strict-boolean-expressions': 'error', + 'no-console': ['warn', { allow: ['warn', 'error'] }] + } + } +]; +``` + +Create deploy/docker-compose.yml for local development: +```yaml +version: '3.8' +services: + postgres: + image: postgres:16-alpine + container_name: mcpctl-postgres + ports: + - "5432:5432" + environment: + POSTGRES_USER: mcpctl + POSTGRES_PASSWORD: mcpctl_dev + POSTGRES_DB: mcpctl + volumes: + - mcpctl-pgdata:/var/lib/postgresql/data + healthcheck: + test: ["CMD-SHELL", "pg_isready -U mcpctl"] + interval: 5s + timeout: 5s + retries: 5 + +volumes: + mcpctl-pgdata: +``` + +Add scripts to root package.json: +- "lint": "eslint src/*/src/**/*.ts" +- "lint:fix": "eslint src/*/src/**/*.ts --fix" +- "db:up": "docker-compose -f deploy/docker-compose.yml up -d" +- "db:down": "docker-compose -f deploy/docker-compose.yml down" + +Create .env.example at root with DATABASE_URL template: +``` +DATABASE_URL="postgresql://mcpctl:mcpctl_dev@localhost:5432/mcpctl" +``` + +### 1.5. 
Install core dependencies and perform security/architecture review + +**Status:** pending +**Dependencies:** 1.1, 1.3, 1.4 + +Install all required production dependencies across packages, run security audit, and validate the directory structure supports all 18 planned tasks. + +**Details:** + +Install dependencies per package: + +**src/cli/package.json:** +- commander (CLI framework) +- chalk (colored output) +- js-yaml (YAML parsing) +- inquirer (interactive prompts) + +**src/mcpd/package.json:** +- fastify (HTTP server) +- @fastify/cors, @fastify/helmet, @fastify/rate-limit (middleware) +- zod (schema validation) - also add to shared +- pino (logging, built into Fastify) + +**src/shared/package.json:** +- zod (shared validation schemas) + +**src/db/package.json:** +- prisma (ORM) +- @prisma/client + +**src/local-proxy/package.json:** +- @modelcontextprotocol/sdk (MCP protocol) + +**Root devDependencies:** +- typescript +- vitest, @vitest/coverage-v8 +- eslint and plugins (already specified) +- tsx (for running TypeScript directly) +- rimraf (cross-platform rm -rf for clean scripts) + +**Security Review Checklist:** +1. Run 'pnpm audit' and verify no high/critical vulnerabilities +2. Verify .gitignore excludes: .env, node_modules, dist, *.log +3. Verify .env.example has no real secrets, only templates +4. Ensure no API keys or secrets in any committed files +5. Document security audit results in SECURITY_AUDIT.md + +**Architecture Review Checklist:** +1. Verify structure supports Task 2 (db package with prisma/) +2. Verify structure supports Tasks 3-6 (mcpd with src/routes/, src/services/) +3. Verify structure supports Tasks 7-10 (cli with src/commands/) +4. Verify structure supports Tasks 11-13 (local-proxy with src/providers/) +5. Verify tests/ directories exist at package and root level +6. Verify dependency injection interfaces are defined in shared/src/types/ +7. Verify barrel exports in shared/src/index.ts +8. 
Document architecture decisions in ARCHITECTURE.md diff --git a/.taskmaster/tasks/task_002.md b/.taskmaster/tasks/task_002.md new file mode 100644 index 0000000..252e704 --- /dev/null +++ b/.taskmaster/tasks/task_002.md @@ -0,0 +1,155 @@ +# Task ID: 2 + +**Title:** Design and Implement Database Schema + +**Status:** pending + +**Dependencies:** 1 + +**Priority:** high + +**Description:** Create the database schema for storing MCP server configurations, projects, profiles, user sessions, and audit logs. Use PostgreSQL for production readiness. + +**Details:** + +Design PostgreSQL schema using Prisma ORM: + +```prisma +model User { + id String @id @default(uuid()) + email String @unique + name String? + sessions Session[] + auditLogs AuditLog[] + createdAt DateTime @default(now()) +} + +model McpServer { + id String @id @default(uuid()) + name String @unique + type String // e.g., 'slack', 'jira', 'terraform' + command String // npx command or docker image + args Json // command arguments + envTemplate Json // required env vars template + setupGuide String? // markdown guide for setup + profiles McpProfile[] + instances McpInstance[] +} + +model McpProfile { + id String @id @default(uuid()) + name String + serverId String + server McpServer @relation(fields: [serverId], references: [id]) + config Json // profile-specific config (read-only, limited endpoints, etc.) + filterRules Json? // pre-filtering rules + projects ProjectMcpProfile[] +} + +model Project { + id String @id @default(uuid()) + name String @unique + description String? 
+ profiles ProjectMcpProfile[] + createdAt DateTime @default(now()) +} + +model ProjectMcpProfile { + projectId String + profileId String + project Project @relation(fields: [projectId], references: [id]) + profile McpProfile @relation(fields: [profileId], references: [id]) + @@id([projectId, profileId]) +} + +model McpInstance { + id String @id @default(uuid()) + serverId String + server McpServer @relation(fields: [serverId], references: [id]) + containerId String? + status String // running, stopped, error + config Json + createdAt DateTime @default(now()) +} + +model AuditLog { + id String @id @default(uuid()) + userId String? + user User? @relation(fields: [userId], references: [id]) + action String + resource String + details Json + timestamp DateTime @default(now()) +} + +model Session { + id String @id @default(uuid()) + userId String + user User @relation(fields: [userId], references: [id]) + token String @unique + expiresAt DateTime +} +``` + +Create migrations and seed data for common MCP servers (slack, jira, github, terraform). + +**Test Strategy:** + +Run Prisma migrations against test database. Verify all relations work correctly with seed data. Test CRUD operations for each model using Prisma client. + +## Subtasks + +### 2.1. Set up Prisma ORM and PostgreSQL test infrastructure with docker-compose + +**Status:** pending +**Dependencies:** None + +Initialize Prisma in the db package with PostgreSQL configuration, create docker-compose.yml for local development with separate test database, and set up test database setup/teardown scripts. + +**Details:** + +Create src/db/prisma directory structure. Install Prisma dependencies (@prisma/client, prisma as devDependency). Configure deploy/docker-compose.yml with two PostgreSQL services: mcpctl-postgres (port 5432) for development and mcpctl-postgres-test (port 5433) for testing. 
Create src/db/src/test-utils.ts with setupTestDb() and teardownTestDb() functions that handle database connection, schema push, and cleanup. Create .env and .env.test with DATABASE_URL pointing to respective databases. Initialize prisma/schema.prisma with PostgreSQL provider and basic generator config. Write Vitest tests for test utilities to verify they can connect, push schema, and cleanup correctly. + +### 2.2. Write TDD tests for all Prisma models before implementing schema + +**Status:** pending +**Dependencies:** 2.1 + +Create comprehensive Vitest test suites for all 8 models (User, McpServer, McpProfile, Project, ProjectMcpProfile, McpInstance, AuditLog, Session) testing CRUD operations, relations, constraints, and edge cases. + +**Details:** + +Create src/db/tests/models directory with separate test files: user.test.ts, mcp-server.test.ts, mcp-profile.test.ts, project.test.ts, mcp-instance.test.ts, audit-log.test.ts, session.test.ts. Each test file should include: (1) CRUD operations (create, read, update, delete), (2) Unique constraint violations (email for User, name for McpServer/Project), (3) Relation tests (User->Sessions, McpServer->McpProfile->Projects, etc.), (4) Cascade delete behavior, (5) JSON field validation for args, envTemplate, config, filterRules, details fields, (6) Default value tests (uuid, timestamps), (7) Edge cases like null optional fields. Tests will initially fail (TDD red phase) until schema is implemented. + +### 2.3. Implement Prisma schema with all models and security considerations + +**Status:** pending +**Dependencies:** 2.2 + +Create the complete Prisma schema with all 8 models, proper relations, indexes for audit queries, and security-conscious field design for credentials encryption at rest. 
+ +**Details:** + +Implement src/db/prisma/schema.prisma with: User (id uuid, email unique, name optional, createdAt, relations to Session and AuditLog), McpServer (id uuid, name unique, type, command, args Json, envTemplate Json with @@map for encrypted storage notes, setupGuide optional, relations), McpProfile (id uuid, name, serverId FK, config Json, filterRules Json optional, relation to server and projects), Project (id uuid, name unique, description optional, createdAt, relation to profiles), ProjectMcpProfile (composite PK projectId+profileId, relations), McpInstance (id uuid, serverId FK, containerId optional, status enum-like string, config Json, metadata Json for future K8s support, createdAt, updatedAt), AuditLog (id uuid, userId optional FK, action, resource, details Json, timestamp, indexes on userId, timestamp, action for query performance), Session (id uuid, userId FK, token unique with index, expiresAt, createdAt). Add @@index annotations for frequently queried fields. Document in comments that envTemplate and config containing secrets must be encrypted at application layer. + +### 2.4. Create seed data functions with unit tests for common MCP servers + +**Status:** pending +**Dependencies:** 2.3 + +Implement seed functions for common MCP server configurations (Slack, Jira, GitHub, Terraform) with comprehensive unit tests for each seed function. + +**Details:** + +Create src/db/seed directory with: index.ts (main seed runner), mcp-servers.ts (server definitions), seed-mcp-servers.ts (seeding function), seed-default-profiles.ts (default profiles per server). Define server configurations: Slack (npx @modelcontextprotocol/server-slack, SLACK_BOT_TOKEN, SLACK_TEAM_ID env template with setup guide), Jira (npx @anthropic/mcp-server-jira, JIRA_URL, JIRA_EMAIL, JIRA_API_TOKEN), GitHub (npx @modelcontextprotocol/server-github, GITHUB_TOKEN), Terraform (npx terraform-docs-mcp). 
Create src/db/tests/seed directory with tests: seed-mcp-servers.test.ts, seed-default-profiles.test.ts. Tests should verify: (1) Each server is created with correct data, (2) Idempotency (running twice doesn't create duplicates), (3) Default profiles are linked correctly, (4) envTemplate JSON structure is valid. + +### 2.5. Create database migrations and perform security/architecture review + +**Status:** pending +**Dependencies:** 2.3, 2.4 + +Generate initial Prisma migration, create migration helper utilities with tests, and conduct comprehensive security and architecture review documenting findings. + +**Details:** + +Run 'npx prisma migrate dev --name init' to create initial migration in src/db/prisma/migrations. Create src/db/src/migration-helpers.ts with utilities: resetDatabase(), applyMigrations(), rollbackMigration() with proper error handling. Write unit tests in src/db/tests/migration-helpers.test.ts. Conduct security review and document in src/db/SECURITY_REVIEW.md: (1) PII handling - email in User is only PII, add note about GDPR considerations, (2) Credentials handling - envTemplate, config fields contain secrets, document encryption-at-rest requirement at application layer, (3) Audit log indexes verified for query performance, (4) Cascade delete behavior reviewed (Session deletes with User, but AuditLog userId set to null), (5) No sensitive data in plain text validation. Conduct architecture review documenting in src/db/ARCHITECTURE.md: (1) Schema supports all 18 tasks, (2) McpInstance.metadata Json field ready for K8s pod metadata, (3) AuditLog.details flexible for various action types, (4) Future migration considerations for adding fields without breaking data. 
diff --git a/.taskmaster/tasks/task_003.md b/.taskmaster/tasks/task_003.md new file mode 100644 index 0000000..e6ea229 --- /dev/null +++ b/.taskmaster/tasks/task_003.md @@ -0,0 +1,205 @@ +# Task ID: 3 + +**Title:** Implement mcpd Core Server Framework + +**Status:** pending + +**Dependencies:** 1, 2 + +**Priority:** high + +**Description:** Build the mcpd daemon server with Express/Fastify, including middleware for authentication, logging, and error handling. Design for horizontal scalability. + +**Details:** + +Create mcpd server in `src/mcpd/src/`: + +```typescript +// server.ts +import Fastify from 'fastify'; +import { PrismaClient } from '@prisma/client'; + +const app = Fastify({ logger: true }); +const prisma = new PrismaClient(); + +// Middleware +app.register(require('@fastify/cors')); +app.register(require('@fastify/helmet')); +app.register(require('@fastify/rate-limit'), { max: 100, timeWindow: '1 minute' }); + +// Health check for load balancers +app.get('/health', async () => ({ status: 'ok', timestamp: new Date().toISOString() })); + +// Auth middleware +app.addHook('preHandler', async (request, reply) => { + if (request.url === '/health') return; + const token = request.headers.authorization?.replace('Bearer ', ''); + if (!token) return reply.status(401).send({ error: 'Unauthorized' }); + // Validate token against session table +}); + +// Audit logging middleware +app.addHook('onResponse', async (request, reply) => { + await prisma.auditLog.create({ + data: { + action: request.method, + resource: request.url, + details: { statusCode: reply.statusCode }, + userId: request.user?.id + } + }); +}); +``` + +Design principles: +- Stateless: All state in PostgreSQL, no in-memory session storage +- Scalable: Can run multiple instances behind load balancer +- Configurable via environment variables +- Graceful shutdown handling + +**Test Strategy:** + +Unit test middleware functions. Integration test health endpoint. Load test with multiple concurrent requests. 
Verify statelessness by running two instances and alternating requests. + +## Subtasks + +### 3.1. Set up mcpd package structure with clean architecture layers and TDD infrastructure + +**Status:** pending +**Dependencies:** None + +Create the src/mcpd directory structure following clean architecture principles with separate layers for routes, controllers, services, and repositories, along with Vitest test configuration. + +**Details:** + +Create src/mcpd/src/ directory structure with the following layers: + +- routes/ - HTTP route definitions (thin layer, delegates to controllers) +- controllers/ - Request/response handling, input validation +- services/ - Business logic, orchestrates repositories +- repositories/ - Data access layer, Prisma abstraction +- middleware/ - Auth, logging, error handling, rate limiting +- config/ - Environment configuration with Zod validation +- types/ - TypeScript interfaces for dependency injection +- utils/ - Utility functions (graceful shutdown, health checks) + +Create src/mcpd/tests/ with matching structure: +- unit/ (routes, controllers, services, repositories, middleware) +- integration/ (API endpoint tests) +- fixtures/ (mock data, Prisma mock setup) + +Set up vitest.config.ts extending root config with mcpd-specific settings. Create test-utils.ts with Prisma mock factory and Fastify test helpers. Install dependencies: fastify, @fastify/cors, @fastify/helmet, @fastify/rate-limit, zod, pino. DevDependencies: vitest, @vitest/coverage-v8, supertest. + +### 3.2. Implement Fastify server core with health endpoint and database connectivity verification + +**Status:** pending +**Dependencies:** 3.1 + +Create the core Fastify server with health check endpoint that verifies PostgreSQL database connectivity, environment configuration validation, and server lifecycle management. 
+ +**Details:** + +Create src/mcpd/src/server.ts with Fastify instance factory function createServer(config: ServerConfig) for testability via dependency injection. Implement: + +- config/env.ts: Zod schema for environment variables (DATABASE_URL, PORT, NODE_ENV, LOG_LEVEL) +- config/index.ts: loadConfig() function that validates env with Zod +- utils/health.ts: checkDatabaseConnectivity(prisma) function +- routes/health.ts: GET /health endpoint returning { status: 'ok' | 'degraded', timestamp: ISO8601, db: 'connected' | 'disconnected' } + +Server requirements: +- Fastify with pino logger enabled (configurable log level) +- Health endpoint bypasses auth middleware +- Health endpoint checks actual DB connectivity via prisma.$queryRaw +- Server does NOT start if DATABASE_URL is missing (fail fast) +- Export createServer() and startServer() separately for testing + +Write TDD tests FIRST in tests/unit/routes/health.test.ts and tests/unit/config/env.test.ts before implementing. + +### 3.3. Implement authentication middleware with JWT validation and session management + +**Status:** pending +**Dependencies:** 3.2 + +Create authentication preHandler hook that validates Bearer tokens against the Session table in PostgreSQL, with proper error responses and request decoration for downstream handlers. 
+ +**Details:** + +Create src/mcpd/src/middleware/auth.ts with: + +- authMiddleware(prisma: PrismaClient) factory function (dependency injection) +- Fastify preHandler hook implementation +- Extract Bearer token from Authorization header +- Validate token exists and format is correct +- Query Session table: find by token, check expiresAt > now() +- Query User by session.userId for request decoration +- Decorate request with user: { id, email, name } via fastify.decorateRequest +- Return 401 Unauthorized with { error: 'Unauthorized', code: 'TOKEN_REQUIRED' } for missing token +- Return 401 with { error: 'Unauthorized', code: 'TOKEN_EXPIRED' } for expired session +- Return 401 with { error: 'Unauthorized', code: 'TOKEN_INVALID' } for invalid token + +Create types/fastify.d.ts with FastifyRequest augmentation for user property. + +Write unit tests in tests/unit/middleware/auth.test.ts with mocked Prisma client before implementation. + +### 3.4. Implement security middleware stack with CORS, Helmet, rate limiting, and input sanitization + +**Status:** pending +**Dependencies:** 3.2 + +Configure and register security middleware including CORS policy, Helmet security headers, rate limiting, and create input sanitization utilities to prevent injection attacks. 
+ +**Details:** + +Create src/mcpd/src/middleware/security.ts with: + +- registerSecurityPlugins(app: FastifyInstance, config: SecurityConfig) function +- CORS configuration: configurable origins (default: same-origin for production, * for development), credentials support, allowed methods/headers +- Helmet configuration: contentSecurityPolicy, hsts (enabled in production), noSniff, frameguard +- Rate limiting: 100 requests per minute default, configurable via env, different limits for auth endpoints (stricter) + +Create src/mcpd/src/utils/sanitize.ts: +- sanitizeInput(input: unknown): sanitized value +- stripHtmlTags(), escapeHtml() for XSS prevention +- Validate JSON input doesn't exceed size limits + +Create src/mcpd/src/middleware/validate.ts: +- createValidationMiddleware(schema: ZodSchema) factory +- Validates request.body against Zod schema +- Returns 400 Bad Request with Zod errors formatted + +Document security decisions in src/mcpd/SECURITY.md with rationale for each configuration choice. + +### 3.5. Implement error handling, audit logging middleware, and graceful shutdown with comprehensive tests + +**Status:** pending +**Dependencies:** 3.2, 3.3, 3.4 + +Create global error handler, audit logging onResponse hook that records all operations to database, and graceful shutdown handling with connection draining and proper signal handling. 
+ +**Details:** + +Create src/mcpd/src/middleware/error-handler.ts: +- Global Fastify error handler via setErrorHandler +- Handle Zod validation errors -> 400 Bad Request +- Handle Prisma errors (P2002 unique, P2025 not found) -> appropriate HTTP codes +- Handle custom application errors with error codes +- Log errors with pino, include stack trace in development only +- Never expose internal errors to clients in production + +Create src/mcpd/src/middleware/audit.ts: +- auditMiddleware(prisma: PrismaClient, auditLogger: AuditLogger) factory +- Fastify onResponse hook +- Create AuditLog record with: userId (from request.user), action (HTTP method), resource (URL), details ({ statusCode, responseTime, ip }) +- Skip audit logging for /health endpoint +- Async write - don't block response +- Handle audit write failures gracefully (log warning, don't fail request) + +Create src/mcpd/src/utils/shutdown.ts: +- setupGracefulShutdown(app: FastifyInstance, prisma: PrismaClient) function +- Handle SIGTERM, SIGINT signals +- Stop accepting new connections +- Wait for in-flight requests (configurable timeout, default 30s) +- Disconnect Prisma client +- Exit with appropriate code + +Create services/audit-logger.ts interface that Task 14 will implement. diff --git a/.taskmaster/tasks/task_004.md b/.taskmaster/tasks/task_004.md new file mode 100644 index 0000000..0255375 --- /dev/null +++ b/.taskmaster/tasks/task_004.md @@ -0,0 +1,111 @@ +# Task ID: 4 + +**Title:** Implement MCP Server Registry and Profile Management + +**Status:** pending + +**Dependencies:** 3 + +**Priority:** high + +**Description:** Create APIs for registering MCP servers, managing profiles with different permission levels, and storing configuration templates. 
+ +**Details:** + +Create REST API endpoints in mcpd: + +```typescript +// routes/mcp-servers.ts +app.post('/api/mcp-servers', async (req) => { + const { name, type, command, args, envTemplate, setupGuide } = req.body; + return prisma.mcpServer.create({ data: { name, type, command, args, envTemplate, setupGuide } }); +}); + +app.get('/api/mcp-servers', async () => { + return prisma.mcpServer.findMany({ include: { profiles: true } }); +}); + +app.get('/api/mcp-servers/:id', async (req) => { + return prisma.mcpServer.findUnique({ where: { id: req.params.id }, include: { profiles: true, instances: true } }); +}); + +// Profile management +app.post('/api/mcp-servers/:serverId/profiles', async (req) => { + const { name, config, filterRules } = req.body; + return prisma.mcpProfile.create({ + data: { name, serverId: req.params.serverId, config, filterRules } + }); +}); + +// Example profile configs: +// Read-only Jira: { permissions: ['read'], allowedEndpoints: ['/issues/*', '/projects/*'] } +// Full Slack: { permissions: ['read', 'write'], channels: ['*'] } +// Limited Terraform: { permissions: ['read'], modules: ['aws_*', 'kubernetes_*'] } +``` + +Create seed data with pre-configured MCP server definitions: +- Slack MCP with OAuth setup guide +- Jira MCP with API token guide +- GitHub MCP with PAT guide +- Terraform docs MCP + +**Test Strategy:** + +Test CRUD operations for servers and profiles. Verify profile inheritance works. Test that invalid configurations are rejected by Zod validation. + +## Subtasks + +### 4.1. Create Zod validation schemas with comprehensive TDD test coverage + +**Status:** pending +**Dependencies:** None + +Define and test Zod schemas for MCP server registration, profile management, and configuration templates before implementing any routes or services. 
+ +**Details:** + +Create src/mcpd/src/validation/mcp-server.schema.ts with schemas: CreateMcpServerSchema (name: string non-empty, type: enum ['slack', 'jira', 'github', 'terraform', 'custom'], command: string, args: array of strings, envTemplate: record with nested schema for { description: string, required: boolean, secret: boolean, setupUrl?: string }, setupGuide?: string). Create UpdateMcpServerSchema as partial of create. Create CreateMcpProfileSchema (name: string, serverId: uuid, config: record with permissions array ['read', 'write'], filterRules?: record). Create src/mcpd/tests/unit/validation/mcp-server.schema.test.ts with TDD tests BEFORE implementation: (1) Test valid server creation passes, (2) Test empty name fails, (3) Test invalid type fails, (4) Test envTemplate validates nested structure, (5) Test profile config validates permissions array only contains 'read'/'write', (6) Test UUID format validation for serverId, (7) Test sanitization of XSS attempts in setupGuide field, (8) Test envTemplate values cannot contain shell injection patterns. Security: Add custom Zod refinements to reject dangerous patterns in envTemplate values like backticks, $(), etc. + +### 4.2. Implement repository pattern for MCP server and profile data access + +**Status:** pending +**Dependencies:** 4.1 + +Create injectable repository classes for McpServer and McpProfile data access with Prisma, following dependency injection patterns for testability. + +**Details:** + +Create src/mcpd/src/repositories/interfaces.ts with IMcpServerRepository and IMcpProfileRepository interfaces defining all CRUD methods. Create src/mcpd/src/repositories/mcp-server.repository.ts implementing IMcpServerRepository with methods: create(data: CreateMcpServerInput), findById(id: string, include?: { profiles?: boolean, instances?: boolean }), findByName(name: string), findAll(include?: { profiles?: boolean }), update(id: string, data: UpdateMcpServerInput), delete(id: string). 
Create src/mcpd/src/repositories/mcp-profile.repository.ts with methods: create(data: CreateMcpProfileInput), findById(id: string), findByServerId(serverId: string), findAll(), update(id: string, data: UpdateMcpProfileInput), delete(id: string), validateProfilePermissions(profileId: string, requestedPermissions: string[]) to check profile cannot escalate beyond server's allowed permissions. Write TDD tests in src/mcpd/tests/unit/repositories/ before implementation using Prisma mock factory from Task 3's test utilities. Architecture note: These repositories will be used by Task 10 (setup wizard) and Task 15 (profiles library). + +### 4.3. Implement MCP server service layer with business logic and authorization + +**Status:** pending +**Dependencies:** 4.1, 4.2 + +Create McpServerService and McpProfileService with business logic, authorization checks, and validation orchestration using injected repositories. + +**Details:** + +Create src/mcpd/src/services/mcp-server.service.ts with constructor accepting IMcpServerRepository (DI). Methods: createServer(userId: string, data: CreateMcpServerInput) - validate with Zod schema, check user has 'admin' or 'server:create' permission, call repository; getServer(userId: string, id: string) - check read permission, include profiles if authorized; listServers(userId: string, filters?: ServerFilters); updateServer(userId: string, id: string, data) - check 'server:update' permission; deleteServer(userId: string, id: string) - check 'server:delete', verify no active instances. Create src/mcpd/src/services/mcp-profile.service.ts with methods: createProfile(userId: string, serverId: string, data) - validate profile permissions don't exceed server's capabilities, check 'profile:create' permission; updateProfile(); deleteProfile() - check no active instances using this profile. Security: Implement permission hierarchy where profile.config.permissions must be subset of server's allowed permissions. 
Create src/mcpd/src/services/authorization.ts with checkPermission(userId: string, resource: string, action: string) helper. Write TDD tests mocking repositories. + +### 4.4. Implement REST API routes for MCP servers and profiles with request validation + +**Status:** pending +**Dependencies:** 4.3 + +Create Fastify route handlers for MCP server and profile CRUD operations using the service layer, with Zod request validation middleware. + +**Details:** + +Create src/mcpd/src/routes/mcp-servers.ts with routes: POST /api/mcp-servers (create server, requires auth + admin), GET /api/mcp-servers (list all, requires auth), GET /api/mcp-servers/:id (get by ID with profiles/instances, requires auth), PUT /api/mcp-servers/:id (update, requires auth + admin), DELETE /api/mcp-servers/:id (delete, requires auth + admin). Create src/mcpd/src/routes/mcp-profiles.ts with routes: POST /api/mcp-servers/:serverId/profiles (create profile for server), GET /api/mcp-servers/:serverId/profiles (list profiles for server), GET /api/profiles/:id (get profile by ID), PUT /api/profiles/:id (update profile), DELETE /api/profiles/:id (delete profile). Each route handler: (1) Uses Zod schema via validation middleware from Task 3, (2) Calls appropriate service method, (3) Returns consistent response format { success: boolean, data?: T, error?: { code: string, message: string } }, (4) Uses request.user from auth middleware. Register routes in server.ts. Write integration tests using Fastify's inject() method. + +### 4.5. Create seed data for pre-configured MCP servers and perform security review + +**Status:** pending +**Dependencies:** 4.4 + +Implement seed data for Slack, Jira, GitHub, and Terraform MCP servers with default profiles, plus comprehensive security review of all implemented code. 
+ +**Details:** + +Create src/mcpd/src/seed/mcp-servers.seed.ts with seedMcpServers() function using the McpServerService to create: (1) Slack MCP - command: 'npx', args: ['-y', '@modelcontextprotocol/server-slack'], envTemplate with SLACK_BOT_TOKEN (secret, setupUrl to api.slack.com), SLACK_TEAM_ID, setupGuide markdown with OAuth setup steps, default profiles: 'slack-read-only' (permissions: ['read']), 'slack-full' (permissions: ['read', 'write']); (2) Jira MCP - envTemplate with JIRA_URL, JIRA_EMAIL, JIRA_API_TOKEN (secret), setupGuide for API token creation; (3) GitHub MCP - envTemplate with GITHUB_TOKEN (secret, setupUrl to github.com/settings/tokens); (4) Terraform Docs MCP - no env required, read-only profile. Create src/mcpd/src/seed/index.ts that runs all seeders. Security Review - create SECURITY_REVIEW.md documenting: (1) All Zod schemas reviewed for injection prevention, (2) Authorization checked on every route, (3) envTemplate sanitization prevents shell injection, (4) Profile permission escalation prevented, (5) Secrets marked appropriately in envTemplate, (6) No sensitive data in logs or error responses. Run 'pnpm lint' and 'pnpm test:coverage' ensuring >80% coverage. diff --git a/.taskmaster/tasks/task_005.md b/.taskmaster/tasks/task_005.md new file mode 100644 index 0000000..7abb1f1 --- /dev/null +++ b/.taskmaster/tasks/task_005.md @@ -0,0 +1,126 @@ +# Task ID: 5 + +**Title:** Implement Project Management APIs + +**Status:** pending + +**Dependencies:** 4 + +**Priority:** high + +**Description:** Create APIs for managing MCP projects that group multiple MCP profiles together for easy assignment to Claude sessions. 
+ +**Details:** + +Create project management endpoints: + +```typescript +// routes/projects.ts +app.post('/api/projects', async (req) => { + const { name, description, profileIds } = req.body; + const project = await prisma.project.create({ + data: { + name, + description, + profiles: { + create: profileIds.map(profileId => ({ profileId })) + } + }, + include: { profiles: { include: { profile: { include: { server: true } } } } } + }); + return project; +}); + +app.get('/api/projects', async () => { + return prisma.project.findMany({ + include: { profiles: { include: { profile: { include: { server: true } } } } } + }); +}); + +app.get('/api/projects/:name', async (req) => { + return prisma.project.findUnique({ + where: { name: req.params.name }, + include: { profiles: { include: { profile: { include: { server: true } } } } } + }); +}); + +app.put('/api/projects/:id/profiles', async (req) => { + const { profileIds } = req.body; + // Update project profiles + await prisma.projectMcpProfile.deleteMany({ where: { projectId: req.params.id } }); + await prisma.projectMcpProfile.createMany({ + data: profileIds.map(profileId => ({ projectId: req.params.id, profileId })) + }); +}); + +// Generate .mcp.json format for Claude +app.get('/api/projects/:name/mcp-config', async (req) => { + const project = await prisma.project.findUnique({ + where: { name: req.params.name }, + include: { profiles: { include: { profile: { include: { server: true } } } } } + }); + // Transform to .mcp.json format + return generateMcpConfig(project); +}); +``` + +**Test Strategy:** + +Test project CRUD operations. Verify profile associations work correctly. Test MCP config generation produces valid .mcp.json format. + +## Subtasks + +### 5.1. 
Write TDD tests for project Zod validation schemas and generateMcpConfig function + +**Status:** pending +**Dependencies:** None + +Create comprehensive Vitest test suites for project validation schemas and the critical generateMcpConfig function BEFORE implementing any code, following TDD red phase. + +**Details:** + +Create src/mcpd/tests/unit/validation/project.schema.test.ts with tests for: (1) CreateProjectSchema validates name (non-empty string, max 64 chars, alphanumeric-dash only), description (optional string, max 500 chars), profileIds (array of valid UUIDs, can be empty); (2) UpdateProjectSchema as partial; (3) UpdateProjectProfilesSchema validates profileIds array. Create src/mcpd/tests/unit/services/generate-mcp-config.test.ts with tests for generateMcpConfig function: (1) Returns valid .mcp.json structure with mcpServers object, (2) Each server entry has command, args, and env keys, (3) SECURITY: env values for secret fields are EXCLUDED or masked (critical requirement from context), (4) Server names are correctly derived from profile.server.name, (5) Empty project returns empty mcpServers object, (6) Multiple profiles from same server are handled correctly (no duplicates or merged appropriately). Security test: Verify generateMcpConfig strips SLACK_BOT_TOKEN, JIRA_API_TOKEN, GITHUB_TOKEN and any field marked secret:true in envTemplate. + +### 5.2. Implement project repository and generateMcpConfig service with security filtering + +**Status:** pending +**Dependencies:** 5.1 + +Create the project repository following the repository pattern from Task 4, plus the generateMcpConfig function that transforms project data to .mcp.json format while stripping sensitive credentials. 
+ +**Details:** + +Create src/mcpd/src/repositories/project.repository.ts implementing IProjectRepository interface with methods: create(data: CreateProjectInput), findById(id: string, include?: { profiles?: { include?: { profile?: { include?: { server?: boolean } } } } }), findByName(name: string, include?: same), findAll(include?: same), update(id: string, data: UpdateProjectInput), delete(id: string), updateProfiles(projectId: string, profileIds: string[]) - handles delete-all-then-create pattern from task details. Create src/mcpd/src/services/mcp-config-generator.ts with generateMcpConfig(project: ProjectWithProfiles): McpJsonConfig function. Implementation: (1) Iterate project.profiles, (2) For each profile, get server.command, server.args, (3) Build env object from profile.config BUT filter out any key where server.envTemplate[key].secret === true, (4) Return { mcpServers: { [server.name]: { command, args, env } } }. SECURITY CRITICAL: The env object must NEVER include secret values - these are populated locally by the CLI (Task 9). Add JSDoc comment explaining this security design. Create TypeScript type McpJsonConfig matching .mcp.json schema structure. + +### 5.3. Implement project service layer with authorization and profile validation + +**Status:** pending +**Dependencies:** 5.2 + +Create ProjectService with business logic including authorization checks, profile existence validation, and orchestration of repository and mcp-config-generator. + +**Details:** + +Create src/mcpd/src/services/project.service.ts with constructor accepting IProjectRepository and IMcpProfileRepository (DI from Task 4). 
Methods: createProject(userId: string, data: CreateProjectInput) - validate with Zod schema, check 'project:create' permission, verify all profileIds exist via profile repository, call project repository; getProject(userId: string, nameOrId: string) - check read permission, return project with nested profiles; listProjects(userId: string) - filter based on permissions; updateProject(userId: string, id: string, data: UpdateProjectInput) - check 'project:update' permission; deleteProject(userId: string, id: string) - check 'project:delete' permission; updateProjectProfiles(userId: string, projectId: string, profileIds: string[]) - validate all profiles exist AND user has permission to use each profile (prevents adding profiles user cannot access); getMcpConfig(userId: string, projectName: string) - get project, verify read permission, call generateMcpConfig. Write TDD tests mocking repositories. Note: This service will be consumed by Task 9 (mcpctl claude add-mcp-project). + +### 5.4. Implement REST API routes for project CRUD and mcp-config endpoint + +**Status:** pending +**Dependencies:** 5.3 + +Create Fastify route handlers for all project management endpoints including the critical /api/projects/:name/mcp-config endpoint used by the CLI. + +**Details:** + +Create src/mcpd/src/routes/projects.ts with routes: POST /api/projects (create project with optional profileIds array), GET /api/projects (list all projects user can access), GET /api/projects/:name (get project by name with full profile/server hierarchy), PUT /api/projects/:id (update project name/description), DELETE /api/projects/:id (delete project), PUT /api/projects/:id/profiles (replace all profiles - uses delete-then-create pattern per task details), GET /api/projects/:name/mcp-config (generate .mcp.json format output - CRITICAL endpoint for Task 9 CLI integration). 
Each route: (1) Uses Zod schema validation middleware, (2) Calls ProjectService method, (3) Returns consistent response format from Task 4 pattern. Register routes in server.ts with /api prefix. The mcp-config endpoint response format must be stable as Task 9 depends on it: { mcpServers: { [name: string]: { command: string, args: string[], env: Record<string, string> } } }. Add OpenAPI/Swagger JSDoc annotations for mcp-config endpoint documenting the exact response format. + +### 5.5. Create integration tests and security review for project APIs + +**Status:** pending +**Dependencies:** 5.4 + +Write comprehensive integration tests simulating the full workflow from project creation through mcp-config generation, plus security review documenting credential handling. + +**Details:** + +Create src/mcpd/tests/integration/projects.test.ts with end-to-end scenarios: (1) Full workflow test: create MCP server (from Task 4 seed), create profile with credentials, create project referencing profile, call mcp-config endpoint, verify output is valid and EXCLUDES secrets; (2) Multi-profile project: create project with Slack + Jira profiles, verify mcp-config merges correctly; (3) Profile update atomicity: update project profiles, verify old profiles removed and new ones added in single transaction; (4) Authorization flow: verify user A cannot add user B's profiles to their project; (5) Concurrent access: simultaneous project updates don't corrupt data. Create src/mcpd/docs/SECURITY_REVIEW.md section for Task 5 documenting: (1) generateMcpConfig deliberately excludes secret env vars, (2) CLI (Task 9) is responsible for injecting secrets locally from user's credential store, (3) Profile permission checks prevent unauthorized profile usage, (4) Response format designed to be safe for transmission over network. Run 'pnpm test:coverage' targeting >85% coverage for project-related files. 
diff --git a/.taskmaster/tasks/task_006.md b/.taskmaster/tasks/task_006.md new file mode 100644 index 0000000..efe7014 --- /dev/null +++ b/.taskmaster/tasks/task_006.md @@ -0,0 +1,181 @@ +# Task ID: 6 + +**Title:** Implement Docker Container Management for MCP Servers + +**Status:** pending + +**Dependencies:** 3, 4 + +**Priority:** high + +**Description:** Create the container orchestration layer for running MCP servers as Docker containers, with support for docker-compose deployment. + +**Details:** + +Create Docker management module: + +```typescript +// services/container-manager.ts +import Docker from 'dockerode'; + +export class ContainerManager { + private docker: Docker; + + constructor() { + this.docker = new Docker({ socketPath: '/var/run/docker.sock' }); + } + + async startMcpServer(server: McpServer, config: McpProfile['config']): Promise<string> { + const container = await this.docker.createContainer({ + Image: server.image || 'node:20-alpine', + Cmd: this.buildCommand(server, config), + Env: this.buildEnvVars(server, config), + Labels: { + 'mcpctl.server': server.name, + 'mcpctl.managed': 'true' + }, + HostConfig: { + NetworkMode: 'mcpctl-network', + RestartPolicy: { Name: 'unless-stopped' } + } + }); + await container.start(); + return container.id; + } + + async stopMcpServer(containerId: string): Promise<void> { + const container = this.docker.getContainer(containerId); + await container.stop(); + await container.remove(); + } + + async getMcpServerStatus(containerId: string): Promise<'running' | 'stopped' | 'error'> { + try { + const container = this.docker.getContainer(containerId); + const info = await container.inspect(); + return info.State.Running ? 
'running' : 'stopped'; + } catch { + return 'error'; + } + } + + async listManagedContainers(): Promise<Docker.ContainerInfo[]> { + return this.docker.listContainers({ + filters: { label: ['mcpctl.managed=true'] } + }); + } +} +``` + +Create docker-compose.yml template: +```yaml +version: '3.8' +services: + mcpd: + build: ./src/mcpd + ports: + - "3000:3000" + environment: + - DATABASE_URL=postgresql://... + volumes: + - /var/run/docker.sock:/var/run/docker.sock + networks: + - mcpctl-network + + postgres: + image: postgres:15 + volumes: + - pgdata:/var/lib/postgresql/data + networks: + - mcpctl-network + +networks: + mcpctl-network: + driver: bridge + +volumes: + pgdata: +``` + +**Test Strategy:** + +Test container creation, start, stop, and removal. Test status checking. Integration test with actual Docker daemon. Verify network isolation works correctly. + +## Subtasks + +### 6.1. Define McpOrchestrator interface and write TDD tests for ContainerManager + +**Status:** pending +**Dependencies:** None + +Define the McpOrchestrator abstraction interface that both DockerOrchestrator (this task) and KubernetesOrchestrator (task 17) will implement. Write comprehensive Vitest unit tests for all ContainerManager methods BEFORE implementation using dockerode mocks. + +**Details:** + +Create src/mcpd/src/services/orchestrator.ts with the McpOrchestrator interface including: startServer(), stopServer(), getStatus(), getLogs(), listInstances(). 
Then create src/mcpd/src/services/docker/__tests__/container-manager.test.ts with TDD tests covering: (1) constructor connects to Docker socket, (2) startMcpServer() creates container with correct labels, env vars, and network config, (3) stopMcpServer() stops and removes container, (4) getMcpServerStatus() returns 'running', 'stopped', or 'error' states, (5) listManagedContainers() filters by mcpctl.managed label, (6) buildCommand() generates correct command array from server config, (7) buildEnvVars() maps profile config to environment variables. Use vi.mock('dockerode') to mock all Docker operations. Tests should initially fail (TDD red phase). + +### 6.2. Implement ContainerManager class with DockerOrchestrator strategy pattern + +**Status:** pending +**Dependencies:** 6.1 + +Implement the ContainerManager class as a DockerOrchestrator implementation using dockerode, with all methods passing the TDD tests from subtask 1. + +**Details:** + +Create src/mcpd/src/services/docker/container-manager.ts implementing McpOrchestrator interface. Constructor accepts optional Docker socket path (default: /var/run/docker.sock). Implement startMcpServer(): create container with Image (server.image || 'node:20-alpine'), Cmd from buildCommand(), Env from buildEnvVars(), Labels (mcpctl.server, mcpctl.managed, mcpctl.profile), HostConfig with NetworkMode 'mcpctl-network' and RestartPolicy 'unless-stopped'. Implement stopMcpServer(): stop() then remove() the container. Implement getMcpServerStatus(): inspect() container and return state. Implement listManagedContainers(): listContainers() with label filter. Implement buildCommand(): parse server.command template with config substitutions. Implement buildEnvVars(): merge server.envTemplate with profile.config values. Add resource limits to HostConfig (Memory: 512MB default, NanoCPUs: 1e9 default) - these are overridable via server config. All TDD tests from subtask 1 should now pass. + +### 6.3. 
Create docker-compose.yml template with mcpd, PostgreSQL, and test MCP server + +**Status:** pending +**Dependencies:** None + +Create the production-ready docker-compose.yml template for local development with mcpd service, PostgreSQL database, a test MCP server container, and proper networking configuration. + +**Details:** + +Create deploy/docker-compose.yml with services: (1) mcpd - build from src/mcpd, expose port 3000, DATABASE_URL env var, mount /var/run/docker.sock (read-only), depends_on postgres with healthcheck, deploy resources limits (memory: 512M), restart: unless-stopped. (2) postgres - postgres:15-alpine image, POSTGRES_USER/PASSWORD/DB env vars, healthcheck with pg_isready, volume for pgdata, deploy resources limits (memory: 256M). (3) test-mcp-server - simple echo server image (node:20-alpine with npx @modelcontextprotocol/server-memory), labels for mcpctl.managed and mcpctl.server, same network. Create mcpctl-network as bridge driver. Create named volumes: pgdata. Add .env.example with required environment variables. Ensure all containers have resource limits and no --privileged flag. Add docker-compose.test.yml override for CI testing with ephemeral volumes. + +### 6.4. Write integration tests with real Docker daemon + +**Status:** pending +**Dependencies:** 6.2, 6.3 + +Create integration test suite that tests ContainerManager against a real Docker daemon, verifying actual container lifecycle operations work correctly. + +**Details:** + +Create src/mcpd/src/services/docker/__tests__/container-manager.integration.test.ts. Use vitest with longer timeout (30s). Before all: ensure mcpctl-network exists (create if not). After each: cleanup any test containers. 
Test cases: (1) startMcpServer() creates a real container with test MCP server image, verify container is running with docker inspect, (2) getMcpServerStatus() returns 'running' for active container, (3) stopMcpServer() removes container and getMcpServerStatus() returns 'error', (4) listManagedContainers() returns only containers with mcpctl.managed label, (5) test container networking - two MCP server containers can communicate on mcpctl-network. Use node:20-alpine with simple sleep command as test image. Add CI skip condition (describe.skipIf(!process.env.DOCKER_HOST)) for environments without Docker. Tag tests with '@integration' for selective running. + +### 6.5. Implement container network isolation and resource management + +**Status:** pending +**Dependencies:** 6.2 + +Add network segmentation utilities and resource management capabilities to ensure proper isolation between MCP server containers and prevent resource exhaustion. + +**Details:** + +Create src/mcpd/src/services/docker/network-manager.ts with: ensureNetworkExists() - creates mcpctl-network if not present with bridge driver, getNetworkInfo() - returns network details, connectContainer() - adds container to network, disconnectContainer() - removes from network. Add to ContainerManager: getContainerStats() - returns CPU/memory usage via container.stats(), setResourceLimits() - updates container resources. Implement container isolation: each MCP server profile can specify allowed networks, default deny all external network access, only allow container-to-container on mcpctl-network. Add ResourceConfig type with memory (bytes), cpuShares, cpuPeriod, pidsLimit. Write unit tests for network-manager with mocked dockerode. Integration test: start two containers, verify they can reach each other on mcpctl-network but not external network. + +### 6.6. 
Conduct security review of Docker socket access and container configuration + +**Status:** pending +**Dependencies:** 6.2, 6.3, 6.5 + +Perform comprehensive security review of all Docker-related code, documenting risks of Docker socket access and implementing security controls for container isolation. + +**Details:** + +Create src/mcpd/docs/DOCKER_SECURITY_REVIEW.md documenting: (1) Docker socket access risks - socket access grants root-equivalent privileges, mitigations implemented (read-only mount where possible, no container creation with --privileged, no host network mode, no host PID namespace). (2) Container escape prevention - no --privileged containers, no SYS_ADMIN capability, seccomp profile enabled (default), AppArmor profile enabled (default), drop all capabilities except required ones. (3) Image source validation - add validateImageSource() function that checks image against allowlist, reject images from untrusted registries, warn on :latest tags. (4) Resource limits - all containers MUST have memory and CPU limits, pids-limit to prevent fork bombs. (5) Network segmentation - MCP servers isolated to mcpctl-network, no external network access by default. (6) Secrets handling - environment variables with credentials are passed at runtime not build time, no secrets in image layers. Add security tests that verify: no --privileged, caps are dropped, resource limits are set. + +### 6.7. Implement container logs streaming and health monitoring + +**Status:** pending +**Dependencies:** 6.2 + +Add log streaming capabilities and health monitoring to ContainerManager to support instance lifecycle management (Task 16) and provide observability into running MCP servers. + +**Details:** + +Extend ContainerManager with: getLogs(containerId, options: LogOptions): AsyncIterator<string> - streams logs from container using dockerode container.logs() with follow option, LogOptions includes timestamps, tail lines count, since timestamp. 
getHealthStatus(containerId): returns health check result if container has HEALTHCHECK, otherwise infers from running state. attachToContainer(containerId): returns bidirectional stream for stdio. Add event subscriptions: onContainerStart, onContainerStop, onContainerDie callbacks using Docker events API. Create src/mcpd/src/services/docker/container-events.ts with ContainerEventEmitter class that listens to Docker daemon events and emits typed events. Write unit tests mocking dockerode stream responses. Integration test: start container, tail logs, verify log output matches container stdout. Test event subscription receives container lifecycle events. diff --git a/.taskmaster/tasks/task_007.md b/.taskmaster/tasks/task_007.md new file mode 100644 index 0000000..f2ba1f7 --- /dev/null +++ b/.taskmaster/tasks/task_007.md @@ -0,0 +1,311 @@ +# Task ID: 7 + +**Title:** Build mcpctl CLI Core Framework + +**Status:** pending + +**Dependencies:** 1 + +**Priority:** high + +**Description:** Create the CLI tool foundation using Commander.js with kubectl-inspired command structure, configuration management, and server communication. 
+ +**Details:** + +Create CLI in `src/cli/src/`: + +```typescript +// index.ts +import { Command } from 'commander'; +import { loadConfig, saveConfig } from './config'; + +const program = new Command(); + +program + .name('mcpctl') + .description('kubectl-like CLI for managing MCP servers') + .version('0.1.0'); + +// Config management +program + .command('config') + .description('Manage mcpctl configuration') + .addCommand( + new Command('set-server') + .argument('<url>', 'mcpd server URL') + .action((url) => { + const config = loadConfig(); + config.serverUrl = url; + saveConfig(config); + console.log(`Server set to ${url}`); + }) + ) + .addCommand( + new Command('view') + .action(() => console.log(loadConfig())) + ); + +// API client +class McpctlClient { + constructor(private serverUrl: string, private token?: string) {} + + async get(path: string) { + const res = await fetch(`${this.serverUrl}${path}`, { + headers: this.token ? { Authorization: `Bearer ${this.token}` } : {} + }); + return res.json(); + } + + async post(path: string, data: any) { + const res = await fetch(`${this.serverUrl}${path}`, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + ...(this.token ? { Authorization: `Bearer ${this.token}` } : {}) + }, + body: JSON.stringify(data) + }); + return res.json(); + } +} +``` + +Config file at `~/.mcpctl/config.json`: +```json +{ + "serverUrl": "http://localhost:3000", + "token": "..." +} +``` + +**Test Strategy:** + +Test CLI argument parsing. Test configuration persistence. Mock API calls and verify request formatting. Test error handling for network failures. + +## Subtasks + +### 7.1. 
Set up CLI package structure with TDD infrastructure and command registry pattern + +**Status:** pending +**Dependencies:** None + +Create src/cli directory structure with Commander.js foundation, Vitest test configuration, and extensible command registry pattern designed to scale for all planned commands (get, describe, apply, setup, project, claude, audit, start, stop, logs). + +**Details:** + +Create src/cli/src/ with the following structure: + +- commands/ - Command modules (empty initially, registry pattern) +- config/ - Configuration loading and validation +- client/ - API client for mcpd communication +- formatters/ - Output formatters (table, json, yaml) +- utils/ - Utility functions +- types/ - TypeScript interfaces +- index.ts - Main entry point with Commander setup + +Create src/cli/tests/ with matching structure: +- unit/ (commands, config, client, formatters) +- integration/ (CLI end-to-end tests) +- fixtures/ (mock data, mock server) + +Implement command registry pattern in src/commands/registry.ts: +```typescript +export interface CommandModule { + name: string; + register(program: Command): void; +} +export class CommandRegistry { + register(module: CommandModule): void; + registerAll(program: Command): void; +} +``` + +Set up vitest.config.ts extending root config. Install dependencies: commander, chalk, js-yaml, inquirer, zod for validation. DevDependencies: vitest, @vitest/coverage-v8, msw for API mocking. + +Write initial TDD tests before implementation: +- tests/unit/commands/registry.test.ts - Test registry adds commands correctly +- tests/unit/index.test.ts - Test CLI entry point parses version, help + +### 7.2. Implement secure configuration management with encrypted credential storage + +**Status:** pending +**Dependencies:** 7.1 + +Create configuration loader/saver with ~/.mcpctl/config.json for settings and ~/.mcpctl/credentials encrypted storage for tokens. 
Include proxy settings, custom CA certificates support, and Zod validation for enterprise environments. + +**Details:** + +Create src/cli/src/config/index.ts with: + +- loadConfig(): McpctlConfig - Load from ~/.mcpctl/config.json with Zod validation +- saveConfig(config: McpctlConfig): void - Save config atomically (write to temp, rename) +- getConfigPath(): string - Platform-aware config directory +- initConfig(): void - Create config directory and initial config if not exists + +Create src/cli/src/config/credentials.ts with SECURE credential storage: +- loadCredentials(): Credentials - Load encrypted from ~/.mcpctl/credentials +- saveCredentials(creds: Credentials): void - Encrypt and save credentials +- Use platform keychain when available (keytar package), fallback to encrypted file +- NEVER store tokens in plain text or config.json +- NEVER log tokens or include in error messages + +Create McpctlConfig schema with Zod: +```typescript +const ConfigSchema = z.object({ + serverUrl: z.string().url().default('http://localhost:3000'), + proxy: z.object({ + http: z.string().url().optional(), + https: z.string().url().optional(), + noProxy: z.array(z.string()).optional() + }).optional(), + tls: z.object({ + caFile: z.string().optional(), // Custom CA certificate path + insecureSkipVerify: z.boolean().default(false) // For dev only + }).optional(), + output: z.object({ + format: z.enum(['table', 'json', 'yaml']).default('table'), + color: z.boolean().default(true) + }).optional() +}); +``` + +Secure token handling: +- loadToken(): string | undefined - Get token from credentials store +- saveToken(token: string): void - Encrypt and save +- clearToken(): void - Securely delete token + +Write TDD tests BEFORE implementation in tests/unit/config/ + +### 7.3. 
Implement McpctlClient API client with enterprise networking support + +**Status:** pending +**Dependencies:** 7.2 + +Create the HTTP API client for communicating with mcpd server with proper error handling, retry logic, proxy support, custom CA certificates, and request/response interceptors for authentication. + +**Details:** + +Create src/cli/src/client/index.ts with McpctlClient class: + +```typescript +export class McpctlClient { + constructor(config: ClientConfig) // DI for testability + + // HTTP methods with proper typing + async get<T>(path: string): Promise<T> + async post<T>(path: string, data: unknown): Promise<T> + async put<T>(path: string, data: unknown): Promise<T> + async delete<T>(path: string): Promise<T> + + // Health check for connection testing + async healthCheck(): Promise<boolean> +} +``` + +Implement networking features: +- Proxy support: Use HTTP_PROXY/HTTPS_PROXY env vars + config.proxy settings +- Custom CA: Support config.tls.caFile for enterprise CAs +- Retry logic: Exponential backoff for transient failures (503, network errors) +- Timeout: Configurable request timeout (default 30s) +- Request interceptor: Add Authorization header from credentials store +- Response interceptor: Handle 401 (clear cached token, prompt re-auth) + +Create src/cli/src/client/errors.ts: +- McpctlClientError base class +- NetworkError for connection failures +- AuthenticationError for 401 +- NotFoundError for 404 +- ServerError for 5xx + +IMPORTANT: Never log request bodies that might contain secrets. Redact Authorization header in debug logs. + +Create mock server in tests/fixtures/mock-server.ts using msw (Mock Service Worker) for offline testing. Write TDD tests before implementation. + +### 7.4. 
Implement config command group with output formatters for SRE integration + +**Status:** pending +**Dependencies:** 7.2, 7.3 + +Create the config command group (set-server, view, get-token, set-token) and multi-format output system (table, json, yaml) with --output flag designed for SRE tooling integration (jq, grep, monitoring pipelines). + +**Details:** + +Create src/cli/src/commands/config.ts implementing CommandModule: + +```typescript +// config set-server <url> +// config view +// config set-token (interactive, secure input) +// config clear-token +// config set <key> <value> (generic setter for proxy, tls, etc.) +// config get <key> +``` + +Create src/cli/src/formatters/index.ts: +```typescript +export function formatOutput(data: unknown, format: OutputFormat): string +export function printTable(data: Record<string, unknown>[], columns: ColumnDef[]): void +export function printJson(data: unknown): void // Pretty printed, sortable keys +export function printYaml(data: unknown): void // Clean YAML output +``` + +SRE-friendly output requirements: +- JSON output must be valid, parseable by jq +- YAML output must be valid, parseable by yq +- Table output should be grep-friendly (consistent column widths) +- All formats support --no-color for CI/scripting +- Add --quiet flag to suppress non-essential output +- Exit codes: 0 success, 1 error, 2 invalid arguments + +Add global --output/-o flag to main program: +```typescript +program.option('-o, --output <format>', 'Output format (table, json, yaml)', 'table'); +``` + +Register config command via CommandRegistry. Write TDD tests before implementation. + +### 7.5. 
Create mock mcpd server and comprehensive security/architecture review + +**Status:** pending +**Dependencies:** 7.1, 7.2, 7.3, 7.4 + +Build mock mcpd server for offline CLI testing, write integration tests verifying CLI works against local docker-compose mcpd, and perform comprehensive security review of credential handling, CLI history protection, and token security. + +**Details:** + +Create src/cli/tests/fixtures/mock-mcpd-server.ts: +- Full mock of mcpd API endpoints using msw or express +- Realistic response data for servers, profiles, projects +- Configurable error scenarios (timeout, 500, 401) +- Startup/shutdown helpers for test lifecycle + +Create src/cli/tests/integration/cli.test.ts: +- Full CLI integration tests using execSync against built CLI +- Test against mock server in CI, real docker-compose in local dev +- Test full workflow: config -> connect -> list resources + +SECURITY REVIEW - create src/cli/SECURITY_REVIEW.md: + +1. Credential Storage Security: + - Verify credentials encrypted at rest (not plain JSON) + - Verify keychain integration on macOS/Windows + - Verify file permissions are 600 on credential file + +2. CLI History Protection: + - Document that tokens should NEVER be passed as CLI arguments + - set-token uses stdin or prompt, not --token=xxx + - Verify no sensitive data in bash history + +3. Token Handling: + - Verify tokens never logged (search codebase for console.log patterns) + - Verify error messages don't leak tokens + - Verify tokens redacted in debug output + +4. Network Security: + - Document TLS verification (not disabled by default) + - Document proxy credential handling + - Verify no credentials sent over non-HTTPS in production + +Run security audit: 'pnpm audit --audit-level=high'. Document findings. + +Run 'pnpm lint' and 'pnpm test:coverage' ensuring >80% coverage for CLI package. 
diff --git a/.taskmaster/tasks/task_008.md b/.taskmaster/tasks/task_008.md new file mode 100644 index 0000000..f02f974 --- /dev/null +++ b/.taskmaster/tasks/task_008.md @@ -0,0 +1,341 @@ +# Task ID: 8 + +**Title:** Implement mcpctl Server Management Commands + +**Status:** pending + +**Dependencies:** 7, 4 + +**Priority:** high + +**Description:** Add kubectl-style commands for listing, describing, and managing MCP servers (get, describe, apply, delete). + +**Details:** + +Add server management commands: + +```typescript +// commands/servers.ts +program + .command('get') + .description('Display resources') + .argument('<resource>', 'Resource type (servers, projects, profiles, instances)') + .option('-o, --output <format>', 'Output format (json, yaml, table)', 'table') + .action(async (resource, options) => { + const client = getClient(); + const data = await client.get(`/api/${resource}`); + formatOutput(data, options.output); + }); + +program + .command('describe') + .description('Show detailed information') + .argument('<resource>', 'Resource type') + .argument('<name>', 'Resource name or ID') + .action(async (resource, name) => { + const client = getClient(); + const data = await client.get(`/api/${resource}/${name}`); + console.log(yaml.dump(data)); + }); + +program + .command('apply') + .description('Apply configuration from file') + .option('-f, --file <path>', 'Path to config file') + .action(async (options) => { + const config = yaml.load(fs.readFileSync(options.file, 'utf8')); + const client = getClient(); + // Determine resource type and apply + const result = await client.post(`/api/${config.kind.toLowerCase()}s`, config.spec); + console.log(`${config.kind} "${result.name}" created/updated`); + }); + +// Resource definition format (kubectl-style) +// server.yaml: +// kind: McpServer +// spec: +// name: slack +// type: slack +// command: npx +// args: ["@anthropic/mcp-server-slack"] +// envTemplate: +// SLACK_TOKEN: "required" +``` + +Output formatters: 
+```typescript +function formatOutput(data: any[], format: string) { + if (format === 'json') return console.log(JSON.stringify(data, null, 2)); + if (format === 'yaml') return console.log(yaml.dump(data)); + // Table format + console.table(data.map(d => ({ NAME: d.name, TYPE: d.type, STATUS: d.status }))); +} +``` + +**Test Strategy:** + +Test each command with mock API responses. Test output formatting for all formats. Test apply command with various YAML configurations. + +## Subtasks + +### 8.1. Write TDD test suites for output formatters and resource type validation + +**Status:** pending +**Dependencies:** None + +Create comprehensive Vitest test suites for the output formatting system (JSON, YAML, table formats) and resource type validation BEFORE implementing the actual formatters. Tests must cover all output modes, --no-headers option, exit codes, field selection, and filtering capabilities. + +**Details:** + +Create src/cli/tests/unit/formatters directory with the following test files: + +1. formatters/output-formatter.test.ts: + - Test JSON output produces valid, jq-parseable JSON with proper indentation + - Test YAML output produces valid yaml.dump() output + - Test table output produces awk/grep-parseable format with consistent column widths + - Test --no-headers option removes header row from table output + - Test --field flag filters output to only specified fields (e.g., --field name,status) + - Test formatOutput() handles empty arrays gracefully + - Test formatOutput() handles single object vs array correctly + +2. formatters/resource-types.test.ts: + - Test valid resource types (servers, projects, profiles, instances) are accepted + - Test invalid resource types throw appropriate error with helpful message + - Test resource type normalization (singular to plural: server -> servers) + - Test case-insensitive resource matching + +3. 
Create src/cli/tests/fixtures/mock-resources.ts with sample data: + - mockServers: Array of McpServer objects with name, type, status fields + - mockProfiles: Array of McpProfile objects + - mockProjects: Array of Project objects + - mockInstances: Array of McpInstance objects with running/stopped status + +4. Create exit-codes.test.ts: + - Test exit code 0 for successful operations + - Test exit code 1 for general errors + - Test exit code 2 for resource not found + - Test exit code 3 for connection/network errors + +All tests should initially fail (TDD red phase). Use Vitest mocks for console.log/console.table to capture output. + +### 8.2. Write TDD test suites for get, describe, apply, and delete commands + +**Status:** pending +**Dependencies:** 8.1 + +Create comprehensive Vitest test suites for all four server management commands BEFORE implementation. Tests must mock API responses and verify correct CLI argument parsing, option handling, error states, and output generation. + +**Details:** + +Create src/cli/tests/unit/commands directory with the following test files: + +1. commands/get.test.ts: + - Test 'mcpctl get servers' calls GET /api/servers + - Test 'mcpctl get profiles' calls GET /api/profiles + - Test 'mcpctl get projects' calls GET /api/projects + - Test 'mcpctl get instances' calls GET /api/instances + - Test '-o json' outputs JSON format + - Test '-o yaml' outputs YAML format + - Test '-o table' (default) outputs table format + - Test '--no-headers' removes table header + - Test '--field name,status' filters columns + - Test invalid resource type shows error and exits with code 2 + - Test network error exits with code 3 + +2. commands/describe.test.ts: + - Test 'mcpctl describe server slack' calls GET /api/servers/slack + - Test output is always YAML format for detailed view + - Test 404 response shows 'Resource not found' message + - Test includes all resource fields in output + +3. 
commands/apply.test.ts: + - Test '-f server.yaml' reads file and sends POST/PUT request + - Test validates 'kind' field in YAML (McpServer, McpProfile, Project) + - Test validates required 'spec' field exists + - Test creates new resource when name doesn't exist (POST) + - Test updates existing resource when name exists (PUT) + - Test SECURITY: rejects file paths with directory traversal (../, etc.) + - Test SECURITY: validates YAML doesn't contain shell injection patterns + - Test SECURITY: limits file size to prevent DoS + - Test handles malformed YAML with clear error message + +4. commands/delete.test.ts: + - Test 'mcpctl delete server slack' calls DELETE /api/servers/slack + - Test prompts for confirmation unless --force is passed + - Test --force skips confirmation + - Test 404 shows appropriate 'not found' message + +Create src/cli/tests/fixtures/yaml-configs/ directory with sample YAML files for testing apply command. + +### 8.3. Implement output formatters with reusable architecture and SRE-friendly features + +**Status:** pending +**Dependencies:** 8.1 + +Implement the output formatting system with JSON, YAML, and table formats. Include --no-headers option for scripting, parseable exit codes, and --field flag for field selection. Design for reusability across all CLI commands. + +**Details:** + +Create src/cli/src/formatters directory with the following modules: + +1. formatters/output-formatter.ts: +```typescript +export type OutputFormat = 'json' | 'yaml' | 'table'; + +export interface FormatOptions { + format: OutputFormat; + noHeaders?: boolean; + fields?: string[]; +} + +export function formatOutput<T extends Record<string, unknown>>(data: T | T[], options: FormatOptions): string; +export function printOutput<T extends Record<string, unknown>>(data: T | T[], options: FormatOptions): void; +``` + +2. 
formatters/json-formatter.ts: + - Produce properly indented JSON (2 spaces) + - Ensure jq pipeline compatibility + - Support field filtering before output + +3. formatters/yaml-formatter.ts: + - Use js-yaml for YAML output + - Ensure kubectl-compatible YAML formatting + - Support field filtering + +4. formatters/table-formatter.ts: + - Fixed-width columns for awk/grep parseability + - Tab-separated values for reliable parsing + - UPPERCASE header row (NAME, TYPE, STATUS) + - --no-headers support for scripting + - Auto-truncate long values with ellipsis + +5. formatters/field-selector.ts: + - Parse --field flag (comma-separated field names) + - Support nested fields with dot notation (spec.name) + - Validate fields exist in data schema + +6. Create exit-codes.ts: +```typescript +export const EXIT_CODES = { + SUCCESS: 0, + ERROR: 1, + NOT_FOUND: 2, + NETWORK_ERROR: 3, + VALIDATION_ERROR: 4 +} as const; +``` + +7. Create resource-types.ts: + - Define valid resource types enum + - Singular to plural normalization + - Validation function with helpful error messages + +Ensure all formatters are pure functions for easy unit testing. Export via barrel file formatters/index.ts. + +### 8.4. Implement get, describe, apply, and delete commands with security hardening + +**Status:** pending +**Dependencies:** 8.2, 8.3 + +Implement all four server management commands using Commander.js. The apply command must include comprehensive security validation for file paths and YAML content to prevent injection attacks and path traversal vulnerabilities. + +**Details:** + +Create src/cli/src/commands/resources.ts with all four commands: + +1. 'get' command implementation: + - Register as 'mcpctl get <resource> [options]' + - Options: -o/--output (json|yaml|table), --no-headers, --field <fields> + - Call getClient().get(`/api/${resource}`) from cli-client + - Pass result to formatOutput() with options + - Handle errors with appropriate exit codes + +2. 
'describe' command implementation: + - Register as 'mcpctl describe <resource> <name>' + - Call getClient().get(`/api/${resource}/${name}`) + - Output always in YAML format with full details + - Handle 404 with NOT_FOUND exit code + +3. 'apply' command implementation with SECURITY HARDENING: + - Register as 'mcpctl apply -f <file>' + - SECURITY: Validate file path + - Reject paths containing '..' (directory traversal) + - Reject absolute paths outside allowed directories + - Validate file extension is .yaml or .yml + - Check file size < 1MB to prevent DoS + - SECURITY: Validate YAML content + - Parse with yaml.load() using safe schema + - Validate 'kind' is in allowed list (McpServer, McpProfile, Project) + - Validate 'spec' object has expected structure + - Reject YAML with embedded functions or anchors if not needed + - Determine create vs update by checking if resource exists + - POST for create, PUT for update + - Output success message with resource name + +4. 'delete' command implementation: + - Register as 'mcpctl delete <resource> <name>' + - Prompt for confirmation using inquirer + - Support --force flag to skip confirmation + - Call DELETE /api/${resource}/${name} + - Output success/failure message + +5. Register all commands in command registry from Task 7. + +Create src/cli/src/utils/path-validator.ts for reusable path validation. +Create src/cli/src/utils/yaml-validator.ts for YAML security checks. + +### 8.5. Create integration tests with mock API server and comprehensive security review + +**Status:** pending +**Dependencies:** 8.4 + +Build complete integration test suite running all commands against a mock mcpd API server. Perform and document comprehensive security review of the apply command and path handling. Ensure all SRE and data engineer requirements are met. + +**Details:** + +Create src/cli/tests/integration directory with: + +1. 
integration/mock-mcpd-server.ts: + - Express server mocking all required endpoints: + - GET/POST /api/servers, GET/PUT/DELETE /api/servers/:name + - GET/POST /api/profiles, GET/PUT/DELETE /api/profiles/:name + - GET/POST /api/projects, GET/PUT/DELETE /api/projects/:name + - GET /api/instances + - Configurable responses for testing error scenarios + - Realistic latency simulation + +2. integration/commands.test.ts: + - Full command execution using execSync against built CLI + - Test get/describe/apply/delete for all resource types + - Test all output formats work correctly + - Test error handling and exit codes + - Test --no-headers and --field flags + +3. integration/sre-compatibility.test.ts: + - Test output is grep-friendly: 'mcpctl get servers | grep running' + - Test output is awk-friendly: 'mcpctl get servers | awk "{print $1}"' + - Test JSON is jq-friendly: 'mcpctl get servers -o json | jq .[]' + - Test exit codes work with shell scripts: 'mcpctl get servers || echo failed' + - Test --no-headers works for scripting + +4. integration/data-engineer.test.ts: + - Test --field flag for selecting specific columns + - Test filtering capabilities for data pipeline inspection + - Test describe provides full resource details + +5. Update src/cli/SECURITY_REVIEW.md: + - Document apply command security measures: + - Path traversal prevention with test evidence + - File size limits + - YAML injection prevention + - Document how credentials are NOT logged + - Document safe handling of user-supplied input + - Include security test results and findings + +6. 
Verify all requirements from task context: + - TDD: All unit and integration tests pass + - LOCAL DEV: Mock server works offline + - SECURITY: Document YAML injection risks and mitigations + - ARCHITECTURE: Formatters are reusable across commands + - SRE: Output is parseable, exit codes documented + - DATA ENGINEER: Field selection and filtering works diff --git a/.taskmaster/tasks/task_009.md b/.taskmaster/tasks/task_009.md new file mode 100644 index 0000000..86ee736 --- /dev/null +++ b/.taskmaster/tasks/task_009.md @@ -0,0 +1,319 @@ +# Task ID: 9 + +**Title:** Implement mcpctl Project Commands + +**Status:** pending + +**Dependencies:** 7, 5 + +**Priority:** high + +**Description:** Add commands for managing MCP projects and the critical 'claude add-mcp-project' command for integrating with Claude sessions. + +**Details:** + +Add project commands: + +```typescript +// commands/projects.ts +const projectCmd = program + .command('project') + .description('Manage MCP projects'); + +projectCmd + .command('create') + .argument('<name>', 'Project name') + .option('--profiles <profiles...>', 'Profile names to include') + .action(async (name, options) => { + const client = getClient(); + const profiles = await client.get('/api/profiles'); + const profileIds = profiles + .filter(p => options.profiles?.includes(p.name)) + .map(p => p.id); + const project = await client.post('/api/projects', { name, profileIds }); + console.log(`Project "${project.name}" created`); + }); + +projectCmd + .command('add-profile') + .argument('<project>', 'Project name') + .argument('<profile>', 'Profile name to add') + .action(async (project, profile) => { + // Add profile to project + }); + +// Critical command: claude add-mcp-project +const claudeCmd = program + .command('claude') + .description('Claude integration commands'); + +claudeCmd + .command('add-mcp-project') + .argument('<project>', 'Project name') + .option('--path <path>', 'Path to .mcp.json', '.mcp.json') + .action(async 
(projectName, options) => { + const client = getClient(); + const mcpConfig = await client.get(`/api/projects/${projectName}/mcp-config`); + + // Read existing .mcp.json or create new + let existing = {}; + if (fs.existsSync(options.path)) { + existing = JSON.parse(fs.readFileSync(options.path, 'utf8')); + } + + // Merge project MCPs into existing config + const merged = { + mcpServers: { + ...existing.mcpServers, + ...mcpConfig.mcpServers + } + }; + + fs.writeFileSync(options.path, JSON.stringify(merged, null, 2)); + console.log(`Added project "${projectName}" to ${options.path}`); + console.log('MCPs added:', Object.keys(mcpConfig.mcpServers).join(', ')); + }); + +claudeCmd + .command('remove-mcp-project') + .argument('<project>', 'Project name') + .action(async (projectName) => { + // Remove project MCPs from .mcp.json + }); +``` + +**Test Strategy:** + +Test project creation with and without profiles. Test claude add-mcp-project creates valid .mcp.json. Test merging with existing .mcp.json preserves other entries. + +## Subtasks + +### 9.1. Write TDD tests for project command Zod schemas and CLI argument parsing + +**Status:** pending +**Dependencies:** None + +Create comprehensive Vitest test suites for project command validation schemas, CLI argument parsing for project create/add-profile/remove-profile/status commands, and the claude command group structure BEFORE implementing any commands. + +**Details:** + +Create src/cli/tests/unit/commands/project.test.ts with TDD tests for: + +1. Project command validation schemas: + - CreateProjectSchema: name (alphanumeric-dash, 3-64 chars), --profiles array (optional, profile names) + - AddProfileSchema: project name (required), profile name (required) + - Test invalid project names rejected (spaces, special chars, empty) + - Test profile names validated against expected format + +2. 
CLI argument parsing tests: + - Test 'mcpctl project create weekly_reports' parses correctly + - Test 'mcpctl project create weekly_reports --profiles slack-ro jira-ro' captures profile array + - Test 'mcpctl project add-profile weekly_reports slack-full' captures both arguments + - Test 'mcpctl project remove-profile' validates required arguments + - Test 'mcpctl project status <name>' parses project name + - Test '--help' on project subcommands shows usage + +3. Claude command group structure tests: + - Test 'mcpctl claude' shows available subcommands + - Test 'mcpctl claude add-mcp-project' is recognized + - Test 'mcpctl claude remove-mcp-project' is recognized + - Verify extensible command group architecture for future Claude integration features + +Create src/cli/tests/fixtures/mock-profiles.ts with sample profile data (slack-ro, slack-full, jira-ro, jira-full, github-ro). All tests should initially fail (TDD red phase). + +### 9.2. Write TDD tests for claude add-mcp-project with .mcp.json security validation + +**Status:** pending +**Dependencies:** 9.1 + +Create comprehensive Vitest test suites for the critical claude add-mcp-project command focusing on .mcp.json manipulation, merge behavior with existing configs, path validation, and SECURITY: ensuring secrets are NEVER written to .mcp.json. + +**Details:** + +Create src/cli/tests/unit/commands/claude.test.ts with TDD tests: + +1. Basic functionality tests: + - Test 'mcpctl claude add-mcp-project weekly_reports' calls GET /api/projects/weekly_reports/mcp-config + - Test creates .mcp.json when file doesn't exist + - Test writes valid JSON with mcpServers object + - Test output includes list of added MCP server names + - Test '--path custom.mcp.json' writes to specified path + +2. 
Merge behavior tests: + - Test merges with existing .mcp.json preserving other entries + - Test existing mcpServers entries NOT overwritten (no data loss) + - Test handles empty existing .mcp.json gracefully + - Test handles malformed existing .mcp.json with clear error + +3. SECURITY tests (critical): + - Test .mcp.json output NEVER contains secret env values (SLACK_BOT_TOKEN, JIRA_API_TOKEN, GITHUB_TOKEN) + - Test env object only contains non-secret placeholder or reference values + - Test path traversal rejected: --path '../../../etc/passwd' fails + - Test --path validates parent directory exists + - Test command injection patterns in project name rejected + +4. Error handling tests: + - Test 404 from API shows 'Project not found' message + - Test network error shows connection error + - Test write permission error handled gracefully + +5. Remove command tests: + - Test 'mcpctl claude remove-mcp-project weekly_reports' removes project's servers from .mcp.json + - Test preserves other unrelated mcpServers entries + +Create src/cli/tests/fixtures/sample-mcp-json.ts with various .mcp.json states for testing. + +### 9.3. Implement project command group with CRUD operations and profile management + +**Status:** pending +**Dependencies:** 9.1 + +Implement the project subcommand group (create, add-profile, remove-profile, list, describe) using Commander.js with full TDD tests passing. Include project status command showing MCP server health for SRE dashboards. + +**Details:** + +Create src/cli/src/commands/project.ts implementing CommandModule: + +1. Command registration: +```typescript +const projectCmd = program.command('project').description('Manage MCP projects'); +``` + +2. 
'project create' command: + - Arguments: <name> (required) + - Options: --profiles <profiles...> (profile names to include) + - Implementation: Fetch /api/profiles to resolve names to IDs, POST /api/projects + - Validation: Project name format validation via Zod schema + - Output: 'Project "name" created with N profiles' + +3. 'project add-profile' command: + - Arguments: <project> <profile> (both required) + - Implementation: GET current project, add profile ID, PUT /api/projects/:id/profiles + - Handle profile not found with clear error message + +4. 'project remove-profile' command: + - Arguments: <project> <profile> + - Implementation: Remove profile from project's profile list + +5. 'project list' command: + - Output: Table format showing NAME, PROFILES, CREATED columns + - Support -o json/yaml output formats + +6. 'project describe <name>' command: + - Show full project details including all profiles and their servers + +7. 'project status <name>' command (SRE requirement): + - Show project with all MCP servers and their health status + - Display: SERVER_NAME, PROFILE, STATUS (running/stopped/error), LAST_HEALTH_CHECK + - Support -o json for monitoring pipeline integration + - Exit code 0 if all healthy, 1 if any unhealthy (for alerting) + +8. Support tags/labels for data engineer categorization: + - Add --tag <key=value> option to create command + - Add --filter-tag <key=value> option to list command + +### 9.4. Implement claude command group with secure add-mcp-project and remove-mcp-project + +**Status:** pending +**Dependencies:** 9.2, 9.3 + +Implement the extensible claude subcommand group with the critical add-mcp-project command that safely writes .mcp.json without secrets, supporting both direct mcpd URLs and service discovery patterns for networking team requirements. + +**Details:** + +Create src/cli/src/commands/claude.ts implementing CommandModule: + +1. 
Extensible command group architecture: +```typescript +const claudeCmd = program.command('claude').description('Claude integration commands'); +// Designed for future: claude sync, claude validate, claude diagnose +``` + +2. 'claude add-mcp-project' implementation: + - Arguments: <project> (project name) + - Options: --path <path> (default: .mcp.json), --dry-run (show what would be written) + - Implementation: + a. Validate --path: reject traversal (../), validate extension (.json) + b. GET /api/projects/<project>/mcp-config from mcpd + c. SECURITY: Verify response contains NO secret values (double-check even though API shouldn't return them) + d. Read existing .mcp.json if exists, parse JSON + e. Merge: existing.mcpServers + new mcpServers (new overwrites conflicts) + f. Write atomic (temp file + rename) + - Output: List of added MCP server names + +3. SECURITY implementation in mcp-json-writer.ts: + - Create sanitizeMcpConfig() function that strips any env values matching secret patterns + - Log warning if API returns unexpected secret-looking values + - Never write plain-text credentials to filesystem + +4. Service discovery support (networking team requirement): + - Support mcpServers entries pointing to mcpd via: + a. Direct URL: env.MCPD_URL = 'http://nas:3000' + b. Service discovery: env.MCPD_SERVICE = 'mcpd.local' + - Document both patterns in command help + +5. 'claude remove-mcp-project' implementation: + - Read .mcp.json, identify servers added by this project (track via metadata) + - Remove only those servers, preserve others + - Add __mcpctl_source metadata to track which project added each server + +6. Create utils/mcp-json-utils.ts: + - readMcpJson(path): safely read and parse + - writeMcpJson(path, config): atomic write with backup + - mergeMcpServers(existing, new): merge logic + - validateMcpJson(config): structure validation + +### 9.5. 
Create integration tests and comprehensive security review documentation + +**Status:** pending +**Dependencies:** 9.3, 9.4 + +Build complete integration test suite testing project and claude commands against mock mcpd server, perform security review of .mcp.json manipulation, and document all security considerations including injection risks and credential handling. + +**Details:** + +Create src/cli/tests/integration/project-commands.test.ts: + +1. Full workflow integration tests: + - Start mock mcpd server with realistic responses + - Create project with profiles via CLI + - Add profiles to project + - Run 'claude add-mcp-project' and verify .mcp.json output + - Verify merge preserves existing entries + - Remove project and verify cleanup + +2. SRE integration tests: + - Test 'project status' output is grep-friendly + - Test exit codes work with shell scripts + - Test JSON output parseable by jq + - Test integration with monitoring (mock Prometheus metrics endpoint) + +3. Data engineer integration tests: + - Test project with tags (--tag team=data, --tag category=analytics) + - Test filtering by tags works + - Test BigQuery/Snowflake-style profile groupings + +4. Create src/cli/docs/SECURITY_REVIEW.md documenting: + - .mcp.json manipulation security: + a. Path traversal prevention with test evidence + b. Atomic file writes to prevent corruption + c. NEVER writing secrets (enforced at multiple layers) + - JSON injection prevention: + a. Input validation on project/profile names + b. Safe JSON serialization (no eval) + - Credential flow documentation: + a. Secrets stored server-side only + b. .mcp.json contains references, not values + c. CLI prompts for secrets locally when needed + - File permission recommendations (chmod 600) + +5. Mock mcpd server enhancements: + - Add /api/projects/:name/mcp-config endpoint + - Return realistic MCP config structure + - Test error scenarios (404, 500, timeout) + +6. 
Run full security audit: + - 'pnpm audit' for dependencies + - grep for console.log of sensitive data + - Verify no hardcoded credentials + - Document findings in SECURITY_REVIEW.md diff --git a/.taskmaster/tasks/task_010.md b/.taskmaster/tasks/task_010.md new file mode 100644 index 0000000..c78245e --- /dev/null +++ b/.taskmaster/tasks/task_010.md @@ -0,0 +1,327 @@ +# Task ID: 10 + +**Title:** Implement Interactive MCP Server Setup Wizard + +**Status:** pending + +**Dependencies:** 7, 4 + +**Priority:** medium + +**Description:** Create an interactive setup wizard that guides users through MCP server configuration, including OAuth flows and API token generation. + +**Details:** + +Create interactive setup wizard: + +```typescript +// commands/setup.ts +import inquirer from 'inquirer'; +import open from 'open'; + +program + .command('setup') + .argument('<server-type>', 'MCP server type (slack, jira, github, etc.)') + .action(async (serverType) => { + const client = getClient(); + const serverDef = await client.get(`/api/mcp-servers/types/${serverType}`); + + console.log(`\n🚀 Setting up ${serverDef.name} MCP Server\n`); + + // Show setup guide + if (serverDef.setupGuide) { + console.log(serverDef.setupGuide); + } + + // Collect required credentials + const answers = {}; + for (const [key, info] of Object.entries(serverDef.envTemplate)) { + if (info.oauth) { + // Handle OAuth flow + console.log(`\n📱 Opening browser for ${key} authentication...`); + const authUrl = `${client.serverUrl}/auth/${serverType}/start`; + await open(authUrl); + + const { token } = await inquirer.prompt([{ + type: 'input', + name: 'token', + message: 'Paste the token from the browser:' + }]); + answers[key] = token; + } else if (info.url) { + // Guide user to token generation page + console.log(`\n🔗 Opening ${info.description}...`); + await open(info.url); + console.log('Generate an API token with the following permissions:'); + console.log(info.permissions?.join(', ')); + + const { value } = 
await inquirer.prompt([{ + type: 'password', + name: 'value', + message: `Enter your ${key}:` + }]); + answers[key] = value; + } else { + const { value } = await inquirer.prompt([{ + type: info.secret ? 'password' : 'input', + name: 'value', + message: `Enter ${key}:`, + default: info.default + }]); + answers[key] = value; + } + } + + // Create profile with credentials + const { profileName } = await inquirer.prompt([{ + type: 'input', + name: 'profileName', + message: 'Name for this profile:', + default: `${serverType}-default` + }]); + + const profile = await client.post(`/api/mcp-servers/${serverDef.id}/profiles`, { + name: profileName, + config: answers + }); + + console.log(`\n✅ Profile "${profileName}" created successfully!`); + console.log(`Use: mcpctl project add-profile <project> ${profileName}`); + }); +``` + +Server-side setup definitions: +```typescript +const slackSetup = { + envTemplate: { + SLACK_BOT_TOKEN: { + description: 'Slack Bot Token', + url: 'https://api.slack.com/apps', + permissions: ['channels:read', 'chat:write', 'users:read'], + secret: true + }, + SLACK_TEAM_ID: { + description: 'Slack Team ID', + secret: false + } + } +}; +``` + +**Test Strategy:** + +Test wizard flow with mocked inquirer responses. Test OAuth URL generation. Test profile creation with collected credentials. Integration test with actual Slack/Jira setup. + +## Subtasks + +### 10.1. Write TDD tests for wizard step components and credential collection flow + +**Status:** pending +**Dependencies:** None + +Create comprehensive Vitest test suites for all wizard step functions BEFORE implementation, including tests for OAuth flows, API token collection, service account JSON upload, and inquirer prompt mocking for deterministic testing. + +**Details:** + +Create src/cli/tests/unit/commands/setup/wizard-steps.test.ts with TDD tests using vi.mock('inquirer') for deterministic prompt testing. 
Test cases: (1) collectCredential() with OAuth type opens browser and waits for callback token, (2) collectCredential() with API token type shows URL guidance and accepts password input, (3) collectCredential() with service account type accepts file path and validates JSON structure (for BigQuery), (4) collectCredential() with connection string type validates format (for Snowflake), (5) showSetupGuide() renders markdown correctly to terminal, (6) validateCredential() calls mcpd API to verify token before storage, (7) createProfile() posts to /api/mcp-servers/:id/profiles endpoint. Create src/cli/tests/unit/commands/setup/index.test.ts testing full wizard flow: parse server type argument, fetch server definition, iterate envTemplate, collect all credentials, create profile. Write mock fixtures for server definitions (Slack OAuth, Jira API token, GitHub PAT, BigQuery service account, Snowflake OAuth + connection string, dbt Cloud API token). All tests should fail initially (TDD red phase). + +### 10.2. Implement composable wizard step functions with auth strategy pattern + +**Status:** pending +**Dependencies:** 10.1 + +Create reusable, testable wizard step functions following the strategy pattern for different authentication types (OAuth, API token, service account JSON, connection string, multi-step flows) that can be composed for complex data platform MCP setups. 
+ +**Details:** + +Create src/cli/src/commands/setup/auth-strategies.ts with authentication strategy interface and implementations: + +```typescript +interface AuthStrategy { + name: string; + collect(envKey: string, info: EnvTemplateInfo, options: CollectOptions): Promise<string>; + validate?(value: string): Promise<boolean>; +} + +class OAuthStrategy implements AuthStrategy // Opens browser, waits for callback +class ApiTokenStrategy implements AuthStrategy // Shows URL, accepts password input +class ServiceAccountStrategy implements AuthStrategy // File path input, JSON validation +class ConnectionStringStrategy implements AuthStrategy // Format validation (user:pass@host:port/db) +class MultiStepStrategy implements AuthStrategy // Composes multiple sub-strategies +``` + +Create src/cli/src/commands/setup/wizard-steps.ts with composable functions: +- showSetupGuide(guide: string): void - Render markdown to terminal with chalk +- selectAuthStrategy(info: EnvTemplateInfo): AuthStrategy - Factory based on envTemplate metadata +- collectCredentials(envTemplate: EnvTemplate, strategies: AuthStrategy[]): Promise<Record<string, string>> +- validateAllCredentials(credentials: Record<string, string>, server: McpServer): Promise<ValidationResult> +- createProfile(serverId: string, profileName: string, config: Record<string, string>): Promise<Profile> + +Data Engineer MCP support: +- BigQuery: ServiceAccountStrategy expecting JSON key file with 'type': 'service_account' +- Snowflake: MultiStepStrategy combining ConnectionStringStrategy + OAuthStrategy +- dbt Cloud: ApiTokenStrategy with project selection step + +All functions must pass TDD tests from subtask 1. + +### 10.3. 
Implement setup command with --non-interactive flag for CI/scripting + +**Status:** pending +**Dependencies:** 10.2 + +Create the main 'mcpctl setup <server-type>' command that orchestrates the wizard flow, with --non-interactive flag for CI/automation that accepts credentials via environment variables or stdin JSON. + +**Details:** + +Create src/cli/src/commands/setup/index.ts implementing CommandModule: + +```typescript +program + .command('setup') + .argument('<server-type>', 'MCP server type (slack, jira, github, bigquery, snowflake, dbt)') + .option('--non-interactive', 'Run without prompts, use env vars or stdin') + .option('--profile-name <name>', 'Name for the created profile') + .option('--stdin', 'Read credentials JSON from stdin') + .option('--dry-run', 'Validate without creating profile') + .action(async (serverType, options) => { ... }) +``` + +Interactive flow: +1. Fetch server definition from mcpd: GET /api/mcp-servers/types/:type +2. Display setup guide with showSetupGuide() +3. For each envTemplate entry, use selectAuthStrategy() and collect() +4. Validate all credentials with validateAllCredentials() +5. Prompt for profile name (default: ${serverType}-default) +6. Create profile via mcpd API +7. Print success message with 'mcpctl project add-profile' hint + +Non-interactive flow: +- --stdin: Read JSON from stdin with structure { "SLACK_BOT_TOKEN": "xoxb-...", ... } +- Env vars: Check for each envTemplate key in process.env +- Fail with clear error if required credential missing +- Validate all credentials before creating profile +- --dry-run: Skip profile creation, just validate + +Offline/local dev support: +- When mcpd unreachable, offer cached server definitions +- Support --mcpd-url override for local development + +Register via CommandRegistry. Write integration tests. + +### 10.4. 
Implement OAuth browser flow with proxy and enterprise SSO support + +**Status:** pending +**Dependencies:** 10.2 + +Create secure OAuth flow handler that opens browser for authentication, handles callback tokens, supports HTTP/HTTPS proxies, custom CA certificates for enterprise SSO, and secure redirect URL handling. + +**Details:** + +Create src/cli/src/commands/setup/oauth-handler.ts: + +```typescript +export class OAuthHandler { + constructor(private config: OAuthConfig) {} + + async startOAuthFlow(serverType: string): Promise<string> { + // 1. Generate state token for CSRF protection + // 2. Build auth URL with state and redirect_uri + // 3. Start local callback server on random port + // 4. Open browser with 'open' package + // 5. Wait for callback with token or timeout + // 6. Validate state matches + // 7. Return access token + } +} +``` + +Enterprise networking support: +- Load proxy settings from config (Task 7) and environment (HTTP_PROXY, HTTPS_PROXY, NO_PROXY) +- Support custom CA certificates for enterprise SSO (config.tls.caFile) +- Use https.Agent with proxy-agent for HTTPS requests through proxy +- Handle proxy authentication (Proxy-Authorization header) + +Callback server: +- Start on localhost:0 (random available port) +- Timeout after 5 minutes with clear error message +- CSRF protection via state parameter +- Redirect to success page after token received +- Shutdown immediately after callback + +Security considerations: +- State token must be cryptographically random (crypto.randomBytes) +- Validate redirect_uri matches expected pattern +- Don't log access tokens +- Clear token from memory after passing to credential store + +Create src/cli/tests/unit/commands/setup/oauth-handler.test.ts with mocked browser and HTTP server. + +### 10.5. 
Implement secure credential storage and comprehensive security review + +**Status:** pending +**Dependencies:** 10.2, 10.3, 10.4 + +Create secure credential storage for wizard-collected tokens using system keychain or encrypted file storage, validate tokens before storage, and conduct comprehensive security review of all OAuth handling, credential storage, and browser redirect safety. + +**Details:** + +Create src/cli/src/commands/setup/credential-store.ts: + +```typescript +export class WizardCredentialStore { + // Store credentials securely for later profile creation + async storeCredential(key: string, value: string, options: StoreOptions): Promise<void> + + // Validate credential with mcpd before storing + async validateAndStore(serverType: string, key: string, value: string): Promise<ValidationResult> + + // Retrieve for profile creation (one-time use) + async retrieveAndClear(key: string): Promise<string> +} +``` + +Secure storage implementation: +- Primary: System keychain via 'keytar' package (macOS Keychain, Windows Credential Vault, Linux Secret Service) +- Fallback: Encrypted file at ~/.mcpctl/wizard-credentials (AES-256-GCM) +- Encryption key derived from machine-specific data + user password +- Credentials cleared after profile creation (one-time use) + +API token validation before storage: +- POST /api/mcp-servers/:type/validate-credentials with credentials +- Slack: Test token with auth.test API +- Jira: Test with /rest/api/3/myself +- GitHub: Test with /user API +- BigQuery: Test service account with projects.list +- Snowflake: Test connection with simple query +- dbt: Test with /api/v2/accounts + +SECURITY REVIEW - create src/cli/docs/SETUP_WIZARD_SECURITY_REVIEW.md: + +1. OAuth Token Handling: + - State parameter uses crypto.randomBytes(32) + - Tokens never logged or written to non-encrypted storage + - Browser redirect validates callback URL pattern + - Local callback server binds to localhost only + +2. 
Credential Storage Security: + - Keychain used when available, encrypted file fallback + - File permissions 600 on credential storage + - Credentials cleared after single use + - No credentials in CLI history (no --token=xxx args) + +3. API Token Validation: + - All tokens validated before storage + - Validation errors don't leak token in error message + - Failed validation clears token from memory + +4. Network Security: + - HTTPS required for OAuth (except localhost callback) + - Proxy credentials handled securely + - Custom CA for enterprise SSO supported + +5. Browser Redirect Safety: + - Only localhost:port/callback pattern accepted + - State token prevents CSRF + - Success page doesn't display token + +Run 'pnpm audit --audit-level=high' and document findings. diff --git a/.taskmaster/tasks/task_011.md b/.taskmaster/tasks/task_011.md new file mode 100644 index 0000000..eb49e30 --- /dev/null +++ b/.taskmaster/tasks/task_011.md @@ -0,0 +1,191 @@ +# Task ID: 11 + +**Title:** Design Local LLM Proxy Architecture + +**Status:** pending + +**Dependencies:** 1, 3 + +**Priority:** high + +**Description:** Design the local proxy component that intercepts MCP requests, uses local LLMs to pre-filter data, and communicates with mcpd. 
+ +**Details:** + +Create the local-proxy package architecture: + +```typescript +// src/local-proxy/src/index.ts + +// The local proxy acts as an MCP server that Claude connects to +// It intercepts requests, uses local LLM for filtering, then forwards to mcpd + +import { Server } from '@modelcontextprotocol/sdk/server/index.js'; +import { StdioServerTransport } from '@modelcontextprotocol/sdk/server/stdio.js'; + +export class McpctlLocalProxy { + private server: Server; + private llmProvider: LLMProvider; + private mcpdClient: McpdClient; + + constructor(config: ProxyConfig) { + this.server = new Server({ + name: 'mcpctl-proxy', + version: '1.0.0' + }, { + capabilities: { tools: {} } + }); + + this.llmProvider = createLLMProvider(config.llm); + this.mcpdClient = new McpdClient(config.mcpdUrl); + + this.setupHandlers(); + } + + private setupHandlers() { + // List available tools from all configured MCP servers + this.server.setRequestHandler('tools/list', async () => { + const tools = await this.mcpdClient.listAvailableTools(); + return { tools }; + }); + + // Handle tool calls with pre-filtering + this.server.setRequestHandler('tools/call', async (request) => { + const { name, arguments: args } = request.params; + + // Step 1: Use local LLM to interpret the request + const refinedQuery = await this.llmProvider.refineQuery({ + tool: name, + originalArgs: args, + context: request.params._context // What Claude is looking for + }); + + // Step 2: Forward to mcpd with refined query + const rawResult = await this.mcpdClient.callTool(name, refinedQuery); + + // Step 3: Use local LLM to filter/summarize response + const filteredResult = await this.llmProvider.filterResponse({ + tool: name, + query: refinedQuery, + response: rawResult, + maxTokens: 2000 // Keep context window small for Claude + }); + + return { content: [{ type: 'text', text: filteredResult }] }; + }); + } + + async start() { + const transport = new StdioServerTransport(); + await 
this.server.connect(transport); + } +} + +// LLM Provider interface +interface LLMProvider { + refineQuery(params: RefineParams): Promise<any>; + filterResponse(params: FilterParams): Promise<string>; +} +``` + +Architecture flow: +``` +Claude <--stdio--> mcpctl-proxy <--HTTP--> mcpd <---> MCP servers (containers) + | + v + Local LLM (Ollama/Gemini/vLLM) +``` + +**Test Strategy:** + +Unit test request/response transformation. Mock LLM provider and verify refinement logic. Integration test with actual local LLM. Test error handling when LLM is unavailable. + +## Subtasks + +### 11.1. Create local-proxy package structure with TDD infrastructure and mock LLM provider + +**Status:** pending +**Dependencies:** None + +Initialize the src/local-proxy directory with clean architecture layers, Vitest configuration, and a comprehensive mock LLM provider for testing without GPU requirements. + +**Details:** + +Create src/local-proxy/ with directory structure: src/{handlers,providers,services,middleware,types,utils}. Set up package.json with @modelcontextprotocol/sdk, vitest, and shared workspace dependencies. Configure vitest.config.ts with coverage requirements (>90%). Implement MockLLMProvider class that returns deterministic responses for testing - this is critical for CI/CD pipelines without GPU. Create test fixtures with sample MCP requests/responses for Slack, Jira, and database query scenarios. Include test utilities: createMockMcpRequest(), createMockLLMResponse(), createTestProxyInstance(). The mock provider must support configurable latency simulation and error injection for chaos testing. + +### 11.2. Design and implement LLMProvider interface with pluggable adapter architecture + +**Status:** pending +**Dependencies:** 11.1 + +Create the abstract LLMProvider interface and adapter factory pattern that allows swapping LLM backends (Ollama, Gemini, vLLM, DeepSeek) without changing proxy logic. 
+ +**Details:** + +Define LLMProvider interface in src/types/llm.ts with methods: refineQuery(params: RefineParams): Promise<RefinedQuery>, filterResponse(params: FilterParams): Promise<FilteredResponse>, healthCheck(): Promise<boolean>, getMetrics(): ProviderMetrics. Create LLMProviderFactory that accepts provider configuration and returns appropriate implementation. Design for composability - allow chaining providers (e.g., Ollama for refinement, Gemini for filtering). Include connection pooling interface for providers that support it. Create abstract BaseLLMProvider class with common retry logic, timeout handling, and metrics collection. Define clear error types: LLMUnavailableError, LLMTimeoutError, LLMRateLimitError, PromptInjectionDetectedError. + +### 11.3. Implement MCP SDK server handlers with request/response transformation and validation + +**Status:** pending +**Dependencies:** 11.1, 11.2 + +Create the core McpctlLocalProxy class using @modelcontextprotocol/sdk with handlers for tools/list and tools/call, including MCP protocol message validation to prevent malformed requests. + +**Details:** + +Implement McpctlLocalProxy in src/index.ts following the architecture from task details. Create setRequestHandler for 'tools/list' that fetches available tools from mcpd and caches them with TTL. Create setRequestHandler for 'tools/call' with three-phase processing: (1) refineQuery phase using LLM, (2) forward to mcpd phase, (3) filterResponse phase using LLM. Implement MCP protocol validation middleware using Zod schemas - validate all incoming JSON-RPC messages against MCP specification before processing. Create McpdClient class in src/services/mcpd-client.ts with HTTP client for mcpd communication, including connection pooling and health checks. Handle stdio transport initialization with proper cleanup on SIGTERM/SIGINT. + +### 11.4. 
Implement security layer with prompt injection prevention and data isolation + +**Status:** pending +**Dependencies:** 11.2, 11.3 + +Create security middleware that validates all inputs, prevents prompt injection in LLM queries, ensures no data leakage between users, and sanitizes all MCP protocol messages. + +**Details:** + +Create src/middleware/security.ts with: (1) PromptInjectionValidator that scans user inputs for common injection patterns before sending to LLM - detect and reject inputs containing 'ignore previous', 'system:', role-switching attempts. (2) InputSanitizer that validates and sanitizes all tool arguments against expected schemas. (3) ResponseSanitizer that removes potentially sensitive data patterns (API keys, passwords, PII) from LLM-filtered responses before returning to Claude. (4) RequestIsolation middleware ensuring each request has its own context with no shared mutable state - critical for multi-tenant scenarios. Create SECURITY_AUDIT.md documenting all security controls and their test coverage. Implement allowlist-based argument validation for known MCP tools. + +### 11.5. Implement configurable filtering strategies with per-profile aggressiveness settings + +**Status:** pending +**Dependencies:** 11.2, 11.3 + +Create composable filtering strategy system that allows data scientists to configure filtering aggressiveness per MCP server type, supporting different needs for raw SQL vs pre-aggregated dashboards. + +**Details:** + +Design FilterStrategy interface in src/services/filter-engine.ts with methods: shouldFilter(response: McpResponse): boolean, filter(response: McpResponse, config: FilterConfig): FilteredResponse, getAggressiveness(): number. Implement AggressiveFilter for raw SQL results (summarize, limit rows, remove redundant columns), MinimalFilter for pre-aggregated data (pass-through with size limits only), and AdaptiveFilter that adjusts based on response characteristics. 
Create FilterConfig type with per-profile settings stored in mcpd: { profileId: string, strategy: 'aggressive' | 'minimal' | 'adaptive', maxTokens: number, preserveFields: string[], summaryPrompt?: string }. Implement FilterStrategyComposer that chains multiple strategies. Support runtime strategy switching without proxy restart. + +### 11.6. Implement chunking and streaming for large data responses with pagination support + +**Status:** pending +**Dependencies:** 11.3, 11.5 + +Design pagination and streaming strategy for handling large data responses (100k+ rows from database MCPs) that cannot be simply filtered, supporting cursor-based pagination in the proxy layer. + +**Details:** + +Create src/services/pagination.ts with PaginationManager class handling: (1) Detection of large responses that require chunking (configurable threshold, default 10K rows), (2) Cursor-based pagination with stable cursors stored in proxy memory with TTL, (3) Response streaming using async iterators for progressive delivery, (4) Chunk size optimization based on estimated token count. Implement PagedResponse type with { data: any[], cursor?: string, hasMore: boolean, totalEstimate?: number, chunkIndex: number }. Create ChunkingStrategy interface for different data types - TabularChunker for SQL results, JSONChunker for nested objects, TextChunker for large text responses. Add pagination metadata to MCP tool responses so Claude can request next pages. Handle cursor expiration gracefully with re-query capability. + +### 11.7. Implement observability with metrics endpoint and structured logging for SRE monitoring + +**Status:** pending +**Dependencies:** 11.2, 11.3, 11.5 + +Create comprehensive metrics collection and exposure system with /metrics endpoint (Prometheus format) and structured JSON logging for monitoring proxy health, performance, and LLM efficiency. 
+ +**Details:** + +Create src/services/metrics.ts with MetricsCollector class tracking: requests_total (counter), request_duration_seconds (histogram), llm_inference_duration_seconds (histogram), filter_reduction_ratio (gauge - original_size/filtered_size), active_connections (gauge), error_total by error_type (counter), tokens_saved_total (counter). Implement /metrics HTTP endpoint on configurable port (separate from stdio MCP transport) serving Prometheus exposition format. Create structured logger in src/utils/logger.ts outputting JSON with fields: timestamp, level, requestId, toolName, phase (refine/forward/filter), duration_ms, input_tokens, output_tokens, reduction_percent. Add request tracing with correlation IDs propagated to mcpd. Include health check endpoint /health with component status (llm: ok/degraded, mcpd: ok/disconnected). + +### 11.8. Create integration tests and local development environment with docker-compose + +**Status:** pending +**Dependencies:** 11.1, 11.2, 11.3, 11.4, 11.5, 11.6, 11.7 + +Build comprehensive integration test suite testing the complete proxy flow against local mcpd and local Ollama, plus docker-compose setup for easy local development without external dependencies. + +**Details:** + +Create deploy/docker-compose.proxy.yml with services: ollama (with pre-pulled model), mcpd (from src/mcpd), postgres (for mcpd), and local-proxy. Add scripts/setup-local-dev.sh that pulls Ollama models, starts services, and verifies connectivity. Create integration test suite in tests/integration/ testing: (1) Full request flow from Claude-style request through proxy to mcpd and back, (2) LLM refinement actually modifies queries appropriately, (3) Response filtering reduces token count measurably, (4) Pagination works for large responses, (5) Error handling when Ollama is unavailable (falls back gracefully), (6) Metrics are recorded correctly during real requests. 
Create performance benchmark suite measuring latency overhead vs direct mcpd access. Document local development setup in LOCAL_DEV.md. diff --git a/.taskmaster/tasks/task_012.md b/.taskmaster/tasks/task_012.md new file mode 100644 index 0000000..1ab3cbe --- /dev/null +++ b/.taskmaster/tasks/task_012.md @@ -0,0 +1,153 @@ +# Task ID: 12 + +**Title:** Implement Local LLM Provider Integrations + +**Status:** pending + +**Dependencies:** 11 + +**Priority:** medium + +**Description:** Create adapters for different local LLM providers: Ollama, Gemini CLI, vLLM, and DeepSeek API for request refinement and response filtering. + +**Details:** + +Create LLM provider implementations: + +```typescript +// providers/ollama.ts +export class OllamaProvider implements LLMProvider { + constructor(private config: { host: string; model: string }) {} + + async refineQuery(params: RefineParams): Promise<any> { + const prompt = `You are helping refine a data request. +Tool: ${params.tool} +Original request: ${JSON.stringify(params.originalArgs)} +Context (what the user wants): ${params.context} + +Refine this query to be more specific. Output JSON only.`; + + const response = await fetch(`${this.config.host}/api/generate`, { + method: 'POST', + body: JSON.stringify({ model: this.config.model, prompt, format: 'json' }) + }); + return JSON.parse((await response.json()).response); + } + + async filterResponse(params: FilterParams): Promise<string> { + const prompt = `Filter this data to only include relevant information. +Query: ${JSON.stringify(params.query)} +Data: ${JSON.stringify(params.response).slice(0, 10000)} + +Extract only the relevant parts. Be concise. 
Max ${params.maxTokens} tokens.`; + + const response = await fetch(`${this.config.host}/api/generate`, { + method: 'POST', + body: JSON.stringify({ model: this.config.model, prompt }) + }); + return (await response.json()).response; + } +} + +// providers/gemini-cli.ts +export class GeminiCliProvider implements LLMProvider { + async refineQuery(params: RefineParams): Promise<any> { + const result = await execAsync( + `echo '${this.buildPrompt(params)}' | gemini -m gemini-2.0-flash` + ); + return JSON.parse(result.stdout); + } +} + +// providers/deepseek.ts +export class DeepSeekProvider implements LLMProvider { + constructor(private apiKey: string) {} + + async refineQuery(params: RefineParams): Promise<any> { + const response = await fetch('https://api.deepseek.com/v1/chat/completions', { + method: 'POST', + headers: { + 'Authorization': `Bearer ${this.apiKey}`, + 'Content-Type': 'application/json' + }, + body: JSON.stringify({ + model: 'deepseek-chat', + messages: [{ role: 'user', content: this.buildPrompt(params) }] + }) + }); + return JSON.parse((await response.json()).choices[0].message.content); + } +} + +// Factory +export function createLLMProvider(config: LLMConfig): LLMProvider { + switch (config.type) { + case 'ollama': return new OllamaProvider(config); + case 'gemini-cli': return new GeminiCliProvider(); + case 'deepseek': return new DeepSeekProvider(config.apiKey); + case 'vllm': return new VLLMProvider(config); + default: throw new Error(`Unknown LLM provider: ${config.type}`); + } +} +``` + +**Test Strategy:** + +Unit test each provider with mocked API responses. Integration test with local Ollama instance. Test fallback behavior when provider is unavailable. Benchmark token usage reduction. + +## Subtasks + +### 12.1. 
Implement OllamaProvider with TDD, health checks, and circuit breaker pattern + +**Status:** pending +**Dependencies:** None + +Create the Ollama LLM provider implementation with full TDD approach, including health check endpoint monitoring, circuit breaker for fault tolerance, and mock mode for testing without a running Ollama instance. + +**Details:** + +Create src/providers/ollama.ts implementing LLMProvider interface from Task 11. Write Vitest tests BEFORE implementation covering: (1) refineQuery() sends correct POST to /api/generate with model and format:json, (2) filterResponse() handles large responses by truncating input to 10K chars, (3) healthCheck() calls /api/tags endpoint and returns true if model exists, (4) Circuit breaker opens after 3 consecutive failures within 30s, trips for 60s, then half-opens, (5) Timeout handling with AbortController after configurable duration (default 30s), (6) Mock mode returns deterministic responses when OLLAMA_MOCK=true for CI/CD. Implement connection pooling using undici Agent. Add structured logging for SRE monitoring with fields: model, prompt_tokens, completion_tokens, latency_ms, error_type. Security: Sanitize all prompt inputs using PromptSanitizer from Task 11.4, validate JSON responses with Zod schema before parsing. Rate limiting: configurable requests-per-minute with token bucket algorithm. + +### 12.2. Implement GeminiCliProvider and DeepSeekProvider with security hardening + +**Status:** pending +**Dependencies:** 12.1 + +Create Gemini CLI provider using subprocess execution with shell injection prevention, and DeepSeek API provider with secure API key handling, both following TDD methodology. + +**Details:** + +Create src/providers/gemini-cli.ts: Use execa (not child_process.exec) to prevent shell injection - pass prompt via stdin pipe, not command line arguments. Implement buildPrompt() with template literals and JSON.stringify for safe interpolation. Add timeout handling (default 60s for CLI). 
Parse stdout as JSON with Zod validation. Health check: verify 'gemini' binary exists using which command. Create src/providers/deepseek.ts: Implement OpenAI-compatible API client with fetch. API key from config (never log or include in prompts). Implement retry with exponential backoff for 429/5xx responses. Circuit breaker for API unavailability. Both providers: Implement LLMProvider interface methods refineQuery() and filterResponse(). Add mock modes for testing. Security review: (1) No credentials in logged prompts, (2) Validate all API responses before parsing, (3) Sanitize user inputs in prompts using shared PromptSanitizer. + +### 12.3. Implement VLLMProvider with OpenAI-compatible API and batch inference support + +**Status:** pending +**Dependencies:** 12.1 + +Create vLLM provider supporting the OpenAI-compatible API endpoint, with batch inference optimization for processing multiple requests efficiently, and configurable model selection. + +**Details:** + +Create src/providers/vllm.ts implementing LLMProvider interface. vLLM exposes OpenAI-compatible endpoint at /v1/completions or /v1/chat/completions. Support both completion and chat modes via config. Implement batch inference: when multiple refineQuery/filterResponse calls arrive within batching window (default 50ms), combine into single API call with multiple prompts for better GPU utilization. Configuration: { host: string, model: string, maxTokens: number, temperature: number, batchWindowMs: number }. Health check: call /health or /v1/models endpoint. Implement request queuing with configurable max queue size. Circuit breaker pattern matching OllamaProvider. Add metrics collection: batch_size_histogram, queue_depth_gauge, inference_time_per_request. Security: Same prompt sanitization as other providers. Mock mode for CI/CD testing. + +### 12.4. 
Implement LLM provider factory with configuration validation and provider benchmarking utilities + +**Status:** pending +**Dependencies:** 12.1, 12.2, 12.3 + +Create the factory function and configuration system for instantiating LLM providers, plus benchmarking utilities for data scientists to compare provider performance, quality, and cost. + +**Details:** + +Create src/providers/factory.ts with createLLMProvider(config: LLMConfig): LLMProvider function. LLMConfig Zod schema: { type: 'ollama'|'gemini-cli'|'deepseek'|'vllm', ...provider-specific fields }. Validate config at construction time with descriptive errors. Create src/utils/benchmark.ts with ProviderBenchmark class: Methods: runBenchmark(provider, testCases): BenchmarkResult, compareBenchmarks(results[]): ComparisonReport. BenchmarkResult type: { provider: string, testCases: { input, output, latencyMs, inputTokens, outputTokens, qualityScore? }[], avgLatency, p95Latency, totalTokens, estimatedCost? }. Include standard test cases for filtering accuracy: database rows, Slack messages, Jira tickets with known 'correct' filtered outputs. Quality scoring: compare filtered output against golden reference using semantic similarity (optional LLM-as-judge). Export results as JSON and markdown table for documentation. Add CLI command: mcpctl benchmark-providers --providers ollama,deepseek --test-suite standard. + +### 12.5. Implement security review layer and comprehensive integration tests for all providers + +**Status:** pending +**Dependencies:** 12.1, 12.2, 12.3, 12.4 + +Create security middleware for prompt injection prevention across all providers, implement rate limiting, add comprehensive integration tests verifying provider interoperability, and document security controls. + +**Details:** + +Create src/providers/security.ts with: (1) PromptSanitizer class - detect and neutralize injection patterns: 'ignore previous', 'system:', 'assistant:', embedded JSON/XML that could hijack prompts. 
Use regex + heuristic scoring. (2) ResponseValidator - validate LLM outputs match expected schema, detect and reject responses that contain prompt leakage or injection artifacts. (3) RateLimiter - token bucket per provider with configurable limits, shared across provider instances. (4) AuditLogger - log all LLM interactions for security review: timestamp, provider, sanitized_prompt (no PII), response_length, flagged_patterns. Create tests/integration/providers.test.ts: Test all 4 providers with same test suite verifying interface compliance. Create SECURITY_AUDIT.md documenting: all security controls, threat model (prompt injection, data exfiltration, DoS), test coverage, and manual review checklist. Add to CI: security-focused test suite that must pass before merge. diff --git a/.taskmaster/tasks/task_013.md b/.taskmaster/tasks/task_013.md new file mode 100644 index 0000000..0960f61 --- /dev/null +++ b/.taskmaster/tasks/task_013.md @@ -0,0 +1,195 @@ +# Task ID: 13 + +**Title:** Implement MCP Request/Response Filtering Logic + +**Status:** pending + +**Dependencies:** 11, 12 + +**Priority:** medium + +**Description:** Create the intelligent filtering system that analyzes Claude's intent and filters MCP responses to minimize token usage while maximizing relevance. + +**Details:** + +Create filtering logic: + +```typescript +// services/filter-engine.ts +export class FilterEngine { + constructor(private llm: LLMProvider) {} + + // Analyze Claude's request to understand intent + async analyzeIntent(request: ToolCallRequest): Promise<IntentAnalysis> { + const prompt = `Analyze this MCP tool call to understand the user's intent: +Tool: ${request.toolName} +Arguments: ${JSON.stringify(request.arguments)} + +Output JSON: +{ + "intent": "description of what user wants", + "keywords": ["relevant", "keywords"], + "filters": { "date_range": "...", "categories": [...] 
}, + "maxResults": number +}`; + + return this.llm.analyze(prompt); + } + + // Filter response based on intent + async filterResponse( + response: any, + intent: IntentAnalysis, + tool: ToolDefinition + ): Promise<FilteredResponse> { + // Strategy 1: Structural filtering (if response is array) + if (Array.isArray(response)) { + const filtered = await this.filterArray(response, intent); + return { data: filtered, reduction: 1 - filtered.length / response.length }; + } + + // Strategy 2: Field selection (for objects) + if (typeof response === 'object') { + const relevant = await this.selectRelevantFields(response, intent); + return { data: relevant, reduction: this.calculateReduction(response, relevant) }; + } + + // Strategy 3: Text summarization (for large text responses) + if (typeof response === 'string' && response.length > 5000) { + const summary = await this.summarize(response, intent); + return { data: summary, reduction: 1 - summary.length / response.length }; + } + + return { data: response, reduction: 0 }; + } + + private async filterArray(items: any[], intent: IntentAnalysis): Promise<any[]> { + // Score each item for relevance + const scored = await Promise.all( + items.map(async (item) => ({ + item, + score: await this.scoreRelevance(item, intent) + })) + ); + + // Return top N most relevant + return scored + .sort((a, b) => b.score - a.score) + .slice(0, intent.maxResults || 10) + .map(s => s.item); + } + + private async scoreRelevance(item: any, intent: IntentAnalysis): Promise<number> { + const itemStr = JSON.stringify(item).toLowerCase(); + let score = 0; + + // Keyword matching + for (const keyword of intent.keywords) { + if (itemStr.includes(keyword.toLowerCase())) score += 1; + } + + // Use LLM for deeper analysis if needed + if (score === 0) { + score = await this.llm.scoreRelevance(item, intent.intent); + } + + return score; + } +} +``` + +Example filtering for Slack messages: +```typescript +// User asks: "Get Slack messages about security 
from my team" +const intent = { + intent: 'Find security-related team messages', + keywords: ['security', 'vulnerability', 'patch', 'CVE'], + filters: { channels: ['team-*', 'security-*'] }, + maxResults: 20 +}; + +// Filter 1000 messages down to 20 most relevant +``` + +**Test Strategy:** + +Test intent analysis with various queries. Test filtering reduces data size significantly. Benchmark relevance accuracy. Test with real Slack/Jira data samples. + +## Subtasks + +### 13.1. Create FilterEngine core infrastructure with TDD and MockLLMProvider + +**Status:** pending +**Dependencies:** None + +Set up the services/filter-engine.ts file structure with TypeScript interfaces, Vitest test infrastructure, and MockLLMProvider for local testing without external API dependencies. + +**Details:** + +Create src/services/filter-engine.ts with core types and interfaces. Define IntentAnalysis interface: { intent: string, keywords: string[], filters: Record<string, any>, maxResults: number, confidence: number }. Define FilteredResponse interface: { data: any, reduction: number, metadata: FilterMetadata }. Define FilterMetadata for explainability: { originalItemCount: number, filteredItemCount: number, removedItems: RemovedItemExplanation[], filterStrategy: string, scoringLatencyMs: number }. Define RemovedItemExplanation: { item: any, reason: string, score: number, threshold: number }. Create MockLLMProvider in tests/mocks/mock-llm-provider.ts that returns deterministic responses based on input patterns - essential for CI/CD without GPU. Configure Vitest with coverage requirements (>90%). Create test fixtures in tests/fixtures/ with sample MCP requests/responses for Slack, Jira, database queries. Include createMockToolCallRequest(), createMockIntentAnalysis(), createTestFilterEngine() test utilities. + +### 13.2. 
Implement analyzeIntent method with keyword extraction and configurable parameters + +**Status:** pending +**Dependencies:** 13.1 + +Create the intent analysis system that interprets Claude's MCP tool calls to extract user intent, relevant keywords, filters, and maximum results using LLM-based analysis with configurable prompts. + +**Details:** + +Implement FilterEngine.analyzeIntent(request: ToolCallRequest): Promise<IntentAnalysis> method. Create IntentAnalyzer class in src/services/intent-analyzer.ts with configurable prompt templates per MCP tool type. Design prompt engineering for reliable JSON output: include examples, schema definition, and output format instructions. Implement keyword extraction with stemming/normalization for better matching. Add confidence scoring to intent analysis (0-1 scale) for downstream filtering decisions. Support tool-specific intent patterns: Slack (channels, date ranges, users), Jira (project, status, assignee), Database (tables, columns, aggregations). Create IntentAnalysisConfig: { promptTemplate: string, maxKeywords: number, includeNegativeKeywords: boolean, confidenceThreshold: number }. Allow data scientists to configure weights and thresholds per MCP type via JSON config file. Implement caching of intent analysis for identical requests (LRU cache with TTL). Add metrics: intent_analysis_latency_ms histogram. + +### 13.3. Implement array filtering strategy with relevance scoring and explainability + +**Status:** pending +**Dependencies:** 13.1, 13.2 + +Create the structural filtering strategy for array responses with intelligent relevance scoring, keyword matching, LLM-based deep analysis, and detailed explainability for why items were removed. + +**Details:** + +Implement FilterEngine.filterArray(items: any[], intent: IntentAnalysis): Promise<FilteredArrayResult> in src/services/filter-strategies/array-filter.ts. 
Create RelevanceScorer class with configurable scoring: (1) Keyword matching score with configurable weights per keyword, (2) Field importance weights (title > description > metadata), (3) LLM-based semantic scoring for items with zero keyword matches, (4) Composite scoring with normalization. Implement explainability: for each removed item, record { item, reason: 'keyword_score_below_threshold' | 'llm_relevance_low' | 'exceeded_max_results', score, threshold }. Return scored items sorted by relevance with top N based on intent.maxResults. Handle nested arrays recursively. Add A/B testing support: FilterArrayConfig.abTestId allows comparing scoring algorithms. Expose metrics: items_before, items_after, reduction_ratio, avg_score, scoring_latency_ms. Implement batch scoring optimization: score multiple items in single LLM call when possible. + +### 13.4. Implement object field selection and text summarization strategies + +**Status:** pending +**Dependencies:** 13.1, 13.2 + +Create filtering strategies for object responses (field selection based on relevance) and large text responses (intelligent summarization) with configurable thresholds and explainability. + +**Details:** + +Create src/services/filter-strategies/object-filter.ts with selectRelevantFields(obj: object, intent: IntentAnalysis): Promise<FilteredObjectResult>. Implement field relevance scoring: (1) Field name keyword matching, (2) Field value relevance to intent, (3) Configurable always-include fields per object type (e.g., 'id', 'timestamp'). Create FieldSelectionConfig: { preserveFields: string[], maxDepth: number, maxFields: number }. Track removed fields in explainability metadata. Create src/services/filter-strategies/text-filter.ts with summarize(text: string, intent: IntentAnalysis): Promise<SummarizedTextResult>. 
Implement intelligent summarization: (1) Detect text type (log file, documentation, code), (2) Apply appropriate summarization strategy, (3) Preserve critical information based on intent keywords. Summarization threshold: 5000 chars (configurable). Calculate reduction ratio: 1 - summary.length / original.length. Add metrics: fields_removed, text_reduction_ratio, summarization_latency_ms. + +### 13.5. Implement streaming-compatible large dataset filtering with memory efficiency + +**Status:** pending +**Dependencies:** 13.1, 13.3 + +Create filtering logic that integrates with Task 11's chunking/streaming system to handle 100K+ item datasets without loading all data into memory, using incremental scoring and progressive filtering. + +**Details:** + +Create src/services/filter-strategies/streaming-filter.ts integrating with PaginationManager from Task 11.6. Implement StreamingFilterEngine with methods: (1) createFilterStream(dataStream: AsyncIterable<any[]>, intent): AsyncIterable<FilteredChunk>, (2) processChunk(chunk: any[], runningState: FilterState): Promise<FilteredChunk>. Design FilterState to maintain: running top-N items with scores, min score threshold (dynamically adjusted), chunk index, total items processed. Implement progressive threshold adjustment: as more items are seen, raise threshold to maintain O(maxResults) memory. Use heap data structure for efficient top-N maintenance. Create ChunkedFilterResult: { chunk: any[], chunkIndex: number, runningReduction: number, isComplete: boolean }. Memory budget: configurable max memory for filter state (default 50MB). Add backpressure handling for slow downstream consumers. Expose metrics: chunks_processed, peak_memory_bytes, progressive_threshold. + +### 13.6. 
Implement security layer preventing data leakage in filtered responses + +**Status:** pending +**Dependencies:** 13.1, 13.3, 13.4 + +Create security middleware that sanitizes filtered responses to prevent accidental exposure of PII, credentials, or sensitive data, with configurable detection patterns and audit logging. + +**Details:** + +Create src/services/filter-security.ts with ResponseSanitizer class. Implement sensitive data detection: (1) Regex patterns for API keys, passwords, tokens (AWS, GitHub, Slack, etc.), (2) PII patterns: email, phone, SSN, credit card, IP addresses, (3) Custom patterns configurable per MCP type. Create SanitizationConfig: { redactPatterns: RegExp[], piiDetection: boolean, auditSensitiveAccess: boolean, allowlist: string[] }. Implement redaction strategies: full replacement with [REDACTED], partial masking (show last 4 chars), or removal. Create FilterSecurityAudit log entry when sensitive data detected: { timestamp, toolName, patternMatched, fieldPath, actionTaken }. Integrate with FilterEngine.filterResponse() as final step before returning. Prevent filtered items from 'leaking back' via explainability metadata - sanitize removed item summaries too. Add metrics: sensitive_data_detected_count, redactions_applied, audit_log_entries. + +### 13.7. Implement A/B testing framework and SRE metrics for filter performance monitoring + +**Status:** pending +**Dependencies:** 13.1, 13.2, 13.3, 13.4, 13.5, 13.6 + +Create comprehensive A/B testing infrastructure for comparing filter strategies, plus Prometheus-compatible metrics exposure for SRE monitoring of filter performance and effectiveness. 
+ +**Details:** + +Create src/services/filter-metrics.ts with FilterMetricsCollector exposing Prometheus metrics: filter_requests_total (counter by tool, strategy), filter_duration_seconds (histogram), items_before_filter (histogram), items_after_filter (histogram), reduction_ratio (histogram), scoring_latency_seconds (histogram by strategy), sensitive_data_detections_total (counter). Create src/services/ab-testing.ts with ABTestingFramework class: Methods: assignExperiment(requestId): ExperimentAssignment, recordOutcome(requestId, metrics): void, getExperimentResults(experimentId): ABTestResults. ExperimentConfig: { id, strategies: FilterStrategy[], trafficSplit: number[], startDate, endDate }. Persist experiment assignments and outcomes for analysis. Create ABTestResults: { experimentId, strategyResults: { strategy, avgReduction, avgLatency, sampleSize }[], statisticalSignificance }. Integrate with FilterEngine: check experiment assignment, use assigned strategy, record outcome metrics. Add /metrics HTTP endpoint serving Prometheus exposition format. Create Grafana dashboard JSON template for filter monitoring. diff --git a/.taskmaster/tasks/task_014.md b/.taskmaster/tasks/task_014.md new file mode 100644 index 0000000..b837b22 --- /dev/null +++ b/.taskmaster/tasks/task_014.md @@ -0,0 +1,172 @@ +# Task ID: 14 + +**Title:** Implement Audit Logging System + +**Status:** pending + +**Dependencies:** 3, 6 + +**Priority:** medium + +**Description:** Create comprehensive audit logging for all MCP operations including who ran what, when, and what data was accessed. 
+ +**Details:** + +Create audit logging system: + +```typescript +// services/audit-logger.ts +export class AuditLogger { + constructor(private prisma: PrismaClient) {} + + async logMcpCall(params: { + userId: string; + sessionId: string; + serverId: string; + tool: string; + arguments: any; + responseSize: number; + filteredSize: number; + duration: number; + success: boolean; + error?: string; + }) { + await this.prisma.auditLog.create({ + data: { + userId: params.userId, + action: 'mcp_call', + resource: `${params.serverId}:${params.tool}`, + details: { + sessionId: params.sessionId, + arguments: params.arguments, + responseSize: params.responseSize, + filteredSize: params.filteredSize, + reductionPercent: Math.round((1 - params.filteredSize / params.responseSize) * 100), + duration: params.duration, + success: params.success, + error: params.error + } + } + }); + } + + async logServerAction(params: { + userId: string; + action: 'start' | 'stop' | 'configure'; + serverId: string; + details?: any; + }) { + await this.prisma.auditLog.create({ + data: { + userId: params.userId, + action: `server_${params.action}`, + resource: params.serverId, + details: params.details + } + }); + } + + async getAuditTrail(filters: { + userId?: string; + serverId?: string; + action?: string; + from?: Date; + to?: Date; + limit?: number; + }) { + return this.prisma.auditLog.findMany({ + where: { + userId: filters.userId, + resource: filters.serverId ? 
{ contains: filters.serverId } : undefined, + action: filters.action, + timestamp: { + gte: filters.from, + lte: filters.to + } + }, + orderBy: { timestamp: 'desc' }, + take: filters.limit || 100, + include: { user: true } + }); + } +} + +// CLI command for audit +program + .command('audit') + .description('View audit logs') + .option('--user <userId>', 'Filter by user') + .option('--server <serverId>', 'Filter by MCP server') + .option('--since <date>', 'Show logs since date') + .option('--limit <n>', 'Limit results', '50') + .action(async (options) => { + const logs = await client.get('/api/audit', options); + console.table(logs.map(l => ({ + TIME: l.timestamp, + USER: l.user?.email, + ACTION: l.action, + RESOURCE: l.resource + }))); + }); +``` + +**Test Strategy:** + +Test audit log creation for all operation types. Test query filtering works correctly. Test log retention/cleanup. Verify sensitive data is not logged. + +## Subtasks + +### 14.1. Design audit log schema and write TDD tests for AuditLogger methods + +**Status:** pending +**Dependencies:** None + +Define the AuditLog Prisma schema with SIEM-compatible structure, correlation IDs, and date partitioning support. Write comprehensive Vitest tests for all AuditLogger methods BEFORE implementation. 
+ +**Details:** + +Create src/mcpd/tests/unit/services/audit-logger.test.ts with TDD tests covering: (1) logMcpCall() creates audit record with correct fields including correlationId, sessionId, serverId, tool, sanitized arguments, responseSize, filteredSize, duration, success/error status; (2) logServerAction() logs start/stop/configure actions with serverId and details; (3) getAuditTrail() supports filtering by userId, serverId, action, date range, and limit; (4) Sensitive data scrubbing - verify arguments containing password, token, secret, apiKey, credentials patterns are redacted; (5) Structured JSON format compatible with Splunk/ELK (include timestamp in ISO8601, log level, correlation_id, user_id, action, resource fields). Create src/db/prisma/audit-log-schema.prisma addition with: correlationId (uuid), action (indexed), resource, details (Json), timestamp (indexed, for partitioning), userId (optional FK), responseTimeMs, success (boolean). Add @@index([timestamp, action]) and @@index([userId, timestamp]) for query performance per SRE requirements. + +### 14.2. Implement AuditLogger service with async buffered writes and sensitive data scrubbing + +**Status:** pending +**Dependencies:** 14.1 + +Implement the AuditLogger class with high-throughput async write buffer, batch inserts to prevent performance impact, and comprehensive sensitive data scrubbing to prevent credential leakage. + +**Details:** + +Create src/mcpd/src/services/audit-logger.ts implementing: Constructor accepting PrismaClient with configurable buffer settings (bufferSize: 100 default, flushIntervalMs: 1000 default). Implement logMcpCall() that adds entry to in-memory buffer, triggers flush when buffer full. Implement logServerAction() similarly. Implement private flushBuffer() using prisma.auditLog.createMany() for batch inserts. 
Implement scrubSensitiveData(obj: unknown): unknown that recursively traverses objects and redacts values for keys matching patterns: /password/i, /token/i, /secret/i, /apiKey/i, /credentials/i, /authorization/i - replace with '[REDACTED]'. Add correlationId generation using crypto.randomUUID(). Implement getAuditTrail() with Prisma query supporting all filter parameters from task spec. Add graceful shutdown: flush remaining buffer before process exit. Performance consideration: Use setImmediate/process.nextTick for non-blocking buffer operations. Add JSDoc documenting the async nature and security guarantees. + +### 14.3. Implement audit query API with aggregation support for data analysts + +**Status:** pending +**Dependencies:** 14.2 + +Create REST API endpoints for querying audit logs with aggregation capabilities (requests per user/server/time window) and export functionality (CSV/JSON) for data analyst dashboards and usage reports. + +**Details:** + +Create src/mcpd/src/routes/audit.ts with endpoints: GET /api/audit - paginated audit log query with filters (userId, serverId, action, from, to, limit, offset); GET /api/audit/aggregations - aggregation queries returning counts grouped by user, server, action, or time window (hourly/daily/weekly); GET /api/audit/export - export audit data as CSV or JSON file download with same filter support. Implement AuditQueryService in src/mcpd/src/services/audit-query.service.ts with methods: queryAuditLogs(filters: AuditFilters): Promise<PaginatedResult<AuditLog>>; getAggregations(groupBy: 'user' | 'server' | 'action' | 'hour' | 'day', filters: AuditFilters): Promise<AggregationResult[]>; exportToCsv(filters: AuditFilters): Promise<ReadableStream>; exportToJson(filters: AuditFilters): Promise<ReadableStream>. Use Prisma groupBy for aggregations. For CSV export, use streaming to handle large datasets without memory issues. Add rate limiting on export endpoint to prevent DoS. Write Zod schemas for all query parameters. 
+ +### 14.4. Implement CLI audit command with SRE-friendly output formats + +**Status:** pending +**Dependencies:** 14.3 + +Create the mcpctl audit CLI command with filters, multiple output formats (table/json/yaml for SIEM integration), and tail-like streaming capability for real-time log monitoring. + +**Details:** + +Create src/cli/src/commands/audit.ts implementing CommandModule with: 'mcpctl audit' - list recent audit logs; 'mcpctl audit --user <userId>' - filter by user; 'mcpctl audit --server <serverId>' - filter by MCP server; 'mcpctl audit --since <date>' - logs since date (supports ISO8601, relative like '1h', '24h', '7d'); 'mcpctl audit --action <action>' - filter by action type; 'mcpctl audit --limit <n>' - limit results (default 50); 'mcpctl audit --output json' - JSON output for jq piping; 'mcpctl audit --output yaml' - YAML output; 'mcpctl audit --follow' - stream new logs in real-time (WebSocket or polling); 'mcpctl audit export --format csv --since 7d > audit.csv' - export to file. Table output format: TIME | USER | ACTION | RESOURCE (aligned columns). JSON output must be valid JSON array parseable by jq. Add --no-color flag for CI environments. Use chalk for colored output (green=success, red=error actions). + +### 14.5. Implement log streaming to external SIEM systems and retention policy + +**Status:** pending +**Dependencies:** 14.2 + +Add support for streaming audit logs to external systems (Splunk HEC, ELK/Elasticsearch, generic webhook) and implement configurable log retention policy with automatic cleanup for compliance and storage management. + +**Details:** + +Create src/mcpd/src/services/audit-streaming.ts with AuditStreamingService supporting multiple destinations: SplunkHecDestination (HTTP Event Collector with HEC token auth), ElasticsearchDestination (bulk API with index templates), WebhookDestination (generic HTTP POST with configurable auth). 
Each destination implements AuditDestination interface: send(logs: AuditLog[]): Promise<void>; healthCheck(): Promise<boolean>. Implement retry logic with exponential backoff for failed sends. Create src/mcpd/src/services/audit-retention.ts with AuditRetentionService: configure(policy: RetentionPolicy): void where policy includes retentionDays (default 90), archiveEnabled (boolean), archiveDestination (S3 path or local path). Implement cleanup job using node-cron: DELETE FROM audit_logs WHERE timestamp < NOW() - retentionDays (with batched deletes to avoid long locks). Add archiveBeforeDelete option that exports to configured destination before deletion. Add configuration in .env: AUDIT_RETENTION_DAYS, AUDIT_SPLUNK_HEC_URL, AUDIT_SPLUNK_HEC_TOKEN, AUDIT_ELASTICSEARCH_URL. Write unit tests mocking external services. diff --git a/.taskmaster/tasks/task_015.md b/.taskmaster/tasks/task_015.md new file mode 100644 index 0000000..9a1dd3d --- /dev/null +++ b/.taskmaster/tasks/task_015.md @@ -0,0 +1,297 @@ +# Task ID: 15 + +**Title:** Create MCP Server Profiles Library + +**Status:** pending + +**Dependencies:** 4, 10 + +**Priority:** medium + +**Description:** Build a library of pre-configured MCP server profiles for popular tools (Slack, Jira, GitHub, Terraform, etc.) with setup guides and permission templates. + +**Details:** + +Create comprehensive server definitions: + +```typescript +// seed/mcp-servers.ts +export const mcpServerDefinitions = [ + { + name: 'slack', + type: 'slack', + displayName: 'Slack', + description: 'Access Slack channels, messages, and users', + command: 'npx', + args: ['-y', '@modelcontextprotocol/server-slack'], + envTemplate: { + SLACK_BOT_TOKEN: { + description: 'Slack Bot OAuth Token', + required: true, + secret: true, + setupUrl: 'https://api.slack.com/apps', + setupGuide: `## Slack MCP Setup\n\n1. Go to https://api.slack.com/apps\n2. Create new app or select existing\n3. Go to OAuth & Permissions\n4. 
Add scopes: channels:read, channels:history, users:read\n5. Install to workspace\n6. Copy Bot User OAuth Token` + }, + SLACK_TEAM_ID: { description: 'Slack Team/Workspace ID', required: true } + }, + defaultProfiles: [ + { name: 'read-only', config: { permissions: ['read'] } }, + { name: 'full-access', config: { permissions: ['read', 'write'] } } + ] + }, + { + name: 'jira', + type: 'jira', + displayName: 'Jira', + description: 'Access Jira issues, projects, and workflows', + command: 'npx', + args: ['-y', '@anthropic/mcp-server-jira'], + envTemplate: { + JIRA_URL: { description: 'Jira instance URL', required: true }, + JIRA_EMAIL: { description: 'Jira account email', required: true }, + JIRA_API_TOKEN: { + description: 'Jira API Token', + required: true, + secret: true, + setupUrl: 'https://id.atlassian.com/manage-profile/security/api-tokens', + setupGuide: `## Jira API Token Setup\n\n1. Go to https://id.atlassian.com/manage-profile/security/api-tokens\n2. Click Create API token\n3. Give it a label (e.g., "mcpctl")\n4. Copy the token` + } + }, + defaultProfiles: [ + { name: 'read-only', config: { permissions: ['read'], projects: ['*'] } }, + { name: 'project-limited', config: { permissions: ['read', 'write'], projects: [] } } + ] + }, + { + name: 'github', + type: 'github', + displayName: 'GitHub', + description: 'Access GitHub repositories, issues, and PRs', + command: 'npx', + args: ['-y', '@modelcontextprotocol/server-github'], + envTemplate: { + GITHUB_TOKEN: { + description: 'GitHub Personal Access Token', + required: true, + secret: true, + setupUrl: 'https://github.com/settings/tokens', + setupGuide: `## GitHub PAT Setup\n\n1. Go to https://github.com/settings/tokens\n2. Generate new token (classic)\n3. Select scopes: repo, read:user\n4. 
Copy token` + } + } + }, + { + name: 'terraform-docs', + type: 'terraform', + displayName: 'Terraform Documentation', + description: 'Access Terraform provider documentation', + command: 'npx', + args: ['-y', 'terraform-docs-mcp'], + envTemplate: {}, + defaultProfiles: [ + { name: 'aws-only', config: { providers: ['aws'] } }, + { name: 'all-providers', config: { providers: ['*'] } } + ] + } +]; +``` + +**Test Strategy:** + +Verify all server definitions have required fields. Test setup guides render correctly. Test default profiles work with actual MCP servers. + +## Subtasks + +### 15.1. Define TypeScript types and write TDD tests for MCP server profile schemas + +**Status:** pending +**Dependencies:** None + +Create comprehensive TypeScript interfaces and Zod validation schemas for MCP server profile definitions, including tests for all validation rules before implementation. + +**Details:** + +Create src/shared/src/types/mcp-profiles.ts with TypeScript interfaces: + +1. **Core Types**: + - `McpServerDefinition` - Main server definition with name, type, displayName, description, command, args, envTemplate, defaultProfiles, networkRequirements + - `EnvTemplateVariable` - Environment variable with description, required, secret, setupUrl, setupGuide, pattern (for validation) + - `DefaultProfile` - Profile configuration with name, config object, minimumScopes array + - `NetworkRequirement` - endpoints, ports, protocols for firewall documentation + +2. **Zod Schemas** in src/shared/src/schemas/mcp-profiles.schema.ts: + - Validate command is 'npx' or 'docker' or absolute path + - Validate envTemplate has at least one required variable for auth types + - Validate secret fields don't appear in args array + - Validate setupGuide is valid markdown with required sections + - Validate minimumScopes for each profile type + +3. 
**TDD Tests** in src/shared/src/__tests__/mcp-profiles.test.ts: + - Test valid definitions pass schema validation + - Test missing required fields fail validation + - Test invalid command types are rejected + - Test secret variable exposure in args is detected + - Test setupGuide markdown structure validation + - Test profile permission escalation detection + - Test networkRequirements field validation + +Export all types from src/shared/src/index.ts for use by other packages. + +### 15.2. Implement DevOps/SaaS MCP server profiles (Slack, Jira, GitHub, Terraform) + +**Status:** pending +**Dependencies:** 15.1 + +Create pre-configured MCP server profile definitions for common DevOps and SaaS tools with complete setup guides, minimum required scopes, and network requirements documentation. + +**Details:** + +Create src/mcpd/src/seed/mcp-servers/devops.ts with server definitions: + +1. **Slack Profile**: + - Command: npx -y @modelcontextprotocol/server-slack + - Required scopes: channels:read, channels:history, users:read (READ), plus channels:write, chat:write (WRITE) + - Network: api.slack.com:443/HTTPS, files.slack.com:443/HTTPS + - Profiles: read-only (minimum), full-access (with write scopes) + - Setup guide with step-by-step Slack app creation + +2. **Jira Profile**: + - Command: npx -y @anthropic/mcp-server-jira + - Required scopes: read:jira-work, read:jira-user (READ), write:jira-work (WRITE) + - Network: *.atlassian.net:443/HTTPS + - Profiles: read-only, project-limited (with project filter config) + - Setup guide for API token generation + +3. **GitHub Profile**: + - Command: npx -y @modelcontextprotocol/server-github + - Required scopes: repo:read, read:user (READ), repo:write, workflow (WRITE) + - Network: api.github.com:443/HTTPS, github.com:443/HTTPS + - Profiles: read-only, contributor, admin + - Setup guide for PAT creation with fine-grained tokens + +4. 
**Terraform Docs Profile**: + - Command: npx -y terraform-docs-mcp + - No auth required (public docs) + - Network: registry.terraform.io:443/HTTPS + - Profiles: aws-only, azure-only, gcp-only, all-providers + +Include mock validation endpoints for local testing in src/mcpd/src/seed/mcp-servers/__mocks__/devops-validators.ts + +### 15.3. Implement Data Platform MCP server profiles (BigQuery, Snowflake, dbt Cloud, Databricks, Airflow) + +**Status:** pending +**Dependencies:** 15.1 + +Create MCP server profile definitions for critical data platform tools with service account authentication patterns, connection string templates, and BI integration support. + +**Details:** + +Create src/mcpd/src/seed/mcp-servers/data-platform.ts with server definitions: + +1. **BigQuery Profile**: + - Command: npx -y @anthropic/mcp-server-bigquery (or community equivalent) + - Auth: Service account JSON file upload + - envTemplate: GOOGLE_APPLICATION_CREDENTIALS (path to JSON), BQ_PROJECT_ID + - Network: bigquery.googleapis.com:443/HTTPS, storage.googleapis.com:443/HTTPS + - Profiles: viewer (roles/bigquery.dataViewer), analyst (roles/bigquery.user), admin + +2. **Snowflake Profile**: + - Auth: Multi-step OAuth or key-pair authentication + - envTemplate: SNOWFLAKE_ACCOUNT, SNOWFLAKE_USER, SNOWFLAKE_WAREHOUSE, SNOWFLAKE_PRIVATE_KEY or SNOWFLAKE_PASSWORD + - Connection string pattern: snowflake://<user>@<account>/<warehouse> + - Network: <account>.snowflakecomputing.com:443/HTTPS + - Profiles: reader, analyst, developer + +3. **dbt Cloud Profile**: + - Command: npx -y @dbt-labs/mcp-server-dbt (or community) + - envTemplate: DBT_CLOUD_TOKEN, DBT_CLOUD_ACCOUNT_ID, DBT_CLOUD_PROJECT_ID + - Network: cloud.getdbt.com:443/HTTPS + - Profiles: viewer, developer, admin + +4. 
**Databricks Profile**: + - envTemplate: DATABRICKS_HOST, DATABRICKS_TOKEN, DATABRICKS_CLUSTER_ID (optional) + - Network: <workspace>.azuredatabricks.net:443/HTTPS or <workspace>.cloud.databricks.com:443/HTTPS + - Profiles: workspace-reader, job-runner, admin + +5. **Apache Airflow Profile**: + - envTemplate: AIRFLOW_URL, AIRFLOW_USERNAME, AIRFLOW_PASSWORD (basic) or AIRFLOW_API_KEY + - Network: <airflow-host>:8080/HTTP or :443/HTTPS + - Profiles: viewer, operator, admin + +Include connection string builder utilities and validators for each platform. + +### 15.4. Implement BI/Analytics tool MCP profiles (Tableau, Looker, Metabase, Grafana) + +**Status:** pending +**Dependencies:** 15.1 + +Create MCP server profile definitions for BI and analytics visualization tools commonly used by data analysts for report automation and dashboard access. + +**Details:** + +Create src/mcpd/src/seed/mcp-servers/analytics.ts with server definitions: + +1. **Tableau Profile**: + - Auth: Personal Access Token (PAT) or connected app JWT + - envTemplate: TABLEAU_SERVER_URL, TABLEAU_SITE_ID, TABLEAU_TOKEN_NAME, TABLEAU_TOKEN_SECRET + - Network: <tableau-server>:443/HTTPS (Tableau Cloud: online.tableau.com) + - Profiles: viewer (read dashboards), explorer (create workbooks), creator (full access) + - Setup guide for PAT generation in Tableau account settings + +2. **Looker Profile**: + - Auth: API3 client credentials + - envTemplate: LOOKER_BASE_URL, LOOKER_CLIENT_ID, LOOKER_CLIENT_SECRET + - Network: <instance>.cloud.looker.com:443/HTTPS + - Profiles: viewer, developer, admin + - Setup guide for API3 key creation + +3. **Metabase Profile**: + - Auth: Session token or API key + - envTemplate: METABASE_URL, METABASE_USERNAME, METABASE_PASSWORD or METABASE_API_KEY + - Network: <metabase-host>:3000/HTTP or :443/HTTPS + - Profiles: viewer, analyst, admin + - Note: Self-hosted vs Cloud configuration differences + +4. 
**Grafana Profile**: + - Auth: API key or service account token + - envTemplate: GRAFANA_URL, GRAFANA_API_KEY or GRAFANA_SERVICE_ACCOUNT_TOKEN + - Network: <grafana-host>:3000/HTTP or :443/HTTPS + - Profiles: viewer, editor, admin + - Setup guide for service account token creation + +All profiles should include query/export permissions appropriate for analyst workflows (read dashboards, export data, schedule reports where supported). + +### 15.5. Create profile registry, validation service, and network requirements documentation generator + +**Status:** pending +**Dependencies:** 15.2, 15.3, 15.4 + +Build the central profile registry that indexes all MCP server definitions, provides validation services, and generates network requirements documentation for firewall planning. + +**Details:** + +Create src/mcpd/src/services/mcp-profile-registry.ts: + +1. **McpProfileRegistry Class**: + - `getAllDefinitions()` - Returns all registered MCP server definitions + - `getDefinitionByName(name: string)` - Lookup by server name + - `getDefinitionsByCategory(category: 'devops' | 'data-platform' | 'analytics')` - Filter by category + - `searchDefinitions(query: string)` - Search by name, description, or tags + - `validateDefinition(def: McpServerDefinition)` - Validate against Zod schema + - `registerCustomDefinition(def: McpServerDefinition)` - Add user-defined servers + +2. **ProfileValidationService** in src/mcpd/src/services/profile-validation.ts: + - `validateCredentials(serverName: string, env: Record<string, string>)` - Test credentials with mock endpoints + - `checkMinimumScopes(serverName: string, profile: string)` - Verify profile has required scopes + - `detectPermissionEscalation(base: string[], requested: string[])` - Security check for scope expansion + +3. 
**NetworkDocsGenerator** in src/mcpd/src/services/network-docs-generator.ts: + - `generateFirewallRules(serverNames: string[])` - Output firewall rules in various formats (iptables, AWS SG, Azure NSG) + - `generateNetworkDiagram(projectName: string)` - Mermaid diagram of network flows + - `exportToCSV()` - Export all endpoints/ports/protocols for firewall team + +4. **Seed Database Integration**: + - Update src/mcpd/src/seed/index.ts to load all profile definitions + - Create `seedMcpServerLibrary()` function that populates database from profile registry + - Support incremental updates when new profiles are added + +Export registry and services from src/mcpd/src/index.ts diff --git a/.taskmaster/tasks/task_016.md b/.taskmaster/tasks/task_016.md new file mode 100644 index 0000000..0280127 --- /dev/null +++ b/.taskmaster/tasks/task_016.md @@ -0,0 +1,168 @@ +# Task ID: 16 + +**Title:** Implement Instance Lifecycle Management + +**Status:** pending + +**Dependencies:** 6, 8 + +**Priority:** medium + +**Description:** Create APIs and commands for managing MCP server instance lifecycle: start, stop, restart, status, and health monitoring. 
+ +**Details:** + +Create instance management: + +```typescript +// routes/instances.ts +app.post('/api/instances', async (req) => { + const { profileId } = req.body; + const profile = await prisma.mcpProfile.findUnique({ + where: { id: profileId }, + include: { server: true } + }); + + const containerManager = new ContainerManager(); + const containerId = await containerManager.startMcpServer(profile.server, profile.config); + + const instance = await prisma.mcpInstance.create({ + data: { + serverId: profile.serverId, + containerId, + status: 'running', + config: profile.config + } + }); + + await auditLogger.logServerAction({ + userId: req.user.id, + action: 'start', + serverId: profile.server.name, + details: { instanceId: instance.id, containerId } + }); + + return instance; +}); + +app.delete('/api/instances/:id', async (req) => { + const instance = await prisma.mcpInstance.findUnique({ where: { id: req.params.id } }); + const containerManager = new ContainerManager(); + await containerManager.stopMcpServer(instance.containerId); + await prisma.mcpInstance.delete({ where: { id: req.params.id } }); +}); + +app.post('/api/instances/:id/restart', async (req) => { + const instance = await prisma.mcpInstance.findUnique({ + where: { id: req.params.id }, + include: { server: true } + }); + const containerManager = new ContainerManager(); + await containerManager.stopMcpServer(instance.containerId); + const newContainerId = await containerManager.startMcpServer(instance.server, instance.config); + return prisma.mcpInstance.update({ + where: { id: req.params.id }, + data: { containerId: newContainerId, status: 'running' } + }); +}); + +// Health monitoring +app.get('/api/instances/:id/health', async (req) => { + const instance = await prisma.mcpInstance.findUnique({ where: { id: req.params.id } }); + const containerManager = new ContainerManager(); + const status = await containerManager.getMcpServerStatus(instance.containerId); + const logs = await 
containerManager.getContainerLogs(instance.containerId, { tail: 50 }); + return { status, logs, lastChecked: new Date() }; +}); + +// CLI commands +program + .command('start') + .argument('<profile>', 'Profile name') + .action(async (profile) => { + const instance = await client.post('/api/instances', { profileName: profile }); + console.log(`Started instance ${instance.id}`); + }); + +program + .command('stop') + .argument('<instance-id>', 'Instance ID') + .action(async (id) => { + await client.delete(`/api/instances/${id}`); + console.log(`Stopped instance ${id}`); + }); + +program + .command('logs') + .argument('<instance-id>', 'Instance ID') + .option('-f, --follow', 'Follow logs') + .action(async (id, options) => { + if (options.follow) { + // Stream logs + } else { + const { logs } = await client.get(`/api/instances/${id}/health`); + console.log(logs); + } + }); +``` + +**Test Strategy:** + +Test instance start/stop/restart lifecycle. Test health monitoring updates status correctly. Test logs streaming. Integration test with real Docker containers. + +## Subtasks + +### 16.1. Write TDD test suites for Instance Lifecycle API endpoints + +**Status:** pending +**Dependencies:** None + +Create comprehensive Vitest test suites for all instance lifecycle endpoints (POST /api/instances, DELETE /api/instances/:id, POST /api/instances/:id/restart, GET /api/instances/:id/health, GET /api/instances/:id/logs) BEFORE implementation using mocked ContainerManager and Prisma. + +**Details:** + +Write comprehensive Vitest tests following TDD methodology for all instance lifecycle API endpoints. 
Tests must cover: (1) POST /api/instances - successful instance creation from profile, invalid profileId handling, ContainerManager.startMcpServer mock expectations, audit logging verification; (2) DELETE /api/instances/:id - successful stop and cleanup, non-existent instance handling, containerId validation to prevent targeting unmanaged containers; (3) POST /api/instances/:id/restart - graceful shutdown with drainTimeout for data pipelines, proper sequencing of stop/start, config preservation; (4) GET /api/instances/:id/health - Prometheus-compatible metrics format, liveness/readiness probe responses, alerting threshold configuration (unhealthy for N minutes), JSON health object structure; (5) GET /api/instances/:id/logs - pagination with cursor, log injection prevention (sanitize ANSI codes and control characters), tail parameter validation. Use msw or vitest-fetch-mock for request mocking. All tests should fail initially (TDD red phase). Include security tests: validate containerId format (UUIDs only), reject path traversal in instance IDs, verify only containers with mcpctl labels can be controlled. + +### 16.2. Write TDD test suites for CLI instance management commands + +**Status:** pending +**Dependencies:** None + +Create Vitest test suites for CLI commands (start, stop, restart, logs, status) BEFORE implementation, testing argument parsing, API client calls, output formatting, and WebSocket/SSE log streaming. 
+ +**Details:** + +Write comprehensive Vitest tests for all CLI commands following TDD methodology: (1) 'mcpctl start <profile>' - test profile name validation, API call to POST /api/instances, success/error output formatting, instance ID display; (2) 'mcpctl stop <instance-id>' - test instance ID format validation, API call to DELETE /api/instances/:id, graceful shutdown with --drain-timeout flag for data pipeline instances, confirmation prompt (--yes to skip); (3) 'mcpctl restart <instance-id>' - test restart with optional --drain-timeout, API call to POST /api/instances/:id/restart; (4) 'mcpctl logs <instance-id>' - test -f/--follow flag for streaming, --tail N option, --since timestamp option, WebSocket connection for live streaming, graceful disconnect handling; (5) 'mcpctl status <instance-id>' - test health status display, readiness/liveness indicators, uptime calculation, JSON output format. Test network boundary scenarios: WebSocket reconnection on disconnect, SSE fallback when WebSocket unavailable, proxy-friendly streaming options. Include exit code tests for scripting compatibility. + +### 16.3. Implement Instance Lifecycle API endpoints with security and audit logging + +**Status:** pending +**Dependencies:** 16.1 + +Implement all instance lifecycle API endpoints (create, stop, restart, health, logs) passing TDD tests from subtask 1, with security validation, graceful shutdown support, and comprehensive audit logging integration. 
+ +**Details:** + +Implement routes/instances.ts with all lifecycle endpoints: (1) POST /api/instances - validate profileId exists, call ContainerManager.startMcpServer with profile config, create McpInstance record in Prisma, emit audit log with auditLogger.logServerAction({action: 'start', ...}); (2) DELETE /api/instances/:id - validate instance exists and containerId format is UUID, verify container has mcpctl management labels before stopping, call ContainerManager.stopMcpServer with configurable drainTimeout for graceful shutdown of data pipelines, delete McpInstance record, emit audit log; (3) POST /api/instances/:id/restart - implement atomic restart with stop-then-start, preserve config across restart, support drainTimeout query parameter for graceful drain before restart; (4) GET /api/instances/:id/health - call ContainerManager.getMcpServerStatus and getHealthStatus, return structured health object with {status, lastChecked, readiness, liveness, consecutiveFailures, alertThreshold}, format compatible with Prometheus/Grafana alerting; (5) GET /api/instances/:id/logs - call ContainerManager.getContainerLogs with cursor-based pagination, sanitize log output to prevent log injection (strip ANSI escape sequences, null bytes, control characters), support ELK/Loki-compatible structured JSON format. Implement security middleware to validate all containerIds are managed by mcpctl. + +### 16.4. Implement CLI commands for instance lifecycle with streaming log support + +**Status:** pending +**Dependencies:** 16.2, 16.3 + +Implement CLI commands (start, stop, restart, logs, status) passing TDD tests from subtask 2, including WebSocket/SSE log streaming that works across network boundaries. 
+ +**Details:** + +Implement commands/instances.ts with all CLI commands: (1) 'start <profile>' - call API client.post('/api/instances', {profileName: profile}), display instance ID and status, exit code 0 on success; (2) 'stop <instance-id>' - prompt for confirmation unless --yes flag, support --drain-timeout <seconds> for data pipeline graceful shutdown, call client.delete(`/api/instances/${id}`), display stop confirmation; (3) 'restart <instance-id>' - support --drain-timeout flag, call client.post(`/api/instances/${id}/restart`), display new container ID; (4) 'logs <instance-id>' - implement dual transport: WebSocket primary with SSE fallback for proxy-friendly environments, -f/--follow starts WebSocket connection to /api/instances/:id/logs/stream, --tail N parameter (default 50), --since timestamp filter, handle reconnection on disconnect with exponential backoff, gracefully handle Ctrl+C; (5) 'status <instance-id>' - call GET /api/instances/:id/health, display formatted health info with readiness/liveness indicators, support -o json output. Implement WebSocket client that works through corporate proxies (use HTTP upgrade with proper headers). For non-streaming logs, paginate through cursor-based API. + +### 16.5. Create integration tests and docker-compose environment for instance lifecycle + +**Status:** pending +**Dependencies:** 16.3, 16.4 + +Build comprehensive integration test suite testing complete instance lifecycle against real Docker containers, including health monitoring with alerting thresholds and log streaming across network boundaries. 
+ +**Details:** + +Create integration test suite in tests/integration/instance-lifecycle.test.ts: (1) Full lifecycle test - create instance from profile, verify container running with 'docker ps', check health endpoint returns running status, stream logs with follow mode, restart instance (verify old container stopped, new container running), stop with drain timeout, verify container removed; (2) Health monitoring tests - configure alerting threshold (e.g., 3 consecutive failures), simulate unhealthy container, verify health endpoint returns correct consecutiveFailures count, test readiness probe (container ready to serve), test liveness probe (container process alive), verify Prometheus-format metrics exportable at /metrics; (3) Log streaming integration - test WebSocket streaming receives live container output, test SSE fallback when WebSocket unavailable, test log format is ELK/Loki compatible (JSON with timestamp, level, message fields), verify log injection prevention (send malicious log content, verify sanitized output); (4) Data pipeline graceful shutdown - create long-running instance simulating data processing, send stop with drain timeout, verify container receives SIGTERM, verify container has grace period before SIGKILL; (5) Network boundary tests - configure proxy simulation, verify log streaming works through proxy. Update docker-compose.yml to include test-mcp-server with configurable logging behavior. diff --git a/.taskmaster/tasks/task_017.md b/.taskmaster/tasks/task_017.md new file mode 100644 index 0000000..30f011a --- /dev/null +++ b/.taskmaster/tasks/task_017.md @@ -0,0 +1,443 @@ +# Task ID: 17 + +**Title:** Implement Kubernetes Support Architecture + +**Status:** pending + +**Dependencies:** 6, 16 + +**Priority:** low + +**Description:** Design and implement the abstraction layer for Kubernetes deployment support, preparing for future pod scheduling of MCP instances. 
+ +**Details:** + +Create orchestrator abstraction: + +```typescript +// services/orchestrator.ts +export interface McpOrchestrator { + startServer(server: McpServer, config: any): Promise<string>; + stopServer(instanceId: string): Promise<void>; + getStatus(instanceId: string): Promise<InstanceStatus>; + getLogs(instanceId: string, options: LogOptions): Promise<string>; + listInstances(filters?: InstanceFilters): Promise<Instance[]>; +} + +// Docker implementation (current) +export class DockerOrchestrator implements McpOrchestrator { + private docker: Docker; + // ... existing Docker implementation +} + +// Kubernetes implementation (future-ready) +export class KubernetesOrchestrator implements McpOrchestrator { + private k8sClient: KubernetesClient; + + constructor(config: K8sConfig) { + this.k8sClient = new KubernetesClient(config); + } + + async startServer(server: McpServer, config: any): Promise<string> { + const pod = { + apiVersion: 'v1', + kind: 'Pod', + metadata: { + name: `mcp-${server.name}-${Date.now()}`, + labels: { + 'mcpctl.io/server': server.name, + 'mcpctl.io/managed': 'true' + } + }, + spec: { + containers: [{ + name: 'mcp-server', + image: server.image || 'node:20-alpine', + command: this.buildCommand(server), + env: this.buildEnvVars(config), + resources: { + requests: { memory: '128Mi', cpu: '100m' }, + limits: { memory: '512Mi', cpu: '500m' } + } + }], + restartPolicy: 'Always' + } + }; + + const created = await this.k8sClient.createPod(pod); + return created.metadata.name; + } + + // ... 
other K8s implementations +} + +// Factory based on configuration +export function createOrchestrator(config: OrchestratorConfig): McpOrchestrator { + switch (config.type) { + case 'docker': return new DockerOrchestrator(config.docker); + case 'kubernetes': return new KubernetesOrchestrator(config.kubernetes); + default: throw new Error(`Unknown orchestrator: ${config.type}`); + } +} +``` + +Configuration: +```yaml +orchestrator: + type: docker # or 'kubernetes' + docker: + socketPath: /var/run/docker.sock + kubernetes: + namespace: mcpctl + kubeconfig: /path/to/kubeconfig +``` + +**Test Strategy:** + +Unit test orchestrator interface compliance for both implementations. Integration test Docker implementation. Mock Kubernetes API for K8s implementation tests. + +## Subtasks + +### 17.1. Define K8s-specific interfaces and write TDD tests for KubernetesOrchestrator + +**Status:** pending +**Dependencies:** None + +Extend the McpOrchestrator interface (from Task 6) with Kubernetes-specific types and write comprehensive Vitest unit tests for all KubernetesOrchestrator methods BEFORE implementation using mocked @kubernetes/client-node. 
+ +**Details:** + +Create src/shared/src/types/kubernetes.ts with K8s-specific types: + +```typescript +import { McpOrchestrator, McpServer, InstanceStatus, LogOptions, InstanceFilters } from './orchestrator'; + +export interface K8sConfig { + namespace: string; + kubeconfig?: string; // Path to kubeconfig file + inCluster?: boolean; // Use in-cluster config + context?: string; // Specific kubeconfig context +} + +export interface K8sPodMetadata { + name: string; + namespace: string; + labels: Record<string, string>; + annotations: Record<string, string>; + uid: string; +} + +export interface K8sResourceRequirements { + requests: { memory: string; cpu: string }; + limits: { memory: string; cpu: string }; +} + +export interface K8sSecurityContext { + runAsNonRoot: boolean; + runAsUser: number; + readOnlyRootFilesystem: boolean; + allowPrivilegeEscalation: boolean; + capabilities: { drop: string[] }; +} +``` + +Create src/mcpd/tests/unit/services/kubernetes-orchestrator.test.ts with comprehensive TDD tests: + +1. Constructor tests: verify kubeconfig loading (file path vs in-cluster), namespace validation, error handling for missing config +2. startServer() tests: verify Pod spec generation includes security context, resource limits, labels, command building, env vars +3. stopServer() tests: verify graceful pod termination, wait for completion, error handling for non-existent pods +4. getStatus() tests: verify status mapping from K8s pod phases (Pending, Running, Succeeded, Failed, Unknown) to InstanceStatus +5. getLogs() tests: verify log options (tail, follow, since, timestamps) are mapped correctly to K8s log API +6. listInstances() tests: verify label selector filtering works, pagination handling for large deployments + +Mock @kubernetes/client-node CoreV1Api using vi.mock() (Vitest's module-mocking API) with proper type definitions. All tests should fail initially (TDD red phase). + +### 17.2. 
Implement KubernetesOrchestrator class with Pod security contexts and resource management + +**Status:** pending +**Dependencies:** 17.1 + +Implement the KubernetesOrchestrator class using @kubernetes/client-node, with all methods passing TDD tests from subtask 1, including SRE-approved pod security contexts, resource requests/limits, and proper label conventions. + +**Details:** + +Install @kubernetes/client-node in src/mcpd. Create src/mcpd/src/services/kubernetes-orchestrator.ts: + +```typescript +import * as k8s from '@kubernetes/client-node'; +import { McpOrchestrator, McpServer, InstanceStatus, LogOptions, InstanceFilters, Instance } from '@mcpctl/shared'; +import { K8sConfig, K8sSecurityContext, K8sResourceRequirements } from '@mcpctl/shared'; + +export class KubernetesOrchestrator implements McpOrchestrator { + private coreApi: k8s.CoreV1Api; + private namespace: string; + + constructor(config: K8sConfig) { + const kc = new k8s.KubeConfig(); + if (config.inCluster) { + kc.loadFromCluster(); + } else if (config.kubeconfig) { + kc.loadFromFile(config.kubeconfig); + } else { + kc.loadFromDefault(); + } + if (config.context) kc.setCurrentContext(config.context); + this.coreApi = kc.makeApiClient(k8s.CoreV1Api); + this.namespace = config.namespace; + } + + async startServer(server: McpServer, config: any): Promise<string> { + const podName = `mcp-${server.name}-${Date.now()}`; + const pod: k8s.V1Pod = { + apiVersion: 'v1', + kind: 'Pod', + metadata: { + name: podName, + namespace: this.namespace, + labels: { + 'mcpctl.io/server': server.name, + 'mcpctl.io/managed': 'true', + 'app.kubernetes.io/name': `mcp-${server.name}`, + 'app.kubernetes.io/component': 'mcp-server', + 'app.kubernetes.io/managed-by': 'mcpctl' + }, + annotations: { + 'mcpctl.io/created-at': new Date().toISOString() + } + }, + spec: { + containers: [{ + name: 'mcp-server', + image: server.image || 'node:20-alpine', + command: this.buildCommand(server), + env: this.buildEnvVars(config), + 
resources: this.getResourceRequirements(config), + securityContext: this.getSecurityContext() + }], + securityContext: { runAsNonRoot: true, runAsUser: 1000, fsGroup: 1000 }, + restartPolicy: 'Always', + serviceAccountName: config.serviceAccount || 'default' + } + }; + const created = await this.coreApi.createNamespacedPod(this.namespace, pod); + return created.body.metadata!.name!; + } + + private getSecurityContext(): k8s.V1SecurityContext { + return { + runAsNonRoot: true, + runAsUser: 1000, + readOnlyRootFilesystem: true, + allowPrivilegeEscalation: false, + capabilities: { drop: ['ALL'] } + }; + } + + private getResourceRequirements(config: any): k8s.V1ResourceRequirements { + return { + requests: { memory: config.memoryRequest || '128Mi', cpu: config.cpuRequest || '100m' }, + limits: { memory: config.memoryLimit || '512Mi', cpu: config.cpuLimit || '500m' } + }; + } + // ... implement stopServer, getStatus, getLogs, listInstances +} +``` + +Implement all remaining methods with proper error handling and K8s API error translation. + +### 17.3. Implement createOrchestrator factory function and configuration schema + +**Status:** pending +**Dependencies:** 17.2 + +Create the orchestrator factory function that instantiates DockerOrchestrator or KubernetesOrchestrator based on configuration, with Zod schema validation and configuration file support. 
+ +**Details:** + +Create src/mcpd/src/services/orchestrator-factory.ts: + +```typescript +import { z } from 'zod'; +import { McpOrchestrator } from '@mcpctl/shared'; +import { DockerOrchestrator } from './container-manager'; // From Task 6 +import { KubernetesOrchestrator } from './kubernetes-orchestrator'; + +const DockerConfigSchema = z.object({ + socketPath: z.string().default('/var/run/docker.sock'), + host: z.string().optional(), + port: z.number().optional(), + network: z.string().default('mcpctl-network') +}); + +const KubernetesConfigSchema = z.object({ + namespace: z.string().default('mcpctl'), + kubeconfig: z.string().optional(), + inCluster: z.boolean().default(false), + context: z.string().optional() +}); + +const OrchestratorConfigSchema = z.discriminatedUnion('type', [ + z.object({ type: z.literal('docker'), docker: DockerConfigSchema }), + z.object({ type: z.literal('kubernetes'), kubernetes: KubernetesConfigSchema }) +]); + +export type OrchestratorConfig = z.infer<typeof OrchestratorConfigSchema>; + +export function createOrchestrator(config: OrchestratorConfig): McpOrchestrator { + const validated = OrchestratorConfigSchema.parse(config); + switch (validated.type) { + case 'docker': + return new DockerOrchestrator(validated.docker); + case 'kubernetes': + return new KubernetesOrchestrator(validated.kubernetes); + default: + throw new Error(`Unknown orchestrator type`); + } +} +``` + +Create src/mcpd/src/config/orchestrator.ts for loading config from environment variables and config files (supporting both YAML and JSON). Write TDD tests in src/mcpd/tests/unit/services/orchestrator-factory.test.ts BEFORE implementation: + +1. Test factory creates DockerOrchestrator when type='docker' +2. Test factory creates KubernetesOrchestrator when type='kubernetes' +3. Test factory throws on invalid type +4. Test Zod validation rejects invalid configs +5. Test default values are applied correctly +6. 
Test config loading from MCPCTL_ORCHESTRATOR_TYPE env var + +### 17.4. Implement K8s NetworkPolicy and PersistentVolumeClaim builders for MCP server isolation + +**Status:** pending +**Dependencies:** 17.2 + +Create resource builders for Kubernetes NetworkPolicy (network isolation between MCP servers) and PersistentVolumeClaim (for stateful data MCPs like caching or GPU providers) with proper annotations for observability. + +**Details:** + +Create src/mcpd/src/services/k8s-resources.ts with resource builder functions: + +```typescript +import * as k8s from '@kubernetes/client-node'; + +export interface NetworkPolicyConfig { + serverName: string; + namespace: string; + allowEgress?: string[]; // CIDR blocks or service names to allow + allowIngress?: string[]; // Pod labels allowed to connect +} + +export function buildNetworkPolicy(config: NetworkPolicyConfig): k8s.V1NetworkPolicy { + return { + apiVersion: 'networking.k8s.io/v1', + kind: 'NetworkPolicy', + metadata: { + name: `mcp-${config.serverName}-netpol`, + namespace: config.namespace, + labels: { 'mcpctl.io/server': config.serverName, 'mcpctl.io/managed': 'true' } + }, + spec: { + podSelector: { matchLabels: { 'mcpctl.io/server': config.serverName } }, + policyTypes: ['Ingress', 'Egress'], + ingress: [{ + from: [{ podSelector: { matchLabels: { 'mcpctl.io/component': 'local-proxy' } } }] + }], + egress: config.allowEgress?.map(cidr => ({ + to: [{ ipBlock: { cidr } }] + })) || [{ to: [{ ipBlock: { cidr: '0.0.0.0/0' } }] }] // Default: allow all egress + } + }; +} + +export interface PVCConfig { + serverName: string; + namespace: string; + storageSize: string; // e.g., '1Gi' + storageClass?: string; + accessModes?: string[]; +} + +export function buildPVC(config: PVCConfig): k8s.V1PersistentVolumeClaim { + return { + apiVersion: 'v1', + kind: 'PersistentVolumeClaim', + metadata: { + name: `mcp-${config.serverName}-data`, + namespace: config.namespace, + labels: { 'mcpctl.io/server': config.serverName, 
'mcpctl.io/managed': 'true' }, + annotations: { + 'mcpctl.io/purpose': 'mcp-server-cache', + 'mcpctl.io/created-at': new Date().toISOString() + } + }, + spec: { + accessModes: config.accessModes || ['ReadWriteOnce'], + storageClassName: config.storageClass, + resources: { requests: { storage: config.storageSize } } + } + }; +} + +export function buildGpuAffinityRules(gpuType: string): k8s.V1Affinity { + return { + nodeAffinity: { + requiredDuringSchedulingIgnoredDuringExecution: { + nodeSelectorTerms: [{ + matchExpressions: [{ + key: 'nvidia.com/gpu.product', + operator: 'In', + values: [gpuType] + }] + }] + } + } + }; +} +``` + +Write TDD tests in src/mcpd/tests/unit/services/k8s-resources.test.ts verifying all resource builders generate valid K8s manifests. + +### 17.5. Create integration tests with kind/k3d and document K8s deployment architecture + +**Status:** pending +**Dependencies:** 17.2, 17.3, 17.4 + +Build integration test suite using kind or k3d for local K8s cluster testing, create comprehensive SRE documentation covering deployment architecture, resource recommendations, and network requirements. + +**Details:** + +Create src/mcpd/tests/integration/kubernetes/ directory with integration tests: + +1. Create setup script src/mcpd/tests/integration/kubernetes/setup-kind.ts: +```typescript +import { execSync } from 'child_process'; + +export async function setupKindCluster(): Promise<void> { + execSync('kind create cluster --name mcpctl-test --config tests/integration/kubernetes/kind-config.yaml', { stdio: 'inherit' }); +} + +export async function teardownKindCluster(): Promise<void> { + execSync('kind delete cluster --name mcpctl-test', { stdio: 'inherit' }); +} +``` + +2. Create kind-config.yaml with proper resource limits +3. 
Create kubernetes-orchestrator.integration.test.ts testing: + - Pod creation and deletion lifecycle + - Status monitoring through pod phases + - Log retrieval from running pods + - NetworkPolicy enforcement (cannot reach blocked endpoints) + - PVC mounting for stateful MCPs + +4. Create src/mcpd/docs/KUBERNETES_DEPLOYMENT.md documenting: + - Architecture overview: mcpctl namespace, resource types, label conventions + - Security: Pod security standards (restricted), NetworkPolicies, ServiceAccounts + - SRE recommendations: HPA configurations, PDB templates, monitoring with Prometheus labels + - Resource sizing guide: Small (128Mi/100m), Medium (512Mi/500m), Large (2Gi/1000m) + - Network requirements: Required egress rules per MCP server type, ingress from local-proxy + - Troubleshooting: Common issues, kubectl commands, log access + - GPU support: Node affinity, NVIDIA device plugin requirements + +5. Create example manifests in src/mcpd/examples/k8s/: + - namespace.yaml, rbac.yaml, networkpolicy.yaml, sample-mcp-pod.yaml + +Integration tests should skip gracefully when kind is not available (CI compatibility). diff --git a/.taskmaster/tasks/task_018.md b/.taskmaster/tasks/task_018.md new file mode 100644 index 0000000..3825c3f --- /dev/null +++ b/.taskmaster/tasks/task_018.md @@ -0,0 +1,582 @@ +# Task ID: 18 + +**Title:** Create End-to-End Integration and Documentation + +**Status:** pending + +**Dependencies:** 9, 13, 14, 15, 16 + +**Priority:** medium + +**Description:** Build comprehensive integration tests, usage documentation, and example workflows for the complete mcpctl system. + +**Details:** + +Create E2E tests and documentation: + +```typescript +// tests/e2e/full-workflow.test.ts +describe('mcpctl E2E', () => { + test('complete workflow: setup to Claude usage', async () => { + // 1. Start mcpd server + const mcpd = await startMcpd(); + + // 2. Setup MCP server via CLI + await exec('mcpctl setup slack --non-interactive --token=test-token'); + + // 3. 
Create project + await exec('mcpctl project create weekly_reports --profiles slack-read-only jira-read-only'); + + // 4. Add to Claude config + await exec('mcpctl claude add-mcp-project weekly_reports'); + const mcpJson = JSON.parse(fs.readFileSync('.mcp.json', 'utf8')); + expect(mcpJson.mcpServers['mcpctl-proxy']).toBeDefined(); + + // 5. Start local proxy + const proxy = await startLocalProxy(); + + // 6. Simulate Claude request through proxy + const response = await proxy.callTool('slack_get_messages', { + channel: 'team', + _context: 'Find security-related messages' + }); + + // 7. Verify response is filtered + expect(response.content.length).toBeLessThan(originalData.length); + + // 8. Verify audit log + const audit = await exec('mcpctl audit --limit 1'); + expect(audit).toContain('mcp_call'); + }); +}); +``` + +Documentation structure: +``` +docs/ +├── getting-started.md +├── installation.md +├── configuration.md +├── cli-reference.md +├── mcp-servers/ +│ ├── slack.md +│ ├── jira.md +│ └── github.md +├── architecture.md +├── local-llm-setup.md +├── deployment/ +│ ├── docker-compose.md +│ └── kubernetes.md +└── examples/ + ├── weekly-reports.md + └── terraform-docs.md +``` + +Example workflow documentation: +```markdown +# Weekly Reports Workflow + +## Setup +```bash +# Install mcpctl +npm install -g mcpctl + +# Configure server +mcpctl config set-server http://your-nas:3000 + +# Setup MCPs +mcpctl setup slack +mcpctl setup jira + +# Create project +mcpctl project create weekly_reports --profiles slack-team jira-myproject + +# Add to Claude +mcpctl claude add-mcp-project weekly_reports +``` + +## Usage with Claude +In your Claude session: +> "Write me a weekly report. Get all messages from Slack related to my team and security, and all Jira tickets I worked on this week." + +The local proxy will filter thousands of messages to only the relevant ones. +``` + +**Test Strategy:** + +Run full E2E test suite. Test all documented workflows work as described. 
Validate documentation accuracy with fresh setup. Test error scenarios and recovery. + +## Subtasks + +### 18.1. Build E2E Test Infrastructure with Docker Compose Local Environment + +**Status:** pending +**Dependencies:** None + +Create the complete E2E test infrastructure using docker-compose that runs mcpd, PostgreSQL, mock MCP servers, and local LLM proxy entirely locally without external dependencies. + +**Details:** + +Create tests/e2e directory structure with: + +**docker-compose.e2e.yml:** +```yaml +version: '3.8' +services: + postgres-e2e: + image: postgres:16-alpine + environment: + POSTGRES_USER: mcpctl_test + POSTGRES_PASSWORD: test_password + POSTGRES_DB: mcpctl_test + healthcheck: + test: ['CMD-SHELL', 'pg_isready -U mcpctl_test'] + mcpd: + build: ../../src/mcpd + depends_on: + postgres-e2e: + condition: service_healthy + environment: + DATABASE_URL: postgresql://mcpctl_test:test_password@postgres-e2e:5432/mcpctl_test + mock-slack-mcp: + build: ./mocks/slack-mcp + ports: ['9001:9001'] + mock-jira-mcp: + build: ./mocks/jira-mcp + ports: ['9002:9002'] + ollama: + image: ollama/ollama:latest + volumes: ['ollama-data:/root/.ollama'] + local-proxy: + build: ../../src/local-proxy + depends_on: [mcpd, ollama] +volumes: + ollama-data: +``` + +**tests/e2e/setup.ts:** +- startE2EEnvironment(): Start all containers, wait for health +- stopE2EEnvironment(): Stop and cleanup containers +- resetDatabase(): Truncate all tables between tests +- getMcpdClient(): Return configured API client for mcpd +- getProxyClient(): Return configured MCP client for local-proxy + +**tests/e2e/mocks/slack-mcp/**: Dockerfile and Node.js mock implementing MCP protocol returning configurable test data (1000+ messages for filtering tests) + +**tests/e2e/mocks/jira-mcp/**: Similar mock for Jira with test tickets + +**tests/e2e/fixtures/**: Test data files (slack-messages.json, jira-tickets.json) with realistic but synthetic data + +**tests/e2e/vitest.config.ts:** +```typescript +export 
default defineConfig({ + test: { + globalSetup: './setup.ts', + testTimeout: 120000, + hookTimeout: 60000, + setupFiles: ['./test-utils.ts'] + } +}); +``` + +Add scripts to root package.json: +- "test:e2e": "vitest run --config tests/e2e/vitest.config.ts" +- "test:e2e:up": "docker-compose -f tests/e2e/docker-compose.e2e.yml up -d" +- "test:e2e:down": "docker-compose -f tests/e2e/docker-compose.e2e.yml down -v" + +### 18.2. Implement Full Workflow E2E Tests with Security Validation + +**Status:** pending +**Dependencies:** 18.1 + +Create comprehensive E2E test suites covering the complete user workflow from CLI setup through proxy usage, plus security-focused tests verifying no credential leakage, proper auth flows, and permission boundary enforcement. + +**Details:** + +Create tests/e2e/workflows directory with test files: + +**tests/e2e/workflows/full-workflow.test.ts:** +```typescript +describe('mcpctl E2E: Complete Workflow', () => { + test('setup to Claude usage', async () => { + // 1. Start mcpd server (via docker-compose) + expect(await getMcpdHealth()).toBe('ok'); + + // 2. Setup MCP server via CLI + const setupResult = await exec('mcpctl setup slack --non-interactive --token=xoxb-test-token'); + expect(setupResult.exitCode).toBe(0); + + // 3. Create project with profiles + await exec('mcpctl project create weekly_reports --profiles slack-read-only jira-read-only'); + const project = await getMcpdClient().getProject('weekly_reports'); + expect(project.profiles).toHaveLength(2); + + // 4. Add to Claude config + await exec('mcpctl claude add-mcp-project weekly_reports'); + const mcpJson = JSON.parse(fs.readFileSync('.mcp.json', 'utf8')); + expect(mcpJson.mcpServers['mcpctl-proxy']).toBeDefined(); + expect(mcpJson.mcpServers['mcpctl-proxy'].env.SLACK_BOT_TOKEN).toBeUndefined(); // No secrets! + + // 5. 
Simulate proxy request with context filtering + const response = await getProxyClient().callTool('slack_get_messages', { + channel: 'team', + _context: 'Find security-related messages' + }); + expect(response.content.length).toBeLessThan(1000); // Filtered from 1000+ test messages + + // 6. Verify audit log + const audit = await exec('mcpctl audit --limit 1 --format json'); + expect(JSON.parse(audit.stdout)[0].action).toBe('mcp_call'); + }); +}); +``` + +**tests/e2e/security/credential-leakage.test.ts:** +```typescript +describe('Security: No Credential Leakage', () => { + test('.mcp.json never contains actual secrets', async () => { + await exec('mcpctl setup slack --token=xoxb-real-token'); + await exec('mcpctl claude add-mcp-project test_project'); + const content = fs.readFileSync('.mcp.json', 'utf8'); + expect(content).not.toContain('xoxb-'); + expect(content).not.toMatch(/[A-Za-z0-9]{32,}/); + }); + + test('audit logs scrub sensitive data', async () => { + await exec('mcpctl setup jira --token=secret-api-token'); + const logs = await prisma.auditLog.findMany({ where: { action: 'mcp_server_setup' }}); + logs.forEach(log => { + expect(JSON.stringify(log.details)).not.toContain('secret-api-token'); + }); + }); + + test('CLI history does not contain tokens', async () => { + // Verify --token values are masked in any logged commands + }); +}); +``` + +**tests/e2e/security/auth-flows.test.ts:** +```typescript +describe('Security: Authentication Flows', () => { + test('API rejects requests without valid token', async () => { + const response = await fetch(`${MCPD_URL}/api/projects`, { + headers: { 'Authorization': 'Bearer invalid-token' } + }); + expect(response.status).toBe(401); + }); + + test('expired sessions are rejected', async () => { + const expiredSession = await createExpiredSession(); + const response = await authenticatedFetch('/api/projects', expiredSession.token); + expect(response.status).toBe(401); + }); +}); +``` + 
+**tests/e2e/security/permission-boundaries.test.ts:** +```typescript +describe('Security: Permission Boundaries', () => { + test('read-only profile cannot call write operations', async () => { + await exec('mcpctl project create readonly_test --profiles slack-read-only'); + const response = await getProxyClient().callTool('slack_post_message', { + channel: 'general', + text: 'test' + }); + expect(response.error).toContain('permission denied'); + }); +}); +``` + +**tests/e2e/workflows/error-recovery.test.ts:** Test scenarios for network failures, container restarts, database disconnections with proper recovery + +### 18.3. Create User and Technical Documentation Suite + +**Status:** pending +**Dependencies:** None + +Build comprehensive documentation covering getting started, installation, configuration, CLI reference, architecture overview, and local LLM setup guides with proper markdown structure. + +**Details:** + +Create docs/ directory structure: + +**docs/getting-started.md:** +- Quick 5-minute setup guide +- Prerequisites (Node.js, Docker, pnpm) +- Install mcpctl globally: `npm install -g mcpctl` +- Start mcpd: `docker-compose up -d` or `mcpctl daemon start` +- Configure first MCP server: `mcpctl setup slack` +- Create first project: `mcpctl project create my_assistant --profiles slack-read-only` +- Add to Claude: `mcpctl claude add-mcp-project my_assistant` +- Verify with `mcpctl status` + +**docs/installation.md:** +- NPM global install: `npm install -g mcpctl` +- Docker deployment: Using provided docker-compose.yml +- Kubernetes deployment: Helm chart reference (link to deployment/kubernetes.md) +- Building from source: Clone, pnpm install, pnpm build +- Verifying installation: `mcpctl version`, `mcpctl doctor` + +**docs/configuration.md:** +- Environment variables reference (DATABASE_URL, MCPD_URL, LOG_LEVEL, etc.) 
+- Configuration file locations (~/.mcpctl/config.yaml, .mcpctl.yaml) +- Per-project configuration (.mcpctl.yaml in project root) +- Secrets management (keyring integration, environment variables, --token flags) +- Example configurations for different environments + +**docs/cli-reference.md:** +- Complete command reference with examples +- `mcpctl setup <server>` - Configure MCP server +- `mcpctl project create|list|delete|status` - Project management +- `mcpctl profile list|describe|apply` - Profile management +- `mcpctl claude add-mcp-project|remove-mcp-project` - Claude integration +- `mcpctl instance start|stop|restart|logs|status` - Instance lifecycle +- `mcpctl audit [--limit N] [--format json|table]` - Audit log queries +- `mcpctl config get|set` - Configuration management +- Global flags: --server, --format, --verbose, --quiet + +**docs/architecture.md:** +- High-level system diagram (ASCII or Mermaid) +- Component descriptions: CLI, mcpd, local-proxy, database +- Data flow: Claude -> .mcp.json -> local-proxy -> mcpd -> MCP servers +- Security model: Token validation, audit logging, credential isolation +- Scalability: Stateless mcpd, PostgreSQL HA, horizontal scaling + +**docs/local-llm-setup.md:** +- Ollama installation and configuration +- Model recommendations for filtering (llama3.2, qwen2.5) +- Gemini CLI setup as alternative +- vLLM for high-throughput deployments +- DeepSeek API configuration +- Performance tuning and benchmarks + +**docs/mcp-servers/:** +- slack.md: Slack MCP setup, required scopes, profile examples +- jira.md: Jira Cloud/Server setup, API token creation +- github.md: GitHub token scopes, repository access +- terraform.md: Terraform docs MCP configuration +- Each includes: Prerequisites, Setup steps, Available profiles, Troubleshooting + +**docs/deployment/:** +- docker-compose.md: Production docker-compose configuration +- kubernetes.md: Helm chart installation, values.yaml reference + +### 18.4. 
Create SRE Runbooks and Network Topology Documentation + +**Status:** pending +**Dependencies:** 18.3 + +Write operational runbooks for common SRE scenarios including restart procedures, credential rotation, scaling, and diagnostics, plus network topology documentation for enterprise deployments with proxy, firewall, and DNS considerations. + +**Details:** + +Create docs/operations/ directory: + +**docs/operations/runbooks/:** + +**restart-failed-instance.md:** +```markdown +# Runbook: Restart Failed MCP Instance + +## Symptoms +- `mcpctl instance status <name>` shows 'error' or 'stopped' +- Audit logs show repeated connection failures +- Claude reports MCP tool unavailable + +## Diagnosis +1. Check instance status: `mcpctl instance status <name> --verbose` +2. View recent logs: `mcpctl instance logs <name> --tail 100` +3. Check container health: `docker inspect mcpctl-<name> | jq '.[0].State'` + +## Resolution Steps +1. Stop the instance: `mcpctl instance stop <name>` +2. Check for resource exhaustion: `docker stats --no-stream` +3. Restart: `mcpctl instance start <name>` +4. Verify health: `mcpctl instance status <name> --wait-healthy` +5. 
Test connectivity: `mcpctl instance test <name>` + +## Escalation +- If repeated failures: Check network connectivity to external APIs +- If OOM: Increase container memory limits in profile configuration +``` + +**rotate-credentials.md:** Steps for rotating Slack tokens, Jira API keys, GitHub PATs without downtime + +**scale-up.md:** Adding mcpd instances, database read replicas, load balancer configuration + +**diagnose-connectivity.md:** Network troubleshooting between proxy, mcpd, and MCP servers + +**backup-restore.md:** PostgreSQL backup procedures, disaster recovery + +**security-incident.md:** Credential exposure response, audit log analysis, revocation procedures + +**docs/operations/network-topology.md:** +```markdown +# Network Topology and Enterprise Deployment + +## Architecture Diagram +[Mermaid diagram showing: Claude Desktop -> local-proxy (localhost) -> Corporate Proxy -> mcpd (internal network) -> MCP Servers (Slack API, Jira API, etc.)] + +## Network Requirements + +### Local Proxy (runs on developer machine) +- Listens on localhost:9229 (configurable) +- Outbound: HTTPS to mcpd server (configurable URL) +- No direct internet access required + +### mcpd Server (internal deployment) +- Inbound: HTTPS from corporate network (developer machines) +- Outbound: HTTPS to MCP server APIs (Slack, Jira, GitHub) +- PostgreSQL: Port 5432 to database server + +### Firewall Rules +| Source | Destination | Port | Protocol | Purpose | +|--------|-------------|------|----------|--------| +| Developer workstations | mcpd | 443 | HTTPS | API access | +| mcpd | PostgreSQL | 5432 | TCP | Database | +| mcpd | api.slack.com | 443 | HTTPS | Slack MCP | +| mcpd | *.atlassian.net | 443 | HTTPS | Jira MCP | +| mcpd | api.github.com | 443 | HTTPS | GitHub MCP | + +### Proxy Configuration +- If corporate proxy required: Set HTTP_PROXY/HTTPS_PROXY for mcpd container +- No-proxy list: Database server, internal services +- SSL inspection: May require custom CA certificate 
injection + +### DNS Configuration +- mcpd server should be resolvable: mcpd.internal.company.com +- Or use IP address in mcpctl config: `mcpctl config set-server https://10.0.0.50:443` + +### TLS/Certificate Requirements +- mcpd should use valid TLS certificate (Let's Encrypt or internal CA) +- Certificate SANs should include all access hostnames +- For self-signed: Export CA and configure in mcpctl: `mcpctl config set-ca /path/to/ca.pem` +``` + +**docs/operations/troubleshooting-network.md:** +- Common issues: Connection refused, certificate errors, proxy authentication +- Diagnostic commands: `mcpctl doctor`, `mcpctl test-connection` +- tcpdump/Wireshark guidance for packet inspection +- Proxy debugging with curl equivalents + +### 18.5. Implement Data Team Example Workflows with Automated Validation + +**Status:** pending +**Dependencies:** 18.1, 18.2, 18.3 + +Create example workflow documentation for data analysts and engineers including weekly report generation, data pipeline monitoring, and documentation querying, with automated E2E tests validating each workflow works as documented. + +**Details:** + +Create docs/examples/ directory with workflow documentation: + +**docs/examples/weekly-reports.md:** +```markdown +# Weekly Reports Workflow + +## Use Case +Generate weekly team reports by aggregating Slack discussions and Jira ticket updates. 
+ +## Setup +```bash +# Install mcpctl (if not already installed) +npm install -g mcpctl + +# Configure mcpd server connection +mcpctl config set-server http://your-nas:3000 + +# Setup MCP servers with appropriate tokens +mcpctl setup slack --token $SLACK_BOT_TOKEN +mcpctl setup jira --url https://company.atlassian.net --token $JIRA_API_TOKEN + +# Create project with read-only profiles +mcpctl project create weekly_reports --profiles slack-team jira-myproject + +# Add to Claude Desktop +mcpctl claude add-mcp-project weekly_reports +``` + +## Usage with Claude +In your Claude session, say: +> "Write me a weekly report for the security team. Get all Slack messages from #security-team mentioning incidents or vulnerabilities this week, and all Jira tickets I worked on with status changes." + +The local proxy will: +1. Intercept the Slack API request +2. Use local LLM to identify relevant messages (filtering 1000s to ~50) +3. Return only pertinent data to Claude +4. Log the operation for audit compliance + +## Expected Output +- Weekly summary with categorized Slack discussions +- Jira ticket status updates with time spent +- Action items extracted from conversations +``` + +**docs/examples/data-pipeline-monitoring.md:** +- Setup for monitoring Airflow/dbt pipelines via Slack alerts +- Integration with Jira for incident tracking +- Example Claude prompts for pipeline health checks + +**docs/examples/documentation-querying.md:** +- Setup Terraform docs MCP for infrastructure documentation +- GitHub MCP for code documentation querying +- Example: "Find all S3 buckets with public access in our Terraform configs" + +**tests/e2e/examples/ directory with automated validation:** + +**tests/e2e/examples/weekly-reports.test.ts:** +```typescript +describe('Example Workflow: Weekly Reports', () => { + test('follows documented setup steps', async () => { + // Parse setup commands from docs/examples/weekly-reports.md + const setupCommands = 
extractCodeBlocks('docs/examples/weekly-reports.md', 'bash'); + + for (const cmd of setupCommands) { + // Skip comment lines; env-var tokens are substituted with test values below + if (cmd.startsWith('#')) continue; + + // Execute with test tokens + const result = await exec(cmd.replace('$SLACK_BOT_TOKEN', 'test-token').replace('$JIRA_API_TOKEN', 'test-token')); + expect(result.exitCode).toBe(0); + } + }); + + test('proxy filters messages as described', async () => { + // Setup as documented + await exec('mcpctl setup slack --non-interactive --token=test-token'); + await exec('mcpctl project create weekly_reports --profiles slack-read-only'); + + // Simulate Claude request matching documented usage + const response = await getProxyClient().callTool('slack_search_messages', { + query: 'security incidents vulnerabilities', + _context: 'Find security-related messages for weekly report' + }); + + // Verify filtering works as documented + expect(response.messages.length).toBeLessThan(100); // Filtered from 1000+ + expect(response.messages.every(m => + m.text.toLowerCase().includes('security') || + m.text.toLowerCase().includes('incident') || + m.text.toLowerCase().includes('vulnerability') + )).toBe(true); + }); + + test('audit log records operation', async () => { + const auditResult = await exec('mcpctl audit --limit 1 --format json'); + const lastAudit = JSON.parse(auditResult.stdout)[0]; + expect(lastAudit.action).toBe('mcp_call'); + expect(lastAudit.resource).toContain('slack'); + }); +}); +``` + +**tests/e2e/examples/data-pipeline-monitoring.test.ts:** Similar validation for pipeline monitoring workflow + +**tests/e2e/examples/documentation-querying.test.ts:** Validation for Terraform/GitHub docs workflow + +Each test file: +1. Parses the corresponding markdown file for setup commands +2. Executes commands (with test credentials) to verify they work +3. Simulates the documented Claude usage pattern +4. 
Verifies expected outcomes match documentation claims diff --git a/.taskmaster/tasks/task_019.md b/.taskmaster/tasks/task_019.md new file mode 100644 index 0000000..08e2126 --- /dev/null +++ b/.taskmaster/tasks/task_019.md @@ -0,0 +1,98 @@ +# Task ID: 19 + +**Title:** Implement Local LLM Pre-filtering Proxy + +**Status:** cancelled + +**Dependencies:** None + +**Priority:** high + +**Description:** Build the local proxy component that intercepts Claude's MCP requests, uses local LLMs (Gemini CLI, Ollama, vLLM, or DeepSeek API) to interpret questions, fetch relevant data from mcpd, and filter/refine responses to minimize context window usage before returning to Claude. + +**Details:** + +Create src/local-proxy/src/ with the following architecture: + +**Core Components:** + +1. **MCP Protocol Handler** (mcp-handler.ts): + - Implement MCP server interface using @modelcontextprotocol/sdk + - Register as the MCP endpoint Claude connects to + - Parse incoming tool calls and extract the semantic intent + +2. **LLM Provider Abstraction** (providers/): + ```typescript + interface LLMProvider { + name: string; + interpretQuery(query: string, context: McpToolCall): Promise<InterpretedQuery>; + filterResponse(data: unknown, originalQuery: string, maxTokens: number): Promise<FilteredResponse>; + } + ``` + Implement providers: + - gemini-cli.ts: Shell out to `gemini` CLI binary + - ollama.ts: HTTP client to local Ollama server (localhost:11434) + - vllm.ts: OpenAI-compatible API client + - deepseek.ts: DeepSeek API client + +3. **Query Interpreter** (interpreter.ts): + - Takes Claude's raw MCP request (e.g., 'get_slack_messages') + - Uses local LLM to understand semantic intent: "Find messages related to security and linux servers from my team" + - Generates optimized query parameters for mcpd + +4. 
**Response Filter** (filter.ts): + - Receives raw data from mcpd (potentially thousands of Slack messages, large Terraform docs) + - Uses local LLM to extract ONLY relevant information matching original query + - Implements token counting to stay within configured limits + - Returns compressed, relevant subset of data + +5. **mcpd Client** (mcpd-client.ts): + - HTTP client to communicate with mcpd server + - Handles authentication (forwards Claude session token) + - Supports all MCP operations exposed by mcpd + +**Configuration:** +```typescript +interface ProxyConfig { + mcpdUrl: string; // e.g., 'http://mcpd.local:3000' + llmProvider: 'gemini-cli' | 'ollama' | 'vllm' | 'deepseek'; + llmConfig: { + model?: string; // e.g., 'llama3.2', 'gemini-pro' + endpoint?: string; // for vllm/deepseek + maxTokensPerFilter: number; // target output size + }; + filteringEnabled: boolean; // can be disabled for passthrough +} +``` + +**Flow:** +1. Claude calls local-proxy MCP server +2. Proxy interprets query semantics via local LLM +3. Proxy calls mcpd with optimized query +4. mcpd returns raw MCP data +5. Proxy filters response via local LLM +6. Claude receives minimal, relevant context + +**Pseudo-code for filter.ts:** +```typescript +async function filterResponse( + rawData: unknown, + originalQuery: string, + provider: LLMProvider +): Promise<FilteredResponse> { + const dataStr = JSON.stringify(rawData); + if (dataStr.length < 4000) return { data: rawData, filtered: false }; + + const prompt = `Given this query: "${originalQuery}" + Extract ONLY the relevant information from this data. + Return a JSON array of relevant items, max 10 items. + Data: ${dataStr.slice(0, 50000)}`; // Truncate for LLM input + + const filtered = await provider.filterResponse(dataStr, originalQuery, 2000); + return { data: filtered, filtered: true, originalSize: dataStr.length }; +} +``` + +**Test Strategy:** + +Unit tests for each LLM provider with mocked HTTP/CLI responses. 
Integration tests with actual Ollama instance (docker-compose service). Test query interpretation produces valid mcpd parameters. Test filtering reduces data size while preserving relevant content. Load test with large payloads (10MB JSON) to verify memory handling. Test fallback behavior when LLM provider is unavailable. Test passthrough mode when filtering is disabled. diff --git a/.taskmaster/tasks/task_020.md b/.taskmaster/tasks/task_020.md new file mode 100644 index 0000000..79132e4 --- /dev/null +++ b/.taskmaster/tasks/task_020.md @@ -0,0 +1,85 @@ +# Task ID: 20 + +**Title:** Implement MCP Project Management with Claude Code Integration + +**Status:** cancelled + +**Dependencies:** None + +**Priority:** high + +**Description:** Build the `mcpctl claude add-mcp-project <project-name>` command that configures Claude Code sessions to use specific MCP server profiles, generating and managing .mcp.json files automatically. + +**Details:** + +Extend src/cli/src/commands/ with Claude Code integration: + +**New Commands:** + +1. **mcpctl claude add-mcp-project <name>** (claude/add-mcp-project.ts): + - Fetches project definition from mcpd API + - Generates .mcp.json file pointing to local-proxy + - Configures local-proxy to route to the project's MCP profiles + - Example output: + ```json + { + "mcpServers": { + "weekly_reports": { + "command": "npx", + "args": ["-y", "@mcpctl/local-proxy", "--project", "weekly_reports", "--mcpd", "http://mcpd.local:3000"], + "env": {} + } + } + } + ``` + +2. **mcpctl claude remove-mcp-project <name>** (claude/remove-mcp-project.ts): + - Removes project from .mcp.json + - Cleans up local-proxy config + +3. **mcpctl claude list-projects** (claude/list-projects.ts): + - Shows configured projects in current directory's .mcp.json + - Shows available projects from mcpd + +4. **mcpctl project create <name>** (project/create.ts): + - Creates new project on mcpd + - Interactive profile selection + +5. 
**mcpctl project add-profile <project> <profile>** (project/add-profile.ts): + - Links existing profile to project + +**MCP.json Management** (lib/mcp-json.ts): +```typescript +interface McpJsonManager { + findMcpJson(startDir: string): string | null; // Search up directory tree + readMcpJson(path: string): McpJsonConfig; + writeMcpJson(path: string, config: McpJsonConfig): void; + addProject(config: McpJsonConfig, project: ProjectConfig): McpJsonConfig; + removeProject(config: McpJsonConfig, projectName: string): McpJsonConfig; +} +``` + +**mcpd API Extensions** (src/mcpd/src/routes/projects.ts): +- GET /projects - List all projects +- GET /projects/:name - Get project details with profiles +- POST /projects - Create project +- PUT /projects/:name/profiles - Update project profiles +- GET /projects/:name/claude-config - Get Claude-ready config + +**Workflow Example:** +```bash +# On mcpd server (admin sets up projects) +mcpctl project create weekly_reports +mcpctl project add-profile weekly_reports slack-readonly +mcpctl project add-profile weekly_reports jira-readonly + +# On developer machine +cd ~/my-workspace +mcpctl claude add-mcp-project weekly_reports +# Creates/updates .mcp.json with weekly_reports config +# Now Claude Code in this directory can use slack and jira MCPs +``` + +**Test Strategy:** + +Unit test MCP.json parsing and manipulation with various file states (missing, empty, existing projects). Test findMcpJson directory traversal. Integration test with mcpd API: create project, add profiles, fetch Claude config. E2E test: run `mcpctl claude add-mcp-project`, verify .mcp.json created, start Claude Code (mock), verify MCP connection works. Test error handling: project not found, profile not found, conflicting project names. Test update behavior when project already exists in .mcp.json. 
diff --git a/.taskmaster/tasks/task_021.md b/.taskmaster/tasks/task_021.md new file mode 100644 index 0000000..c815651 --- /dev/null +++ b/.taskmaster/tasks/task_021.md @@ -0,0 +1,127 @@ +# Task ID: 21 + +**Title:** Implement Guided MCP Server Setup Wizard with Credential Flow + +**Status:** cancelled + +**Dependencies:** None + +**Priority:** medium + +**Description:** Build an interactive setup wizard that guides users through MCP server configuration, including browser-based OAuth flows, API token generation pages, and step-by-step credential setup with secure storage. + +**Details:** + +Create src/cli/src/commands/setup/ with guided setup flows: + +**Setup Wizard Architecture:** + +1. **Setup Command** (setup.ts): + ```bash + mcpctl setup <server-type> # e.g., mcpctl setup slack + ``` + - Fetches server definition from mcpd (envTemplate, setupGuide) + - Runs appropriate setup flow based on server type + +2. **Setup Flows** (flows/): + - oauth-flow.ts: For OAuth-based services (Slack, GitHub) + - api-key-flow.ts: For API key services (Jira, OpenAI) + - custom-flow.ts: For services with unique setup + +3. **OAuth Flow Handler** (flows/oauth-flow.ts): + ```typescript + async function runOAuthFlow(serverType: string, config: OAuthConfig): Promise<Credentials> { + // 1. Start local HTTP server to receive OAuth callback + const callbackServer = await startCallbackServer(config.callbackPort); + + // 2. Open browser to OAuth authorization URL + const authUrl = buildOAuthUrl(config); + console.log(`Opening browser to authorize ${serverType}...`); + await open(authUrl); // Uses 'open' package + + // 3. Wait for callback with auth code + const authCode = await callbackServer.waitForCode(); + + // 4. Exchange code for tokens + const tokens = await exchangeCodeForTokens(authCode, config); + + // 5. Securely store tokens via mcpd + await mcpdClient.storeCredentials(serverType, tokens); + + return tokens; + } + ``` + +4. 
**API Key Flow Handler** (flows/api-key-flow.ts): + ```typescript + async function runApiKeyFlow(serverType: string, config: ApiKeyConfig): Promise<Credentials> { + // 1. Display setup instructions + console.log(chalk.bold(`\nSetting up ${serverType}...\n`)); + console.log(config.setupGuide); // Markdown rendered to terminal + + // 2. Open browser to API key generation page + if (config.apiKeyUrl) { + const shouldOpen = await confirm('Open browser to generate API key?'); + if (shouldOpen) await open(config.apiKeyUrl); + } + + // 3. Prompt for required credentials + const credentials: Record<string, string> = {}; + for (const envVar of config.requiredEnvVars) { + credentials[envVar.name] = await password({ + message: `Enter ${envVar.description}:`, + mask: '*' + }); + } + + // 4. Validate credentials (test API call) + const valid = await validateCredentials(serverType, credentials); + if (!valid) throw new Error('Invalid credentials'); + + // 5. Store securely via mcpd + await mcpdClient.storeCredentials(serverType, credentials); + + return credentials; + } + ``` + +5. **Credential Storage** (src/mcpd/src/services/credentials.ts): + - Encrypt credentials at rest using AES-256-GCM + - Master key from environment (MCPCTL_MASTER_KEY) or Vault integration + - Store encrypted credentials in database (McpServer.encryptedCredentials new field) + - Never log or expose credentials in API responses + +**Server-Specific Setup Guides (seed data):** + +- **Slack:** + - Guide: "1. Go to api.slack.com/apps, 2. Create app, 3. Add OAuth scopes..." + - OAuth flow with workspace authorization + - Scopes: channels:read, users:read, chat:write + +- **Jira:** + - Guide: "1. Go to id.atlassian.com/manage-profile/security/api-tokens" + - API key flow with URL, email, token + - Test: GET /rest/api/3/myself + +- **GitHub:** + - Guide: "1. 
Go to github.com/settings/tokens" + - API key flow OR GitHub App OAuth + - Scopes: repo, read:org + +- **Terraform Docs:** + - No credentials needed + - Setup verifies terraform CLI installed + +**Profile Creation After Setup:** +```bash +mcpctl setup slack +# After successful setup: +# "Slack configured! Create a profile for this server?" +# > Profile name: slack-readonly +# > Read-only mode? Yes +# Profile 'slack-readonly' created and linked to Slack server. +``` + +**Test Strategy:** + +Unit test each flow handler with mocked external services. Test OAuth callback server starts and receives codes correctly. Test API key validation with mock API responses. Integration test with actual services using test accounts (Slack test workspace, GitHub test token). Test credential encryption/decryption roundtrip. Test setup guide rendering (markdown to terminal). E2E test: run `mcpctl setup slack`, mock browser open, simulate OAuth callback, verify credentials stored and profile created. Test error recovery: invalid credentials, timeout waiting for callback, network failures. Security test: verify credentials never logged, encrypted at rest, not in API responses. diff --git a/.taskmaster/tasks/task_022.md b/.taskmaster/tasks/task_022.md new file mode 100644 index 0000000..fa8c169 --- /dev/null +++ b/.taskmaster/tasks/task_022.md @@ -0,0 +1,271 @@ +# Task ID: 22 + +**Title:** Implement MCP Registry Client + +**Status:** pending + +**Dependencies:** None + +**Priority:** high + +**Description:** Build a multi-source registry client that queries the Official MCP Registry, Glama.ai, and Smithery.ai APIs to search, discover, and retrieve MCP server metadata with deduplication, ranking, and caching. 
+ +**Details:** + +Create src/cli/src/registry/ directory with the following structure: + +``` +registry/ +├── client.ts # Main RegistryClient facade +├── sources/ +│ ├── base.ts # Abstract RegistrySource interface +│ ├── official.ts # Official MCP Registry (registry.modelcontextprotocol.io) +│ ├── glama.ts # Glama.ai registry +│ └── smithery.ts # Smithery.ai registry +├── types.ts # RegistryServer, SearchOptions, etc. +├── cache.ts # TTL-based result caching +├── dedup.ts # Deduplication logic +├── ranking.ts # Result ranking algorithm +└── index.ts # Barrel export +``` + +**Strategy Pattern Implementation:** +```typescript +// types.ts +export interface EnvVar { + name: string; + description: string; + isSecret: boolean; + setupUrl?: string; +} + +export interface RegistryServer { + name: string; + description: string; + packages: { + npm?: string; + pypi?: string; + docker?: string; + }; + envTemplate: EnvVar[]; + transport: 'stdio' | 'sse' | 'websocket'; + repositoryUrl?: string; + popularityScore: number; + verified: boolean; + sourceRegistry: 'official' | 'glama' | 'smithery'; + lastUpdated?: Date; +} + +export interface SearchOptions { + query: string; + limit?: number; + registries?: ('official' | 'glama' | 'smithery')[]; + verified?: boolean; + transport?: 'stdio' | 'sse'; + category?: string; +} + +// base.ts +export abstract class RegistrySource { + abstract name: string; + abstract search(query: string, limit: number): Promise<RegistryServer[]>; + protected abstract normalizeResult(raw: unknown): RegistryServer; +} +``` + +**Official MCP Registry Source (GET /v0/servers):** +- Base URL: https://registry.modelcontextprotocol.io/v0/servers +- Query params: ?search=<query>&limit=100&cursor=<cursor> +- No authentication required +- Pagination via cursor +- Response includes: name, description, npm package, env vars, transport + +**Glama.ai Source:** +- Base URL: https://glama.ai/api/mcp/v1/servers +- No authentication required +- Cursor-based pagination +- 
Response includes env var JSON schemas + +**Smithery.ai Source:** +- Base URL: https://registry.smithery.ai/servers +- Query params: ?q=<query> +- Requires free API key from config (optional, graceful fallback) +- Has verified badges, usage analytics + +**Caching Implementation:** +```typescript +// cache.ts +import { createHash } from 'crypto'; + +export class RegistryCache { + private cache = new Map<string, { data: RegistryServer[]; expires: number }>(); + private defaultTTL: number; + + constructor(ttlMs = 3600000) { // 1 hour default + this.defaultTTL = ttlMs; + } + + private getKey(query: string, options: SearchOptions): string { + return createHash('sha256').update(JSON.stringify({ query, options })).digest('hex'); + } + + get(query: string, options: SearchOptions): RegistryServer[] | null { + const key = this.getKey(query, options); + const entry = this.cache.get(key); + if (entry && entry.expires > Date.now()) { + return entry.data; + } + this.cache.delete(key); + return null; + } + + set(query: string, options: SearchOptions, data: RegistryServer[]): void { + const key = this.getKey(query, options); + this.cache.set(key, { data, expires: Date.now() + this.defaultTTL }); + } + + getHitRatio(): { hits: number; misses: number; ratio: number } { /* metrics */ } +} +``` + +**Deduplication Logic:** +- Match by npm package name first (exact match) +- Fall back to GitHub repository URL comparison +- Keep the result with highest popularity score +- Merge envTemplate data from multiple sources + +**Ranking Algorithm:** +1. Relevance score (text match quality) - weight: 40% +2. Popularity/usage count (Smithery analytics) - weight: 30% +3. Verified status - weight: 20% +4. 
Recency (last updated) - weight: 10% + +**Rate Limiting & Retry:** +```typescript +export async function withRetry<T>( + fn: () => Promise<T>, + maxRetries = 3, + baseDelay = 1000 +): Promise<T> { + for (let attempt = 0; attempt < maxRetries; attempt++) { + try { + return await fn(); + } catch (error) { + if (attempt === maxRetries - 1) throw error; + const delay = baseDelay * Math.pow(2, attempt) + Math.random() * 1000; + await new Promise(r => setTimeout(r, delay)); + } + } + throw new Error('Unreachable'); +} +``` + +**Security Requirements:** +- Validate all API responses with Zod schemas +- Sanitize descriptions to prevent terminal escape sequence injection +- Never log API keys (Smithery key) +- Support HTTP_PROXY/HTTPS_PROXY environment variables +- Support NODE_EXTRA_CA_CERTS for custom CA certificates + +**SRE Metrics (expose via shared metrics module):** +- registry_query_latency_ms (histogram by source) +- registry_cache_hit_ratio (gauge) +- registry_error_count (counter by source, error_type) + +**Test Strategy:** + +TDD approach - write tests BEFORE implementation: + +1. **Unit tests for each registry source:** + - Mock HTTP responses for official, glama, smithery APIs + - Test normalization of raw API responses to RegistryServer type + - Test pagination handling (cursor-based) + - Test error handling (network errors, invalid responses, rate limits) + +2. **Cache tests:** + - Test cache hit returns data without API call + - Test cache miss triggers API call + - Test TTL expiration correctly invalidates entries + - Test cache key generation is deterministic + - Test hit ratio metrics accuracy + +3. **Deduplication tests:** + - Test npm package name matching + - Test GitHub URL matching with different formats (https vs git@) + - Test keeping highest popularity score + - Test envTemplate merging from multiple sources + +4. 
**Ranking tests:** + - Test relevance scoring for exact vs partial matches + - Test popularity weight contribution + - Test verified boost + - Test overall ranking order + +5. **Integration tests:** + - Test full search flow with mocked HTTP + - Test parallel queries to all registries + - Test graceful degradation when one registry fails + +6. **Security tests:** + - Test Zod validation rejects malformed responses + - Test terminal escape sequence sanitization + - Test no API keys in error messages or logs + +Run: `pnpm --filter @mcpctl/cli test:run -- --coverage registry/` + +## Subtasks + +### 22.1. Define Registry Types, Zod Schemas, and Base Abstract Source Interface + +**Status:** pending +**Dependencies:** None + +Create the foundational types, validation schemas, and abstract base class for all registry sources following TDD and strategy pattern principles. + +**Details:** + +Create src/cli/src/registry/ directory structure. Implement types.ts with RegistryServer, SearchOptions, EnvVar interfaces. Define Zod schemas for validating all API responses (OfficialRegistryResponseSchema, GlamaResponseSchema, SmitheryResponseSchema) to ensure security validation. Create base.ts with abstract RegistrySource class including name property, search() method, and normalizeResult() protected method. Include terminal escape sequence sanitization utility in types.ts. Write comprehensive Vitest tests BEFORE implementation: test type guards, Zod schema validation with valid/invalid inputs, sanitization of malicious strings with ANSI escape codes. Add category tags including data platform categories (bigquery, snowflake, dbt). Export everything via index.ts barrel file. + +### 22.2. 
Implement Individual Registry Sources with HTTP Client and Proxy Support + +**Status:** pending +**Dependencies:** 22.1 + +Implement the three concrete registry source classes (OfficialRegistrySource, GlamaRegistrySource, SmitheryRegistrySource) with proper HTTP handling, proxy support, and response normalization. + +**Details:** + +Create sources/official.ts for https://registry.modelcontextprotocol.io/v0/servers - implement cursor-based pagination, normalize responses to RegistryServer type. Create sources/glama.ts for https://glama.ai/api/mcp/v1/servers - handle JSON schema env vars, cursor pagination. Create sources/smithery.ts for https://registry.smithery.ai/servers - optional API key from config, graceful fallback if unauthorized, handle verified badges and analytics. Implement shared HTTP client utility supporting HTTP_PROXY/HTTPS_PROXY environment variables and NODE_EXTRA_CA_CERTS for custom CA certificates. Add exponential backoff retry logic with jitter (withRetry function). Never log API keys in error messages or debug output. Use structured logging with appropriate log levels. Write tests BEFORE implementation using mock HTTP responses. + +### 22.3. Implement TTL-Based Caching with Metrics and Hit Ratio Tracking + +**Status:** pending +**Dependencies:** 22.1 + +Build the RegistryCache class with TTL-based expiration, SHA-256 cache keys, hit/miss metrics, and integration with the SRE metrics module. + +**Details:** + +Create cache.ts with RegistryCache class. Use SHA-256 hash of query+options JSON for cache keys. Implement TTL-based expiration with configurable defaultTTL (default 1 hour). Track hits/misses with getHitRatio() method returning { hits, misses, ratio }. Integrate with shared metrics module to expose registry_cache_hit_ratio gauge. Implement cache.clear() for testing and manual invalidation. Add cache size limits with LRU eviction if needed. Ensure thread-safety for concurrent access patterns. 
Write comprehensive Vitest tests BEFORE implementation covering cache behavior. + +### 22.4. Implement Deduplication Logic and Ranking Algorithm + +**Status:** pending +**Dependencies:** 22.1 + +Create the deduplication module to merge results from multiple registries and the ranking algorithm to sort results by relevance, popularity, verification, and recency. + +**Details:** + +Create dedup.ts with deduplicateResults(results: RegistryServer[]): RegistryServer[] function. Match duplicates by npm package name (exact match) first, then fall back to GitHub repositoryUrl comparison. Keep the result with highest popularityScore when merging duplicates. Merge envTemplate arrays from multiple sources, deduplicating by env var name. Create ranking.ts with rankResults(results: RegistryServer[], query: string): RegistryServer[] function. Implement weighted scoring: text match relevance 40%, popularity/usage 30%, verified status 20%, recency 10%. Text relevance uses fuzzy matching on name and description. Write tests BEFORE implementation with sample datasets. + +### 22.5. Build Main RegistryClient Facade with Parallel Queries and SRE Metrics + +**Status:** pending +**Dependencies:** 22.1, 22.2, 22.3, 22.4 + +Create the main RegistryClient facade class that orchestrates parallel queries across all sources, applies caching, deduplication, ranking, and exposes SRE metrics for observability. + +**Details:** + +Create client.ts with RegistryClient class implementing the facade pattern. Constructor accepts optional config for enabling/disabling specific registries, cache TTL, and Smithery API key. Implement search(options: SearchOptions): Promise<RegistryServer[]> that queries all enabled registries in parallel using Promise.allSettled, applies caching, deduplication, and ranking. Expose SRE metrics via shared metrics module: registry_query_latency_ms histogram labeled by source, registry_error_count counter labeled by source and error_type. 
Use structured logging for all operations. Handle partial failures gracefully (return results from successful sources). Create index.ts barrel export for clean public API. Include comprehensive JSDoc documentation. diff --git a/.taskmaster/tasks/task_023.md b/.taskmaster/tasks/task_023.md new file mode 100644 index 0000000..ec3987a --- /dev/null +++ b/.taskmaster/tasks/task_023.md @@ -0,0 +1,596 @@ +# Task ID: 23 + +**Title:** Implement mcpctl discover Command + +**Status:** pending + +**Dependencies:** 22 + +**Priority:** medium + +**Description:** Create the `mcpctl discover` CLI command that lets users search for MCP servers across all configured registries with filtering, multiple output formats, and an interactive browsing mode. + +**Details:** + +Create src/cli/src/commands/discover.ts: + +```typescript +import { Command } from 'commander'; +import { RegistryClient } from '../registry/client'; +import { formatTable, formatJson, formatYaml } from '../utils/output'; +import inquirer from 'inquirer'; + +export function createDiscoverCommand(): Command { + const cmd = new Command('discover') + .description('Search for MCP servers across registries') + .argument('<query>', 'Search query (e.g., "slack", "database", "terraform")') + .option('--category <category>', 'Filter by category (devops, data-platform, analytics, security)') + .option('--verified', 'Only show verified servers') + .option('--transport <type>', 'Filter by transport (stdio, sse)', undefined) + .option('--registry <source>', 'Search specific registry (official, glama, smithery, all)', 'all') + .option('--limit <n>', 'Maximum results to show', '20') + .option('--output <format>', 'Output format (table, json, yaml)', 'table') + .option('--interactive', 'Interactive browsing mode') + .action(async (query, options) => { + await discoverAction(query, options); + }); + return cmd; +} +``` + +**Table Output Format:** +``` 
+┌─────────────────┬────────────────────────────────┬───────────────────────┬───────────┬──────────┬────────────┐ +│ NAME │ DESCRIPTION │ PACKAGE │ TRANSPORT │ VERIFIED │ POPULARITY │ +├─────────────────┼────────────────────────────────┼───────────────────────┼───────────┼──────────┼────────────┤ +│ slack-mcp │ Slack workspace integration... │ @anthropic/slack-mcp │ stdio │ ✓ │ ★★★★☆ │ +│ slack-tools │ Send messages, manage chan... │ slack-mcp-server │ stdio │ │ ★★★☆☆ │ +└─────────────────┴────────────────────────────────┴───────────────────────┴───────────┴──────────┴────────────┘ + +Run 'mcpctl install <name>' to set up a server +``` + +**Implementation Details:** + +```typescript +// discover-action.ts +import chalk from 'chalk'; +import Table from 'cli-table3'; + +const CATEGORIES = ['devops', 'data-platform', 'analytics', 'security', 'productivity', 'development'] as const; + +interface DiscoverOptions { + category?: string; + verified?: boolean; + transport?: 'stdio' | 'sse'; + registry?: 'official' | 'glama' | 'smithery' | 'all'; + limit?: string; + output?: 'table' | 'json' | 'yaml'; + interactive?: boolean; +} + +export async function discoverAction(query: string, options: DiscoverOptions): Promise<void> { + const client = new RegistryClient(); + + const searchOptions = { + query, + limit: parseInt(options.limit ?? '20', 10), + registries: options.registry === 'all' + ? 
['official', 'glama', 'smithery'] + : [options.registry], + verified: options.verified, + transport: options.transport, + category: options.category, + }; + + const results = await client.search(searchOptions); + + if (results.length === 0) { + console.log(chalk.yellow('No MCP servers found matching your query.')); + console.log(chalk.dim('Try a different search term or remove filters.')); + process.exit(2); // Exit code 2 = no results + } + + if (options.interactive) { + await interactiveMode(results); + return; + } + + switch (options.output) { + case 'json': + console.log(JSON.stringify(results, null, 2)); + break; + case 'yaml': + console.log(formatYaml(results)); + break; + default: + printTable(results); + console.log(chalk.cyan("\nRun 'mcpctl install <name>' to set up a server")); + } +} + +function printTable(servers: RegistryServer[]): void { + const table = new Table({ + head: ['NAME', 'DESCRIPTION', 'PACKAGE', 'TRANSPORT', 'VERIFIED', 'POPULARITY'], + colWidths: [18, 35, 25, 10, 9, 12], + wordWrap: true, + }); + + for (const server of servers) { + table.push([ + server.name, + truncate(server.description, 32), + server.packages.npm ?? server.packages.pypi ?? '-', + server.transport, + server.verified ? chalk.green('✓') : '', + popularityStars(server.popularityScore), + ]); + } + + console.log(table.toString()); +} + +function popularityStars(score: number): string { + const stars = Math.round(score / 20); // 0-100 -> 0-5 stars + return '★'.repeat(stars) + '☆'.repeat(5 - stars); +} +``` + +**Interactive Mode with Inquirer:** +```typescript +async function interactiveMode(servers: RegistryServer[]): Promise<void> { + const { selected } = await inquirer.prompt([ + { + type: 'list', + name: 'selected', + message: 'Select an MCP server to install:', + choices: servers.map(s => ({ + name: `${s.name} - ${truncate(s.description, 50)} ${s.verified ? 
'✓' : ''}`, + value: s.name, + })), + pageSize: 15, + }, + ]); + + const { confirm } = await inquirer.prompt([ + { + type: 'confirm', + name: 'confirm', + message: `Install ${selected}?`, + default: true, + }, + ]); + + if (confirm) { + // Trigger install command + const installCmd = await import('./install'); + await installCmd.installAction(selected, {}); + } +} +``` + +**Exit Codes for Scripting:** +- 0: Success, results found +- 1: Error (network, API, etc.) +- 2: No results found + +**Category Inference for Data Analyst Tools:** +Include categories relevant to BI/analytics: +- 'data-platform': BigQuery, Snowflake, Databricks, dbt +- 'analytics': Tableau, Looker, Metabase +- 'database': PostgreSQL, MySQL, MongoDB tools + +**Test Strategy:** + +TDD approach - write tests BEFORE implementation: + +1. **Command parsing tests:** + - Test all option combinations parse correctly + - Test query argument is required + - Test invalid transport value rejected + - Test invalid registry value rejected + - Test limit parsed as integer + +2. **Output formatting tests:** + - Test table format with varying description lengths + - Test table truncation at specified width + - Test JSON output is valid JSON array + - Test YAML output is valid YAML + - Test popularity score to stars conversion (0-100 -> 0-5 stars) + - Test verified badge displays correctly + +3. **Interactive mode tests (mock inquirer):** + - Test server list displayed as choices + - Test selection triggers install confirmation + - Test cancel does not trigger install + - Test pagination with >15 results + +4. **Exit code tests:** + - Test exit(0) when results found + - Test exit(1) on registry client error + - Test exit(2) when no results match + +5. **Integration tests:** + - Test full command execution with mocked RegistryClient + - Test --verified filter reduces results + - Test --category filter applies correctly + - Test --registry limits to single source + +6. 
**Filter combination tests:**
+   - Test verified + transport + category combined
+   - Test filters with no matches returns empty
+
+Run: `pnpm --filter @mcpctl/cli test:run -- --coverage commands/discover`
+
+## Subtasks
+
+### 23.1. Write TDD Test Suites for Command Parsing, Option Validation, and Exit Codes
+
+**Status:** pending
+**Dependencies:** None
+
+Create comprehensive Vitest test suites for the discover command's argument parsing, option validation, and exit code behavior BEFORE implementation, following the project's TDD approach.
+
+**Details:**
+
+Create src/cli/tests/unit/commands/discover.test.ts with the following test categories:
+
+**Command Parsing Tests:**
+- Test 'mcpctl discover' without query argument shows error and exits with code 1 (invalid arguments are an error per the exit-code spec; code 2 is reserved for "no results")
+- Test 'mcpctl discover slack' parses query correctly as 'slack'
+- Test 'mcpctl discover "database tools"' handles quoted multi-word queries
+- Test query argument is accessible in action handler
+
+**Option Validation Tests:**
+- Test --category accepts valid values: 'devops', 'data-platform', 'analytics', 'security', 'productivity', 'development'
+- Test --category with invalid value shows error listing valid options
+- Test --verified flag sets verified=true in options
+- Test --transport accepts 'stdio' and 'sse' only, rejects invalid values
+- Test --registry accepts 'official', 'glama', 'smithery', 'all' (default), rejects others
+- Test --limit parses as integer (e.g., '20' -> 20)
+- Test --limit with non-numeric value shows validation error
+- Test --output accepts 'table', 'json', 'yaml', rejects others
+- Test --interactive flag sets interactive=true
+
+**Default Values Tests:**
+- Test --registry defaults to 'all' when not specified
+- Test --limit defaults to '20' when not specified
+- Test --output defaults to 'table' when not specified
+
+**Exit Code Tests:**
+- Test exit code 0 when results are found
+- Test exit code 1 on RegistryClient errors (network, API failures)
+- Test 
exit code 2 when no results match query/filters + +**Filter Combination Tests:** +- Test --verified + --category + --transport combined correctly +- Test all filters with empty results returns exit code 2 + +Create src/cli/tests/fixtures/mock-registry-client.ts with MockRegistryClient class that returns configurable results or throws configurable errors for testing. Use vitest mock functions to capture calls to verify correct option passing. + +All tests should initially fail (TDD red phase) as the discover command doesn't exist yet. + +### 23.2. Write TDD Test Suites for Output Formatters with Security Sanitization + +**Status:** pending +**Dependencies:** 23.1 + +Create comprehensive Vitest test suites for all three output formats (table, JSON, YAML), popularity star rendering, description truncation, and critical security tests for terminal escape sequence sanitization. + +**Details:** + +Create src/cli/tests/unit/commands/discover-output.test.ts with the following test categories: + +**Table Output Tests:** +- Test table header contains: NAME, DESCRIPTION, PACKAGE, TRANSPORT, VERIFIED, POPULARITY +- Test table column widths match spec: 18, 35, 25, 10, 9, 12 +- Test word wrapping works for long descriptions +- Test description truncation at 32 characters with ellipsis +- Test verified=true shows green checkmark (chalk.green('✓')) +- Test verified=false shows empty string +- Test footer shows "Run 'mcpctl install <name>' to set up a server" +- Test empty results array shows yellow 'No MCP servers found' message + +**Popularity Stars Tests (popularityStars function):** +- Test score 0 returns '☆☆☆☆☆' (0 filled stars) +- Test score 20 returns '★☆☆☆☆' (1 filled star) +- Test score 50 returns '★★★☆☆' (2.5 rounds to 3 stars - verify rounding) +- Test score 100 returns '★★★★★' (5 filled stars) +- Test intermediate values: 10->1, 30->2, 60->3, 80->4 + +**JSON Output Tests:** +- Test JSON output is valid JSON (passes JSON.parse()) +- Test JSON output is pretty-printed 
with 2-space indentation
+- Test JSON array contains all RegistryServer fields
+- Test JSON is jq-parseable: 'echo output | jq .[]' works
+- Test --output json does NOT print footer message
+
+**YAML Output Tests:**
+- Test YAML output is valid YAML (passes yaml.load())
+- Test YAML output uses formatYaml utility from utils/output
+- Test --output yaml does NOT print footer message
+
+**SECURITY - Terminal Escape Sequence Sanitization Tests:**
+- Test description containing ANSI codes '\x1b[31mRED\x1b[0m' is sanitized
+- Test description containing '\033[1mBOLD\033[0m' is sanitized
+- Test name containing escape sequences is sanitized
+- Test package name containing escape sequences is sanitized
+- Test sanitization removes all \x1b[ and \033[ patterns
+- Test sanitization preserves normal text content
+- Test prevents cursor movement codes (\x1b[2J screen clear, etc.)
+
+**Truncate Function Tests:**
+- Test truncate('short', 32) returns 'short' unchanged
+- Test truncate('exactly 32 characters string!!!', 32) returns unchanged
+- Test truncate('this is a very long description that exceeds limit', 32) returns 'this is a very long descripti...' (first 29 chars + '...', 32 chars total, per truncate's slice(0, maxLength - 3))
+
+Create src/cli/tests/fixtures/mock-servers.ts with sample RegistryServer objects including edge cases: very long descriptions, special characters, potential injection strings, missing optional fields (packages.pypi undefined).
+
+### 23.3. Implement discover Command Definition and Action Handler with Sanitization
+
+**Status:** pending
+**Dependencies:** 23.1, 23.2
+
+Implement the discover command using Commander.js following the project's command registration pattern, with the discoverAction handler that orchestrates RegistryClient calls, applies filters, handles errors, and sets correct exit codes. 
+ +**Details:** + +Create src/cli/src/commands/discover.ts implementing the CommandModule interface from the project's command registry pattern: + +```typescript +// discover.ts +import { Command } from 'commander'; +import { RegistryClient } from '../registry/client'; +import { sanitizeTerminalOutput } from '../utils/sanitize'; +import { DiscoverOptions, CATEGORIES } from './discover-types'; +import { printResults } from './discover-output'; + +const VALID_TRANSPORTS = ['stdio', 'sse'] as const; +const VALID_REGISTRIES = ['official', 'glama', 'smithery', 'all'] as const; +const VALID_OUTPUT_FORMATS = ['table', 'json', 'yaml'] as const; + +export function createDiscoverCommand(): Command { + const cmd = new Command('discover') + .description('Search for MCP servers across registries') + .argument('<query>', 'Search query (e.g., "slack", "database", "terraform")') + .option('--category <category>', `Filter by category (${CATEGORIES.join(', ')})`) + .option('--verified', 'Only show verified servers') + .option('--transport <type>', 'Filter by transport (stdio, sse)') + .option('--registry <source>', 'Search specific registry (official, glama, smithery, all)', 'all') + .option('--limit <n>', 'Maximum results to show', '20') + .option('--output <format>', 'Output format (table, json, yaml)', 'table') + .option('--interactive', 'Interactive browsing mode') + .action(async (query, options) => { + await discoverAction(query, options); + }); + return cmd; +} +``` + +Create src/cli/src/commands/discover-action.ts: +```typescript +export async function discoverAction(query: string, options: DiscoverOptions): Promise<void> { + // 1. Validate options (transport, registry, output, category) + // 2. Parse limit as integer with validation + // 3. Build SearchOptions for RegistryClient + // 4. Call client.search() wrapped in try/catch + // 5. Handle empty results -> exit code 2 + // 6. Handle network/API errors -> exit code 1 with structured logging + // 7. 
Sanitize all string fields in results (prevent terminal injection) + // 8. Delegate to printResults() or interactiveMode() based on options +} +``` + +Create src/cli/src/utils/sanitize.ts: +```typescript +export function sanitizeTerminalOutput(text: string): string { + // Remove ANSI escape sequences: \x1b[...m, \033[...m + // Remove cursor control sequences + // Preserve legitimate text content + return text + .replace(/\x1b\[[0-9;]*[a-zA-Z]/g, '') + .replace(/\033\[[0-9;]*[a-zA-Z]/g, '') + .replace(/[\x00-\x08\x0B\x0C\x0E-\x1F]/g, ''); +} + +export function sanitizeServerResult(server: RegistryServer): RegistryServer { + return { + ...server, + name: sanitizeTerminalOutput(server.name), + description: sanitizeTerminalOutput(server.description), + // sanitize other user-facing string fields + }; +} +``` + +Create src/cli/src/commands/discover-types.ts with TypeScript interfaces and constants. + +Register discover command via CommandRegistry following existing patterns in src/cli/src/commands/. + +### 23.4. Implement Output Formatters: Table with cli-table3, JSON, and YAML + +**Status:** pending +**Dependencies:** 23.2, 23.3 + +Implement the three output format handlers (table, JSON, YAML) including the popularity stars renderer, description truncation, verified badge display, and footer message. Table uses cli-table3 with specified column widths. 
+ +**Details:** + +Create src/cli/src/commands/discover-output.ts: + +```typescript +import chalk from 'chalk'; +import Table from 'cli-table3'; +import { RegistryServer } from '../registry/types'; +import { formatYaml } from '../utils/output'; + +export function printResults(servers: RegistryServer[], format: 'table' | 'json' | 'yaml'): void { + switch (format) { + case 'json': + printJsonOutput(servers); + break; + case 'yaml': + printYamlOutput(servers); + break; + default: + printTableOutput(servers); + console.log(chalk.cyan("\nRun 'mcpctl install <name>' to set up a server")); + } +} + +function printTableOutput(servers: RegistryServer[]): void { + const table = new Table({ + head: ['NAME', 'DESCRIPTION', 'PACKAGE', 'TRANSPORT', 'VERIFIED', 'POPULARITY'], + colWidths: [18, 35, 25, 10, 9, 12], + wordWrap: true, + style: { head: ['cyan'] } + }); + + for (const server of servers) { + table.push([ + server.name, + truncate(server.description, 32), + getPackageName(server.packages), + server.transport, + server.verified ? chalk.green('✓') : '', + popularityStars(server.popularityScore), + ]); + } + + console.log(table.toString()); +} + +function printJsonOutput(servers: RegistryServer[]): void { + console.log(JSON.stringify(servers, null, 2)); +} + +function printYamlOutput(servers: RegistryServer[]): void { + console.log(formatYaml(servers)); +} + +export function truncate(text: string, maxLength: number): string { + if (text.length <= maxLength) return text; + return text.slice(0, maxLength - 3) + '...'; +} + +export function popularityStars(score: number): string { + const stars = Math.round(score / 20); // 0-100 -> 0-5 stars + return '★'.repeat(stars) + '☆'.repeat(5 - stars); +} + +function getPackageName(packages: RegistryServer['packages']): string { + return packages.npm ?? packages.pypi ?? packages.docker ?? 
'-'; +} +``` + +Create src/cli/src/commands/discover-no-results.ts for handling empty results: +```typescript +export function printNoResults(): void { + console.log(chalk.yellow('No MCP servers found matching your query.')); + console.log(chalk.dim('Try a different search term or remove filters.')); +} +``` + +Ensure formatYaml utility exists in src/cli/src/utils/output.ts (may need to create if not existing from Task 7). Install cli-table3 dependency: 'pnpm --filter @mcpctl/cli add cli-table3'. + +**Data Analyst/BI Category Support:** +Ensure CATEGORIES constant includes categories relevant to data analysts: +- 'data-platform': BigQuery, Snowflake, Databricks, dbt +- 'analytics': Tableau, Looker, Metabase, Power BI +- 'database': PostgreSQL, MySQL, MongoDB connectors +- 'visualization': Grafana, Superset integrations + +This supports the Data Analyst persona requirement from the task context. + +### 23.5. Implement Interactive Mode with Inquirer and Install Integration + +**Status:** pending +**Dependencies:** 23.3, 23.4 + +Implement the interactive browsing mode using Inquirer.js that allows users to scroll through results, select a server, confirm installation, and trigger the install command. Include graceful handling of user cancellation. 
+ +**Details:** + +Create src/cli/src/commands/discover-interactive.ts: + +```typescript +import inquirer from 'inquirer'; +import chalk from 'chalk'; +import { RegistryServer } from '../registry/types'; +import { truncate } from './discover-output'; + +export async function interactiveMode(servers: RegistryServer[]): Promise<void> { + // Step 1: Display server selection list + const { selected } = await inquirer.prompt([ + { + type: 'list', + name: 'selected', + message: 'Select an MCP server to install:', + choices: servers.map(s => ({ + name: formatChoice(s), + value: s.name, + })), + pageSize: 15, // Show 15 items before scrolling + }, + ]); + + // Step 2: Show server details and confirm installation + const selectedServer = servers.find(s => s.name === selected); + if (selectedServer) { + console.log(chalk.dim('\nSelected server details:')); + console.log(chalk.dim(` Description: ${selectedServer.description}`)); + console.log(chalk.dim(` Package: ${selectedServer.packages.npm ?? selectedServer.packages.pypi ?? '-'}`)); + console.log(chalk.dim(` Transport: ${selectedServer.transport}`)); + } + + const { confirm } = await inquirer.prompt([ + { + type: 'confirm', + name: 'confirm', + message: `Install ${selected}?`, + default: true, + }, + ]); + + if (confirm) { + // Dynamically import install command to avoid circular dependencies + const { installAction } = await import('./install'); + await installAction([selected], {}); // Pass as array per install command spec + } else { + console.log(chalk.dim('Installation cancelled.')); + } +} + +function formatChoice(server: RegistryServer): string { + const verifiedBadge = server.verified ? 
chalk.green(' ✓') : ''; + const description = truncate(server.description, 50); + return `${server.name} - ${description}${verifiedBadge}`; +} +``` + +Create src/cli/tests/unit/commands/discover-interactive.test.ts with mock inquirer tests: +- Test server list displayed as scrollable choices +- Test selection triggers install confirmation prompt +- Test confirm=true triggers installAction with correct server name +- Test confirm=false outputs 'Installation cancelled' and exits gracefully +- Test pagination works with >15 results (pageSize check) +- Test Ctrl+C cancellation is handled gracefully (inquirer throws on SIGINT) +- Test formatChoice includes verified badge for verified servers +- Test formatChoice truncates long descriptions correctly + +Update src/cli/src/commands/discover-action.ts to call interactiveMode when options.interactive is true: +```typescript +if (options.interactive) { + await interactiveMode(sanitizedResults); + return; +} +``` + +**Error Handling:** +- Wrap inquirer prompts in try/catch to handle Ctrl+C gracefully +- Exit with code 0 on user cancellation (not an error) +- Log structured message on cancellation for SRE observability + +**Integration with Install Command:** +- The install command (Task 24) may not exist yet - create a stub if needed +- src/cli/src/commands/install.ts stub: export async function installAction(servers: string[], options: {}): Promise<void> { console.log('Install not implemented yet'); } diff --git a/.taskmaster/tasks/task_024.md b/.taskmaster/tasks/task_024.md new file mode 100644 index 0000000..82209b0 --- /dev/null +++ b/.taskmaster/tasks/task_024.md @@ -0,0 +1,698 @@ +# Task ID: 24 + +**Title:** Implement mcpctl install with LLM-Assisted Auto-Configuration + +**Status:** pending + +**Dependencies:** 22, 23 + +**Priority:** medium + +**Description:** Create the `mcpctl install <server-name>` command that uses a local LLM to automatically read MCP server documentation, generate envTemplate/setup 
guides/profiles, and walk users through configuration with validation. + +**Details:** + +Create src/cli/src/commands/install.ts: + +```typescript +import { Command } from 'commander'; +import { RegistryClient } from '../registry/client'; +import { LLMProvider } from '../llm/provider'; +import { SetupWizard } from '../setup/wizard'; +import { McpdClient } from '../api/mcpd-client'; + +export function createInstallCommand(): Command { + const cmd = new Command('install') + .description('Install and configure an MCP server') + .argument('<servers...>', 'Server name(s) from registry') + .option('--non-interactive', 'Use env vars for credentials, no prompts') + .option('--profile-name <name>', 'Name for the created profile') + .option('--project <name>', 'Auto-add to this project') + .option('--dry-run', 'Show configuration without applying') + .option('--skip-llm', 'Only use registry metadata, no LLM analysis') + .action(async (servers, options) => { + await installAction(servers, options); + }); + return cmd; +} +``` + +**Installation Flow:** + +```typescript +// install-action.ts +export async function installAction( + serverNames: string[], + options: InstallOptions +): Promise<void> { + const registry = new RegistryClient(); + const mcpd = new McpdClient(); + const llm = await getLLMProvider(); // From Task 12 config + + for (const serverName of serverNames) { + console.log(chalk.blue(`\nInstalling ${serverName}...`)); + + // Step 1: Fetch server metadata from registry + const serverMeta = await registry.getServer(serverName); + if (!serverMeta) { + console.error(chalk.red(`Server '${serverName}' not found in registries`)); + continue; + } + + // Step 2: Check if envTemplate is complete + let envTemplate = serverMeta.envTemplate; + let setupGuide = serverMeta.setupGuide; + let defaultProfiles: ProfileConfig[] = []; + + const needsLLMAnalysis = ( + !options.skipLlm && + (!envTemplate || envTemplate.length === 0 || hasIncompleteEnvVars(envTemplate)) + ); + + // Step 
3: LLM-assisted configuration generation
+    if (needsLLMAnalysis && serverMeta.repositoryUrl) {
+      console.log(chalk.dim('Analyzing server documentation with LLM...'));
+
+      const readme = await fetchReadme(serverMeta.repositoryUrl);
+      const llmResult = await analyzeWithLLM(llm, readme, serverMeta);
+
+      // Merge LLM results with registry data
+      envTemplate = mergeEnvTemplates(envTemplate, llmResult.envTemplate);
+      setupGuide = llmResult.setupGuide || setupGuide;
+      defaultProfiles = llmResult.profiles || [];
+    }
+
+    if (options.dryRun) {
+      printDryRun(serverMeta, envTemplate, setupGuide, defaultProfiles);
+      continue;
+    }
+
+    // Step 4: Register MCP server in mcpd
+    const registeredServer = await mcpd.registerServer({
+      name: serverMeta.name,
+      // `throw` is a statement in JS/TS and cannot appear directly in a
+      // ternary branch; wrap it in an immediately-invoked arrow to throw
+      // as an expression.
+      command: serverMeta.packages.npm
+        ? `npx -y ${serverMeta.packages.npm}`
+        : serverMeta.packages.docker
+          ? `docker run ${serverMeta.packages.docker}`
+          : (() => { throw new Error('No package source available'); })(),
+      envTemplate,
+      transport: serverMeta.transport,
+    });
+
+    // Step 5: Run setup wizard to collect credentials
+    const wizard = new SetupWizard(envTemplate, { nonInteractive: options.nonInteractive });
+    const credentials = await wizard.run();
+
+    // Step 6: Create profile
+    const profileName = options.profileName || `${serverMeta.name}-default`;
+    const profile = await mcpd.createProfile({
+      name: profileName,
+      serverId: registeredServer.id,
+      config: credentials,
+    });
+
+    // Step 7: Optionally add to project
+    if (options.project) {
+      await mcpd.addProfileToProject(options.project, profile.id);
+      console.log(chalk.green(`Added to project '${options.project}'`));
+    }
+
+    console.log(chalk.green(`✓ ${serverMeta.name} installed successfully`));
+    console.log(chalk.dim(`  Profile: ${profileName}`));
+  }
+}
+```
+
+**LLM Analysis Implementation:**
+
+```typescript
+// llm-analyzer.ts
+import { z } from 'zod';
+
+const LLMAnalysisSchema = z.object({
+  envTemplate: z.array(z.object({
+    name: z.string(),
+    description: z.string(),
+    
isSecret: z.boolean(), + setupUrl: z.string().url().optional(), + defaultValue: z.string().optional(), + })), + setupGuide: z.string().optional(), + profiles: z.array(z.object({ + name: z.string(), + description: z.string(), + permissions: z.array(z.string()), + })).optional(), +}); + +const ANALYSIS_PROMPT = ` +Analyze this MCP server README and extract configuration information. + +README: +{readme} + +Extract and return JSON with: +1. envTemplate: Array of required environment variables with: + - name: The env var name (e.g., SLACK_BOT_TOKEN) + - description: What this variable is for and where to get it + - isSecret: true if this is a secret/token/password + - setupUrl: URL to docs for obtaining this credential (if mentioned) + +2. setupGuide: Step-by-step setup instructions in markdown + +3. profiles: Suggested permission profiles (e.g., read-only, admin, limited) + +Return ONLY valid JSON matching this exact schema. No markdown formatting. +`; + +export async function analyzeWithLLM( + llm: LLMProvider, + readme: string, + serverMeta: RegistryServer +): Promise<z.infer<typeof LLMAnalysisSchema>> { + // Sanitize README to prevent prompt injection + const sanitizedReadme = sanitizeForLLM(readme); + + const prompt = ANALYSIS_PROMPT.replace('{readme}', sanitizedReadme); + + const response = await llm.complete(prompt, { + maxTokens: 2000, + temperature: 0.1, // Low temperature for structured output + }); + + // Extract JSON from response (handle markdown code blocks) + const jsonStr = extractJSON(response); + + // Validate with Zod + const parsed = LLMAnalysisSchema.safeParse(JSON.parse(jsonStr)); + if (!parsed.success) { + console.warn(chalk.yellow('LLM output validation failed, using registry data only')); + return { envTemplate: [], setupGuide: undefined, profiles: [] }; + } + + return parsed.data; +} + +function sanitizeForLLM(text: string): string { + // Remove potential prompt injection patterns + return text + .replace(/```[\s\S]*?```/g, (match) => match) 
// Keep code blocks + .replace(/\{\{.*?\}\}/g, '') // Remove template syntax + .replace(/\[INST\]/gi, '') // Remove common injection patterns + .replace(/\[\/?SYSTEM\]/gi, '') + .slice(0, 50000); // Limit length +} +``` + +**GitHub README Fetching:** + +```typescript +// github.ts +export async function fetchReadme(repoUrl: string): Promise<string> { + const { owner, repo } = parseGitHubUrl(repoUrl); + + // Try common README locations + const paths = ['README.md', 'readme.md', 'README.rst', 'README']; + + for (const path of paths) { + try { + const response = await fetch( + `https://raw.githubusercontent.com/${owner}/${repo}/main/${path}` + ); + if (response.ok) { + return await response.text(); + } + // Try master branch + const masterResponse = await fetch( + `https://raw.githubusercontent.com/${owner}/${repo}/master/${path}` + ); + if (masterResponse.ok) { + return await masterResponse.text(); + } + } catch { + continue; + } + } + + throw new Error(`Could not fetch README from ${repoUrl}`); +} +``` + +**Security Considerations:** +- Sanitize LLM outputs before using (prevent prompt injection from malicious READMEs) +- Validate generated envTemplate with Zod schema +- Never auto-execute commands suggested by LLM without explicit user approval +- Log LLM interactions for audit (without sensitive data) +- Rate limit LLM calls to prevent abuse + +**Data Platform Auth Pattern Recognition:** +LLM should understand complex auth patterns commonly found in data tools: +- Service account JSON (GCP BigQuery, Vertex AI) +- Connection strings (Snowflake, Databricks) +- OAuth flows (dbt Cloud, Tableau) +- IAM roles (AWS Redshift, Athena) +- API keys with scopes (Fivetran, Airbyte) + +**Test Strategy:** + +TDD approach - write tests BEFORE implementation: + +1. **Command parsing tests:** + - Test single server argument + - Test multiple servers (batch install) + - Test all options parse correctly + - Test --non-interactive and --dry-run flags + +2. 
**Registry fetch tests:** + - Test successful server lookup + - Test server not found handling + - Test registry error handling + +3. **LLM prompt generation tests:** + - Test prompt template populated correctly + - Test README truncation at 50k chars + - Test sanitization removes injection patterns + - Test code blocks preserved in sanitization + +4. **LLM response parsing tests:** + - Test valid JSON extraction from plain response + - Test JSON extraction from markdown code blocks + - Test Zod validation accepts valid schema + - Test Zod validation rejects invalid schema + - Test graceful fallback on validation failure + +5. **GitHub README fetch tests:** + - Test main branch fetch + - Test master branch fallback + - Test different README filename handling + - Test repository URL parsing (https, git@) + - Test fetch failure handling + +6. **envTemplate merge tests:** + - Test LLM results merged with registry data + - Test LLM results don't override existing registry data + - Test deduplication by env var name + +7. **Full install flow tests:** + - Test complete flow with mocked dependencies + - Test dry-run shows config without applying + - Test skip-llm uses registry data only + - Test non-interactive uses env vars + - Test batch install processes all servers + +8. **Security tests:** + - Test prompt injection patterns sanitized + - Test malformed LLM output rejected + - Test no command auto-execution + +9. **Data platform tests:** + - Test recognition of service account JSON patterns + - Test recognition of connection string patterns + - Test OAuth flow detection + +Run: `pnpm --filter @mcpctl/cli test:run -- --coverage commands/install` + +## Subtasks + +### 24.1. 
Write TDD Test Suites for Install Command Parsing, GitHub README Fetching, and Core Types + +**Status:** pending +**Dependencies:** None + +Create comprehensive Vitest test suites for the install command's CLI parsing, GitHub README fetching module with proxy support, and foundational types/Zod schemas BEFORE implementation, following the project's strict TDD approach. + +**Details:** + +Create src/cli/tests/unit/commands/install/ directory with test files. Write tests for: + +1. **Command Parsing Tests** (install.test.ts): + - Test single server argument parsing + - Test multiple servers (batch install): `mcpctl install slack jira github` + - Test all options parse correctly: --non-interactive, --profile-name, --project, --dry-run, --skip-llm + - Test required argument validation (exits with code 2 if no server specified) + - Test option combinations are mutually compatible + +2. **GitHub README Fetching Tests** (github-fetcher.test.ts): + - Test parseGitHubUrl() extracts owner/repo from various URL formats (https://github.com/owner/repo, git@github.com:owner/repo.git) + - Test fetchReadme() tries multiple paths: README.md, readme.md, README.rst, README + - Test branch fallback: main -> master + - Test HTTP_PROXY/HTTPS_PROXY environment variable support using undici ProxyAgent + - Test custom CA certificate support (NODE_EXTRA_CA_CERTS) + - Test GitHub rate limit handling (403 with X-RateLimit-Remaining: 0) with exponential backoff + - Test timeout handling (30s default) with AbortController + - Create test fixtures: mock README responses in src/cli/tests/fixtures/readmes/ + +3. **Type and Schema Tests** (types.test.ts): + - Test InstallOptions Zod schema validates all fields + - Test EnvTemplateEntry schema requires name, description, isSecret + - Test LLMAnalysisResult schema validates envTemplate array, setupGuide string, profiles array + - Test ProfileConfig schema validates name, description, permissions array + +4. 
**Mock Infrastructure**: + - Create MockRegistryClient in src/cli/tests/mocks/ that implements RegistryClient interface + - Create MockLLMProvider that returns deterministic responses for testing + - Create MockMcpdClient for testing server registration and profile creation + - Use msw (Mock Service Worker) for GitHub API mocking + +All tests must fail initially (red phase) with 'module not found' or 'function not implemented' errors. + +### 24.2. Write TDD Test Suites for LLM Analysis with Security Sanitization and Data Platform Auth Recognition + +**Status:** pending +**Dependencies:** 24.1 + +Create comprehensive Vitest test suites for the LLM-based README analysis module, focusing on prompt injection prevention, output validation with Zod, and recognition of complex data platform authentication patterns (BigQuery service accounts, Snowflake connection strings, dbt OAuth). + +**Details:** + +Create src/cli/tests/unit/llm/analyzer.test.ts with comprehensive test coverage: + +1. **Prompt Sanitization Tests** (SECURITY CRITICAL): + - Test sanitizeForLLM() removes prompt injection patterns: [INST], [/SYSTEM], </s>, <|endoftext|>, <<SYS>> + - Test removal of template syntax: {{variable}}, ${command} + - Test preservation of legitimate code blocks (```typescript...```) + - Test length truncation at 50000 characters + - Test handling of Unicode edge cases and zero-width characters + - Create malicious README fixtures in src/cli/tests/fixtures/readmes/malicious/ + +2. **LLM Output Validation Tests**: + - Test extractJSON() handles markdown code blocks: ```json...``` + - Test extractJSON() handles raw JSON without code blocks + - Test LLMAnalysisSchema Zod validation catches missing required fields + - Test validation rejects envTemplate entries without isSecret field + - Test graceful fallback returns empty result on validation failure (warn, don't crash) + - Test malformed JSON handling (truncated, invalid syntax) + +3. 
**Data Platform Auth Pattern Recognition Tests** (Principal Data Engineer focus): + - Test BigQuery: recognizes GOOGLE_APPLICATION_CREDENTIALS, service account JSON patterns + - Test Snowflake: recognizes SNOWFLAKE_ACCOUNT, SNOWFLAKE_USER, connection string format + - Test dbt Cloud: recognizes DBT_API_KEY, DBT_ACCOUNT_ID, project selection patterns + - Test Databricks: recognizes DATABRICKS_HOST, DATABRICKS_TOKEN, cluster configuration + - Test AWS data services: recognizes AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, IAM role ARN + - Test Fivetran/Airbyte: recognizes API key with scopes pattern + - Create test fixtures: src/cli/tests/fixtures/readmes/data-platforms/ with realistic READMEs + +4. **LLM Provider Integration Tests**: + - Test analyzeWithLLM() uses injected LLMProvider (dependency injection) + - Test temperature set to 0.1 for structured output + - Test maxTokens set appropriately (2000) + - Test timeout handling (60s default) + - Test circuit breaker triggers on consecutive failures + +5. **Profile Generation Tests**: + - Test default profiles extracted: read-only, admin, limited access + - Test permissions array parsing + - Test profile descriptions are sanitized + +6. **Structured Logging Tests** (SRE focus): + - Test LLM interactions are logged with requestId, duration_ms, input_tokens + - Test sensitive data (API keys, tokens) are NEVER logged + - Test README content is not logged in full (truncate in logs) + +### 24.3. Implement GitHub README Fetcher with Proxy Support and Rate Limit Handling + +**Status:** pending +**Dependencies:** 24.1 + +Implement the GitHub README fetching module with enterprise networking support (HTTP/HTTPS proxy, custom CA certs), intelligent branch detection, rate limit handling with exponential backoff, and proper error handling for network failures. 
+ +**Details:** + +Create src/cli/src/install/github-fetcher.ts: + +```typescript +import { ProxyAgent } from 'undici'; +import { createHash } from 'crypto'; + +export interface GitHubFetcherConfig { + proxyUrl?: string; // From HTTP_PROXY/HTTPS_PROXY + caFile?: string; // Custom CA certificate path + timeout?: number; // Default 30000ms + maxRetries?: number; // Default 3 + rateLimitWaitMs?: number; // Default 60000ms +} + +export interface ParsedGitHubUrl { + owner: string; + repo: string; +} + +export class GitHubReadmeFetcher { + private config: Required<GitHubFetcherConfig>; + private agent?: ProxyAgent; + + constructor(config: Partial<GitHubFetcherConfig> = {}) { ... } + + parseGitHubUrl(repoUrl: string): ParsedGitHubUrl { ... } + + async fetchReadme(repoUrl: string): Promise<string> { ... } + + private async fetchWithRetry(url: string, attempt: number): Promise<Response> { ... } + + private handleRateLimit(response: Response): Promise<void> { ... } +} +``` + +**Implementation Requirements:** + +1. **URL Parsing**: + - Support HTTPS: `https://github.com/owner/repo` + - Support HTTPS with .git: `https://github.com/owner/repo.git` + - Support SSH: `git@github.com:owner/repo.git` + - Extract owner and repo, strip .git suffix + - Throw descriptive error for invalid URLs + +2. **README Fetching**: + - Try paths in order: README.md, readme.md, README.rst, README, Readme.md + - Try branches in order: main, master, HEAD + - Use raw.githubusercontent.com for content fetching + - Return first successful fetch + +3. **Proxy Support**: + - Detect HTTP_PROXY, HTTPS_PROXY, NO_PROXY environment variables + - Create undici ProxyAgent when proxy configured + - Pass agent to fetch() dispatcher option + - Support custom CA via NODE_EXTRA_CA_CERTS or config.caFile + +4. 
**Rate Limit Handling**: + - Check X-RateLimit-Remaining header + - On 403 with rate limit exceeded, wait until X-RateLimit-Reset + - Log rate limit events for SRE visibility + - Implement exponential backoff: 1s, 2s, 4s (max 3 retries) + +5. **Error Handling**: + - Throw ReadmeNotFoundError if all paths fail + - Throw NetworkError on connection failures + - Throw RateLimitError if exhausted after retries + - Include repository URL in all error messages + +6. **SRE Metrics**: + - Export metrics: github_fetch_duration_seconds, github_rate_limit_remaining gauge + - Log structured events: { event: 'github_fetch', repo, branch, path, duration_ms } + +### 24.4. Implement LLM-Based README Analyzer with Secure Prompt Construction and Zod Validation + +**Status:** pending +**Dependencies:** 24.1, 24.2, 24.3 + +Implement the LLM analysis module that processes MCP server READMEs to extract environment templates, setup guides, and suggested profiles using the pluggable LLMProvider interface with robust input sanitization and output validation. 
+ +**Details:** + +Create src/cli/src/install/llm-analyzer.ts: + +```typescript +import { z } from 'zod'; +import type { LLMProvider } from '../llm/provider'; +import type { RegistryServer } from '../registry/types'; + +export const EnvTemplateEntrySchema = z.object({ + name: z.string().min(1), + description: z.string().min(10), + isSecret: z.boolean(), + setupUrl: z.string().url().optional(), + defaultValue: z.string().optional(), + validation: z.enum(['required', 'optional', 'conditional']).optional(), +}); + +export const ProfileConfigSchema = z.object({ + name: z.string().min(1), + description: z.string(), + permissions: z.array(z.string()), +}); + +export const LLMAnalysisSchema = z.object({ + envTemplate: z.array(EnvTemplateEntrySchema), + setupGuide: z.string().optional(), + profiles: z.array(ProfileConfigSchema).optional(), +}); + +export type LLMAnalysisResult = z.infer<typeof LLMAnalysisSchema>; + +export class LLMReadmeAnalyzer { + constructor( + private llmProvider: LLMProvider, + private logger: StructuredLogger + ) {} + + async analyze( + readme: string, + serverMeta: RegistryServer + ): Promise<LLMAnalysisResult> { ... } + + sanitizeForLLM(text: string): string { ... } + + private buildPrompt(readme: string, serverMeta: RegistryServer): string { ... } + + private extractJSON(response: string): string { ... } + + private validateAndParse(jsonStr: string): LLMAnalysisResult { ... } +} +``` + +**Implementation Requirements:** + +1. **Input Sanitization** (SECURITY CRITICAL): + - Remove prompt injection patterns: `[INST]`, `[/INST]`, `<<SYS>>`, `<</SYS>>`, `</s>`, `<|endoftext|>`, `<|im_start|>`, `<|im_end|>` + - Remove template syntax: `{{...}}`, `${...}`, `<%...%>` + - Preserve code blocks (```...```) for context + - Truncate to 50000 characters with warning log + - Normalize Unicode (NFKC) to prevent homoglyph attacks + - Log sanitization actions for audit + +2. 
**Prompt Construction**: + - Use structured prompt template requesting JSON output + - Include serverMeta context (name, type, existing envTemplate if partial) + - Request specific fields: envTemplate, setupGuide, profiles + - Include examples of data platform auth patterns in prompt + - Set temperature=0.1 for deterministic output + - Set maxTokens=2000 + +3. **Response Processing**: + - Extract JSON from markdown code blocks if present + - Handle raw JSON without code blocks + - Validate with Zod schema + - Return empty result on validation failure (graceful degradation) + - Log validation errors for debugging + +4. **Data Platform Auth Recognition**: + - Include prompt context about common patterns: + - GCP: Service account JSON files, GOOGLE_APPLICATION_CREDENTIALS + - AWS: Access keys, IAM roles, STS assume role + - Azure: Service principals, managed identity + - Snowflake: Account URL, OAuth, key-pair auth + - Databricks: Personal access tokens, OAuth M2M + - dbt Cloud: API tokens with account/project scoping + +5. **Error Handling**: + - Wrap LLM calls in try-catch + - Return fallback result on LLM timeout + - Circuit breaker integration via LLMProvider + - Never propagate sensitive data in errors + +6. **Structured Logging** (SRE): + - Log: requestId, llmProvider, promptLength, responseLength, duration_ms + - NEVER log: full README content, API keys, tokens + - Log validation failures with field paths + +### 24.5. Implement Install Command Handler with Full Installation Flow, SetupWizard, and mcpd Integration + +**Status:** pending +**Dependencies:** 24.1, 24.2, 24.3, 24.4 + +Implement the main install command and action handler that orchestrates the full installation flow: registry lookup, LLM analysis (optional), server registration with mcpd, interactive credential collection via SetupWizard, profile creation, and optional project assignment. 
+ +**Details:** + +Create src/cli/src/commands/install.ts and src/cli/src/install/install-action.ts: + +```typescript +// install.ts +import { Command } from 'commander'; +import { installAction } from '../install/install-action'; + +export function createInstallCommand(): Command { + return new Command('install') + .description('Install and configure an MCP server') + .argument('<servers...>', 'Server name(s) from registry') + .option('--non-interactive', 'Use env vars for credentials, no prompts') + .option('--profile-name <name>', 'Name for the created profile') + .option('--project <name>', 'Auto-add to this project') + .option('--dry-run', 'Show configuration without applying') + .option('--skip-llm', 'Only use registry metadata, no LLM analysis') + .action(installAction); +} + +// install-action.ts +export interface InstallOptions { + nonInteractive?: boolean; + profileName?: string; + project?: string; + dryRun?: boolean; + skipLlm?: boolean; +} + +export async function installAction( + serverNames: string[], + options: InstallOptions +): Promise<void> { ... } +``` + +**Implementation Requirements:** + +1. **Command Registration**: + - Register in src/cli/src/commands/index.ts command registry + - Follow existing Commander.js patterns from discover command (Task 23) + - Set exit codes: 0 success, 1 partial success, 2 complete failure + +2. 
**Installation Flow** (per server): + - Step 1: Fetch server metadata from RegistryClient (from Task 22) + - Step 2: Check if envTemplate is complete or needs LLM analysis + - Step 3: If needed and --skip-llm not set, fetch README and analyze with LLM + - Step 4: Merge LLM results with registry metadata (registry takes precedence for conflicts) + - Step 5: If --dry-run, print configuration and exit + - Step 6: Register MCP server with mcpd via McpdClient + - Step 7: Run SetupWizard to collect credentials (or use env vars if --non-interactive) + - Step 8: Create profile with collected credentials + - Step 9: If --project specified, add profile to project + +3. **Dependency Injection**: + - Accept RegistryClient, LLMProvider, McpdClient via constructor or factory + - Enable testing with mock implementations + - Use getLLMProvider() factory from Task 12 configuration + +4. **SetupWizard Integration** (from Task 10): + - Pass envTemplate to SetupWizard + - Handle nonInteractive mode (read from environment) + - Validate credentials before storing + - Support OAuth flows via browser for applicable servers + +5. **Dry Run Mode**: + - Print server metadata (name, command, transport) + - Print envTemplate with descriptions + - Print setupGuide if available + - Print suggested profiles + - Use chalk for formatted output + - Exit without side effects + +6. **Batch Install**: + - Process servers sequentially (to avoid mcpd race conditions) + - Continue on individual server failures (log warning) + - Report summary at end: X installed, Y failed + - Return appropriate exit code + +7. **Error Handling**: + - Catch RegistryNotFoundError and suggest 'mcpctl discover' + - Catch McpdConnectionError and print mcpd health check URL + - Catch SetupWizardCancelledError gracefully + - Never expose credentials in error messages + +8. 
**Structured Logging** (SRE): + - Log: serverName, registrySource, llmAnalysisUsed, installDuration_ms, success + - Emit metrics: install_total (counter), install_duration_seconds (histogram) + +9. **Output Messages**: + - Use chalk.blue for progress + - Use chalk.green + checkmark for success + - Use chalk.red for errors + - Print profile name and usage instructions on success diff --git a/.taskmaster/tasks/tasks.json b/.taskmaster/tasks/tasks.json new file mode 100644 index 0000000..b47d9b6 --- /dev/null +++ b/.taskmaster/tasks/tasks.json @@ -0,0 +1,701 @@ +{ + "master": { + "tasks": [ + { + "id": 1, + "title": "Initialize Project Structure and Core Dependencies", + "description": "Set up the monorepo structure for mcpctl with CLI client, mcpd server, and shared libraries. Configure TypeScript, ESLint, and build tooling.", + "details": "Create a monorepo using pnpm workspaces or npm workspaces with the following structure:\n\n```\nmcpctl/\n├── src/\n│ ├── cli/ # mcpctl CLI tool\n│ ├── mcpd/ # Backend daemon server\n│ ├── shared/ # Shared types, utilities, constants\n│ └── local-proxy/ # Local LLM proxy component\n├── deploy/\n│ └── docker-compose.yml\n├── package.json\n├── tsconfig.base.json\n└── pnpm-workspace.yaml\n```\n\nDependencies to install:\n- TypeScript 5.x\n- Commander.js for CLI\n- Express/Fastify for mcpd HTTP server\n- Zod for schema validation\n- Winston/Pino for logging\n- Prisma or Drizzle for database ORM\n\nCreate base tsconfig.json with strict mode, ES2022 target, and module resolution settings. Set up shared ESLint config with TypeScript rules.", + "testStrategy": "Verify project builds successfully with `pnpm build`. Ensure all packages compile without errors. 
Test workspace linking works correctly between packages.", + "priority": "high", + "dependencies": [], + "status": "done", + "subtasks": [ + { + "id": 1, + "title": "Initialize pnpm workspace monorepo with future-proof directory structure", + "description": "Create the complete monorepo directory structure using pnpm workspaces that accommodates all 18 planned tasks without requiring future refactoring.", + "dependencies": [], + "details": "Create root package.json with pnpm workspaces configuration. Create pnpm-workspace.yaml defining all workspace packages. Initialize the following directory structure:\n\n```\nmcpctl/\n├── src/\n│ ├── cli/ # mcpctl CLI tool (Task 7-10)\n│ │ ├── src/\n│ │ ├── tests/\n│ │ └── package.json\n│ ├── mcpd/ # Backend daemon server (Task 3-6, 14, 16)\n│ │ ├── src/\n│ │ ├── tests/\n│ │ └── package.json\n│ ├── shared/ # Shared types, utils, constants, validation\n│ │ ├── src/\n│ │ │ ├── types/ # TypeScript interfaces/types\n│ │ │ ├── utils/ # Utility functions\n│ │ │ ├── constants/# Shared constants\n│ │ │ ├── validation/ # Zod schemas\n│ │ │ └── index.ts # Barrel export\n│ │ ├── tests/\n│ │ └── package.json\n│ ├── local-proxy/ # Local LLM proxy (Task 11-13)\n│ │ ├── src/\n│ │ ├── tests/\n│ │ └── package.json\n│ └── db/ # Database package (Task 2)\n│ ├── src/\n│ ├── prisma/ # Schema and migrations\n│ ├── seed/ # Seed data\n│ ├── tests/\n│ └── package.json\n├── deploy/\n│ └── docker-compose.yml # Local dev services (postgres)\n├── tests/\n│ ├── e2e/ # End-to-end tests (Task 18)\n│ └── integration/ # Integration tests\n├── docs/ # Documentation (Task 18)\n├── package.json # Root workspace config\n├── pnpm-workspace.yaml\n└── turbo.json # Optional: Turborepo for build orchestration\n```\n\nThe pnpm-workspace.yaml should contain: `packages: [\"src/*\"]`", + "status": "done", + "testStrategy": "Write Vitest tests that verify: (1) All expected directories exist, (2) All package.json files are valid JSON with correct workspace protocol 
dependencies, (3) pnpm-workspace.yaml correctly includes all packages, (4) Running 'pnpm install' succeeds and creates correct node_modules symlinks between packages. Run 'pnpm ls' to verify workspace linking." + }, + { + "id": 2, + "title": "Configure TypeScript with strict mode and project references", + "description": "Set up TypeScript configuration with strict mode, ES2022 target, and proper project references for monorepo build orchestration.", + "dependencies": [ + 1 + ], + "details": "Create root tsconfig.base.json with shared compiler options. Create package-specific tsconfig.json in each package that extends the base and sets appropriate paths.", + "status": "done", + "testStrategy": "Write Vitest tests that verify tsconfig.base.json exists and has strict: true, each package tsconfig.json extends base correctly." + }, + { + "id": 3, + "title": "Set up Vitest testing framework with workspace configuration", + "description": "Configure Vitest as the test framework across all packages with proper workspace setup, coverage reporting, and test-driven development infrastructure.", + "dependencies": [ + 2 + ], + "details": "Install Vitest and related packages at root level. Create root vitest.config.ts and vitest.workspace.ts for workspace-aware testing pointing to src/cli, src/mcpd, src/shared, src/local-proxy, src/db.", + "status": "done", + "testStrategy": "Run 'pnpm test:run' and verify Vitest discovers and runs tests, coverage report is generated." + }, + { + "id": 4, + "title": "Configure ESLint with TypeScript rules and docker-compose for local development", + "description": "Set up shared ESLint configuration with TypeScript-aware rules, Prettier integration, and docker-compose.yml for local PostgreSQL database.", + "dependencies": [ + 2 + ], + "details": "Install ESLint and plugins at root. Create eslint.config.js (flat config, ESLint 9+). 
Create deploy/docker-compose.yml for local development with PostgreSQL service.", + "status": "done", + "testStrategy": "Write Vitest tests that verify eslint.config.js exists and exports valid config, deploy/docker-compose.yml is valid YAML and defines postgres service." + }, + { + "id": 5, + "title": "Install core dependencies and perform security/architecture review", + "description": "Install all required production dependencies across packages, run security audit, and validate the directory structure supports all 18 planned tasks.", + "dependencies": [ + 1, + 3, + 4 + ], + "details": "Install dependencies per package in src/cli, src/mcpd, src/shared, src/db, src/local-proxy. Perform security and architecture review.", + "status": "done", + "testStrategy": "Verify each package.json has required dependencies, run pnpm audit, verify .gitignore contains required patterns." + } + ] + }, + { + "id": 2, + "title": "Design and Implement Database Schema", + "description": "Create the database schema for storing MCP server configurations, projects, profiles, user sessions, and audit logs. Use PostgreSQL for production readiness.", + "details": "Design PostgreSQL schema using Prisma ORM with models: User, McpServer, McpProfile, Project, ProjectMcpProfile, McpInstance, AuditLog, Session. Create migrations and seed data for common MCP servers (slack, jira, github, terraform).", + "testStrategy": "Run Prisma migrations against test database. Verify all relations work correctly with seed data. 
Test CRUD operations for each model using Prisma client.", + "priority": "high", + "dependencies": [ + "1" + ], + "status": "pending", + "subtasks": [ + { + "id": 1, + "title": "Set up Prisma ORM and PostgreSQL test infrastructure with docker-compose", + "description": "Initialize Prisma in the db package with PostgreSQL configuration, create docker-compose.yml for local development with separate test database.", + "dependencies": [], + "details": "Create src/db/prisma directory structure. Install Prisma dependencies. Configure deploy/docker-compose.yml with two PostgreSQL services: mcpctl-postgres (port 5432) for development and mcpctl-postgres-test (port 5433) for testing.", + "status": "pending", + "testStrategy": "Write Vitest tests that verify docker-compose creates both postgres services, setupTestDb() successfully connects and pushes schema." + }, + { + "id": 2, + "title": "Write TDD tests for all Prisma models before implementing schema", + "description": "Create comprehensive Vitest test suites for all 8 models testing CRUD operations, relations, constraints, and edge cases.", + "dependencies": [ + 1 + ], + "details": "Create src/db/tests/models directory with separate test files for each model. Tests will initially fail (TDD red phase) until schema is implemented.", + "status": "pending", + "testStrategy": "Tests are expected to fail initially (TDD red phase). After schema implementation, all tests should pass." + }, + { + "id": 3, + "title": "Implement Prisma schema with all models and security considerations", + "description": "Create the complete Prisma schema with all 8 models, proper relations, indexes for audit queries, and security-conscious field design.", + "dependencies": [ + 2 + ], + "details": "Implement src/db/prisma/schema.prisma with all models. Add version Int field and updatedAt DateTime for git-based backup support.", + "status": "pending", + "testStrategy": "Run TDD tests from subtask 2 - all should now pass (TDD green phase). 
Verify npx prisma validate passes." + }, + { + "id": 4, + "title": "Create seed data functions with unit tests for common MCP servers", + "description": "Implement seed functions for common MCP server configurations (Slack, Jira, GitHub, Terraform) with comprehensive unit tests.", + "dependencies": [ + 3 + ], + "details": "Create src/db/seed directory with server definitions and seeding functions for Slack, Jira, GitHub, Terraform MCP servers.", + "status": "pending", + "testStrategy": "Write unit tests BEFORE implementing seed functions (TDD). Verify seedMcpServers() creates exactly 4 servers with idempotent behavior." + }, + { + "id": 5, + "title": "Create database migrations and perform security/architecture review", + "description": "Generate initial Prisma migration, create migration helper utilities with tests, and conduct comprehensive security and architecture review.", + "dependencies": [ + 3, + 4 + ], + "details": "Run npx prisma migrate dev --name init. Create src/db/src/migration-helpers.ts. Document security and architecture findings.", + "status": "pending", + "testStrategy": "Verify migration files exist, migration helper tests pass, SECURITY_REVIEW.md covers all security checkpoints." + } + ] + }, + { + "id": 3, + "title": "Implement mcpd Core Server Framework", + "description": "Build the mcpd daemon server with Express/Fastify, including middleware for authentication, logging, and error handling. Design for horizontal scalability.", + "details": "Create mcpd server in src/mcpd/src/ with Fastify, health check endpoint, auth middleware, and audit logging. Design for statelessness and scalability.", + "testStrategy": "Unit test middleware functions. Integration test health endpoint. Load test with multiple concurrent requests. 
Verify statelessness by running two instances.", + "priority": "high", + "dependencies": [ + "1", + "2" + ], + "status": "pending", + "subtasks": [ + { + "id": 1, + "title": "Set up mcpd package structure with clean architecture layers and TDD infrastructure", + "description": "Create the src/mcpd directory structure following clean architecture principles with separate layers for routes, controllers, services, and repositories.", + "dependencies": [], + "details": "Create src/mcpd/src/ with routes/, controllers/, services/, repositories/, middleware/, config/, types/, utils/ directories.", + "status": "pending", + "testStrategy": "Write initial Vitest tests that verify all required directories exist, package.json has required dependencies." + }, + { + "id": 2, + "title": "Implement Fastify server core with health endpoint and database connectivity verification", + "description": "Create the core Fastify server with health check endpoint that verifies PostgreSQL database connectivity.", + "dependencies": [ + 1 + ], + "details": "Create src/mcpd/src/server.ts with Fastify instance factory function. Implement config validation with Zod and health endpoint.", + "status": "pending", + "testStrategy": "TDD approach - write tests first for config validation, health endpoint returns correct structure." + }, + { + "id": 3, + "title": "Implement authentication middleware with JWT validation and session management", + "description": "Create authentication preHandler hook that validates Bearer tokens against the Session table in PostgreSQL.", + "dependencies": [ + 2 + ], + "details": "Create src/mcpd/src/middleware/auth.ts with authMiddleware factory function using dependency injection.", + "status": "pending", + "testStrategy": "TDD - write all tests before implementation for 401 responses, token validation, request decoration." 
+ }, + { + "id": 4, + "title": "Implement security middleware stack with CORS, Helmet, rate limiting, and input sanitization", + "description": "Configure and register security middleware including CORS policy, Helmet security headers, rate limiting.", + "dependencies": [ + 2 + ], + "details": "Create src/mcpd/src/middleware/security.ts with registerSecurityPlugins function. Create sanitization and validation utilities.", + "status": "pending", + "testStrategy": "TDD tests for CORS headers, Helmet security headers, rate limiting returns 429, input validation." + }, + { + "id": 5, + "title": "Implement error handling, audit logging middleware, and graceful shutdown", + "description": "Create global error handler, audit logging onResponse hook, and graceful shutdown handling with connection draining.", + "dependencies": [ + 2, + 3, + 4 + ], + "details": "Create error-handler.ts, audit.ts middleware, and shutdown.ts utilities in src/mcpd/src/.", + "status": "pending", + "testStrategy": "TDD for all components: error handler HTTP codes, audit middleware creates records, graceful shutdown handles SIGTERM." + } + ] + }, + { + "id": 4, + "title": "Implement MCP Server Registry and Profile Management", + "description": "Create APIs for registering MCP servers, managing profiles with different permission levels, and storing configuration templates.", + "details": "Create REST API endpoints in mcpd for MCP server and profile CRUD operations with seed data for common servers.", + "testStrategy": "Test CRUD operations for servers and profiles. Verify profile inheritance works. 
Test that invalid configurations are rejected by Zod validation.", + "priority": "high", + "dependencies": [ + "3" + ], + "status": "pending", + "subtasks": [ + { + "id": 1, + "title": "Create Zod validation schemas with comprehensive TDD test coverage", + "description": "Define and test Zod schemas for MCP server registration, profile management, and configuration templates before implementing any routes.", + "dependencies": [], + "details": "Create src/mcpd/src/validation/mcp-server.schema.ts with CreateMcpServerSchema, UpdateMcpServerSchema, CreateMcpProfileSchema.", + "status": "pending", + "testStrategy": "TDD approach - write all tests first, then implement schemas. Tests verify valid inputs pass, invalid inputs fail." + }, + { + "id": 2, + "title": "Implement repository pattern for MCP server and profile data access", + "description": "Create injectable repository classes for McpServer and McpProfile data access with Prisma, following dependency injection patterns.", + "dependencies": [ + 1 + ], + "details": "Create src/mcpd/src/repositories/interfaces.ts with IMcpServerRepository and IMcpProfileRepository interfaces.", + "status": "pending", + "testStrategy": "TDD - write tests before implementation with mocked PrismaClient. Verify all repository methods are covered." + }, + { + "id": 3, + "title": "Implement MCP server service layer with business logic and authorization", + "description": "Create McpServerService and McpProfileService with business logic, authorization checks, and validation orchestration.", + "dependencies": [ + 1, + 2 + ], + "details": "Create src/mcpd/src/services/mcp-server.service.ts and mcp-profile.service.ts with DI and authorization checks.", + "status": "pending", + "testStrategy": "TDD - write tests first mocking repositories and authorization. Verify authorization checks are called for every method." 
+ }, + { + "id": 4, + "title": "Implement REST API routes for MCP servers and profiles with request validation", + "description": "Create Fastify route handlers for MCP server and profile CRUD operations using the service layer.", + "dependencies": [ + 3 + ], + "details": "Create src/mcpd/src/routes/mcp-servers.ts and mcp-profiles.ts with all CRUD endpoints.", + "status": "pending", + "testStrategy": "Write integration tests before implementation using Fastify.inject(). Test with docker-compose postgres." + }, + { + "id": 5, + "title": "Create seed data for pre-configured MCP servers and perform security review", + "description": "Implement seed data for Slack, Jira, GitHub, and Terraform MCP servers with default profiles, plus security review.", + "dependencies": [ + 4 + ], + "details": "Create src/mcpd/src/seed/mcp-servers.seed.ts with seedMcpServers() function and SECURITY_REVIEW.md.", + "status": "pending", + "testStrategy": "Write unit tests for seed functions. Security tests for injection prevention, authorization checks." + } + ] + }, + { + "id": 5, + "title": "Implement Project Management APIs", + "description": "Create APIs for managing MCP projects that group multiple MCP profiles together for easy assignment to Claude sessions.", + "details": "Create project management endpoints with generateMcpConfig function for .mcp.json format output.", + "testStrategy": "Test project CRUD operations. Verify profile associations work correctly. 
Test MCP config generation produces valid .mcp.json format.", + "priority": "high", + "dependencies": [ + "4" + ], + "status": "pending", + "subtasks": [ + { + "id": 1, + "title": "Write TDD tests for project Zod validation schemas and generateMcpConfig function", + "description": "Create comprehensive Vitest test suites for project validation schemas and generateMcpConfig function BEFORE implementing any code.", + "dependencies": [], + "details": "Create tests for CreateProjectSchema, UpdateProjectSchema, UpdateProjectProfilesSchema, and generateMcpConfig with security tests.", + "status": "pending", + "testStrategy": "TDD red phase - all tests should fail initially. Verify generateMcpConfig security tests check secret env vars are excluded." + }, + { + "id": 2, + "title": "Implement project repository and generateMcpConfig service with security filtering", + "description": "Create the project repository and generateMcpConfig function that strips sensitive credentials from output.", + "dependencies": [ + 1 + ], + "details": "Create src/mcpd/src/repositories/project.repository.ts and src/mcpd/src/services/mcp-config-generator.ts.", + "status": "pending", + "testStrategy": "Run TDD tests from subtask 1. Verify output must NOT contain secret values." + }, + { + "id": 3, + "title": "Implement project service layer with authorization and profile validation", + "description": "Create ProjectService with business logic including authorization checks and profile existence validation.", + "dependencies": [ + 2 + ], + "details": "Create src/mcpd/src/services/project.service.ts with DI accepting IProjectRepository and IMcpProfileRepository.", + "status": "pending", + "testStrategy": "TDD - write tests before implementation. Verify authorization and profile validation." 
+ }, + { + "id": 4, + "title": "Implement REST API routes for project CRUD and mcp-config endpoint", + "description": "Create Fastify route handlers for all project management endpoints including /api/projects/:name/mcp-config.", + "dependencies": [ + 3 + ], + "details": "Create src/mcpd/src/routes/projects.ts with all CRUD routes and mcp-config endpoint.", + "status": "pending", + "testStrategy": "Integration tests using Fastify.inject(). Verify mcp-config returns valid structure WITHOUT secret env vars." + }, + { + "id": 5, + "title": "Create integration tests and security review for project APIs", + "description": "Write comprehensive integration tests and security review documenting credential handling.", + "dependencies": [ + 4 + ], + "details": "Create src/mcpd/tests/integration/projects.test.ts with end-to-end scenarios and SECURITY_REVIEW.md section.", + "status": "pending", + "testStrategy": "Run full integration test suite. Verify coverage >85% for project-related files." + } + ] + }, + { + "id": 6, + "title": "Implement Docker Container Management for MCP Servers", + "description": "Create the container orchestration layer for running MCP servers as Docker containers, with support for docker-compose deployment.", + "details": "Create Docker management module with ContainerManager class using dockerode. Create deploy/docker-compose.yml template.", + "testStrategy": "Test container creation, start, stop, and removal. Integration test with actual Docker daemon. Verify network isolation.", + "priority": "high", + "dependencies": [ + "3", + "4" + ], + "status": "pending", + "subtasks": [ + { + "id": 1, + "title": "Define McpOrchestrator interface and write TDD tests for ContainerManager", + "description": "Define the McpOrchestrator abstraction interface for Docker and Kubernetes orchestrators. 
Write comprehensive unit tests.", + "dependencies": [], + "details": "Create src/mcpd/src/services/orchestrator.ts interface and TDD tests for ContainerManager methods.", + "status": "pending", + "testStrategy": "Run tests to verify they exist and fail with expected errors. Coverage target: 100% of interface methods." + }, + { + "id": 2, + "title": "Implement ContainerManager class with DockerOrchestrator strategy pattern", + "description": "Implement the ContainerManager class as a DockerOrchestrator implementation using dockerode.", + "dependencies": [ + 1 + ], + "details": "Create src/mcpd/src/services/docker/container-manager.ts implementing McpOrchestrator interface.", + "status": "pending", + "testStrategy": "Run unit tests from subtask 1. Verify TypeScript compilation and resource limits." + }, + { + "id": 3, + "title": "Create docker-compose.yml template with mcpd, PostgreSQL, and test MCP server", + "description": "Create the production-ready deploy/docker-compose.yml template for local development.", + "dependencies": [], + "details": "Create deploy/docker-compose.yml with mcpd, postgres, and test-mcp-server services with proper networking.", + "status": "pending", + "testStrategy": "Validate with docker-compose config. Run docker-compose up -d and verify all services start." + }, + { + "id": 4, + "title": "Write integration tests with real Docker daemon", + "description": "Create integration test suite that tests ContainerManager against a real Docker daemon.", + "dependencies": [ + 2, + 3 + ], + "details": "Create src/mcpd/src/services/docker/__tests__/container-manager.integration.test.ts.", + "status": "pending", + "testStrategy": "Run integration tests with pnpm --filter @mcpctl/mcpd test:integration. Verify containers are created/destroyed." 
+ }, + { + "id": 5, + "title": "Implement container network isolation and resource management", + "description": "Add network segmentation utilities and resource management capabilities for container isolation.", + "dependencies": [ + 2 + ], + "details": "Create src/mcpd/src/services/docker/network-manager.ts with network isolation and resource management.", + "status": "pending", + "testStrategy": "Unit tests for network creation. Integration test: verify container network isolation." + }, + { + "id": 6, + "title": "Conduct security review of Docker socket access and container configuration", + "description": "Perform comprehensive security review of all Docker-related code with security controls documentation.", + "dependencies": [ + 2, + 3, + 5 + ], + "details": "Create src/mcpd/docs/DOCKER_SECURITY_REVIEW.md documenting risks and mitigations.", + "status": "pending", + "testStrategy": "Review DOCKER_SECURITY_REVIEW.md covers all 6 security areas. Run security unit tests." + }, + { + "id": 7, + "title": "Implement container logs streaming and health monitoring", + "description": "Add log streaming capabilities and health monitoring to ContainerManager for observability.", + "dependencies": [ + 2 + ], + "details": "Extend ContainerManager with getLogs, getHealthStatus, attachToContainer, and event subscriptions.", + "status": "pending", + "testStrategy": "Unit tests for getLogs. Integration test: run container, tail logs, verify output." + } + ] + }, + { + "id": 7, + "title": "Build mcpctl CLI Core Framework", + "description": "Create the CLI tool foundation using Commander.js with kubectl-inspired command structure, configuration management, and server communication.", + "details": "Create CLI in src/cli/src/ with Commander.js, configuration management at ~/.mcpctl/config.json, and API client for mcpd.", + "testStrategy": "Test CLI argument parsing. Test configuration persistence. 
Mock API calls and verify request formatting.", + "priority": "high", + "dependencies": [ + "1" + ], + "status": "pending", + "subtasks": [ + { + "id": 1, + "title": "Set up CLI package structure with TDD infrastructure and command registry pattern", + "description": "Create src/cli directory structure with Commander.js foundation, Vitest test configuration, and extensible command registry.", + "dependencies": [], + "details": "Create src/cli/src/ with commands/, config/, client/, formatters/, utils/, types/ directories and registry pattern.", + "status": "pending", + "testStrategy": "TDD approach - write tests first. Tests verify CLI shows version, help, CommandRegistry works." + }, + { + "id": 2, + "title": "Implement secure configuration management with encrypted credential storage", + "description": "Create configuration loader/saver with ~/.mcpctl/config.json and encrypted credentials storage.", + "dependencies": [ + 1 + ], + "details": "Implement config management with proxy settings, custom CA certificates support, and Zod validation.", + "status": "pending", + "testStrategy": "TDD tests for config loading, saving, validation, and credential encryption." + } + ] + }, + { + "id": 8, + "title": "Implement mcpctl get and describe Commands", + "description": "Create kubectl-style get and describe commands for viewing MCP servers, profiles, projects, and instances.", + "details": "Implement get command with table/json/yaml output formats and describe command for detailed views.", + "testStrategy": "Test output formatting for each resource type. 
Test filtering and sorting options.", + "priority": "medium", + "dependencies": [ + "7" + ], + "status": "pending", + "subtasks": null + }, + { + "id": 9, + "title": "Implement mcpctl apply and setup Commands", + "description": "Create apply command for declarative configuration and setup wizard for interactive MCP server configuration.", + "details": "Implement apply command for YAML/JSON config files and interactive setup wizard with credential prompts.", + "testStrategy": "Test YAML/JSON parsing. Test interactive prompts with mock inputs. Verify credentials are stored securely.", + "priority": "medium", + "dependencies": [ + "7", + "4" + ], + "status": "pending", + "subtasks": null + }, + { + "id": 10, + "title": "Implement mcpctl claude and project Commands", + "description": "Create commands for managing Claude MCP configuration and project assignments.", + "details": "Implement claude command for managing .mcp.json files and project command for project management.", + "testStrategy": "Test .mcp.json file generation. Test project switching. Verify file permissions are correct.", + "priority": "medium", + "dependencies": [ + "7", + "5" + ], + "status": "pending", + "subtasks": null + }, + { + "id": 11, + "title": "Design Local LLM Proxy Architecture", + "description": "Design the architecture for the local LLM proxy that enables Claude to use MCP servers through a local intermediary.", + "details": "Design proxy architecture in src/local-proxy/ with MCP protocol handling and request/response transformation.", + "testStrategy": "Architecture review. Document security considerations. 
Create proof-of-concept for MCP protocol handling.", + "priority": "medium", + "dependencies": [ + "1" + ], + "status": "pending", + "subtasks": null + }, + { + "id": 12, + "title": "Implement Local LLM Proxy Core", + "description": "Build the core local proxy server that handles MCP protocol communication between Claude and MCP servers.", + "details": "Implement proxy server in src/local-proxy/src/ with MCP SDK integration and request routing.", + "testStrategy": "Test MCP protocol parsing. Test request routing. Integration test with actual MCP server.", + "priority": "medium", + "dependencies": [ + "11" + ], + "status": "pending", + "subtasks": null + }, + { + "id": 13, + "title": "Implement LLM Provider Strategy Pattern", + "description": "Create pluggable LLM provider support with strategy pattern for different providers (OpenAI, Anthropic, local models).", + "details": "Implement provider strategy pattern in src/local-proxy/src/providers/ with adapters for different LLM APIs.", + "testStrategy": "Test each provider adapter. Test provider switching. Mock API responses for testing.", + "priority": "low", + "dependencies": [ + "12" + ], + "status": "pending", + "subtasks": null + }, + { + "id": 14, + "title": "Implement Audit Logging and Compliance", + "description": "Create comprehensive audit logging system for tracking all MCP operations for compliance and debugging.", + "details": "Implement audit logging in src/mcpd/src/services/ with structured logging, retention policies, and query APIs.", + "testStrategy": "Test audit log creation. Test query APIs. 
Verify log retention works correctly.", + "priority": "medium", + "dependencies": [ + "3" + ], + "status": "pending", + "subtasks": null + }, + { + "id": 15, + "title": "Create MCP Profiles Library", + "description": "Build a library of pre-configured MCP profiles for common use cases with best practices baked in.", + "details": "Create profile library in src/shared/src/profiles/ with templates for common MCP server configurations.", + "testStrategy": "Test profile templates are valid. Test profile application. Document each profile's use case.", + "priority": "low", + "dependencies": [ + "4" + ], + "status": "pending", + "subtasks": null + }, + { + "id": 16, + "title": "Implement MCP Instance Lifecycle Management", + "description": "Create APIs and CLI commands for managing the full lifecycle of MCP server instances.", + "details": "Implement instance lifecycle management in src/mcpd/src/services/ with start, stop, restart, logs commands.", + "testStrategy": "Test instance state transitions. Test concurrent instance management. Integration test with Docker.", + "priority": "medium", + "dependencies": [ + "6" + ], + "status": "pending", + "subtasks": null + }, + { + "id": 17, + "title": "Add Kubernetes Deployment Support", + "description": "Extend the orchestration layer to support Kubernetes deployments for production environments.", + "details": "Implement KubernetesOrchestrator in src/mcpd/src/services/k8s/ implementing McpOrchestrator interface.", + "testStrategy": "Test Kubernetes manifest generation. Test with kind/minikube. 
Verify resource limits and security contexts.", + "priority": "low", + "dependencies": [ + "6" + ], + "status": "pending", + "subtasks": null + }, + { + "id": 18, + "title": "Documentation and Testing", + "description": "Create comprehensive documentation and end-to-end test suite for the entire mcpctl system.", + "details": "Create documentation in docs/ and e2e tests in tests/e2e/ covering all major workflows.", + "testStrategy": "Review documentation for completeness. Run e2e test suite. Test installation instructions.", + "priority": "medium", + "dependencies": [ + "7", + "8", + "9", + "10" + ], + "status": "pending", + "subtasks": null + }, + { + "id": 19, + "title": "CANCELLED - Auth middleware", + "description": "Merged into Task 3 subtasks", + "details": null, + "testStrategy": null, + "priority": null, + "dependencies": [], + "status": "cancelled", + "subtasks": null, + "updatedAt": "2026-02-21T02:21:03.958Z" + }, + { + "id": 20, + "title": "CANCELLED - Duplicate project management", + "description": "Merged into Task 5", + "details": null, + "testStrategy": null, + "priority": null, + "dependencies": [], + "status": "cancelled", + "subtasks": null, + "updatedAt": "2026-02-21T02:21:03.966Z" + }, + { + "id": 21, + "title": "CANCELLED - Duplicate audit logging", + "description": "Merged into Task 14", + "details": null, + "testStrategy": null, + "priority": null, + "dependencies": [], + "status": "cancelled", + "subtasks": null, + "updatedAt": "2026-02-21T02:21:03.972Z" + }, + { + "id": 22, + "title": "Implement Health Monitoring Dashboard", + "description": "Create a monitoring dashboard for tracking MCP server health, resource usage, and system metrics.", + "details": "Implement health monitoring endpoints in src/mcpd/src/routes/ and optional web dashboard.", + "testStrategy": "Test health check endpoints. Test metrics collection. 
Verify dashboard displays correct data.", + "priority": "low", + "dependencies": [ + "6", + "14" + ], + "status": "pending", + "subtasks": null + }, + { + "id": 23, + "title": "Implement Backup and Restore", + "description": "Create backup and restore functionality for mcpctl configuration and state.", + "details": "Implement git-based backup in src/mcpd/src/services/backup/ with encrypted secrets and restore capability.", + "testStrategy": "Test backup creation. Test restore from backup. Verify secrets are encrypted.", + "priority": "low", + "dependencies": [ + "2", + "5" + ], + "status": "pending", + "subtasks": null + }, + { + "id": 24, + "title": "CI/CD Pipeline Setup", + "description": "Set up continuous integration and deployment pipelines for the mcpctl project.", + "details": "Create GitHub Actions workflows in .github/workflows/ for testing, building, and releasing.", + "testStrategy": "Test CI pipeline runs successfully. Test release automation. Verify artifacts are published.", + "priority": "medium", + "dependencies": [ + "1" + ], + "status": "pending", + "subtasks": null + } + ], + "metadata": { + "created": "2026-02-21T02:23:17.813Z", + "updated": "2026-02-21T02:23:17.813Z", + "description": "Tasks for master context" + } + } +} \ No newline at end of file diff --git a/.taskmaster/templates/example_prd.txt b/.taskmaster/templates/example_prd.txt new file mode 100644 index 0000000..194114d --- /dev/null +++ b/.taskmaster/templates/example_prd.txt @@ -0,0 +1,47 @@ +<context> +# Overview +[Provide a high-level overview of your product here. Explain what problem it solves, who it's for, and why it's valuable.] + +# Core Features +[List and describe the main features of your product. For each feature, include: +- What it does +- Why it's important +- How it works at a high level] + +# User Experience +[Describe the user journey and experience. 
Include: +- User personas +- Key user flows +- UI/UX considerations] +</context> +<PRD> +# Technical Architecture +[Outline the technical implementation details: +- System components +- Data models +- APIs and integrations +- Infrastructure requirements] + +# Development Roadmap +[Break down the development process into phases: +- MVP requirements +- Future enhancements +- Do not think about timelines whatsoever -- all that matters is scope and detailing exactly what needs to be built in each phase so it can later be cut up into tasks] + +# Logical Dependency Chain +[Define the logical order of development: +- Which features need to be built first (foundation) +- Getting as quickly as possible to a usable/visible front end that works +- Properly pacing and scoping each feature so it is atomic but can also be built upon and improved as development progresses] + +# Risks and Mitigations +[Identify potential risks and how they'll be addressed: +- Technical challenges +- Figuring out the MVP that we can build upon +- Resource constraints] + +# Appendix +[Include any additional information: +- Research findings +- Technical specifications] +</PRD> \ No newline at end of file diff --git a/.taskmaster/templates/example_prd_rpg.txt b/.taskmaster/templates/example_prd_rpg.txt new file mode 100644 index 0000000..5ad908f --- /dev/null +++ b/.taskmaster/templates/example_prd_rpg.txt @@ -0,0 +1,511 @@ +<rpg-method> +# Repository Planning Graph (RPG) Method - PRD Template + +This template teaches you (AI or human) how to create structured, dependency-aware PRDs using the RPG methodology from Microsoft Research. The key insight: separate WHAT (functional) from HOW (structural), then connect them with explicit dependencies. + +## Core Principles + +1. **Dual-Semantics**: Think functional (capabilities) AND structural (code organization) separately, then map them +2. **Explicit Dependencies**: Never assume - always state what depends on what +3. 
**Topological Order**: Build foundation first, then layers on top +4. **Progressive Refinement**: Start broad, refine iteratively + +## How to Use This Template + +- Follow the instructions in each `<instruction>` block +- Look at `<example>` blocks to see good vs bad patterns +- Fill in the content sections with your project details +- The AI reading this will learn the RPG method by following along +- Task Master will parse the resulting PRD into dependency-aware tasks + +## Recommended Tools for Creating PRDs + +When using this template to **create** a PRD (not parse it), use **code-context-aware AI assistants** for best results: + +**Why?** The AI needs to understand your existing codebase to make good architectural decisions about modules, dependencies, and integration points. + +**Recommended tools:** +- **Claude Code** (claude-code CLI) - Best for structured reasoning and large contexts +- **Cursor/Windsurf** - IDE integration with full codebase context +- **Gemini CLI** (gemini-cli) - Massive context window for large codebases +- **Codex/Grok CLI** - Strong code generation with context awareness + +**Note:** Once your PRD is created, `task-master parse-prd` works with any configured AI model - it just needs to read the PRD text itself, not your codebase. +</rpg-method> + +--- + +<overview> +<instruction> +Start with the problem, not the solution. Be specific about: +- What pain point exists? +- Who experiences it? +- Why existing solutions don't work? +- What success looks like (measurable outcomes)? + +Keep this section focused - don't jump into implementation details yet. +</instruction> + +## Problem Statement +[Describe the core problem. Be concrete about user pain points.] + +## Target Users +[Define personas, their workflows, and what they're trying to achieve.] + +## Success Metrics +[Quantifiable outcomes. 
Examples: "80% task completion via autopilot", "< 5% manual intervention rate"] + +</overview> + +--- + +<functional-decomposition> +<instruction> +Now think about CAPABILITIES (what the system DOES), not code structure yet. + +Step 1: Identify high-level capability domains +- Think: "What major things does this system do?" +- Examples: Data Management, Core Processing, Presentation Layer + +Step 2: For each capability, enumerate specific features +- Use explore-exploit strategy: + * Exploit: What features are REQUIRED for core value? + * Explore: What features make this domain COMPLETE? + +Step 3: For each feature, define: +- Description: What it does in one sentence +- Inputs: What data/context it needs +- Outputs: What it produces/returns +- Behavior: Key logic or transformations + +<example type="good"> +Capability: Data Validation + Feature: Schema validation + - Description: Validate JSON payloads against defined schemas + - Inputs: JSON object, schema definition + - Outputs: Validation result (pass/fail) + error details + - Behavior: Iterate fields, check types, enforce constraints + + Feature: Business rule validation + - Description: Apply domain-specific validation rules + - Inputs: Validated data object, rule set + - Outputs: Boolean + list of violated rules + - Behavior: Execute rules sequentially, short-circuit on failure +</example> + +<example type="bad"> +Capability: validation.js + (Problem: This is a FILE, not a CAPABILITY. Mixing structure into functional thinking.) + +Capability: Validation + Feature: Make sure data is good + (Problem: Too vague. No inputs/outputs. Not actionable.) 
+</example> +</instruction> + +## Capability Tree + +### Capability: [Name] +[Brief description of what this capability domain covers] + +#### Feature: [Name] +- **Description**: [One sentence] +- **Inputs**: [What it needs] +- **Outputs**: [What it produces] +- **Behavior**: [Key logic] + +#### Feature: [Name] +- **Description**: +- **Inputs**: +- **Outputs**: +- **Behavior**: + +### Capability: [Name] +... + +</functional-decomposition> + +--- + +<structural-decomposition> +<instruction> +NOW think about code organization. Map capabilities to actual file/folder structure. + +Rules: +1. Each capability maps to a module (folder or file) +2. Features within a capability map to functions/classes +3. Use clear module boundaries - each module has ONE responsibility +4. Define what each module exports (public interface) + +The goal: Create a clear mapping between "what it does" (functional) and "where it lives" (structural). + +<example type="good"> +Capability: Data Validation + → Maps to: src/validation/ + ├── schema-validator.js (Schema validation feature) + ├── rule-validator.js (Business rule validation feature) + └── index.js (Public exports) + +Exports: + - validateSchema(data, schema) + - validateRules(data, rules) +</example> + +<example type="bad"> +Capability: Data Validation + → Maps to: src/utils.js + (Problem: "utils" is not a clear module boundary. Where do I find validation logic?) + +Capability: Data Validation + → Maps to: src/validation/everything.js + (Problem: One giant file. Features should map to separate files for maintainability.) 
+</example> +</instruction> + +## Repository Structure + +``` +project-root/ +├── src/ +│ ├── [module-name]/ # Maps to: [Capability Name] +│ │ ├── [file].js # Maps to: [Feature Name] +│ │ └── index.js # Public exports +│ └── [module-name]/ +├── tests/ +└── docs/ +``` + +## Module Definitions + +### Module: [Name] +- **Maps to capability**: [Capability from functional decomposition] +- **Responsibility**: [Single clear purpose] +- **File structure**: + ``` + module-name/ + ├── feature1.js + ├── feature2.js + └── index.js + ``` +- **Exports**: + - `functionName()` - [what it does] + - `ClassName` - [what it does] + +</structural-decomposition> + +--- + +<dependency-graph> +<instruction> +This is THE CRITICAL SECTION for Task Master parsing. + +Define explicit dependencies between modules. This creates the topological order for task execution. + +Rules: +1. List modules in dependency order (foundation first) +2. For each module, state what it depends on +3. Foundation modules should have NO dependencies +4. Every non-foundation module should depend on at least one other module +5. Think: "What must EXIST before I can build this module?" + +<example type="good"> +Foundation Layer (no dependencies): + - error-handling: No dependencies + - config-manager: No dependencies + - base-types: No dependencies + +Data Layer: + - schema-validator: Depends on [base-types, error-handling] + - data-ingestion: Depends on [schema-validator, config-manager] + +Core Layer: + - algorithm-engine: Depends on [base-types, error-handling] + - pipeline-orchestrator: Depends on [algorithm-engine, data-ingestion] +</example> + +<example type="bad"> +- validation: Depends on API +- API: Depends on validation +(Problem: Circular dependency. This will cause build/runtime issues.) + +- user-auth: Depends on everything +(Problem: Too many dependencies. Should be more focused.) 
+</example> +</instruction> + +## Dependency Chain + +### Foundation Layer (Phase 0) +No dependencies - these are built first. + +- **[Module Name]**: [What it provides] +- **[Module Name]**: [What it provides] + +### [Layer Name] (Phase 1) +- **[Module Name]**: Depends on [[module-from-phase-0], [module-from-phase-0]] +- **[Module Name]**: Depends on [[module-from-phase-0]] + +### [Layer Name] (Phase 2) +- **[Module Name]**: Depends on [[module-from-phase-1], [module-from-foundation]] + +[Continue building up layers...] + +</dependency-graph> + +--- + +<implementation-roadmap> +<instruction> +Turn the dependency graph into concrete development phases. + +Each phase should: +1. Have clear entry criteria (what must exist before starting) +2. Contain tasks that can be parallelized (no inter-dependencies within phase) +3. Have clear exit criteria (how do we know phase is complete?) +4. Build toward something USABLE (not just infrastructure) + +Phase ordering follows topological sort of dependency graph. + +<example type="good"> +Phase 0: Foundation + Entry: Clean repository + Tasks: + - Implement error handling utilities + - Create base type definitions + - Setup configuration system + Exit: Other modules can import foundation without errors + +Phase 1: Data Layer + Entry: Phase 0 complete + Tasks: + - Implement schema validator (uses: base types, error handling) + - Build data ingestion pipeline (uses: validator, config) + Exit: End-to-end data flow from input to validated output +</example> + +<example type="bad"> +Phase 1: Build Everything + Tasks: + - API + - Database + - UI + - Tests + (Problem: No clear focus. Too broad. Dependencies not considered.) 
+</example> +</instruction> + +## Development Phases + +### Phase 0: [Foundation Name] +**Goal**: [What foundational capability this establishes] + +**Entry Criteria**: [What must be true before starting] + +**Tasks**: +- [ ] [Task name] (depends on: [none or list]) + - Acceptance criteria: [How we know it's done] + - Test strategy: [What tests prove it works] + +- [ ] [Task name] (depends on: [none or list]) + +**Exit Criteria**: [Observable outcome that proves phase complete] + +**Delivers**: [What can users/developers do after this phase?] + +--- + +### Phase 1: [Layer Name] +**Goal**: + +**Entry Criteria**: Phase 0 complete + +**Tasks**: +- [ ] [Task name] (depends on: [[tasks-from-phase-0]]) +- [ ] [Task name] (depends on: [[tasks-from-phase-0]]) + +**Exit Criteria**: + +**Delivers**: + +--- + +[Continue with more phases...] + +</implementation-roadmap> + +--- + +<test-strategy> +<instruction> +Define how testing will be integrated throughout development (TDD approach). + +Specify: +1. Test pyramid ratios (unit vs integration vs e2e) +2. Coverage requirements +3. Critical test scenarios +4. Test generation guidelines for Surgical Test Generator + +This section guides the AI when generating tests during the RED phase of TDD. 
+ +<example type="good"> +Critical Test Scenarios for Data Validation module: + - Happy path: Valid data passes all checks + - Edge cases: Empty strings, null values, boundary numbers + - Error cases: Invalid types, missing required fields + - Integration: Validator works with ingestion pipeline +</example> +</instruction> + +## Test Pyramid + +``` + /\ + /E2E\ ← [X]% (End-to-end, slow, comprehensive) + /------\ + /Integration\ ← [Y]% (Module interactions) + /------------\ + / Unit Tests \ ← [Z]% (Fast, isolated, deterministic) + /----------------\ +``` + +## Coverage Requirements +- Line coverage: [X]% minimum +- Branch coverage: [X]% minimum +- Function coverage: [X]% minimum +- Statement coverage: [X]% minimum + +## Critical Test Scenarios + +### [Module/Feature Name] +**Happy path**: +- [Scenario description] +- Expected: [What should happen] + +**Edge cases**: +- [Scenario description] +- Expected: [What should happen] + +**Error cases**: +- [Scenario description] +- Expected: [How system handles failure] + +**Integration points**: +- [What interactions to test] +- Expected: [End-to-end behavior] + +## Test Generation Guidelines +[Specific instructions for Surgical Test Generator about what to focus on, what patterns to follow, project-specific test conventions] + +</test-strategy> + +--- + +<architecture> +<instruction> +Describe technical architecture, data models, and key design decisions. + +Keep this section AFTER functional/structural decomposition - implementation details come after understanding structure. 
+</instruction> + +## System Components +[Major architectural pieces and their responsibilities] + +## Data Models +[Core data structures, schemas, database design] + +## Technology Stack +[Languages, frameworks, key libraries] + +**Decision: [Technology/Pattern]** +- **Rationale**: [Why chosen] +- **Trade-offs**: [What we're giving up] +- **Alternatives considered**: [What else we looked at] + +</architecture> + +--- + +<risks> +<instruction> +Identify risks that could derail development and how to mitigate them. + +Categories: +- Technical risks (complexity, unknowns) +- Dependency risks (blocking issues) +- Scope risks (creep, underestimation) +</instruction> + +## Technical Risks +**Risk**: [Description] +- **Impact**: [High/Medium/Low - effect on project] +- **Likelihood**: [High/Medium/Low] +- **Mitigation**: [How to address] +- **Fallback**: [Plan B if mitigation fails] + +## Dependency Risks +[External dependencies, blocking issues] + +## Scope Risks +[Scope creep, underestimation, unclear requirements] + +</risks> + +--- + +<appendix> +## References +[Papers, documentation, similar systems] + +## Glossary +[Domain-specific terms] + +## Open Questions +[Things to resolve during development] +</appendix> + +--- + +<task-master-integration> +# How Task Master Uses This PRD + +When you run `task-master parse-prd <file>.txt`, the parser: + +1. **Extracts capabilities** → Main tasks + - Each `### Capability:` becomes a top-level task + +2. **Extracts features** → Subtasks + - Each `#### Feature:` becomes a subtask under its capability + +3. **Parses dependencies** → Task dependencies + - `Depends on: [X, Y]` sets task.dependencies = ["X", "Y"] + +4. **Orders by phases** → Task priorities + - Phase 0 tasks = highest priority + - Phase N tasks = lower priority, properly sequenced + +5. 
**Uses test strategy** → Test generation context + - Feeds test scenarios to Surgical Test Generator during implementation + +**Result**: A dependency-aware task graph that can be executed in topological order. + +## Why RPG Structure Matters + +Traditional flat PRDs lead to: +- ❌ Unclear task dependencies +- ❌ Arbitrary task ordering +- ❌ Circular dependencies discovered late +- ❌ Poorly scoped tasks + +RPG-structured PRDs provide: +- ✅ Explicit dependency chains +- ✅ Topological execution order +- ✅ Clear module boundaries +- ✅ Validated task graph before implementation + +## Tips for Best Results + +1. **Spend time on dependency graph** - This is the most valuable section for Task Master +2. **Keep features atomic** - Each feature should be independently testable +3. **Progressive refinement** - Start broad, use `task-master expand` to break down complex tasks +4. **Use research mode** - `task-master parse-prd --research` leverages AI for better task generation +</task-master-integration> diff --git a/.vscode/mcp.json b/.vscode/mcp.json new file mode 100644 index 0000000..88f3426 --- /dev/null +++ b/.vscode/mcp.json @@ -0,0 +1,20 @@ +{ + "mcpServers": { + "task-master-ai": { + "command": "npx", + "args": ["-y", "task-master-ai"], + "env": { + "TASK_MASTER_TOOLS": "core", + "ANTHROPIC_API_KEY": "YOUR_ANTHROPIC_API_KEY_HERE", + "PERPLEXITY_API_KEY": "YOUR_PERPLEXITY_API_KEY_HERE", + "OPENAI_API_KEY": "YOUR_OPENAI_KEY_HERE", + "GOOGLE_API_KEY": "YOUR_GOOGLE_KEY_HERE", + "XAI_API_KEY": "YOUR_XAI_KEY_HERE", + "OPENROUTER_API_KEY": "YOUR_OPENROUTER_KEY_HERE", + "MISTRAL_API_KEY": "YOUR_MISTRAL_KEY_HERE", + "AZURE_OPENAI_API_KEY": "YOUR_AZURE_KEY_HERE", + "OLLAMA_API_KEY": "YOUR_OLLAMA_API_KEY_HERE" + } + } + } +} diff --git a/CLAUDE.md b/CLAUDE.md new file mode 100644 index 0000000..ec26a18 --- /dev/null +++ b/CLAUDE.md @@ -0,0 +1,5 @@ +# Claude Code Instructions + +## Task Master AI Instructions +**Import Task Master's development workflow commands and guidelines, 
treat as if import is in the main CLAUDE.md file.** +@./.taskmaster/CLAUDE.md diff --git a/deploy/docker-compose.yml b/deploy/docker-compose.yml new file mode 100644 index 0000000..892a7f9 --- /dev/null +++ b/deploy/docker-compose.yml @@ -0,0 +1,37 @@ +services: + postgres: + image: postgres:16-alpine + container_name: mcpctl-postgres + ports: + - "5432:5432" + environment: + POSTGRES_USER: mcpctl + POSTGRES_PASSWORD: mcpctl_dev + POSTGRES_DB: mcpctl + volumes: + - mcpctl-pgdata:/var/lib/postgresql/data + healthcheck: + test: ["CMD-SHELL", "pg_isready -U mcpctl"] + interval: 5s + timeout: 5s + retries: 5 + + postgres-test: + image: postgres:16-alpine + container_name: mcpctl-postgres-test + ports: + - "5433:5432" + environment: + POSTGRES_USER: mcpctl + POSTGRES_PASSWORD: mcpctl_test + POSTGRES_DB: mcpctl_test + tmpfs: + - /var/lib/postgresql/data + healthcheck: + test: ["CMD-SHELL", "pg_isready -U mcpctl"] + interval: 5s + timeout: 5s + retries: 5 + +volumes: + mcpctl-pgdata: diff --git a/eslint.config.js b/eslint.config.js new file mode 100644 index 0000000..d57ac82 --- /dev/null +++ b/eslint.config.js @@ -0,0 +1,26 @@ +import tseslint from '@typescript-eslint/eslint-plugin'; +import tsparser from '@typescript-eslint/parser'; + +export default [ + { + files: ['src/*/src/**/*.ts'], + languageOptions: { + parser: tsparser, + parserOptions: { + project: ['./src/*/tsconfig.json'], + tsconfigRootDir: import.meta.dirname, + }, + }, + plugins: { '@typescript-eslint': tseslint }, + rules: { + '@typescript-eslint/explicit-function-return-type': 'error', + '@typescript-eslint/no-explicit-any': 'error', + '@typescript-eslint/no-unused-vars': 'error', + '@typescript-eslint/strict-boolean-expressions': 'error', + 'no-console': ['warn', { allow: ['warn', 'error'] }], + }, + }, + { + ignores: ['**/dist/**', '**/node_modules/**', '**/*.config.*'], + }, +]; diff --git a/i.sh b/i.sh new file mode 100644 index 0000000..4badb8a --- /dev/null +++ b/i.sh @@ -0,0 +1,57 @@ +#!/bin/bash 
+# 1. Install & Set Fish +sudo dnf install -y fish byobu curl wl-clipboard +chsh -s /usr/bin/fish + +# 2. SILENCE THE PROMPTS (The "Wtf" Fix) +mkdir -p ~/.byobu +byobu-ctrl-a emacs + +# 3. Configure Byobu Core (Clean Paths) +byobu-enable +mkdir -p ~/.byobu/bin +# We REMOVED the -S flag to stop those random files appearing in your folders +echo "set -g default-shell /usr/bin/fish" > ~/.byobu/.tmux.conf +echo "set -g default-command /usr/bin/fish" >> ~/.byobu/.tmux.conf +echo "set -g mouse off" >> ~/.byobu/.tmux.conf +echo "set -s set-clipboard on" >> ~/.byobu/.tmux.conf + +# 4. Create the Smart Mouse Indicator +cat <<EOF > ~/.byobu/bin/custom +#!/bin/bash +if tmux show-options -g mouse | grep -q "on"; then + echo "#[fg=green]MOUSE: ON (Nav)#[default]" +else + echo "#[fg=red]Alt+F12 (Copy Mode)#[default]" +fi +EOF +chmod +x ~/.byobu/bin/custom + +# 5. Setup Status Bar +echo 'tmux_left="session"' > ~/.byobu/status +echo 'tmux_right="custom cpu_temp load_average"' >> ~/.byobu/status + +# 6. Atuin Global History +if ! command -v atuin &> /dev/null; then + curl --proto '=https' --tlsv1.2 -sSf https://setup.atuin.sh | sh +fi + +# 7. Final Fish Config (The Clean Sticky Logic) +mkdir -p ~/.config/fish +cat <<EOF > ~/.config/fish/config.fish +# Atuin Setup +source ~/.atuin/bin/env.fish +atuin init fish | source + +# Start a UNIQUE session per window without cluttering project folders +if status is-interactive + and not set -q BYOBU_RUN_DIR + # We use a human-readable name: FolderName-Time + set SESSION_NAME (basename (pwd))-(date +%H%M) + exec byobu new-session -A -s "\$SESSION_NAME" +end +EOF + +# Kill any existing server to wipe the old "socket" logic +byobu kill-server 2>/dev/null +echo "Done! No more random files in your project folders." 
diff --git a/package.json b/package.json new file mode 100644 index 0000000..ae88e8f --- /dev/null +++ b/package.json @@ -0,0 +1,36 @@ +{ + "name": "mcpctl", + "version": "0.1.0", + "private": true, + "description": "kubectl-like CLI for managing MCP servers", + "type": "module", + "scripts": { + "build": "pnpm -r run build", + "test": "vitest", + "test:run": "vitest run", + "test:coverage": "vitest run --coverage", + "test:ui": "vitest --ui", + "lint": "eslint 'src/*/src/**/*.ts'", + "lint:fix": "eslint 'src/*/src/**/*.ts' --fix", + "clean": "pnpm -r run clean && rimraf node_modules", + "db:up": "docker compose -f deploy/docker-compose.yml up -d", + "db:down": "docker compose -f deploy/docker-compose.yml down", + "typecheck": "tsc --build" + }, + "engines": { + "node": ">=20.0.0", + "pnpm": ">=9.0.0" + }, + "packageManager": "pnpm@9.15.0", + "devDependencies": { + "@typescript-eslint/eslint-plugin": "^8.56.0", + "@typescript-eslint/parser": "^8.56.0", + "@vitest/coverage-v8": "^4.0.18", + "eslint": "^10.0.1", + "eslint-config-prettier": "^10.1.8", + "rimraf": "^6.1.3", + "tsx": "^4.21.0", + "typescript": "^5.9.3", + "vitest": "^4.0.18" + } +} diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml new file mode 100644 index 0000000..1c46283 --- /dev/null +++ b/pnpm-lock.yaml @@ -0,0 +1,3618 @@ +lockfileVersion: '9.0' + +settings: + autoInstallPeers: true + excludeLinksFromLockfile: false + +importers: + + .: + devDependencies: + '@typescript-eslint/eslint-plugin': + specifier: ^8.56.0 + version: 8.56.0(@typescript-eslint/parser@8.56.0(eslint@10.0.1(jiti@2.6.1))(typescript@5.9.3))(eslint@10.0.1(jiti@2.6.1))(typescript@5.9.3) + '@typescript-eslint/parser': + specifier: ^8.56.0 + version: 8.56.0(eslint@10.0.1(jiti@2.6.1))(typescript@5.9.3) + '@vitest/coverage-v8': + specifier: ^4.0.18 + version: 4.0.18(vitest@4.0.18(jiti@2.6.1)(tsx@4.21.0)) + eslint: + specifier: ^10.0.1 + version: 10.0.1(jiti@2.6.1) + eslint-config-prettier: + specifier: ^10.1.8 + version: 
10.1.8(eslint@10.0.1(jiti@2.6.1)) + rimraf: + specifier: ^6.1.3 + version: 6.1.3 + tsx: + specifier: ^4.21.0 + version: 4.21.0 + typescript: + specifier: ^5.9.3 + version: 5.9.3 + vitest: + specifier: ^4.0.18 + version: 4.0.18(jiti@2.6.1)(tsx@4.21.0) + + src/cli: + dependencies: + '@mcpctl/db': + specifier: workspace:* + version: link:../db + '@mcpctl/shared': + specifier: workspace:* + version: link:../shared + chalk: + specifier: ^5.4.0 + version: 5.6.2 + commander: + specifier: ^13.0.0 + version: 13.1.0 + inquirer: + specifier: ^12.0.0 + version: 12.11.1 + js-yaml: + specifier: ^4.1.0 + version: 4.1.1 + + src/db: + dependencies: + '@mcpctl/shared': + specifier: workspace:* + version: link:../shared + '@prisma/client': + specifier: ^6.0.0 + version: 6.19.2(prisma@6.19.2(typescript@5.9.3))(typescript@5.9.3) + devDependencies: + prisma: + specifier: ^6.0.0 + version: 6.19.2(typescript@5.9.3) + + src/local-proxy: + dependencies: + '@mcpctl/shared': + specifier: workspace:* + version: link:../shared + '@modelcontextprotocol/sdk': + specifier: ^1.0.0 + version: 1.26.0(zod@3.25.76) + + src/mcpd: + dependencies: + '@fastify/cors': + specifier: ^10.0.0 + version: 10.1.0 + '@fastify/helmet': + specifier: ^12.0.0 + version: 12.0.1 + '@fastify/rate-limit': + specifier: ^10.0.0 + version: 10.3.0 + '@mcpctl/db': + specifier: workspace:* + version: link:../db + '@mcpctl/shared': + specifier: workspace:* + version: link:../shared + fastify: + specifier: ^5.0.0 + version: 5.7.4 + zod: + specifier: ^3.24.0 + version: 3.25.76 + + src/shared: + dependencies: + zod: + specifier: ^3.24.0 + version: 3.25.76 + +packages: + + '@babel/helper-string-parser@7.27.1': + resolution: {integrity: sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA==} + engines: {node: '>=6.9.0'} + + '@babel/helper-validator-identifier@7.28.5': + resolution: {integrity: sha512-qSs4ifwzKJSV39ucNjsvc6WVHs6b7S03sOh2OcHF9UHfVPqWWALUsNUVzhSBiItjRZoLHx7nIarVjqKVusUZ1Q==} + 
engines: {node: '>=6.9.0'} + + '@babel/parser@7.29.0': + resolution: {integrity: sha512-IyDgFV5GeDUVX4YdF/3CPULtVGSXXMLh1xVIgdCgxApktqnQV0r7/8Nqthg+8YLGaAtdyIlo2qIdZrbCv4+7ww==} + engines: {node: '>=6.0.0'} + hasBin: true + + '@babel/types@7.29.0': + resolution: {integrity: sha512-LwdZHpScM4Qz8Xw2iKSzS+cfglZzJGvofQICy7W7v4caru4EaAmyUuO6BGrbyQ2mYV11W0U8j5mBhd14dd3B0A==} + engines: {node: '>=6.9.0'} + + '@bcoe/v8-coverage@1.0.2': + resolution: {integrity: sha512-6zABk/ECA/QYSCQ1NGiVwwbQerUCZ+TQbp64Q3AgmfNvurHH0j8TtXa1qbShXA6qqkpAj4V5W8pP6mLe1mcMqA==} + engines: {node: '>=18'} + + '@esbuild/aix-ppc64@0.27.3': + resolution: {integrity: sha512-9fJMTNFTWZMh5qwrBItuziu834eOCUcEqymSH7pY+zoMVEZg3gcPuBNxH1EvfVYe9h0x/Ptw8KBzv7qxb7l8dg==} + engines: {node: '>=18'} + cpu: [ppc64] + os: [aix] + + '@esbuild/android-arm64@0.27.3': + resolution: {integrity: sha512-YdghPYUmj/FX2SYKJ0OZxf+iaKgMsKHVPF1MAq/P8WirnSpCStzKJFjOjzsW0QQ7oIAiccHdcqjbHmJxRb/dmg==} + engines: {node: '>=18'} + cpu: [arm64] + os: [android] + + '@esbuild/android-arm@0.27.3': + resolution: {integrity: sha512-i5D1hPY7GIQmXlXhs2w8AWHhenb00+GxjxRncS2ZM7YNVGNfaMxgzSGuO8o8SJzRc/oZwU2bcScvVERk03QhzA==} + engines: {node: '>=18'} + cpu: [arm] + os: [android] + + '@esbuild/android-x64@0.27.3': + resolution: {integrity: sha512-IN/0BNTkHtk8lkOM8JWAYFg4ORxBkZQf9zXiEOfERX/CzxW3Vg1ewAhU7QSWQpVIzTW+b8Xy+lGzdYXV6UZObQ==} + engines: {node: '>=18'} + cpu: [x64] + os: [android] + + '@esbuild/darwin-arm64@0.27.3': + resolution: {integrity: sha512-Re491k7ByTVRy0t3EKWajdLIr0gz2kKKfzafkth4Q8A5n1xTHrkqZgLLjFEHVD+AXdUGgQMq+Godfq45mGpCKg==} + engines: {node: '>=18'} + cpu: [arm64] + os: [darwin] + + '@esbuild/darwin-x64@0.27.3': + resolution: {integrity: sha512-vHk/hA7/1AckjGzRqi6wbo+jaShzRowYip6rt6q7VYEDX4LEy1pZfDpdxCBnGtl+A5zq8iXDcyuxwtv3hNtHFg==} + engines: {node: '>=18'} + cpu: [x64] + os: [darwin] + + '@esbuild/freebsd-arm64@0.27.3': + resolution: {integrity: 
sha512-ipTYM2fjt3kQAYOvo6vcxJx3nBYAzPjgTCk7QEgZG8AUO3ydUhvelmhrbOheMnGOlaSFUoHXB6un+A7q4ygY9w==} + engines: {node: '>=18'} + cpu: [arm64] + os: [freebsd] + + '@esbuild/freebsd-x64@0.27.3': + resolution: {integrity: sha512-dDk0X87T7mI6U3K9VjWtHOXqwAMJBNN2r7bejDsc+j03SEjtD9HrOl8gVFByeM0aJksoUuUVU9TBaZa2rgj0oA==} + engines: {node: '>=18'} + cpu: [x64] + os: [freebsd] + + '@esbuild/linux-arm64@0.27.3': + resolution: {integrity: sha512-sZOuFz/xWnZ4KH3YfFrKCf1WyPZHakVzTiqji3WDc0BCl2kBwiJLCXpzLzUBLgmp4veFZdvN5ChW4Eq/8Fc2Fg==} + engines: {node: '>=18'} + cpu: [arm64] + os: [linux] + + '@esbuild/linux-arm@0.27.3': + resolution: {integrity: sha512-s6nPv2QkSupJwLYyfS+gwdirm0ukyTFNl3KTgZEAiJDd+iHZcbTPPcWCcRYH+WlNbwChgH2QkE9NSlNrMT8Gfw==} + engines: {node: '>=18'} + cpu: [arm] + os: [linux] + + '@esbuild/linux-ia32@0.27.3': + resolution: {integrity: sha512-yGlQYjdxtLdh0a3jHjuwOrxQjOZYD/C9PfdbgJJF3TIZWnm/tMd/RcNiLngiu4iwcBAOezdnSLAwQDPqTmtTYg==} + engines: {node: '>=18'} + cpu: [ia32] + os: [linux] + + '@esbuild/linux-loong64@0.27.3': + resolution: {integrity: sha512-WO60Sn8ly3gtzhyjATDgieJNet/KqsDlX5nRC5Y3oTFcS1l0KWba+SEa9Ja1GfDqSF1z6hif/SkpQJbL63cgOA==} + engines: {node: '>=18'} + cpu: [loong64] + os: [linux] + + '@esbuild/linux-mips64el@0.27.3': + resolution: {integrity: sha512-APsymYA6sGcZ4pD6k+UxbDjOFSvPWyZhjaiPyl/f79xKxwTnrn5QUnXR5prvetuaSMsb4jgeHewIDCIWljrSxw==} + engines: {node: '>=18'} + cpu: [mips64el] + os: [linux] + + '@esbuild/linux-ppc64@0.27.3': + resolution: {integrity: sha512-eizBnTeBefojtDb9nSh4vvVQ3V9Qf9Df01PfawPcRzJH4gFSgrObw+LveUyDoKU3kxi5+9RJTCWlj4FjYXVPEA==} + engines: {node: '>=18'} + cpu: [ppc64] + os: [linux] + + '@esbuild/linux-riscv64@0.27.3': + resolution: {integrity: sha512-3Emwh0r5wmfm3ssTWRQSyVhbOHvqegUDRd0WhmXKX2mkHJe1SFCMJhagUleMq+Uci34wLSipf8Lagt4LlpRFWQ==} + engines: {node: '>=18'} + cpu: [riscv64] + os: [linux] + + '@esbuild/linux-s390x@0.27.3': + resolution: {integrity: 
sha512-pBHUx9LzXWBc7MFIEEL0yD/ZVtNgLytvx60gES28GcWMqil8ElCYR4kvbV2BDqsHOvVDRrOxGySBM9Fcv744hw==} + engines: {node: '>=18'} + cpu: [s390x] + os: [linux] + + '@esbuild/linux-x64@0.27.3': + resolution: {integrity: sha512-Czi8yzXUWIQYAtL/2y6vogER8pvcsOsk5cpwL4Gk5nJqH5UZiVByIY8Eorm5R13gq+DQKYg0+JyQoytLQas4dA==} + engines: {node: '>=18'} + cpu: [x64] + os: [linux] + + '@esbuild/netbsd-arm64@0.27.3': + resolution: {integrity: sha512-sDpk0RgmTCR/5HguIZa9n9u+HVKf40fbEUt+iTzSnCaGvY9kFP0YKBWZtJaraonFnqef5SlJ8/TiPAxzyS+UoA==} + engines: {node: '>=18'} + cpu: [arm64] + os: [netbsd] + + '@esbuild/netbsd-x64@0.27.3': + resolution: {integrity: sha512-P14lFKJl/DdaE00LItAukUdZO5iqNH7+PjoBm+fLQjtxfcfFE20Xf5CrLsmZdq5LFFZzb5JMZ9grUwvtVYzjiA==} + engines: {node: '>=18'} + cpu: [x64] + os: [netbsd] + + '@esbuild/openbsd-arm64@0.27.3': + resolution: {integrity: sha512-AIcMP77AvirGbRl/UZFTq5hjXK+2wC7qFRGoHSDrZ5v5b8DK/GYpXW3CPRL53NkvDqb9D+alBiC/dV0Fb7eJcw==} + engines: {node: '>=18'} + cpu: [arm64] + os: [openbsd] + + '@esbuild/openbsd-x64@0.27.3': + resolution: {integrity: sha512-DnW2sRrBzA+YnE70LKqnM3P+z8vehfJWHXECbwBmH/CU51z6FiqTQTHFenPlHmo3a8UgpLyH3PT+87OViOh1AQ==} + engines: {node: '>=18'} + cpu: [x64] + os: [openbsd] + + '@esbuild/openharmony-arm64@0.27.3': + resolution: {integrity: sha512-NinAEgr/etERPTsZJ7aEZQvvg/A6IsZG/LgZy+81wON2huV7SrK3e63dU0XhyZP4RKGyTm7aOgmQk0bGp0fy2g==} + engines: {node: '>=18'} + cpu: [arm64] + os: [openharmony] + + '@esbuild/sunos-x64@0.27.3': + resolution: {integrity: sha512-PanZ+nEz+eWoBJ8/f8HKxTTD172SKwdXebZ0ndd953gt1HRBbhMsaNqjTyYLGLPdoWHy4zLU7bDVJztF5f3BHA==} + engines: {node: '>=18'} + cpu: [x64] + os: [sunos] + + '@esbuild/win32-arm64@0.27.3': + resolution: {integrity: sha512-B2t59lWWYrbRDw/tjiWOuzSsFh1Y/E95ofKz7rIVYSQkUYBjfSgf6oeYPNWHToFRr2zx52JKApIcAS/D5TUBnA==} + engines: {node: '>=18'} + cpu: [arm64] + os: [win32] + + '@esbuild/win32-ia32@0.27.3': + resolution: {integrity: 
sha512-QLKSFeXNS8+tHW7tZpMtjlNb7HKau0QDpwm49u0vUp9y1WOF+PEzkU84y9GqYaAVW8aH8f3GcBck26jh54cX4Q==} + engines: {node: '>=18'} + cpu: [ia32] + os: [win32] + + '@esbuild/win32-x64@0.27.3': + resolution: {integrity: sha512-4uJGhsxuptu3OcpVAzli+/gWusVGwZZHTlS63hh++ehExkVT8SgiEf7/uC/PclrPPkLhZqGgCTjd0VWLo6xMqA==} + engines: {node: '>=18'} + cpu: [x64] + os: [win32] + + '@eslint-community/eslint-utils@4.9.1': + resolution: {integrity: sha512-phrYmNiYppR7znFEdqgfWHXR6NCkZEK7hwWDHZUjit/2/U0r6XvkDl0SYnoM51Hq7FhCGdLDT6zxCCOY1hexsQ==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + peerDependencies: + eslint: ^6.0.0 || ^7.0.0 || >=8.0.0 + + '@eslint-community/regexpp@4.12.2': + resolution: {integrity: sha512-EriSTlt5OC9/7SXkRSCAhfSxxoSUgBm33OH+IkwbdpgoqsSsUg7y3uh+IICI/Qg4BBWr3U2i39RpmycbxMq4ew==} + engines: {node: ^12.0.0 || ^14.0.0 || >=16.0.0} + + '@eslint/config-array@0.23.2': + resolution: {integrity: sha512-YF+fE6LV4v5MGWRGj7G404/OZzGNepVF8fxk7jqmqo3lrza7a0uUcDnROGRBG1WFC1omYUS/Wp1f42i0M+3Q3A==} + engines: {node: ^20.19.0 || ^22.13.0 || >=24} + + '@eslint/config-helpers@0.5.2': + resolution: {integrity: sha512-a5MxrdDXEvqnIq+LisyCX6tQMPF/dSJpCfBgBauY+pNZ28yCtSsTvyTYrMhaI+LK26bVyCJfJkT0u8KIj2i1dQ==} + engines: {node: ^20.19.0 || ^22.13.0 || >=24} + + '@eslint/core@1.1.0': + resolution: {integrity: sha512-/nr9K9wkr3P1EzFTdFdMoLuo1PmIxjmwvPozwoSodjNBdefGujXQUF93u1DDZpEaTuDvMsIQddsd35BwtrW9Xw==} + engines: {node: ^20.19.0 || ^22.13.0 || >=24} + + '@eslint/object-schema@3.0.2': + resolution: {integrity: sha512-HOy56KJt48Bx8KmJ+XGQNSUMT/6dZee/M54XyUyuvTvPXJmsERRvBchsUVx1UMe1WwIH49XLAczNC7V2INsuUw==} + engines: {node: ^20.19.0 || ^22.13.0 || >=24} + + '@eslint/plugin-kit@0.6.0': + resolution: {integrity: sha512-bIZEUzOI1jkhviX2cp5vNyXQc6olzb2ohewQubuYlMXZ2Q/XjBO0x0XhGPvc9fjSIiUN0vw+0hq53BJ4eQSJKQ==} + engines: {node: ^20.19.0 || ^22.13.0 || >=24} + + '@fastify/ajv-compiler@4.0.5': + resolution: {integrity: 
sha512-KoWKW+MhvfTRWL4qrhUwAAZoaChluo0m0vbiJlGMt2GXvL4LVPQEjt8kSpHI3IBq5Rez8fg+XeH3cneztq+C7A==} + + '@fastify/cors@10.1.0': + resolution: {integrity: sha512-MZyBCBJtII60CU9Xme/iE4aEy8G7QpzGR8zkdXZkDFt7ElEMachbE61tfhAG/bvSaULlqlf0huMT12T7iqEmdQ==} + + '@fastify/error@4.2.0': + resolution: {integrity: sha512-RSo3sVDXfHskiBZKBPRgnQTtIqpi/7zhJOEmAxCiBcM7d0uwdGdxLlsCaLzGs8v8NnxIRlfG0N51p5yFaOentQ==} + + '@fastify/fast-json-stringify-compiler@5.0.3': + resolution: {integrity: sha512-uik7yYHkLr6fxd8hJSZ8c+xF4WafPK+XzneQDPU+D10r5X19GW8lJcom2YijX2+qtFF1ENJlHXKFM9ouXNJYgQ==} + + '@fastify/forwarded@3.0.1': + resolution: {integrity: sha512-JqDochHFqXs3C3Ml3gOY58zM7OqO9ENqPo0UqAjAjH8L01fRZqwX9iLeX34//kiJubF7r2ZQHtBRU36vONbLlw==} + + '@fastify/helmet@12.0.1': + resolution: {integrity: sha512-kkjBcedWwdflRThovGuvN9jB2QQLytBqArCFPdMIb7o2Fp0l/H3xxYi/6x/SSRuH/FFt9qpTGIfJz2bfnMrLqA==} + + '@fastify/merge-json-schemas@0.2.1': + resolution: {integrity: sha512-OA3KGBCy6KtIvLf8DINC5880o5iBlDX4SxzLQS8HorJAbqluzLRn80UXU0bxZn7UOFhFgpRJDasfwn9nG4FG4A==} + + '@fastify/proxy-addr@5.1.0': + resolution: {integrity: sha512-INS+6gh91cLUjB+PVHfu1UqcB76Sqtpyp7bnL+FYojhjygvOPA9ctiD/JDKsyD9Xgu4hUhCSJBPig/w7duNajw==} + + '@fastify/rate-limit@10.3.0': + resolution: {integrity: sha512-eIGkG9XKQs0nyynatApA3EVrojHOuq4l6fhB4eeCk4PIOeadvOJz9/4w3vGI44Go17uaXOWEcPkaD8kuKm7g6Q==} + + '@hono/node-server@1.19.9': + resolution: {integrity: sha512-vHL6w3ecZsky+8P5MD+eFfaGTyCeOHUIFYMGpQGbrBTSmNNoxv0if69rEZ5giu36weC5saFuznL411gRX7bJDw==} + engines: {node: '>=18.14.1'} + peerDependencies: + hono: ^4 + + '@humanfs/core@0.19.1': + resolution: {integrity: sha512-5DyQ4+1JEUzejeK1JGICcideyfUbGixgS9jNgex5nqkW+cY7WZhxBigmieN5Qnw9ZosSNVC9KQKyb+GUaGyKUA==} + engines: {node: '>=18.18.0'} + + '@humanfs/node@0.16.7': + resolution: {integrity: sha512-/zUx+yOsIrG4Y43Eh2peDeKCxlRt/gET6aHfaKpuq267qXdYDFViVHfMaLyygZOnl0kGWxFIgsBy8QFuTLUXEQ==} + engines: {node: '>=18.18.0'} + + '@humanwhocodes/module-importer@1.0.1': + resolution: 
{integrity: sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==} + engines: {node: '>=12.22'} + + '@humanwhocodes/retry@0.4.3': + resolution: {integrity: sha512-bV0Tgo9K4hfPCek+aMAn81RppFKv2ySDQeMoSZuvTASywNTnVJCArCZE2FWqpvIatKu7VMRLWlR1EazvVhDyhQ==} + engines: {node: '>=18.18'} + + '@inquirer/ansi@1.0.2': + resolution: {integrity: sha512-S8qNSZiYzFd0wAcyG5AXCvUHC5Sr7xpZ9wZ2py9XR88jUz8wooStVx5M6dRzczbBWjic9NP7+rY0Xi7qqK/aMQ==} + engines: {node: '>=18'} + + '@inquirer/checkbox@4.3.2': + resolution: {integrity: sha512-VXukHf0RR1doGe6Sm4F0Em7SWYLTHSsbGfJdS9Ja2bX5/D5uwVOEjr07cncLROdBvmnvCATYEWlHqYmXv2IlQA==} + engines: {node: '>=18'} + peerDependencies: + '@types/node': '>=18' + peerDependenciesMeta: + '@types/node': + optional: true + + '@inquirer/confirm@5.1.21': + resolution: {integrity: sha512-KR8edRkIsUayMXV+o3Gv+q4jlhENF9nMYUZs9PA2HzrXeHI8M5uDag70U7RJn9yyiMZSbtF5/UexBtAVtZGSbQ==} + engines: {node: '>=18'} + peerDependencies: + '@types/node': '>=18' + peerDependenciesMeta: + '@types/node': + optional: true + + '@inquirer/core@10.3.2': + resolution: {integrity: sha512-43RTuEbfP8MbKzedNqBrlhhNKVwoK//vUFNW3Q3vZ88BLcrs4kYpGg+B2mm5p2K/HfygoCxuKwJJiv8PbGmE0A==} + engines: {node: '>=18'} + peerDependencies: + '@types/node': '>=18' + peerDependenciesMeta: + '@types/node': + optional: true + + '@inquirer/editor@4.2.23': + resolution: {integrity: sha512-aLSROkEwirotxZ1pBaP8tugXRFCxW94gwrQLxXfrZsKkfjOYC1aRvAZuhpJOb5cu4IBTJdsCigUlf2iCOu4ZDQ==} + engines: {node: '>=18'} + peerDependencies: + '@types/node': '>=18' + peerDependenciesMeta: + '@types/node': + optional: true + + '@inquirer/expand@4.0.23': + resolution: {integrity: sha512-nRzdOyFYnpeYTTR2qFwEVmIWypzdAx/sIkCMeTNTcflFOovfqUk+HcFhQQVBftAh9gmGrpFj6QcGEqrDMDOiew==} + engines: {node: '>=18'} + peerDependencies: + '@types/node': '>=18' + peerDependenciesMeta: + '@types/node': + optional: true + + '@inquirer/external-editor@1.0.3': + resolution: {integrity: 
sha512-RWbSrDiYmO4LbejWY7ttpxczuwQyZLBUyygsA9Nsv95hpzUWwnNTVQmAq3xuh7vNwCp07UTmE5i11XAEExx4RA==} + engines: {node: '>=18'} + peerDependencies: + '@types/node': '>=18' + peerDependenciesMeta: + '@types/node': + optional: true + + '@inquirer/figures@1.0.15': + resolution: {integrity: sha512-t2IEY+unGHOzAaVM5Xx6DEWKeXlDDcNPeDyUpsRc6CUhBfU3VQOEl+Vssh7VNp1dR8MdUJBWhuObjXCsVpjN5g==} + engines: {node: '>=18'} + + '@inquirer/input@4.3.1': + resolution: {integrity: sha512-kN0pAM4yPrLjJ1XJBjDxyfDduXOuQHrBB8aLDMueuwUGn+vNpF7Gq7TvyVxx8u4SHlFFj4trmj+a2cbpG4Jn1g==} + engines: {node: '>=18'} + peerDependencies: + '@types/node': '>=18' + peerDependenciesMeta: + '@types/node': + optional: true + + '@inquirer/number@3.0.23': + resolution: {integrity: sha512-5Smv0OK7K0KUzUfYUXDXQc9jrf8OHo4ktlEayFlelCjwMXz0299Y8OrI+lj7i4gCBY15UObk76q0QtxjzFcFcg==} + engines: {node: '>=18'} + peerDependencies: + '@types/node': '>=18' + peerDependenciesMeta: + '@types/node': + optional: true + + '@inquirer/password@4.0.23': + resolution: {integrity: sha512-zREJHjhT5vJBMZX/IUbyI9zVtVfOLiTO66MrF/3GFZYZ7T4YILW5MSkEYHceSii/KtRk+4i3RE7E1CUXA2jHcA==} + engines: {node: '>=18'} + peerDependencies: + '@types/node': '>=18' + peerDependenciesMeta: + '@types/node': + optional: true + + '@inquirer/prompts@7.10.1': + resolution: {integrity: sha512-Dx/y9bCQcXLI5ooQ5KyvA4FTgeo2jYj/7plWfV5Ak5wDPKQZgudKez2ixyfz7tKXzcJciTxqLeK7R9HItwiByg==} + engines: {node: '>=18'} + peerDependencies: + '@types/node': '>=18' + peerDependenciesMeta: + '@types/node': + optional: true + + '@inquirer/rawlist@4.1.11': + resolution: {integrity: sha512-+LLQB8XGr3I5LZN/GuAHo+GpDJegQwuPARLChlMICNdwW7OwV2izlCSCxN6cqpL0sMXmbKbFcItJgdQq5EBXTw==} + engines: {node: '>=18'} + peerDependencies: + '@types/node': '>=18' + peerDependenciesMeta: + '@types/node': + optional: true + + '@inquirer/search@3.2.2': + resolution: {integrity: sha512-p2bvRfENXCZdWF/U2BXvnSI9h+tuA8iNqtUKb9UWbmLYCRQxd8WkvwWvYn+3NgYaNwdUkHytJMGG4MMLucI1kA==} + engines: {node: '>=18'} + 
peerDependencies: + '@types/node': '>=18' + peerDependenciesMeta: + '@types/node': + optional: true + + '@inquirer/select@4.4.2': + resolution: {integrity: sha512-l4xMuJo55MAe+N7Qr4rX90vypFwCajSakx59qe/tMaC1aEHWLyw68wF4o0A4SLAY4E0nd+Vt+EyskeDIqu1M6w==} + engines: {node: '>=18'} + peerDependencies: + '@types/node': '>=18' + peerDependenciesMeta: + '@types/node': + optional: true + + '@inquirer/type@3.0.10': + resolution: {integrity: sha512-BvziSRxfz5Ov8ch0z/n3oijRSEcEsHnhggm4xFZe93DHcUCTlutlq9Ox4SVENAfcRD22UQq7T/atg9Wr3k09eA==} + engines: {node: '>=18'} + peerDependencies: + '@types/node': '>=18' + peerDependenciesMeta: + '@types/node': + optional: true + + '@jridgewell/resolve-uri@3.1.2': + resolution: {integrity: sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==} + engines: {node: '>=6.0.0'} + + '@jridgewell/sourcemap-codec@1.5.5': + resolution: {integrity: sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==} + + '@jridgewell/trace-mapping@0.3.31': + resolution: {integrity: sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw==} + + '@lukeed/ms@2.0.2': + resolution: {integrity: sha512-9I2Zn6+NJLfaGoz9jN3lpwDgAYvfGeNYdbAIjJOqzs4Tpc+VU3Jqq4IofSUBKajiDS8k9fZIg18/z13mpk1bsA==} + engines: {node: '>=8'} + + '@modelcontextprotocol/sdk@1.26.0': + resolution: {integrity: sha512-Y5RmPncpiDtTXDbLKswIJzTqu2hyBKxTNsgKqKclDbhIgg1wgtf1fRuvxgTnRfcnxtvvgbIEcqUOzZrJ6iSReg==} + engines: {node: '>=18'} + peerDependencies: + '@cfworker/json-schema': ^4.1.1 + zod: ^3.25 || ^4.0 + peerDependenciesMeta: + '@cfworker/json-schema': + optional: true + + '@pinojs/redact@0.4.0': + resolution: {integrity: sha512-k2ENnmBugE/rzQfEcdWHcCY+/FM3VLzH9cYEsbdsoqrvzAKRhUZeRNhAZvB8OitQJ1TBed3yqWtdjzS6wJKBwg==} + + '@prisma/client@6.19.2': + resolution: {integrity: sha512-gR2EMvfK/aTxsuooaDA32D8v+us/8AAet+C3J1cc04SW35FPdZYgLF+iN4NDLUgAaUGTKdAB0CYenu1TAgGdMg==} + engines: 
{node: '>=18.18'} + peerDependencies: + prisma: '*' + typescript: '>=5.1.0' + peerDependenciesMeta: + prisma: + optional: true + typescript: + optional: true + + '@prisma/config@6.19.2': + resolution: {integrity: sha512-kadBGDl+aUswv/zZMk9Mx0C8UZs1kjao8H9/JpI4Wh4SHZaM7zkTwiKn/iFLfRg+XtOAo/Z/c6pAYhijKl0nzQ==} + + '@prisma/debug@6.19.2': + resolution: {integrity: sha512-lFnEZsLdFLmEVCVNdskLDCL8Uup41GDfU0LUfquw+ercJC8ODTuL0WNKgOKmYxCJVvFwf0OuZBzW99DuWmoH2A==} + + '@prisma/engines-version@7.1.1-3.c2990dca591cba766e3b7ef5d9e8a84796e47ab7': + resolution: {integrity: sha512-03bgb1VD5gvuumNf+7fVGBzfpJPjmqV423l/WxsWk2cNQ42JD0/SsFBPhN6z8iAvdHs07/7ei77SKu7aZfq8bA==} + + '@prisma/engines@6.19.2': + resolution: {integrity: sha512-TTkJ8r+uk/uqczX40wb+ODG0E0icVsMgwCTyTHXehaEfb0uo80M9g1aW1tEJrxmFHeOZFXdI2sTA1j1AgcHi4A==} + + '@prisma/fetch-engine@6.19.2': + resolution: {integrity: sha512-h4Ff4Pho+SR1S8XerMCC12X//oY2bG3Iug/fUnudfcXEUnIeRiBdXHFdGlGOgQ3HqKgosTEhkZMvGM9tWtYC+Q==} + + '@prisma/get-platform@6.19.2': + resolution: {integrity: sha512-PGLr06JUSTqIvztJtAzIxOwtWKtJm5WwOG6xpsgD37Rc84FpfUBGLKz65YpJBGtkRQGXTYEFie7pYALocC3MtA==} + + '@rollup/rollup-android-arm-eabi@4.58.0': + resolution: {integrity: sha512-mr0tmS/4FoVk1cnaeN244A/wjvGDNItZKR8hRhnmCzygyRXYtKF5jVDSIILR1U97CTzAYmbgIj/Dukg62ggG5w==} + cpu: [arm] + os: [android] + + '@rollup/rollup-android-arm64@4.58.0': + resolution: {integrity: sha512-+s++dbp+/RTte62mQD9wLSbiMTV+xr/PeRJEc/sFZFSBRlHPNPVaf5FXlzAL77Mr8FtSfQqCN+I598M8U41ccQ==} + cpu: [arm64] + os: [android] + + '@rollup/rollup-darwin-arm64@4.58.0': + resolution: {integrity: sha512-MFWBwTcYs0jZbINQBXHfSrpSQJq3IUOakcKPzfeSznONop14Pxuqa0Kg19GD0rNBMPQI2tFtu3UzapZpH0Uc1Q==} + cpu: [arm64] + os: [darwin] + + '@rollup/rollup-darwin-x64@4.58.0': + resolution: {integrity: sha512-yiKJY7pj9c9JwzuKYLFaDZw5gma3fI9bkPEIyofvVfsPqjCWPglSHdpdwXpKGvDeYDms3Qal8qGMEHZ1M/4Udg==} + cpu: [x64] + os: [darwin] + + '@rollup/rollup-freebsd-arm64@4.58.0': + resolution: {integrity: 
sha512-x97kCoBh5MOevpn/CNK9W1x8BEzO238541BGWBc315uOlN0AD/ifZ1msg+ZQB05Ux+VF6EcYqpiagfLJ8U3LvQ==} + cpu: [arm64] + os: [freebsd] + + '@rollup/rollup-freebsd-x64@4.58.0': + resolution: {integrity: sha512-Aa8jPoZ6IQAG2eIrcXPpjRcMjROMFxCt1UYPZZtCxRV68WkuSigYtQ/7Zwrcr2IvtNJo7T2JfDXyMLxq5L4Jlg==} + cpu: [x64] + os: [freebsd] + + '@rollup/rollup-linux-arm-gnueabihf@4.58.0': + resolution: {integrity: sha512-Ob8YgT5kD/lSIYW2Rcngs5kNB/44Q2RzBSPz9brf2WEtcGR7/f/E9HeHn1wYaAwKBni+bdXEwgHvUd0x12lQSA==} + cpu: [arm] + os: [linux] + + '@rollup/rollup-linux-arm-musleabihf@4.58.0': + resolution: {integrity: sha512-K+RI5oP1ceqoadvNt1FecL17Qtw/n9BgRSzxif3rTL2QlIu88ccvY+Y9nnHe/cmT5zbH9+bpiJuG1mGHRVwF4Q==} + cpu: [arm] + os: [linux] + + '@rollup/rollup-linux-arm64-gnu@4.58.0': + resolution: {integrity: sha512-T+17JAsCKUjmbopcKepJjHWHXSjeW7O5PL7lEFaeQmiVyw4kkc5/lyYKzrv6ElWRX/MrEWfPiJWqbTvfIvjM1Q==} + cpu: [arm64] + os: [linux] + + '@rollup/rollup-linux-arm64-musl@4.58.0': + resolution: {integrity: sha512-cCePktb9+6R9itIJdeCFF9txPU7pQeEHB5AbHu/MKsfH/k70ZtOeq1k4YAtBv9Z7mmKI5/wOLYjQ+B9QdxR6LA==} + cpu: [arm64] + os: [linux] + + '@rollup/rollup-linux-loong64-gnu@4.58.0': + resolution: {integrity: sha512-iekUaLkfliAsDl4/xSdoCJ1gnnIXvoNz85C8U8+ZxknM5pBStfZjeXgB8lXobDQvvPRCN8FPmmuTtH+z95HTmg==} + cpu: [loong64] + os: [linux] + + '@rollup/rollup-linux-loong64-musl@4.58.0': + resolution: {integrity: sha512-68ofRgJNl/jYJbxFjCKE7IwhbfxOl1muPN4KbIqAIe32lm22KmU7E8OPvyy68HTNkI2iV/c8y2kSPSm2mW/Q9Q==} + cpu: [loong64] + os: [linux] + + '@rollup/rollup-linux-ppc64-gnu@4.58.0': + resolution: {integrity: sha512-dpz8vT0i+JqUKuSNPCP5SYyIV2Lh0sNL1+FhM7eLC457d5B9/BC3kDPp5BBftMmTNsBarcPcoz5UGSsnCiw4XQ==} + cpu: [ppc64] + os: [linux] + + '@rollup/rollup-linux-ppc64-musl@4.58.0': + resolution: {integrity: sha512-4gdkkf9UJ7tafnweBCR/mk4jf3Jfl0cKX9Np80t5i78kjIH0ZdezUv/JDI2VtruE5lunfACqftJ8dIMGN4oHew==} + cpu: [ppc64] + os: [linux] + + '@rollup/rollup-linux-riscv64-gnu@4.58.0': + resolution: {integrity: 
sha512-YFS4vPnOkDTD/JriUeeZurFYoJhPf9GQQEF/v4lltp3mVcBmnsAdjEWhr2cjUCZzZNzxCG0HZOvJU44UGHSdzw==} + cpu: [riscv64] + os: [linux] + + '@rollup/rollup-linux-riscv64-musl@4.58.0': + resolution: {integrity: sha512-x2xgZlFne+QVNKV8b4wwaCS8pwq3y14zedZ5DqLzjdRITvreBk//4Knbcvm7+lWmms9V9qFp60MtUd0/t/PXPw==} + cpu: [riscv64] + os: [linux] + + '@rollup/rollup-linux-s390x-gnu@4.58.0': + resolution: {integrity: sha512-jIhrujyn4UnWF8S+DHSkAkDEO3hLX0cjzxJZPLF80xFyzyUIYgSMRcYQ3+uqEoyDD2beGq7Dj7edi8OnJcS/hg==} + cpu: [s390x] + os: [linux] + + '@rollup/rollup-linux-x64-gnu@4.58.0': + resolution: {integrity: sha512-+410Srdoh78MKSJxTQ+hZ/Mx+ajd6RjjPwBPNd0R3J9FtL6ZA0GqiiyNjCO9In0IzZkCNrpGymSfn+kgyPQocg==} + cpu: [x64] + os: [linux] + + '@rollup/rollup-linux-x64-musl@4.58.0': + resolution: {integrity: sha512-ZjMyby5SICi227y1MTR3VYBpFTdZs823Rs/hpakufleBoufoOIB6jtm9FEoxn/cgO7l6PM2rCEl5Kre5vX0QrQ==} + cpu: [x64] + os: [linux] + + '@rollup/rollup-openbsd-x64@4.58.0': + resolution: {integrity: sha512-ds4iwfYkSQ0k1nb8LTcyXw//ToHOnNTJtceySpL3fa7tc/AsE+UpUFphW126A6fKBGJD5dhRvg8zw1rvoGFxmw==} + cpu: [x64] + os: [openbsd] + + '@rollup/rollup-openharmony-arm64@4.58.0': + resolution: {integrity: sha512-fd/zpJniln4ICdPkjWFhZYeY/bpnaN9pGa6ko+5WD38I0tTqk9lXMgXZg09MNdhpARngmxiCg0B0XUamNw/5BQ==} + cpu: [arm64] + os: [openharmony] + + '@rollup/rollup-win32-arm64-msvc@4.58.0': + resolution: {integrity: sha512-YpG8dUOip7DCz3nr/JUfPbIUo+2d/dy++5bFzgi4ugOGBIox+qMbbqt/JoORwvI/C9Kn2tz6+Bieoqd5+B1CjA==} + cpu: [arm64] + os: [win32] + + '@rollup/rollup-win32-ia32-msvc@4.58.0': + resolution: {integrity: sha512-b9DI8jpFQVh4hIXFr0/+N/TzLdpBIoPzjt0Rt4xJbW3mzguV3mduR9cNgiuFcuL/TeORejJhCWiAXe3E/6PxWA==} + cpu: [ia32] + os: [win32] + + '@rollup/rollup-win32-x64-gnu@4.58.0': + resolution: {integrity: sha512-CSrVpmoRJFN06LL9xhkitkwUcTZtIotYAF5p6XOR2zW0Zz5mzb3IPpcoPhB02frzMHFNo1reQ9xSF5fFm3hUsQ==} + cpu: [x64] + os: [win32] + + '@rollup/rollup-win32-x64-msvc@4.58.0': + resolution: {integrity: 
sha512-QFsBgQNTnh5K0t/sBsjJLq24YVqEIVkGpfN2VHsnN90soZyhaiA9UUHufcctVNL4ypJY0wrwad0wslx2KJQ1/w==} + cpu: [x64] + os: [win32] + + '@standard-schema/spec@1.1.0': + resolution: {integrity: sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w==} + + '@types/chai@5.2.3': + resolution: {integrity: sha512-Mw558oeA9fFbv65/y4mHtXDs9bPnFMZAL/jxdPFUpOHHIXX91mcgEHbS5Lahr+pwZFR8A7GQleRWeI6cGFC2UA==} + + '@types/deep-eql@4.0.2': + resolution: {integrity: sha512-c9h9dVVMigMPc4bwTvC5dxqtqJZwQPePsWjPlpSOnojbor6pGqdk541lfA7AqFQr5pB1BRdq0juY9db81BwyFw==} + + '@types/esrecurse@4.3.1': + resolution: {integrity: sha512-xJBAbDifo5hpffDBuHl0Y8ywswbiAp/Wi7Y/GtAgSlZyIABppyurxVueOPE8LUQOxdlgi6Zqce7uoEpqNTeiUw==} + + '@types/estree@1.0.8': + resolution: {integrity: sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==} + + '@types/json-schema@7.0.15': + resolution: {integrity: sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==} + + '@typescript-eslint/eslint-plugin@8.56.0': + resolution: {integrity: sha512-lRyPDLzNCuae71A3t9NEINBiTn7swyOhvUj3MyUOxb8x6g6vPEFoOU+ZRmGMusNC3X3YMhqMIX7i8ShqhT74Pw==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + peerDependencies: + '@typescript-eslint/parser': ^8.56.0 + eslint: ^8.57.0 || ^9.0.0 || ^10.0.0 + typescript: '>=4.8.4 <6.0.0' + + '@typescript-eslint/parser@8.56.0': + resolution: {integrity: sha512-IgSWvLobTDOjnaxAfDTIHaECbkNlAlKv2j5SjpB2v7QHKv1FIfjwMy8FsDbVfDX/KjmCmYICcw7uGaXLhtsLNg==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + peerDependencies: + eslint: ^8.57.0 || ^9.0.0 || ^10.0.0 + typescript: '>=4.8.4 <6.0.0' + + '@typescript-eslint/project-service@8.56.0': + resolution: {integrity: sha512-M3rnyL1vIQOMeWxTWIW096/TtVP+8W3p/XnaFflhmcFp+U4zlxUxWj4XwNs6HbDeTtN4yun0GNTTDBw/SvufKg==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + peerDependencies: + typescript: '>=4.8.4 <6.0.0' + + 
'@typescript-eslint/scope-manager@8.56.0': + resolution: {integrity: sha512-7UiO/XwMHquH+ZzfVCfUNkIXlp/yQjjnlYUyYz7pfvlK3/EyyN6BK+emDmGNyQLBtLGaYrTAI6KOw8tFucWL2w==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + + '@typescript-eslint/tsconfig-utils@8.56.0': + resolution: {integrity: sha512-bSJoIIt4o3lKXD3xmDh9chZcjCz5Lk8xS7Rxn+6l5/pKrDpkCwtQNQQwZ2qRPk7TkUYhrq3WPIHXOXlbXP0itg==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + peerDependencies: + typescript: '>=4.8.4 <6.0.0' + + '@typescript-eslint/type-utils@8.56.0': + resolution: {integrity: sha512-qX2L3HWOU2nuDs6GzglBeuFXviDODreS58tLY/BALPC7iu3Fa+J7EOTwnX9PdNBxUI7Uh0ntP0YWGnxCkXzmfA==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + peerDependencies: + eslint: ^8.57.0 || ^9.0.0 || ^10.0.0 + typescript: '>=4.8.4 <6.0.0' + + '@typescript-eslint/types@8.56.0': + resolution: {integrity: sha512-DBsLPs3GsWhX5HylbP9HNG15U0bnwut55Lx12bHB9MpXxQ+R5GC8MwQe+N1UFXxAeQDvEsEDY6ZYwX03K7Z6HQ==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + + '@typescript-eslint/typescript-estree@8.56.0': + resolution: {integrity: sha512-ex1nTUMWrseMltXUHmR2GAQ4d+WjkZCT4f+4bVsps8QEdh0vlBsaCokKTPlnqBFqqGaxilDNJG7b8dolW2m43Q==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + peerDependencies: + typescript: '>=4.8.4 <6.0.0' + + '@typescript-eslint/utils@8.56.0': + resolution: {integrity: sha512-RZ3Qsmi2nFGsS+n+kjLAYDPVlrzf7UhTffrDIKr+h2yzAlYP/y5ZulU0yeDEPItos2Ph46JAL5P/On3pe7kDIQ==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + peerDependencies: + eslint: ^8.57.0 || ^9.0.0 || ^10.0.0 + typescript: '>=4.8.4 <6.0.0' + + '@typescript-eslint/visitor-keys@8.56.0': + resolution: {integrity: sha512-q+SL+b+05Ud6LbEE35qe4A99P+htKTKVbyiNEe45eCbJFyh/HVK9QXwlrbz+Q4L8SOW4roxSVwXYj4DMBT7Ieg==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + + '@vitest/coverage-v8@4.0.18': + resolution: {integrity: sha512-7i+N2i0+ME+2JFZhfuz7Tg/FqKtilHjGyGvoHYQ6iLV0zahbsJ9sljC9OcFcPDbhYKCet+sG8SsVqlyGvPflZg==} + peerDependencies: + 
'@vitest/browser': 4.0.18 + vitest: 4.0.18 + peerDependenciesMeta: + '@vitest/browser': + optional: true + + '@vitest/expect@4.0.18': + resolution: {integrity: sha512-8sCWUyckXXYvx4opfzVY03EOiYVxyNrHS5QxX3DAIi5dpJAAkyJezHCP77VMX4HKA2LDT/Jpfo8i2r5BE3GnQQ==} + + '@vitest/mocker@4.0.18': + resolution: {integrity: sha512-HhVd0MDnzzsgevnOWCBj5Otnzobjy5wLBe4EdeeFGv8luMsGcYqDuFRMcttKWZA5vVO8RFjexVovXvAM4JoJDQ==} + peerDependencies: + msw: ^2.4.9 + vite: ^6.0.0 || ^7.0.0-0 + peerDependenciesMeta: + msw: + optional: true + vite: + optional: true + + '@vitest/pretty-format@4.0.18': + resolution: {integrity: sha512-P24GK3GulZWC5tz87ux0m8OADrQIUVDPIjjj65vBXYG17ZeU3qD7r+MNZ1RNv4l8CGU2vtTRqixrOi9fYk/yKw==} + + '@vitest/runner@4.0.18': + resolution: {integrity: sha512-rpk9y12PGa22Jg6g5M3UVVnTS7+zycIGk9ZNGN+m6tZHKQb7jrP7/77WfZy13Y/EUDd52NDsLRQhYKtv7XfPQw==} + + '@vitest/snapshot@4.0.18': + resolution: {integrity: sha512-PCiV0rcl7jKQjbgYqjtakly6T1uwv/5BQ9SwBLekVg/EaYeQFPiXcgrC2Y7vDMA8dM1SUEAEV82kgSQIlXNMvA==} + + '@vitest/spy@4.0.18': + resolution: {integrity: sha512-cbQt3PTSD7P2OARdVW3qWER5EGq7PHlvE+QfzSC0lbwO+xnt7+XH06ZzFjFRgzUX//JmpxrCu92VdwvEPlWSNw==} + + '@vitest/utils@4.0.18': + resolution: {integrity: sha512-msMRKLMVLWygpK3u2Hybgi4MNjcYJvwTb0Ru09+fOyCXIgT5raYP041DRRdiJiI3k/2U6SEbAETB3YtBrUkCFA==} + + abstract-logging@2.0.1: + resolution: {integrity: sha512-2BjRTZxTPvheOvGbBslFSYOUkr+SjPtOnrLP33f+VIWLzezQpZcqVg7ja3L4dBXmzzgwT+a029jRx5PCi3JuiA==} + + accepts@2.0.0: + resolution: {integrity: sha512-5cvg6CtKwfgdmVqY1WIiXKc3Q1bkRqGLi+2W/6ao+6Y7gu/RCwRuAhGEzh5B4KlszSuTLgZYuqFqo5bImjNKng==} + engines: {node: '>= 0.6'} + + acorn-jsx@5.3.2: + resolution: {integrity: sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==} + peerDependencies: + acorn: ^6.0.0 || ^7.0.0 || ^8.0.0 + + acorn@8.16.0: + resolution: {integrity: sha512-UVJyE9MttOsBQIDKw1skb9nAwQuR5wuGD3+82K6JgJlm/Y+KI92oNsMNGZCYdDsVtRHSak0pcV5Dno5+4jh9sw==} + engines: {node: '>=0.4.0'} 
+ hasBin: true + + ajv-formats@3.0.1: + resolution: {integrity: sha512-8iUql50EUR+uUcdRQ3HDqa6EVyo3docL8g5WJ3FNcWmu62IbkGUue/pEyLBW8VGKKucTPgqeks4fIU1DA4yowQ==} + peerDependencies: + ajv: ^8.0.0 + peerDependenciesMeta: + ajv: + optional: true + + ajv@6.14.0: + resolution: {integrity: sha512-IWrosm/yrn43eiKqkfkHis7QioDleaXQHdDVPKg0FSwwd/DuvyX79TZnFOnYpB7dcsFAMmtFztZuXPDvSePkFw==} + + ajv@8.18.0: + resolution: {integrity: sha512-PlXPeEWMXMZ7sPYOHqmDyCJzcfNrUr3fGNKtezX14ykXOEIvyK81d+qydx89KY5O71FKMPaQ2vBfBFI5NHR63A==} + + ansi-regex@5.0.1: + resolution: {integrity: sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==} + engines: {node: '>=8'} + + ansi-styles@4.3.0: + resolution: {integrity: sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==} + engines: {node: '>=8'} + + argparse@2.0.1: + resolution: {integrity: sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==} + + assertion-error@2.0.1: + resolution: {integrity: sha512-Izi8RQcffqCeNVgFigKli1ssklIbpHnCYc6AknXGYoB6grJqyeby7jv12JUQgmTAnIDnbck1uxksT4dzN3PWBA==} + engines: {node: '>=12'} + + ast-v8-to-istanbul@0.3.11: + resolution: {integrity: sha512-Qya9fkoofMjCBNVdWINMjB5KZvkYfaO9/anwkWnjxibpWUxo5iHl2sOdP7/uAqaRuUYuoo8rDwnbaaKVFxoUvw==} + + atomic-sleep@1.0.0: + resolution: {integrity: sha512-kNOjDqAh7px0XWNI+4QbzoiR/nTkHAWNud2uvnJquD1/x5a7EQZMJT0AczqK0Qn67oY/TTQ1LbUKajZpp3I9tQ==} + engines: {node: '>=8.0.0'} + + avvio@9.2.0: + resolution: {integrity: sha512-2t/sy01ArdHHE0vRH5Hsay+RtCZt3dLPji7W7/MMOCEgze5b7SNDC4j5H6FnVgPkI1MTNFGzHdHrVXDDl7QSSQ==} + + balanced-match@1.0.2: + resolution: {integrity: sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==} + + balanced-match@4.0.3: + resolution: {integrity: sha512-1pHv8LX9CpKut1Zp4EXey7Z8OfH11ONNH6Dhi2WDUt31VVZFXZzKwXcysBgqSumFCmR+0dqjMK5v5JiFHzi0+g==} + engines: {node: 20 || >=22} + + 
body-parser@2.2.2: + resolution: {integrity: sha512-oP5VkATKlNwcgvxi0vM0p/D3n2C3EReYVX+DNYs5TjZFn/oQt2j+4sVJtSMr18pdRr8wjTcBl6LoV+FUwzPmNA==} + engines: {node: '>=18'} + + brace-expansion@2.0.2: + resolution: {integrity: sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==} + + brace-expansion@5.0.2: + resolution: {integrity: sha512-Pdk8c9poy+YhOgVWw1JNN22/HcivgKWwpxKq04M/jTmHyCZn12WPJebZxdjSa5TmBqISrUSgNYU3eRORljfCCw==} + engines: {node: 20 || >=22} + + bytes@3.1.2: + resolution: {integrity: sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==} + engines: {node: '>= 0.8'} + + c12@3.1.0: + resolution: {integrity: sha512-uWoS8OU1MEIsOv8p/5a82c3H31LsWVR5qiyXVfBNOzfffjUWtPnhAb4BYI2uG2HfGmZmFjCtui5XNWaps+iFuw==} + peerDependencies: + magicast: ^0.3.5 + peerDependenciesMeta: + magicast: + optional: true + + call-bind-apply-helpers@1.0.2: + resolution: {integrity: sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==} + engines: {node: '>= 0.4'} + + call-bound@1.0.4: + resolution: {integrity: sha512-+ys997U96po4Kx/ABpBCqhA9EuxJaQWDQg7295H4hBphv3IZg0boBKuwYpt4YXp6MZ5AmZQnU/tyMTlRpaSejg==} + engines: {node: '>= 0.4'} + + chai@6.2.2: + resolution: {integrity: sha512-NUPRluOfOiTKBKvWPtSD4PhFvWCqOi0BGStNWs57X9js7XGTprSmFoz5F0tWhR4WPjNeR9jXqdC7/UpSJTnlRg==} + engines: {node: '>=18'} + + chalk@5.6.2: + resolution: {integrity: sha512-7NzBL0rN6fMUW+f7A6Io4h40qQlG+xGmtMxfbnH/K7TAtt8JQWVQK+6g0UXKMeVJoyV5EkkNsErQ8pVD3bLHbA==} + engines: {node: ^12.17.0 || ^14.13 || >=16.0.0} + + chardet@2.1.1: + resolution: {integrity: sha512-PsezH1rqdV9VvyNhxxOW32/d75r01NY7TQCmOqomRo15ZSOKbpTFVsfjghxo6JloQUCGnH4k1LGu0R4yCLlWQQ==} + + chokidar@4.0.3: + resolution: {integrity: sha512-Qgzu8kfBvo+cA4962jnP1KkS6Dop5NS6g7R5LFYJr4b8Ub94PPQXUksCw9PvXoeXPRRddRNC5C1JQUR2SMGtnA==} + engines: {node: '>= 14.16.0'} + + citty@0.1.6: + resolution: {integrity: 
sha512-tskPPKEs8D2KPafUypv2gxwJP8h/OaJmC82QQGGDQcHvXX43xF2VDACcJVmZ0EuSxkpO9Kc4MlrA3q0+FG58AQ==} + + citty@0.2.1: + resolution: {integrity: sha512-kEV95lFBhQgtogAPlQfJJ0WGVSokvLr/UEoFPiKKOXF7pl98HfUVUD0ejsuTCld/9xH9vogSywZ5KqHzXrZpqg==} + + cli-width@4.1.0: + resolution: {integrity: sha512-ouuZd4/dm2Sw5Gmqy6bGyNNNe1qt9RpmxveLSO7KcgsTnU7RXfsw+/bukWGo1abgBiMAic068rclZsO4IWmmxQ==} + engines: {node: '>= 12'} + + color-convert@2.0.1: + resolution: {integrity: sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==} + engines: {node: '>=7.0.0'} + + color-name@1.1.4: + resolution: {integrity: sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==} + + commander@13.1.0: + resolution: {integrity: sha512-/rFeCpNJQbhSZjGVwO9RFV3xPqbnERS8MmIQzCtD/zl6gpJuV/bMLuN92oG3F7d8oDEHHRrujSXNUr8fpjntKw==} + engines: {node: '>=18'} + + confbox@0.2.4: + resolution: {integrity: sha512-ysOGlgTFbN2/Y6Cg3Iye8YKulHw+R2fNXHrgSmXISQdMnomY6eNDprVdW9R5xBguEqI954+S6709UyiO7B+6OQ==} + + consola@3.4.2: + resolution: {integrity: sha512-5IKcdX0nnYavi6G7TtOhwkYzyjfJlatbjMjuLSfE2kYT5pMDOilZ4OvMhi637CcDICTmz3wARPoyhqyX1Y+XvA==} + engines: {node: ^14.18.0 || >=16.10.0} + + content-disposition@1.0.1: + resolution: {integrity: sha512-oIXISMynqSqm241k6kcQ5UwttDILMK4BiurCfGEREw6+X9jkkpEe5T9FZaApyLGGOnFuyMWZpdolTXMtvEJ08Q==} + engines: {node: '>=18'} + + content-type@1.0.5: + resolution: {integrity: sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA==} + engines: {node: '>= 0.6'} + + cookie-signature@1.2.2: + resolution: {integrity: sha512-D76uU73ulSXrD1UXF4KE2TMxVVwhsnCgfAyTg9k8P6KGZjlXKrOLe4dJQKI3Bxi5wjesZoFXJWElNWBjPZMbhg==} + engines: {node: '>=6.6.0'} + + cookie@0.7.2: + resolution: {integrity: sha512-yki5XnKuf750l50uGTllt6kKILY4nQ1eNIQatoXEByZ5dWgnKqbnqmTrBE5B4N7lrMJKQ2ytWMiTO2o0v6Ew/w==} + engines: {node: '>= 0.6'} + + cookie@1.1.1: + resolution: {integrity: 
sha512-ei8Aos7ja0weRpFzJnEA9UHJ/7XQmqglbRwnf2ATjcB9Wq874VKH9kfjjirM6UhU2/E5fFYadylyhFldcqSidQ==} + engines: {node: '>=18'} + + cors@2.8.6: + resolution: {integrity: sha512-tJtZBBHA6vjIAaF6EnIaq6laBBP9aq/Y3ouVJjEfoHbRBcHBAHYcMh/w8LDrk2PvIMMq8gmopa5D4V8RmbrxGw==} + engines: {node: '>= 0.10'} + + cross-spawn@7.0.6: + resolution: {integrity: sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==} + engines: {node: '>= 8'} + + debug@4.4.3: + resolution: {integrity: sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==} + engines: {node: '>=6.0'} + peerDependencies: + supports-color: '*' + peerDependenciesMeta: + supports-color: + optional: true + + deep-is@0.1.4: + resolution: {integrity: sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==} + + deepmerge-ts@7.1.5: + resolution: {integrity: sha512-HOJkrhaYsweh+W+e74Yn7YStZOilkoPb6fycpwNLKzSPtruFs48nYis0zy5yJz1+ktUhHxoRDJ27RQAWLIJVJw==} + engines: {node: '>=16.0.0'} + + defu@6.1.4: + resolution: {integrity: sha512-mEQCMmwJu317oSz8CwdIOdwf3xMif1ttiM8LTufzc3g6kR+9Pe236twL8j3IYT1F7GfRgGcW6MWxzZjLIkuHIg==} + + depd@2.0.0: + resolution: {integrity: sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==} + engines: {node: '>= 0.8'} + + dequal@2.0.3: + resolution: {integrity: sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA==} + engines: {node: '>=6'} + + destr@2.0.5: + resolution: {integrity: sha512-ugFTXCtDZunbzasqBxrK93Ik/DRYsO6S/fedkWEMKqt04xZ4csmnmwGDBAb07QWNaGMAmnTIemsYZCksjATwsA==} + + dotenv@16.6.1: + resolution: {integrity: sha512-uBq4egWHTcTt33a72vpSG0z3HnPuIl6NqYcTrKEg2azoEyl2hpW0zqlxysq2pK9HlDIHyHyakeYaYnSAwd8bow==} + engines: {node: '>=12'} + + dunder-proto@1.0.1: + resolution: {integrity: sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==} + engines: {node: '>= 
0.4'} + + ee-first@1.1.1: + resolution: {integrity: sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==} + + effect@3.18.4: + resolution: {integrity: sha512-b1LXQJLe9D11wfnOKAk3PKxuqYshQ0Heez+y5pnkd3jLj1yx9QhM72zZ9uUrOQyNvrs2GZZd/3maL0ZV18YuDA==} + + emoji-regex@8.0.0: + resolution: {integrity: sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==} + + empathic@2.0.0: + resolution: {integrity: sha512-i6UzDscO/XfAcNYD75CfICkmfLedpyPDdozrLMmQc5ORaQcdMoc21OnlEylMIqI7U8eniKrPMxxtj8k0vhmJhA==} + engines: {node: '>=14'} + + encodeurl@2.0.0: + resolution: {integrity: sha512-Q0n9HRi4m6JuGIV1eFlmvJB7ZEVxu93IrMyiMsGC0lrMJMWzRgx6WGquyfQgZVb31vhGgXnfmPNNXmxnOkRBrg==} + engines: {node: '>= 0.8'} + + es-define-property@1.0.1: + resolution: {integrity: sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==} + engines: {node: '>= 0.4'} + + es-errors@1.3.0: + resolution: {integrity: sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==} + engines: {node: '>= 0.4'} + + es-module-lexer@1.7.0: + resolution: {integrity: sha512-jEQoCwk8hyb2AZziIOLhDqpm5+2ww5uIE6lkO/6jcOCusfk6LhMHpXXfBLXTZ7Ydyt0j4VoUQv6uGNYbdW+kBA==} + + es-object-atoms@1.1.1: + resolution: {integrity: sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==} + engines: {node: '>= 0.4'} + + esbuild@0.27.3: + resolution: {integrity: sha512-8VwMnyGCONIs6cWue2IdpHxHnAjzxnw2Zr7MkVxB2vjmQ2ivqGFb4LEG3SMnv0Gb2F/G/2yA8zUaiL1gywDCCg==} + engines: {node: '>=18'} + hasBin: true + + escape-html@1.0.3: + resolution: {integrity: sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow==} + + escape-string-regexp@4.0.0: + resolution: {integrity: sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==} + engines: {node: '>=10'} + + 
eslint-config-prettier@10.1.8: + resolution: {integrity: sha512-82GZUjRS0p/jganf6q1rEO25VSoHH0hKPCTrgillPjdI/3bgBhAE1QzHrHTizjpRvy6pGAvKjDJtk2pF9NDq8w==} + hasBin: true + peerDependencies: + eslint: '>=7.0.0' + + eslint-scope@9.1.1: + resolution: {integrity: sha512-GaUN0sWim5qc8KVErfPBWmc31LEsOkrUJbvJZV+xuL3u2phMUK4HIvXlWAakfC8W4nzlK+chPEAkYOYb5ZScIw==} + engines: {node: ^20.19.0 || ^22.13.0 || >=24} + + eslint-visitor-keys@3.4.3: + resolution: {integrity: sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + + eslint-visitor-keys@5.0.1: + resolution: {integrity: sha512-tD40eHxA35h0PEIZNeIjkHoDR4YjjJp34biM0mDvplBe//mB+IHCqHDGV7pxF+7MklTvighcCPPZC7ynWyjdTA==} + engines: {node: ^20.19.0 || ^22.13.0 || >=24} + + eslint@10.0.1: + resolution: {integrity: sha512-20MV9SUdeN6Jd84xESsKhRly+/vxI+hwvpBMA93s+9dAcjdCuCojn4IqUGS3lvVaqjVYGYHSRMCpeFtF2rQYxQ==} + engines: {node: ^20.19.0 || ^22.13.0 || >=24} + hasBin: true + peerDependencies: + jiti: '*' + peerDependenciesMeta: + jiti: + optional: true + + espree@11.1.1: + resolution: {integrity: sha512-AVHPqQoZYc+RUM4/3Ly5udlZY/U4LS8pIG05jEjWM2lQMU/oaZ7qshzAl2YP1tfNmXfftH3ohurfwNAug+MnsQ==} + engines: {node: ^20.19.0 || ^22.13.0 || >=24} + + esquery@1.7.0: + resolution: {integrity: sha512-Ap6G0WQwcU/LHsvLwON1fAQX9Zp0A2Y6Y/cJBl9r/JbW90Zyg4/zbG6zzKa2OTALELarYHmKu0GhpM5EO+7T0g==} + engines: {node: '>=0.10'} + + esrecurse@4.3.0: + resolution: {integrity: sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==} + engines: {node: '>=4.0'} + + estraverse@5.3.0: + resolution: {integrity: sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==} + engines: {node: '>=4.0'} + + estree-walker@3.0.3: + resolution: {integrity: sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==} + + esutils@2.0.3: + resolution: {integrity: 
sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==} + engines: {node: '>=0.10.0'} + + etag@1.8.1: + resolution: {integrity: sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg==} + engines: {node: '>= 0.6'} + + eventsource-parser@3.0.6: + resolution: {integrity: sha512-Vo1ab+QXPzZ4tCa8SwIHJFaSzy4R6SHf7BY79rFBDf0idraZWAkYrDjDj8uWaSm3S2TK+hJ7/t1CEmZ7jXw+pg==} + engines: {node: '>=18.0.0'} + + eventsource@3.0.7: + resolution: {integrity: sha512-CRT1WTyuQoD771GW56XEZFQ/ZoSfWid1alKGDYMmkt2yl8UXrVR4pspqWNEcqKvVIzg6PAltWjxcSSPrboA4iA==} + engines: {node: '>=18.0.0'} + + expect-type@1.3.0: + resolution: {integrity: sha512-knvyeauYhqjOYvQ66MznSMs83wmHrCycNEN6Ao+2AeYEfxUIkuiVxdEa1qlGEPK+We3n0THiDciYSsCcgW/DoA==} + engines: {node: '>=12.0.0'} + + express-rate-limit@8.2.1: + resolution: {integrity: sha512-PCZEIEIxqwhzw4KF0n7QF4QqruVTcF73O5kFKUnGOyjbCCgizBBiFaYpd/fnBLUMPw/BWw9OsiN7GgrNYr7j6g==} + engines: {node: '>= 16'} + peerDependencies: + express: '>= 4.11' + + express@5.2.1: + resolution: {integrity: sha512-hIS4idWWai69NezIdRt2xFVofaF4j+6INOpJlVOLDO8zXGpUVEVzIYk12UUi2JzjEzWL3IOAxcTubgz9Po0yXw==} + engines: {node: '>= 18'} + + exsolve@1.0.8: + resolution: {integrity: sha512-LmDxfWXwcTArk8fUEnOfSZpHOJ6zOMUJKOtFLFqJLoKJetuQG874Uc7/Kki7zFLzYybmZhp1M7+98pfMqeX8yA==} + + fast-check@3.23.2: + resolution: {integrity: sha512-h5+1OzzfCC3Ef7VbtKdcv7zsstUQwUDlYpUTvjeUsJAssPgLn7QzbboPtL5ro04Mq0rPOsMzl7q5hIbRs2wD1A==} + engines: {node: '>=8.0.0'} + + fast-decode-uri-component@1.0.1: + resolution: {integrity: sha512-WKgKWg5eUxvRZGwW8FvfbaH7AXSh2cL+3j5fMGzUMCxWBJ3dV3a7Wz8y2f/uQ0e3B6WmodD3oS54jTQ9HVTIIg==} + + fast-deep-equal@3.1.3: + resolution: {integrity: sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==} + + fast-json-stable-stringify@2.1.0: + resolution: {integrity: 
sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==} + + fast-json-stringify@6.3.0: + resolution: {integrity: sha512-oRCntNDY/329HJPlmdNLIdogNtt6Vyjb1WuT01Soss3slIdyUp8kAcDU3saQTOquEK8KFVfwIIF7FebxUAu+yA==} + + fast-levenshtein@2.0.6: + resolution: {integrity: sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==} + + fast-querystring@1.1.2: + resolution: {integrity: sha512-g6KuKWmFXc0fID8WWH0jit4g0AGBoJhCkJMb1RmbsSEUNvQ+ZC8D6CUZ+GtF8nMzSPXnhiePyyqqipzNNEnHjg==} + + fast-uri@3.1.0: + resolution: {integrity: sha512-iPeeDKJSWf4IEOasVVrknXpaBV0IApz/gp7S2bb7Z4Lljbl2MGJRqInZiUrQwV16cpzw/D3S5j5Julj/gT52AA==} + + fastify-plugin@5.1.0: + resolution: {integrity: sha512-FAIDA8eovSt5qcDgcBvDuX/v0Cjz0ohGhENZ/wpc3y+oZCY2afZ9Baqql3g/lC+OHRnciQol4ww7tuthOb9idw==} + + fastify@5.7.4: + resolution: {integrity: sha512-e6l5NsRdaEP8rdD8VR0ErJASeyaRbzXYpmkrpr2SuvuMq6Si3lvsaVy5C+7gLanEkvjpMDzBXWE5HPeb/hgTxA==} + + fastq@1.20.1: + resolution: {integrity: sha512-GGToxJ/w1x32s/D2EKND7kTil4n8OVk/9mycTc4VDza13lOvpUZTGX3mFSCtV9ksdGBVzvsyAVLM6mHFThxXxw==} + + fdir@6.5.0: + resolution: {integrity: sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==} + engines: {node: '>=12.0.0'} + peerDependencies: + picomatch: ^3 || ^4 + peerDependenciesMeta: + picomatch: + optional: true + + file-entry-cache@8.0.0: + resolution: {integrity: sha512-XXTUwCvisa5oacNGRP9SfNtYBNAMi+RPwBFmblZEF7N7swHYQS6/Zfk7SRwx4D5j3CH211YNRco1DEMNVfZCnQ==} + engines: {node: '>=16.0.0'} + + finalhandler@2.1.1: + resolution: {integrity: sha512-S8KoZgRZN+a5rNwqTxlZZePjT/4cnm0ROV70LedRHZ0p8u9fRID0hJUZQpkKLzro8LfmC8sx23bY6tVNxv8pQA==} + engines: {node: '>= 18.0.0'} + + find-my-way@9.4.0: + resolution: {integrity: sha512-5Ye4vHsypZRYtS01ob/iwHzGRUDELlsoCftI/OZFhcLs1M0tkGPcXldE80TAZC5yYuJMBPJQQ43UHlqbJWiX2w==} + engines: {node: '>=20'} + + find-up@5.0.0: + resolution: {integrity: 
sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==} + engines: {node: '>=10'} + + flat-cache@4.0.1: + resolution: {integrity: sha512-f7ccFPK3SXFHpx15UIGyRJ/FJQctuKZ0zVuN3frBo4HnK3cay9VEW0R6yPYFHC0AgqhukPzKjq22t5DmAyqGyw==} + engines: {node: '>=16'} + + flatted@3.3.3: + resolution: {integrity: sha512-GX+ysw4PBCz0PzosHDepZGANEuFCMLrnRTiEy9McGjmkCQYwRq4A/X786G/fjM/+OjsWSU1ZrY5qyARZmO/uwg==} + + forwarded@0.2.0: + resolution: {integrity: sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow==} + engines: {node: '>= 0.6'} + + fresh@2.0.0: + resolution: {integrity: sha512-Rx/WycZ60HOaqLKAi6cHRKKI7zxWbJ31MhntmtwMoaTeF7XFH9hhBp8vITaMidfljRQ6eYWCKkaTK+ykVJHP2A==} + engines: {node: '>= 0.8'} + + fsevents@2.3.3: + resolution: {integrity: sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==} + engines: {node: ^8.16.0 || ^10.6.0 || >=11.0.0} + os: [darwin] + + function-bind@1.1.2: + resolution: {integrity: sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==} + + get-intrinsic@1.3.0: + resolution: {integrity: sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==} + engines: {node: '>= 0.4'} + + get-proto@1.0.1: + resolution: {integrity: sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==} + engines: {node: '>= 0.4'} + + get-tsconfig@4.13.6: + resolution: {integrity: sha512-shZT/QMiSHc/YBLxxOkMtgSid5HFoauqCE3/exfsEcwg1WkeqjG+V40yBbBrsD+jW2HDXcs28xOfcbm2jI8Ddw==} + + giget@2.0.0: + resolution: {integrity: sha512-L5bGsVkxJbJgdnwyuheIunkGatUF/zssUoxxjACCseZYAVbaqdh9Tsmmlkl8vYan09H7sbvKt4pS8GqKLBrEzA==} + hasBin: true + + glob-parent@6.0.2: + resolution: {integrity: sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==} + engines: {node: '>=10.13.0'} + + glob@13.0.6: + resolution: 
{integrity: sha512-Wjlyrolmm8uDpm/ogGyXZXb1Z+Ca2B8NbJwqBVg0axK9GbBeoS7yGV6vjXnYdGm6X53iehEuxxbyiKp8QmN4Vw==} + engines: {node: 18 || 20 || >=22} + + gopd@1.2.0: + resolution: {integrity: sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==} + engines: {node: '>= 0.4'} + + has-flag@4.0.0: + resolution: {integrity: sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==} + engines: {node: '>=8'} + + has-symbols@1.1.0: + resolution: {integrity: sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==} + engines: {node: '>= 0.4'} + + hasown@2.0.2: + resolution: {integrity: sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==} + engines: {node: '>= 0.4'} + + helmet@7.2.0: + resolution: {integrity: sha512-ZRiwvN089JfMXokizgqEPXsl2Guk094yExfoDXR0cBYWxtBbaSww/w+vT4WEJsBW2iTUi1GgZ6swmoug3Oy4Xw==} + engines: {node: '>=16.0.0'} + + hono@4.12.0: + resolution: {integrity: sha512-NekXntS5M94pUfiVZ8oXXK/kkri+5WpX2/Ik+LVsl+uvw+soj4roXIsPqO+XsWrAw20mOzaXOZf3Q7PfB9A/IA==} + engines: {node: '>=16.9.0'} + + html-escaper@2.0.2: + resolution: {integrity: sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg==} + + http-errors@2.0.1: + resolution: {integrity: sha512-4FbRdAX+bSdmo4AUFuS0WNiPz8NgFt+r8ThgNWmlrjQjt1Q7ZR9+zTlce2859x4KSXrwIsaeTqDoKQmtP8pLmQ==} + engines: {node: '>= 0.8'} + + iconv-lite@0.7.2: + resolution: {integrity: sha512-im9DjEDQ55s9fL4EYzOAv0yMqmMBSZp6G0VvFyTMPKWxiSBHUj9NW/qqLmXUwXrrM7AvqSlTCfvqRb0cM8yYqw==} + engines: {node: '>=0.10.0'} + + ignore@5.3.2: + resolution: {integrity: sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g==} + engines: {node: '>= 4'} + + ignore@7.0.5: + resolution: {integrity: sha512-Hs59xBNfUIunMFgWAbGX5cq6893IbWg4KnrjbYwX3tx0ztorVgTDA6B2sxf8ejHJ4wz8BqGUMYlnzNBer5NvGg==} + engines: {node: '>= 4'} + + 
imurmurhash@0.1.4: + resolution: {integrity: sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==} + engines: {node: '>=0.8.19'} + + inherits@2.0.4: + resolution: {integrity: sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==} + + inquirer@12.11.1: + resolution: {integrity: sha512-9VF7mrY+3OmsAfjH3yKz/pLbJ5z22E23hENKw3/LNSaA/sAt3v49bDRY+Ygct1xwuKT+U+cBfTzjCPySna69Qw==} + engines: {node: '>=18'} + peerDependencies: + '@types/node': '>=18' + peerDependenciesMeta: + '@types/node': + optional: true + + ip-address@10.0.1: + resolution: {integrity: sha512-NWv9YLW4PoW2B7xtzaS3NCot75m6nK7Icdv0o3lfMceJVRfSoQwqD4wEH5rLwoKJwUiZ/rfpiVBhnaF0FK4HoA==} + engines: {node: '>= 12'} + + ipaddr.js@1.9.1: + resolution: {integrity: sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==} + engines: {node: '>= 0.10'} + + ipaddr.js@2.3.0: + resolution: {integrity: sha512-Zv/pA+ciVFbCSBBjGfaKUya/CcGmUHzTydLMaTwrUUEM2DIEO3iZvueGxmacvmN50fGpGVKeTXpb2LcYQxeVdg==} + engines: {node: '>= 10'} + + is-extglob@2.1.1: + resolution: {integrity: sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==} + engines: {node: '>=0.10.0'} + + is-fullwidth-code-point@3.0.0: + resolution: {integrity: sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==} + engines: {node: '>=8'} + + is-glob@4.0.3: + resolution: {integrity: sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==} + engines: {node: '>=0.10.0'} + + is-promise@4.0.0: + resolution: {integrity: sha512-hvpoI6korhJMnej285dSg6nu1+e6uxs7zG3BYAm5byqDsgJNWwxzM6z6iZiAgQR4TJ30JmBTOwqZUw3WlyH3AQ==} + + isexe@2.0.0: + resolution: {integrity: sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==} + + istanbul-lib-coverage@3.2.2: + resolution: {integrity: 
sha512-O8dpsF+r0WV/8MNRKfnmrtCWhuKjxrq2w+jpzBL5UZKTi2LeVWnWOmWRxFlesJONmc+wLAGvKQZEOanko0LFTg==} + engines: {node: '>=8'} + + istanbul-lib-report@3.0.1: + resolution: {integrity: sha512-GCfE1mtsHGOELCU8e/Z7YWzpmybrx/+dSTfLrvY8qRmaY6zXTKWn6WQIjaAFw069icm6GVMNkgu0NzI4iPZUNw==} + engines: {node: '>=10'} + + istanbul-reports@3.2.0: + resolution: {integrity: sha512-HGYWWS/ehqTV3xN10i23tkPkpH46MLCIMFNCaaKNavAXTF1RkqxawEPtnjnGZ6XKSInBKkiOA5BKS+aZiY3AvA==} + engines: {node: '>=8'} + + jiti@2.6.1: + resolution: {integrity: sha512-ekilCSN1jwRvIbgeg/57YFh8qQDNbwDb9xT/qu2DAHbFFZUicIl4ygVaAvzveMhMVr3LnpSKTNnwt8PoOfmKhQ==} + hasBin: true + + jose@6.1.3: + resolution: {integrity: sha512-0TpaTfihd4QMNwrz/ob2Bp7X04yuxJkjRGi4aKmOqwhov54i6u79oCv7T+C7lo70MKH6BesI3vscD1yb/yzKXQ==} + + js-tokens@10.0.0: + resolution: {integrity: sha512-lM/UBzQmfJRo9ABXbPWemivdCW8V2G8FHaHdypQaIy523snUjog0W71ayWXTjiR+ixeMyVHN2XcpnTd/liPg/Q==} + + js-yaml@4.1.1: + resolution: {integrity: sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==} + hasBin: true + + json-buffer@3.0.1: + resolution: {integrity: sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==} + + json-schema-ref-resolver@3.0.0: + resolution: {integrity: sha512-hOrZIVL5jyYFjzk7+y7n5JDzGlU8rfWDuYyHwGa2WA8/pcmMHezp2xsVwxrebD/Q9t8Nc5DboieySDpCp4WG4A==} + + json-schema-traverse@0.4.1: + resolution: {integrity: sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==} + + json-schema-traverse@1.0.0: + resolution: {integrity: sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==} + + json-schema-typed@8.0.2: + resolution: {integrity: sha512-fQhoXdcvc3V28x7C7BMs4P5+kNlgUURe2jmUT1T//oBRMDrqy1QPelJimwZGo7Hg9VPV3EQV5Bnq4hbFy2vetA==} + + json-stable-stringify-without-jsonify@1.0.1: + resolution: {integrity: 
sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==} + + keyv@4.5.4: + resolution: {integrity: sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==} + + levn@0.4.1: + resolution: {integrity: sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==} + engines: {node: '>= 0.8.0'} + + light-my-request@6.6.0: + resolution: {integrity: sha512-CHYbu8RtboSIoVsHZ6Ye4cj4Aw/yg2oAFimlF7mNvfDV192LR7nDiKtSIfCuLT7KokPSTn/9kfVLm5OGN0A28A==} + + locate-path@6.0.0: + resolution: {integrity: sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==} + engines: {node: '>=10'} + + lru-cache@11.2.6: + resolution: {integrity: sha512-ESL2CrkS/2wTPfuend7Zhkzo2u0daGJ/A2VucJOgQ/C48S/zB8MMeMHSGKYpXhIjbPxfuezITkaBH1wqv00DDQ==} + engines: {node: 20 || >=22} + + magic-string@0.30.21: + resolution: {integrity: sha512-vd2F4YUyEXKGcLHoq+TEyCjxueSeHnFxyyjNp80yg0XV4vUhnDer/lvvlqM/arB5bXQN5K2/3oinyCRyx8T2CQ==} + + magicast@0.5.2: + resolution: {integrity: sha512-E3ZJh4J3S9KfwdjZhe2afj6R9lGIN5Pher1pF39UGrXRqq/VDaGVIGN13BjHd2u8B61hArAGOnso7nBOouW3TQ==} + + make-dir@4.0.0: + resolution: {integrity: sha512-hXdUTZYIVOt1Ex//jAQi+wTZZpUpwBj/0QsOzqegb3rGMMeJiSEu5xLHnYfBrRV4RH2+OCSOO95Is/7x1WJ4bw==} + engines: {node: '>=10'} + + math-intrinsics@1.1.0: + resolution: {integrity: sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==} + engines: {node: '>= 0.4'} + + media-typer@1.1.0: + resolution: {integrity: sha512-aisnrDP4GNe06UcKFnV5bfMNPBUw4jsLGaWwWfnH3v02GnBuXX2MCVn5RbrWo0j3pczUilYblq7fQ7Nw2t5XKw==} + engines: {node: '>= 0.8'} + + merge-descriptors@2.0.0: + resolution: {integrity: sha512-Snk314V5ayFLhp3fkUREub6WtjBfPdCPY1Ln8/8munuLuiYhsABgBVWsozAG+MWMbVEvcdcpbi9R7ww22l9Q3g==} + engines: {node: '>=18'} + + mime-db@1.54.0: + resolution: {integrity: 
sha512-aU5EJuIN2WDemCcAp2vFBfp/m4EAhWJnUNSSw0ixs7/kXbd6Pg64EmwJkNdFhB8aWt1sH2CTXrLxo/iAGV3oPQ==} + engines: {node: '>= 0.6'} + + mime-types@3.0.2: + resolution: {integrity: sha512-Lbgzdk0h4juoQ9fCKXW4by0UJqj+nOOrI9MJ1sSj4nI8aI2eo1qmvQEie4VD1glsS250n15LsWsYtCugiStS5A==} + engines: {node: '>=18'} + + minimatch@10.2.2: + resolution: {integrity: sha512-+G4CpNBxa5MprY+04MbgOw1v7So6n5JY166pFi9KfYwT78fxScCeSNQSNzp6dpPSW2rONOps6Ocam1wFhCgoVw==} + engines: {node: 18 || 20 || >=22} + + minimatch@9.0.5: + resolution: {integrity: sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==} + engines: {node: '>=16 || 14 >=14.17'} + + minipass@7.1.3: + resolution: {integrity: sha512-tEBHqDnIoM/1rXME1zgka9g6Q2lcoCkxHLuc7ODJ5BxbP5d4c2Z5cGgtXAku59200Cx7diuHTOYfSBD8n6mm8A==} + engines: {node: '>=16 || 14 >=14.17'} + + mnemonist@0.40.0: + resolution: {integrity: sha512-kdd8AFNig2AD5Rkih7EPCXhu/iMvwevQFX/uEiGhZyPZi7fHqOoF4V4kHLpCfysxXMgQ4B52kdPMCwARshKvEg==} + + ms@2.1.3: + resolution: {integrity: sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==} + + mute-stream@2.0.0: + resolution: {integrity: sha512-WWdIxpyjEn+FhQJQQv9aQAYlHoNVdzIzUySNV1gHUPDSdZJ3yZn7pAAbQcV7B56Mvu881q9FZV+0Vx2xC44VWA==} + engines: {node: ^18.17.0 || >=20.5.0} + + nanoid@3.3.11: + resolution: {integrity: sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==} + engines: {node: ^10 || ^12 || ^13.7 || ^14 || >=15.0.1} + hasBin: true + + natural-compare@1.4.0: + resolution: {integrity: sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==} + + negotiator@1.0.0: + resolution: {integrity: sha512-8Ofs/AUQh8MaEcrlq5xOX0CQ9ypTF5dl78mjlMNfOK08fzpgTHQRQPBxcPlEtIw0yRpws+Zo/3r+5WRby7u3Gg==} + engines: {node: '>= 0.6'} + + node-fetch-native@1.6.7: + resolution: {integrity: 
sha512-g9yhqoedzIUm0nTnTqAQvueMPVOuIY16bqgAJJC8XOOubYFNwz6IER9qs0Gq2Xd0+CecCKFjtdDTMA4u4xG06Q==} + + nypm@0.6.5: + resolution: {integrity: sha512-K6AJy1GMVyfyMXRVB88700BJqNUkByijGJM8kEHpLdcAt+vSQAVfkWWHYzuRXHSY6xA2sNc5RjTj0p9rE2izVQ==} + engines: {node: '>=18'} + hasBin: true + + object-assign@4.1.1: + resolution: {integrity: sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==} + engines: {node: '>=0.10.0'} + + object-inspect@1.13.4: + resolution: {integrity: sha512-W67iLl4J2EXEGTbfeHCffrjDfitvLANg0UlX3wFUUSTx92KXRFegMHUVgSqE+wvhAbi4WqjGg9czysTV2Epbew==} + engines: {node: '>= 0.4'} + + obliterator@2.0.5: + resolution: {integrity: sha512-42CPE9AhahZRsMNslczq0ctAEtqk8Eka26QofnqC346BZdHDySk3LWka23LI7ULIw11NmltpiLagIq8gBozxTw==} + + obug@2.1.1: + resolution: {integrity: sha512-uTqF9MuPraAQ+IsnPf366RG4cP9RtUi7MLO1N3KEc+wb0a6yKpeL0lmk2IB1jY5KHPAlTc6T/JRdC/YqxHNwkQ==} + + ohash@2.0.11: + resolution: {integrity: sha512-RdR9FQrFwNBNXAr4GixM8YaRZRJ5PUWbKYbE5eOsrwAjJW0q2REGcf79oYPsLyskQCZG1PLN+S/K1V00joZAoQ==} + + on-exit-leak-free@2.1.2: + resolution: {integrity: sha512-0eJJY6hXLGf1udHwfNftBqH+g73EU4B504nZeKpz1sYRKafAghwxEJunB2O7rDZkL4PGfsMVnTXZ2EjibbqcsA==} + engines: {node: '>=14.0.0'} + + on-finished@2.4.1: + resolution: {integrity: sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg==} + engines: {node: '>= 0.8'} + + once@1.4.0: + resolution: {integrity: sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==} + + optionator@0.9.4: + resolution: {integrity: sha512-6IpQ7mKUxRcZNLIObR0hz7lxsapSSIYNZJwXPGeF0mTVqGKFIXj1DQcMoT22S3ROcLyY/rz0PWaWZ9ayWmad9g==} + engines: {node: '>= 0.8.0'} + + p-limit@3.1.0: + resolution: {integrity: sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==} + engines: {node: '>=10'} + + p-locate@5.0.0: + resolution: {integrity: 
sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==} + engines: {node: '>=10'} + + package-json-from-dist@1.0.1: + resolution: {integrity: sha512-UEZIS3/by4OC8vL3P2dTXRETpebLI2NiI5vIrjaD/5UtrkFX/tNbwjTSRAGC/+7CAo2pIcBaRgWmcBBHcsaCIw==} + + parseurl@1.3.3: + resolution: {integrity: sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==} + engines: {node: '>= 0.8'} + + path-exists@4.0.0: + resolution: {integrity: sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==} + engines: {node: '>=8'} + + path-key@3.1.1: + resolution: {integrity: sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==} + engines: {node: '>=8'} + + path-scurry@2.0.2: + resolution: {integrity: sha512-3O/iVVsJAPsOnpwWIeD+d6z/7PmqApyQePUtCndjatj/9I5LylHvt5qluFaBT3I5h3r1ejfR056c+FCv+NnNXg==} + engines: {node: 18 || 20 || >=22} + + path-to-regexp@8.3.0: + resolution: {integrity: sha512-7jdwVIRtsP8MYpdXSwOS0YdD0Du+qOoF/AEPIt88PcCFrZCzx41oxku1jD88hZBwbNUIEfpqvuhjFaMAqMTWnA==} + + pathe@2.0.3: + resolution: {integrity: sha512-WUjGcAqP1gQacoQe+OBJsFA7Ld4DyXuUIjZ5cc75cLHvJ7dtNsTugphxIADwspS+AraAUePCKrSVtPLFj/F88w==} + + perfect-debounce@1.0.0: + resolution: {integrity: sha512-xCy9V055GLEqoFaHoC1SoLIaLmWctgCUaBaWxDZ7/Zx4CTyX7cJQLJOok/orfjZAh9kEYpjJa4d0KcJmCbctZA==} + + picocolors@1.1.1: + resolution: {integrity: sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==} + + picomatch@4.0.3: + resolution: {integrity: sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==} + engines: {node: '>=12'} + + pino-abstract-transport@3.0.0: + resolution: {integrity: sha512-wlfUczU+n7Hy/Ha5j9a/gZNy7We5+cXp8YL+X+PG8S0KXxw7n/JXA3c46Y0zQznIJ83URJiwy7Lh56WLokNuxg==} + + pino-std-serializers@7.1.0: + resolution: {integrity: 
sha512-BndPH67/JxGExRgiX1dX0w1FvZck5Wa4aal9198SrRhZjH3GxKQUKIBnYJTdj2HDN3UQAS06HlfcSbQj2OHmaw==} + + pino@10.3.1: + resolution: {integrity: sha512-r34yH/GlQpKZbU1BvFFqOjhISRo1MNx1tWYsYvmj6KIRHSPMT2+yHOEb1SG6NMvRoHRF0a07kCOox/9yakl1vg==} + hasBin: true + + pkce-challenge@5.0.1: + resolution: {integrity: sha512-wQ0b/W4Fr01qtpHlqSqspcj3EhBvimsdh0KlHhH8HRZnMsEa0ea2fTULOXOS9ccQr3om+GcGRk4e+isrZWV8qQ==} + engines: {node: '>=16.20.0'} + + pkg-types@2.3.0: + resolution: {integrity: sha512-SIqCzDRg0s9npO5XQ3tNZioRY1uK06lA41ynBC1YmFTmnY6FjUjVt6s4LoADmwoig1qqD0oK8h1p/8mlMx8Oig==} + + postcss@8.5.6: + resolution: {integrity: sha512-3Ybi1tAuwAP9s0r1UQ2J4n5Y0G05bJkpUIO0/bI9MhwmD70S5aTWbXGBwxHrelT+XM1k6dM0pk+SwNkpTRN7Pg==} + engines: {node: ^10 || ^12 || >=14} + + prelude-ls@1.2.1: + resolution: {integrity: sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==} + engines: {node: '>= 0.8.0'} + + prisma@6.19.2: + resolution: {integrity: sha512-XTKeKxtQElcq3U9/jHyxSPgiRgeYDKxWTPOf6NkXA0dNj5j40MfEsZkMbyNpwDWCUv7YBFUl7I2VK/6ALbmhEg==} + engines: {node: '>=18.18'} + hasBin: true + peerDependencies: + typescript: '>=5.1.0' + peerDependenciesMeta: + typescript: + optional: true + + process-warning@4.0.1: + resolution: {integrity: sha512-3c2LzQ3rY9d0hc1emcsHhfT9Jwz0cChib/QN89oME2R451w5fy3f0afAhERFZAwrbDU43wk12d0ORBpDVME50Q==} + + process-warning@5.0.0: + resolution: {integrity: sha512-a39t9ApHNx2L4+HBnQKqxxHNs1r7KF+Intd8Q/g1bUh6q0WIp9voPXJ/x0j+ZL45KF1pJd9+q2jLIRMfvEshkA==} + + proxy-addr@2.0.7: + resolution: {integrity: sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg==} + engines: {node: '>= 0.10'} + + punycode@2.3.1: + resolution: {integrity: sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==} + engines: {node: '>=6'} + + pure-rand@6.1.0: + resolution: {integrity: 
sha512-bVWawvoZoBYpp6yIoQtQXHZjmz35RSVHnUOTefl8Vcjr8snTPY1wnpSPMWekcFwbxI6gtmT7rSYPFvz71ldiOA==} + + qs@6.15.0: + resolution: {integrity: sha512-mAZTtNCeetKMH+pSjrb76NAM8V9a05I9aBZOHztWy/UqcJdQYNsf59vrRKWnojAT9Y+GbIvoTBC++CPHqpDBhQ==} + engines: {node: '>=0.6'} + + quick-format-unescaped@4.0.4: + resolution: {integrity: sha512-tYC1Q1hgyRuHgloV/YXs2w15unPVh8qfu/qCTfhTYamaw7fyhumKa2yGpdSo87vY32rIclj+4fWYQXUMs9EHvg==} + + range-parser@1.2.1: + resolution: {integrity: sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==} + engines: {node: '>= 0.6'} + + raw-body@3.0.2: + resolution: {integrity: sha512-K5zQjDllxWkf7Z5xJdV0/B0WTNqx6vxG70zJE4N0kBs4LovmEYWJzQGxC9bS9RAKu3bgM40lrd5zoLJ12MQ5BA==} + engines: {node: '>= 0.10'} + + rc9@2.1.2: + resolution: {integrity: sha512-btXCnMmRIBINM2LDZoEmOogIZU7Qe7zn4BpomSKZ/ykbLObuBdvG+mFq11DL6fjH1DRwHhrlgtYWG96bJiC7Cg==} + + readdirp@4.1.2: + resolution: {integrity: sha512-GDhwkLfywWL2s6vEjyhri+eXmfH6j1L7JE27WhqLeYzoh/A3DBaYGEj2H/HFZCn/kMfim73FXxEJTw06WtxQwg==} + engines: {node: '>= 14.18.0'} + + real-require@0.2.0: + resolution: {integrity: sha512-57frrGM/OCTLqLOAh0mhVA9VBMHd+9U7Zb2THMGdBUoZVOtGbJzjxsYGDJ3A9AYYCP4hn6y1TVbaOfzWtm5GFg==} + engines: {node: '>= 12.13.0'} + + require-from-string@2.0.2: + resolution: {integrity: sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==} + engines: {node: '>=0.10.0'} + + resolve-pkg-maps@1.0.0: + resolution: {integrity: sha512-seS2Tj26TBVOC2NIc2rOe2y2ZO7efxITtLZcGSOnHHNOQ7CkiUBfw0Iw2ck6xkIhPwLhKNLS8BO+hEpngQlqzw==} + + ret@0.5.0: + resolution: {integrity: sha512-I1XxrZSQ+oErkRR4jYbAyEEu2I0avBvvMM5JN+6EBprOGRCs63ENqZ3vjavq8fBw2+62G5LF5XelKwuJpcvcxw==} + engines: {node: '>=10'} + + reusify@1.1.0: + resolution: {integrity: sha512-g6QUff04oZpHs0eG5p83rFLhHeV00ug/Yf9nZM6fLeUrPguBTkTQOdpAWWspMh55TZfVQDPaN3NQJfbVRAxdIw==} + engines: {iojs: '>=1.0.0', node: '>=0.10.0'} + + rfdc@1.4.1: + resolution: {integrity: 
sha512-q1b3N5QkRUWUl7iyylaaj3kOpIT0N2i9MqIEQXP73GVsN9cw3fdx8X63cEmWhJGi2PPCF23Ijp7ktmd39rawIA==} + + rimraf@6.1.3: + resolution: {integrity: sha512-LKg+Cr2ZF61fkcaK1UdkH2yEBBKnYjTyWzTJT6KNPcSPaiT7HSdhtMXQuN5wkTX0Xu72KQ1l8S42rlmexS2hSA==} + engines: {node: 20 || >=22} + hasBin: true + + rollup@4.58.0: + resolution: {integrity: sha512-wbT0mBmWbIvvq8NeEYWWvevvxnOyhKChir47S66WCxw1SXqhw7ssIYejnQEVt7XYQpsj2y8F9PM+Cr3SNEa0gw==} + engines: {node: '>=18.0.0', npm: '>=8.0.0'} + hasBin: true + + router@2.2.0: + resolution: {integrity: sha512-nLTrUKm2UyiL7rlhapu/Zl45FwNgkZGaCpZbIHajDYgwlJCOzLSk+cIPAnsEqV955GjILJnKbdQC1nVPz+gAYQ==} + engines: {node: '>= 18'} + + run-async@4.0.6: + resolution: {integrity: sha512-IoDlSLTs3Yq593mb3ZoKWKXMNu3UpObxhgA/Xuid5p4bbfi2jdY1Hj0m1K+0/tEuQTxIGMhQDqGjKb7RuxGpAQ==} + engines: {node: '>=0.12.0'} + + rxjs@7.8.2: + resolution: {integrity: sha512-dhKf903U/PQZY6boNNtAGdWbG85WAbjT/1xYoZIC7FAY0yWapOBQVsVrDl58W86//e1VpMNBtRV4MaXfdMySFA==} + + safe-regex2@5.0.0: + resolution: {integrity: sha512-YwJwe5a51WlK7KbOJREPdjNrpViQBI3p4T50lfwPuDhZnE3XGVTlGvi+aolc5+RvxDD6bnUmjVsU9n1eboLUYw==} + + safe-stable-stringify@2.5.0: + resolution: {integrity: sha512-b3rppTKm9T+PsVCBEOUR46GWI7fdOs00VKZ1+9c1EWDaDMvjQc6tUwuFyIprgGgTcWoVHSKrU8H31ZHA2e0RHA==} + engines: {node: '>=10'} + + safer-buffer@2.1.2: + resolution: {integrity: sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==} + + secure-json-parse@4.1.0: + resolution: {integrity: sha512-l4KnYfEyqYJxDwlNVyRfO2E4NTHfMKAWdUuA8J0yve2Dz/E/PdBepY03RvyJpssIpRFwJoCD55wA+mEDs6ByWA==} + + semver@7.7.4: + resolution: {integrity: sha512-vFKC2IEtQnVhpT78h1Yp8wzwrf8CM+MzKMHGJZfBtzhZNycRFnXsHk6E5TxIkkMsgNS7mdX3AGB7x2QM2di4lA==} + engines: {node: '>=10'} + hasBin: true + + send@1.2.1: + resolution: {integrity: sha512-1gnZf7DFcoIcajTjTwjwuDjzuz4PPcY2StKPlsGAQ1+YH20IRVrBaXSWmdjowTJ6u8Rc01PoYOGHXfP1mYcZNQ==} + engines: {node: '>= 18'} + + serve-static@2.2.1: + resolution: {integrity: 
sha512-xRXBn0pPqQTVQiC8wyQrKs2MOlX24zQ0POGaj0kultvoOCstBQM5yvOhAVSUwOMjQtTvsPWoNCHfPGwaaQJhTw==} + engines: {node: '>= 18'} + + set-cookie-parser@2.7.2: + resolution: {integrity: sha512-oeM1lpU/UvhTxw+g3cIfxXHyJRc/uidd3yK1P242gzHds0udQBYzs3y8j4gCCW+ZJ7ad0yctld8RYO+bdurlvw==} + + setprototypeof@1.2.0: + resolution: {integrity: sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==} + + shebang-command@2.0.0: + resolution: {integrity: sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==} + engines: {node: '>=8'} + + shebang-regex@3.0.0: + resolution: {integrity: sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==} + engines: {node: '>=8'} + + side-channel-list@1.0.0: + resolution: {integrity: sha512-FCLHtRD/gnpCiCHEiJLOwdmFP+wzCmDEkc9y7NsYxeF4u7Btsn1ZuwgwJGxImImHicJArLP4R0yX4c2KCrMrTA==} + engines: {node: '>= 0.4'} + + side-channel-map@1.0.1: + resolution: {integrity: sha512-VCjCNfgMsby3tTdo02nbjtM/ewra6jPHmpThenkTYh8pG9ucZ/1P8So4u4FGBek/BjpOVsDCMoLA/iuBKIFXRA==} + engines: {node: '>= 0.4'} + + side-channel-weakmap@1.0.2: + resolution: {integrity: sha512-WPS/HvHQTYnHisLo9McqBHOJk2FkHO/tlpvldyrnem4aeQp4hai3gythswg6p01oSoTl58rcpiFAjF2br2Ak2A==} + engines: {node: '>= 0.4'} + + side-channel@1.1.0: + resolution: {integrity: sha512-ZX99e6tRweoUXqR+VBrslhda51Nh5MTQwou5tnUDgbtyM0dBgmhEDtWGP/xbKn6hqfPRHujUNwz5fy/wbbhnpw==} + engines: {node: '>= 0.4'} + + siginfo@2.0.0: + resolution: {integrity: sha512-ybx0WO1/8bSBLEWXZvEd7gMW3Sn3JFlW3TvX1nREbDLRNQNaeNN8WK0meBwPdAaOI7TtRRRJn/Es1zhrrCHu7g==} + + signal-exit@4.1.0: + resolution: {integrity: sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==} + engines: {node: '>=14'} + + sonic-boom@4.2.1: + resolution: {integrity: sha512-w6AxtubXa2wTXAUsZMMWERrsIRAdrK0Sc+FUytWvYAhBJLyuI4llrMIC1DtlNSdI99EI86KZum2MMq3EAZlF9Q==} + + source-map-js@1.2.1: + resolution: {integrity: 
sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==} + engines: {node: '>=0.10.0'} + + split2@4.2.0: + resolution: {integrity: sha512-UcjcJOWknrNkF6PLX83qcHM6KHgVKNkV62Y8a5uYDVv9ydGQVwAHMKqHdJje1VTWpljG0WYpCDhrCdAOYH4TWg==} + engines: {node: '>= 10.x'} + + stackback@0.0.2: + resolution: {integrity: sha512-1XMJE5fQo1jGH6Y/7ebnwPOBEkIEnT4QF32d5R1+VXdXveM0IBMJt8zfaxX1P3QhVwrYe+576+jkANtSS2mBbw==} + + statuses@2.0.2: + resolution: {integrity: sha512-DvEy55V3DB7uknRo+4iOGT5fP1slR8wQohVdknigZPMpMstaKJQWhwiYBACJE3Ul2pTnATihhBYnRhZQHGBiRw==} + engines: {node: '>= 0.8'} + + std-env@3.10.0: + resolution: {integrity: sha512-5GS12FdOZNliM5mAOxFRg7Ir0pWz8MdpYm6AY6VPkGpbA7ZzmbzNcBJQ0GPvvyWgcY7QAhCgf9Uy89I03faLkg==} + + string-width@4.2.3: + resolution: {integrity: sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==} + engines: {node: '>=8'} + + strip-ansi@6.0.1: + resolution: {integrity: sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==} + engines: {node: '>=8'} + + supports-color@7.2.0: + resolution: {integrity: sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==} + engines: {node: '>=8'} + + thread-stream@4.0.0: + resolution: {integrity: sha512-4iMVL6HAINXWf1ZKZjIPcz5wYaOdPhtO8ATvZ+Xqp3BTdaqtAwQkNmKORqcIo5YkQqGXq5cwfswDwMqqQNrpJA==} + engines: {node: '>=20'} + + tinybench@2.9.0: + resolution: {integrity: sha512-0+DUvqWMValLmha6lr4kD8iAMK1HzV0/aKnCtWb9v9641TnP/MFb7Pc2bxoxQjTXAErryXVgUOfv2YqNllqGeg==} + + tinyexec@1.0.2: + resolution: {integrity: sha512-W/KYk+NFhkmsYpuHq5JykngiOCnxeVL8v8dFnqxSD8qEEdRfXk1SDM6JzNqcERbcGYj9tMrDQBYV9cjgnunFIg==} + engines: {node: '>=18'} + + tinyglobby@0.2.15: + resolution: {integrity: sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ==} + engines: {node: '>=12.0.0'} + + tinyrainbow@3.0.3: + resolution: {integrity: 
sha512-PSkbLUoxOFRzJYjjxHJt9xro7D+iilgMX/C9lawzVuYiIdcihh9DXmVibBe8lmcFrRi/VzlPjBxbN7rH24q8/Q==} + engines: {node: '>=14.0.0'} + + toad-cache@3.7.0: + resolution: {integrity: sha512-/m8M+2BJUpoJdgAHoG+baCwBT+tf2VraSfkBgl0Y00qIWt41DJ8R5B8nsEw0I58YwF5IZH6z24/2TobDKnqSWw==} + engines: {node: '>=12'} + + toidentifier@1.0.1: + resolution: {integrity: sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==} + engines: {node: '>=0.6'} + + ts-api-utils@2.4.0: + resolution: {integrity: sha512-3TaVTaAv2gTiMB35i3FiGJaRfwb3Pyn/j3m/bfAvGe8FB7CF6u+LMYqYlDh7reQf7UNvoTvdfAqHGmPGOSsPmA==} + engines: {node: '>=18.12'} + peerDependencies: + typescript: '>=4.8.4' + + tslib@2.8.1: + resolution: {integrity: sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==} + + tsx@4.21.0: + resolution: {integrity: sha512-5C1sg4USs1lfG0GFb2RLXsdpXqBSEhAaA/0kPL01wxzpMqLILNxIxIOKiILz+cdg/pLnOUxFYOR5yhHU666wbw==} + engines: {node: '>=18.0.0'} + hasBin: true + + type-check@0.4.0: + resolution: {integrity: sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==} + engines: {node: '>= 0.8.0'} + + type-is@2.0.1: + resolution: {integrity: sha512-OZs6gsjF4vMp32qrCbiVSkrFmXtG/AZhY3t0iAMrMBiAZyV9oALtXO8hsrHbMXF9x6L3grlFuwW2oAz7cav+Gw==} + engines: {node: '>= 0.6'} + + typescript@5.9.3: + resolution: {integrity: sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==} + engines: {node: '>=14.17'} + hasBin: true + + unpipe@1.0.0: + resolution: {integrity: sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ==} + engines: {node: '>= 0.8'} + + uri-js@4.4.1: + resolution: {integrity: sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==} + + vary@1.1.2: + resolution: {integrity: sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg==} 
+ engines: {node: '>= 0.8'} + + vite@7.3.1: + resolution: {integrity: sha512-w+N7Hifpc3gRjZ63vYBXA56dvvRlNWRczTdmCBBa+CotUzAPf5b7YMdMR/8CQoeYE5LX3W4wj6RYTgonm1b9DA==} + engines: {node: ^20.19.0 || >=22.12.0} + hasBin: true + peerDependencies: + '@types/node': ^20.19.0 || >=22.12.0 + jiti: '>=1.21.0' + less: ^4.0.0 + lightningcss: ^1.21.0 + sass: ^1.70.0 + sass-embedded: ^1.70.0 + stylus: '>=0.54.8' + sugarss: ^5.0.0 + terser: ^5.16.0 + tsx: ^4.8.1 + yaml: ^2.4.2 + peerDependenciesMeta: + '@types/node': + optional: true + jiti: + optional: true + less: + optional: true + lightningcss: + optional: true + sass: + optional: true + sass-embedded: + optional: true + stylus: + optional: true + sugarss: + optional: true + terser: + optional: true + tsx: + optional: true + yaml: + optional: true + + vitest@4.0.18: + resolution: {integrity: sha512-hOQuK7h0FGKgBAas7v0mSAsnvrIgAvWmRFjmzpJ7SwFHH3g1k2u37JtYwOwmEKhK6ZO3v9ggDBBm0La1LCK4uQ==} + engines: {node: ^20.0.0 || ^22.0.0 || >=24.0.0} + hasBin: true + peerDependencies: + '@edge-runtime/vm': '*' + '@opentelemetry/api': ^1.9.0 + '@types/node': ^20.0.0 || ^22.0.0 || >=24.0.0 + '@vitest/browser-playwright': 4.0.18 + '@vitest/browser-preview': 4.0.18 + '@vitest/browser-webdriverio': 4.0.18 + '@vitest/ui': 4.0.18 + happy-dom: '*' + jsdom: '*' + peerDependenciesMeta: + '@edge-runtime/vm': + optional: true + '@opentelemetry/api': + optional: true + '@types/node': + optional: true + '@vitest/browser-playwright': + optional: true + '@vitest/browser-preview': + optional: true + '@vitest/browser-webdriverio': + optional: true + '@vitest/ui': + optional: true + happy-dom: + optional: true + jsdom: + optional: true + + which@2.0.2: + resolution: {integrity: sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==} + engines: {node: '>= 8'} + hasBin: true + + why-is-node-running@2.3.0: + resolution: {integrity: 
sha512-hUrmaWBdVDcxvYqnyh09zunKzROWjbZTiNy8dBEjkS7ehEDQibXJ7XvlmtbwuTclUiIyN+CyXQD4Vmko8fNm8w==} + engines: {node: '>=8'} + hasBin: true + + word-wrap@1.2.5: + resolution: {integrity: sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA==} + engines: {node: '>=0.10.0'} + + wrap-ansi@6.2.0: + resolution: {integrity: sha512-r6lPcBGxZXlIcymEu7InxDMhdW0KDxpLgoFLcguasxCaJ/SOIZwINatK9KY/tf+ZrlywOKU0UDj3ATXUBfxJXA==} + engines: {node: '>=8'} + + wrappy@1.0.2: + resolution: {integrity: sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==} + + yocto-queue@0.1.0: + resolution: {integrity: sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==} + engines: {node: '>=10'} + + yoctocolors-cjs@2.1.3: + resolution: {integrity: sha512-U/PBtDf35ff0D8X8D0jfdzHYEPFxAI7jJlxZXwCSez5M3190m+QobIfh+sWDWSHMCWWJN2AWamkegn6vr6YBTw==} + engines: {node: '>=18'} + + zod-to-json-schema@3.25.1: + resolution: {integrity: sha512-pM/SU9d3YAggzi6MtR4h7ruuQlqKtad8e9S0fmxcMi+ueAK5Korys/aWcV9LIIHTVbj01NdzxcnXSN+O74ZIVA==} + peerDependencies: + zod: ^3.25 || ^4 + + zod@3.25.76: + resolution: {integrity: sha512-gzUt/qt81nXsFGKIFcC3YnfEAx5NkunCfnDlvuBSSFS02bcXu4Lmea0AFIUwbLWxWPx3d9p8S5QoaujKcNQxcQ==} + +snapshots: + + '@babel/helper-string-parser@7.27.1': {} + + '@babel/helper-validator-identifier@7.28.5': {} + + '@babel/parser@7.29.0': + dependencies: + '@babel/types': 7.29.0 + + '@babel/types@7.29.0': + dependencies: + '@babel/helper-string-parser': 7.27.1 + '@babel/helper-validator-identifier': 7.28.5 + + '@bcoe/v8-coverage@1.0.2': {} + + '@esbuild/aix-ppc64@0.27.3': + optional: true + + '@esbuild/android-arm64@0.27.3': + optional: true + + '@esbuild/android-arm@0.27.3': + optional: true + + '@esbuild/android-x64@0.27.3': + optional: true + + '@esbuild/darwin-arm64@0.27.3': + optional: true + + '@esbuild/darwin-x64@0.27.3': + optional: true + + '@esbuild/freebsd-arm64@0.27.3': + 
optional: true + + '@esbuild/freebsd-x64@0.27.3': + optional: true + + '@esbuild/linux-arm64@0.27.3': + optional: true + + '@esbuild/linux-arm@0.27.3': + optional: true + + '@esbuild/linux-ia32@0.27.3': + optional: true + + '@esbuild/linux-loong64@0.27.3': + optional: true + + '@esbuild/linux-mips64el@0.27.3': + optional: true + + '@esbuild/linux-ppc64@0.27.3': + optional: true + + '@esbuild/linux-riscv64@0.27.3': + optional: true + + '@esbuild/linux-s390x@0.27.3': + optional: true + + '@esbuild/linux-x64@0.27.3': + optional: true + + '@esbuild/netbsd-arm64@0.27.3': + optional: true + + '@esbuild/netbsd-x64@0.27.3': + optional: true + + '@esbuild/openbsd-arm64@0.27.3': + optional: true + + '@esbuild/openbsd-x64@0.27.3': + optional: true + + '@esbuild/openharmony-arm64@0.27.3': + optional: true + + '@esbuild/sunos-x64@0.27.3': + optional: true + + '@esbuild/win32-arm64@0.27.3': + optional: true + + '@esbuild/win32-ia32@0.27.3': + optional: true + + '@esbuild/win32-x64@0.27.3': + optional: true + + '@eslint-community/eslint-utils@4.9.1(eslint@10.0.1(jiti@2.6.1))': + dependencies: + eslint: 10.0.1(jiti@2.6.1) + eslint-visitor-keys: 3.4.3 + + '@eslint-community/regexpp@4.12.2': {} + + '@eslint/config-array@0.23.2': + dependencies: + '@eslint/object-schema': 3.0.2 + debug: 4.4.3 + minimatch: 10.2.2 + transitivePeerDependencies: + - supports-color + + '@eslint/config-helpers@0.5.2': + dependencies: + '@eslint/core': 1.1.0 + + '@eslint/core@1.1.0': + dependencies: + '@types/json-schema': 7.0.15 + + '@eslint/object-schema@3.0.2': {} + + '@eslint/plugin-kit@0.6.0': + dependencies: + '@eslint/core': 1.1.0 + levn: 0.4.1 + + '@fastify/ajv-compiler@4.0.5': + dependencies: + ajv: 8.18.0 + ajv-formats: 3.0.1(ajv@8.18.0) + fast-uri: 3.1.0 + + '@fastify/cors@10.1.0': + dependencies: + fastify-plugin: 5.1.0 + mnemonist: 0.40.0 + + '@fastify/error@4.2.0': {} + + '@fastify/fast-json-stringify-compiler@5.0.3': + dependencies: + fast-json-stringify: 6.3.0 + + '@fastify/forwarded@3.0.1': 
{} + + '@fastify/helmet@12.0.1': + dependencies: + fastify-plugin: 5.1.0 + helmet: 7.2.0 + + '@fastify/merge-json-schemas@0.2.1': + dependencies: + dequal: 2.0.3 + + '@fastify/proxy-addr@5.1.0': + dependencies: + '@fastify/forwarded': 3.0.1 + ipaddr.js: 2.3.0 + + '@fastify/rate-limit@10.3.0': + dependencies: + '@lukeed/ms': 2.0.2 + fastify-plugin: 5.1.0 + toad-cache: 3.7.0 + + '@hono/node-server@1.19.9(hono@4.12.0)': + dependencies: + hono: 4.12.0 + + '@humanfs/core@0.19.1': {} + + '@humanfs/node@0.16.7': + dependencies: + '@humanfs/core': 0.19.1 + '@humanwhocodes/retry': 0.4.3 + + '@humanwhocodes/module-importer@1.0.1': {} + + '@humanwhocodes/retry@0.4.3': {} + + '@inquirer/ansi@1.0.2': {} + + '@inquirer/checkbox@4.3.2': + dependencies: + '@inquirer/ansi': 1.0.2 + '@inquirer/core': 10.3.2 + '@inquirer/figures': 1.0.15 + '@inquirer/type': 3.0.10 + yoctocolors-cjs: 2.1.3 + + '@inquirer/confirm@5.1.21': + dependencies: + '@inquirer/core': 10.3.2 + '@inquirer/type': 3.0.10 + + '@inquirer/core@10.3.2': + dependencies: + '@inquirer/ansi': 1.0.2 + '@inquirer/figures': 1.0.15 + '@inquirer/type': 3.0.10 + cli-width: 4.1.0 + mute-stream: 2.0.0 + signal-exit: 4.1.0 + wrap-ansi: 6.2.0 + yoctocolors-cjs: 2.1.3 + + '@inquirer/editor@4.2.23': + dependencies: + '@inquirer/core': 10.3.2 + '@inquirer/external-editor': 1.0.3 + '@inquirer/type': 3.0.10 + + '@inquirer/expand@4.0.23': + dependencies: + '@inquirer/core': 10.3.2 + '@inquirer/type': 3.0.10 + yoctocolors-cjs: 2.1.3 + + '@inquirer/external-editor@1.0.3': + dependencies: + chardet: 2.1.1 + iconv-lite: 0.7.2 + + '@inquirer/figures@1.0.15': {} + + '@inquirer/input@4.3.1': + dependencies: + '@inquirer/core': 10.3.2 + '@inquirer/type': 3.0.10 + + '@inquirer/number@3.0.23': + dependencies: + '@inquirer/core': 10.3.2 + '@inquirer/type': 3.0.10 + + '@inquirer/password@4.0.23': + dependencies: + '@inquirer/ansi': 1.0.2 + '@inquirer/core': 10.3.2 + '@inquirer/type': 3.0.10 + + '@inquirer/prompts@7.10.1': + dependencies: + 
'@inquirer/checkbox': 4.3.2 + '@inquirer/confirm': 5.1.21 + '@inquirer/editor': 4.2.23 + '@inquirer/expand': 4.0.23 + '@inquirer/input': 4.3.1 + '@inquirer/number': 3.0.23 + '@inquirer/password': 4.0.23 + '@inquirer/rawlist': 4.1.11 + '@inquirer/search': 3.2.2 + '@inquirer/select': 4.4.2 + + '@inquirer/rawlist@4.1.11': + dependencies: + '@inquirer/core': 10.3.2 + '@inquirer/type': 3.0.10 + yoctocolors-cjs: 2.1.3 + + '@inquirer/search@3.2.2': + dependencies: + '@inquirer/core': 10.3.2 + '@inquirer/figures': 1.0.15 + '@inquirer/type': 3.0.10 + yoctocolors-cjs: 2.1.3 + + '@inquirer/select@4.4.2': + dependencies: + '@inquirer/ansi': 1.0.2 + '@inquirer/core': 10.3.2 + '@inquirer/figures': 1.0.15 + '@inquirer/type': 3.0.10 + yoctocolors-cjs: 2.1.3 + + '@inquirer/type@3.0.10': {} + + '@jridgewell/resolve-uri@3.1.2': {} + + '@jridgewell/sourcemap-codec@1.5.5': {} + + '@jridgewell/trace-mapping@0.3.31': + dependencies: + '@jridgewell/resolve-uri': 3.1.2 + '@jridgewell/sourcemap-codec': 1.5.5 + + '@lukeed/ms@2.0.2': {} + + '@modelcontextprotocol/sdk@1.26.0(zod@3.25.76)': + dependencies: + '@hono/node-server': 1.19.9(hono@4.12.0) + ajv: 8.18.0 + ajv-formats: 3.0.1(ajv@8.18.0) + content-type: 1.0.5 + cors: 2.8.6 + cross-spawn: 7.0.6 + eventsource: 3.0.7 + eventsource-parser: 3.0.6 + express: 5.2.1 + express-rate-limit: 8.2.1(express@5.2.1) + hono: 4.12.0 + jose: 6.1.3 + json-schema-typed: 8.0.2 + pkce-challenge: 5.0.1 + raw-body: 3.0.2 + zod: 3.25.76 + zod-to-json-schema: 3.25.1(zod@3.25.76) + transitivePeerDependencies: + - supports-color + + '@pinojs/redact@0.4.0': {} + + '@prisma/client@6.19.2(prisma@6.19.2(typescript@5.9.3))(typescript@5.9.3)': + optionalDependencies: + prisma: 6.19.2(typescript@5.9.3) + typescript: 5.9.3 + + '@prisma/config@6.19.2': + dependencies: + c12: 3.1.0 + deepmerge-ts: 7.1.5 + effect: 3.18.4 + empathic: 2.0.0 + transitivePeerDependencies: + - magicast + + '@prisma/debug@6.19.2': {} + + 
'@prisma/engines-version@7.1.1-3.c2990dca591cba766e3b7ef5d9e8a84796e47ab7': {} + + '@prisma/engines@6.19.2': + dependencies: + '@prisma/debug': 6.19.2 + '@prisma/engines-version': 7.1.1-3.c2990dca591cba766e3b7ef5d9e8a84796e47ab7 + '@prisma/fetch-engine': 6.19.2 + '@prisma/get-platform': 6.19.2 + + '@prisma/fetch-engine@6.19.2': + dependencies: + '@prisma/debug': 6.19.2 + '@prisma/engines-version': 7.1.1-3.c2990dca591cba766e3b7ef5d9e8a84796e47ab7 + '@prisma/get-platform': 6.19.2 + + '@prisma/get-platform@6.19.2': + dependencies: + '@prisma/debug': 6.19.2 + + '@rollup/rollup-android-arm-eabi@4.58.0': + optional: true + + '@rollup/rollup-android-arm64@4.58.0': + optional: true + + '@rollup/rollup-darwin-arm64@4.58.0': + optional: true + + '@rollup/rollup-darwin-x64@4.58.0': + optional: true + + '@rollup/rollup-freebsd-arm64@4.58.0': + optional: true + + '@rollup/rollup-freebsd-x64@4.58.0': + optional: true + + '@rollup/rollup-linux-arm-gnueabihf@4.58.0': + optional: true + + '@rollup/rollup-linux-arm-musleabihf@4.58.0': + optional: true + + '@rollup/rollup-linux-arm64-gnu@4.58.0': + optional: true + + '@rollup/rollup-linux-arm64-musl@4.58.0': + optional: true + + '@rollup/rollup-linux-loong64-gnu@4.58.0': + optional: true + + '@rollup/rollup-linux-loong64-musl@4.58.0': + optional: true + + '@rollup/rollup-linux-ppc64-gnu@4.58.0': + optional: true + + '@rollup/rollup-linux-ppc64-musl@4.58.0': + optional: true + + '@rollup/rollup-linux-riscv64-gnu@4.58.0': + optional: true + + '@rollup/rollup-linux-riscv64-musl@4.58.0': + optional: true + + '@rollup/rollup-linux-s390x-gnu@4.58.0': + optional: true + + '@rollup/rollup-linux-x64-gnu@4.58.0': + optional: true + + '@rollup/rollup-linux-x64-musl@4.58.0': + optional: true + + '@rollup/rollup-openbsd-x64@4.58.0': + optional: true + + '@rollup/rollup-openharmony-arm64@4.58.0': + optional: true + + '@rollup/rollup-win32-arm64-msvc@4.58.0': + optional: true + + '@rollup/rollup-win32-ia32-msvc@4.58.0': + optional: true + + 
'@rollup/rollup-win32-x64-gnu@4.58.0': + optional: true + + '@rollup/rollup-win32-x64-msvc@4.58.0': + optional: true + + '@standard-schema/spec@1.1.0': {} + + '@types/chai@5.2.3': + dependencies: + '@types/deep-eql': 4.0.2 + assertion-error: 2.0.1 + + '@types/deep-eql@4.0.2': {} + + '@types/esrecurse@4.3.1': {} + + '@types/estree@1.0.8': {} + + '@types/json-schema@7.0.15': {} + + '@typescript-eslint/eslint-plugin@8.56.0(@typescript-eslint/parser@8.56.0(eslint@10.0.1(jiti@2.6.1))(typescript@5.9.3))(eslint@10.0.1(jiti@2.6.1))(typescript@5.9.3)': + dependencies: + '@eslint-community/regexpp': 4.12.2 + '@typescript-eslint/parser': 8.56.0(eslint@10.0.1(jiti@2.6.1))(typescript@5.9.3) + '@typescript-eslint/scope-manager': 8.56.0 + '@typescript-eslint/type-utils': 8.56.0(eslint@10.0.1(jiti@2.6.1))(typescript@5.9.3) + '@typescript-eslint/utils': 8.56.0(eslint@10.0.1(jiti@2.6.1))(typescript@5.9.3) + '@typescript-eslint/visitor-keys': 8.56.0 + eslint: 10.0.1(jiti@2.6.1) + ignore: 7.0.5 + natural-compare: 1.4.0 + ts-api-utils: 2.4.0(typescript@5.9.3) + typescript: 5.9.3 + transitivePeerDependencies: + - supports-color + + '@typescript-eslint/parser@8.56.0(eslint@10.0.1(jiti@2.6.1))(typescript@5.9.3)': + dependencies: + '@typescript-eslint/scope-manager': 8.56.0 + '@typescript-eslint/types': 8.56.0 + '@typescript-eslint/typescript-estree': 8.56.0(typescript@5.9.3) + '@typescript-eslint/visitor-keys': 8.56.0 + debug: 4.4.3 + eslint: 10.0.1(jiti@2.6.1) + typescript: 5.9.3 + transitivePeerDependencies: + - supports-color + + '@typescript-eslint/project-service@8.56.0(typescript@5.9.3)': + dependencies: + '@typescript-eslint/tsconfig-utils': 8.56.0(typescript@5.9.3) + '@typescript-eslint/types': 8.56.0 + debug: 4.4.3 + typescript: 5.9.3 + transitivePeerDependencies: + - supports-color + + '@typescript-eslint/scope-manager@8.56.0': + dependencies: + '@typescript-eslint/types': 8.56.0 + '@typescript-eslint/visitor-keys': 8.56.0 + + 
'@typescript-eslint/tsconfig-utils@8.56.0(typescript@5.9.3)': + dependencies: + typescript: 5.9.3 + + '@typescript-eslint/type-utils@8.56.0(eslint@10.0.1(jiti@2.6.1))(typescript@5.9.3)': + dependencies: + '@typescript-eslint/types': 8.56.0 + '@typescript-eslint/typescript-estree': 8.56.0(typescript@5.9.3) + '@typescript-eslint/utils': 8.56.0(eslint@10.0.1(jiti@2.6.1))(typescript@5.9.3) + debug: 4.4.3 + eslint: 10.0.1(jiti@2.6.1) + ts-api-utils: 2.4.0(typescript@5.9.3) + typescript: 5.9.3 + transitivePeerDependencies: + - supports-color + + '@typescript-eslint/types@8.56.0': {} + + '@typescript-eslint/typescript-estree@8.56.0(typescript@5.9.3)': + dependencies: + '@typescript-eslint/project-service': 8.56.0(typescript@5.9.3) + '@typescript-eslint/tsconfig-utils': 8.56.0(typescript@5.9.3) + '@typescript-eslint/types': 8.56.0 + '@typescript-eslint/visitor-keys': 8.56.0 + debug: 4.4.3 + minimatch: 9.0.5 + semver: 7.7.4 + tinyglobby: 0.2.15 + ts-api-utils: 2.4.0(typescript@5.9.3) + typescript: 5.9.3 + transitivePeerDependencies: + - supports-color + + '@typescript-eslint/utils@8.56.0(eslint@10.0.1(jiti@2.6.1))(typescript@5.9.3)': + dependencies: + '@eslint-community/eslint-utils': 4.9.1(eslint@10.0.1(jiti@2.6.1)) + '@typescript-eslint/scope-manager': 8.56.0 + '@typescript-eslint/types': 8.56.0 + '@typescript-eslint/typescript-estree': 8.56.0(typescript@5.9.3) + eslint: 10.0.1(jiti@2.6.1) + typescript: 5.9.3 + transitivePeerDependencies: + - supports-color + + '@typescript-eslint/visitor-keys@8.56.0': + dependencies: + '@typescript-eslint/types': 8.56.0 + eslint-visitor-keys: 5.0.1 + + '@vitest/coverage-v8@4.0.18(vitest@4.0.18(jiti@2.6.1)(tsx@4.21.0))': + dependencies: + '@bcoe/v8-coverage': 1.0.2 + '@vitest/utils': 4.0.18 + ast-v8-to-istanbul: 0.3.11 + istanbul-lib-coverage: 3.2.2 + istanbul-lib-report: 3.0.1 + istanbul-reports: 3.2.0 + magicast: 0.5.2 + obug: 2.1.1 + std-env: 3.10.0 + tinyrainbow: 3.0.3 + vitest: 4.0.18(jiti@2.6.1)(tsx@4.21.0) + + 
'@vitest/expect@4.0.18': + dependencies: + '@standard-schema/spec': 1.1.0 + '@types/chai': 5.2.3 + '@vitest/spy': 4.0.18 + '@vitest/utils': 4.0.18 + chai: 6.2.2 + tinyrainbow: 3.0.3 + + '@vitest/mocker@4.0.18(vite@7.3.1(jiti@2.6.1)(tsx@4.21.0))': + dependencies: + '@vitest/spy': 4.0.18 + estree-walker: 3.0.3 + magic-string: 0.30.21 + optionalDependencies: + vite: 7.3.1(jiti@2.6.1)(tsx@4.21.0) + + '@vitest/pretty-format@4.0.18': + dependencies: + tinyrainbow: 3.0.3 + + '@vitest/runner@4.0.18': + dependencies: + '@vitest/utils': 4.0.18 + pathe: 2.0.3 + + '@vitest/snapshot@4.0.18': + dependencies: + '@vitest/pretty-format': 4.0.18 + magic-string: 0.30.21 + pathe: 2.0.3 + + '@vitest/spy@4.0.18': {} + + '@vitest/utils@4.0.18': + dependencies: + '@vitest/pretty-format': 4.0.18 + tinyrainbow: 3.0.3 + + abstract-logging@2.0.1: {} + + accepts@2.0.0: + dependencies: + mime-types: 3.0.2 + negotiator: 1.0.0 + + acorn-jsx@5.3.2(acorn@8.16.0): + dependencies: + acorn: 8.16.0 + + acorn@8.16.0: {} + + ajv-formats@3.0.1(ajv@8.18.0): + optionalDependencies: + ajv: 8.18.0 + + ajv@6.14.0: + dependencies: + fast-deep-equal: 3.1.3 + fast-json-stable-stringify: 2.1.0 + json-schema-traverse: 0.4.1 + uri-js: 4.4.1 + + ajv@8.18.0: + dependencies: + fast-deep-equal: 3.1.3 + fast-uri: 3.1.0 + json-schema-traverse: 1.0.0 + require-from-string: 2.0.2 + + ansi-regex@5.0.1: {} + + ansi-styles@4.3.0: + dependencies: + color-convert: 2.0.1 + + argparse@2.0.1: {} + + assertion-error@2.0.1: {} + + ast-v8-to-istanbul@0.3.11: + dependencies: + '@jridgewell/trace-mapping': 0.3.31 + estree-walker: 3.0.3 + js-tokens: 10.0.0 + + atomic-sleep@1.0.0: {} + + avvio@9.2.0: + dependencies: + '@fastify/error': 4.2.0 + fastq: 1.20.1 + + balanced-match@1.0.2: {} + + balanced-match@4.0.3: {} + + body-parser@2.2.2: + dependencies: + bytes: 3.1.2 + content-type: 1.0.5 + debug: 4.4.3 + http-errors: 2.0.1 + iconv-lite: 0.7.2 + on-finished: 2.4.1 + qs: 6.15.0 + raw-body: 3.0.2 + type-is: 2.0.1 + 
transitivePeerDependencies: + - supports-color + + brace-expansion@2.0.2: + dependencies: + balanced-match: 1.0.2 + + brace-expansion@5.0.2: + dependencies: + balanced-match: 4.0.3 + + bytes@3.1.2: {} + + c12@3.1.0: + dependencies: + chokidar: 4.0.3 + confbox: 0.2.4 + defu: 6.1.4 + dotenv: 16.6.1 + exsolve: 1.0.8 + giget: 2.0.0 + jiti: 2.6.1 + ohash: 2.0.11 + pathe: 2.0.3 + perfect-debounce: 1.0.0 + pkg-types: 2.3.0 + rc9: 2.1.2 + + call-bind-apply-helpers@1.0.2: + dependencies: + es-errors: 1.3.0 + function-bind: 1.1.2 + + call-bound@1.0.4: + dependencies: + call-bind-apply-helpers: 1.0.2 + get-intrinsic: 1.3.0 + + chai@6.2.2: {} + + chalk@5.6.2: {} + + chardet@2.1.1: {} + + chokidar@4.0.3: + dependencies: + readdirp: 4.1.2 + + citty@0.1.6: + dependencies: + consola: 3.4.2 + + citty@0.2.1: {} + + cli-width@4.1.0: {} + + color-convert@2.0.1: + dependencies: + color-name: 1.1.4 + + color-name@1.1.4: {} + + commander@13.1.0: {} + + confbox@0.2.4: {} + + consola@3.4.2: {} + + content-disposition@1.0.1: {} + + content-type@1.0.5: {} + + cookie-signature@1.2.2: {} + + cookie@0.7.2: {} + + cookie@1.1.1: {} + + cors@2.8.6: + dependencies: + object-assign: 4.1.1 + vary: 1.1.2 + + cross-spawn@7.0.6: + dependencies: + path-key: 3.1.1 + shebang-command: 2.0.0 + which: 2.0.2 + + debug@4.4.3: + dependencies: + ms: 2.1.3 + + deep-is@0.1.4: {} + + deepmerge-ts@7.1.5: {} + + defu@6.1.4: {} + + depd@2.0.0: {} + + dequal@2.0.3: {} + + destr@2.0.5: {} + + dotenv@16.6.1: {} + + dunder-proto@1.0.1: + dependencies: + call-bind-apply-helpers: 1.0.2 + es-errors: 1.3.0 + gopd: 1.2.0 + + ee-first@1.1.1: {} + + effect@3.18.4: + dependencies: + '@standard-schema/spec': 1.1.0 + fast-check: 3.23.2 + + emoji-regex@8.0.0: {} + + empathic@2.0.0: {} + + encodeurl@2.0.0: {} + + es-define-property@1.0.1: {} + + es-errors@1.3.0: {} + + es-module-lexer@1.7.0: {} + + es-object-atoms@1.1.1: + dependencies: + es-errors: 1.3.0 + + esbuild@0.27.3: + optionalDependencies: + '@esbuild/aix-ppc64': 0.27.3 + 
'@esbuild/android-arm': 0.27.3 + '@esbuild/android-arm64': 0.27.3 + '@esbuild/android-x64': 0.27.3 + '@esbuild/darwin-arm64': 0.27.3 + '@esbuild/darwin-x64': 0.27.3 + '@esbuild/freebsd-arm64': 0.27.3 + '@esbuild/freebsd-x64': 0.27.3 + '@esbuild/linux-arm': 0.27.3 + '@esbuild/linux-arm64': 0.27.3 + '@esbuild/linux-ia32': 0.27.3 + '@esbuild/linux-loong64': 0.27.3 + '@esbuild/linux-mips64el': 0.27.3 + '@esbuild/linux-ppc64': 0.27.3 + '@esbuild/linux-riscv64': 0.27.3 + '@esbuild/linux-s390x': 0.27.3 + '@esbuild/linux-x64': 0.27.3 + '@esbuild/netbsd-arm64': 0.27.3 + '@esbuild/netbsd-x64': 0.27.3 + '@esbuild/openbsd-arm64': 0.27.3 + '@esbuild/openbsd-x64': 0.27.3 + '@esbuild/openharmony-arm64': 0.27.3 + '@esbuild/sunos-x64': 0.27.3 + '@esbuild/win32-arm64': 0.27.3 + '@esbuild/win32-ia32': 0.27.3 + '@esbuild/win32-x64': 0.27.3 + + escape-html@1.0.3: {} + + escape-string-regexp@4.0.0: {} + + eslint-config-prettier@10.1.8(eslint@10.0.1(jiti@2.6.1)): + dependencies: + eslint: 10.0.1(jiti@2.6.1) + + eslint-scope@9.1.1: + dependencies: + '@types/esrecurse': 4.3.1 + '@types/estree': 1.0.8 + esrecurse: 4.3.0 + estraverse: 5.3.0 + + eslint-visitor-keys@3.4.3: {} + + eslint-visitor-keys@5.0.1: {} + + eslint@10.0.1(jiti@2.6.1): + dependencies: + '@eslint-community/eslint-utils': 4.9.1(eslint@10.0.1(jiti@2.6.1)) + '@eslint-community/regexpp': 4.12.2 + '@eslint/config-array': 0.23.2 + '@eslint/config-helpers': 0.5.2 + '@eslint/core': 1.1.0 + '@eslint/plugin-kit': 0.6.0 + '@humanfs/node': 0.16.7 + '@humanwhocodes/module-importer': 1.0.1 + '@humanwhocodes/retry': 0.4.3 + '@types/estree': 1.0.8 + ajv: 6.14.0 + cross-spawn: 7.0.6 + debug: 4.4.3 + escape-string-regexp: 4.0.0 + eslint-scope: 9.1.1 + eslint-visitor-keys: 5.0.1 + espree: 11.1.1 + esquery: 1.7.0 + esutils: 2.0.3 + fast-deep-equal: 3.1.3 + file-entry-cache: 8.0.0 + find-up: 5.0.0 + glob-parent: 6.0.2 + ignore: 5.3.2 + imurmurhash: 0.1.4 + is-glob: 4.0.3 + json-stable-stringify-without-jsonify: 1.0.1 + minimatch: 10.2.2 + 
natural-compare: 1.4.0 + optionator: 0.9.4 + optionalDependencies: + jiti: 2.6.1 + transitivePeerDependencies: + - supports-color + + espree@11.1.1: + dependencies: + acorn: 8.16.0 + acorn-jsx: 5.3.2(acorn@8.16.0) + eslint-visitor-keys: 5.0.1 + + esquery@1.7.0: + dependencies: + estraverse: 5.3.0 + + esrecurse@4.3.0: + dependencies: + estraverse: 5.3.0 + + estraverse@5.3.0: {} + + estree-walker@3.0.3: + dependencies: + '@types/estree': 1.0.8 + + esutils@2.0.3: {} + + etag@1.8.1: {} + + eventsource-parser@3.0.6: {} + + eventsource@3.0.7: + dependencies: + eventsource-parser: 3.0.6 + + expect-type@1.3.0: {} + + express-rate-limit@8.2.1(express@5.2.1): + dependencies: + express: 5.2.1 + ip-address: 10.0.1 + + express@5.2.1: + dependencies: + accepts: 2.0.0 + body-parser: 2.2.2 + content-disposition: 1.0.1 + content-type: 1.0.5 + cookie: 0.7.2 + cookie-signature: 1.2.2 + debug: 4.4.3 + depd: 2.0.0 + encodeurl: 2.0.0 + escape-html: 1.0.3 + etag: 1.8.1 + finalhandler: 2.1.1 + fresh: 2.0.0 + http-errors: 2.0.1 + merge-descriptors: 2.0.0 + mime-types: 3.0.2 + on-finished: 2.4.1 + once: 1.4.0 + parseurl: 1.3.3 + proxy-addr: 2.0.7 + qs: 6.15.0 + range-parser: 1.2.1 + router: 2.2.0 + send: 1.2.1 + serve-static: 2.2.1 + statuses: 2.0.2 + type-is: 2.0.1 + vary: 1.1.2 + transitivePeerDependencies: + - supports-color + + exsolve@1.0.8: {} + + fast-check@3.23.2: + dependencies: + pure-rand: 6.1.0 + + fast-decode-uri-component@1.0.1: {} + + fast-deep-equal@3.1.3: {} + + fast-json-stable-stringify@2.1.0: {} + + fast-json-stringify@6.3.0: + dependencies: + '@fastify/merge-json-schemas': 0.2.1 + ajv: 8.18.0 + ajv-formats: 3.0.1(ajv@8.18.0) + fast-uri: 3.1.0 + json-schema-ref-resolver: 3.0.0 + rfdc: 1.4.1 + + fast-levenshtein@2.0.6: {} + + fast-querystring@1.1.2: + dependencies: + fast-decode-uri-component: 1.0.1 + + fast-uri@3.1.0: {} + + fastify-plugin@5.1.0: {} + + fastify@5.7.4: + dependencies: + '@fastify/ajv-compiler': 4.0.5 + '@fastify/error': 4.2.0 + 
'@fastify/fast-json-stringify-compiler': 5.0.3 + '@fastify/proxy-addr': 5.1.0 + abstract-logging: 2.0.1 + avvio: 9.2.0 + fast-json-stringify: 6.3.0 + find-my-way: 9.4.0 + light-my-request: 6.6.0 + pino: 10.3.1 + process-warning: 5.0.0 + rfdc: 1.4.1 + secure-json-parse: 4.1.0 + semver: 7.7.4 + toad-cache: 3.7.0 + + fastq@1.20.1: + dependencies: + reusify: 1.1.0 + + fdir@6.5.0(picomatch@4.0.3): + optionalDependencies: + picomatch: 4.0.3 + + file-entry-cache@8.0.0: + dependencies: + flat-cache: 4.0.1 + + finalhandler@2.1.1: + dependencies: + debug: 4.4.3 + encodeurl: 2.0.0 + escape-html: 1.0.3 + on-finished: 2.4.1 + parseurl: 1.3.3 + statuses: 2.0.2 + transitivePeerDependencies: + - supports-color + + find-my-way@9.4.0: + dependencies: + fast-deep-equal: 3.1.3 + fast-querystring: 1.1.2 + safe-regex2: 5.0.0 + + find-up@5.0.0: + dependencies: + locate-path: 6.0.0 + path-exists: 4.0.0 + + flat-cache@4.0.1: + dependencies: + flatted: 3.3.3 + keyv: 4.5.4 + + flatted@3.3.3: {} + + forwarded@0.2.0: {} + + fresh@2.0.0: {} + + fsevents@2.3.3: + optional: true + + function-bind@1.1.2: {} + + get-intrinsic@1.3.0: + dependencies: + call-bind-apply-helpers: 1.0.2 + es-define-property: 1.0.1 + es-errors: 1.3.0 + es-object-atoms: 1.1.1 + function-bind: 1.1.2 + get-proto: 1.0.1 + gopd: 1.2.0 + has-symbols: 1.1.0 + hasown: 2.0.2 + math-intrinsics: 1.1.0 + + get-proto@1.0.1: + dependencies: + dunder-proto: 1.0.1 + es-object-atoms: 1.1.1 + + get-tsconfig@4.13.6: + dependencies: + resolve-pkg-maps: 1.0.0 + + giget@2.0.0: + dependencies: + citty: 0.1.6 + consola: 3.4.2 + defu: 6.1.4 + node-fetch-native: 1.6.7 + nypm: 0.6.5 + pathe: 2.0.3 + + glob-parent@6.0.2: + dependencies: + is-glob: 4.0.3 + + glob@13.0.6: + dependencies: + minimatch: 10.2.2 + minipass: 7.1.3 + path-scurry: 2.0.2 + + gopd@1.2.0: {} + + has-flag@4.0.0: {} + + has-symbols@1.1.0: {} + + hasown@2.0.2: + dependencies: + function-bind: 1.1.2 + + helmet@7.2.0: {} + + hono@4.12.0: {} + + html-escaper@2.0.2: {} + + 
http-errors@2.0.1: + dependencies: + depd: 2.0.0 + inherits: 2.0.4 + setprototypeof: 1.2.0 + statuses: 2.0.2 + toidentifier: 1.0.1 + + iconv-lite@0.7.2: + dependencies: + safer-buffer: 2.1.2 + + ignore@5.3.2: {} + + ignore@7.0.5: {} + + imurmurhash@0.1.4: {} + + inherits@2.0.4: {} + + inquirer@12.11.1: + dependencies: + '@inquirer/ansi': 1.0.2 + '@inquirer/core': 10.3.2 + '@inquirer/prompts': 7.10.1 + '@inquirer/type': 3.0.10 + mute-stream: 2.0.0 + run-async: 4.0.6 + rxjs: 7.8.2 + + ip-address@10.0.1: {} + + ipaddr.js@1.9.1: {} + + ipaddr.js@2.3.0: {} + + is-extglob@2.1.1: {} + + is-fullwidth-code-point@3.0.0: {} + + is-glob@4.0.3: + dependencies: + is-extglob: 2.1.1 + + is-promise@4.0.0: {} + + isexe@2.0.0: {} + + istanbul-lib-coverage@3.2.2: {} + + istanbul-lib-report@3.0.1: + dependencies: + istanbul-lib-coverage: 3.2.2 + make-dir: 4.0.0 + supports-color: 7.2.0 + + istanbul-reports@3.2.0: + dependencies: + html-escaper: 2.0.2 + istanbul-lib-report: 3.0.1 + + jiti@2.6.1: {} + + jose@6.1.3: {} + + js-tokens@10.0.0: {} + + js-yaml@4.1.1: + dependencies: + argparse: 2.0.1 + + json-buffer@3.0.1: {} + + json-schema-ref-resolver@3.0.0: + dependencies: + dequal: 2.0.3 + + json-schema-traverse@0.4.1: {} + + json-schema-traverse@1.0.0: {} + + json-schema-typed@8.0.2: {} + + json-stable-stringify-without-jsonify@1.0.1: {} + + keyv@4.5.4: + dependencies: + json-buffer: 3.0.1 + + levn@0.4.1: + dependencies: + prelude-ls: 1.2.1 + type-check: 0.4.0 + + light-my-request@6.6.0: + dependencies: + cookie: 1.1.1 + process-warning: 4.0.1 + set-cookie-parser: 2.7.2 + + locate-path@6.0.0: + dependencies: + p-locate: 5.0.0 + + lru-cache@11.2.6: {} + + magic-string@0.30.21: + dependencies: + '@jridgewell/sourcemap-codec': 1.5.5 + + magicast@0.5.2: + dependencies: + '@babel/parser': 7.29.0 + '@babel/types': 7.29.0 + source-map-js: 1.2.1 + + make-dir@4.0.0: + dependencies: + semver: 7.7.4 + + math-intrinsics@1.1.0: {} + + media-typer@1.1.0: {} + + merge-descriptors@2.0.0: {} + + 
mime-db@1.54.0: {} + + mime-types@3.0.2: + dependencies: + mime-db: 1.54.0 + + minimatch@10.2.2: + dependencies: + brace-expansion: 5.0.2 + + minimatch@9.0.5: + dependencies: + brace-expansion: 2.0.2 + + minipass@7.1.3: {} + + mnemonist@0.40.0: + dependencies: + obliterator: 2.0.5 + + ms@2.1.3: {} + + mute-stream@2.0.0: {} + + nanoid@3.3.11: {} + + natural-compare@1.4.0: {} + + negotiator@1.0.0: {} + + node-fetch-native@1.6.7: {} + + nypm@0.6.5: + dependencies: + citty: 0.2.1 + pathe: 2.0.3 + tinyexec: 1.0.2 + + object-assign@4.1.1: {} + + object-inspect@1.13.4: {} + + obliterator@2.0.5: {} + + obug@2.1.1: {} + + ohash@2.0.11: {} + + on-exit-leak-free@2.1.2: {} + + on-finished@2.4.1: + dependencies: + ee-first: 1.1.1 + + once@1.4.0: + dependencies: + wrappy: 1.0.2 + + optionator@0.9.4: + dependencies: + deep-is: 0.1.4 + fast-levenshtein: 2.0.6 + levn: 0.4.1 + prelude-ls: 1.2.1 + type-check: 0.4.0 + word-wrap: 1.2.5 + + p-limit@3.1.0: + dependencies: + yocto-queue: 0.1.0 + + p-locate@5.0.0: + dependencies: + p-limit: 3.1.0 + + package-json-from-dist@1.0.1: {} + + parseurl@1.3.3: {} + + path-exists@4.0.0: {} + + path-key@3.1.1: {} + + path-scurry@2.0.2: + dependencies: + lru-cache: 11.2.6 + minipass: 7.1.3 + + path-to-regexp@8.3.0: {} + + pathe@2.0.3: {} + + perfect-debounce@1.0.0: {} + + picocolors@1.1.1: {} + + picomatch@4.0.3: {} + + pino-abstract-transport@3.0.0: + dependencies: + split2: 4.2.0 + + pino-std-serializers@7.1.0: {} + + pino@10.3.1: + dependencies: + '@pinojs/redact': 0.4.0 + atomic-sleep: 1.0.0 + on-exit-leak-free: 2.1.2 + pino-abstract-transport: 3.0.0 + pino-std-serializers: 7.1.0 + process-warning: 5.0.0 + quick-format-unescaped: 4.0.4 + real-require: 0.2.0 + safe-stable-stringify: 2.5.0 + sonic-boom: 4.2.1 + thread-stream: 4.0.0 + + pkce-challenge@5.0.1: {} + + pkg-types@2.3.0: + dependencies: + confbox: 0.2.4 + exsolve: 1.0.8 + pathe: 2.0.3 + + postcss@8.5.6: + dependencies: + nanoid: 3.3.11 + picocolors: 1.1.1 + source-map-js: 1.2.1 + + 
prelude-ls@1.2.1: {} + + prisma@6.19.2(typescript@5.9.3): + dependencies: + '@prisma/config': 6.19.2 + '@prisma/engines': 6.19.2 + optionalDependencies: + typescript: 5.9.3 + transitivePeerDependencies: + - magicast + + process-warning@4.0.1: {} + + process-warning@5.0.0: {} + + proxy-addr@2.0.7: + dependencies: + forwarded: 0.2.0 + ipaddr.js: 1.9.1 + + punycode@2.3.1: {} + + pure-rand@6.1.0: {} + + qs@6.15.0: + dependencies: + side-channel: 1.1.0 + + quick-format-unescaped@4.0.4: {} + + range-parser@1.2.1: {} + + raw-body@3.0.2: + dependencies: + bytes: 3.1.2 + http-errors: 2.0.1 + iconv-lite: 0.7.2 + unpipe: 1.0.0 + + rc9@2.1.2: + dependencies: + defu: 6.1.4 + destr: 2.0.5 + + readdirp@4.1.2: {} + + real-require@0.2.0: {} + + require-from-string@2.0.2: {} + + resolve-pkg-maps@1.0.0: {} + + ret@0.5.0: {} + + reusify@1.1.0: {} + + rfdc@1.4.1: {} + + rimraf@6.1.3: + dependencies: + glob: 13.0.6 + package-json-from-dist: 1.0.1 + + rollup@4.58.0: + dependencies: + '@types/estree': 1.0.8 + optionalDependencies: + '@rollup/rollup-android-arm-eabi': 4.58.0 + '@rollup/rollup-android-arm64': 4.58.0 + '@rollup/rollup-darwin-arm64': 4.58.0 + '@rollup/rollup-darwin-x64': 4.58.0 + '@rollup/rollup-freebsd-arm64': 4.58.0 + '@rollup/rollup-freebsd-x64': 4.58.0 + '@rollup/rollup-linux-arm-gnueabihf': 4.58.0 + '@rollup/rollup-linux-arm-musleabihf': 4.58.0 + '@rollup/rollup-linux-arm64-gnu': 4.58.0 + '@rollup/rollup-linux-arm64-musl': 4.58.0 + '@rollup/rollup-linux-loong64-gnu': 4.58.0 + '@rollup/rollup-linux-loong64-musl': 4.58.0 + '@rollup/rollup-linux-ppc64-gnu': 4.58.0 + '@rollup/rollup-linux-ppc64-musl': 4.58.0 + '@rollup/rollup-linux-riscv64-gnu': 4.58.0 + '@rollup/rollup-linux-riscv64-musl': 4.58.0 + '@rollup/rollup-linux-s390x-gnu': 4.58.0 + '@rollup/rollup-linux-x64-gnu': 4.58.0 + '@rollup/rollup-linux-x64-musl': 4.58.0 + '@rollup/rollup-openbsd-x64': 4.58.0 + '@rollup/rollup-openharmony-arm64': 4.58.0 + '@rollup/rollup-win32-arm64-msvc': 4.58.0 + 
'@rollup/rollup-win32-ia32-msvc': 4.58.0 + '@rollup/rollup-win32-x64-gnu': 4.58.0 + '@rollup/rollup-win32-x64-msvc': 4.58.0 + fsevents: 2.3.3 + + router@2.2.0: + dependencies: + debug: 4.4.3 + depd: 2.0.0 + is-promise: 4.0.0 + parseurl: 1.3.3 + path-to-regexp: 8.3.0 + transitivePeerDependencies: + - supports-color + + run-async@4.0.6: {} + + rxjs@7.8.2: + dependencies: + tslib: 2.8.1 + + safe-regex2@5.0.0: + dependencies: + ret: 0.5.0 + + safe-stable-stringify@2.5.0: {} + + safer-buffer@2.1.2: {} + + secure-json-parse@4.1.0: {} + + semver@7.7.4: {} + + send@1.2.1: + dependencies: + debug: 4.4.3 + encodeurl: 2.0.0 + escape-html: 1.0.3 + etag: 1.8.1 + fresh: 2.0.0 + http-errors: 2.0.1 + mime-types: 3.0.2 + ms: 2.1.3 + on-finished: 2.4.1 + range-parser: 1.2.1 + statuses: 2.0.2 + transitivePeerDependencies: + - supports-color + + serve-static@2.2.1: + dependencies: + encodeurl: 2.0.0 + escape-html: 1.0.3 + parseurl: 1.3.3 + send: 1.2.1 + transitivePeerDependencies: + - supports-color + + set-cookie-parser@2.7.2: {} + + setprototypeof@1.2.0: {} + + shebang-command@2.0.0: + dependencies: + shebang-regex: 3.0.0 + + shebang-regex@3.0.0: {} + + side-channel-list@1.0.0: + dependencies: + es-errors: 1.3.0 + object-inspect: 1.13.4 + + side-channel-map@1.0.1: + dependencies: + call-bound: 1.0.4 + es-errors: 1.3.0 + get-intrinsic: 1.3.0 + object-inspect: 1.13.4 + + side-channel-weakmap@1.0.2: + dependencies: + call-bound: 1.0.4 + es-errors: 1.3.0 + get-intrinsic: 1.3.0 + object-inspect: 1.13.4 + side-channel-map: 1.0.1 + + side-channel@1.1.0: + dependencies: + es-errors: 1.3.0 + object-inspect: 1.13.4 + side-channel-list: 1.0.0 + side-channel-map: 1.0.1 + side-channel-weakmap: 1.0.2 + + siginfo@2.0.0: {} + + signal-exit@4.1.0: {} + + sonic-boom@4.2.1: + dependencies: + atomic-sleep: 1.0.0 + + source-map-js@1.2.1: {} + + split2@4.2.0: {} + + stackback@0.0.2: {} + + statuses@2.0.2: {} + + std-env@3.10.0: {} + + string-width@4.2.3: + dependencies: + emoji-regex: 8.0.0 + 
is-fullwidth-code-point: 3.0.0 + strip-ansi: 6.0.1 + + strip-ansi@6.0.1: + dependencies: + ansi-regex: 5.0.1 + + supports-color@7.2.0: + dependencies: + has-flag: 4.0.0 + + thread-stream@4.0.0: + dependencies: + real-require: 0.2.0 + + tinybench@2.9.0: {} + + tinyexec@1.0.2: {} + + tinyglobby@0.2.15: + dependencies: + fdir: 6.5.0(picomatch@4.0.3) + picomatch: 4.0.3 + + tinyrainbow@3.0.3: {} + + toad-cache@3.7.0: {} + + toidentifier@1.0.1: {} + + ts-api-utils@2.4.0(typescript@5.9.3): + dependencies: + typescript: 5.9.3 + + tslib@2.8.1: {} + + tsx@4.21.0: + dependencies: + esbuild: 0.27.3 + get-tsconfig: 4.13.6 + optionalDependencies: + fsevents: 2.3.3 + + type-check@0.4.0: + dependencies: + prelude-ls: 1.2.1 + + type-is@2.0.1: + dependencies: + content-type: 1.0.5 + media-typer: 1.1.0 + mime-types: 3.0.2 + + typescript@5.9.3: {} + + unpipe@1.0.0: {} + + uri-js@4.4.1: + dependencies: + punycode: 2.3.1 + + vary@1.1.2: {} + + vite@7.3.1(jiti@2.6.1)(tsx@4.21.0): + dependencies: + esbuild: 0.27.3 + fdir: 6.5.0(picomatch@4.0.3) + picomatch: 4.0.3 + postcss: 8.5.6 + rollup: 4.58.0 + tinyglobby: 0.2.15 + optionalDependencies: + fsevents: 2.3.3 + jiti: 2.6.1 + tsx: 4.21.0 + + vitest@4.0.18(jiti@2.6.1)(tsx@4.21.0): + dependencies: + '@vitest/expect': 4.0.18 + '@vitest/mocker': 4.0.18(vite@7.3.1(jiti@2.6.1)(tsx@4.21.0)) + '@vitest/pretty-format': 4.0.18 + '@vitest/runner': 4.0.18 + '@vitest/snapshot': 4.0.18 + '@vitest/spy': 4.0.18 + '@vitest/utils': 4.0.18 + es-module-lexer: 1.7.0 + expect-type: 1.3.0 + magic-string: 0.30.21 + obug: 2.1.1 + pathe: 2.0.3 + picomatch: 4.0.3 + std-env: 3.10.0 + tinybench: 2.9.0 + tinyexec: 1.0.2 + tinyglobby: 0.2.15 + tinyrainbow: 3.0.3 + vite: 7.3.1(jiti@2.6.1)(tsx@4.21.0) + why-is-node-running: 2.3.0 + transitivePeerDependencies: + - jiti + - less + - lightningcss + - msw + - sass + - sass-embedded + - stylus + - sugarss + - terser + - tsx + - yaml + + which@2.0.2: + dependencies: + isexe: 2.0.0 + + why-is-node-running@2.3.0: + dependencies: + 
siginfo: 2.0.0 + stackback: 0.0.2 + + word-wrap@1.2.5: {} + + wrap-ansi@6.2.0: + dependencies: + ansi-styles: 4.3.0 + string-width: 4.2.3 + strip-ansi: 6.0.1 + + wrappy@1.0.2: {} + + yocto-queue@0.1.0: {} + + yoctocolors-cjs@2.1.3: {} + + zod-to-json-schema@3.25.1(zod@3.25.76): + dependencies: + zod: 3.25.76 + + zod@3.25.76: {} diff --git a/pnpm-workspace.yaml b/pnpm-workspace.yaml new file mode 100644 index 0000000..2ddfbd3 --- /dev/null +++ b/pnpm-workspace.yaml @@ -0,0 +1,2 @@ +packages: + - "src/*" diff --git a/src/cli/package.json b/src/cli/package.json new file mode 100644 index 0000000..f860d28 --- /dev/null +++ b/src/cli/package.json @@ -0,0 +1,26 @@ +{ + "name": "@mcpctl/cli", + "version": "0.1.0", + "private": true, + "type": "module", + "bin": { + "mcpctl": "./dist/index.js" + }, + "main": "./dist/index.js", + "types": "./dist/index.d.ts", + "scripts": { + "build": "tsc --build", + "clean": "rimraf dist", + "dev": "tsx src/index.ts", + "test": "vitest", + "test:run": "vitest run" + }, + "dependencies": { + "commander": "^13.0.0", + "chalk": "^5.4.0", + "inquirer": "^12.0.0", + "js-yaml": "^4.1.0", + "@mcpctl/shared": "workspace:*", + "@mcpctl/db": "workspace:*" + } +} diff --git a/src/cli/src/index.ts b/src/cli/src/index.ts new file mode 100644 index 0000000..4c6ac3f --- /dev/null +++ b/src/cli/src/index.ts @@ -0,0 +1,2 @@ +// mcpctl CLI entry point +// Will be implemented in Task 7 diff --git a/src/cli/src/registry/base.ts b/src/cli/src/registry/base.ts new file mode 100644 index 0000000..aff9e8d --- /dev/null +++ b/src/cli/src/registry/base.ts @@ -0,0 +1,9 @@ +import type { RegistryServer } from './types.js'; + +export abstract class RegistrySource { + abstract readonly name: string; + + abstract search(query: string, limit: number): Promise<RegistryServer[]>; + + protected abstract normalizeResult(raw: unknown): RegistryServer; +} diff --git a/src/cli/src/registry/cache.ts b/src/cli/src/registry/cache.ts new file mode 100644 index 0000000..2a0f8b8 --- 
/dev/null +++ b/src/cli/src/registry/cache.ts @@ -0,0 +1,57 @@ +import { createHash } from 'crypto'; +import type { RegistryServer, SearchOptions } from './types.js'; + +export class RegistryCache { + private cache = new Map<string, { data: RegistryServer[]; expires: number }>(); + private defaultTTL: number; + private hits = 0; + private misses = 0; + + constructor(ttlMs = 3_600_000) { + this.defaultTTL = ttlMs; + } + + private getKey(query: string, options: SearchOptions): string { + return createHash('sha256') + .update(JSON.stringify({ query, options })) + .digest('hex'); + } + + get(query: string, options: SearchOptions): RegistryServer[] | null { + const key = this.getKey(query, options); + const entry = this.cache.get(key); + if (entry !== undefined && entry.expires > Date.now()) { + this.hits++; + return entry.data; + } + if (entry !== undefined) { + this.cache.delete(key); + } + this.misses++; + return null; + } + + set(query: string, options: SearchOptions, data: RegistryServer[]): void { + const key = this.getKey(query, options); + this.cache.set(key, { data, expires: Date.now() + this.defaultTTL }); + } + + getHitRatio(): { hits: number; misses: number; ratio: number } { + const total = this.hits + this.misses; + return { + hits: this.hits, + misses: this.misses, + ratio: total === 0 ? 
0 : this.hits / total, + }; + } + + clear(): void { + this.cache.clear(); + this.hits = 0; + this.misses = 0; + } + + get size(): number { + return this.cache.size; + } +} diff --git a/src/cli/src/registry/dedup.ts b/src/cli/src/registry/dedup.ts new file mode 100644 index 0000000..b1e2262 --- /dev/null +++ b/src/cli/src/registry/dedup.ts @@ -0,0 +1,76 @@ +import type { RegistryServer, EnvVar } from './types.js'; + +function normalizeGitHubUrl(url: string): string { + return url + .replace(/^git@github\.com:/, 'https://github.com/') + .replace(/\.git$/, '') + .replace(/\/$/, '') + .toLowerCase(); +} + +function mergeEnvTemplates(a: EnvVar[], b: EnvVar[]): EnvVar[] { + const seen = new Map<string, EnvVar>(); + for (const v of a) { + seen.set(v.name, v); + } + for (const v of b) { + if (!seen.has(v.name)) { + seen.set(v.name, v); + } + } + return [...seen.values()]; +} + +export function deduplicateResults(results: RegistryServer[]): RegistryServer[] { + const byNpm = new Map<string, RegistryServer>(); + const byRepo = new Map<string, RegistryServer>(); + const deduped: RegistryServer[] = []; + + for (const server of results) { + const npmKey = server.packages.npm; + const repoKey = + server.repositoryUrl !== undefined ? 
normalizeGitHubUrl(server.repositoryUrl) : undefined; + + let existing: RegistryServer | undefined; + + if (npmKey !== undefined) { + existing = byNpm.get(npmKey); + } + if (existing === undefined && repoKey !== undefined) { + existing = byRepo.get(repoKey); + } + + if (existing !== undefined) { + // Merge: keep higher popularity, combine envTemplates + if (server.popularityScore > existing.popularityScore) { + const merged: RegistryServer = { + ...server, + envTemplate: mergeEnvTemplates(server.envTemplate, existing.envTemplate), + verified: server.verified || existing.verified, + }; + // Replace existing in deduped array + const idx = deduped.indexOf(existing); + if (idx !== -1) { + deduped[idx] = merged; + } + // Update maps to point to merged + if (npmKey !== undefined) byNpm.set(npmKey, merged); + if (repoKey !== undefined) byRepo.set(repoKey, merged); + if (existing.packages.npm !== undefined) byNpm.set(existing.packages.npm, merged); + if (existing.repositoryUrl !== undefined) { + byRepo.set(normalizeGitHubUrl(existing.repositoryUrl), merged); + } + } else { + // Keep existing but merge envTemplates + existing.envTemplate = mergeEnvTemplates(existing.envTemplate, server.envTemplate); + existing.verified = existing.verified || server.verified; + } + } else { + deduped.push(server); + if (npmKey !== undefined) byNpm.set(npmKey, server); + if (repoKey !== undefined) byRepo.set(repoKey, server); + } + } + + return deduped; +} diff --git a/src/cli/src/registry/types.ts b/src/cli/src/registry/types.ts new file mode 100644 index 0000000..952d371 --- /dev/null +++ b/src/cli/src/registry/types.ts @@ -0,0 +1,180 @@ +import { z } from 'zod'; + +// ── Normalized types used throughout mcpctl ── + +export interface EnvVar { + name: string; + description: string; + isSecret: boolean; + setupUrl?: string; + defaultValue?: string; +} + +export interface RegistryServer { + name: string; + description: string; + packages: { + npm?: string; + pypi?: string; + docker?: string; + 
}; + envTemplate: EnvVar[]; + transport: 'stdio' | 'sse' | 'streamable-http'; + repositoryUrl?: string; + popularityScore: number; + verified: boolean; + sourceRegistry: 'official' | 'glama' | 'smithery'; + lastUpdated?: Date; +} + +export interface SearchOptions { + query: string; + limit?: number; + registries?: RegistryName[]; + verified?: boolean; + transport?: 'stdio' | 'sse'; + category?: string; +} + +export type RegistryName = 'official' | 'glama' | 'smithery'; + +export interface RegistryClientConfig { + registries?: RegistryName[]; + cacheTTLMs?: number; + smitheryApiKey?: string; + httpProxy?: string; + httpsProxy?: string; +} + +// ── Zod schemas for API response validation ── + +// Official MCP Registry +const OfficialEnvVarSchema = z.object({ + name: z.string(), + description: z.string().optional().default(''), + format: z.string().optional(), + isSecret: z.boolean().optional().default(false), +}); + +const OfficialPackageSchema = z.object({ + registryType: z.string(), + identifier: z.string(), + version: z.string().optional(), + runtimeHint: z.string().optional(), + transport: z.object({ + type: z.string(), + }).optional(), + environmentVariables: z.array(OfficialEnvVarSchema).optional().default([]), +}); + +const OfficialRemoteSchema = z.object({ + type: z.string(), + url: z.string(), + headers: z.array(z.object({ + name: z.string(), + description: z.string().optional().default(''), + value: z.string().optional(), + isRequired: z.boolean().optional(), + isSecret: z.boolean().optional(), + })).optional().default([]), +}); + +const OfficialServerSchema = z.object({ + server: z.object({ + name: z.string(), + title: z.string().optional(), + description: z.string().optional().default(''), + version: z.string().optional(), + repository: z.object({ + url: z.string(), + source: z.string().optional(), + subfolder: z.string().optional(), + }).optional(), + packages: z.array(OfficialPackageSchema).optional().default([]), + remotes: 
z.array(OfficialRemoteSchema).optional().default([]), + }), + _meta: z.record(z.unknown()).optional(), +}); + +export const OfficialRegistryResponseSchema = z.object({ + servers: z.array(OfficialServerSchema), + metadata: z.object({ + nextCursor: z.string().nullable().optional(), + count: z.number().optional(), + }).optional(), +}); + +// Glama.ai +const GlamaServerSchema = z.object({ + id: z.string(), + name: z.string(), + namespace: z.string().optional().default(''), + slug: z.string().optional().default(''), + description: z.string().optional().default(''), + url: z.string().optional(), + attributes: z.array(z.string()).optional().default([]), + repository: z.object({ + url: z.string(), + }).optional(), + environmentVariablesJsonSchema: z.object({ + type: z.string().optional(), + properties: z.record(z.object({ + type: z.string().optional(), + description: z.string().optional(), + default: z.string().optional(), + })).optional().default({}), + required: z.array(z.string()).optional().default([]), + }).optional(), +}); + +export const GlamaRegistryResponseSchema = z.object({ + servers: z.array(GlamaServerSchema), + pageInfo: z.object({ + startCursor: z.string().nullable().optional(), + endCursor: z.string().nullable().optional(), + hasNextPage: z.boolean(), + hasPreviousPage: z.boolean(), + }), +}); + +// Smithery.ai +const SmitheryServerSchema = z.object({ + qualifiedName: z.string(), + displayName: z.string().optional().default(''), + description: z.string().optional().default(''), + iconUrl: z.string().optional(), + verified: z.boolean().optional().default(false), + useCount: z.number().optional().default(0), + remote: z.boolean().optional().default(false), + isDeployed: z.boolean().optional().default(false), + createdAt: z.string().optional(), + homepage: z.string().optional(), + score: z.number().optional().default(0), +}); + +export const SmitheryRegistryResponseSchema = z.object({ + servers: z.array(SmitheryServerSchema), + pagination: z.object({ + 
currentPage: z.number(), + pageSize: z.number(), + totalPages: z.number(), + totalCount: z.number(), + }), +}); + +// ── Inferred types from Zod schemas ── + +export type OfficialRegistryResponse = z.infer<typeof OfficialRegistryResponseSchema>; +export type OfficialServerEntry = z.infer<typeof OfficialServerSchema>; +export type GlamaRegistryResponse = z.infer<typeof GlamaRegistryResponseSchema>; +export type GlamaServerEntry = z.infer<typeof GlamaServerSchema>; +export type SmitheryRegistryResponse = z.infer<typeof SmitheryRegistryResponseSchema>; +export type SmitheryServerEntry = z.infer<typeof SmitheryServerSchema>; + +// ── Security utilities ── + +const ANSI_ESCAPE_RE = /[\x00-\x08\x0B\x0C\x0E-\x1F]|\x1b\[[0-9;]*[a-zA-Z]|\033\[[0-9;]*[a-zA-Z]/g; + +export function sanitizeString(text: string): string { + return text.replace(ANSI_ESCAPE_RE, ''); +} diff --git a/src/cli/tests/registry/types.test.ts b/src/cli/tests/registry/types.test.ts new file mode 100644 index 0000000..bf5a6c8 --- /dev/null +++ b/src/cli/tests/registry/types.test.ts @@ -0,0 +1,190 @@ +import { describe, it, expect } from 'vitest'; +import { + OfficialRegistryResponseSchema, + GlamaRegistryResponseSchema, + SmitheryRegistryResponseSchema, + sanitizeString, +} from '../../src/registry/types.js'; + +describe('sanitizeString', () => { + it('removes ANSI escape codes (\\x1b[)', () => { + expect(sanitizeString('\x1b[31mRED\x1b[0m text')).toBe('RED text'); + }); + + it('removes \\033[ style escape codes', () => { + expect(sanitizeString('\x1b[1mBOLD\x1b[0m')).toBe('BOLD'); + }); + + it('removes cursor movement codes', () => { + expect(sanitizeString('\x1b[2J\x1b[Hscreen cleared')).toBe('screen cleared'); + }); + + it('removes control characters', () => { + expect(sanitizeString('hello\x07world')).toBe('helloworld'); + }); + + it('preserves normal text', () => { + expect(sanitizeString('A normal MCP server description.')).toBe('A normal MCP server description.'); + }); + + it('preserves unicode 
characters', () => { + expect(sanitizeString('Serveur MCP pour Slack 🚀')).toBe('Serveur MCP pour Slack 🚀'); + }); + + it('handles empty string', () => { + expect(sanitizeString('')).toBe(''); + }); +}); + +describe('OfficialRegistryResponseSchema', () => { + it('validates a correct response', () => { + const valid = { + servers: [{ + server: { + name: 'io.github.test/slack-mcp', + description: 'Slack integration', + packages: [{ + registryType: 'npm', + identifier: '@test/slack-mcp', + transport: { type: 'stdio' }, + environmentVariables: [ + { name: 'SLACK_TOKEN', description: 'Bot token', isSecret: true }, + ], + }], + }, + }], + metadata: { nextCursor: 'abc:1.0.0', count: 1 }, + }; + const result = OfficialRegistryResponseSchema.safeParse(valid); + expect(result.success).toBe(true); + }); + + it('validates response with remotes', () => { + const valid = { + servers: [{ + server: { + name: 'io.github.test/remote-mcp', + remotes: [{ + type: 'sse', + url: 'https://example.com/sse', + headers: [{ name: 'Authorization', isSecret: true }], + }], + }, + }], + }; + const result = OfficialRegistryResponseSchema.safeParse(valid); + expect(result.success).toBe(true); + }); + + it('rejects response without servers array', () => { + const result = OfficialRegistryResponseSchema.safeParse({ metadata: {} }); + expect(result.success).toBe(false); + }); + + it('rejects server without name', () => { + const result = OfficialRegistryResponseSchema.safeParse({ + servers: [{ server: { description: 'no name' } }], + }); + expect(result.success).toBe(false); + }); + + it('defaults missing optional fields', () => { + const minimal = { + servers: [{ server: { name: 'test/minimal' } }], + }; + const result = OfficialRegistryResponseSchema.parse(minimal); + expect(result.servers[0]?.server.packages).toEqual([]); + expect(result.servers[0]?.server.remotes).toEqual([]); + expect(result.servers[0]?.server.description).toBe(''); + }); +}); + +describe('GlamaRegistryResponseSchema', () => { + 
it('validates a correct response', () => { + const valid = { + servers: [{ + id: 'abc123', + name: 'Slack MCP Server', + description: 'Slack integration', + attributes: ['hosting:local-only'], + repository: { url: 'https://github.com/test/slack' }, + environmentVariablesJsonSchema: { + type: 'object', + properties: { + SLACK_TOKEN: { type: 'string', description: 'Bot token' }, + }, + required: ['SLACK_TOKEN'], + }, + }], + pageInfo: { + endCursor: 'xyz', + hasNextPage: true, + hasPreviousPage: false, + }, + }; + const result = GlamaRegistryResponseSchema.safeParse(valid); + expect(result.success).toBe(true); + }); + + it('rejects response without pageInfo', () => { + const result = GlamaRegistryResponseSchema.safeParse({ + servers: [{ id: 'a', name: 'test' }], + }); + expect(result.success).toBe(false); + }); + + it('defaults missing env schema properties', () => { + const minimal = { + servers: [{ + id: 'a', + name: 'test', + environmentVariablesJsonSchema: {}, + }], + pageInfo: { hasNextPage: false, hasPreviousPage: false }, + }; + const result = GlamaRegistryResponseSchema.parse(minimal); + const envSchema = result.servers[0]?.environmentVariablesJsonSchema; + expect(envSchema?.properties).toEqual({}); + expect(envSchema?.required).toEqual([]); + }); +}); + +describe('SmitheryRegistryResponseSchema', () => { + it('validates a correct response', () => { + const valid = { + servers: [{ + qualifiedName: 'slack', + displayName: 'Slack', + description: 'Slack integration', + verified: true, + useCount: 14062, + remote: true, + }], + pagination: { + currentPage: 1, + pageSize: 10, + totalPages: 5, + totalCount: 50, + }, + }; + const result = SmitheryRegistryResponseSchema.safeParse(valid); + expect(result.success).toBe(true); + }); + + it('rejects response without pagination', () => { + const result = SmitheryRegistryResponseSchema.safeParse({ + servers: [{ qualifiedName: 'test' }], + }); + expect(result.success).toBe(false); + }); + + it('defaults useCount and 
verified', () => { + const minimal = { + servers: [{ qualifiedName: 'test' }], + pagination: { currentPage: 1, pageSize: 10, totalPages: 1, totalCount: 1 }, + }; + const result = SmitheryRegistryResponseSchema.parse(minimal); + expect(result.servers[0]?.useCount).toBe(0); + expect(result.servers[0]?.verified).toBe(false); + }); +}); diff --git a/src/cli/tsconfig.json b/src/cli/tsconfig.json new file mode 100644 index 0000000..1d1421c --- /dev/null +++ b/src/cli/tsconfig.json @@ -0,0 +1,12 @@ +{ + "extends": "../../tsconfig.base.json", + "compilerOptions": { + "rootDir": "src", + "outDir": "dist" + }, + "include": ["src/**/*.ts"], + "references": [ + { "path": "../shared" }, + { "path": "../db" } + ] +} diff --git a/src/cli/vitest.config.ts b/src/cli/vitest.config.ts new file mode 100644 index 0000000..1cbc8c8 --- /dev/null +++ b/src/cli/vitest.config.ts @@ -0,0 +1,8 @@ +import { defineProject } from 'vitest/config'; + +export default defineProject({ + test: { + name: 'cli', + include: ['tests/**/*.test.ts'], + }, +}); diff --git a/src/db/package.json b/src/db/package.json new file mode 100644 index 0000000..c1b97d7 --- /dev/null +++ b/src/db/package.json @@ -0,0 +1,31 @@ +{ + "name": "@mcpctl/db", + "version": "0.1.0", + "private": true, + "type": "module", + "main": "./dist/index.js", + "types": "./dist/index.d.ts", + "exports": { + ".": { + "import": "./dist/index.js", + "types": "./dist/index.d.ts" + } + }, + "scripts": { + "build": "tsc --build", + "clean": "rimraf dist", + "test": "vitest", + "test:run": "vitest run", + "db:generate": "prisma generate", + "db:push": "prisma db push", + "db:migrate": "prisma migrate dev", + "db:seed": "tsx src/seed/index.ts" + }, + "dependencies": { + "@prisma/client": "^6.0.0", + "@mcpctl/shared": "workspace:*" + }, + "devDependencies": { + "prisma": "^6.0.0" + } +} diff --git a/src/db/src/index.ts b/src/db/src/index.ts new file mode 100644 index 0000000..0140567 --- /dev/null +++ b/src/db/src/index.ts @@ -0,0 +1,2 @@ +// 
Database package - Prisma client and utilities +// Will be implemented in Task 2 diff --git a/src/db/tsconfig.json b/src/db/tsconfig.json new file mode 100644 index 0000000..6d97847 --- /dev/null +++ b/src/db/tsconfig.json @@ -0,0 +1,11 @@ +{ + "extends": "../../tsconfig.base.json", + "compilerOptions": { + "rootDir": "src", + "outDir": "dist" + }, + "include": ["src/**/*.ts"], + "references": [ + { "path": "../shared" } + ] +} diff --git a/src/db/vitest.config.ts b/src/db/vitest.config.ts new file mode 100644 index 0000000..8400038 --- /dev/null +++ b/src/db/vitest.config.ts @@ -0,0 +1,8 @@ +import { defineProject } from 'vitest/config'; + +export default defineProject({ + test: { + name: 'db', + include: ['tests/**/*.test.ts'], + }, +}); diff --git a/src/local-proxy/package.json b/src/local-proxy/package.json new file mode 100644 index 0000000..1060b69 --- /dev/null +++ b/src/local-proxy/package.json @@ -0,0 +1,20 @@ +{ + "name": "@mcpctl/local-proxy", + "version": "0.1.0", + "private": true, + "type": "module", + "main": "./dist/index.js", + "types": "./dist/index.d.ts", + "scripts": { + "build": "tsc --build", + "clean": "rimraf dist", + "dev": "tsx watch src/index.ts", + "start": "node dist/index.js", + "test": "vitest", + "test:run": "vitest run" + }, + "dependencies": { + "@modelcontextprotocol/sdk": "^1.0.0", + "@mcpctl/shared": "workspace:*" + } +} diff --git a/src/local-proxy/src/index.ts b/src/local-proxy/src/index.ts new file mode 100644 index 0000000..4d38b7f --- /dev/null +++ b/src/local-proxy/src/index.ts @@ -0,0 +1,2 @@ +// Local LLM proxy entry point +// Will be implemented in Task 11 diff --git a/src/local-proxy/tsconfig.json b/src/local-proxy/tsconfig.json new file mode 100644 index 0000000..6d97847 --- /dev/null +++ b/src/local-proxy/tsconfig.json @@ -0,0 +1,11 @@ +{ + "extends": "../../tsconfig.base.json", + "compilerOptions": { + "rootDir": "src", + "outDir": "dist" + }, + "include": ["src/**/*.ts"], + "references": [ + { "path": "../shared" } 
+ ] +} diff --git a/src/local-proxy/vitest.config.ts b/src/local-proxy/vitest.config.ts new file mode 100644 index 0000000..25cfcf9 --- /dev/null +++ b/src/local-proxy/vitest.config.ts @@ -0,0 +1,8 @@ +import { defineProject } from 'vitest/config'; + +export default defineProject({ + test: { + name: 'local-proxy', + include: ['tests/**/*.test.ts'], + }, +}); diff --git a/src/mcpd/package.json b/src/mcpd/package.json new file mode 100644 index 0000000..18f0770 --- /dev/null +++ b/src/mcpd/package.json @@ -0,0 +1,25 @@ +{ + "name": "@mcpctl/mcpd", + "version": "0.1.0", + "private": true, + "type": "module", + "main": "./dist/index.js", + "types": "./dist/index.d.ts", + "scripts": { + "build": "tsc --build", + "clean": "rimraf dist", + "dev": "tsx watch src/index.ts", + "start": "node dist/index.js", + "test": "vitest", + "test:run": "vitest run" + }, + "dependencies": { + "fastify": "^5.0.0", + "@fastify/cors": "^10.0.0", + "@fastify/helmet": "^12.0.0", + "@fastify/rate-limit": "^10.0.0", + "zod": "^3.24.0", + "@mcpctl/shared": "workspace:*", + "@mcpctl/db": "workspace:*" + } +} diff --git a/src/mcpd/src/index.ts b/src/mcpd/src/index.ts new file mode 100644 index 0000000..5dee52c --- /dev/null +++ b/src/mcpd/src/index.ts @@ -0,0 +1,2 @@ +// mcpd daemon server entry point +// Will be implemented in Task 3 diff --git a/src/mcpd/tsconfig.json b/src/mcpd/tsconfig.json new file mode 100644 index 0000000..1d1421c --- /dev/null +++ b/src/mcpd/tsconfig.json @@ -0,0 +1,12 @@ +{ + "extends": "../../tsconfig.base.json", + "compilerOptions": { + "rootDir": "src", + "outDir": "dist" + }, + "include": ["src/**/*.ts"], + "references": [ + { "path": "../shared" }, + { "path": "../db" } + ] +} diff --git a/src/mcpd/vitest.config.ts b/src/mcpd/vitest.config.ts new file mode 100644 index 0000000..3e6b311 --- /dev/null +++ b/src/mcpd/vitest.config.ts @@ -0,0 +1,8 @@ +import { defineProject } from 'vitest/config'; + +export default defineProject({ + test: { + name: 'mcpd', + include: 
['tests/**/*.test.ts'], + }, +}); diff --git a/src/shared/package.json b/src/shared/package.json new file mode 100644 index 0000000..14a52f3 --- /dev/null +++ b/src/shared/package.json @@ -0,0 +1,23 @@ +{ + "name": "@mcpctl/shared", + "version": "0.1.0", + "private": true, + "type": "module", + "main": "./dist/index.js", + "types": "./dist/index.d.ts", + "exports": { + ".": { + "import": "./dist/index.js", + "types": "./dist/index.d.ts" + } + }, + "scripts": { + "build": "tsc --build", + "clean": "rimraf dist", + "test": "vitest", + "test:run": "vitest run" + }, + "dependencies": { + "zod": "^3.24.0" + } +} diff --git a/src/shared/src/constants/index.ts b/src/shared/src/constants/index.ts new file mode 100644 index 0000000..6a47458 --- /dev/null +++ b/src/shared/src/constants/index.ts @@ -0,0 +1,5 @@ +// Shared constants +export const APP_NAME = 'mcpctl'; +export const APP_VERSION = '0.1.0'; +export const DEFAULT_MCPD_URL = 'http://localhost:3000'; +export const DEFAULT_DB_PORT = 5432; diff --git a/src/shared/src/index.ts b/src/shared/src/index.ts new file mode 100644 index 0000000..9f512e7 --- /dev/null +++ b/src/shared/src/index.ts @@ -0,0 +1,4 @@ +export * from './types/index.js'; +export * from './validation/index.js'; +export * from './constants/index.js'; +export * from './utils/index.js'; diff --git a/src/shared/src/types/index.ts b/src/shared/src/types/index.ts new file mode 100644 index 0000000..bd23840 --- /dev/null +++ b/src/shared/src/types/index.ts @@ -0,0 +1,46 @@ +// Core domain types for mcpctl +// These will be expanded as tasks are implemented + +export interface McpServerConfig { + name: string; + type: string; + command: string; + args: string[]; + envTemplate: EnvTemplateEntry[]; + setupGuide?: string; +} + +export interface EnvTemplateEntry { + name: string; + description: string; + isSecret: boolean; + setupUrl?: string; + defaultValue?: string; +} + +export interface McpProfile { + name: string; + serverId: string; + config: Record<string, 
unknown>; + filterRules?: Record<string, unknown>; +} + +export interface McpProject { + name: string; + description?: string; + profileIds: string[]; +} + +// Service interfaces for dependency injection +export interface BackupService { + exportConfig(): Promise<string>; + importConfig(data: string): Promise<void>; +} + +export interface ConfigExporter { + serialize(): Promise<Record<string, unknown>>; +} + +export interface ConfigImporter { + deserialize(data: Record<string, unknown>): Promise<void>; +} diff --git a/src/shared/src/utils/index.ts b/src/shared/src/utils/index.ts new file mode 100644 index 0000000..39156df --- /dev/null +++ b/src/shared/src/utils/index.ts @@ -0,0 +1,2 @@ +// Shared utility functions +// Will be expanded as tasks are implemented diff --git a/src/shared/src/validation/index.ts b/src/shared/src/validation/index.ts new file mode 100644 index 0000000..d60120f --- /dev/null +++ b/src/shared/src/validation/index.ts @@ -0,0 +1,4 @@ +// Shared Zod validation schemas +// Will be expanded as tasks are implemented + +export { z } from 'zod'; diff --git a/src/shared/tests/index.test.ts b/src/shared/tests/index.test.ts new file mode 100644 index 0000000..363ef17 --- /dev/null +++ b/src/shared/tests/index.test.ts @@ -0,0 +1,16 @@ +import { describe, it, expect } from 'vitest'; +import { APP_NAME, APP_VERSION, DEFAULT_MCPD_URL } from '../src/constants/index.js'; + +describe('shared package', () => { + it('exports APP_NAME constant', () => { + expect(APP_NAME).toBe('mcpctl'); + }); + + it('exports APP_VERSION constant', () => { + expect(APP_VERSION).toBe('0.1.0'); + }); + + it('exports DEFAULT_MCPD_URL constant', () => { + expect(DEFAULT_MCPD_URL).toBe('http://localhost:3000'); + }); +}); diff --git a/src/shared/tsconfig.json b/src/shared/tsconfig.json new file mode 100644 index 0000000..df59da5 --- /dev/null +++ b/src/shared/tsconfig.json @@ -0,0 +1,8 @@ +{ + "extends": "../../tsconfig.base.json", + "compilerOptions": { + "rootDir": "src", + 
"outDir": "dist" + }, + "include": ["src/**/*.ts"] +} diff --git a/src/shared/vitest.config.ts b/src/shared/vitest.config.ts new file mode 100644 index 0000000..28c192f --- /dev/null +++ b/src/shared/vitest.config.ts @@ -0,0 +1,8 @@ +import { defineProject } from 'vitest/config'; + +export default defineProject({ + test: { + name: 'shared', + include: ['tests/**/*.test.ts'], + }, +}); diff --git a/tsconfig.base.json b/tsconfig.base.json new file mode 100644 index 0000000..f535eb1 --- /dev/null +++ b/tsconfig.base.json @@ -0,0 +1,25 @@ +{ + "compilerOptions": { + "target": "ES2022", + "module": "NodeNext", + "moduleResolution": "NodeNext", + "lib": ["ES2022"], + "strict": true, + "esModuleInterop": true, + "skipLibCheck": true, + "forceConsistentCasingInFileNames": true, + "declaration": true, + "declarationMap": true, + "sourceMap": true, + "composite": true, + "incremental": true, + "noUnusedLocals": true, + "noUnusedParameters": true, + "noImplicitReturns": true, + "noFallthroughCasesInSwitch": true, + "exactOptionalPropertyTypes": true, + "noUncheckedIndexedAccess": true, + "isolatedModules": true, + "resolveJsonModule": true + } +} diff --git a/tsconfig.json b/tsconfig.json new file mode 100644 index 0000000..75fcdc7 --- /dev/null +++ b/tsconfig.json @@ -0,0 +1,10 @@ +{ + "files": [], + "references": [ + { "path": "src/shared" }, + { "path": "src/db" }, + { "path": "src/cli" }, + { "path": "src/mcpd" }, + { "path": "src/local-proxy" } + ] +} diff --git a/vitest.config.ts b/vitest.config.ts new file mode 100644 index 0000000..937ce86 --- /dev/null +++ b/vitest.config.ts @@ -0,0 +1,14 @@ +import { defineConfig } from 'vitest/config'; + +export default defineConfig({ + test: { + globals: true, + coverage: { + provider: 'v8', + reporter: ['text', 'json', 'html'], + exclude: ['**/node_modules/**', '**/dist/**', '**/*.config.*'], + }, + include: ['src/*/tests/**/*.test.ts', 'tests/**/*.test.ts'], + testTimeout: 10000, + }, +}); diff --git a/vitest.workspace.ts 
b/vitest.workspace.ts new file mode 100644 index 0000000..9d3c77e --- /dev/null +++ b/vitest.workspace.ts @@ -0,0 +1,9 @@ +import { defineWorkspace } from 'vitest/config'; + +export default defineWorkspace([ + 'src/shared', + 'src/db', + 'src/cli', + 'src/mcpd', + 'src/local-proxy', +]);