// src/agents/architect.js
   1  /**
   2   * Architect Agent
   3   *
   4   * Responsible for design reviews, refactoring guidance, and documentation freshness.
   5   * CRITICAL: Enforces documentation freshness - updates docs when code changes.
   6   */
   7  
import { execFileSync, execSync } from 'child_process';
import fs from 'fs/promises';
import { join } from 'path';
import { BaseAgent } from './base-agent.js';
import { generateCode } from './utils/agent-claude-api.js';
import { readFile, editFile, writeFile } from './utils/file-operations.js';
import { getOne, getAll } from '../utils/db.js';
import { addReviewItem } from '../utils/human-review-queue.js';
  16  
  17  export class ArchitectAgent extends BaseAgent {
  18    constructor() {
  19      super('architect', ['base.md', 'architect.md']);
  20    }
  21  
  22    /**
  23     * Process an architect task
  24     *
  25     * @param {Object} task - Task object
  26     * @returns {Promise<void>}
  27     */
  28    async processTask(task) {
  29      try {
  30        // Validate context exists and parse if needed
  31        if (!task.context_json) {
  32          // Some tasks may not require context (e.g., check_documentation_freshness)
  33          // Only validate for tasks that explicitly need it
  34          task.context_json = {};
  35        }
  36  
  37        const context =
  38          typeof task.context_json === 'string' ? JSON.parse(task.context_json) : task.context_json;
  39  
  40        // Ensure context is attached to task for handlers
  41        task.context_json = context;
  42  
  43        switch (task.task_type) {
  44          case 'design_proposal':
  45            await this.createDesignProposal(task);
  46            break;
  47  
  48          case 'technical_review':
  49            await this.reviewImplementationPlan(task);
  50            break;
  51  
  52          case 'review_design':
  53            await this.reviewDesign(task);
  54            break;
  55  
  56          case 'suggest_refactor':
  57            await this.suggestRefactor(task);
  58            break;
  59  
  60          case 'update_documentation':
  61            await this.updateDocumentation(task);
  62            break;
  63  
  64          case 'check_documentation_freshness':
  65            await this.checkDocumentationFreshness(task);
  66            break;
  67  
  68          case 'check_complexity':
  69            await this.checkComplexity(task);
  70            break;
  71  
  72          case 'audit_documentation':
  73            await this.auditDocumentation(task);
  74            break;
  75  
  76          case 'check_branch_health':
  77            await this.checkBranchHealth(task);
  78            break;
  79  
  80          case 'profile_performance':
  81            await this.profilePerformance(task);
  82            break;
  83  
  84          case 'review_documentation':
  85            await this.reviewDocumentation(task);
  86            break;
  87  
  88          case 'design_optimization':
  89            await this.analyzeOptimization(task);
  90            break;
  91  
  92          // Task types that should be delegated to other agents
  93          case 'fix_bug':
  94          case 'implement_feature':
  95          case 'investigate_issue':
  96          case 'create_automation':
  97            await this.delegateToCorrectAgent(task);
  98            break;
  99  
 100          default:
 101            // Unknown task types - delegate to correct agent via task routing
 102            await this.log('warn', 'Unknown task type received, delegating', {
 103              task_id: task.id,
 104              task_type: task.task_type,
 105            });
 106            await this.delegateToCorrectAgent(task);
 107        }
 108      } catch (error) {
 109        await this.log('error', `Architect task ${task.id} failed: ${error.message}`, {
 110          task_id: task.id,
 111          task_type: task.task_type,
 112          error: error.message,
 113          stack: error.stack,
 114        });
 115        throw error; // Re-throw so task manager can handle
 116      }
 117    }
 118    /**
 119     * Review a design proposal
 120     *
 121     * @param {Object} task - Task with design details
 122     * @returns {Promise<void>}
 123     */
 124    async reviewDesign(task) {
 125      const { files } = task.context_json || {};
 126  
 127      await this.log('info', 'Reviewing design', {
 128        task_id: task.id,
 129        files: files?.length || 'n/a',
 130      });
 131  
 132      // Check design against architectural principles
 133      const issues = [];
 134  
 135      // Check file size limits
 136      for (const file of files || []) {
 137        try {
 138          const content = await fs.readFile(file, 'utf8');
 139          const lines = content.split('\n').length;
 140  
 141          if (lines > 150) {
 142            issues.push({
 143              type: 'max_lines',
 144              file,
 145              current: lines,
 146              limit: 150,
 147              severity: 'medium',
 148            });
 149          }
 150        } catch (error) {
 151          // File might not exist yet (proposed)
 152        }
 153      }
 154  
 155      // Check for over-engineering indicators
 156      const overEngineeringPatterns = [
 157        {
 158          pattern: /class.*Factory|class.*Builder|class.*Strategy/i,
 159          description: 'Potential premature abstraction',
 160        },
 161        { pattern: /interface.*\{/, description: 'Interfaces not commonly used in this codebase' },
 162      ];
 163  
 164      for (const file of files || []) {
 165        try {
 166          const content = await fs.readFile(file, 'utf8');
 167  
 168          for (const { pattern, description } of overEngineeringPatterns) {
 169            if (pattern.test(content)) {
 170              issues.push({
 171                type: 'over_engineering',
 172                file,
 173                description,
 174                severity: 'low',
 175              });
 176            }
 177          }
 178        } catch (error) {
 179          // File might not exist yet
 180        }
 181      }
 182  
 183      await this.log('info', 'Design review complete', {
 184        task_id: task.id,
 185        issues_found: issues.length,
 186      });
 187  
 188      const approved =
 189        issues.filter(i => i.severity === 'high' || i.severity === 'critical').length === 0;
 190  
 191      await this.completeTask(task.id, {
 192        approved,
 193        issues,
 194        recommendations: this.generateRecommendations(issues),
 195      });
 196    }
 197  
 198    /**
 199     * Generate recommendations from issues
 200     *
 201     * @param {Array} issues - Design issues
 202     * @returns {Array} - Recommendations
 203     */
 204    generateRecommendations(issues) {
 205      const recommendations = [];
 206  
 207      for (const issue of issues) {
 208        if (issue.type === 'max_lines') {
 209          recommendations.push(`Split ${issue.file} into smaller modules (<150 lines each)`);
 210        } else if (issue.type === 'over_engineering') {
 211          recommendations.push(`Simplify ${issue.file}: ${issue.description}`);
 212        }
 213      }
 214  
 215      return recommendations;
 216    }
 217  
 218    /**
 219     * Suggest refactoring for complex code
 220     *
 221     * @param {Object} task - Task with complexity details
 222     * @returns {Promise<void>}
 223     */
 224    async suggestRefactor(task) {
 225      const { file, complexity_issues } = task.context_json || {};
 226  
 227      await this.log('info', 'Analyzing refactoring opportunities', {
 228        task_id: task.id,
 229        file,
 230        issues: complexity_issues,
 231      });
 232  
 233      const suggestions = [];
 234  
 235      // Analyze complexity issues
 236      for (const issue of complexity_issues || []) {
 237        if (issue.includes('nested')) {
 238          suggestions.push({
 239            type: 'extract_function',
 240            description: 'Extract nested logic into helper functions',
 241            priority: 'high',
 242          });
 243        }
 244  
 245        if (issue.includes('parameter')) {
 246          suggestions.push({
 247            type: 'configuration_object',
 248            description: 'Replace multiple parameters with configuration object',
 249            priority: 'medium',
 250          });
 251        }
 252  
 253        if (issue.includes('cyclomatic complexity')) {
 254          suggestions.push({
 255            type: 'simplify_conditionals',
 256            description: 'Use early returns and guard clauses',
 257            priority: 'high',
 258          });
 259        }
 260      }
 261  
 262      // Create developer task if refactoring needed
 263      if (suggestions.length > 0) {
 264        const devTaskId = await this.createTask({
 265          task_type: 'refactor_code',
 266          assigned_to: 'developer',
 267          priority: 6,
 268          context: {
 269            file,
 270            reason: 'Complexity exceeds limits',
 271            complexity_issues,
 272            suggestions,
 273          },
 274        });
 275  
 276        await this.completeTask(task.id, {
 277          suggestions,
 278          developer_task_id: devTaskId,
 279        });
 280      } else {
 281        await this.completeTask(task.id, {
 282          suggestions: [],
 283          note: 'No refactoring needed',
 284        });
 285      }
 286    }
 287  
 288    /**
 289     * Update documentation with Claude API assistance
 290     *
 291     * @param {Object} task - Task with documentation updates or file changes
 292     * @returns {Promise<void>}
 293     */
 294    async updateDocumentation(task) {
 295      const { stale_items, files, change_type } = task.context_json || {};
 296  
 297      await this.log('info', 'Updating documentation', {
 298        task_id: task.id,
 299        items: stale_items?.length || 0,
 300        files: files?.length || 0,
 301      });
 302  
 303      const updated = [];
 304      const errors = [];
 305  
 306      // Determine affected docs
 307      const affectedDocs = stale_items || this.identifyAffectedDocs(files || [], change_type);
 308  
 309      for (const item of affectedDocs) {
 310        try {
 311          await this.log('info', 'Processing documentation update', {
 312            file: item.file,
 313            reason: item.reason,
 314          });
 315  
 316          // Read current documentation
 317          const { content: docContent } = await readFile(item.file);
 318  
 319          // Generate summary of changes
 320          const changesSummary = await this.summarizeChanges(files || []);
 321  
 322          // Use Claude to generate doc updates
 323          const requirements = `Update ${item.file} documentation.
 324  
 325  Reason: ${item.reason}
 326  Suggested fix: ${item.fix || 'Update to reflect current implementation'}
 327  
 328  Recent changes:
 329  ${changesSummary}
 330  
 331  Current documentation:
 332  \`\`\`
 333  ${docContent}
 334  \`\`\`
 335  
 336  Please provide the COMPLETE updated documentation file. Preserve formatting, add new information where needed.`;
 337  
 338          const updatedDoc = await generateCode(
 339            this.agentName,
 340            task.id,
 341            item.file,
 342            requirements,
 343            docContent
 344          );
 345  
 346          // Clean up markdown code fences if Claude added them
 347          let cleanedDoc = updatedDoc.trim();
 348          if (cleanedDoc.startsWith('```')) {
 349            const lines = cleanedDoc.split('\n');
 350            lines.shift(); // Remove opening ```
 351            if (lines[lines.length - 1].trim() === '```') {
 352              lines.pop(); // Remove closing ```
 353            }
 354            cleanedDoc = lines.join('\n');
 355          }
 356  
 357          // Write updated documentation
 358          const result = await writeFile(item.file, cleanedDoc, {
 359            backup: true,
 360            validate: false, // Don't validate non-JS files
 361          });
 362  
 363          updated.push({
 364            file: item.file,
 365            reason: item.reason,
 366            backup_path: result.backupPath,
 367          });
 368  
 369          await this.log('info', 'Documentation updated', {
 370            file: item.file,
 371            backup: result.backupPath,
 372          });
 373        } catch (error) {
 374          errors.push({
 375            file: item.file,
 376            error: error.message,
 377          });
 378  
 379          await this.log('error', 'Failed to update documentation', {
 380            file: item.file,
 381            error: error.message,
 382          });
 383        }
 384      }
 385  
 386      // Verify documentation is accurate
 387      const verificationResults = await this.verifyDocumentation(updated.map(u => u.file));
 388  
 389      // Create commit if updates successful
 390      if (updated.length > 0 && errors.length === 0) {
 391        try {
 392          const fileList = updated.map(u => u.file).join(' ');
 393          execSync(`git add ${fileList}`, { encoding: 'utf8' });
 394  
 395          const commitMsg = `docs: update documentation for recent changes
 396  
 397  Updated files:
 398  ${updated.map(u => `- ${u.file}: ${u.reason}`).join('\n')}
 399  
 400  Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>`;
 401  
 402          execSync(`git commit -m "${commitMsg.replace(/"/g, '\\"')}"`, {
 403            encoding: 'utf8',
 404          });
 405  
 406          await this.log('info', 'Documentation committed', {
 407            files: updated.length,
 408          });
 409        } catch (error) {
 410          await this.log('warn', 'Failed to commit documentation', {
 411            error: error.message,
 412          });
 413        }
 414      }
 415  
 416      await this.completeTask(task.id, {
 417        updated,
 418        errors,
 419        verification: verificationResults,
 420        success: errors.length === 0,
 421      });
 422    }
 423  
 424    /**
 425     * Check documentation freshness
 426     *
 427     * @param {Object} task - Task object
 428     * @returns {Promise<void>}
 429     */
 430    async checkDocumentationFreshness(task) {
 431      await this.log('info', 'Checking documentation freshness', {
 432        task_id: task.id,
 433      });
 434  
 435      // Get recent commits
 436      const commits = execSync('git log -10 --name-only --oneline', {
 437        encoding: 'utf8',
 438      });
 439  
 440      const staleItems = [];
 441  
 442      // Parse changed files
 443      const changedFiles = this.parseGitLog(commits);
 444  
 445      // Rule 1: New env vars → .env.example
 446      const codeFiles = changedFiles.filter(f => f.endsWith('.js'));
 447      for (const file of codeFiles) {
 448        try {
 449          const content = await fs.readFile(file, 'utf8');
 450          const envVars = content.match(/process\.env\.([A-Z_]+)/g) || [];
 451  
 452          for (const envVar of envVars) {
 453            const varName = envVar.replace('process.env.', '');
 454  
 455            // Check if in .env.example
 456            const exampleFile = await fs.readFile('.env.example', 'utf8');
 457  
 458            if (!exampleFile.includes(varName)) {
 459              staleItems.push({
 460                file: '.env.example',
 461                reason: `Missing env var: ${varName}`,
 462                fix: `Add ${varName}= to .env.example`,
 463              });
 464            }
 465          }
 466        } catch (error) {
 467          // File might not exist
 468        }
 469      }
 470  
 471      // Rule 2: New npm scripts → README.md
 472      if (changedFiles.includes('package.json')) {
 473        try {
 474          const diff = execSync('git diff HEAD~1 package.json', {
 475            encoding: 'utf8',
 476          });
 477  
 478          const newScripts = diff.match(/\+\s*"([a-z:]+)":\s*"/g) || [];
 479  
 480          for (const script of newScripts) {
 481            const scriptName = script.match(/"([^"]+)"/)[1];
 482            const readme = await fs.readFile('README.md', 'utf8');
 483  
 484            if (!readme.includes(`npm run ${scriptName}`)) {
 485              staleItems.push({
 486                file: 'README.md',
 487                reason: `Undocumented npm script: ${scriptName}`,
 488                fix: `Add documentation for \`npm run ${scriptName}\``,
 489              });
 490            }
 491          }
 492        } catch (error) {
 493          // No diff available
 494        }
 495      }
 496  
 497      // Rule 3: Schema changes → db/schema.sql
 498      const migrationFiles = changedFiles.filter(f => f.includes('migrations/'));
 499      if (migrationFiles.length > 0) {
 500        staleItems.push({
 501          file: 'db/schema.sql',
 502          reason: 'New migration created',
 503          fix: 'Update db/schema.sql to reflect latest schema',
 504        });
 505      }
 506  
 507      // Rule 4: Agent changes → docs/06-automation/agent-system.md
 508      const agentFiles = changedFiles.filter(f => f.includes('src/agents/') && f.endsWith('.js'));
 509      if (agentFiles.length > 0) {
 510        staleItems.push({
 511          file: 'docs/06-automation/agent-system.md',
 512          reason: `Agent code modified: ${agentFiles.join(', ')}`,
 513          fix: 'Update agent-system.md to reflect new agent capabilities',
 514        });
 515      }
 516  
 517      // Rule 5: Pipeline stage changes → CLAUDE.md
 518      const pipelineFiles = changedFiles.filter(
 519        f => f.includes('src/stages/') || f.includes('src/pipeline')
 520      );
 521      if (pipelineFiles.length > 0) {
 522        staleItems.push({
 523          file: 'CLAUDE.md',
 524          reason: `Pipeline code modified: ${pipelineFiles.join(', ')}`,
 525          fix: 'Update pipeline documentation in CLAUDE.md',
 526        });
 527      }
 528  
 529      // Rule 6: Use Claude API to detect semantic changes requiring doc updates
 530      if (codeFiles.length > 0) {
 531        const semanticIssues = await this.detectSemanticDocChanges(codeFiles.slice(0, 5));
 532        staleItems.push(...semanticIssues);
 533      }
 534  
 535      await this.log('info', 'Documentation freshness check complete', {
 536        task_id: task.id,
 537        stale_items: staleItems.length,
 538      });
 539  
 540      // Create update_documentation task if stale items found
 541      if (staleItems.length > 0) {
 542        const updateTaskId = await this.createTask({
 543          task_type: 'update_documentation',
 544          assigned_to: 'architect', // Self-assign
 545          priority: 6,
 546          context: {
 547            stale_items: staleItems,
 548          },
 549        });
 550  
 551        await this.completeTask(task.id, {
 552          stale_items: staleItems,
 553          update_task_id: updateTaskId,
 554        });
 555      } else {
 556        await this.completeTask(task.id, {
 557          stale_items: [],
 558          documentation_fresh: true,
 559        });
 560      }
 561    }
 562  
 563    /**
 564     * Use Claude API to detect semantic changes requiring documentation updates
 565     *
 566     * @param {string[]} codeFiles - Changed code files to analyze
 567     * @returns {Promise<Array>} - Stale documentation items
 568     */
 569    async detectSemanticDocChanges(codeFiles) {
 570      const staleItems = [];
 571  
 572      try {
 573        // Get git diffs for changed files
 574        let diffsContext = '';
 575        for (const file of codeFiles) {
 576          try {
 577            const diff = execSync(`git diff HEAD~1 ${file}`, {
 578              encoding: 'utf8',
 579              maxBuffer: 1024 * 1024,
 580            });
 581  
 582            if (diff) {
 583              diffsContext += `\n\n### ${file}\n${diff.slice(0, 1000)}${diff.length > 1000 ? '...' : ''}`;
 584            }
 585          } catch (error) {
 586            // File might be new
 587          }
 588        }
 589  
 590        if (!diffsContext) {
 591          return staleItems;
 592        }
 593  
 594        // Ask Claude to identify semantic changes
 595        const analysisPrompt = `Analyze these code changes and identify if they require documentation updates:
 596  
 597  ${diffsContext}
 598  
 599  Look for:
 600  1. New public APIs or exported functions
 601  2. Changed function signatures or behavior
 602  3. New configuration options or environment variables
 603  4. Breaking changes or deprecations
 604  5. New features or capabilities
 605  
 606  For each finding, specify:
 607  - Which documentation file needs updating (README.md, CLAUDE.md, docs/*.md)
 608  - What changed
 609  - What needs to be documented
 610  
 611  Format as a list:
 612  - [file]: [reason] - [fix]
 613  
 614  If no documentation updates needed, respond with "No documentation updates required."`;
 615  
 616        const analysisResponse = await generateCode(
 617          this.agentName,
 618          0, // No specific task ID for this analysis
 619          'doc-freshness-check',
 620          analysisPrompt,
 621          ''
 622        );
 623  
 624        // Parse response for documentation issues
 625        if (!analysisResponse.includes('No documentation updates required')) {
 626          const matches = analysisResponse.match(/[-*]\s*\[?([^\]]+)\]?:\s*(.+?)\s*-\s*(.+)/g);
 627  
 628          if (matches) {
 629            for (const match of matches) {
 630              const parts = match.match(/[-*]\s*\[?([^\]]+)\]?:\s*(.+?)\s*-\s*(.+)/);
 631              if (parts) {
 632                staleItems.push({
 633                  file: parts[1].trim(),
 634                  reason: parts[2].trim(),
 635                  fix: parts[3].trim(),
 636                });
 637              }
 638            }
 639          }
 640        }
 641      } catch (error) {
 642        await this.log('warn', 'Failed to detect semantic doc changes', {
 643          error: error.message,
 644        });
 645      }
 646  
 647      return staleItems;
 648    }
 649  
 650    /**
 651     * Parse git log output to extract changed files
 652     *
 653     * @param {string} gitLog - Git log output
 654     * @returns {string[]} - Changed file paths
 655     */
 656    parseGitLog(gitLog) {
 657      const lines = gitLog.split('\n');
 658      const files = [];
 659  
 660      for (const line of lines) {
 661        // Skip commit lines (start with hash)
 662        if (/^[a-f0-9]{7}/.test(line)) continue;
 663  
 664        // Skip empty lines
 665        if (!line.trim()) continue;
 666  
 667        // File paths
 668        files.push(line.trim());
 669      }
 670  
 671      return [...new Set(files)]; // Deduplicate
 672    }
 673  
 674    /**
 675     * Check code complexity
 676     *
 677     * @param {Object} task - Task object
 678     * @returns {Promise<void>}
 679     */
 680    async checkComplexity(task) {
 681      const { files } = task.context_json || {};
 682  
 683      await this.log('info', 'Checking code complexity', {
 684        task_id: task.id,
 685        files: files?.length || 'all',
 686      });
 687  
 688      const issues = [];
 689  
 690      const filesToCheck = files || (await this.getJsFiles());
 691  
 692      for (const file of filesToCheck) {
 693        try {
 694          const content = await fs.readFile(file, 'utf8');
 695          const lines = content.split('\n');
 696  
 697          // Check file length
 698          if (lines.length > 150) {
 699            issues.push({
 700              type: 'max_lines',
 701              file,
 702              current: lines.length,
 703              limit: 150,
 704              severity: 'medium',
 705            });
 706          }
 707  
 708          // Check nesting depth
 709          const maxDepth = this.calculateMaxDepth(content);
 710          if (maxDepth > 4) {
 711            issues.push({
 712              type: 'max_depth',
 713              file,
 714              current: maxDepth,
 715              limit: 4,
 716              severity: 'medium',
 717            });
 718          }
 719        } catch (error) {
 720          await this.log('warn', 'Failed to check file complexity', {
 721            file,
 722            error: error.message,
 723          });
 724        }
 725      }
 726  
 727      // Create refactoring tasks for complex files
 728      for (const issue of issues) {
 729        if (issue.severity === 'medium' || issue.severity === 'high') {
 730          await this.createTask({
 731            task_type: 'suggest_refactor',
 732            assigned_to: 'architect',
 733            priority: 5,
 734            context: {
 735              file: issue.file,
 736              complexity_issues: [issue.type],
 737            },
 738          });
 739        }
 740      }
 741  
 742      await this.completeTask(task.id, {
 743        issues,
 744        files_checked: filesToCheck.length,
 745      });
 746    }
 747  
 748    /**
 749     * Calculate maximum nesting depth in code
 750     *
 751     * @param {string} content - File content
 752     * @returns {number} - Maximum depth
 753     */
 754    calculateMaxDepth(content) {
 755      let maxDepth = 0;
 756      let currentDepth = 0;
 757  
 758      for (const char of content) {
 759        if (char === '{') {
 760          currentDepth++;
 761          maxDepth = Math.max(maxDepth, currentDepth);
 762        } else if (char === '}') {
 763          currentDepth--;
 764        }
 765      }
 766  
 767      return maxDepth;
 768    }
 769  
 770    /**
 771     * Get all JS files in src/
 772     *
 773     * @returns {Promise<string[]>} - File paths
 774     */
 775    async getJsFiles() {
 776      try {
 777        const output = execSync('find src -name "*.js" -type f', {
 778          encoding: 'utf8',
 779        });
 780  
 781        return output.trim().split('\n').filter(Boolean);
 782      } catch (error) {
 783        await this.log('error', 'Failed to get JS files', {
 784          error: error.message,
 785        });
 786        return [];
 787      }
 788    }
 789  
 790    /**
 791     * Audit documentation vs reality
 792     * Verifies that documented system behavior matches actual implementation
 793     *
 794     * @param {Object} task - Task object
 795     * @returns {Promise<void>}
 796     */
 797    async auditDocumentation(task) {
 798      const { scope, focus_areas } = task.context_json || {};
 799  
 800      await this.log('info', 'Starting documentation audit', {
 801        task_id: task.id,
 802        scope,
 803        focus_areas,
 804      });
 805  
 806      const discrepancies = [];
 807  
 808      // Audit Agent System behavior
 809      if (!focus_areas || focus_areas.includes('agent_system')) {
 810        // Check if Monitor agent is actually proactive or just reactive
 811        const monitorAgentPath = 'src/agents/monitor.js';
 812        const monitorContent = await fs.readFile(monitorAgentPath, 'utf8');
 813  
 814        // Check: Does Monitor have proactive scanning?
 815        const hasProactiveScanning = monitorContent.includes('ensureRecurringTasks');
 816        const hasScanLogs = monitorContent.includes("case 'scan_logs'");
 817  
 818        if (hasProactiveScanning && hasScanLogs) {
 819          await this.log('info', 'Monitor agent has proactive scanning code', {
 820            task_id: task.id,
 821          });
 822        } else {
 823          discrepancies.push({
 824            doc: 'docs/06-automation/agent-system.md',
 825            claim: 'Monitor agent proactively scans logs',
 826            reality: 'Missing proactive scanning implementation',
 827            severity: 'high',
 828            fix: 'Implement recurring scan_logs tasks in Monitor agent',
 829          });
 830        }
 831      }
 832  
 833      // Audit Pipeline Monitoring
 834      if (!focus_areas || focus_areas.includes('pipeline_monitoring')) {
 835        // Check if any agent checks for pipeline blockages
 836        const monitorAgentPath = 'src/agents/monitor.js';
 837        const monitorContent = await fs.readFile(monitorAgentPath, 'utf8');
 838  
 839        const hasPipelineHealthCheck =
 840          monitorContent.includes('check_pipeline_health') ||
 841          monitorContent.includes('checkPipelineHealth');
 842  
 843        if (!hasPipelineHealthCheck) {
 844          discrepancies.push({
 845            doc: 'docs/06-automation/agent-system.md',
 846            claim: 'Monitor agent checks pipeline health',
 847            reality: 'No pipeline health checking implemented',
 848            severity: 'high',
 849            fix: 'Add check_pipeline_health task type to Monitor agent',
 850          });
 851        }
 852      }
 853  
 854      // Check for missing error detection
 855      if (!focus_areas || focus_areas.includes('error_detection')) {
 856        // Verify error detection workflow exists
 857        const hasErrorWorkflow = await this.checkErrorDetectionWorkflow();
 858  
 859        if (!hasErrorWorkflow) {
 860          discrepancies.push({
 861            doc: 'docs/06-automation/agent-system.md',
 862            claim: 'Automated error detection and classification',
 863            reality: 'Workflow not fully implemented or not triggered',
 864            severity: 'high',
 865            fix: 'Bootstrap Monitor agent with initial scan_logs task',
 866          });
 867        }
 868      }
 869  
 870      await this.log('info', 'Documentation audit complete', {
 871        task_id: task.id,
 872        discrepancies_found: discrepancies.length,
 873      });
 874  
 875      // Create tasks to fix high-severity discrepancies
 876      for (const disc of discrepancies) {
 877        if (disc.severity === 'high' || disc.severity === 'critical') {
 878          await addReviewItem({
 879            file: disc.doc,
 880            reason: `${disc.claim} - Reality: ${disc.reality}`,
 881            type: 'documentation',
 882            priority: 'high',
 883          });
 884        }
 885      }
 886  
 887      await this.completeTask(task.id, {
 888        discrepancies,
 889        total_discrepancies: discrepancies.length,
 890        high_severity: discrepancies.filter(d => d.severity === 'high').length,
 891      });
 892    }
 893  
 894    /**
 895     * Check if error detection workflow is implemented
 896     *
 897     * @returns {Promise<boolean>}
 898     */
 899    async checkErrorDetectionWorkflow() {
 900      try {
 901        // Check if there are any completed scan_logs tasks
 902        const scanLogsTask = await getOne(
 903          `SELECT COUNT(*) as count
 904           FROM tel.agent_tasks
 905           WHERE task_type = 'scan_logs'
 906           AND (status = 'completed' OR status = 'running')`
 907        );
 908  
 909        return scanLogsTask ? Number(scanLogsTask.count) > 0 : false;
 910      } catch (error) {
 911        await this.log('warn', 'Failed to check error detection workflow', {
 912          error: error.message,
 913        });
 914        return false;
 915      }
 916    }
 917  
 918    /**
 919     * Create design proposal for feature or significant change
 920     *
 921     * @param {Object} task - Task with feature details
 922     * @returns {Promise<void>}
 923     */
 924    async createDesignProposal(task) {
 925      const context = task.context_json || {};
 926      let { feature_description, requirements, significance, workflow_type, files_to_analyze } =
 927        context;
 928  
 929      // Handle bug fix workflows where feature_description might come from error_message
 930      if (!feature_description && context.error_message) {
 931        feature_description = `Bug Fix: ${context.error_message}`;
 932        await this.log('info', 'Using error_message as feature_description for bug fix', {
 933          task_id: task.id,
 934          error_type: context.error_type,
 935        });
 936      }
 937  
 938      if (!context || !feature_description) {
 939        const errorMsg = !context
 940          ? 'Missing context_json in task'
 941          : 'Missing required field: feature_description or error_message in context';
 942        await this.failTask(task.id, errorMsg);
 943        return;
 944      }
 945  
 946      await this.log('info', 'Creating design proposal', {
 947        task_id: task.id,
 948        feature: feature_description,
 949        significance,
 950      });
 951  
 952      // 1. Analyze requirements and existing codebase
 953      const codebaseContext = await this.analyzeCodebase(feature_description, files_to_analyze);
 954  
 955      // 2. Use Claude API to generate comprehensive design proposal
 956      const designPrompt = `You are an expert software architect reviewing a feature request for the 333Method automation system.
 957  
 958  Feature Description: ${feature_description}
 959  
 960  ${requirements ? `Requirements:\n${Array.isArray(requirements) ? requirements.join('\n') : requirements}\n` : ''}
 961  
 962  Codebase Context:
 963  ${codebaseContext}
 964  
 965  Generate a comprehensive design proposal with the following structure:
 966  
 967  1. **Summary**: Brief overview of the proposed changes
 968  2. **Approach**: Technical approach with specific implementation steps
 969  3. **Files Affected**: List of files to create/modify
 970  4. **Risks**: Potential risks and mitigation strategies
 971  5. **Alternatives Considered**: Alternative approaches and why this is preferred
 972  6. **Estimated Effort**: Time estimate in hours
 973  7. **Breaking Changes**: List any breaking changes (if none, state "None")
 974  8. **Migration Required**: Whether database migrations needed (yes/no)
 975  9. **Testing Strategy**: How to test the changes
 976  
 977  Focus on simplicity, avoid over-engineering, and align with TOGAF/SRE best practices.`;
 978  
 979      const designResponse = await generateCode(
 980        this.agentName,
 981        task.id,
 982        'design-proposal',
 983        designPrompt,
 984        '' // No existing content
 985      );
 986  
 987      // Parse the design response into structured proposal
 988      const proposal = this.parseDesignResponse(designResponse, feature_description);
 989  
 990      await this.log('info', 'Design proposal created', {
 991        task_id: task.id,
 992        files_affected: proposal.files_affected.length,
 993        estimated_effort: proposal.estimated_effort,
 994      });
 995  
 996      // 3. Determine if PO approval needed
 997      const needsPoApproval =
 998        significance === 'significant' ||
 999        proposal.breaking_changes.length > 0 ||
1000        proposal.requires_migration ||
1001        proposal.estimated_effort > 4;
1002  
1003      if (needsPoApproval) {
1004        // Request PO approval
1005        await this.requestPoApproval(task.id, proposal);
1006  
1007        await this.log('info', 'Design proposal awaiting PO approval', {
1008          task_id: task.id,
1009          proposal: proposal.summary,
1010        });
1011      } else {
1012        // Minor change, auto-approve and continue
1013        await this.approveTask(task.id, 'architect', {
1014          decision: 'approved',
1015          notes: 'Minor change, no PO approval required',
1016        });
1017  
1018        // Create implementation plan task for Developer
1019        await this.createTask({
1020          task_type: 'implementation_plan',
1021          assigned_to: 'developer',
1022          parent_task_id: task.id,
1023          priority: task.priority || 6,
1024          context: {
1025            design_proposal: proposal,
1026          },
1027        });
1028  
1029        await this.completeTask(task.id, {
1030          approved: true,
1031          proposal,
1032          auto_approved: true,
1033        });
1034      }
1035    }
1036  
1037    /**
1038     * Analyze codebase for design context
1039     *
1040     * @param {string} featureDescription - Feature being implemented
1041     * @param {string[]} filesToAnalyze - Specific files to analyze (optional)
1042     * @returns {Promise<string>} - Codebase context summary
1043     */
1044    async analyzeCodebase(featureDescription, filesToAnalyze = null) {
1045      try {
1046        // If specific files provided, analyze those
1047        if (filesToAnalyze && filesToAnalyze.length > 0) {
1048          let context = '';
1049          for (const file of filesToAnalyze.slice(0, 5)) {
1050            // Limit to 5 files
1051            try {
1052              const { content } = await readFile(file);
1053              context += `\n\n### ${file}\n\`\`\`javascript\n${content.slice(0, 1000)}${content.length > 1000 ? '...' : ''}\n\`\`\``;
1054            } catch (error) {
1055              // File might not exist
1056            }
1057          }
1058          return context || 'No specific files provided for context.';
1059        }
1060  
1061        // Otherwise, search for relevant patterns
1062        let context = 'Relevant codebase patterns:\n';
1063  
1064        // Search for similar functionality keywords
1065        const keywords = featureDescription.toLowerCase().split(' ').slice(0, 3);
1066        const jsFiles = await this.getJsFiles();
1067  
1068        for (const keyword of keywords) {
1069          const matchingFiles = jsFiles.filter(f => f.toLowerCase().includes(keyword)).slice(0, 3);
1070  
1071          if (matchingFiles.length > 0) {
1072            context += `\n**Files related to "${keyword}":**\n`;
1073            for (const file of matchingFiles) {
1074              context += `- ${file}\n`;
1075            }
1076          }
1077        }
1078  
1079        return context;
1080      } catch (error) {
1081        await this.log('warn', 'Failed to analyze codebase', {
1082          error: error.message,
1083        });
1084        return 'Unable to analyze codebase context.';
1085      }
1086    }
1087  
1088    /**
1089     * Parse Claude's design response into structured proposal
1090     *
1091     * @param {string} designResponse - Raw design response from Claude
1092     * @param {string} featureDescription - Original feature description
1093     * @returns {Object} - Structured proposal
1094     */
1095    parseDesignResponse(designResponse, featureDescription) {
1096      const proposal = {
1097        title: `Design: ${featureDescription}`,
1098        summary: '',
1099        approach: '',
1100        files_affected: [],
1101        risks: [],
1102        alternatives_considered: [],
1103        estimated_effort: 4,
1104        requires_migration: false,
1105        breaking_changes: [],
1106        testing_strategy: '',
1107        raw_response: designResponse,
1108      };
1109  
1110      // Extract summary
1111      const summaryMatch = designResponse.match(/\*\*Summary\*\*:?\s*(.+?)(?:\n\n|\*\*)/s);
1112      if (summaryMatch) {
1113        proposal.summary = summaryMatch[1].trim();
1114      } else {
1115        proposal.summary = `Implement ${featureDescription}`;
1116      }
1117  
1118      // Extract approach
1119      const approachMatch = designResponse.match(/\*\*Approach\*\*:?\s*(.+?)(?:\n\n|\*\*)/s);
1120      if (approachMatch) {
1121        proposal.approach = approachMatch[1].trim();
1122      }
1123  
1124      // Extract files affected
1125      const filesMatch = designResponse.match(/\*\*Files Affected\*\*:?\s*(.+?)(?:\n\n|\*\*)/s);
1126      if (filesMatch) {
1127        const filesList = filesMatch[1].match(/[-*]\s*`?([a-zA-Z0-9_/.]+\.js)`?/g);
1128        if (filesList) {
1129          proposal.files_affected = filesList.map(f => f.replace(/[-*]\s*`?/, '').replace('`', ''));
1130        }
1131      }
1132  
1133      // Extract risks
1134      const risksMatch = designResponse.match(/\*\*Risks\*\*:?\s*(.+?)(?:\n\n|\*\*)/s);
1135      if (risksMatch) {
1136        const risksList = risksMatch[1].match(/[-*]\s*(.+)/g);
1137        if (risksList) {
1138          proposal.risks = risksList.map(r => r.replace(/[-*]\s*/, '').trim());
1139        }
1140      }
1141  
1142      // Extract estimated effort
1143      const effortMatch = designResponse.match(/\*\*Estimated Effort\*\*:?\s*(\d+)/);
1144      if (effortMatch) {
1145        proposal.estimated_effort = parseInt(effortMatch[1], 10);
1146      }
1147  
1148      // Check for breaking changes
1149      const breakingMatch = designResponse.match(/\*\*Breaking Changes\*\*:?\s*(.+?)(?:\n\n|\*\*)/s);
1150      if (breakingMatch && !breakingMatch[1].toLowerCase().includes('none')) {
1151        const breakingList = breakingMatch[1].match(/[-*]\s*(.+)/g);
1152        if (breakingList) {
1153          proposal.breaking_changes = breakingList.map(b => b.replace(/[-*]\s*/, '').trim());
1154        }
1155      }
1156  
1157      // Check for migration required
1158      const migrationMatch = designResponse.match(/\*\*Migration Required\*\*:?\s*(yes|no)/i);
1159      if (migrationMatch) {
1160        proposal.requires_migration = migrationMatch[1].toLowerCase() === 'yes';
1161      }
1162  
1163      // Extract testing strategy
1164      const testingMatch = designResponse.match(/\*\*Testing Strategy\*\*:?\s*(.+?)(?:\n\n|\*\*|$)/s);
1165      if (testingMatch) {
1166        proposal.testing_strategy = testingMatch[1].trim();
1167      }
1168  
1169      return proposal;
1170    }
1171  
1172    /**
1173     * Review implementation plan for technical soundness
1174     *
1175     * @param {Object} task - Task with implementation plan
1176     * @returns {Promise<void>}
1177     */
1178    async reviewImplementationPlan(task) {
1179      const context = task.context_json || {};
1180      const { implementation_plan, original_task_id } = context;
1181  
1182      if (!context || !implementation_plan || !original_task_id) {
1183        const missingFields = [];
1184        if (!context) missingFields.push('context_json');
1185        if (!implementation_plan) missingFields.push('implementation_plan');
1186        if (!original_task_id) missingFields.push('original_task_id');
1187  
1188        await this.failTask(task.id, `Missing required fields: ${missingFields.join(', ')}`);
1189        return;
1190      }
1191  
1192      await this.log('info', 'Reviewing implementation plan', {
1193        task_id: task.id,
1194        plan: implementation_plan.summary,
1195      });
1196  
1197      // Use Claude API for comprehensive architectural review
1198      const reviewPrompt = `You are an expert software architect reviewing an implementation plan for the 333Method automation system.
1199  
1200  Implementation Plan:
1201  ${JSON.stringify(implementation_plan, null, 2)}
1202  
1203  Review the plan for:
1204  1. **Architectural Soundness**: Does it follow best practices? Any over-engineering?
1205  2. **Complexity**: Will files exceed 150 lines? Should they be split?
1206  3. **Testing Coverage**: Is there a clear test plan targeting 85%+ coverage?
1207  4. **Documentation**: Are documentation updates included?
1208  5. **TOGAF/SRE Alignment**: Does it follow enterprise architecture principles?
1209  
1210  Provide a structured review with:
1211  - **Issues**: List of concerns (categorize as high/medium/low severity)
1212  - **Recommendations**: Specific improvements needed
1213  - **Approval**: Should this be approved as-is? (yes/no)
1214  
1215  Be critical but constructive. Focus on catching issues before implementation.`;
1216  
1217      const reviewResponse = await generateCode(
1218        this.agentName,
1219        task.id,
1220        'implementation-review',
1221        reviewPrompt,
1222        ''
1223      );
1224  
1225      // Parse review response
1226      const issues = this.parseReviewResponse(reviewResponse);
1227  
1228      // Also perform automated checks
1229      const automatedIssues = await this.performAutomatedReviewChecks(implementation_plan);
1230      issues.push(...automatedIssues);
1231  
1232      await this.log('info', 'Implementation plan review complete', {
1233        task_id: task.id,
1234        total_issues: issues.length,
1235        high_severity: issues.filter(i => i.severity === 'high').length,
1236      });
1237  
1238      // Check test coverage plan
1239      if (!implementation_plan.test_plan || !implementation_plan.test_plan.coverage_target) {
1240        issues.push({
1241          type: 'missing_test_plan',
1242          severity: 'high',
1243          description: 'Implementation plan lacks test coverage plan',
1244        });
1245      } else if (implementation_plan.test_plan.coverage_target < 85) {
1246        issues.push({
1247          type: 'low_coverage_target',
1248          severity: 'high',
1249          description: `Coverage target ${implementation_plan.test_plan.coverage_target}% is below 85% requirement`,
1250        });
1251      }
1252  
1253      const approved = issues.filter(i => i.severity === 'high').length === 0;
1254  
1255      if (approved) {
1256        // Approve implementation plan
1257        await this.approveTask(original_task_id, 'architect', {
1258          decision: 'approved',
1259          notes: 'Implementation plan is technically sound',
1260          conditions: issues.filter(i => i.severity === 'medium').map(i => i.description),
1261        });
1262  
1263        // Transition original task from blocked to pending (unblock after approval)
1264        const { updateTaskStatus } = await import('./utils/task-manager.js');
1265        updateTaskStatus(original_task_id, 'pending');
1266  
1267        await this.completeTask(task.id, {
1268          approved: true,
1269          issues_found: issues,
1270        });
1271      } else {
1272        // Request changes
1273        await this.failTask(
1274          task.id,
1275          `Implementation plan has ${issues.filter(i => i.severity === 'high').length} high-severity issues requiring resolution`
1276        );
1277  
1278        // Send feedback to Developer
1279        await this.askQuestion(
1280          original_task_id,
1281          'developer',
1282          `Implementation plan needs revision: ${issues
1283            .filter(i => i.severity === 'high')
1284            .map(i => i.description)
1285            .join(', ')}`
1286        );
1287      }
1288    }
1289  
1290    /**
1291     * Parse Claude's review response into structured issues
1292     *
1293     * @param {string} reviewResponse - Raw review response from Claude
1294     * @returns {Array} - List of issues
1295     */
1296    parseReviewResponse(reviewResponse) {
1297      const issues = [];
1298  
1299      // Extract issues section
1300      const issuesMatch = reviewResponse.match(/\*\*Issues\*\*:?\s*(.+?)(?:\*\*|$)/s);
1301      if (issuesMatch) {
1302        const issuesList = issuesMatch[1].match(/[-*]\s*\[?(high|medium|low)\]?\s*(.+)/gi);
1303        if (issuesList) {
1304          for (const issueText of issuesList) {
1305            const severityMatch = issueText.match(/\[?(high|medium|low)\]?/i);
1306            const severity = severityMatch ? severityMatch[1].toLowerCase() : 'medium';
1307            const description = issueText.replace(/[-*]\s*\[?(high|medium|low)\]?\s*/i, '').trim();
1308  
1309            issues.push({
1310              type: 'review_finding',
1311              severity,
1312              description,
1313              source: 'claude_review',
1314            });
1315          }
1316        }
1317      }
1318  
1319      return issues;
1320    }
1321  
1322    /**
1323     * Perform automated checks on implementation plan
1324     *
1325     * @param {Object} implementationPlan - Implementation plan to review
1326     * @returns {Promise<Array>} - List of automated issues found
1327     */
1328    async performAutomatedReviewChecks(implementationPlan) {
1329      const issues = [];
1330  
1331      // Check file complexity
1332      for (const file of implementationPlan.files_to_modify || []) {
1333        try {
1334          const content = await fs.readFile(file, 'utf8');
1335          const lines = content.split('\n').length;
1336  
1337          // Check if file will exceed 150 lines after changes
1338          if (lines > 130) {
1339            // Buffer for new code
1340            issues.push({
1341              type: 'max_lines_risk',
1342              file,
1343              current_lines: lines,
1344              severity: 'medium',
1345              description: `File ${file} has ${lines} lines, may exceed 150 line limit with new changes`,
1346              source: 'automated',
1347            });
1348          }
1349        } catch (error) {
1350          // File might not exist yet (new file) - acceptable
1351        }
1352      }
1353  
1354      // Check test coverage plan
1355      if (!implementationPlan.test_plan || !implementationPlan.test_plan.coverage_target) {
1356        issues.push({
1357          type: 'missing_test_plan',
1358          severity: 'high',
1359          description: 'Implementation plan lacks test coverage plan',
1360          source: 'automated',
1361        });
1362      } else if (implementationPlan.test_plan.coverage_target < 85) {
1363        issues.push({
1364          type: 'low_coverage_target',
1365          severity: 'high',
1366          description: `Coverage target ${implementationPlan.test_plan.coverage_target}% is below 85% requirement`,
1367          source: 'automated',
1368        });
1369      }
1370  
1371      // Check for documentation updates
1372      const hasDocUpdates =
1373        implementationPlan.files_to_modify?.some(f => f.endsWith('.md')) ||
1374        implementationPlan.documentation_updates;
1375  
1376      if (!hasDocUpdates) {
1377        issues.push({
1378          type: 'missing_documentation',
1379          severity: 'medium',
1380          description: 'No documentation updates included in plan',
1381          source: 'automated',
1382        });
1383      }
1384  
1385      return issues;
1386    }
1387  
1388    /**
1389     * Check branch health - prevent stale branches and ensure autofix alignment
1390     *
1391     * @param {Object} task - Task object
1392     * @returns {Promise<void>}
1393     */
1394    async checkBranchHealth(task) {
1395      const {
1396        check_stale_branches = true,
1397        ensure_autofix_aligned = true,
1398        max_divergence_commits = 5,
1399      } = task.context_json || {};
1400  
1401      await this.log('info', 'Checking branch health', {
1402        task_id: task.id,
1403      });
1404  
1405      const issues = [];
1406  
1407      try {
1408        // Get current branch
1409        const currentBranch = execSync('git branch --show-current', {
1410          encoding: 'utf8',
1411        }).trim();
1412  
1413        // Check if autofix exists and is aligned with main
1414        if (ensure_autofix_aligned) {
1415          try {
1416            const branches = execSync('git branch', { encoding: 'utf8' });
1417  
1418            if (branches.includes('autofix')) {
1419              // Check divergence from main
1420              const divergence = execSync('git rev-list --left-right --count main...autofix', {
1421                encoding: 'utf8',
1422              }).trim();
1423  
1424              const [mainAhead, autofixAhead] = divergence.split('\t').map(Number);
1425  
1426              if (mainAhead > max_divergence_commits) {
1427                issues.push({
1428                  type: 'stale_branch',
1429                  branch: 'autofix',
1430                  main_ahead: mainAhead,
1431                  branch_ahead: autofixAhead,
1432                  severity: 'high',
1433                  fix: `Reset autofix: git branch -D autofix && git checkout -b autofix main`,
1434                });
1435  
1436                await this.log('warn', 'Autofix branch is stale', {
1437                  task_id: task.id,
1438                  main_ahead: mainAhead,
1439                  autofix_ahead: autofixAhead,
1440                });
1441  
1442                // Add to human review
1443                await addReviewItem({
1444                  file: 'Git Branches',
1445                  reason: `autofix branch is ${mainAhead} commits behind main - reset recommended`,
1446                  type: 'maintenance',
1447                  priority: 'medium',
1448                });
1449              } else if (mainAhead === 0 && autofixAhead === 0) {
1450                await this.log('info', 'Autofix branch is aligned with main', {
1451                  task_id: task.id,
1452                });
1453              } else {
1454                await this.log('info', 'Autofix branch has acceptable divergence', {
1455                  task_id: task.id,
1456                  main_ahead: mainAhead,
1457                  autofix_ahead: autofixAhead,
1458                });
1459              }
1460            } else {
1461              // Autofix doesn't exist - create it
1462              issues.push({
1463                type: 'missing_branch',
1464                branch: 'autofix',
1465                severity: 'low',
1466                fix: 'Create autofix: git checkout -b autofix main',
1467              });
1468            }
1469          } catch (error) {
1470            await this.log('warn', 'Failed to check autofix alignment', {
1471              task_id: task.id,
1472              error: error.message,
1473            });
1474          }
1475        }
1476  
1477        // Check for other stale branches
1478        if (check_stale_branches) {
1479          try {
1480            const branches = execSync(
1481              'git for-each-ref --format="%(refname:short) %(committerdate:iso8601)" refs/heads/',
1482              {
1483                encoding: 'utf8',
1484              }
1485            )
1486              .trim()
1487              .split('\n');
1488  
1489            const sixtyDaysAgo = new Date(Date.now() - 60 * 24 * 60 * 60 * 1000);
1490  
1491            for (const branchLine of branches) {
1492              const [branch, dateStr] = branchLine.split(' ');
1493              const lastCommit = new Date(dateStr);
1494  
1495              // Skip main and autofix
1496              if (branch === 'main' || branch === 'autofix') continue;
1497  
1498              if (lastCommit < sixtyDaysAgo) {
1499                issues.push({
1500                  type: 'stale_branch',
1501                  branch,
1502                  last_commit: lastCommit.toISOString(),
1503                  days_old: Math.floor((Date.now() - lastCommit.getTime()) / (24 * 60 * 60 * 1000)),
1504                  severity: 'low',
1505                  fix: `Delete stale branch: git branch -D ${branch}`,
1506                });
1507  
1508                await this.log('info', 'Found stale branch', {
1509                  task_id: task.id,
1510                  branch,
1511                  last_commit: lastCommit.toISOString(),
1512                });
1513              }
1514            }
1515          } catch (error) {
1516            await this.log('warn', 'Failed to check stale branches', {
1517              task_id: task.id,
1518              error: error.message,
1519            });
1520          }
1521        }
1522  
1523        await this.log('info', 'Branch health check complete', {
1524          task_id: task.id,
1525          issues_found: issues.length,
1526        });
1527  
1528        await this.completeTask(task.id, {
1529          issues,
1530          total_issues: issues.length,
1531          stale_branches: issues.filter(i => i.type === 'stale_branch').length,
1532          missing_branches: issues.filter(i => i.type === 'missing_branch').length,
1533        });
1534      } catch (error) {
1535        await this.log('error', 'Branch health check failed', {
1536          task_id: task.id,
1537          error: error.message,
1538        });
1539  
1540        await this.failTask(task.id, error.message);
1541      }
1542    }
1543  
1544    /**
1545     * Profile pipeline performance and identify bottlenecks
1546     *
1547     * @param {Object} task - Task object
1548     * @returns {Promise<void>}
1549     */
1550    async profilePerformance(task) {
1551      const { threshold_ms = 60000, days_back = 7 } = task.context_json || {};
1552  
1553      await this.log('info', 'Profiling pipeline performance', {
1554        task_id: task.id,
1555        threshold_ms,
1556        days_back,
1557      });
1558  
1559      try {
1560        // Query pipeline_metrics for slow operations
1561        const slowOps = await getAll(
1562          `SELECT
1563            stage_name,
1564            AVG(duration_ms) as avg_duration,
1565            MAX(duration_ms) as max_duration,
1566            MIN(duration_ms) as min_duration,
1567            COUNT(*) as run_count,
1568            SUM(sites_failed) as total_failures,
1569            SUM(sites_succeeded) as total_successes
1570           FROM tel.pipeline_metrics
1571           WHERE started_at >= NOW() - ($1 || ' days')::interval
1572           GROUP BY stage_name
1573           HAVING AVG(duration_ms) > $2
1574           ORDER BY avg_duration DESC`,
1575          [days_back, threshold_ms]
1576        );
1577  
1578        const bottlenecks = [];
1579  
1580        for (const op of slowOps) {
1581          const bottleneck = {
1582            stage: op.stage_name,
1583            avg_duration_ms: Math.round(op.avg_duration),
1584            max_duration_ms: op.max_duration,
1585            min_duration_ms: op.min_duration,
1586            run_count: op.run_count,
1587            failure_rate:
1588              op.total_successes + op.total_failures > 0
1589                ? (op.total_failures / (op.total_successes + op.total_failures)) * 100
1590                : 0,
1591            severity: this.classifyPerformanceIssue(op.avg_duration, threshold_ms),
1592          };
1593  
1594          bottlenecks.push(bottleneck);
1595  
1596          await this.log('info', 'Performance bottleneck identified', {
1597            stage: op.stage_name,
1598            avg_duration_ms: bottleneck.avg_duration_ms,
1599            severity: bottleneck.severity,
1600          });
1601  
1602          // Create refactor task for high/critical bottlenecks
1603          if (bottleneck.severity === 'high' || bottleneck.severity === 'critical') {
1604            await this.createTask({
1605              task_type: 'suggest_refactor',
1606              assigned_to: 'architect',
1607              priority: bottleneck.severity === 'critical' ? 8 : 6,
1608              context: {
1609                file: `src/stages/${op.stage_name}.js`,
1610                complexity_issues: [
1611                  `Performance bottleneck: ${bottleneck.avg_duration_ms}ms average duration (threshold: ${threshold_ms}ms)`,
1612                  `Failure rate: ${bottleneck.failure_rate.toFixed(1)}%`,
1613                ],
1614                performance_data: bottleneck,
1615              },
1616            });
1617          }
1618        }
1619  
1620        await this.log('info', 'Performance profiling complete', {
1621          task_id: task.id,
1622          bottlenecks_found: bottlenecks.length,
1623        });
1624  
1625        await this.completeTask(task.id, {
1626          bottlenecks,
1627          total_issues: bottlenecks.length,
1628          high_severity: bottlenecks.filter(b => b.severity === 'high' || b.severity === 'critical')
1629            .length,
1630        });
1631      } catch (error) {
1632        await this.log('error', 'Performance profiling failed', {
1633          task_id: task.id,
1634          error: error.message,
1635        });
1636        await this.failTask(task.id, error.message);
1637      }
1638    }
1639  
1640    /**
1641     * Classify performance issue severity
1642     *
1643     * @param {number} avgDuration - Average duration in ms
1644     * @param {number} threshold - Threshold in ms
1645     * @returns {string} - Severity level
1646     */
1647    classifyPerformanceIssue(avgDuration, threshold) {
1648      const ratio = avgDuration / threshold;
1649  
1650      if (ratio >= 5) return 'critical'; // 5x over threshold
1651      if (ratio >= 3) return 'high'; // 3x over threshold
1652      if (ratio >= 2) return 'medium'; // 2x over threshold
1653      return 'low';
1654    }
1655  
1656    /**
1657     * Identify affected documentation files based on code changes
1658     *
1659     * @param {string[]} files - Changed files
1660     * @param {string} changeType - Type of change (new_feature, bug_fix, refactor, etc.)
1661     * @returns {Array<{file: string, reason: string, fix: string}>} - Affected docs
1662     */
1663    identifyAffectedDocs(files, changeType) {
1664      const affectedDocs = [];
1665  
1666      // Check for schema changes
1667      if (files.some(f => f.includes('migrations/') || f.includes('schema.sql'))) {
1668        affectedDocs.push({
1669          file: 'db/schema.sql',
1670          reason: 'Database migration added',
1671          fix: 'Update schema.sql to reflect latest migrations',
1672        });
1673      }
1674  
1675      // Check for package.json changes
1676      if (files.includes('package.json')) {
1677        affectedDocs.push({
1678          file: 'README.md',
1679          reason: 'Package.json modified (may have new scripts or dependencies)',
1680          fix: 'Update README.md with new npm scripts or dependencies',
1681        });
1682      }
1683  
1684      // Check for new agent functionality
1685      if (files.some(f => f.includes('src/agents/'))) {
1686        affectedDocs.push({
1687          file: 'docs/06-automation/agent-system.md',
1688          reason: 'Agent code modified',
1689          fix: 'Update agent documentation to reflect new capabilities',
1690        });
1691      }
1692  
1693      // Check for pipeline stage changes
1694      if (files.some(f => f.includes('src/stages/') || f.includes('src/pipeline'))) {
1695        affectedDocs.push({
1696          file: 'CLAUDE.md',
1697          reason: 'Pipeline stage modified',
1698          fix: 'Update pipeline documentation in CLAUDE.md',
1699        });
1700      }
1701  
1702      // Check for new environment variables in code
1703      for (const file of files) {
1704        try {
1705          const content = fs.readFileSync(file, 'utf8');
1706          const envVars = content.match(/process\.env\.([A-Z_]+)/g) || [];
1707  
1708          if (envVars.length > 0) {
1709            affectedDocs.push({
1710              file: '.env.example',
1711              reason: `New environment variables in ${file}`,
1712              fix: `Add ${envVars.join(', ')} to .env.example`,
1713            });
1714            break; // Only add once
1715          }
1716        } catch (error) {
1717          // File might not exist or be readable
1718        }
1719      }
1720  
1721      return affectedDocs;
1722    }
1723  
1724    /**
1725     * Summarize recent changes for documentation context
1726     *
1727     * @param {string[]} files - Changed files
1728     * @returns {Promise<string>} - Summary of changes
1729     */
1730    async summarizeChanges(files) {
1731      if (!files || files.length === 0) {
1732        return 'No specific files provided. Check recent git commits.';
1733      }
1734  
1735      let summary = '';
1736  
1737      for (const file of files.slice(0, 10)) {
1738        // Limit to 10 files
1739        try {
1740          const diff = execSync(`git diff HEAD~1 ${file}`, {
1741            encoding: 'utf8',
1742            maxBuffer: 1024 * 1024, // 1MB max
1743          });
1744  
1745          if (diff) {
1746            summary += `\n\n### ${file}\n${diff.slice(0, 500)}${diff.length > 500 ? '...' : ''}`;
1747          }
1748        } catch (error) {
1749          // File might be new or git command failed
1750          summary += `\n\n### ${file}\n(New file or diff unavailable)`;
1751        }
1752      }
1753  
1754      return summary || 'No changes detected in git diff.';
1755    }
1756  
1757    /**
1758     * Verify documentation is accurate and up-to-date
1759     *
1760     * @param {string[]} docFiles - Documentation files to verify
1761     * @returns {Promise<Object>} - Verification results
1762     */
1763    async verifyDocumentation(docFiles) {
1764      const results = {
1765        verified: [],
1766        warnings: [],
1767        errors: [],
1768      };
1769  
1770      for (const docFile of docFiles) {
1771        try {
1772          const { content } = await readFile(docFile);
1773  
1774          // Basic checks
1775          const checks = {
1776            has_content: content.length > 0,
1777            no_todo_markers: !content.includes('TODO') && !content.includes('FIXME'),
1778            no_placeholder_text: !content.includes('[placeholder]') && !content.includes('TBD'),
1779            proper_formatting: docFile.endsWith('.md') ? content.includes('#') : true,
1780          };
1781  
1782          const passed = Object.values(checks).every(v => v);
1783  
1784          if (passed) {
1785            results.verified.push({
1786              file: docFile,
1787              status: 'verified',
1788            });
1789          } else {
1790            results.warnings.push({
1791              file: docFile,
1792              issues: Object.entries(checks)
1793                .filter(([, v]) => !v)
1794                .map(([k]) => k),
1795            });
1796          }
1797        } catch (error) {
1798          results.errors.push({
1799            file: docFile,
1800            error: error.message,
1801          });
1802        }
1803      }
1804  
1805      return results;
1806    }
1807  
1808    /**
1809     * Review documentation for accuracy and completeness
1810     *
1811     * @param {Object} task - Task with documentation to review
1812     * @returns {Promise<void>}
1813     */
1814    async reviewDocumentation(task) {
1815      const { files, focus_areas } = task.context_json || {};
1816  
1817      await this.log('info', 'Reviewing documentation', {
1818        task_id: task.id,
1819        files: files?.length || 'all',
1820      });
1821  
1822      const docFiles = files || ['README.md', 'CLAUDE.md', 'docs/06-automation/agent-system.md'];
1823      const results = await this.verifyDocumentation(docFiles);
1824  
1825      await this.log('info', 'Documentation review complete', {
1826        task_id: task.id,
1827        verified: results.verified.length,
1828        warnings: results.warnings.length,
1829        errors: results.errors.length,
1830      });
1831  
1832      // Add to human review queue if issues found
1833      if (results.errors.length > 0 || results.warnings.length > 0) {
1834        for (const warning of results.warnings) {
1835          await addReviewItem({
1836            file: warning.file,
1837            reason: `Documentation issues: ${warning.issues.join(', ')}`,
1838            type: 'documentation',
1839            priority: 'medium',
1840          });
1841        }
1842      }
1843  
1844      await this.completeTask(task.id, results);
1845    }
1846  
1847    /**
1848     * Analyze an SLO violation or performance optimization request.
1849     * Queries real stage timing data from DB and adds a human review item.
1850     *
1851     * @param {Object} task - Task with optimization_type, stage_name, current_p95, target_duration
1852     * @returns {Promise<void>}
1853     */
1854    async analyzeOptimization(task) {
1855      const { optimization_type, stage_name, description, current_p95, target_duration, severity } =
1856        task.context_json || {};
1857  
1858      await this.log('info', 'Analyzing optimization request', {
1859        task_id: task.id,
1860        optimization_type,
1861        stage_name,
1862        severity,
1863      });
1864  
1865      // Query recent stage duration percentiles from pipeline_metrics
1866      let stageSummary = null;
1867      try {
1868        const row = await getOne(
1869          `SELECT
1870            COUNT(*) AS sample_count,
1871            ROUND(AVG(duration_ms) / 60000.0, 1) AS avg_min,
1872            ROUND(MIN(duration_ms) / 60000.0, 1) AS min_min,
1873            ROUND(MAX(duration_ms) / 60000.0, 1) AS max_min
1874           FROM tel.pipeline_metrics
1875           WHERE stage = $1
1876             AND created_at > NOW() - INTERVAL '24 hours'
1877             AND success = 1`,
1878          [stage_name]
1879        );
1880        if (row && Number(row.sample_count) > 0) {
1881          stageSummary = row;
1882        }
1883      } catch {
1884        // non-fatal — continue without DB data
1885      }
1886  
1887      const result = {
1888        optimization_type: optimization_type || 'slo_violation',
1889        stage_name,
1890        severity,
1891        description,
1892        current_p95_minutes: current_p95,
1893        target_duration_minutes: target_duration,
1894        stage_summary_24h: stageSummary,
1895        recommendation:
1896          `SLO violation for stage '${stage_name}': P95=${current_p95}min (target: ${target_duration}min). ` +
1897          `This requires human investigation — check pipeline concurrency settings, ` +
1898          `queue depth at that stage, and whether the stage is CPU/IO bound.`,
1899      };
1900  
1901      // Always add to human review so a human can investigate
1902      await addReviewItem({
1903        file: `Pipeline Stage: ${stage_name}`,
1904        reason: `SLO violation (${severity}): ${description || `P95=${current_p95}min, target=${target_duration}min`}`,
1905        type: 'performance',
1906        priority: severity === 'critical' ? 'high' : 'medium',
1907      });
1908  
1909      await this.log('info', 'Optimization analysis complete — added to human review', {
1910        task_id: task.id,
1911        stage_name,
1912        current_p95,
1913        target_duration,
1914      });
1915  
1916      await this.completeTask(task.id, result);
1917    }
1918  }