// precompute-dashboard.js
/**
 * Dashboard Pre-Computation Script
 *
 * Pre-computes frequently accessed dashboard metrics and stores them in cache.
 * This dramatically improves dashboard load times by avoiding expensive queries on every page load.
 *
 * Metrics cached:
 * - Pipeline funnel (count of sites at each stage)
 * - Response rates by channel
 * - Outreach funnel (delivery rates)
 * - LLM usage by stage
 * - Daily throughput (last 30 days)
 * - Conversation stats
 * - Error breakdown
 * - Cron job summary
 *
 * Cache expires after 15 minutes (refreshed every 10 minutes by cron).
 */

import { getAll, getOne, run } from '../utils/db.js';
import Logger from '../utils/logger.js';
import { buildStatusTree, buildOutreachTree } from '../utils/error-categories.js';

const logger = new Logger('precompute-dashboard');

// Cache expiration: 15 minutes. The cron refreshes every 10 minutes, so under
// normal operation entries are replaced before they ever expire.
const CACHE_EXPIRATION_MINUTES = 15;

/**
 * Store a value in the dashboard cache (upsert keyed on cache_key).
 *
 * @param {string} key - Cache key to write.
 * @param {*} value - Any JSON-serializable value; stored via JSON.stringify.
 * @returns {Promise<void>}
 */
async function setCache(key, value) {
  const expiresAt = new Date(Date.now() + CACHE_EXPIRATION_MINUTES * 60 * 1000).toISOString();

  await run(
    `INSERT INTO dashboard_cache (cache_key, cache_value, expires_at, updated_at)
     VALUES ($1, $2, $3, CURRENT_TIMESTAMP)
     ON CONFLICT (cache_key) DO UPDATE SET
       cache_value = EXCLUDED.cache_value,
       expires_at = EXCLUDED.expires_at,
       updated_at = CURRENT_TIMESTAMP`,
    [key, JSON.stringify(value), expiresAt]
  );
}

/**
 * Pre-compute pipeline funnel: count of sites at each pipeline stage, ordered
 * by stage progression. The three 'enriched*' statuses deliberately share
 * rank 6 — they are alternative enrichment paths at the same depth.
 *
 * @returns {Promise<number>} Number of funnel rows cached.
 */
async function cachePipelineFunnel() {
  const results = await getAll(`
    SELECT status, COUNT(*) as count
    FROM sites
    WHERE status NOT IN ('ignored', 'failing')
    GROUP BY status
    ORDER BY
      CASE status
        WHEN 'found' THEN 1
        WHEN 'assets_captured' THEN 2
        WHEN 'prog_scored' THEN 3
        WHEN 'semantic_scored' THEN 4
        WHEN 'vision_scored' THEN 5
        WHEN 'enriched' THEN 6
        WHEN 'enriched_regex' THEN 6
        WHEN 'enriched_llm' THEN 6
        WHEN 'proposals_drafted' THEN 7
        WHEN 'outreach_partial' THEN 8
        WHEN 'outreach_sent' THEN 9
        WHEN 'high_score' THEN 10
      END
  `);
  await setCache('pipeline_funnel', results);
  return results.length;
}

/**
 * Pre-compute response rates by channel.
 *
 * FIX: the previous query LEFT JOINed messages to itself on site_id, which
 * fans out each outbound row once per inbound message for the same site, so
 * COUNT(*) over-counted total_sent and skewed response_rate. Joining against
 * a de-duplicated set of responding sites keeps exactly one row per outbound
 * message, so COUNT(*) is the true sent count; COUNT(DISTINCT r.site_id)
 * still counts distinct responding sites (NULLs from the LEFT JOIN are
 * ignored by COUNT).
 *
 * @returns {Promise<number>} Number of channel rows cached.
 */
async function cacheResponseRates() {
  const results = await getAll(`
    SELECT
      o.contact_method as channel,
      COUNT(*) as total_sent,
      COUNT(DISTINCT r.site_id) as responses,
      ROUND(100.0 * COUNT(DISTINCT r.site_id) / COUNT(*), 2) as response_rate
    FROM messages o
    LEFT JOIN (
      SELECT DISTINCT site_id FROM messages WHERE direction = 'inbound'
    ) r ON r.site_id = o.site_id
    WHERE o.direction = 'outbound' AND o.delivery_status IN ('sent', 'delivered')
    GROUP BY o.contact_method
    ORDER BY response_rate DESC
  `);
  await setCache('response_rates', results);
  return results.length;
}

/**
 * Pre-compute outreach funnel by channel: per-status delivery counts plus an
 * overall delivery rate. Groups have at least one row (WHERE guarantees it),
 * so the rate's COUNT(*) denominator cannot be zero.
 *
 * @returns {Promise<number>} Number of channel rows cached.
 */
async function cacheOutreachFunnel() {
  const results = await getAll(`
    SELECT
      o.contact_method as channel,
      COUNT(*) as total,
      SUM(CASE WHEN o.approval_status = 'pending' THEN 1 ELSE 0 END) as pending,
      SUM(CASE WHEN o.delivery_status = 'sent' THEN 1 ELSE 0 END) as sent,
      SUM(CASE WHEN o.delivery_status = 'delivered' THEN 1 ELSE 0 END) as delivered,
      SUM(CASE WHEN o.delivery_status = 'failed' THEN 1 ELSE 0 END) as failed,
      SUM(CASE WHEN o.delivery_status = 'skipped' THEN 1 ELSE 0 END) as skipped,
      SUM(CASE WHEN o.delivery_status = 'bounced' THEN 1 ELSE 0 END) as bounced,
      ROUND(100.0 * SUM(CASE WHEN o.delivery_status = 'delivered' THEN 1 ELSE 0 END) / COUNT(*), 2) as delivery_rate
    FROM messages o
    WHERE o.direction = 'outbound'
    GROUP BY o.contact_method
    ORDER BY total DESC
  `);
  await setCache('outreach_funnel', results);
  return results.length;
}

/**
 * Pre-compute LLM usage by stage
 */
122 async function cacheLLMUsage() { 123 const results = await getAll(` 124 SELECT 125 stage, 126 SUM(prompt_tokens) as prompt_tokens, 127 SUM(completion_tokens) as completion_tokens, 128 SUM(total_tokens) as total_tokens, 129 SUM(estimated_cost) as total_cost, 130 COUNT(*) as request_count, 131 AVG(estimated_cost) as avg_cost_per_request 132 FROM tel.llm_usage 133 GROUP BY stage 134 ORDER BY total_cost DESC 135 `); 136 await setCache('llm_usage_by_stage', results); 137 return results.length; 138 } 139 140 /** 141 * Pre-compute daily throughput (last 30 days) 142 */ 143 async function cacheDailyThroughput() { 144 const results = await getAll(` 145 SELECT 146 created_at::date as date, 147 COUNT(*) as sites_added 148 FROM sites 149 WHERE created_at >= NOW() - INTERVAL '30 days' 150 GROUP BY created_at::date 151 ORDER BY date 152 `); 153 await setCache('daily_throughput_30d', results); 154 return results.length; 155 } 156 157 /** 158 * Pre-compute conversation stats 159 */ 160 async function cacheConversationStats() { 161 const results = await getOne(` 162 SELECT 163 COUNT(*) as total_conversations, 164 SUM(CASE WHEN direction = 'inbound' THEN 1 ELSE 0 END) as inbound_count, 165 SUM(CASE WHEN read_at IS NULL AND direction = 'inbound' THEN 1 ELSE 0 END) as unread_count 166 FROM messages 167 WHERE direction = 'inbound' 168 `); 169 await setCache('conversation_stats', results); 170 return 1; 171 } 172 173 /** 174 * Pre-compute error breakdown 175 */ 176 async function cacheErrorBreakdown() { 177 const results = await getAll(` 178 SELECT 179 error_message, 180 status as stage, 181 COUNT(*) as count 182 FROM sites 183 WHERE error_message IS NOT NULL 184 AND status != 'ignored' 185 GROUP BY error_message, status 186 ORDER BY count DESC 187 LIMIT 20 188 `); 189 await setCache('error_breakdown', results); 190 return results.length; 191 } 192 193 /** 194 * Pre-compute excluded sites count 195 */ 196 async function cacheExcludedSites() { 197 const results = await getOne(` 198 
SELECT 199 SUM(CASE WHEN status = 'ignored' THEN 1 ELSE 0 END) as ignored, 200 SUM(CASE WHEN status = 'failing' THEN 1 ELSE 0 END) as failing 201 FROM sites 202 `); 203 await setCache('excluded_sites_count', { 204 ignored: results?.ignored || 0, 205 failing: results?.failing || 0, 206 }); 207 return 1; 208 } 209 210 /** 211 * Pre-compute total active errors 212 */ 213 async function cacheTotalActiveErrors() { 214 const results = await getOne(` 215 SELECT COUNT(*) as total 216 FROM sites 217 WHERE error_message IS NOT NULL 218 AND status NOT IN ('ignored', 'failing') 219 `); 220 await setCache('total_active_errors', results?.total || 0); 221 return 1; 222 } 223 224 /** 225 * Pre-compute cron job summary 226 */ 227 async function cacheCronJobSummary() { 228 const results = await getAll(` 229 SELECT 230 l.job_name, 231 COUNT(*) as total_runs, 232 SUM(CASE WHEN l.status = 'success' THEN 1 ELSE 0 END) as successful_runs, 233 SUM(CASE WHEN l.status = 'failed' THEN 1 ELSE 0 END) as failed_runs, 234 ROUND(100.0 * SUM(CASE WHEN l.status = 'success' THEN 1 ELSE 0 END) / COUNT(*), 1) as success_rate, 235 MAX(l.started_at) as last_run, 236 EXTRACT(EPOCH FROM AVG(l.finished_at - l.started_at)) / 60 as avg_duration_minutes, 237 j.interval_value, 238 j.interval_unit 239 FROM ops.cron_job_logs l 240 LEFT JOIN ops.cron_jobs j ON j.name = l.job_name 241 WHERE l.started_at >= NOW() - ( 242 CASE 243 WHEN j.interval_unit IN ('minutes', 'hours') THEN INTERVAL '1 day' 244 WHEN j.interval_unit = 'days' AND j.interval_value < 7 THEN INTERVAL '7 days' 245 ELSE INTERVAL '30 days' 246 END 247 ) 248 GROUP BY l.job_name, j.interval_value, j.interval_unit 249 ORDER BY last_run DESC 250 `); 251 await setCache('cron_job_summary', results); 252 return results.length; 253 } 254 255 /** 256 * Pre-compute database health metrics 257 */ 258 async function cacheDatabaseHealth() { 259 // Database size via pg_database_size 260 const sizeResult = await getOne( 261 `SELECT 
pg_database_size(current_database()) as size_bytes` 262 ); 263 const sizeMB = (sizeResult?.size_bytes || 0) / (1024 * 1024); 264 265 // Table counts 266 const tableCounts = await getAll(` 267 SELECT 'sites' as table_name, COUNT(*) as count FROM sites 268 UNION ALL 269 SELECT 'messages (outbound)', COUNT(*) FROM messages WHERE direction='outbound' 270 UNION ALL 271 SELECT 'messages (inbound)', COUNT(*) FROM messages WHERE direction='inbound' 272 UNION ALL 273 SELECT 'keywords', COUNT(*) FROM keywords 274 `); 275 276 // NOTE: integrity_check is SQLite-specific — PG uses pg_dump for verification. 277 // Run pg_dump manually during maintenance. 278 279 await setCache('database_health', { 280 size_mb: sizeMB, 281 integrity: 'ok', // not checked here; run pg_dump manually during maintenance 282 table_counts: tableCounts, 283 }); 284 return 1; 285 } 286 287 // ============================================================================ 288 // CHART DATA CACHING (Large/Expensive Charts) 289 // ============================================================================ 290 291 /** 292 * Cache hourly status breakdown (48 hours) - Pipeline page 293 * This is expensive due to pivot table transformation 294 */ 295 async function cacheHourlyStatusBreakdown() { 296 const results = await getAll(` 297 SELECT 298 to_char(date_trunc('hour', created_at), 'YYYY-MM-DD HH24:00:00') as hour, 299 status, 300 COUNT(*) as count 301 FROM site_status 302 WHERE created_at >= NOW() - INTERVAL '48 hours' 303 AND status NOT IN ('failing', 'ignored') 304 GROUP BY date_trunc('hour', created_at), status 305 ORDER BY hour, status 306 `); 307 await setCache('chart_hourly_status_breakdown_48h', results); 308 return results.length; 309 } 310 311 /** 312 * Cache hourly throughput (48 hours) - Pipeline page 313 */ 314 async function cacheHourlyThroughput() { 315 const results = await getAll(` 316 SELECT 317 to_char(date_trunc('hour', created_at), 'YYYY-MM-DD HH24:00:00') as hour, 318 COUNT(*) as 
sites_added 319 FROM sites 320 WHERE created_at >= NOW() - INTERVAL '48 hours' 321 GROUP BY date_trunc('hour', created_at) 322 ORDER BY hour 323 `); 324 await setCache('chart_hourly_throughput_48h', results); 325 return results.length; 326 } 327 328 /** 329 * Cache cron job timeline (24 hours, 200 limit) - Cron Jobs page Gantt chart 330 * This is expensive due to large result set 331 */ 332 async function cacheCronJobTimeline() { 333 const results = await getAll(` 334 SELECT 335 job_name, 336 started_at, 337 finished_at, 338 status, 339 ROUND( 340 EXTRACT(EPOCH FROM (COALESCE(finished_at, NOW()) - started_at)) / 60, 341 2 342 ) as duration_minutes 343 FROM ops.cron_job_logs 344 WHERE started_at >= NOW() - INTERVAL '24 hours' 345 ORDER BY started_at DESC 346 LIMIT 200 347 `); 348 // Sort chronologically for timeline display 349 const sorted = results.sort((a, b) => new Date(a.started_at) - new Date(b.started_at)); 350 await setCache('chart_cron_timeline_24h', sorted); 351 return sorted.length; 352 } 353 354 /** 355 * Cache system metrics timeline (24 hours) - Cron Jobs page 356 */ 357 async function cacheSystemMetricsTimeline() { 358 const results = await getAll(` 359 SELECT 360 recorded_at, 361 cpu_percent, 362 disk_read_mb, 363 disk_write_mb, 364 memory_percent 365 FROM tel.system_metrics 366 WHERE recorded_at >= NOW() - INTERVAL '24 hours' 367 ORDER BY recorded_at 368 `); 369 await setCache('chart_system_metrics_24h', results); 370 return results.length; 371 } 372 373 /** 374 * Cache LLM daily costs (30 days) - Outreach page 375 */ 376 async function cacheLLMDailyCosts() { 377 const results = await getAll(` 378 SELECT 379 created_at::date as date, 380 SUM(estimated_cost) as daily_cost, 381 SUM(total_tokens) as daily_tokens, 382 COUNT(*) as request_count 383 FROM tel.llm_usage 384 WHERE created_at >= NOW() - INTERVAL '30 days' 385 GROUP BY created_at::date 386 ORDER BY date 387 `); 388 await setCache('chart_llm_daily_costs_30d', results); 389 return 
results.length; 390 } 391 392 /** 393 * Cache LLM cost by stage over time (30 days) - For stacked area charts 394 */ 395 async function cacheLLMCostByStageAndDate() { 396 const results = await getAll(` 397 SELECT 398 created_at::date as date, 399 stage, 400 SUM(estimated_cost) as cost 401 FROM tel.llm_usage 402 WHERE created_at >= NOW() - INTERVAL '30 days' 403 GROUP BY created_at::date, stage 404 ORDER BY date, stage 405 `); 406 await setCache('chart_llm_cost_by_stage_30d', results); 407 return results.length; 408 } 409 410 /** 411 * Cache HTTP error history (30 days) - System Health page 412 */ 413 async function cacheHTTPErrorHistory() { 414 const results = await getAll(` 415 SELECT 416 updated_at::date as date, 417 http_status_code, 418 status as stage, 419 COUNT(*) as count 420 FROM sites 421 WHERE updated_at >= NOW() - INTERVAL '30 days' 422 AND http_status_code IS NOT NULL 423 AND http_status_code != 200 424 AND status IN ('assets_captured', 'enriched', 'enriched_regex', 'enriched_llm') 425 GROUP BY updated_at::date, http_status_code, status 426 ORDER BY date, http_status_code 427 `); 428 await setCache('chart_http_errors_30d', results); 429 return results.length; 430 } 431 432 /** 433 * Cache cron daily history (7 days) - Cron Jobs page 434 */ 435 async function cacheCronDailyHistory() { 436 const results = await getAll(` 437 SELECT 438 started_at::date as date, 439 SUM(CASE WHEN status = 'success' THEN 1 ELSE 0 END) as successful, 440 SUM(CASE WHEN status = 'failed' THEN 1 ELSE 0 END) as failed, 441 COUNT(*) as total 442 FROM ops.cron_job_logs 443 WHERE started_at >= NOW() - INTERVAL '7 days' 444 GROUP BY started_at::date 445 ORDER BY date 446 `); 447 await setCache('chart_cron_daily_history_7d', results); 448 return results.length; 449 } 450 451 /** 452 * Cache cost forecast + profitability data — refreshed every 4 days. 453 * Only writes to cache if existing entry is older than 4 days. 454 * Falls back to live query in FastAPI on cache miss. 
455 */ 456 async function cacheCostForecast() { 457 // Skip if recent cache entry exists (< 4 days old) 458 const existing = await getOne( 459 `SELECT expires_at FROM dashboard_cache WHERE cache_key = 'cost_forecast' AND expires_at > NOW()` 460 ); 461 if (existing) return 0; // still fresh — skip 462 463 // Daily API cost (30-day rolling average) 464 const dailyAvg = await getOne( 465 `SELECT ROUND(AVG(daily_cost)::numeric, 4) as avg_cost FROM ( 466 SELECT created_at::date as d, SUM(estimated_cost) as daily_cost 467 FROM tel.llm_usage WHERE created_at >= NOW() - INTERVAL '30 days' 468 GROUP BY created_at::date 469 ) sub` 470 ); 471 472 // Avg cost per pipeline stage 473 const stageCosts = await getAll( 474 `SELECT stage, AVG(estimated_cost) as avg_cost, COUNT(*) as sample_size 475 FROM tel.llm_usage GROUP BY stage ORDER BY avg_cost DESC` 476 ); 477 478 // Sites pending processing (pipeline queue) 479 const pipelineCounts = await getAll( 480 `SELECT status, COUNT(*) as count FROM sites 481 WHERE status IN ('found','assets_captured','prog_scored','semantic_scored','vision_scored','enriched','enriched_regex','enriched_llm','proposals_drafted') 482 GROUP BY status` 483 ); 484 485 // Monthly sales actuals (last 30 days) 486 const salesData = await getOne( 487 `SELECT 488 COUNT(CASE WHEN s.resulted_in_sale = true AND m.created_at >= NOW() - INTERVAL '30 days' THEN 1 END) as monthly_sales, 489 COALESCE(SUM(CASE WHEN s.resulted_in_sale = true AND m.created_at >= NOW() - INTERVAL '30 days' THEN s.sale_amount END), 0) as monthly_revenue, 490 CASE WHEN COUNT(CASE WHEN s.resulted_in_sale = true THEN 1 END) > 0 491 THEN SUM(s.sale_amount) / COUNT(CASE WHEN s.resulted_in_sale = true THEN 1 END) 492 ELSE 297 END as avg_deal_value 493 FROM messages m 494 JOIN sites s ON m.site_id = s.id 495 WHERE m.direction = 'outbound' AND m.delivery_status IN ('sent', 'delivered')` 496 ); 497 498 const stageCostMap = Object.fromEntries(stageCosts.map(r => [r.stage, r.avg_cost])); 499 const 
pipelineCostForecast = Object.fromEntries( 500 pipelineCounts.map(r => [r.status, { count: r.count, avg_cost: stageCostMap[r.status] || 0 }]) 501 ); 502 503 const result = { 504 daily_api_cost_avg: dailyAvg?.avg_cost || 0, 505 monthly_sales: salesData?.monthly_sales || 0, 506 monthly_revenue: salesData?.monthly_revenue || 0, 507 avg_deal_value: salesData?.avg_deal_value || 297, 508 pipeline_cost_forecast: pipelineCostForecast, 509 }; 510 511 // Store with 4-day expiry 512 const expiresAt = new Date(Date.now() + 4 * 24 * 60 * 60 * 1000).toISOString(); 513 514 await run( 515 `INSERT INTO dashboard_cache (cache_key, cache_value, expires_at, updated_at) 516 VALUES ('cost_forecast', $1, $2, CURRENT_TIMESTAMP) 517 ON CONFLICT (cache_key) DO UPDATE SET 518 cache_value = EXCLUDED.cache_value, 519 expires_at = EXCLUDED.expires_at, 520 updated_at = CURRENT_TIMESTAMP`, 521 [JSON.stringify(result), expiresAt] 522 ); 523 524 return pipelineCounts.length + stageCosts.length; 525 } 526 527 /** 528 * Pre-compute purchases summary 529 */ 530 async function cachePurchasesSummary() { 531 // Check if purchases table exists in PG 532 const tableExists = await getOne( 533 `SELECT tablename FROM pg_tables WHERE schemaname = 'm333' AND tablename = 'purchases'` 534 ); 535 536 if (!tableExists) { 537 await setCache('purchases_summary', { 538 by_status: [], 539 revenue_by_country: [], 540 total_revenue_usd: 0, 541 failed_count: 0, 542 avg_delivery_hours: null, 543 }); 544 return 0; 545 } 546 547 const byStatus = await getAll('SELECT status, COUNT(*) as count FROM purchases GROUP BY status'); 548 549 const revenueByCountry = await getAll( 550 `SELECT country_code, SUM(amount_usd) as total_usd, COUNT(*) as count 551 FROM purchases WHERE status NOT IN ('refunded', 'failed') 552 GROUP BY country_code ORDER BY total_usd DESC` 553 ); 554 555 const totalRevenue = await getOne( 556 `SELECT COALESCE(SUM(amount_usd), 0) as total 557 FROM purchases WHERE status NOT IN ('refunded', 'failed')` 558 ); 559 
560 const failedCount = await getOne( 561 `SELECT COUNT(*) as count FROM purchases WHERE status = 'failed'` 562 ); 563 564 const avgDelivery = await getOne( 565 `SELECT EXTRACT(EPOCH FROM AVG(delivered_at - created_at)) / 3600 as avg_hours 566 FROM purchases WHERE delivered_at IS NOT NULL` 567 ); 568 569 const result = { 570 by_status: byStatus, 571 revenue_by_country: revenueByCountry, 572 total_revenue_usd: totalRevenue?.total || 0, 573 failed_count: failedCount?.count || 0, 574 avg_delivery_hours: avgDelivery?.avg_hours 575 ? parseFloat(parseFloat(avgDelivery.avg_hours).toFixed(1)) 576 : null, 577 }; 578 579 await setCache('purchases_summary', result); 580 return byStatus.length + revenueByCountry.length; 581 } 582 583 /** 584 * Cache site and outreach status trees (error breakdown by reason) 585 */ 586 async function cacheStatusTrees() { 587 const siteTree = await buildStatusTree(); 588 const outreachTree = await buildOutreachTree(); 589 await setCache('status_tree', siteTree); 590 await setCache('outreach_tree', outreachTree); 591 return siteTree.length + outreachTree.length; 592 } 593 594 export async function precomputeDashboard() { 595 // Hard wall-clock budget: 5 minutes. We check elapsed time BETWEEN each section 596 // and skip the rest if we are over budget. This guarantees the function returns before 597 // the cron job's 8-min function timeout kills the whole service. 598 const BUDGET_MS = 5 * 60 * 1000; 599 const startTime = Date.now(); 600 const skipped = []; 601 602 function overBudget(label) { 603 const elapsed = Date.now() - startTime; 604 if (elapsed > BUDGET_MS) { 605 skipped.push(label); 606 return true; 607 } 608 return false; 609 } 610 611 logger.info('Starting dashboard pre-computation...'); 612 613 // Pre-compute all metrics — ordered cheapest → most expensive. 614 // Each call is wrapped in overBudget() to abort early under load. 
615 const metrics = {}; 616 617 if (!overBudget('pipeline_funnel')) metrics.pipeline_funnel = await cachePipelineFunnel(); 618 if (!overBudget('response_rates')) metrics.response_rates = await cacheResponseRates(); 619 if (!overBudget('outreach_funnel')) metrics.outreach_funnel = await cacheOutreachFunnel(); 620 if (!overBudget('conversation_stats')) metrics.conversation_stats = await cacheConversationStats(); 621 if (!overBudget('excluded_sites')) metrics.excluded_sites = await cacheExcludedSites(); 622 if (!overBudget('active_errors')) metrics.active_errors = await cacheTotalActiveErrors(); 623 if (!overBudget('error_breakdown')) metrics.error_breakdown = await cacheErrorBreakdown(); 624 if (!overBudget('database_health')) metrics.database_health = await cacheDatabaseHealth(); 625 if (!overBudget('daily_throughput')) metrics.daily_throughput = await cacheDailyThroughput(); 626 if (!overBudget('llm_usage')) metrics.llm_usage = await cacheLLMUsage(); 627 if (!overBudget('cron_summary')) metrics.cron_summary = await cacheCronJobSummary(); 628 if (!overBudget('purchases_summary')) metrics.purchases_summary = await cachePurchasesSummary(); 629 if (!overBudget('status_trees')) metrics.status_trees = await cacheStatusTrees(); 630 if (!overBudget('cost_forecast')) metrics.cost_forecast = await cacheCostForecast(); // 4-day TTL 631 632 // Pre-compute chart data (large datasets) 633 const charts = {}; 634 if (!overBudget('hourly_status_breakdown_48h')) 635 charts.hourly_status_breakdown_48h = await cacheHourlyStatusBreakdown(); 636 if (!overBudget('hourly_throughput_48h')) 637 charts.hourly_throughput_48h = await cacheHourlyThroughput(); 638 if (!overBudget('cron_timeline_24h')) charts.cron_timeline_24h = await cacheCronJobTimeline(); 639 if (!overBudget('system_metrics_24h')) 640 charts.system_metrics_24h = await cacheSystemMetricsTimeline(); 641 if (!overBudget('llm_daily_costs_30d')) charts.llm_daily_costs_30d = await cacheLLMDailyCosts(); 642 if 
(!overBudget('llm_cost_by_stage_30d')) 643 charts.llm_cost_by_stage_30d = await cacheLLMCostByStageAndDate(); 644 if (!overBudget('http_errors_30d')) charts.http_errors_30d = await cacheHTTPErrorHistory(); 645 if (!overBudget('cron_daily_history_7d')) 646 charts.cron_daily_history_7d = await cacheCronDailyHistory(); 647 648 const duration = Date.now() - startTime; 649 const metricsTotal = Object.values(metrics).reduce((sum, count) => sum + (count || 0), 0); 650 const chartsTotal = Object.values(charts).reduce((sum, count) => sum + (count || 0), 0); 651 const totalEntries = metricsTotal + chartsTotal; 652 653 if (skipped.length > 0) { 654 logger.warn( 655 `Dashboard pre-computation skipped ${skipped.length} sections (over 5-min budget): ${skipped.join(', ')}` 656 ); 657 } 658 659 logger.info( 660 `Dashboard pre-computation completed: ${totalEntries} cache entries (${metricsTotal} metrics + ${chartsTotal} chart points) in ${duration}ms` 661 ); 662 663 return { 664 summary: `Pre-computed ${totalEntries} cache entries in ${duration}ms${skipped.length ? ` (${skipped.length} skipped)` : ''}`, 665 details: { 666 metrics_cached: metrics, 667 charts_cached: charts, 668 cache_expiration_minutes: CACHE_EXPIRATION_MINUTES, 669 duration_ms: duration, 670 skipped, 671 timestamp: new Date().toISOString(), 672 }, 673 metrics: { 674 entries_cached: totalEntries, 675 metrics_entries: metricsTotal, 676 chart_points: chartsTotal, 677 duration_ms: duration, 678 cache_ttl_minutes: CACHE_EXPIRATION_MINUTES, 679 skipped_sections: skipped.length, 680 }, 681 }; 682 } 683 684 // Run if called directly 685 if (import.meta.url === `file://${process.argv[1]}`) { 686 precomputeDashboard() 687 .then(result => { 688 console.log(JSON.stringify(result, null, 2)); 689 process.exit(0); 690 }) 691 .catch(err => { 692 console.error('Pre-computation failed:', err); 693 process.exit(1); 694 }); 695 }