# src/evidently/cli/report.py
  1  import os
  2  from collections import Counter
  3  from typing import Dict
  4  from typing import List
  5  from typing import Optional
  6  
  7  import typer
  8  from typer import Argument
  9  from typer import Option
 10  
 11  from evidently import Dataset
 12  from evidently import Report
 13  from evidently.cli.main import app
 14  from evidently.cli.utils import _URI
 15  from evidently.cli.utils import _Config
 16  from evidently.cli.utils import load_config
 17  from evidently.core.container import MetricOrContainer
 18  from evidently.core.datasets import Descriptor
 19  from evidently.core.report import Snapshot
 20  from evidently.legacy.options.base import Option as EvidentlyOption
 21  from evidently.legacy.suite.base_suite import MetadataValueType
 22  from evidently.legacy.tests.base_test import TestStatus
 23  from evidently.metrics.row_test_summary import RowTestSummary
 24  
 25  
 26  class ReportConfig(_Config):
 27      descriptors: List[Descriptor] = []
 28      options: List[EvidentlyOption] = []
 29      metrics: List[MetricOrContainer] = []
 30      metadata: Dict[str, MetadataValueType] = {}
 31      tags: List[str] = []
 32      include_tests: bool = False
 33  
 34      def to_report(self) -> Report:
 35          return Report(metrics=self.metrics, metadata=self.metadata, tags=self.tags, include_tests=self.include_tests)
 36  
 37  
 38  @app.command("report")
 39  def run_report(
 40      config_path: str = Argument(..., help="Report configuration path"),
 41      input_path: str = Argument(..., help="Input dataset URI", metavar="input"),
 42      output: str = Argument(..., help="Output URI"),
 43      reference_path: Optional[str] = Option(default=None, help="reference dataset"),
 44      dataset_name: str = Option("CLI run", help="Name of dataset"),
 45      test_summary: bool = Option(False, help="Run tests summary"),
 46      save_dataset: bool = Option(True, help="Save output dataset"),
 47      save_report: bool = Option(True, help="Save output report"),
 48  ):
 49      """Run evidently report"""
 50      typer.echo(f"Loading config from {os.path.abspath(config_path)}")
 51      config = load_config(ReportConfig, config_path)
 52      typer.echo(f"Loading dataset from {input_path}")
 53      input_data = _URI(input_path).load_dataset()
 54  
 55      has_descriptors = len(config.descriptors) > 0
 56      has_report = len(config.metrics) > 0
 57      if has_descriptors:
 58          typer.echo(f"Running {len(config.descriptors)} descriptors")
 59          input_data.add_descriptors(config.descriptors, config.options)
 60  
 61      if not has_report:
 62          if save_dataset:
 63              link = _URI(output).upload_dataset(input_data, dataset_name)
 64              typer.echo(f"Saving dataset to {link}")
 65          if test_summary:
 66              typer.echo("Running tests summary")
 67              any_failed = _run_summary_report(input_data)
 68              if any_failed:
 69                  typer.echo("Some tests failed")
 70                  raise typer.Exit(code=1)
 71          return
 72  
 73      reference = None
 74      if has_report and reference_path is not None:
 75          typer.echo(f"Loading reference dataset from {reference_path}")
 76          reference = _URI(reference_path).load_dataset()
 77          reference.add_descriptors(config.descriptors, config.options)
 78  
 79      report = config.to_report()
 80      if test_summary and not any(isinstance(m, RowTestSummary) for m in report.metrics):
 81          report.metrics.append(RowTestSummary())
 82      snapshot = report.run(
 83          input_data,
 84          reference,
 85      )
 86      if save_report:
 87          link = _URI(output).upload_snapshot(snapshot, include_datasets=save_dataset)
 88          typer.echo(f"Saving snapshot to {link}")
 89      elif save_dataset:
 90          link = _URI(output).upload_dataset(input_data, dataset_name)
 91          typer.echo(f"Saving dataset to {link}")
 92      if test_summary:
 93          typer.echo("Running tests summary")
 94          any_failed = _print_summary_report(snapshot)
 95          if any_failed:
 96              typer.echo("Some tests failed")
 97              raise typer.Exit(code=1)
 98  
 99  
def _run_summary_report(dataset: Dataset) -> bool:
    """Run a one-off RowTestSummary report on *dataset* and print its test results.

    Returns True when at least one test did not pass.
    """
    summary_report = Report(metrics=[RowTestSummary()])
    snapshot = summary_report.run(dataset)
    return _print_summary_report(snapshot)
104  
105  
def _print_summary_report(summary: Snapshot) -> bool:
    """Print a colored per-status count and per-test line for *summary*.

    Returns True when any test finished in a non-passing state
    (anything other than SUCCESS, WARNING or SKIPPED).
    """
    palette = {
        TestStatus.WARNING: typer.colors.YELLOW,
        TestStatus.SUCCESS: typer.colors.GREEN,
        TestStatus.ERROR: typer.colors.RED,
        TestStatus.SKIPPED: typer.colors.WHITE,
        TestStatus.FAIL: typer.colors.RED,
    }
    results = summary.tests_results
    total = len(results)
    # Aggregated view first: one colored line per status with its count.
    for status, num in Counter(result.status for result in results).items():
        typer.secho(f"{status.value} [{num}/{total}]", fg=palette[status])
    separator = "-" * 20
    typer.echo(separator)
    passing = (TestStatus.SUCCESS, TestStatus.WARNING, TestStatus.SKIPPED)
    any_failed = False
    # Detailed view: one line per test, status colored, description plain.
    for result in results:
        typer.secho(result.status.value, fg=palette[result.status], nl=False, bold=True)
        typer.echo(f": {result.description}")
        if result.status not in passing:
            any_failed = True
    typer.echo(separator)
    return any_failed