/ tests / metrics / data_integrity / test_dataset_summary_metric.py
test_dataset_summary_metric.py
  1  import json
  2  
  3  import numpy as np
  4  import pandas as pd
  5  import pytest
  6  
  7  from evidently.legacy.metrics import DatasetSummaryMetric
  8  from evidently.legacy.metrics.data_integrity.dataset_summary_metric import DatasetSummary
  9  from evidently.legacy.metrics.data_integrity.dataset_summary_metric import DatasetSummaryMetricResult
 10  from evidently.legacy.metrics.data_integrity.dataset_summary_metric import NumpyDtype
 11  from evidently.legacy.pipeline.column_mapping import ColumnMapping
 12  from evidently.legacy.report import Report
 13  from tests.conftest import smart_assert_equal
 14  
 15  
@pytest.mark.parametrize(
    "current_data, reference_data, column_mapping, metric, expected_result",
    (
        # Case 1: empty current dataframe, no reference data -> every counter
        # is zero and the reference side of the result is None.
        (
            pd.DataFrame({}),
            None,
            ColumnMapping(),
            DatasetSummaryMetric(),
            DatasetSummaryMetricResult(
                almost_duplicated_threshold=0.95,
                current=DatasetSummary(
                    target=None,
                    prediction=None,
                    date_column=None,
                    id_column=None,
                    number_of_columns=0,
                    number_of_rows=0,
                    number_of_missing_values=0,
                    number_of_categorical_columns=0,
                    number_of_numeric_columns=0,
                    number_of_text_columns=0,
                    number_of_datetime_columns=0,
                    number_of_constant_columns=0,
                    number_of_empty_columns=0,
                    number_of_almost_constant_columns=0,
                    number_of_duplicated_columns=0,
                    number_of_almost_duplicated_columns=0,
                    number_of_empty_rows=0,
                    number_of_duplicated_rows=0,
                    columns_type_data={},
                    nans_by_columns={},
                    number_uniques_by_columns={},
                ),
                reference=None,
            ),
        ),
        # Case 2: current and reference dataframes both carry target/prediction
        # columns. The reference has one NaN in "prediction", so its
        # missing-value/NaN counters differ and its dtype becomes float64.
        (
            pd.DataFrame({"target": [1, 8, 3], "prediction": [6, 7, 8]}),
            pd.DataFrame({"target": [1, 2, 3, 4, 5], "prediction": [np.nan, 2, 3, 4, 5]}),
            ColumnMapping(),
            DatasetSummaryMetric(),
            DatasetSummaryMetricResult(
                almost_duplicated_threshold=0.95,
                current=DatasetSummary(
                    target="target",
                    prediction="prediction",
                    date_column=None,
                    id_column=None,
                    number_of_columns=2,
                    number_of_rows=3,
                    number_of_missing_values=0,
                    number_of_categorical_columns=0,
                    number_of_numeric_columns=0,
                    number_of_text_columns=0,
                    number_of_datetime_columns=0,
                    number_of_constant_columns=0,
                    number_of_almost_constant_columns=0,
                    number_of_duplicated_columns=0,
                    number_of_almost_duplicated_columns=0,
                    number_of_empty_rows=0,
                    number_of_empty_columns=0,
                    number_of_duplicated_rows=0,
                    columns_type_data={"target": NumpyDtype(dtype="int64"), "prediction": NumpyDtype(dtype="int64")},
                    nans_by_columns={"target": 0, "prediction": 0},
                    number_uniques_by_columns={"target": 3, "prediction": 3},
                ),
                reference=DatasetSummary(
                    target="target",
                    prediction="prediction",
                    date_column=None,
                    id_column=None,
                    number_of_columns=2,
                    number_of_rows=5,
                    number_of_missing_values=1,
                    number_of_categorical_columns=0,
                    number_of_numeric_columns=0,
                    number_of_text_columns=0,
                    number_of_datetime_columns=0,
                    number_of_constant_columns=0,
                    number_of_almost_constant_columns=0,
                    number_of_duplicated_columns=0,
                    number_of_almost_duplicated_columns=0,
                    number_of_empty_rows=0,
                    number_of_empty_columns=0,
                    number_of_duplicated_rows=0,
                    columns_type_data={"target": NumpyDtype(dtype="int64"), "prediction": NumpyDtype(dtype="float64")},
                    nans_by_columns={"target": 0, "prediction": 1},
                    number_uniques_by_columns={"target": 5, "prediction": 4},
                ),
            ),
        ),
    ),
)
def test_dataset_summary_metric_success(
    current_data: pd.DataFrame,
    reference_data: pd.DataFrame,  # NOTE(review): None is also passed (case 1); annotation is looser than usage
    column_mapping: ColumnMapping,
    metric: DatasetSummaryMetric,
    expected_result: DatasetSummaryMetricResult,
) -> None:
    """Run DatasetSummaryMetric through a Report and compare the full result object.

    The comparison uses smart_assert_equal so the whole DatasetSummaryMetricResult
    (current and reference summaries included) is checked field by field.
    """
    report = Report(metrics=[metric])
    report.run(current_data=current_data, reference_data=reference_data, column_mapping=column_mapping)
    result = metric.get_result()
    smart_assert_equal(result, expected_result)
120  
121  
@pytest.mark.parametrize(
    "current_data, reference_data, metric",
    (
        # Threshold below the valid (0, 1) range.
        (
            pd.DataFrame(
                {
                    "col": [1, 2, 1, 2, 1],
                }
            ),
            None,
            DatasetSummaryMetric(almost_duplicated_threshold=-0.1),
        ),
        # Threshold above the valid range (95 instead of 0.95).
        (
            pd.DataFrame(
                {
                    "col": [1, 2, 1, 2, 1],
                }
            ),
            None,
            DatasetSummaryMetric(almost_duplicated_threshold=95),
        ),
    ),
)
def test_dataset_summary_metric_value_error(
    current_data: pd.DataFrame, reference_data: pd.DataFrame, metric: DatasetSummaryMetric
) -> None:
    """An out-of-range almost_duplicated_threshold must raise ValueError.

    Report construction is deliberately kept OUTSIDE the pytest.raises block:
    if it stayed inside, an unrelated ValueError raised by the Report
    constructor would make the test pass spuriously. Only running the report
    and fetching the metric result are expected to raise.
    """
    report = Report(metrics=[metric])
    with pytest.raises(ValueError):
        report.run(current_data=current_data, reference_data=reference_data, column_mapping=ColumnMapping())
        metric.get_result()
152  
153  
@pytest.mark.parametrize(
    "current_data, reference_data, column_mapping, metric, expected_json",
    (
        # Case 1: empty current dataframe, no reference -> zeroed counters in
        # the serialized JSON and a None "reference" section.
        (
            pd.DataFrame(),
            None,
            ColumnMapping(),
            DatasetSummaryMetric(almost_duplicated_threshold=0.9),
            {
                "almost_duplicated_threshold": 0.9,
                "current": {
                    "date_column": None,
                    "id_column": None,
                    "nans_by_columns": {},
                    "number_of_almost_constant_columns": 0,
                    "number_of_almost_duplicated_columns": 0,
                    "number_of_categorical_columns": 0,
                    "number_of_columns": 0,
                    "number_of_constant_columns": 0,
                    "number_of_datetime_columns": 0,
                    "number_of_duplicated_columns": 0,
                    "number_of_duplicated_rows": 0,
                    "number_of_empty_columns": 0,
                    "number_of_empty_rows": 0,
                    "number_of_missing_values": 0.0,
                    "number_of_numeric_columns": 0,
                    "number_of_text_columns": 0,
                    "number_of_rows": 0,
                    "number_uniques_by_columns": {},
                    "prediction": None,
                    "target": None,
                },
                "reference": None,
            },
        ),
        # Case 2: current has a duplicated column pair (test1/test2) and a
        # constant column (test3); the reference has two constant columns
        # (test2/test3, test2 being a string/categorical one) and no duplicates.
        (
            pd.DataFrame({"test1": [1, 2, 3], "test2": [1, 2, 3], "test3": [1, 1, 1]}),
            pd.DataFrame({"test4": [1, 2, 3], "test2": ["a", "a", "a"], "test3": [1, 1, 1]}),
            ColumnMapping(),
            DatasetSummaryMetric(almost_duplicated_threshold=0.9),
            {
                "almost_duplicated_threshold": 0.9,
                "current": {
                    "date_column": None,
                    "id_column": None,
                    "nans_by_columns": {"test1": 0, "test2": 0, "test3": 0},
                    "number_of_almost_constant_columns": 1,
                    "number_of_almost_duplicated_columns": 1,
                    "number_of_categorical_columns": 0,
                    "number_of_columns": 3,
                    "number_of_constant_columns": 1,
                    "number_of_datetime_columns": 0,
                    "number_of_duplicated_columns": 1,
                    "number_of_duplicated_rows": 0,
                    "number_of_empty_columns": 0,
                    "number_of_empty_rows": 0,
                    "number_of_missing_values": 0,
                    "number_of_numeric_columns": 3,
                    "number_of_text_columns": 0,
                    "number_of_rows": 3,
                    "number_uniques_by_columns": {"test1": 3, "test2": 3, "test3": 1},
                    "prediction": None,
                    "target": None,
                },
                "reference": {
                    "date_column": None,
                    "id_column": None,
                    "nans_by_columns": {"test2": 0, "test3": 0, "test4": 0},
                    "number_of_almost_constant_columns": 2,
                    "number_of_almost_duplicated_columns": 0,
                    "number_of_categorical_columns": 1,
                    "number_of_columns": 3,
                    "number_of_constant_columns": 2,
                    "number_of_datetime_columns": 0,
                    "number_of_duplicated_columns": 0,
                    "number_of_duplicated_rows": 0,
                    "number_of_empty_columns": 0,
                    "number_of_empty_rows": 0,
                    "number_of_missing_values": 0,
                    "number_of_numeric_columns": 2,
                    "number_of_text_columns": 0,
                    "number_of_rows": 3,
                    "number_uniques_by_columns": {"test2": 1, "test3": 1, "test4": 3},
                    "prediction": None,
                    "target": None,
                },
            },
        ),
    ),
)
def test_dataset_summary_metric_with_report(
    current_data: pd.DataFrame,
    reference_data: pd.DataFrame,  # NOTE(review): None is also passed (case 1); annotation is looser than usage
    column_mapping: ColumnMapping,
    metric: DatasetSummaryMetric,
    expected_json: dict,
) -> None:
    """Check that DatasetSummaryMetric renders and serializes to the expected JSON payload."""
    report = Report(metrics=[metric])
    report.run(current_data=current_data, reference_data=reference_data, column_mapping=column_mapping)
    # Truthiness of show() confirms the report renders without raising.
    assert report.show()
    json_result = report.json()
    assert len(json_result) > 0
    result = json.loads(json_result)
    # The report JSON wraps each metric's payload under metrics[i].result.
    assert result["metrics"][0]["metric"] == "DatasetSummaryMetric"
    assert result["metrics"][0]["result"] == expected_json
259  
260  
@pytest.mark.parametrize(
    "pd_type,np_type",
    [
        (pd.Int64Dtype(), np.int64),
        (pd.CategoricalDtype(categories=["1"]), pd.CategoricalDtype(categories=["1"])),
        (pd.Float64Dtype(), np.float64),
    ],
)
def test_numpy_dtype_from_pandas(pd_type, np_type):
    """NumpyDtype.from_dtype maps each pandas extension dtype to the expected type."""
    converted = NumpyDtype.from_dtype(pd_type)
    assert converted.type == np_type