# src/evidently/legacy/tests/recsys_tests.py
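"""Tests for recommender system quality, built on the legacy metrics.

Each test wraps a single metric, extracts one scalar from its result, and
checks it against a user-supplied condition. When no condition is given, the
tests default to "within +/-10% of the reference value" if reference data is
available, or to a simple sanity bound otherwise.

A minimal usage sketch (hedged: it assumes the legacy ``TestSuite`` entry
point, the ``ColumnMapping.recommendations_type`` option, and hypothetical
dataframes ``ref_df``/``cur_df``; adjust imports to your installation)::

    from evidently.legacy.pipeline.column_mapping import ColumnMapping
    from evidently.legacy.test_suite import TestSuite

    suite = TestSuite(tests=[TestPrecisionTopK(k=5), TestNDCGK(k=5, gte=0.3)])
    suite.run(
        reference_data=ref_df,
        current_data=cur_df,
        column_mapping=ColumnMapping(recommendations_type="rank"),
    )
"""
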
import abc
from typing import ClassVar
from typing import Generic
from typing import List
from typing import Optional
from typing import TypeVar
from typing import Union

from evidently.legacy.metric_results import HistogramData
from evidently.legacy.metrics import DiversityMetric
from evidently.legacy.metrics import FBetaTopKMetric
from evidently.legacy.metrics import HitRateKMetric
from evidently.legacy.metrics import MAPKMetric
from evidently.legacy.metrics import MARKMetric
from evidently.legacy.metrics import MRRKMetric
from evidently.legacy.metrics import NDCGKMetric
from evidently.legacy.metrics import NoveltyMetric
from evidently.legacy.metrics import PersonalizationMetric
from evidently.legacy.metrics import PopularityBias
from evidently.legacy.metrics import PrecisionTopKMetric
from evidently.legacy.metrics import RecallTopKMetric
from evidently.legacy.metrics import ScoreDistribution
from evidently.legacy.metrics import SerendipityMetric
from evidently.legacy.model.widget import BaseWidgetInfo
from evidently.legacy.renderers.base_renderer import TestHtmlInfo
from evidently.legacy.renderers.base_renderer import TestRenderer
from evidently.legacy.renderers.base_renderer import default_renderer
from evidently.legacy.renderers.html_widgets import TabData
from evidently.legacy.renderers.html_widgets import plotly_figure
from evidently.legacy.renderers.html_widgets import table_data
from evidently.legacy.renderers.html_widgets import widget_tabs
from evidently.legacy.tests.base_test import BaseCheckValueTest
from evidently.legacy.tests.base_test import GroupData
from evidently.legacy.tests.base_test import GroupingTypes
from evidently.legacy.tests.base_test import TestValueCondition
from evidently.legacy.tests.utils import approx
from evidently.legacy.utils.types import Numeric
from evidently.legacy.utils.visualizations import plot_4_distr
from evidently.legacy.utils.visualizations import plot_distr_with_perc_button
from evidently.legacy.utils.visualizations import plot_metric_k

RECSYS_GROUP = GroupData(id="recsys", title="Recommendations", description="")
GroupingTypes.TestGroup.add_value(RECSYS_GROUP)


BaseTopKRecsysType = Union[
    PrecisionTopKMetric,
    RecallTopKMetric,
    FBetaTopKMetric,
    MAPKMetric,
    MARKMetric,
    NDCGKMetric,
    MRRKMetric,
    HitRateKMetric,
]


class BaseTopkRecsysTest(BaseCheckValueTest, abc.ABC):
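    """Base class for top-k ranking quality tests.

    Subclasses supply the underlying metric via ``get_metric``; the tested
    value is the metric at rank ``k``, with ``min_rel_score`` and
    ``no_feedback_users`` forwarded to the metric. Without an explicit
    condition, the test expects the value to stay within +/-10% of the
    reference value, or simply to be above 0 when no reference is available.
    """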
    group: ClassVar = RECSYS_GROUP.id
    header: str
    k: int
    min_rel_score: Optional[int]
    no_feedback_users: bool
    _metric: BaseTopKRecsysType

    def __init__(
        self,
        k: int,
        min_rel_score: Optional[int] = None,
        no_feedback_users: bool = False,
        eq: Optional[Numeric] = None,
        gt: Optional[Numeric] = None,
        gte: Optional[Numeric] = None,
        is_in: Optional[List[Union[Numeric, str, bool]]] = None,
        lt: Optional[Numeric] = None,
        lte: Optional[Numeric] = None,
        not_eq: Optional[Numeric] = None,
        not_in: Optional[List[Union[Numeric, str, bool]]] = None,
        is_critical: bool = True,
    ):
        self.k = k
        self.min_rel_score = min_rel_score
        self.no_feedback_users = no_feedback_users
        self._metric = self.get_metric(k, min_rel_score, no_feedback_users)
        super().__init__(
            eq=eq,
            gt=gt,
            gte=gte,
            is_in=is_in,
            lt=lt,
            lte=lte,
            not_eq=not_eq,
            not_in=not_in,
            is_critical=is_critical,
        )

    def get_condition(self) -> TestValueCondition:
        if self.condition.has_condition():
            return self.condition
        metric_result = self.metric.get_result()
        ref_value = metric_result.reference[self.k] if metric_result.reference is not None else None
        if ref_value is not None:
            return TestValueCondition(eq=approx(ref_value, relative=0.1))
        return TestValueCondition(gt=0)

    def calculate_value_for_test(self) -> Numeric:
        return self.metric.get_result().current[self.k]

    def get_description(self, value: Numeric) -> str:
        header_part = "(no feedback users included)"
        if not self.no_feedback_users:
            header_part = "(no feedback users excluded)"
        return f"{self.header}@{self.k} {header_part} is {value:.3}. The test threshold is {self.get_condition()}"

    @abc.abstractmethod
    def get_metric(self, k, min_rel_score, no_feedback_users) -> BaseTopKRecsysType:
        raise NotImplementedError()

    @property
    def metric(self):
        return self._metric


@default_renderer(wrap_type=BaseTopkRecsysTest)
class BaseTopkRecsysRenderer(TestRenderer):
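    """Shared renderer for top-k tests: plots the metric value as a function of k."""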
    yaxis_name: str

    def render_html(self, obj: BaseTopkRecsysTest) -> TestHtmlInfo:
        info = super().render_html(obj)
        result = obj.metric.get_result()
        fig = plot_metric_k(result.current, result.reference, self.yaxis_name)
        info.with_details("", plotly_figure(figure=fig, title=""))
        return info


class TestPrecisionTopK(BaseTopkRecsysTest):
    class Config:
        type_alias = "evidently:test:TestPrecisionTopK"

    name: ClassVar = "Precision (top-k)"
    header: str = "Precision"

    def get_metric(self, k, min_rel_score, no_feedback_users) -> BaseTopKRecsysType:
        return PrecisionTopKMetric(k=k, min_rel_score=min_rel_score, no_feedback_users=no_feedback_users)


@default_renderer(wrap_type=TestPrecisionTopK)
class TestPrecisionTopKRenderer(BaseTopkRecsysRenderer):
    yaxis_name = "precision@k"


class TestRecallTopK(BaseTopkRecsysTest):
    class Config:
        type_alias = "evidently:test:TestRecallTopK"

    name: ClassVar = "Recall (top-k)"
    header: str = "Recall"

    def get_metric(self, k, min_rel_score, no_feedback_users) -> BaseTopKRecsysType:
        return RecallTopKMetric(k=k, min_rel_score=min_rel_score, no_feedback_users=no_feedback_users)


@default_renderer(wrap_type=TestRecallTopK)
class TestRecallTopKRenderer(BaseTopkRecsysRenderer):
    yaxis_name = "recall@k"


class TestFBetaTopK(BaseTopkRecsysTest):
    class Config:
        type_alias = "evidently:test:TestFBetaTopK"

    name: ClassVar = "F_beta (top-k)"
    header: str = "F_beta"

    def get_metric(self, k, min_rel_score, no_feedback_users) -> BaseTopKRecsysType:
        return FBetaTopKMetric(k=k, min_rel_score=min_rel_score, no_feedback_users=no_feedback_users)


@default_renderer(wrap_type=TestFBetaTopK)
class TestFBetaTopKRenderer(BaseTopkRecsysRenderer):
    yaxis_name = "f_beta@k"


class TestMAPK(BaseTopkRecsysTest):
    class Config:
        type_alias = "evidently:test:TestMAPK"

    name: ClassVar = "MAP (top-k)"
    header: str = "MAP"

    def get_metric(self, k, min_rel_score, no_feedback_users) -> BaseTopKRecsysType:
        return MAPKMetric(k=k, min_rel_score=min_rel_score, no_feedback_users=no_feedback_users)


@default_renderer(wrap_type=TestMAPK)
class TestMAPKRenderer(BaseTopkRecsysRenderer):
    yaxis_name = "map@k"


class TestMARK(BaseTopkRecsysTest):
    class Config:
        type_alias = "evidently:test:TestMARK"

    name: ClassVar = "MAR (top-k)"
    header: str = "MAR"

    def get_metric(self, k, min_rel_score, no_feedback_users) -> BaseTopKRecsysType:
        return MARKMetric(k=k, min_rel_score=min_rel_score, no_feedback_users=no_feedback_users)


@default_renderer(wrap_type=TestMARK)
class TestMARKRenderer(BaseTopkRecsysRenderer):
    yaxis_name = "mar@k"


class TestNDCGK(BaseTopkRecsysTest):
    class Config:
        type_alias = "evidently:test:TestNDCGK"

    name: ClassVar = "NDCG (top-k)"
    header: str = "NDCG"

    def get_metric(self, k, min_rel_score, no_feedback_users) -> BaseTopKRecsysType:
        return NDCGKMetric(k=k, min_rel_score=min_rel_score, no_feedback_users=no_feedback_users)


@default_renderer(wrap_type=TestNDCGK)
class TestNDCGKRenderer(BaseTopkRecsysRenderer):
    yaxis_name = "ndcg@k"


class TestHitRateK(BaseTopkRecsysTest):
    class Config:
        type_alias = "evidently:test:TestHitRateK"

    name: ClassVar = "Hit Rate (top-k)"
    header: str = "Hit Rate"

    def get_metric(self, k, min_rel_score, no_feedback_users) -> BaseTopKRecsysType:
        return HitRateKMetric(k=k, min_rel_score=min_rel_score, no_feedback_users=no_feedback_users)


@default_renderer(wrap_type=TestHitRateK)
class TestHitRateKRenderer(BaseTopkRecsysRenderer):
    yaxis_name = "hit_rate@k"


class TestMRRK(BaseTopkRecsysTest):
    class Config:
        type_alias = "evidently:test:TestMRRK"

    name: ClassVar = "MRR (top-k)"
    header: str = "MRR"

    def get_metric(self, k, min_rel_score, no_feedback_users) -> BaseTopKRecsysType:
        return MRRKMetric(k=k, min_rel_score=min_rel_score, no_feedback_users=no_feedback_users)


@default_renderer(wrap_type=TestMRRK)
class TestMRRKRenderer(BaseTopkRecsysRenderer):
    yaxis_name = "mrr@k"


BaseNotRankRecsysType = Union[
    PersonalizationMetric,
    NoveltyMetric,
    SerendipityMetric,
    DiversityMetric,
]


TBaseNotRankRecsysType = TypeVar("TBaseNotRankRecsysType")


class BaseNotRankRecsysTest(Generic[TBaseNotRankRecsysType], BaseCheckValueTest, abc.ABC):
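    """Base class for "beyond accuracy" recommendation tests.

    Used for metrics such as novelty, diversity, serendipity, and
    personalization, whose result is a single scalar per dataset rather than
    a value per rank. ``item_features`` is forwarded to metrics that compare
    item content. The default condition mirrors the top-k tests: within
    +/-10% of the reference value, or above 0 without a reference.
    """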
    group: ClassVar = RECSYS_GROUP.id
    header: str
    k: int
    min_rel_score: Optional[int]
    item_features: Optional[List[str]]
    _metric: TBaseNotRankRecsysType

    def __init__(
        self,
        k: int,
        min_rel_score: Optional[int] = None,
        item_features: Optional[List[str]] = None,
        eq: Optional[Numeric] = None,
        gt: Optional[Numeric] = None,
        gte: Optional[Numeric] = None,
        is_in: Optional[List[Union[Numeric, str, bool]]] = None,
        lt: Optional[Numeric] = None,
        lte: Optional[Numeric] = None,
        not_eq: Optional[Numeric] = None,
        not_in: Optional[List[Union[Numeric, str, bool]]] = None,
        is_critical: bool = True,
    ):
        self.k = k
        self.min_rel_score = min_rel_score
        self.item_features = item_features
        self._metric = self.get_metric(k, min_rel_score, item_features)
        super().__init__(
            eq=eq,
            gt=gt,
            gte=gte,
            is_in=is_in,
            lt=lt,
            lte=lte,
            not_eq=not_eq,
            not_in=not_in,
            is_critical=is_critical,
        )

    def get_condition(self) -> TestValueCondition:
        if self.condition.has_condition():
            return self.condition
        metric_result = self.metric.get_result()

        ref_value = metric_result.reference_value
        if ref_value is not None:
            return TestValueCondition(eq=approx(ref_value, relative=0.1))
        return TestValueCondition(gt=0)

    def calculate_value_for_test(self) -> Numeric:
        return self.metric.get_result().current_value

    def get_description(self, value: Numeric) -> str:
        return f"{self.header}@{self.k} is {value:.3}. The test threshold is {self.get_condition()}"

    @abc.abstractmethod
    def get_metric(self, k, min_rel_score, item_features) -> TBaseNotRankRecsysType:
        raise NotImplementedError()

    @property
    def metric(self):
        return self._metric


@default_renderer(wrap_type=BaseNotRankRecsysTest)
class BaseNotRankRecsysTestRenderer(TestRenderer):
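    """Shared renderer: plots current vs. reference distributions of per-user metric values."""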
    xaxis_name: str

    def render_html(self, obj: BaseNotRankRecsysTest) -> TestHtmlInfo:
        info = super().render_html(obj)
        result = obj.metric.get_result()
        fig = plot_distr_with_perc_button(
            hist_curr=HistogramData.from_distribution(result.current_distr),
            hist_ref=HistogramData.from_distribution(result.reference_distr),
            xaxis_name=self.xaxis_name,
            yaxis_name="Count",
            yaxis_name_perc="Percent",
            same_color=False,
            color_options=self.color_options,
            subplots=False,
            to_json=False,
        )
        info.with_details("", plotly_figure(figure=fig, title=""))
        return info


class TestNovelty(BaseNotRankRecsysTest[NoveltyMetric]):
    class Config:
        type_alias = "evidently:test:TestNovelty"

    name: ClassVar = "Novelty (top-k)"
    header: str = "Novelty"

    def get_metric(self, k, min_rel_score, item_features) -> NoveltyMetric:
        return NoveltyMetric(k=k)


@default_renderer(wrap_type=TestNovelty)
class TestNoveltyRenderer(BaseNotRankRecsysTestRenderer):
    xaxis_name = "novelty by user"


class TestDiversity(BaseNotRankRecsysTest[DiversityMetric]):
    class Config:
        type_alias = "evidently:test:TestDiversity"

    name: ClassVar = "Diversity (top-k)"
    header: str = "Diversity"

    def get_metric(self, k, min_rel_score, item_features) -> DiversityMetric:
        return DiversityMetric(k=k, item_features=item_features)


@default_renderer(wrap_type=TestDiversity)
class TestDiversityRenderer(BaseNotRankRecsysTestRenderer):
    xaxis_name = "intra list diversity by user"


class TestSerendipity(BaseNotRankRecsysTest[SerendipityMetric]):
    class Config:
        type_alias = "evidently:test:TestSerendipity"

    name: ClassVar = "Serendipity (top-k)"
    header: str = "Serendipity"

    def get_metric(self, k, min_rel_score, item_features) -> SerendipityMetric:
        return SerendipityMetric(k=k, min_rel_score=min_rel_score, item_features=item_features)


@default_renderer(wrap_type=TestSerendipity)
class TestSerendipityRenderer(BaseNotRankRecsysTestRenderer):
    xaxis_name = "serendipity by user"


class TestPersonalization(BaseNotRankRecsysTest[PersonalizationMetric]):
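    """Tests personalization: how dissimilar users' top-k recommendation
    lists are from one another (higher means more personalized)."""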
    class Config:
        type_alias = "evidently:test:TestPersonalization"

    name: ClassVar = "Personalization (top-k)"
    header: str = "Personalization"

    def get_metric(self, k, min_rel_score, item_features) -> PersonalizationMetric:
        return PersonalizationMetric(k=k)


@default_renderer(wrap_type=TestPersonalization)
class TestPersonalizationRenderer(TestRenderer):
    @staticmethod
    def _get_table_stat(dataset_name: str, curr_table: dict, ref_table: Optional[dict]) -> BaseWidgetInfo:
        matched_stat_headers = ["Value", "Count"]
        tabs = [
            TabData(
                title="CURRENT: Top 10 popular items",
                widget=table_data(
                    title="",
                    column_names=matched_stat_headers,
                    data=[(k, v) for k, v in curr_table.items() if v > 0][:10],
                ),
            ),
        ]
        if ref_table is not None:
            tabs.append(
                TabData(
                    title="REFERENCE: Top 10 popular items",
                    widget=table_data(
                        title="",
                        column_names=matched_stat_headers,
                        data=[(k, v) for k, v in ref_table.items() if v > 0][:10],
                    ),
                ),
            )
        return widget_tabs(title="", tabs=tabs)

    def render_html(self, obj: TestPersonalization) -> TestHtmlInfo:
        info = super().render_html(obj)
        result = obj.metric.get_result()
        info.with_details("", self._get_table_stat("", result.current_table, result.reference_table))
        return info


class TestARP(BaseCheckValueTest):
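    """Tests Average Recommendation Popularity (ARP): the mean popularity of
    the items that appear in the top-k recommendations. ``normalize_arp``
    switches the underlying ``PopularityBias`` metric to normalized
    popularity values."""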
    class Config:
        type_alias = "evidently:test:TestARP"

    group: ClassVar = RECSYS_GROUP.id
    name: ClassVar = "ARP (top-k)"
    k: int
    normalize_arp: bool
    _metric: PopularityBias

    def __init__(
        self,
        k: int,
        normalize_arp: bool = False,
        eq: Optional[Numeric] = None,
        gt: Optional[Numeric] = None,
        gte: Optional[Numeric] = None,
        is_in: Optional[List[Union[Numeric, str, bool]]] = None,
        lt: Optional[Numeric] = None,
        lte: Optional[Numeric] = None,
        not_eq: Optional[Numeric] = None,
        not_in: Optional[List[Union[Numeric, str, bool]]] = None,
        is_critical: bool = True,
    ):
        self.k = k
        self.normalize_arp = normalize_arp
        self._metric = PopularityBias(k, normalize_arp=normalize_arp)
        super().__init__(
            eq=eq,
            gt=gt,
            gte=gte,
            is_in=is_in,
            lt=lt,
            lte=lte,
            not_eq=not_eq,
            not_in=not_in,
            is_critical=is_critical,
        )

    def get_condition(self) -> TestValueCondition:
        if self.condition.has_condition():
            return self.condition
        metric_result = self.metric.get_result()
        ref_value = metric_result.reference_apr
        if ref_value is not None:
            return TestValueCondition(eq=approx(ref_value, relative=0.1))
        return TestValueCondition(gt=0)

    def calculate_value_for_test(self) -> Numeric:
        return self.metric.get_result().current_apr

    def get_description(self, value: Numeric) -> str:
        return f"ARP (top-{self.k}) is {value:.3}. The test threshold is {self.get_condition()}"

    @property
    def metric(self):
        return self._metric


class TestGiniIndex(BaseCheckValueTest):
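    """Tests the Gini index of how evenly recommendations are spread across
    items: 0 means all items are recommended equally often, values close to 1
    mean recommendations concentrate on a few popular items."""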
    class Config:
        type_alias = "evidently:test:TestGiniIndex"

    group: ClassVar = RECSYS_GROUP.id
    name: ClassVar = "Gini Index (top-k)"
    k: int
    _metric: PopularityBias

    def __init__(
        self,
        k: int,
        eq: Optional[Numeric] = None,
        gt: Optional[Numeric] = None,
        gte: Optional[Numeric] = None,
        is_in: Optional[List[Union[Numeric, str, bool]]] = None,
        lt: Optional[Numeric] = None,
        lte: Optional[Numeric] = None,
        not_eq: Optional[Numeric] = None,
        not_in: Optional[List[Union[Numeric, str, bool]]] = None,
        is_critical: bool = True,
    ):
        self.k = k
        self._metric = PopularityBias(k)
        super().__init__(
            eq=eq,
            gt=gt,
            gte=gte,
            is_in=is_in,
            lt=lt,
            lte=lte,
            not_eq=not_eq,
            not_in=not_in,
            is_critical=is_critical,
        )

    def get_condition(self) -> TestValueCondition:
        if self.condition.has_condition():
            return self.condition
        metric_result = self.metric.get_result()
        ref_value = metric_result.reference_gini
        if ref_value is not None:
            return TestValueCondition(eq=approx(ref_value, relative=0.1))
        return TestValueCondition(lt=1)

    def calculate_value_for_test(self) -> Numeric:
        return self.metric.get_result().current_gini

    def get_description(self, value: Numeric) -> str:
        return f"Gini index (top-{self.k}) is {value:.3}. The test threshold is {self.get_condition()}"

    @property
    def metric(self):
        return self._metric


class TestCoverage(BaseCheckValueTest):
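    """Tests catalog coverage: the share of catalog items that appear in at
    least one user's top-k recommendations."""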
    class Config:
        type_alias = "evidently:test:TestCoverage"

    group: ClassVar = RECSYS_GROUP.id
    name: ClassVar = "Coverage (top-k)"
    k: int
    _metric: PopularityBias

    def __init__(
        self,
        k: int,
        eq: Optional[Numeric] = None,
        gt: Optional[Numeric] = None,
        gte: Optional[Numeric] = None,
        is_in: Optional[List[Union[Numeric, str, bool]]] = None,
        lt: Optional[Numeric] = None,
        lte: Optional[Numeric] = None,
        not_eq: Optional[Numeric] = None,
        not_in: Optional[List[Union[Numeric, str, bool]]] = None,
        is_critical: bool = True,
    ):
        self.k = k
        self._metric = PopularityBias(k)
        super().__init__(
            eq=eq,
            gt=gt,
            gte=gte,
            is_in=is_in,
            lt=lt,
            lte=lte,
            not_eq=not_eq,
            not_in=not_in,
            is_critical=is_critical,
        )

    def get_condition(self) -> TestValueCondition:
        if self.condition.has_condition():
            return self.condition
        metric_result = self.metric.get_result()
        ref_value = metric_result.reference_coverage
        if ref_value is not None:
            return TestValueCondition(eq=approx(ref_value, relative=0.1))
        return TestValueCondition(gt=0)

    def calculate_value_for_test(self) -> Numeric:
        return self.metric.get_result().current_coverage

    def get_description(self, value: Numeric) -> str:
        return f"Coverage (top-{self.k}) is {value:.3}. The test threshold is {self.get_condition()}"

    @property
    def metric(self):
        return self._metric


@default_renderer(wrap_type=TestARP)
@default_renderer(wrap_type=TestGiniIndex)
@default_renderer(wrap_type=TestCoverage)
class TestPopularityBiasRenderer(TestRenderer):
    def render_html(self, obj: Union[TestARP, TestGiniIndex, TestCoverage]) -> TestHtmlInfo:
        info = super().render_html(obj)
        metric_result = obj.metric.get_result()
        is_normed = ""
        if metric_result.normalize_arp:
            is_normed = " normalized"
        distr_fig = plot_distr_with_perc_button(
            hist_curr=HistogramData.from_distribution(metric_result.current_distr),
            hist_ref=HistogramData.from_distribution(metric_result.reference_distr),
            xaxis_name="item popularity" + is_normed,
            yaxis_name="Count",
            yaxis_name_perc="Percent",
            same_color=False,
            color_options=self.color_options,
            subplots=False,
            to_json=False,
        )
        info.with_details("", plotly_figure(figure=distr_fig, title=""))
        return info


class TestScoreEntropy(BaseCheckValueTest):
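    """Tests the entropy of the predicted score distribution within the
    top-k recommendations (see ``ScoreDistribution``)."""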
    class Config:
        type_alias = "evidently:test:TestScoreEntropy"

    group: ClassVar = RECSYS_GROUP.id
    name: ClassVar = "Score Entropy (top-k)"
    k: int
    _metric: ScoreDistribution

    def __init__(
        self,
        k: int,
        eq: Optional[Numeric] = None,
        gt: Optional[Numeric] = None,
        gte: Optional[Numeric] = None,
        is_in: Optional[List[Union[Numeric, str, bool]]] = None,
        lt: Optional[Numeric] = None,
        lte: Optional[Numeric] = None,
        not_eq: Optional[Numeric] = None,
        not_in: Optional[List[Union[Numeric, str, bool]]] = None,
        is_critical: bool = True,
    ):
        self.k = k
        self._metric = ScoreDistribution(k)
        super().__init__(
            eq=eq,
            gt=gt,
            gte=gte,
            is_in=is_in,
            lt=lt,
            lte=lte,
            not_eq=not_eq,
            not_in=not_in,
            is_critical=is_critical,
        )

    def get_condition(self) -> TestValueCondition:
        if self.condition.has_condition():
            return self.condition
        metric_result = self.metric.get_result()
        ref_value = metric_result.reference_entropy
        if ref_value is not None:
            return TestValueCondition(eq=approx(ref_value, relative=0.1))
        return TestValueCondition(gt=0)

    def calculate_value_for_test(self) -> Numeric:
        return self.metric.get_result().current_entropy

    def get_description(self, value: Numeric) -> str:
        return f"Score Entropy (top-{self.k}) is {value:.3}. The test threshold is {self.get_condition()}"

    @property
    def metric(self):
        return self._metric


@default_renderer(wrap_type=TestScoreEntropy)
class TestScoreEntropyRenderer(TestRenderer):
    def render_html(self, obj: TestScoreEntropy) -> TestHtmlInfo:
        info = super().render_html(obj)
        metric_result = obj.metric.get_result()
        distr_fig = plot_4_distr(
            curr_1=HistogramData.from_distribution(metric_result.current_top_k_distr),
            curr_2=HistogramData.from_distribution(metric_result.current_other_distr),
            ref_1=HistogramData.from_distribution(metric_result.reference_top_k_distr),
            ref_2=HistogramData.from_distribution(metric_result.reference_other_distr),
            name_1="top_k",
            name_2="other",
            xaxis_name="scores",
            color_2="secondary",
        )
        info.with_details("", plotly_figure(figure=distr_fig, title=""))
        return info