# mlflow/entities/assessment.py
  1  from __future__ import annotations
  2  
  3  import json
  4  import time
  5  from dataclasses import dataclass
  6  from typing import Any
  7  
  8  from google.protobuf.json_format import MessageToDict, ParseDict
  9  from google.protobuf.struct_pb2 import Value
 10  
 11  from mlflow.entities._mlflow_object import _MlflowObject
 12  from mlflow.entities.assessment_error import AssessmentError
 13  from mlflow.entities.assessment_source import AssessmentSource, AssessmentSourceType
 14  from mlflow.exceptions import MlflowException
 15  from mlflow.protos.assessments_pb2 import Assessment as ProtoAssessment
 16  from mlflow.protos.assessments_pb2 import Expectation as ProtoExpectation
 17  from mlflow.protos.assessments_pb2 import Feedback as ProtoFeedback
 18  from mlflow.protos.assessments_pb2 import IssueReference as ProtoIssueReference
 19  from mlflow.utils.exception_utils import get_stacktrace
 20  from mlflow.utils.proto_json_utils import proto_timestamp_to_milliseconds
 21  
# Type aliases for values accepted as feedback.
# A feedback value should be one of the following types:
# - float
# - int
# - str
# - bool
# - list of values of the same types as above
# - dict with string keys and values of the same types as above
PbValueType = float | int | str | bool
FeedbackValueType = PbValueType | dict[str, PbValueType] | list[PbValueType]
 31  
 32  
 33  @dataclass
 34  class Assessment(_MlflowObject):
 35      """
 36      Base class for assessments that can be attached to a trace.
 37      An Assessment should be one of the following types:
 38  
 39      - Expectations: A label that represents the expected value for a particular operation.
 40          For example, an expected answer for a user question from a chatbot.
 41      - Feedback: A label that represents the feedback on the quality of the operation.
 42          Feedback can come from different sources, such as human judges, heuristic scorers,
 43          or LLM-as-a-Judge.
 44      - IssueReference: A reference to an issue associated with a trace, used to link traces
 45          to discovered quality or operational problems.
 46      """
 47  
 48      name: str
 49      source: AssessmentSource
 50      # NB: The trace ID is optional because the assessment object itself may be created
 51      #   standalone. For example, a custom metric function returns an assessment object
 52      #   without a trace ID. That said, the trace ID is required when logging the
 53      #   assessment to a trace in the backend eventually.
 54      #   https://docs.databricks.com/aws/en/generative-ai/agent-evaluation/custom-metrics#-metric-decorator
 55      trace_id: str | None = None
 56      run_id: str | None = None
 57      rationale: str | None = None
 58      metadata: dict[str, str] | None = None
 59      span_id: str | None = None
 60      create_time_ms: int | None = None
 61      last_update_time_ms: int | None = None
 62      # NB: The assessment ID should always be generated in the backend. The CreateAssessment
 63      #   backend API asks for an incomplete Assessment object without an ID and returns a
 64      #   complete one with assessment_id, so the ID is Optional in the constructor here.
 65      assessment_id: str | None = None
 66      # Deprecated, use `error` in Feedback instead. Just kept for backward compatibility
 67      # and will be removed in the 3.0.0 release.
 68      error: AssessmentError | None = None
 69      # Should only be used internally. To create an assessment with an expectation, feedback,
 70      # or issue reference, use the `Expectation`, `Feedback`, or `IssueReference` classes instead.
 71      expectation: ExpectationValue | None = None
 72      feedback: FeedbackValue | None = None
 73      issue: IssueReferenceValue | None = None
 74      # The ID of the assessment which this assessment overrides.
 75      overrides: str | None = None
 76      # Whether this assessment is valid (i.e. has not been overridden).
 77      # This should not be set by the user, it is automatically set by the backend.
 78      valid: bool | None = None
 79  
 80      def __post_init__(self):
 81          from mlflow.tracing.constant import AssessmentMetadataKey
 82  
 83          if (self.expectation is not None) + (self.feedback is not None) + (
 84              self.issue is not None
 85          ) != 1:
 86              raise MlflowException.invalid_parameter_value(
 87                  "Exactly one of `expectation`, `feedback`, or `issue` should be specified.",
 88              )
 89  
 90          # Populate the error field to the feedback object
 91          if self.error is not None:
 92              if self.expectation is not None:
 93                  raise MlflowException.invalid_parameter_value(
 94                      "Cannot set `error` when `expectation` is specified.",
 95                  )
 96              if self.feedback is None:
 97                  raise MlflowException.invalid_parameter_value(
 98                      "Cannot set `error` when `feedback` is not specified.",
 99                  )
100              self.feedback.error = self.error
101  
102          # Set timestamp if not provided
103          current_time = int(time.time() * 1000)  # milliseconds
104          if self.create_time_ms is None:
105              self.create_time_ms = current_time
106          if self.last_update_time_ms is None:
107              self.last_update_time_ms = current_time
108  
109          if not isinstance(self.source, AssessmentSource):
110              raise MlflowException.invalid_parameter_value(
111                  "`source` must be an instance of `AssessmentSource`. "
112                  f"Got {type(self.source)} instead."
113              )
114          # Extract and set run_id from metadata but don't modify the proto representation
115          if (
116              self.run_id is None
117              and self.metadata
118              and AssessmentMetadataKey.SOURCE_RUN_ID in self.metadata
119          ):
120              self.run_id = self.metadata[AssessmentMetadataKey.SOURCE_RUN_ID]
121  
122      def to_proto(self):
123          assessment = ProtoAssessment()
124          assessment.assessment_name = self.name
125          assessment.trace_id = self.trace_id or ""
126  
127          assessment.source.CopyFrom(self.source.to_proto())
128  
129          # Convert time in milliseconds to protobuf Timestamp
130          assessment.create_time.FromMilliseconds(self.create_time_ms)
131          assessment.last_update_time.FromMilliseconds(self.last_update_time_ms)
132  
133          if self.span_id is not None:
134              assessment.span_id = self.span_id
135          if self.rationale is not None:
136              assessment.rationale = self.rationale
137          if self.assessment_id is not None:
138              assessment.assessment_id = self.assessment_id
139  
140          if self.expectation is not None:
141              assessment.expectation.CopyFrom(self.expectation.to_proto())
142          elif self.feedback is not None:
143              assessment.feedback.CopyFrom(self.feedback.to_proto())
144          elif self.issue is not None:
145              assessment.issue.CopyFrom(self.issue.to_proto())
146  
147          if self.metadata:
148              for key, value in self.metadata.items():
149                  assessment.metadata[key] = str(value)
150          if self.overrides:
151              assessment.overrides = self.overrides
152          if self.valid is not None:
153              assessment.valid = self.valid
154  
155          return assessment
156  
157      @classmethod
158      def from_proto(cls, proto):
159          if proto.WhichOneof("value") == "expectation":
160              return Expectation.from_proto(proto)
161          elif proto.WhichOneof("value") == "feedback":
162              return Feedback.from_proto(proto)
163          elif proto.WhichOneof("value") == "issue":
164              return IssueReference.from_proto(proto)
165          else:
166              raise MlflowException.invalid_parameter_value(
167                  f"Unknown assessment type: {proto.WhichOneof('value')}"
168              )
169  
170      def to_dictionary(self):
171          # Note that MessageToDict excludes None fields. For example, if assessment_id is None,
172          # it won't be included in the resulting dictionary.
173          return MessageToDict(self.to_proto(), preserving_proto_field_name=True)
174  
175      @classmethod
176      def from_dictionary(cls, d: dict[str, Any]) -> "Assessment":
177          if d.get("expectation"):
178              return Expectation.from_dictionary(d)
179          elif d.get("feedback"):
180              return Feedback.from_dictionary(d)
181          elif d.get("issue"):
182              return IssueReference.from_dictionary(d)
183          else:
184              raise MlflowException.invalid_parameter_value(
185                  f"Unknown assessment type: {d.get('assessment_name')}"
186              )
187  
188  
189  DEFAULT_FEEDBACK_NAME = "feedback"
190  
191  
@dataclass
class Feedback(Assessment):
    """
    Represents feedback about the output of an operation. For example, if the response
    from a generative AI application to a particular user query is correct, then a human
    or LLM judge may provide feedback with the value ``"correct"``.

    Args:
        name: The name of the assessment. Defaults to "feedback" when not provided.
        value: The feedback value. One of the following types:
            - float
            - int
            - str
            - bool
            - list of values of the same types as above
            - dict with string keys and values of the same types as above
        error: An optional error associated with the feedback, used to indicate that the
            feedback is not valid or cannot be processed. Accepts an exception object,
            an ``AssessmentError``, or a plain error-message string.
        rationale: The rationale / justification for the feedback.
        source: The source of the assessment. Defaults to the CODE source.
        trace_id: The ID of the trace associated with the assessment. If unset, the
            assessment is not associated with any trace yet.
        metadata: The metadata associated with the assessment.
        span_id: The ID of the span associated with the assessment, if the assessment
            should be associated with a particular span in the trace.
        create_time_ms: The creation time of the assessment in milliseconds. If unset,
            the current time is used.
        last_update_time_ms: The last update time of the assessment in milliseconds.
            If unset, the current time is used.

    Example:

        .. code-block:: python

            from mlflow.entities import AssessmentSource, Feedback

            feedback = Feedback(
                name="correctness",
                value=True,
                rationale="The response is correct.",
                source=AssessmentSource(
                    source_type="HUMAN",
                    source_id="john@example.com",
                ),
                metadata={"project": "my-project"},
            )
    """

    def __init__(
        self,
        name: str = DEFAULT_FEEDBACK_NAME,
        value: FeedbackValueType | None = None,
        error: Exception | AssessmentError | str | None = None,
        source: AssessmentSource | None = None,
        trace_id: str | None = None,
        metadata: dict[str, str] | None = None,
        span_id: str | None = None,
        create_time_ms: int | None = None,
        last_update_time_ms: int | None = None,
        rationale: str | None = None,
        overrides: str | None = None,
        valid: bool = True,
    ):
        if source is None:
            # Programmatically-produced feedback defaults to the CODE source.
            source = AssessmentSource(source_type=AssessmentSourceType.CODE)

        def _coerce_error(err):
            # Normalize exceptions and strings into AssessmentError objects;
            # reject anything that isn't Exception / str / AssessmentError / None.
            if isinstance(err, Exception):
                return AssessmentError(
                    error_message=str(err),
                    error_code=err.__class__.__name__,
                    stack_trace=get_stacktrace(err),
                )
            if isinstance(err, str):
                return AssessmentError(
                    error_message=err,
                    error_code="ASSESSMENT_ERROR",
                )
            if err is not None and not isinstance(err, AssessmentError):
                raise MlflowException.invalid_parameter_value(
                    f"'error' must be an Exception, AssessmentError, or string. Got: {type(err)}"
                )
            return err

        error = _coerce_error(error)

        super().__init__(
            name=name,
            source=source,
            trace_id=trace_id,
            rationale=rationale,
            metadata=metadata,
            span_id=span_id,
            create_time_ms=create_time_ms,
            last_update_time_ms=last_update_time_ms,
            feedback=FeedbackValue(value=value, error=error),
            overrides=overrides,
            valid=valid,
        )
        # Keep the deprecated top-level `error` attribute populated for old callers.
        self.error = error

    @property
    def value(self) -> FeedbackValueType:
        """The feedback value stored on the underlying FeedbackValue payload."""
        return self.feedback.value

    @value.setter
    def value(self, value: FeedbackValueType):
        self.feedback.value = value

    @classmethod
    def from_proto(cls, proto):
        """Build a ``Feedback`` from a ``ProtoAssessment`` carrying a feedback payload."""
        from mlflow.utils.databricks_tracing_utils import get_trace_id_from_assessment_proto

        payload = FeedbackValue.from_proto(proto.feedback)
        instance = cls(
            trace_id=get_trace_id_from_assessment_proto(proto),
            name=proto.assessment_name,
            source=AssessmentSource.from_proto(proto.source),
            create_time_ms=proto.create_time.ToMilliseconds(),
            last_update_time_ms=proto.last_update_time.ToMilliseconds(),
            value=payload.value,
            error=payload.error,
            rationale=proto.rationale or None,
            # Convert ScalarMapContainer to a normal Python dict.
            metadata=dict(proto.metadata) if proto.metadata else None,
            span_id=proto.span_id or None,
            overrides=proto.overrides or None,
            valid=proto.valid,
        )
        instance.assessment_id = proto.assessment_id or None
        return instance

    @classmethod
    def from_dictionary(cls, d: dict[str, Any]) -> "Feedback":
        """Build a ``Feedback`` from its dictionary representation."""
        raw_payload = d.get("feedback")
        if not raw_payload:
            raise MlflowException.invalid_parameter_value(
                "`feedback` must exist in the dictionary."
            )
        payload = FeedbackValue.from_dictionary(raw_payload)

        instance = cls(
            trace_id=d.get("trace_id"),
            name=d["assessment_name"],
            source=AssessmentSource.from_dictionary(d["source"]),
            create_time_ms=proto_timestamp_to_milliseconds(d["create_time"]),
            last_update_time_ms=proto_timestamp_to_milliseconds(d["last_update_time"]),
            value=payload.value,
            error=payload.error,
            rationale=d.get("rationale"),
            metadata=d.get("metadata"),
            span_id=d.get("span_id"),
            overrides=d.get("overrides"),
            valid=d.get("valid", True),
        )
        instance.assessment_id = d.get("assessment_id") or None
        return instance

    # Backward compatibility: the old assessment object exposed these at top level.
    @property
    def error_code(self) -> str | None:
        """The error code of the error that occurred when the feedback was created."""
        err = self.feedback.error
        return err.error_code if err else None

    @property
    def error_message(self) -> str | None:
        """The error message of the error that occurred when the feedback was created."""
        err = self.feedback.error
        return err.error_message if err else None
364  
365  
@dataclass
class Expectation(Assessment):
    """
    Represents an expectation about the output of an operation, such as the expected
    response that a generative AI application should provide to a particular user query.

    Args:
        name: The name of the assessment.
        value: The expected value of the operation. Any JSON-serializable value.
        source: The source of the assessment. Defaults to the HUMAN source.
        trace_id: The ID of the trace associated with the assessment. If unset, the
            assessment is not associated with any trace yet.
        metadata: The metadata associated with the assessment.
        span_id: The ID of the span associated with the assessment, if the assessment
            should be associated with a particular span in the trace.
        create_time_ms: The creation time of the assessment in milliseconds. If unset,
            the current time is used.
        last_update_time_ms: The last update time of the assessment in milliseconds.
            If unset, the current time is used.

    Example:

        .. code-block:: python

            from mlflow.entities import AssessmentSource, Expectation

            expectation = Expectation(
                name="expected_response",
                value="The capital of France is Paris.",
                source=AssessmentSource(
                    source_type=AssessmentSourceType.HUMAN,
                    source_id="john@example.com",
                ),
                metadata={"project": "my-project"},
            )
    """

    def __init__(
        self,
        name: str,
        value: Any,
        source: AssessmentSource | None = None,
        trace_id: str | None = None,
        metadata: dict[str, str] | None = None,
        span_id: str | None = None,
        create_time_ms: int | None = None,
        last_update_time_ms: int | None = None,
    ):
        if source is None:
            # Expectations are typically ground-truth labels supplied by humans.
            source = AssessmentSource(source_type=AssessmentSourceType.HUMAN)

        if value is None:
            raise MlflowException.invalid_parameter_value("The `value` field must be specified.")

        super().__init__(
            name=name,
            source=source,
            trace_id=trace_id,
            metadata=metadata,
            span_id=span_id,
            create_time_ms=create_time_ms,
            last_update_time_ms=last_update_time_ms,
            expectation=ExpectationValue(value=value),
        )

    @property
    def value(self) -> Any:
        """The expected value stored on the underlying ExpectationValue payload."""
        return self.expectation.value

    @value.setter
    def value(self, value: Any):
        self.expectation.value = value

    @classmethod
    def from_proto(cls, proto) -> "Expectation":
        """Build an ``Expectation`` from a ``ProtoAssessment`` carrying an expectation."""
        from mlflow.utils.databricks_tracing_utils import get_trace_id_from_assessment_proto

        payload = ExpectationValue.from_proto(proto.expectation)
        instance = cls(
            trace_id=get_trace_id_from_assessment_proto(proto),
            name=proto.assessment_name,
            source=AssessmentSource.from_proto(proto.source),
            create_time_ms=proto.create_time.ToMilliseconds(),
            last_update_time_ms=proto.last_update_time.ToMilliseconds(),
            value=payload.value,
            # Convert ScalarMapContainer to a normal Python dict.
            metadata=dict(proto.metadata) if proto.metadata else None,
            span_id=proto.span_id or None,
        )
        instance.assessment_id = proto.assessment_id or None
        return instance

    @classmethod
    def from_dictionary(cls, d: dict[str, Any]) -> "Expectation":
        """Build an ``Expectation`` from its dictionary representation."""
        raw_payload = d.get("expectation")
        if not raw_payload:
            raise MlflowException.invalid_parameter_value(
                "`expectation` must exist in the dictionary."
            )
        payload = ExpectationValue.from_dictionary(raw_payload)

        instance = cls(
            trace_id=d.get("trace_id"),
            name=d["assessment_name"],
            source=AssessmentSource.from_dictionary(d["source"]),
            create_time_ms=proto_timestamp_to_milliseconds(d["create_time"]),
            last_update_time_ms=proto_timestamp_to_milliseconds(d["last_update_time"]),
            value=payload.value,
            metadata=d.get("metadata"),
            span_id=d.get("span_id"),
        )
        instance.assessment_id = d.get("assessment_id") or None
        return instance
483  
484  
485  _JSON_SERIALIZATION_FORMAT = "JSON_FORMAT"
486  
487  
@dataclass
class IssueReference(Assessment):
    """
    Represents a reference to an issue associated with a trace. This type of assessment
    is used internally to link traces to discovered issues.

    Args:
        issue_id: The ID of the issue this assessment references (stored in assessment name).
        issue_name: The name of the issue (stored in the issue value).
        source: The source of the assessment. If not provided, the default source is
            LLM_JUDGE.
        trace_id: The ID of the trace associated with the assessment.
        run_id: The ID of the run that discovered the issue.
        rationale: The rationale / justification for the issue reference.
        metadata: The metadata associated with the assessment.
        span_id: The ID of the span associated with the assessment, if applicable.
        create_time_ms: The creation time of the assessment in milliseconds.
        last_update_time_ms: The last update time of the assessment in milliseconds.
    """

    def __init__(
        self,
        issue_id: str,
        issue_name: str,
        source: AssessmentSource | None = None,
        trace_id: str | None = None,
        run_id: str | None = None,
        rationale: str | None = None,
        metadata: dict[str, str] | None = None,
        span_id: str | None = None,
        create_time_ms: int | None = None,
        last_update_time_ms: int | None = None,
    ):
        # NB: unlike Feedback (CODE) and Expectation (HUMAN), issue references
        # default to the LLM_JUDGE source.
        if source is None:
            source = AssessmentSource(source_type=AssessmentSourceType.LLM_JUDGE)

        if issue_id is None:
            raise MlflowException.invalid_parameter_value("The `issue_id` field must be specified.")
        if issue_name is None:
            raise MlflowException.invalid_parameter_value(
                "The `issue_name` field must be specified."
            )

        super().__init__(
            name=issue_id,
            source=source,
            trace_id=trace_id,
            run_id=run_id,
            rationale=rationale,
            metadata=metadata,
            span_id=span_id,
            create_time_ms=create_time_ms,
            last_update_time_ms=last_update_time_ms,
            issue=IssueReferenceValue(issue_name=issue_name),
        )

    # The issue ID is stored in the base class's `name` field.
    @property
    def issue_id(self) -> str:
        return self.name

    @issue_id.setter
    def issue_id(self, issue_id: str):
        self.name = issue_id

    # The issue name is stored in the IssueReferenceValue payload.
    @property
    def issue_name(self) -> str:
        return self.issue.issue_name

    @issue_name.setter
    def issue_name(self, issue_name: str):
        self.issue.issue_name = issue_name

    @classmethod
    def from_proto(cls, proto) -> "IssueReference":
        """Build an ``IssueReference`` from a ``ProtoAssessment`` carrying an issue."""
        from mlflow.utils.databricks_tracing_utils import get_trace_id_from_assessment_proto

        # Convert ScalarMapContainer to a normal Python dict.
        metadata = dict(proto.metadata) if proto.metadata else None
        issue_ref = cls(
            trace_id=get_trace_id_from_assessment_proto(proto),
            issue_id=proto.assessment_name,
            issue_name=proto.issue.issue_name,
            source=AssessmentSource.from_proto(proto.source),
            create_time_ms=proto.create_time.ToMilliseconds(),
            last_update_time_ms=proto.last_update_time.ToMilliseconds(),
            rationale=proto.rationale or None,
            metadata=metadata,
            span_id=proto.span_id or None,
        )
        issue_ref.assessment_id = proto.assessment_id or None
        return issue_ref

    @classmethod
    def from_dictionary(cls, d: dict[str, Any]) -> "IssueReference":
        """Build an ``IssueReference`` from its dictionary representation."""
        issue_value = d.get("issue")

        if not issue_value:
            raise MlflowException.invalid_parameter_value("`issue` must exist in the dictionary.")

        issue_ref = cls(
            trace_id=d.get("trace_id"),
            issue_id=d["assessment_name"],
            issue_name=issue_value["issue_name"],
            source=AssessmentSource.from_dictionary(d["source"]),
            create_time_ms=proto_timestamp_to_milliseconds(d["create_time"]),
            last_update_time_ms=proto_timestamp_to_milliseconds(d["last_update_time"]),
            rationale=d.get("rationale"),
            metadata=d.get("metadata"),
            span_id=d.get("span_id"),
        )

        issue_ref.assessment_id = d.get("assessment_id") or None
        if run_id := d.get("run_id"):
            issue_ref.run_id = run_id
        return issue_ref
600  
601  
@dataclass
class IssueReferenceValue(_MlflowObject):
    """Represents an issue reference value."""

    issue_name: str

    def to_proto(self):
        """Convert to a ``ProtoIssueReference`` message."""
        return ProtoIssueReference(issue_name=self.issue_name)

    @classmethod
    def from_proto(cls, proto) -> "IssueReferenceValue":
        """Build an ``IssueReferenceValue`` from a ``ProtoIssueReference``."""
        return cls(issue_name=proto.issue_name)

    def to_dictionary(self):
        """Return the dictionary representation of this value."""
        return {"issue_name": self.issue_name}

    @classmethod
    def from_dictionary(cls, d):
        """Build an ``IssueReferenceValue`` from its dictionary representation."""
        return cls(issue_name=d["issue_name"])
621  
622  
@dataclass
class ExpectationValue(_MlflowObject):
    """Represents an expectation value."""

    value: Any

    def to_proto(self):
        """Convert to a ``ProtoExpectation``.

        Primitive values map directly onto a protobuf ``Value``; non-primitive values
        are stored as a JSON string under ``serialized_value`` so that arbitrary
        JSON-serializable values round-trip.

        Raises:
            MlflowException: If the value is not JSON-serializable.
        """
        if self._need_serialization():
            try:
                serialized_value = json.dumps(self.value)
            except Exception as e:
                raise MlflowException.invalid_parameter_value(
                    f"Failed to serialize value {self.value} to JSON string. "
                    "Expectation value must be JSON-serializable."
                ) from e
            return ProtoExpectation(
                serialized_value=ProtoExpectation.SerializedValue(
                    serialization_format=_JSON_SERIALIZATION_FORMAT,
                    value=serialized_value,
                )
            )

        return ProtoExpectation(value=ParseDict(self.value, Value()))

    @classmethod
    # NB: fixed return annotation — this constructs an ExpectationValue, not an
    # Expectation assessment.
    def from_proto(cls, proto) -> "ExpectationValue":
        """Build an ``ExpectationValue`` from a ``ProtoExpectation``.

        Raises:
            MlflowException: If the proto uses an unsupported serialization format.
        """
        if proto.HasField("serialized_value"):
            if proto.serialized_value.serialization_format != _JSON_SERIALIZATION_FORMAT:
                raise MlflowException.invalid_parameter_value(
                    f"Unknown serialization format: {proto.serialized_value.serialization_format}. "
                    "Only JSON_FORMAT is supported."
                )
            return cls(value=json.loads(proto.serialized_value.value))
        else:
            return cls(value=MessageToDict(proto.value))

    def to_dictionary(self):
        """Return the proto-derived dict representation (None fields are omitted)."""
        return MessageToDict(self.to_proto(), preserving_proto_field_name=True)

    @classmethod
    def from_dictionary(cls, d):
        """Build an ``ExpectationValue`` from its dictionary representation.

        Raises:
            MlflowException: If neither 'value' nor 'serialized_value' is present.
        """
        if "value" in d:
            return cls(d["value"])
        elif "serialized_value" in d:
            return cls(value=json.loads(d["serialized_value"]["value"]))
        else:
            raise MlflowException.invalid_parameter_value(
                "Either 'value' or 'serialized_value' must be present in the dictionary "
                "representation of an Expectation."
            )

    def _need_serialization(self):
        # Non-primitive values (lists, dicts, etc.) must be serialized as a JSON
        # string; None and scalar primitives are representable directly as a
        # protobuf Value, so they skip serialization.
        return self.value is not None and not isinstance(self.value, (int, float, bool, str))
677  
678  
@dataclass
class FeedbackValue(_MlflowObject):
    """Represents a feedback value."""

    value: FeedbackValueType
    error: AssessmentError | None = None

    def to_proto(self):
        """Convert to a ``ProtoFeedback``; the value is encoded as a protobuf ``Value``."""
        return ProtoFeedback(
            value=ParseDict(self.value, Value(), ignore_unknown_fields=True),
            error=self.error.to_proto() if self.error else None,
        )

    @classmethod
    def from_proto(cls, proto) -> "FeedbackValue":
        """Build a ``FeedbackValue`` from a ``ProtoFeedback``."""
        # Use `cls` rather than the hard-coded class name so subclasses construct
        # instances of themselves, consistent with the other `from_proto` alternate
        # constructors in this module.
        return cls(
            value=MessageToDict(proto.value),
            error=AssessmentError.from_proto(proto.error) if proto.HasField("error") else None,
        )

    def to_dictionary(self):
        """Return the proto-derived dict representation (None fields are omitted)."""
        return MessageToDict(self.to_proto(), preserving_proto_field_name=True)

    @classmethod
    def from_dictionary(cls, d):
        """Build a ``FeedbackValue`` from its dictionary representation."""
        return cls(
            value=d["value"],
            error=AssessmentError.from_dictionary(err) if (err := d.get("error")) else None,
        )