# mlflow/models/flavor_backend.py
from abc import ABCMeta, abstractmethod

from mlflow.utils.annotations import developer_stable


@developer_stable
class FlavorBackend(metaclass=ABCMeta):
    """
    Abstract base class for flavor backends.
    This class defines the API interface for local model deployment of MLflow model flavors.
    """

    def __init__(self, config, **kwargs):
        self._config = config

    @abstractmethod
    def predict(self, model_uri, input_path, output_path, content_type):
        """
        Generate predictions using a saved MLflow model referenced by the given URI.
        Input is read from a file or stdin, and output predictions are written to a file
        or stdout.

        Args:
            model_uri: URI pointing to the MLflow model to be used for scoring.
            input_path: Path to the file containing input data. If not specified, data is
                read from stdin.
            output_path: Path to the file to which output predictions are written. If not
                specified, predictions are written to stdout.
            content_type: Specifies the input format. Can be one of {``json``, ``csv``}.
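
        A minimal usage sketch (illustrative only; ``backend`` stands in for a concrete
        subclass instance, and the model URI and file paths are placeholders):

        .. code-block:: python

            # Hypothetical model URI and paths; content_type must be "json" or "csv".
            backend.predict(
                model_uri="models:/my_model/1",
                input_path="input.csv",
                output_path="predictions.json",
                content_type="csv",
            )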
        """

    @abstractmethod
    def serve(
        self,
        model_uri,
        port,
        host,
        timeout,
        enable_mlserver,
        synchronous=True,
        stdout=None,
        stderr=None,
    ):
        """
        Serve the specified MLflow model locally.

        Args:
            model_uri: URI pointing to the MLflow model to be used for scoring.
            port: Port to use for the model deployment.
            host: Host to use for the model deployment. Defaults to ``localhost``.
            timeout: Timeout in seconds to serve a request. Defaults to 60.
            enable_mlserver: Whether to use MLServer or the local scoring server.
            synchronous: If ``True``, block until the server process exits; an exception is
                raised if the process exits with a non-zero return code. If ``False``, return
                the server process ``Popen`` instance immediately.
            stdout: If specified, redirect the server's stdout to this stream.
            stderr: If specified, redirect the server's stderr to this stream.
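
        A minimal usage sketch (illustrative only; ``backend`` stands in for a concrete
        subclass instance, and the model URI is a placeholder):

        .. code-block:: python

            # Blocks until the server process exits (synchronous=True by default).
            backend.serve(
                model_uri="models:/my_model/1",
                port=5000,
                host="localhost",
                timeout=60,
                enable_mlserver=False,
            )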
        """

    def prepare_env(self, model_uri, capture_output=False):
        """
        Perform any preparation necessary to predict or serve the model, for example
        downloading dependencies or initializing a conda environment. After preparation,
        calling ``predict`` or ``serve`` should be fast.
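
        A minimal usage sketch (illustrative only; ``backend`` stands in for a concrete
        subclass instance, and the model URI is a placeholder):

        .. code-block:: python

            # Prepare the environment once so that later predict/serve calls start quickly.
            backend.prepare_env(model_uri="models:/my_model/1")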
        """

    @abstractmethod
    def build_image(
        self,
        model_uri,
        image_name,
        install_java=False,
        install_mlflow=False,
        mlflow_home=None,
        enable_mlserver=False,
        base_image=None,
    ):
        """
        Build a Docker image capable of serving the specified MLflow model.
        """

    @abstractmethod
    def generate_dockerfile(
        self,
        model_uri,
        output_dir,
        install_java=False,
        install_mlflow=False,
        mlflow_home=None,
        enable_mlserver=False,
        base_image=None,
    ):
        """
        Write a Dockerfile for serving the specified MLflow model to ``output_dir``.
        """

    @abstractmethod
    def can_score_model(self):
        """
        Check whether this flavor backend can be deployed in the current environment.

        Returns:
            True if this flavor backend can be applied in the current environment.
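
        A minimal usage sketch (illustrative only; ``backend`` stands in for a concrete
        subclass instance):

        .. code-block:: python

            # Only prepare the environment if this backend can run locally.
            if backend.can_score_model():
                backend.prepare_env(model_uri="models:/my_model/1")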
        """

    def can_build_image(self):
        """
        Returns:
            True if this flavor has a ``build_image`` method defined for building a Docker
            container capable of serving the model, False otherwise.
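
        A minimal usage sketch (illustrative only; ``backend``, the model URI, and the
        image name are placeholders):

        .. code-block:: python

            # Guard Docker image construction on backend capability.
            if backend.can_build_image():
                backend.build_image(model_uri="models:/my_model/1", image_name="my-model")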
        """
        return callable(getattr(self.__class__, "build_image", None))