# tests/examples/test_examples.py
  1  import os
  2  import re
  3  import shutil
  4  import sys
  5  import uuid
  6  from pathlib import Path
  7  
  8  import pytest
  9  
 10  import mlflow
 11  from mlflow import cli
 12  from mlflow.utils import process
 13  from mlflow.utils.virtualenv import _get_mlflow_virtualenv_root
 14  
 15  from tests.helper_functions import clear_hub_cache, flaky, start_mock_openai_server
 16  from tests.integration.utils import invoke_cli_runner
 17  
 18  EXAMPLES_DIR = "examples"
 19  
 20  
 21  def find_python_env_yaml(directory: Path) -> Path:
 22      return next(filter(lambda p: p.name == "python_env.yaml", Path(directory).iterdir()))
 23  
 24  
 25  def replace_mlflow_with_dev_version(yml_path: Path) -> None:
 26      old_src = yml_path.read_text()
 27      mlflow_dir = Path(mlflow.__path__[0]).parent
 28      new_src = re.sub(r"- mlflow.*\n", f"- {mlflow_dir}\n", old_src)
 29      yml_path.write_text(new_src)
 30  
 31  
 32  @pytest.fixture(autouse=True)
 33  def clean_up_mlflow_virtual_environments():
 34      yield
 35  
 36      for path in Path(_get_mlflow_virtualenv_root()).iterdir():
 37          if path.is_dir():
 38              shutil.rmtree(path)
 39  
 40  
 41  @pytest.fixture(scope="module", autouse=True)
 42  def mock_openai():
 43      # Some examples includes OpenAI API calls, so we start a mock server.
 44      with start_mock_openai_server() as base_url:
 45          with pytest.MonkeyPatch.context() as mp:
 46              mp.setenv("OPENAI_API_BASE", base_url)
 47              mp.setenv("OPENAI_API_KEY", "test")
 48              yield
 49  
 50  
 51  @pytest.mark.notrackingurimock
 52  @flaky()
 53  @pytest.mark.parametrize(
 54      ("directory", "params"),
 55      [
 56          ("h2o", []),
 57          # TODO: Fix the hyperparam example and re-enable it
 58          # ("hyperparam", ["-e", "train", "-P", "epochs=1"]),
 59          # ("hyperparam", ["-e", "random", "-P", "epochs=1"]),
 60          # ("hyperparam", ["-e", "hyperopt", "-P", "epochs=1"]),
 61          (
 62              "lightgbm/lightgbm_native",
 63              ["-P", "learning_rate=0.1", "-P", "colsample_bytree=0.8", "-P", "subsample=0.9"],
 64          ),
 65          ("lightgbm/lightgbm_sklearn", []),
 66          ("statsmodels", ["-P", "inverse_method=qr"]),
 67          ("pytorch", ["-P", "epochs=2"]),
 68          ("sklearn_logistic_regression", []),
 69          ("sklearn_elasticnet_wine", ["-P", "alpha=0.5"]),
 70          ("sklearn_elasticnet_diabetes/linux", []),
 71          ("spacy", []),
 72          (
 73              "xgboost/xgboost_native",
 74              ["-P", "learning_rate=0.3", "-P", "colsample_bytree=0.8", "-P", "subsample=0.9"],
 75          ),
 76          ("xgboost/xgboost_sklearn", []),
 77          ("pytorch/MNIST", ["-P", "max_epochs=1"]),
 78          ("pytorch/HPOExample", ["-P", "n_trials=2", "-P", "max_epochs=1"]),
 79          ("pytorch/CaptumExample", ["-P", "max_epochs=50"]),
 80          ("supply_chain_security", []),
 81          ("tensorflow", []),
 82          ("sktime", []),
 83      ],
 84  )
 85  def test_mlflow_run_example(directory, params, tmp_path):
 86      # Use tmp_path+uuid as tmp directory to avoid the same
 87      # directory being reused when re-trying the test since
 88      # tmp_path is named as the test name
 89      random_tmp_path = tmp_path / str(uuid.uuid4())
 90      mlflow.set_tracking_uri(random_tmp_path.joinpath("mlruns").as_uri())
 91      example_dir = Path(EXAMPLES_DIR, directory)
 92      tmp_example_dir = random_tmp_path.joinpath(example_dir)
 93      shutil.copytree(example_dir, tmp_example_dir)
 94      python_env_path = find_python_env_yaml(tmp_example_dir)
 95      replace_mlflow_with_dev_version(python_env_path)
 96      cli_run_list = [tmp_example_dir] + params
 97      invoke_cli_runner(cli.run, list(map(str, cli_run_list)))
 98  
 99  
@pytest.mark.notrackingurimock
@pytest.mark.parametrize(
    ("directory", "command"),
    [
        ("docker", ["docker", "build", "-t", "mlflow-docker-example", "-f", "Dockerfile", "."]),
        ("keras", [sys.executable, "train.py"]),
        (
            "lightgbm/lightgbm_native",
            [
                sys.executable,
                "train.py",
                "--learning-rate",
                "0.2",
                "--colsample-bytree",
                "0.8",
                "--subsample",
                "0.9",
            ],
        ),
        ("lightgbm/lightgbm_sklearn", [sys.executable, "train.py"]),
        ("statsmodels", [sys.executable, "train.py", "--inverse-method", "qr"]),
        ("quickstart", [sys.executable, "mlflow_tracking.py"]),
        ("remote_store", [sys.executable, "remote_server.py"]),
        (
            "xgboost/xgboost_native",
            [
                sys.executable,
                "train.py",
                "--learning-rate",
                "0.2",
                "--colsample-bytree",
                "0.8",
                "--subsample",
                "0.9",
            ],
        ),
        ("xgboost/xgboost_sklearn", [sys.executable, "train.py"]),
        ("catboost", [sys.executable, "train.py"]),
        ("prophet", [sys.executable, "train.py"]),
        ("sklearn_autolog", [sys.executable, "linear_regression.py"]),
        ("sklearn_autolog", [sys.executable, "pipeline.py"]),
        ("sklearn_autolog", [sys.executable, "grid_search_cv.py"]),
        ("pyspark_ml_autologging", [sys.executable, "logistic_regression.py"]),
        ("pyspark_ml_autologging", [sys.executable, "one_vs_rest.py"]),
        ("pyspark_ml_autologging", [sys.executable, "pipeline.py"]),
        ("shap", [sys.executable, "regression.py"]),
        ("shap", [sys.executable, "binary_classification.py"]),
        ("shap", [sys.executable, "multiclass_classification.py"]),
        ("shap", [sys.executable, "explainer_logging.py"]),
        ("ray_serve", [sys.executable, "train_model.py"]),
        ("pip_requirements", [sys.executable, "pip_requirements.py"]),
        ("pmdarima", [sys.executable, "train.py"]),
        ("evaluation", [sys.executable, "evaluate_on_binary_classifier.py"]),
        ("evaluation", [sys.executable, "evaluate_on_multiclass_classifier.py"]),
        ("evaluation", [sys.executable, "evaluate_on_regressor.py"]),
        ("evaluation", [sys.executable, "evaluate_with_custom_metrics.py"]),
        ("evaluation", [sys.executable, "evaluate_with_custom_metrics_comprehensive.py"]),
        ("evaluation", [sys.executable, "evaluate_with_model_validation.py"]),
        ("spark_udf", [sys.executable, "spark_udf_datetime.py"]),
        ("pyfunc", [sys.executable, "train.py"]),
        ("tensorflow", [sys.executable, "train.py"]),
        ("transformers", [sys.executable, "conversational.py"]),
        ("transformers", [sys.executable, "load_components.py"]),
        ("transformers", [sys.executable, "simple.py"]),
        ("transformers", [sys.executable, "sentence_transformer.py"]),
        ("transformers", [sys.executable, "whisper.py"]),
        ("sentence_transformers", [sys.executable, "simple.py"]),
        ("tracing", [sys.executable, "fluent.py"]),
        ("tracing", [sys.executable, "client.py"]),
        ("llama_index", [sys.executable, "simple_index.py"]),
        ("llama_index", [sys.executable, "autolog.py"]),
    ],
)
def test_command_example(directory, command):
    """Execute an example's entry-point command inside its own directory."""
    # MLFLOW_HOME must be set so the examples pick up the dev MLflow checkout.
    assert "MLFLOW_HOME" in os.environ
    example_cwd = Path(EXAMPLES_DIR) / directory
    if directory == "transformers":
        # NB: Clearing the huggingface_hub cache is to lower the disk storage
        # pressure for CI
        clear_hub_cache()

    process._exec_cmd(command, cwd=example_cwd, env=os.environ)