# tests/test_runs.py
  1  import json
  2  import logging
  3  import os
  4  import textwrap
  5  from unittest import mock
  6  from unittest.mock import patch
  7  
  8  import pytest
  9  from click.testing import CliRunner
 10  
 11  import mlflow
 12  from mlflow import experiments
 13  from mlflow.exceptions import MlflowException
 14  from mlflow.runs import create_run, link_traces, list_run
 15  
 16  
 17  @pytest.fixture(autouse=True)
 18  def suppress_logging():
 19      """Suppress logging for all tests to ensure clean CLI output."""
 20      # Suppress all logging
 21      logging.disable(logging.CRITICAL)
 22  
 23      yield
 24  
 25      # Re-enable logging
 26      logging.disable(logging.NOTSET)
 27  
 28  
 29  def test_list_run():
 30      with mlflow.start_run(run_name="apple"):
 31          pass
 32      result = CliRunner().invoke(list_run, ["--experiment-id", "0"])
 33      assert "apple" in result.output
 34  
 35  
 36  def test_list_run_experiment_id_required():
 37      result = CliRunner().invoke(list_run, [])
 38      assert "Missing option '--experiment-id'" in result.output
 39  
 40  
 41  @pytest.mark.skipif(
 42      "MLFLOW_SKINNY" in os.environ,
 43      reason="Skinny Client does not support predict due to the pandas dependency",
 44  )
 45  def test_csv_generation(tmp_path):
 46      import numpy as np
 47      import pandas as pd
 48  
 49      with mock.patch(
 50          "mlflow.experiments.fluent.search_runs",
 51          return_value=pd.DataFrame(
 52              {
 53                  "run_id": np.array(["all_set", "with_none", "with_nan"]),
 54                  "experiment_id": np.array([1, 1, 1]),
 55                  "param_optimizer": np.array(["Adam", None, "Adam"]),
 56                  "avg_loss": np.array([42.0, None, np.nan], dtype=np.float32),
 57              },
 58              columns=["run_id", "experiment_id", "param_optimizer", "avg_loss"],
 59          ),
 60      ):
 61          expected_csv = textwrap.dedent(
 62              """\
 63          run_id,experiment_id,param_optimizer,avg_loss
 64          all_set,1,Adam,42.0
 65          with_none,1,,
 66          with_nan,1,Adam,
 67          """
 68          )
 69          result_filename = os.path.join(tmp_path, "result.csv")
 70          CliRunner().invoke(
 71              experiments.generate_csv_with_runs,
 72              ["--experiment-id", "1", "--filename", result_filename],
 73          )
 74          with open(result_filename) as fd:
 75              assert expected_csv == fd.read()
 76  
 77  
 78  def test_create_run_with_experiment_id():
 79      mlflow.create_experiment("test_create_run_exp")
 80      exp = mlflow.get_experiment_by_name("test_create_run_exp")
 81  
 82      result = CliRunner().invoke(create_run, ["--experiment-id", exp.experiment_id])
 83      assert result.exit_code == 0
 84  
 85      output = json.loads(result.output)
 86      assert "run_id" in output
 87      assert output["experiment_id"] == exp.experiment_id
 88      assert output["status"] == "FINISHED"
 89  
 90      # Verify the run was created
 91      run = mlflow.get_run(output["run_id"])
 92      assert run.info.experiment_id == exp.experiment_id
 93      assert run.info.status == "FINISHED"
 94  
 95  
 96  def test_create_run_with_experiment_name():
 97      exp_name = "test_create_run_by_name"
 98  
 99      result = CliRunner().invoke(create_run, ["--experiment-name", exp_name])
100      assert result.exit_code == 0
101  
102      output = json.loads(result.output)
103      assert "run_id" in output
104      assert output["status"] == "FINISHED"
105  
106      # Verify experiment was created
107      exp = mlflow.get_experiment_by_name(exp_name)
108      assert exp is not None
109      assert output["experiment_id"] == exp.experiment_id
110  
111  
def test_create_run_with_custom_name_and_description():
    """--run-name and --description are both persisted on the created run."""
    mlflow.create_experiment("test_run_with_details")
    experiment = mlflow.get_experiment_by_name("test_run_with_details")

    run_name = "my-custom-run"
    description = "This is a test run"

    cli_args = [
        "--experiment-id",
        experiment.experiment_id,
        "--run-name",
        run_name,
        "--description",
        description,
    ]
    result = CliRunner().invoke(create_run, cli_args)
    assert result.exit_code == 0

    payload = json.loads(result.output)
    assert payload["run_name"] == run_name

    # The description lands in the mlflow.note.content tag.
    created = mlflow.get_run(payload["run_id"])
    assert created.info.run_name == run_name
    assert created.data.tags.get("mlflow.note.content") == description
139  
140  
def test_create_run_with_tags():
    """Each repeated --tags option becomes an individual tag on the run."""
    mlflow.create_experiment("test_run_with_tags")
    experiment = mlflow.get_experiment_by_name("test_run_with_tags")

    cli_args = ["--experiment-id", experiment.experiment_id]
    for pair in ("env=test", "model=linear", "version=1.0"):
        cli_args += ["--tags", pair]

    result = CliRunner().invoke(create_run, cli_args)
    assert result.exit_code == 0

    created = mlflow.get_run(json.loads(result.output)["run_id"])
    assert created.data.tags["env"] == "test"
    assert created.data.tags["model"] == "linear"
    assert created.data.tags["version"] == "1.0"
166  
167  
@pytest.mark.parametrize("status", ["FAILED", "KILLED"])
def test_create_run_with_different_status(status):
    """--status controls the terminal status recorded for the created run."""
    mlflow.create_experiment("test_run_statuses")
    experiment = mlflow.get_experiment_by_name("test_run_statuses")

    result = CliRunner().invoke(
        create_run, ["--experiment-id", experiment.experiment_id, "--status", status]
    )
    assert result.exit_code == 0

    payload = json.loads(result.output)
    assert payload["status"] == status
    # Confirm the stored run carries the same status as the CLI output.
    assert mlflow.get_run(payload["run_id"]).info.status == status
182  
183  
def test_create_run_missing_experiment():
    """With neither experiment option given, the command must fail."""
    result = CliRunner().invoke(create_run, [])
    assert "Must specify exactly one of --experiment-id or --experiment-name" in result.output
    assert result.exit_code != 0
188  
189  
def test_create_run_both_experiment_params():
    """Supplying both experiment options at once is rejected as ambiguous."""
    cli_args = ["--experiment-id", "0", "--experiment-name", "test"]
    result = CliRunner().invoke(create_run, cli_args)
    assert result.exit_code != 0
    assert "Must specify exactly one of --experiment-id or --experiment-name" in result.output
194  
195  
def test_create_run_invalid_tag_format():
    """A --tags value without a key=value separator is rejected."""
    mlflow.create_experiment("test_invalid_tag")
    experiment = mlflow.get_experiment_by_name("test_invalid_tag")

    cli_args = ["--experiment-id", experiment.experiment_id, "--tags", "invalid-tag"]
    result = CliRunner().invoke(create_run, cli_args)
    assert result.exit_code != 0
    assert "Invalid tag format" in result.output
205  
206  
def test_create_run_duplicate_tag_key():
    """Passing the same tag key twice is rejected."""
    mlflow.create_experiment("test_duplicate_tag")
    experiment = mlflow.get_experiment_by_name("test_duplicate_tag")

    cli_args = [
        "--experiment-id",
        experiment.experiment_id,
        "--tags",
        "env=test",
        "--tags",
        "env=prod",
    ]
    result = CliRunner().invoke(create_run, cli_args)
    assert result.exit_code != 0
    assert "Duplicate tag key" in result.output
217  
218  
def test_link_traces_single_trace():
    """Linking one trace calls the client exactly once and reports success."""
    with patch("mlflow.runs.MlflowClient.link_traces_to_run") as linked:
        result = CliRunner().invoke(
            link_traces, ["--run-id", "test_run_123", "--trace-id", "trace_abc"]
        )

        assert result.exit_code == 0
        linked.assert_called_once_with(["trace_abc"], "test_run_123")
        assert "Successfully linked 1 trace(s) to run 'test_run_123'" in result.output
229  
230  
def test_link_traces_multiple_traces():
    """Repeated --trace-id options are forwarded to the client as one list."""
    trace_ids = ["trace_1", "trace_2", "trace_3"]
    cli_args = ["--run-id", "test_run_456"]
    for trace_id in trace_ids:
        cli_args += ["--trace-id", trace_id]

    with patch("mlflow.runs.MlflowClient.link_traces_to_run") as linked:
        result = CliRunner().invoke(link_traces, cli_args)

        assert result.exit_code == 0
        assert "Successfully linked 3 trace(s) to run 'test_run_456'" in result.output
        linked.assert_called_once_with(trace_ids, "test_run_456")
250  
251  
def test_link_traces_with_short_option():
    """-t is accepted as shorthand for --trace-id."""
    with patch("mlflow.runs.MlflowClient.link_traces_to_run") as linked:
        result = CliRunner().invoke(
            link_traces, ["--run-id", "run_789", "-t", "trace_x", "-t", "trace_y"]
        )

        assert result.exit_code == 0
        assert "Successfully linked 2 trace(s) to run 'run_789'" in result.output
        linked.assert_called_once_with(["trace_x", "trace_y"], "run_789")
262  
263  
def test_link_traces_file_store_error():
    """A FileStore rejection from the client surfaces as a CLI failure."""
    store_error = MlflowException(
        "Linking traces to runs is not supported in FileStore. "
        "Please use a database-backed store (e.g., SQLAlchemy store) for this feature."
    )
    with patch("mlflow.runs.MlflowClient.link_traces_to_run", side_effect=store_error):
        result = CliRunner().invoke(
            link_traces, ["--run-id", "test_run", "--trace-id", "trace_1"]
        )

        assert result.exit_code != 0
        assert "Failed to link traces" in result.output
        assert "not supported in FileStore" in result.output
280  
281  
def test_link_traces_too_many_traces_error():
    """The per-request trace limit error is propagated into the CLI output."""
    limit_error = MlflowException(
        "Cannot link more than 100 traces to a run in a single request. Provided 101 traces."
    )
    with patch("mlflow.runs.MlflowClient.link_traces_to_run", side_effect=limit_error):
        result = CliRunner().invoke(
            link_traces, ["--run-id", "test_run", "--trace-id", "trace_1"]
        )

        assert result.exit_code != 0
        assert "Failed to link traces" in result.output
        assert "100" in result.output
297  
298  
def test_link_traces_missing_run_id():
    """--run-id is a required option."""
    result = CliRunner().invoke(link_traces, ["--trace-id", "trace_1"])
    assert result.exit_code != 0
    assert "Missing option '--run-id'" in result.output
304  
305  
def test_link_traces_missing_trace_id():
    """At least one --trace-id is required."""
    result = CliRunner().invoke(link_traces, ["--run-id", "test_run"])
    assert result.exit_code != 0
    assert "Missing option '--trace-id'" in result.output
311  
312  
def test_link_traces_generic_error():
    """Any other MlflowException is wrapped in the generic failure message."""
    with patch(
        "mlflow.runs.MlflowClient.link_traces_to_run",
        side_effect=MlflowException("Some other error"),
    ):
        result = CliRunner().invoke(
            link_traces, ["--run-id", "test_run", "--trace-id", "trace_1"]
        )

        assert result.exit_code != 0
        assert "Failed to link traces: Some other error" in result.output
325  
326  
def test_get_experiment_default():
    """With no --output flag the command renders the table format by default."""
    result = CliRunner().invoke(experiments.get_experiment, ["--experiment-id", "0"])
    assert result.exit_code == 0

    # Spot-check the table headers plus the key/value separator character.
    for fragment in ("Experiment ID", "Name", "Artifact Location", "Lifecycle Stage", ":"):
        assert fragment in result.output
337  
338  
def test_get_experiment_json():
    """--output json emits the full experiment record as one JSON object."""
    exp_id = mlflow.create_experiment("test_get_exp_json", tags={"env": "test"})
    experiment = mlflow.get_experiment(exp_id)

    result = CliRunner().invoke(
        experiments.get_experiment, ["--experiment-id", exp_id, "--output", "json"]
    )
    assert result.exit_code == 0

    # Compare the entire payload against the stored experiment in one shot.
    assert json.loads(result.output) == {
        "experiment_id": exp_id,
        "name": "test_get_exp_json",
        "artifact_location": experiment.artifact_location,
        "lifecycle_stage": "active",
        "tags": {"env": "test"},
        "creation_time": experiment.creation_time,
        "last_update_time": experiment.last_update_time,
        "trace_location": experiment.trace_location,
        "workspace": experiment.workspace,
    }
361  
362  
def test_get_experiment_table():
    """--output table includes id, name, lifecycle stage, and rendered tags."""
    exp_id = mlflow.create_experiment("test_get_exp_table", tags={"env": "test", "team": "ml"})

    result = CliRunner().invoke(
        experiments.get_experiment, ["--experiment-id", exp_id, "--output", "table"]
    )
    assert result.exit_code == 0

    expected_fragments = (
        "Experiment ID",
        exp_id,
        "Name",
        "test_get_exp_table",
        "Lifecycle Stage",
        "active",
        "Tags",
        "env=test",
        "team=ml",
    )
    for fragment in expected_fragments:
        assert fragment in result.output
381  
382  
def test_get_experiment_table_no_tags():
    """Table output still renders the Tags row for an experiment without tags."""
    exp_id = mlflow.create_experiment("test_get_exp_no_tags")

    result = CliRunner().invoke(experiments.get_experiment, ["-x", exp_id, "--output", "table"])
    assert result.exit_code == 0

    for fragment in ("Experiment ID", exp_id, "Tags"):
        assert fragment in result.output
392  
393  
def test_get_experiment_missing_id():
    """With neither identifier option the command must fail."""
    result = CliRunner().invoke(experiments.get_experiment, [])
    assert "Must specify exactly one of --experiment-id or --experiment-name" in result.output
    assert result.exit_code != 0
398  
399  
def test_get_experiment_invalid_id():
    """Looking up a nonexistent experiment id yields a non-zero exit code."""
    lookup = CliRunner().invoke(experiments.get_experiment, ["-x", "999999"])
    assert lookup.exit_code != 0
403  
404  
def test_get_experiment_deleted():
    """A deleted experiment is still retrievable and reports its lifecycle stage."""
    exp_id = mlflow.create_experiment("test_deleted")
    mlflow.delete_experiment(exp_id)

    result = CliRunner().invoke(experiments.get_experiment, ["-x", exp_id, "--output", "json"])
    assert result.exit_code == 0

    payload = json.loads(result.output)
    assert payload["experiment_id"] == exp_id
    assert payload["lifecycle_stage"] == "deleted"
415  
416  
def test_get_experiment_by_name_table():
    """Lookup via --experiment-name renders the same table fields as lookup by id."""
    exp_name = "test_get_by_name"
    exp_id = mlflow.create_experiment(exp_name, tags={"env": "test"})

    result = CliRunner().invoke(
        experiments.get_experiment, ["--experiment-name", exp_name, "--output", "table"]
    )
    assert result.exit_code == 0

    for fragment in ("Experiment ID", exp_id, "Name", exp_name, "Tags", "env=test"):
        assert fragment in result.output
431  
432  
def test_get_experiment_by_name_json():
    """JSON output for a name lookup matches the stored experiment record."""
    exp_name = "test_get_by_name_json"
    exp_id = mlflow.create_experiment(exp_name, tags={"team": "ml"})
    experiment = mlflow.get_experiment(exp_id)

    result = CliRunner().invoke(
        experiments.get_experiment, ["--experiment-name", exp_name, "--output", "json"]
    )
    assert result.exit_code == 0

    # Single whole-payload comparison instead of field-by-field asserts.
    assert json.loads(result.output) == {
        "experiment_id": exp_id,
        "name": exp_name,
        "artifact_location": experiment.artifact_location,
        "lifecycle_stage": "active",
        "tags": {"team": "ml"},
        "creation_time": experiment.creation_time,
        "last_update_time": experiment.last_update_time,
        "trace_location": experiment.trace_location,
        "workspace": "default",
    }
456  
457  
def test_get_experiment_by_name_short_option():
    """-n is accepted as shorthand for --experiment-name."""
    exp_name = "test_short_option"
    exp_id = mlflow.create_experiment(exp_name)

    result = CliRunner().invoke(experiments.get_experiment, ["-n", exp_name])
    assert result.exit_code == 0
    assert exp_name in result.output
    assert exp_id in result.output
466  
467  
def test_get_experiment_by_name_not_found():
    """Looking up a name that does not exist must fail."""
    lookup = CliRunner().invoke(
        experiments.get_experiment, ["--experiment-name", "nonexistent_experiment"]
    )
    assert lookup.exit_code != 0
473  
474  
def test_get_experiment_both_options_provided():
    """Passing both identifier options at once is rejected as ambiguous."""
    cli_args = ["--experiment-id", "0", "--experiment-name", "Default"]
    result = CliRunner().invoke(experiments.get_experiment, cli_args)
    assert result.exit_code != 0
    assert "Must specify exactly one of --experiment-id or --experiment-name" in result.output
481  
482  
def test_get_experiment_by_name_deleted():
    """Name lookup also works for deleted experiments."""
    exp_name = "test_deleted_by_name"
    mlflow.delete_experiment(mlflow.create_experiment(exp_name))

    result = CliRunner().invoke(
        experiments.get_experiment, ["--experiment-name", exp_name, "--output", "json"]
    )
    assert result.exit_code == 0

    payload = json.loads(result.output)
    assert payload["name"] == exp_name
    assert payload["lifecycle_stage"] == "deleted"