# mlflow/utils/requirements_utils.py
  1  """
  2  This module provides a set of utilities for interpreting and creating requirements files
  3  (e.g. pip's `requirements.txt`), which is useful for managing ML software environments.
  4  """
  5  
  6  import importlib.metadata
  7  import json
  8  import logging
  9  import os
 10  import re
 11  import subprocess
 12  import sys
 13  import tempfile
 14  from itertools import chain, filterfalse
 15  from pathlib import Path
 16  from threading import Timer
 17  from typing import NamedTuple
 18  
 19  import importlib_metadata
 20  from packaging.requirements import Requirement
 21  from packaging.version import InvalidVersion, Version
 22  
 23  import mlflow
 24  from mlflow.environment_variables import (
 25      _MLFLOW_IN_CAPTURE_MODULE_PROCESS,
 26      MLFLOW_REQUIREMENTS_INFERENCE_RAISE_ERRORS,
 27      MLFLOW_REQUIREMENTS_INFERENCE_TIMEOUT,
 28  )
 29  from mlflow.exceptions import MlflowException
 30  from mlflow.tracking.artifact_utils import _download_artifact_from_uri
 31  from mlflow.utils.autologging_utils.versioning import _strip_dev_version_suffix
 32  from mlflow.utils.databricks_utils import (
 33      get_databricks_env_vars,
 34      is_in_databricks_runtime,
 35  )
 36  
 37  _logger = logging.getLogger(__name__)
 38  
 39  
 40  def _is_comment(line):
 41      return line.startswith("#")
 42  
 43  
 44  def _is_empty(line):
 45      return line == ""
 46  
 47  
 48  def _strip_inline_comment(line):
 49      return line[: line.find(" #")].rstrip() if " #" in line else line
 50  
 51  
 52  def _is_requirements_file(line):
 53      return line.startswith("-r ") or line.startswith("--requirement ")
 54  
 55  
 56  def _is_constraints_file(line):
 57      return line.startswith("-c ") or line.startswith("--constraint ")
 58  
 59  
 60  def _join_continued_lines(lines):
 61      """
 62      Joins lines ending with '\\'.
 63  
 64      >>> _join_continued_lines["a\\", "b\\", "c"]
 65      >>> "abc"
 66      """
 67      continued_lines = []
 68  
 69      for line in lines:
 70          if line.endswith("\\"):
 71              continued_lines.append(line.rstrip("\\"))
 72          else:
 73              continued_lines.append(line)
 74              yield "".join(continued_lines)
 75              continued_lines.clear()
 76  
 77      # The last line ends with '\'
 78      if continued_lines:
 79          yield "".join(continued_lines)
 80  
 81  
 82  class _Requirement(NamedTuple):
 83      # A string representation of the requirement.
 84      req_str: str
 85      # A boolean indicating whether this requirement is a constraint.
 86      is_constraint: bool
 87  
 88  
 89  def _parse_requirements(requirements, is_constraint, base_dir=None):
 90      """A simplified version of `pip._internal.req.parse_requirements` which performs the following
 91      operations on the given requirements file and yields the parsed requirements.
 92  
 93      - Remove comments and blank lines
 94      - Join continued lines
 95      - Resolve requirements file references (e.g. '-r requirements.txt')
 96      - Resolve constraints file references (e.g. '-c constraints.txt')
 97  
 98      Args:
 99          requirements: A string path to a requirements file on the local filesystem or
100              an iterable of pip requirement strings.
101          is_constraint: Indicates the parsed requirements file is a constraint file.
102          base_dir: If specified, resolve relative file references (e.g. '-r requirements.txt')
103              against the specified directory.
104  
105      Returns:
106          A list of ``_Requirement`` instances.
107  
108      References:
109      - `pip._internal.req.parse_requirements`:
110        https://github.com/pypa/pip/blob/7a77484a492c8f1e1f5ef24eaf71a43df9ea47eb/src/pip/_internal/req/req_file.py#L118
111      - Requirements File Format:
112        https://pip.pypa.io/en/stable/cli/pip_install/#requirements-file-format
113      - Constraints Files:
114        https://pip.pypa.io/en/stable/user_guide/#constraints-files
115      """
116      if base_dir is None:
117          if isinstance(requirements, (str, Path)):
118              base_dir = os.path.dirname(requirements)
119              with open(requirements) as f:
120                  requirements = f.read().splitlines()
121          else:
122              base_dir = os.getcwd()
123  
124      lines = map(str.strip, requirements)
125      lines = map(_strip_inline_comment, lines)
126      lines = _join_continued_lines(lines)
127      lines = filterfalse(_is_comment, lines)
128      lines = filterfalse(_is_empty, lines)
129  
130      for line in lines:
131          if _is_requirements_file(line):
132              req_file = line.split(maxsplit=1)[1]
133              # If `req_file` is an absolute path, `os.path.join` returns `req_file`:
134              # https://docs.python.org/3/library/os.path.html#os.path.join
135              abs_path = os.path.join(base_dir, req_file)
136              yield from _parse_requirements(abs_path, is_constraint=False)
137          elif _is_constraints_file(line):
138              req_file = line.split(maxsplit=1)[1]
139              abs_path = os.path.join(base_dir, req_file)
140              yield from _parse_requirements(abs_path, is_constraint=True)
141          else:
142              yield _Requirement(line, is_constraint)
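

# Example (illustrative; the file names and pins below are hypothetical): given a
# "requirements.txt" containing the lines "-c constraints.txt" and
# "scikit-learn==1.3.0", and a "constraints.txt" containing "numpy<2", the call
# below would yield _Requirement("numpy<2", True) followed by
# _Requirement("scikit-learn==1.3.0", False).
#
#   reqs = list(_parse_requirements("requirements.txt", is_constraint=False))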


def _flatten(iterable):
    return chain.from_iterable(iterable)


# https://www.python.org/dev/peps/pep-0508/#names
_PACKAGE_NAME_REGEX = re.compile(r"^(\w+|\w+[\w._-]*\w+)")


def _get_package_name(requirement):
    m = _PACKAGE_NAME_REGEX.match(requirement)
    return m and m.group(1)
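

# Example (illustrative): `_get_package_name("pandas>=1.0")` returns "pandas",
# while a line without a leading PEP 508 name (e.g. a hypothetical
# "--hash=sha256:...") returns None because the regex does not match.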


_NORMALIZE_REGEX = re.compile(r"[-_.]+")


def _normalize_package_name(pkg_name):
    """
    Normalizes a package name using the rule defined in PEP 503:
    https://www.python.org/dev/peps/pep-0503/#normalized-names
    """
    return _NORMALIZE_REGEX.sub("-", pkg_name).lower()
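

# Example (illustrative): runs of "-", "_", and "." collapse to a single "-" and
# the result is lowercased, so `_normalize_package_name("ruamel.yaml")` returns
# "ruamel-yaml" and `_normalize_package_name("Scikit_Learn")` returns
# "scikit-learn".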


def _iter_requires(name: str):
    """
    Iterates over the requirements of the specified package.

    Args:
        name: The name of the package.

    Yields:
        The names of the required packages.
    """
    try:
        reqs = importlib.metadata.requires(name)
    except importlib.metadata.PackageNotFoundError:
        return

    if reqs is None:
        return

    for req in reqs:
        # Skip extra dependencies
        semi_colon_idx = req.find(";")
        if (semi_colon_idx != -1) and req[semi_colon_idx:].startswith("; extra =="):
            continue

        req = Requirement(req)
        # Skip the requirement if the environment marker is not satisfied
        if req.marker and not req.marker.evaluate():
            continue

        yield req.name


def _get_requires(pkg_name):
    norm_pkg_name = _normalize_package_name(pkg_name)
    for req in _iter_requires(norm_pkg_name):
        yield _normalize_package_name(req)


def _get_requires_recursive(pkg_name, seen_before=None):
    """
    Recursively yields both direct and transitive dependencies of the specified
    package.
    """
    norm_pkg_name = _normalize_package_name(pkg_name)
    seen_before = seen_before or {norm_pkg_name}
    for req in _get_requires(pkg_name):
        # Prevent infinite recursion due to cyclic dependencies
        if req in seen_before:
            continue
        seen_before.add(req)
        yield req
        yield from _get_requires_recursive(req, seen_before)
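

# Example (illustrative; actual output depends on the installed distributions):
# with scikit-learn installed, `set(_get_requires_recursive("scikit-learn"))`
# would typically include direct dependencies such as "numpy", "scipy", and
# "joblib", plus their own transitive dependencies.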


def _prune_packages(packages):
    """
    Prunes packages required by other packages. For example, `["scikit-learn", "numpy"]` is pruned
    to `["scikit-learn"]`.
    """
    packages = set(packages)
    requires = set(_flatten(map(_get_requires_recursive, packages)))

    # LlamaIndex has one root "llama-index" package that bundles many sub-packages such as
    # llama-index-llms-openai. Many of those sub-packages are optional, but some are defined
    # as dependencies of the root package. However, the root package does not pin the versions
    # of those sub-packages, resulting in non-deterministic behavior when loading the model
    # later. To address this issue, we keep all sub-packages within the requirements.
    # Ref: https://github.com/run-llama/llama_index/issues/14788#issuecomment-2232107585
    requires = {req for req in requires if not req.startswith("llama-index-")}

    # Do not exclude mlflow's dependencies.
    # Do not exclude databricks-connect since it conflicts with pyspark during execution time,
    # and we need to determine if pyspark needs to be stripped based on the inferred packages.
    return packages - (requires - set(_get_requires("mlflow")) - {"databricks-connect"})


def _run_command(cmd, timeout_seconds, env=None):
    """
    Runs the specified command. If it exits with a non-zero status, `MlflowException` is raised.
    """
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)
    timer = Timer(timeout_seconds, proc.kill)
    try:
        timer.start()
        stdout, stderr = proc.communicate()
        stdout = stdout.decode("utf-8")
        stderr = stderr.decode("utf-8")
        if proc.returncode != 0:
            msg = "\n".join([
                f"Encountered an unexpected error while running {cmd}",
                f"exit status: {proc.returncode}",
                f"stdout: {stdout}",
                f"stderr: {stderr}",
            ])
            raise MlflowException(msg)
    finally:
        if timer.is_alive():
            timer.cancel()
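

# Example (illustrative): run a short Python snippet in a subprocess with a
# one-minute timeout; a non-zero exit status (including a timeout-induced kill)
# raises `MlflowException`.
#
#   _run_command([sys.executable, "-c", "print('ok')"], timeout_seconds=60)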


def _get_installed_version(package: str, module: str | None = None) -> str:
    """
    Obtains the installed package version using `importlib_metadata.version`. If it fails, use
    `__import__(module or package).__version__`.
    """
    if package == "mlflow":
        # `importlib.metadata.version` may return an incorrect version of MLflow when it's
        # installed in editable mode (e.g. `pip install -e .`).
        return mlflow.__version__

    try:
        version = importlib_metadata.version(package)
    except importlib_metadata.PackageNotFoundError:
        # Note `importlib_metadata.version(package)` is not necessarily equal to
        # `__import__(package).__version__`. See the example for pytorch below.
        #
        # Example
        # -------
        # $ pip install torch==1.9.0
        # $ python -c "import torch; print(torch.__version__)"
        # 1.9.0+cu102
        # $ python -c "import importlib_metadata; print(importlib_metadata.version('torch'))"
        # 1.9.0
        version = __import__(module or package).__version__

    # Strip the suffix from `dev` versions of PySpark, which are not available for installation
    # from Anaconda or PyPI
    if package == "pyspark":
        version = _strip_dev_version_suffix(version)

    return version
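

# Example (illustrative; the returned version depends on the environment):
# `_get_installed_version("scikit-learn", module="sklearn")` first consults the
# distribution metadata for "scikit-learn" and, failing that, falls back to
# `sklearn.__version__`.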


def _capture_imported_modules(model_uri, flavor, record_full_module=False, extra_env_vars=None):
    """Runs `_capture_modules.py` in a subprocess and captures modules imported during the model
    loading procedure. If the flavor is `transformers`, `_capture_transformers_modules.py` is run
    instead.

    Args:
        model_uri: The URI of the model.
        flavor: The flavor name of the model.
        record_full_module: Whether to record full module names (rather than only top-level
            modules) when inferring Python packages. Defaults to False.
        extra_env_vars: A dictionary of extra environment variables to pass to the subprocess.
            Defaults to None.

    Returns:
        A list of captured modules.
    """
    local_model_path = _download_artifact_from_uri(model_uri)

    process_timeout = MLFLOW_REQUIREMENTS_INFERENCE_TIMEOUT.get()
    raise_on_error = MLFLOW_REQUIREMENTS_INFERENCE_RAISE_ERRORS.get()
    extra_env_vars = extra_env_vars or {}

    # Run `_capture_modules.py` to capture modules imported during the loading procedure
    with tempfile.TemporaryDirectory() as tmpdir:
        output_file = os.path.join(tmpdir, "imported_modules.txt")
        # Pass the main environment variables to the subprocess for environment variable mapping
        main_env = os.environ.copy()
        # Prepend system binary directories to PATH so that the subprocess resolves system
        # executables while retaining all main process configuration that a user has.
        # See https://github.com/mlflow/mlflow/issues/6905 for context on minio configuration
        # resolution in a subprocess based on PATH entries.
        main_env["PATH"] = "/usr/sbin:/sbin:" + main_env["PATH"]
        # Clear py4j gateway env vars to prevent the subprocess from connecting to the parent's
        # py4j gateway. If these are inherited, libraries like databricks-sdk may attempt to use
        # them, which can corrupt the parent process's py4j connection state and cause errors
        # like "Error while obtaining a new communication channel" after the subprocess exits.
        main_env.pop("PYSPARK_GATEWAY_PORT", None)
        main_env.pop("PYSPARK_GATEWAY_SECRET", None)
        # Add databricks env vars; for langchain model loading we might need CLI configurations
        if is_in_databricks_runtime():
            main_env.update(get_databricks_env_vars(mlflow.get_tracking_uri()))

        record_full_module_args = ["--record-full-module"] if record_full_module else []

        if flavor == mlflow.transformers.FLAVOR_NAME:
            # Lazily import `_capture_transformers_modules` here to avoid circular imports.
            from mlflow.utils import _capture_transformers_modules

            for module_to_throw in ["tensorflow", "torch"]:
                # NB: Setting USE_TF or USE_TORCH here as Transformers only checks these env
                # variables on the first import of the library, which could happen anytime during
                # the model loading process (or even on mlflow import). When these variables are
                # not set, Transformers imports some torch/tensorflow modules even if they are not
                # used by the model, resulting in false positives in the captured modules.
                transformer_env = (
                    {"USE_TF": "TRUE"} if module_to_throw == "torch" else {"USE_TORCH": "TRUE"}
                )
                try:
                    _run_command(
                        [
                            sys.executable,
                            _capture_transformers_modules.__file__,
                            "--model-path",
                            local_model_path,
                            "--flavor",
                            flavor,
                            "--output-file",
                            output_file,
                            "--sys-path",
                            json.dumps(sys.path),
                            "--module-to-throw",
                            module_to_throw,
                            *record_full_module_args,
                        ],
                        timeout_seconds=process_timeout,
                        env={
                            **main_env,
                            **transformer_env,
                            _MLFLOW_IN_CAPTURE_MODULE_PROCESS.name: "true",
                            **extra_env_vars,
                        },
                    )
                    with open(output_file) as f:
                        return f.read().splitlines()

                except MlflowException:
                    pass

        # Lazily import `_capture_modules` here to avoid circular imports.
        from mlflow.utils import _capture_modules

        error_file = os.path.join(tmpdir, "error.txt")
        _run_command(
            [
                sys.executable,
                _capture_modules.__file__,
                "--model-path",
                local_model_path,
                "--flavor",
                flavor,
                "--output-file",
                output_file,
                "--error-file",
                error_file,
                "--sys-path",
                json.dumps(sys.path),
                *record_full_module_args,
            ],
            timeout_seconds=process_timeout,
            env={
                **main_env,
                _MLFLOW_IN_CAPTURE_MODULE_PROCESS.name: "true",
                **extra_env_vars,
            },
        )

        if os.path.exists(error_file):
            with open(error_file) as f:
                errors = f.read()
            if errors:
                if raise_on_error:
                    raise MlflowException(
                        f"Encountered an error while capturing imported modules: {errors}"
                    )
                _logger.warning(errors)

        with open(output_file) as f:
            return f.read().splitlines()
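

# Example (illustrative; "models:/my_model/1" is a hypothetical model URI):
# `_capture_imported_modules("models:/my_model/1", "sklearn")` loads the model
# in a subprocess and returns the names of the modules imported along the way,
# e.g. ["sklearn", "numpy", "scipy"].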


DATABRICKS_MODULES_TO_PACKAGES = {
    "databricks.automl": ["databricks-automl-runtime"],
    "databricks.automl_runtime": ["databricks-automl-runtime"],
    "databricks.model_monitoring": ["databricks-model-monitoring"],
}
MLFLOW_MODULES_TO_PACKAGES = {
    "mlflow.gateway": ["mlflow[gateway]"],
}
_MODULES_TO_PACKAGES = None
_PACKAGES_TO_MODULES = None


def _init_modules_to_packages_map():
    global _MODULES_TO_PACKAGES
    if _MODULES_TO_PACKAGES is None:
        # Note `importlib_metadata.packages_distributions` only captures packages installed into
        # Python's site-packages directory via tools such as pip:
        # https://importlib-metadata.readthedocs.io/en/latest/using.html#using-importlib-metadata
        _MODULES_TO_PACKAGES = importlib_metadata.packages_distributions()

        # Add mapping for MLflow extras
        _MODULES_TO_PACKAGES.update(MLFLOW_MODULES_TO_PACKAGES)

        # Multiple packages populate the `databricks` module namespace on Databricks; to avoid
        # bundling extraneous Databricks packages into model dependencies, we scope each module
        # to its relevant package
        _MODULES_TO_PACKAGES.update(DATABRICKS_MODULES_TO_PACKAGES)
        if "databricks" in _MODULES_TO_PACKAGES:
            _MODULES_TO_PACKAGES["databricks"] = [
                package
                for package in _MODULES_TO_PACKAGES["databricks"]
                if package not in _flatten(DATABRICKS_MODULES_TO_PACKAGES.values())
            ]

        # In Databricks, `_MODULES_TO_PACKAGES` doesn't contain pyspark since it's not installed
        # via pip or conda. To work around this issue, manually add pyspark.
        if is_in_databricks_runtime():
            _MODULES_TO_PACKAGES.update({"pyspark": ["pyspark"]})


def _init_packages_to_modules_map():
    _init_modules_to_packages_map()
    global _PACKAGES_TO_MODULES
    _PACKAGES_TO_MODULES = {}
    for module, pkg_list in _MODULES_TO_PACKAGES.items():
        for pkg_name in pkg_list:
            _PACKAGES_TO_MODULES[pkg_name] = module


def _infer_requirements(model_uri, flavor, raise_on_error=False, extra_env_vars=None):
    """Infers the pip requirements of the specified model by creating a subprocess and loading
    the model in it to determine which packages are imported.

    Args:
        model_uri: The URI of the model.
        flavor: The flavor name of the model.
        raise_on_error: If True, raise an exception if an unrecognized package is encountered.
        extra_env_vars: A dictionary of extra environment variables to pass to the subprocess.
            Defaults to None.

    Returns:
        A list of inferred pip requirements.
    """
    _init_modules_to_packages_map()

    modules = _capture_imported_modules(model_uri, flavor, extra_env_vars=extra_env_vars)
    packages = _flatten([_MODULES_TO_PACKAGES.get(module, []) for module in modules])
    packages = map(_normalize_package_name, packages)
    packages = _prune_packages(packages)
    excluded_packages = [
        # Certain packages (e.g. scikit-learn 0.24.2) import `setuptools` or `pkg_resources`
        # (a module provided by `setuptools`) to process or interact with package metadata.
        # It should be safe to exclude `setuptools` because it's rare to encounter a Python
        # environment where `setuptools` is not pre-installed.
        "setuptools",
        # Exclude a package that provides the mlflow module (e.g. mlflow, mlflow-skinny).
        # Certain flavors (e.g. pytorch) import mlflow while loading a model, but mlflow should
        # not be counted as a model requirement.
        *_MODULES_TO_PACKAGES.get("mlflow", []),
    ]
    packages = packages - set(excluded_packages)

    # Handle the pandas incompatibility issue with numpy 2.x:
    # https://github.com/pandas-dev/pandas/issues/55519
    # pandas == 2.2.*: compatible with numpy >= 2
    # pandas >= 2.1.2: incompatible with numpy >= 2, but it pins numpy < 2
    # pandas < 2.1.2: incompatible with numpy >= 2 and doesn't pin numpy, so we need to pin numpy
    if any(
        package == "pandas"
        and Version(_get_pinned_requirement(package).split("==")[1]) < Version("2.1.2")
        for package in packages
    ):
        packages.add("numpy")

    return sorted(map(_get_pinned_requirement, packages))
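

# Example (illustrative; "runs:/<run_id>/model" is a placeholder URI and the
# pinned versions depend on the environment in which the model was logged):
#
#   _infer_requirements("runs:/<run_id>/model", "sklearn")
#   # -> e.g. ["scikit-learn==1.3.0"]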


def _get_local_version_label(version):
    """Extracts a local version label from `version`.

    Args:
        version: A version string.
    """
    try:
        return Version(version).local
    except InvalidVersion:
        return None
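

# Example (illustrative): `_get_local_version_label("1.9.0+cu102")` returns
# "cu102"; an unparsable version such as "not-a-version" returns None.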


def _strip_local_version_label(version):
    """Strips the local version label from `version`.

    Local version identifiers:
    https://www.python.org/dev/peps/pep-0440/#local-version-identifiers

    Args:
        version: A version string to strip.
    """

    class IgnoreLocal(Version):
        @property
        def local(self):
            return None

    try:
        return str(IgnoreLocal(version))
    except InvalidVersion:
        return version
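

# Example (illustrative): `_strip_local_version_label("1.9.0+cu102")` returns
# "1.9.0", making the pin installable from PyPI; an unparsable version string is
# returned unchanged.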


def _get_pinned_requirement(req_str, version=None, module=None):
    """Returns a string representing a pinned pip requirement to install the specified package and
    version (e.g. 'mlflow==1.2.3').

    Args:
        req_str: The package requirement string (e.g. "mlflow" or "mlflow[gateway]").
        version: The version of the package. If None, defaults to the installed version.
        module: The name of the top-level module provided by the package. For example,
            if the package is 'scikit-learn', `module` should be 'sklearn'. If None, defaults
            to the package name.
    """
    req = Requirement(req_str)
    package = req.name
    if version is None:
        version_raw = _get_installed_version(package, module)
        if local_version_label := _get_local_version_label(version_raw):
            version = _strip_local_version_label(version_raw)
            if not (is_in_databricks_runtime() and package in ("torch", "torchvision")):
                msg = (
                    f"Found {package} version ({version_raw}) contains a local version label "
                    f"(+{local_version_label}). MLflow logged a pip requirement for this package "
                    f"as '{package}=={version}' without the local version label to make it "
                    "installable from PyPI. To specify pip requirements containing local version "
                    "labels, please use `conda_env` or `pip_requirements`."
                )
                _logger.warning(msg)
        else:
            version = version_raw

    if req.extras:
        return f"{package}[{','.join(req.extras)}]=={version}"
    return f"{package}=={version}"
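

# Example (illustrative; versions depend on the environment): with scikit-learn
# 1.3.0 installed, `_get_pinned_requirement("scikit-learn", module="sklearn")`
# returns "scikit-learn==1.3.0". Extras are preserved, so
# `_get_pinned_requirement("mlflow[gateway]")` returns
# "mlflow[gateway]==<installed version>".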


class _MismatchedPackageInfo(NamedTuple):
    package_name: str
    installed_version: str | None
    requirement: str

    def __str__(self):
        current_status = self.installed_version or "uninstalled"
        return f"{self.package_name} (current: {current_status}, required: {self.requirement})"


def _check_requirement_satisfied(requirement_str: str) -> _MismatchedPackageInfo | None:
    """
    Checks whether the current Python environment satisfies the given requirement, provided it is
    parsable as a package name and a set of version specifiers. If the requirement is not
    satisfied, returns a `_MismatchedPackageInfo` object containing the mismatched package name,
    installed version, and requirement. Otherwise, returns None.
    """
    try:
        req = Requirement(requirement_str)
    except Exception:
        # We reach here if the requirement string is a file path or a URL.
        # Extracting the package name from the requirement string is not trivial,
        # so we skip the check.
        return None
    if req.marker and not req.marker.evaluate():
        return None

    _init_packages_to_modules_map()
    pkg_name = req.name

    try:
        installed_version = _get_installed_version(pkg_name, _PACKAGES_TO_MODULES.get(pkg_name))
    except ModuleNotFoundError:
        return _MismatchedPackageInfo(
            package_name=pkg_name,
            installed_version=None,
            requirement=requirement_str,
        )

    if pkg_name == "mlflow" and "gateway" in req.extras:
        try:
            from mlflow import gateway  # noqa: F401
        except ModuleNotFoundError:
            return _MismatchedPackageInfo(
                package_name="mlflow[gateway]",
                installed_version=None,
                requirement=requirement_str,
            )

    if pkg_name == "mlflow" and Version(installed_version).is_devrelease:
        return None

    if len(req.specifier) > 0 and not req.specifier.contains(installed_version):
        return _MismatchedPackageInfo(
            package_name=pkg_name,
            installed_version=installed_version,
            requirement=requirement_str,
        )

    return None
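

# Example (illustrative; assumes pandas 2.0.3 is installed):
# `_check_requirement_satisfied("pandas>=100.0")` returns a mismatch whose string
# form is "pandas (current: 2.0.3, required: pandas>=100.0)", while
# `_check_requirement_satisfied("pandas>=1.0")` returns None.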


def warn_dependency_requirement_mismatches(model_requirements: list[str]):
    """
    Inspects the model's dependencies and logs a warning if the current Python environment
    doesn't satisfy them.
    """
    # Suppress databricks-feature-lookup warning for feature store cases
    # Suppress databricks-chains, databricks-rag, and databricks-agents warnings for RAG
    # Studio cases
    # NB: When a final name has been decided for GA for the aforementioned
    # "Databricks RAG Studio" product, remove unrelated names from this listing.
    _DATABRICKS_FEATURE_LOOKUP = "databricks-feature-lookup"
    _DATABRICKS_AGENTS = "databricks-agents"

    # List of packages to ignore
    packages_to_ignore = [
        _DATABRICKS_FEATURE_LOOKUP,
        _DATABRICKS_AGENTS,
    ]

    # Normalize package names and create the ignore list
    ignore_packages = list(map(_normalize_package_name, packages_to_ignore))

    try:
        mismatch_infos = []
        for req in model_requirements:
            mismatch_info = _check_requirement_satisfied(req)
            if mismatch_info is not None:
                if _normalize_package_name(mismatch_info.package_name) in ignore_packages:
                    continue
                mismatch_infos.append(str(mismatch_info))

        if len(mismatch_infos) > 0:
            mismatch_str = " - " + "\n - ".join(mismatch_infos)
            warning_msg = (
                "Detected one or more mismatches between the model's dependencies and the current "
                f"Python environment:\n{mismatch_str}\n"
                "To fix the mismatches, call `mlflow.pyfunc.get_model_dependencies(model_uri)` "
                "to fetch the model's environment and install dependencies using the resulting "
                "environment file."
            )
            _logger.warning(warning_msg)

    except Exception as e:
        _logger.warning(
            f"Encountered an unexpected error ({e!r}) while detecting model dependency "
            "mismatches. Set logging level to DEBUG to see the full traceback."
        )
        _logger.debug("", exc_info=True)
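

# Example (illustrative; assumes pandas 2.0.3 is installed): the call below would
# log a warning listing "pandas (current: 2.0.3, required: pandas==1.5.3)".
#
#   warn_dependency_requirement_mismatches(["pandas==1.5.3", "scikit-learn"])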