# examples/openai/chat_completions.py
  1  import logging
  2  import os
  3  
  4  import openai
  5  import pandas as pd
  6  
  7  import mlflow
  8  from mlflow.models.signature import ModelSignature
  9  from mlflow.types.schema import ColSpec, ParamSchema, ParamSpec, Schema
 10  
 11  logging.getLogger("mlflow").setLevel(logging.ERROR)
 12  
 13  # Uncomment the following lines to run this script without using a real OpenAI API key.
 14  # os.environ["MLFLOW_TESTING"] = "true"
 15  # os.environ["OPENAI_API_KEY"] = "test"
 16  
 17  assert "OPENAI_API_KEY" in os.environ, "Please set the OPENAI_API_KEY environment variable."
 18  
 19  
 20  print(
 21      """
 22  # ******************************************************************************
 23  # Single variable
 24  # ******************************************************************************
 25  """
 26  )
 27  with mlflow.start_run():
 28      model_info = mlflow.openai.log_model(
 29          model="gpt-4o-mini",
 30          task=openai.chat.completions,
 31          name="model",
 32          messages=[{"role": "user", "content": "Tell me a joke about {animal}."}],
 33      )
 34  
 35  
 36  model = mlflow.pyfunc.load_model(model_info.model_uri)
 37  df = pd.DataFrame({
 38      "animal": [
 39          "cats",
 40          "dogs",
 41      ]
 42  })
 43  print(model.predict(df))
 44  
 45  list_of_dicts = [
 46      {"animal": "cats"},
 47      {"animal": "dogs"},
 48  ]
 49  print(model.predict(list_of_dicts))
 50  
 51  list_of_strings = [
 52      "cats",
 53      "dogs",
 54  ]
 55  print(model.predict(list_of_strings))
 56  print(
 57      """
 58  # ******************************************************************************
 59  # Multiple variables
 60  # ******************************************************************************
 61  """
 62  )
 63  with mlflow.start_run():
 64      model_info = mlflow.openai.log_model(
 65          model="gpt-4o-mini",
 66          task=openai.chat.completions,
 67          name="model",
 68          messages=[{"role": "user", "content": "Tell me a {adjective} joke about {animal}."}],
 69      )
 70  
 71  
 72  model = mlflow.pyfunc.load_model(model_info.model_uri)
 73  df = pd.DataFrame({
 74      "adjective": ["funny", "scary"],
 75      "animal": ["cats", "dogs"],
 76  })
 77  print(model.predict(df))
 78  
 79  
 80  list_of_dicts = [
 81      {"adjective": "funny", "animal": "cats"},
 82      {"adjective": "scary", "animal": "dogs"},
 83  ]
 84  print(model.predict(list_of_dicts))
 85  
 86  print(
 87      """
 88  # ******************************************************************************
 89  # Multiple prompts
 90  # ******************************************************************************
 91  """
 92  )
 93  with mlflow.start_run():
 94      model_info = mlflow.openai.log_model(
 95          model="gpt-4o-mini",
 96          task=openai.chat.completions,
 97          name="model",
 98          messages=[
 99              {"role": "system", "content": "You are {person}"},
100              {"role": "user", "content": "Let me hear your thoughts on {topic}"},
101          ],
102      )
103  
104  
105  model = mlflow.pyfunc.load_model(model_info.model_uri)
106  df = pd.DataFrame({
107      "person": ["Elon Musk", "Jeff Bezos"],
108      "topic": ["AI", "ML"],
109  })
110  print(model.predict(df))
111  
112  list_of_dicts = [
113      {"person": "Elon Musk", "topic": "AI"},
114      {"person": "Jeff Bezos", "topic": "ML"},
115  ]
116  print(model.predict(list_of_dicts))
117  
118  
print(
    """
# ******************************************************************************
# No input variables
# ******************************************************************************
"""
)
# Logged messages contain no template variables; inputs are presumably
# appended as user messages at inference time — verify against the flavor docs.
with mlflow.start_run():
    model_info = mlflow.openai.log_model(
        model="gpt-4o-mini",
        task=openai.chat.completions,
        name="model",
        messages=[{"role": "system", "content": "You are Elon Musk"}],
    )

# Load the model once and reuse it: the original reloaded the identical
# model URI before every predict call, which is redundant work.
model = mlflow.pyfunc.load_model(model_info.model_uri)

questions = [
    "Let me hear your thoughts on AI",
    "Let me hear your thoughts on ML",
]
# Predict with the same questions in all three accepted input formats.
print(model.predict(pd.DataFrame({"question": questions})))
print(model.predict([{"question": q} for q in questions]))
print(model.predict(questions))
156  
157  
print(
    """
# ******************************************************************************
# Inference parameters with chat completions
# ******************************************************************************
"""
)
# An explicit signature with a ParamSchema allows inference-time parameters
# (here, temperature with a default of 0) to be passed to predict().
signature = ModelSignature(
    inputs=Schema([ColSpec(type="string", name=None)]),
    outputs=Schema([ColSpec(type="string", name=None)]),
    params=ParamSchema([ParamSpec(name="temperature", default=0, dtype="float")]),
)

with mlflow.start_run():
    model_info = mlflow.openai.log_model(
        model="gpt-4o-mini",
        task=openai.chat.completions,
        name="model",
        messages=[{"role": "user", "content": "Tell me a joke about {animal}."}],
        signature=signature,
    )

model = mlflow.pyfunc.load_model(model_info.model_uri)
# Override the default temperature for this predict call only.
print(model.predict(pd.DataFrame({"animal": ["cats", "dogs"]}), params={"temperature": 1}))