langchain_auto.py
1 """ 2 This example demonstrates how to enable automatic tracing for LangChain. 3 4 Note: this example requires the `langchain` and `langchain-openai` package to be installed. 5 """ 6 7 import json 8 import os 9 10 from langchain.prompts import PromptTemplate 11 from langchain.schema.output_parser import StrOutputParser 12 from langchain_openai import OpenAI 13 14 import mlflow 15 16 exp = mlflow.set_experiment("mlflow-tracing-langchain") 17 exp_id = exp.experiment_id 18 19 # This example uses OpenAI LLM. If you want to use other LLMs, you can 20 # uncomment the following line and replace `OpenAI` with the desired LLM class. 21 assert "OPENAI_API_KEY" in os.environ, "Please set the OPENAI_API_KEY environment variable." 22 23 24 # You can enable automatic tracing for LangChain by simply calling `mlflow langchain.autolog()`. 25 # (Note: By default this only enables tracing and does not log any other artifacts such as 26 # models, dataset, etc. To enable auto logging of other artifacts, please refer to the example 27 # at examples/langchain/chain_autolog.py) 28 mlflow.langchain.autolog() 29 30 # Build a simple chain 31 prompt = PromptTemplate( 32 input_variables=["question"], template="Please answer this question: {question}" 33 ) 34 llm = OpenAI(temperature=0.9) 35 chain = prompt | llm | StrOutputParser() 36 37 # Invoke the chain. Each invocation will generate a new trace. 38 chain.invoke({"question": "What is the capital of Japan?"}) 39 chain.invoke({"question": "How many animals are there in the world?"}) 40 chain.invoke({"question": "Who is the first person to land on the moon?"}) 41 42 # Retrieve the traces 43 traces = mlflow.search_traces(locations=[exp_id], max_results=3, return_type="list") 44 print(json.dumps([t.to_dict() for t in traces], indent=2)) 45 46 print( 47 "\033[92m" 48 + "🤖Now run `mlflow server` and open MLflow UI to see the trace visualization!" 49 + "\033[0m" 50 )