simple_agent.py
"""Example: log a LangChain agent to MLflow, reload it, and run one prediction.

Note: Ensure that the package 'google-search-results' is installed via pypi to
run this example and that you have accounts with SerpAPI and OpenAI to use
their APIs.
"""

import os

from langchain.agents import AgentType, initialize_agent, load_tools
from langchain.llms import OpenAI

import mlflow

# Ensuring necessary API keys are set.
# A bare `assert` would be stripped under `python -O`, silently skipping these
# checks — raise explicitly instead so the script always fails fast.
if "OPENAI_API_KEY" not in os.environ:
    raise OSError("Please set the OPENAI_API_KEY environment variable.")
if "SERPAPI_API_KEY" not in os.environ:
    raise OSError("Please set the SERPAPI_API_KEY environment variable.")

# Load the language model for agent control (temperature=0 for determinism).
llm = OpenAI(temperature=0)

# Next, let's load some tools to use. Note that the `llm-math` tool uses an
# LLM, so we need to pass that in.
tools = load_tools(["serpapi", "llm-math"], llm=llm)

# Finally, let's initialize an agent with the tools, the language model, and
# the type of agent we want to use.
agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)

# Log the agent in an MLflow run so it can be reloaded later from its URI.
with mlflow.start_run():
    logged_model = mlflow.langchain.log_model(agent, name="langchain_model")

# Load the logged agent model back as a generic pyfunc model for prediction.
loaded_model = mlflow.pyfunc.load_model(logged_model.model_uri)

# Generate an inference result using the loaded model.
question = "What was the high temperature in SF yesterday in Fahrenheit? What is that number raised to the .023 power?"

answer = loaded_model.predict([{"input": question}])

print(answer)