# tracing.py
"""
This is an example for leveraging MLflow's auto tracing capabilities for Gemini.

For more information about MLflow Tracing, see: https://mlflow.org/docs/latest/llms/tracing/index.html
"""

import os

import mlflow

# Turn on auto tracing for Gemini by calling mlflow.gemini.autolog()
mlflow.gemini.autolog()

# Import the SDK and configure your API key.
from google import genai

client = genai.Client(api_key=os.environ["GEMINI_API_KEY"])

# Use the generate_content method to generate responses to your prompts.
response = client.models.generate_content(
    model="gemini-1.5-flash", contents="The opposite of hot is"
)
print(response.text)

# Also leverage the chat feature to conduct multi-turn interactions
chat = client.chats.create(model="gemini-1.5-flash")
response = chat.send_message("In one sentence, explain how a computer works to a young child.")
print(response.text)
response = chat.send_message("Okay, how about a more detailed explanation to a high schooler?")
print(response.text)

# Count tokens for your statement.
# NOTE: google-genai's Models.count_tokens takes keyword-only `model` and
# `contents` arguments; the previous bare positional call raised a TypeError.
response = client.models.count_tokens(
    model="gemini-1.5-flash",
    contents="The quick brown fox jumps over the lazy dog.",
)
print(response.total_tokens)

# Generate text embeddings for your content.
text = "Hello world"
result = client.models.embed_content(model="text-embedding-004", contents=text)
# embed_content returns an EmbedContentResponse object (not a dict as in the
# legacy google-generativeai package); the vectors live on `.embeddings`.
print(result.embeddings)