# tests/semantic_kernel/resources.py
  1  import openai
  2  from semantic_kernel import Kernel
  3  from semantic_kernel.agents import ChatCompletionAgent
  4  from semantic_kernel.connectors.ai.open_ai import (
  5      OpenAIChatCompletion,
  6      OpenAITextCompletion,
  7      OpenAITextEmbedding,
  8  )
  9  from semantic_kernel.contents import ChatHistory
 10  from semantic_kernel.functions import KernelArguments
 11  
 12  from tests.tracing.helper import reset_autolog_state  # noqa: F401
 13  
 14  
 15  async def _create_and_invoke_kernel_simple(mock_openai):
 16      openai_client = openai.AsyncOpenAI(api_key="test", base_url=mock_openai)
 17  
 18      kernel = Kernel()
 19      kernel.add_service(
 20          OpenAIChatCompletion(
 21              service_id="chat-gpt",
 22              ai_model_id="gpt-4o-mini",
 23              async_client=openai_client,
 24          )
 25      )
 26      return await kernel.invoke_prompt("Is sushi the best food ever?")
 27  
 28  
 29  async def _create_and_invoke_kernel_complex(mock_openai):
 30      from semantic_kernel.prompt_template import PromptTemplateConfig
 31  
 32      openai_client = openai.AsyncOpenAI(api_key="test", base_url=mock_openai)
 33      kernel = Kernel()
 34      kernel.add_service(
 35          OpenAIChatCompletion(
 36              service_id="chat-gpt",
 37              ai_model_id="gpt-4o-mini",
 38              async_client=openai_client,
 39          )
 40      )
 41  
 42      settings = kernel.get_prompt_execution_settings_from_service_id("chat-gpt")
 43      settings.max_tokens = 100
 44      settings.temperature = 0.7
 45      settings.top_p = 0.8
 46  
 47      prompt_template_config = PromptTemplateConfig(
 48          template="{{$chat_history}}{{$user_input}}", allow_dangerously_set_content=True
 49      )
 50  
 51      chat_function = kernel.add_function(
 52          plugin_name="ChatBot",
 53          function_name="Chat",
 54          prompt_template_config=prompt_template_config,
 55          template_format="semantic-kernel",
 56          prompt_execution_settings=settings,
 57      )
 58  
 59      chat_history = ChatHistory(
 60          system_message=(
 61              "You are a chat bot named Mosscap, dedicated to figuring out what people need."
 62          )
 63      )
 64      chat_history.add_user_message("Hi there, who are you?")
 65      chat_history.add_assistant_message(
 66          "I am Mosscap, a chat bot. I'm trying to figure out what people need."
 67      )
 68      user_input = "I want to find a hotel in Seattle with free wifi and a pool."
 69  
 70      return await kernel.invoke(
 71          chat_function,
 72          KernelArguments(
 73              user_input=user_input,
 74              chat_history=chat_history,
 75          ),
 76          allow_dangerously_set_content=True,
 77      )
 78  
 79  
 80  async def _create_and_invoke_chat_agent(mock_openai):
 81      openai_client = openai.AsyncOpenAI(api_key="test", base_url=mock_openai)
 82      service = OpenAIChatCompletion(
 83          service_id="chat-gpt",
 84          ai_model_id="gpt-4o-mini",
 85          async_client=openai_client,
 86      )
 87      agent = ChatCompletionAgent(
 88          service=service,
 89          name="sushi_agent",
 90          instructions="You are a master at all things sushi. But, you are not very smart.",
 91      )
 92      return await agent.get_response(messages="How do I make sushi?")
 93  
 94  
 95  async def _create_and_invoke_text_completion(mock_openai):
 96      """Test text completion methods - parser extracts {"prompt": "..."}"""
 97      openai_client = openai.AsyncOpenAI(api_key="test", base_url=mock_openai)
 98      kernel = Kernel()
 99      kernel.add_service(
100          OpenAITextCompletion(
101              service_id="text-davinci",
102              ai_model_id="text-davinci-003",
103              async_client=openai_client,
104          )
105      )
106      text_service = kernel.get_service("text-davinci")
107      settings = kernel.get_prompt_execution_settings_from_service_id("text-davinci")
108      return await text_service.get_text_content("Complete this: The sky is", settings)
109  
110  
async def _create_and_invoke_embeddings(mock_openai):
    """Test embedding methods - parser extracts {"texts": [...]}"""
    client = openai.AsyncOpenAI(api_key="test", base_url=mock_openai)

    service = OpenAITextEmbedding(
        service_id="embedding",
        ai_model_id="text-embedding-ada-002",
        async_client=client,
    )
    return await service.generate_embeddings(
        ["Hello world", "Semantic kernel", "MLflow tracing"]
    )
121  
122  
async def _create_and_invoke_chat_completion_direct(mock_openai):
    """Test direct chat completion - parser extracts {"messages": [...]}"""
    client = openai.AsyncOpenAI(api_key="test", base_url=mock_openai)

    kernel = Kernel()
    kernel.add_service(
        OpenAIChatCompletion(
            service_id="chat",
            ai_model_id="gpt-4o-mini",
            async_client=client,
        )
    )

    # Build a short multi-turn history ending on a user message.
    history = ChatHistory()
    history.add_user_message("What is semantic kernel?")
    history.add_assistant_message("Semantic Kernel is an AI orchestration framework.")
    history.add_user_message("Tell me more about it.")

    settings = kernel.get_prompt_execution_settings_from_service_id("chat")
    service = kernel.get_service("chat")
    return await service.get_chat_message_content(history, settings)
143  
144  
async def _create_and_invoke_kernel_function_object(mock_openai):
    """Test kernel.invoke with a function object plus keyword arguments."""
    client = openai.AsyncOpenAI(api_key="test", base_url=mock_openai)

    kernel = Kernel()
    kernel.add_service(
        OpenAIChatCompletion(
            service_id="chat",
            ai_model_id="gpt-4o-mini",
            async_client=client,
        )
    )

    add_fn = kernel.add_function(
        plugin_name="MathPlugin",
        function_name="Add",
        prompt="Add {{$num1}} and {{$num2}}",
        template_format="semantic-kernel",
    )
    return await kernel.invoke(add_fn, KernelArguments(num1=5, num2=3))