# test/components/generators/chat/test_openai.py
   1  # SPDX-FileCopyrightText: 2022-present deepset GmbH <info@deepset.ai>
   2  #
   3  # SPDX-License-Identifier: Apache-2.0
   4  
   5  import json
   6  import logging
   7  import os
   8  from datetime import datetime
   9  from typing import Any
  10  from unittest.mock import ANY, MagicMock, patch
  11  
  12  import pytest
  13  from openai import OpenAIError
  14  from openai.types.chat import (
  15      ChatCompletion,
  16      ChatCompletionChunk,
  17      ChatCompletionMessage,
  18      ChatCompletionMessageFunctionToolCall,
  19      ParsedChatCompletion,
  20      ParsedChatCompletionMessage,
  21      ParsedChoice,
  22      ParsedFunction,
  23      ParsedFunctionToolCall,
  24      chat_completion_chunk,
  25  )
  26  from openai.types.chat.chat_completion import Choice
  27  from openai.types.chat.chat_completion_chunk import ChoiceDelta, ChoiceDeltaToolCall, ChoiceDeltaToolCallFunction
  28  from openai.types.chat.chat_completion_message_function_tool_call import Function
  29  from openai.types.completion_usage import CompletionTokensDetails, CompletionUsage, PromptTokensDetails
  30  from pydantic import BaseModel
  31  
  32  from haystack import component
  33  from haystack.components.generators.chat.openai import (
  34      OpenAIChatGenerator,
  35      _check_finish_reason,
  36      _convert_chat_completion_chunk_to_streaming_chunk,
  37  )
  38  from haystack.components.generators.utils import print_streaming_chunk
  39  from haystack.dataclasses import (
  40      ChatMessage,
  41      ChatRole,
  42      FileContent,
  43      ImageContent,
  44      StreamingChunk,
  45      ToolCall,
  46      ToolCallDelta,
  47  )
  48  from haystack.tools import ComponentTool, Tool
  49  from haystack.tools.toolset import Toolset
  50  from haystack.utils.auth import Secret
  51  
  52  
class CalendarEvent(BaseModel):
    """Pydantic model used as a structured-output (``response_format``) schema in these tests."""

    event_name: str
    event_date: str
    event_location: str
  57  
  58  
  59  @pytest.fixture
  60  def calendar_event_model():
  61      return CalendarEvent
  62  
  63  
  64  @pytest.fixture
  65  def chat_messages():
  66      return [
  67          ChatMessage.from_system("You are a helpful assistant"),
  68          ChatMessage.from_user("What's the capital of France"),
  69      ]
  70  
  71  
  72  @pytest.fixture
  73  def mock_chat_completion_chunk_with_tools(openai_mock_stream):
  74      """
  75      Mock the OpenAI API completion chunk response and reuse it for tests
  76      """
  77  
  78      with patch("openai.resources.chat.completions.Completions.create") as mock_chat_completion_create:
  79          completion = ChatCompletionChunk(
  80              id="foo",
  81              model="gpt-4",
  82              object="chat.completion.chunk",
  83              choices=[
  84                  chat_completion_chunk.Choice(
  85                      finish_reason="tool_calls",
  86                      logprobs=None,
  87                      index=0,
  88                      delta=chat_completion_chunk.ChoiceDelta(
  89                          role="assistant",
  90                          tool_calls=[
  91                              chat_completion_chunk.ChoiceDeltaToolCall(
  92                                  index=0,
  93                                  id="123",
  94                                  type="function",
  95                                  function=chat_completion_chunk.ChoiceDeltaToolCallFunction(
  96                                      name="weather", arguments='{"city": "Paris"}'
  97                                  ),
  98                              )
  99                          ],
 100                      ),
 101                  )
 102              ],
 103              created=int(datetime.now().timestamp()),
 104          )
 105          mock_chat_completion_create.return_value = openai_mock_stream(
 106              completion, cast_to=None, response=None, client=None
 107          )
 108          yield mock_chat_completion_create
 109  
 110  
 111  def weather_function(city: str) -> dict[str, Any]:
 112      weather_info = {
 113          "Berlin": {"weather": "mostly sunny", "temperature": 7, "unit": "celsius"},
 114          "Paris": {"weather": "mostly cloudy", "temperature": 8, "unit": "celsius"},
 115          "Rome": {"weather": "sunny", "temperature": 14, "unit": "celsius"},
 116      }
 117      return weather_info.get(city, {"weather": "unknown", "temperature": 0, "unit": "celsius"})
 118  
 119  
 120  # mock chat completions with structured outputs
@pytest.fixture
def mock_parsed_chat_completion():
    """Patch ``Completions.parse`` to return a fixed ParsedChatCompletion[CalendarEvent]."""
    with patch("openai.resources.chat.completions.Completions.parse") as mock_chat_completion_parse:
        completion = ParsedChatCompletion[CalendarEvent](
            id="json_foo",
            model="gpt-5-mini",
            object="chat.completion",
            choices=[
                ParsedChoice[CalendarEvent](
                    finish_reason="stop",
                    index=0,
                    message=ParsedChatCompletionMessage[CalendarEvent](
                        # raw JSON string content mirrors the `parsed` model below
                        content='{"event_name":"Team Meeting","event_date":"2024-03-15",'
                        '"event_location":"Conference Room A"}',
                        refusal=None,
                        role="assistant",
                        annotations=[],
                        audio=None,
                        function_call=None,
                        tool_calls=None,
                        parsed=CalendarEvent(
                            event_name="Team Meeting", event_date="2024-03-15", event_location="Conference Room A"
                        ),
                    ),
                )
            ],
            created=1757328264,
            usage=CompletionUsage(completion_tokens=29, prompt_tokens=86, total_tokens=115),
        )
        mock_chat_completion_parse.return_value = completion
        yield mock_chat_completion_parse
 152  
 153  
 154  @component
 155  class MessageExtractor:
 156      @component.output_types(messages=list[str], meta=dict[str, Any])
 157      def run(self, messages: list[ChatMessage], meta: dict[str, Any] | None = None) -> dict[str, Any]:
 158          """
 159          Extracts the text content of ChatMessage objects
 160  
 161          :param messages: List of Haystack ChatMessage objects
 162          :param meta: Optional metadata to include in the response.
 163          :returns:
 164              A dictionary with keys "messages" and "meta".
 165          """
 166          if meta is None:
 167              meta = {}
 168          return {"messages": [m.text for m in messages], "meta": meta}
 169  
 170  
 171  @pytest.fixture
 172  def tools():
 173      weather_tool = Tool(
 174          name="weather",
 175          description="useful to determine the weather in a given location",
 176          parameters={"type": "object", "properties": {"city": {"type": "string"}}, "required": ["city"]},
 177          function=weather_function,
 178      )
 179      # We add a tool that has a more complex parameter signature
 180      message_extractor_tool = ComponentTool(
 181          component=MessageExtractor(),
 182          name="message_extractor",
 183          description="Useful for returning the text content of ChatMessage objects",
 184      )
 185      return [weather_tool, message_extractor_tool]
 186  
 187  
 188  class TestOpenAIChatGenerator:
 189      def test_supported_models(self):
 190          """SUPPORTED_MODELS is a non-empty list of strings."""
 191          models = OpenAIChatGenerator.SUPPORTED_MODELS
 192          assert isinstance(models, list)
 193          assert len(models) > 0
 194          assert all(isinstance(m, str) for m in models)
 195  
 196      def test_init_default(self, monkeypatch):
 197          monkeypatch.setenv("OPENAI_API_KEY", "test-api-key")
 198          component = OpenAIChatGenerator()
 199          assert component.client.api_key == "test-api-key"
 200          assert component.model == "gpt-5-mini"
 201          assert component.streaming_callback is None
 202          assert not component.generation_kwargs
 203          assert component.client.timeout == 30
 204          assert component.client.max_retries == 5
 205          assert component.tools is None
 206          assert not component.tools_strict
 207          assert component.http_client_kwargs is None
 208  
 209      def test_init_fail_wo_api_key(self, monkeypatch):
 210          monkeypatch.delenv("OPENAI_API_KEY", raising=False)
 211          with pytest.raises(ValueError):
 212              OpenAIChatGenerator()
 213  
 214      def test_init_fail_with_duplicate_tool_names(self, monkeypatch, tools):
 215          monkeypatch.setenv("OPENAI_API_KEY", "test-api-key")
 216  
 217          duplicate_tools = [tools[0], tools[0]]
 218          with pytest.raises(ValueError):
 219              OpenAIChatGenerator(tools=duplicate_tools)
 220  
 221      def test_init_with_parameters(self, monkeypatch):
 222          tool = Tool(name="name", description="description", parameters={"x": {"type": "string"}}, function=lambda x: x)
 223  
 224          monkeypatch.setenv("OPENAI_TIMEOUT", "100")
 225          monkeypatch.setenv("OPENAI_MAX_RETRIES", "10")
 226          component = OpenAIChatGenerator(
 227              api_key=Secret.from_token("test-api-key"),
 228              streaming_callback=print_streaming_chunk,
 229              api_base_url="test-base-url",
 230              generation_kwargs={"max_completion_tokens": 10, "some_test_param": "test-params"},
 231              timeout=40.0,
 232              max_retries=1,
 233              tools=[tool],
 234              tools_strict=True,
 235              http_client_kwargs={"proxy": "http://example.com:8080", "verify": False},
 236          )
 237          assert component.client.api_key == "test-api-key"
 238          assert component.model == "gpt-5-mini"
 239          assert component.streaming_callback is print_streaming_chunk
 240          assert component.generation_kwargs == {"max_completion_tokens": 10, "some_test_param": "test-params"}
 241          assert component.client.timeout == 40.0
 242          assert component.client.max_retries == 1
 243          assert component.tools == [tool]
 244          assert component.tools_strict
 245          assert component.http_client_kwargs == {"proxy": "http://example.com:8080", "verify": False}
 246  
 247      def test_init_with_parameters_and_env_vars(self, monkeypatch):
 248          monkeypatch.setenv("OPENAI_TIMEOUT", "100")
 249          monkeypatch.setenv("OPENAI_MAX_RETRIES", "10")
 250          component = OpenAIChatGenerator(
 251              api_key=Secret.from_token("test-api-key"),
 252              streaming_callback=print_streaming_chunk,
 253              api_base_url="test-base-url",
 254              generation_kwargs={"max_completion_tokens": 10, "some_test_param": "test-params"},
 255          )
 256          assert component.client.api_key == "test-api-key"
 257          assert component.model == "gpt-5-mini"
 258          assert component.streaming_callback is print_streaming_chunk
 259          assert component.generation_kwargs == {"max_completion_tokens": 10, "some_test_param": "test-params"}
 260          assert component.client.timeout == 100.0
 261          assert component.client.max_retries == 10
 262  
    def test_to_dict_default(self, monkeypatch):
        """to_dict() with default init parameters serializes every field, including None defaults."""
        monkeypatch.setenv("OPENAI_API_KEY", "test-api-key")
        component = OpenAIChatGenerator()
        data = component.to_dict()
        # the api_key is serialized as an env-var reference, never as the secret value
        assert data == {
            "type": "haystack.components.generators.chat.openai.OpenAIChatGenerator",
            "init_parameters": {
                "api_key": {"env_vars": ["OPENAI_API_KEY"], "strict": True, "type": "env_var"},
                "model": "gpt-5-mini",
                "organization": None,
                "streaming_callback": None,
                "api_base_url": None,
                "generation_kwargs": {},
                "tools": None,
                "tools_strict": False,
                "max_retries": None,
                "timeout": None,
                "http_client_kwargs": None,
            },
        }
 283  
    def test_to_dict_with_parameters(self, monkeypatch, calendar_event_model):
        """
        to_dict() serializes tools, the streaming-callback import path, and converts a
        pydantic response_format model into an OpenAI json_schema payload.
        """
        tool = Tool(name="name", description="description", parameters={"x": {"type": "string"}}, function=print)

        monkeypatch.setenv("ENV_VAR", "test-api-key")
        component = OpenAIChatGenerator(
            api_key=Secret.from_env_var("ENV_VAR"),
            streaming_callback=print_streaming_chunk,
            api_base_url="test-base-url",
            generation_kwargs={
                "max_completion_tokens": 10,
                "some_test_param": "test-params",
                "response_format": calendar_event_model,
                "logprobs": True,
            },
            tools=[tool],
            tools_strict=True,
            max_retries=10,
            timeout=100.0,
            http_client_kwargs={"proxy": "http://example.com:8080", "verify": False},
        )
        data = component.to_dict()

        assert data == {
            "type": "haystack.components.generators.chat.openai.OpenAIChatGenerator",
            "init_parameters": {
                "api_key": {"env_vars": ["ENV_VAR"], "strict": True, "type": "env_var"},
                "model": "gpt-5-mini",
                "organization": None,
                "api_base_url": "test-base-url",
                "max_retries": 10,
                "timeout": 100.0,
                "streaming_callback": "haystack.components.generators.utils.print_streaming_chunk",
                "generation_kwargs": {
                    "max_completion_tokens": 10,
                    "some_test_param": "test-params",
                    "logprobs": True,
                    # the pydantic model is converted to a strict json_schema payload
                    "response_format": {
                        "type": "json_schema",
                        "json_schema": {
                            "name": "CalendarEvent",
                            "strict": True,
                            "schema": {
                                "properties": {
                                    "event_name": {"title": "Event Name", "type": "string"},
                                    "event_date": {"title": "Event Date", "type": "string"},
                                    "event_location": {"title": "Event Location", "type": "string"},
                                },
                                "required": ["event_name", "event_date", "event_location"],
                                "title": "CalendarEvent",
                                "type": "object",
                                "additionalProperties": False,
                            },
                        },
                    },
                },
                "tools": [
                    {
                        "type": "haystack.tools.tool.Tool",
                        "data": {
                            "description": "description",
                            "function": "builtins.print",
                            "inputs_from_state": None,
                            "name": "name",
                            "outputs_to_state": None,
                            "outputs_to_string": None,
                            "parameters": {"x": {"type": "string"}},
                        },
                    }
                ],
                "tools_strict": True,
                "http_client_kwargs": {"proxy": "http://example.com:8080", "verify": False},
            },
        }
 357  
    def test_to_dict_with_response_format_json_object(self, monkeypatch):
        """A plain dict response_format (type json_object) is serialized unchanged."""
        monkeypatch.setenv("OPENAI_API_KEY", "test-api-key")
        component = OpenAIChatGenerator(
            api_key=Secret.from_env_var("OPENAI_API_KEY"),
            generation_kwargs={"response_format": {"type": "json_object"}},
        )
        data = component.to_dict()
        assert data == {
            "type": "haystack.components.generators.chat.openai.OpenAIChatGenerator",
            "init_parameters": {
                "api_key": {"env_vars": ["OPENAI_API_KEY"], "strict": True, "type": "env_var"},
                "model": "gpt-5-mini",
                "api_base_url": None,
                "organization": None,
                "streaming_callback": None,
                "generation_kwargs": {"response_format": {"type": "json_object"}},
                "tools": None,
                "tools_strict": False,
                "max_retries": None,
                "timeout": None,
                "http_client_kwargs": None,
            },
        }
 381  
    def test_from_dict(self, monkeypatch):
        """from_dict() restores all init parameters, resolving the api_key from the env var."""
        monkeypatch.setenv("OPENAI_API_KEY", "fake-api-key")
        data = {
            "type": "haystack.components.generators.chat.openai.OpenAIChatGenerator",
            "init_parameters": {
                "api_key": {"env_vars": ["OPENAI_API_KEY"], "strict": True, "type": "env_var"},
                "model": "gpt-5-mini",
                "api_base_url": "test-base-url",
                "streaming_callback": "haystack.components.generators.utils.print_streaming_chunk",
                "max_retries": 10,
                "timeout": 100.0,
                "generation_kwargs": {"max_completion_tokens": 10, "some_test_param": "test-params"},
                "tools": [
                    {
                        "type": "haystack.tools.tool.Tool",
                        "data": {
                            "description": "description",
                            "function": "builtins.print",
                            "name": "name",
                            "parameters": {"x": {"type": "string"}},
                        },
                    }
                ],
                "tools_strict": True,
                "http_client_kwargs": {"proxy": "http://example.com:8080", "verify": False},
            },
        }
        component = OpenAIChatGenerator.from_dict(data)

        assert isinstance(component, OpenAIChatGenerator)
        assert component.model == "gpt-5-mini"
        # the serialized dotted path is resolved back to the callable
        assert component.streaming_callback is print_streaming_chunk
        assert component.api_base_url == "test-base-url"
        assert component.generation_kwargs == {"max_completion_tokens": 10, "some_test_param": "test-params"}
        assert component.api_key == Secret.from_env_var("OPENAI_API_KEY")
        assert component.tools == [
            Tool(name="name", description="description", parameters={"x": {"type": "string"}}, function=print)
        ]
        assert component.tools_strict
        assert component.client.timeout == 100.0
        assert component.client.max_retries == 10
        assert component.http_client_kwargs == {"proxy": "http://example.com:8080", "verify": False}
 424  
 425      def test_from_dict_fail_wo_env_var(self, monkeypatch):
 426          monkeypatch.delenv("OPENAI_API_KEY", raising=False)
 427          data = {
 428              "type": "haystack.components.generators.chat.openai.OpenAIChatGenerator",
 429              "init_parameters": {
 430                  "api_key": {"env_vars": ["OPENAI_API_KEY"], "strict": True, "type": "env_var"},
 431                  "model": "gpt-4",
 432                  "organization": None,
 433                  "api_base_url": "test-base-url",
 434                  "streaming_callback": "haystack.components.generators.utils.print_streaming_chunk",
 435                  "generation_kwargs": {"max_completion_tokens": 10, "some_test_param": "test-params"},
 436                  "tools": None,
 437              },
 438          }
 439          with pytest.raises(ValueError):
 440              OpenAIChatGenerator.from_dict(data)
 441  
 442      def test_run(self, chat_messages, openai_mock_chat_completion):
 443          component = OpenAIChatGenerator(api_key=Secret.from_token("test-api-key"))
 444          response = component.run(chat_messages)
 445  
 446          # check that the component returns the correct ChatMessage response
 447          assert isinstance(response, dict)
 448          assert "replies" in response
 449          assert isinstance(response["replies"], list)
 450          assert len(response["replies"]) == 1
 451          assert [isinstance(reply, ChatMessage) for reply in response["replies"]]
 452  
 453      def test_run_with_params(self, chat_messages, openai_mock_chat_completion):
 454          component = OpenAIChatGenerator(
 455              api_key=Secret.from_token("test-api-key"),
 456              generation_kwargs={"max_completion_tokens": 10, "temperature": 0.5},
 457          )
 458          response = component.run(chat_messages)
 459  
 460          # check that the component calls the OpenAI API with the correct parameters
 461          _, kwargs = openai_mock_chat_completion.call_args
 462          assert kwargs["max_completion_tokens"] == 10
 463          assert kwargs["temperature"] == 0.5
 464  
 465          # check that the tools are not passed to the OpenAI API (the generator is initialized without tools)
 466          assert "tools" not in kwargs
 467  
 468          # check that the component returns the correct response
 469          assert isinstance(response, dict)
 470          assert "replies" in response
 471          assert isinstance(response["replies"], list)
 472          assert len(response["replies"]) == 1
 473          assert [isinstance(reply, ChatMessage) for reply in response["replies"]]
 474  
 475      def test_run_with_params_streaming(self, chat_messages, openai_mock_chat_completion_chunk):
 476          streaming_callback_called = False
 477  
 478          def streaming_callback(chunk: StreamingChunk) -> None:
 479              nonlocal streaming_callback_called
 480              streaming_callback_called = True
 481  
 482          component = OpenAIChatGenerator(
 483              api_key=Secret.from_token("test-api-key"), streaming_callback=streaming_callback
 484          )
 485          response = component.run(chat_messages)
 486  
 487          # check we called the streaming callback
 488          assert streaming_callback_called
 489  
 490          # check that the component still returns the correct response
 491          assert isinstance(response, dict)
 492          assert "replies" in response
 493          assert isinstance(response["replies"], list)
 494          assert len(response["replies"]) == 1
 495          assert [isinstance(reply, ChatMessage) for reply in response["replies"]]
 496          assert "Hello" in response["replies"][0].text  # see openai_mock_chat_completion_chunk
 497  
 498      def test_run_with_streaming_callback_in_run_method(self, chat_messages, openai_mock_chat_completion_chunk):
 499          streaming_callback_called = False
 500  
 501          def streaming_callback(chunk: StreamingChunk) -> None:
 502              nonlocal streaming_callback_called
 503              streaming_callback_called = True
 504  
 505          component = OpenAIChatGenerator(api_key=Secret.from_token("test-api-key"))
 506          response = component.run(chat_messages, streaming_callback=streaming_callback)
 507  
 508          # check we called the streaming callback
 509          assert streaming_callback_called
 510  
 511          # check that the component still returns the correct response
 512          assert isinstance(response, dict)
 513          assert "replies" in response
 514          assert isinstance(response["replies"], list)
 515          assert len(response["replies"]) == 1
 516          assert [isinstance(reply, ChatMessage) for reply in response["replies"]]
 517          assert "Hello" in response["replies"][0].text  # see openai_mock_chat_completion_chunk
 518  
 519      def test_run_with_response_format(self, chat_messages, mock_parsed_chat_completion):
 520          component = OpenAIChatGenerator(
 521              api_key=Secret.from_token("test-api-key"), generation_kwargs={"response_format": CalendarEvent}
 522          )
 523          response = component.run(chat_messages)
 524          assert isinstance(response, dict)
 525          assert "replies" in response
 526          assert isinstance(response["replies"], list)
 527          assert len(response["replies"]) == 1
 528          assert [isinstance(reply, ChatMessage) for reply in response["replies"]]
 529          assert "Team Meeting" in response["replies"][0].text  # see mock_parsed_chat_completion
 530  
 531      def test_run_with_response_format_in_run_method(self, chat_messages, mock_parsed_chat_completion):
 532          component = OpenAIChatGenerator(api_key=Secret.from_token("test-api-key"))
 533          response = component.run(chat_messages, generation_kwargs={"response_format": CalendarEvent})
 534          assert isinstance(response, dict)
 535          assert "replies" in response
 536          assert isinstance(response["replies"], list)
 537          assert len(response["replies"]) == 1
 538          assert [isinstance(reply, ChatMessage) for reply in response["replies"]]
 539          assert "Team Meeting" in response["replies"][0].text  # see mock_parsed_chat_completion
 540  
 541      def test_run_with_wrapped_stream_simulation(self, chat_messages, openai_mock_stream):
 542          streaming_callback_called = False
 543  
 544          def streaming_callback(chunk: StreamingChunk) -> None:
 545              nonlocal streaming_callback_called
 546              streaming_callback_called = True
 547              assert isinstance(chunk, StreamingChunk)
 548  
 549          chunk = ChatCompletionChunk(
 550              id="id",
 551              model="gpt-4",
 552              object="chat.completion.chunk",
 553              choices=[chat_completion_chunk.Choice(index=0, delta=chat_completion_chunk.ChoiceDelta(content="Hello"))],
 554              created=int(datetime.now().timestamp()),
 555          )
 556  
 557          # Here we wrap the OpenAI stream in a MagicMock
 558          # This is to simulate the behavior of some tools like Weave (https://github.com/wandb/weave)
 559          # which wrap the OpenAI stream in their own stream
 560          wrapped_openai_stream = MagicMock()
 561          wrapped_openai_stream.__iter__.return_value = iter([chunk])
 562  
 563          component = OpenAIChatGenerator(api_key=Secret.from_token("test-api-key"))
 564  
 565          with patch.object(
 566              component.client.chat.completions, "create", return_value=wrapped_openai_stream
 567          ) as mock_create:
 568              response = component.run(chat_messages, streaming_callback=streaming_callback)
 569  
 570              mock_create.assert_called_once()
 571              assert streaming_callback_called
 572              assert "replies" in response
 573              assert "Hello" in response["replies"][0].text
 574  
 575      def test_check_abnormal_completions(self, caplog):
 576          caplog.set_level(logging.INFO)
 577          messages = [
 578              ChatMessage.from_assistant(
 579                  "", meta={"finish_reason": "content_filter" if i % 2 == 0 else "length", "index": i}
 580              )
 581              for i, _ in enumerate(range(4))
 582          ]
 583  
 584          for m in messages:
 585              _check_finish_reason(m.meta)
 586  
 587          # check truncation warning
 588          message_template = (
 589              "The completion for index {index} has been truncated before reaching a natural stopping point. "
 590              "Increase the max_completion_tokens parameter to allow for longer completions."
 591          )
 592  
 593          for index in [1, 3]:
 594              assert caplog.records[index].message == message_template.format(index=index)
 595  
 596          # check content filter warning
 597          message_template = "The completion for index {index} has been truncated due to the content filter."
 598          for index in [0, 2]:
 599              assert caplog.records[index].message == message_template.format(index=index)
 600  
    def test_run_with_tools(self, tools):
        """
        run() with tools: verifies the tool spec sent to the OpenAI API (including the
        strict-mode additions) and that the tool call is parsed into the reply message.
        """
        with patch("openai.resources.chat.completions.Completions.create") as mock_chat_completion_create:
            completion = ChatCompletion(
                id="foo",
                model="gpt-4",
                object="chat.completion",
                choices=[
                    Choice(
                        finish_reason="tool_calls",
                        logprobs=None,
                        index=0,
                        message=ChatCompletionMessage(
                            role="assistant",
                            tool_calls=[
                                ChatCompletionMessageFunctionToolCall(
                                    id="123",
                                    type="function",
                                    function=Function(name="weather", arguments='{"city": "Paris"}'),
                                )
                            ],
                        ),
                    )
                ],
                created=int(datetime.now().timestamp()),
                usage=CompletionUsage(
                    completion_tokens=40,
                    prompt_tokens=57,
                    total_tokens=97,
                    completion_tokens_details=CompletionTokensDetails(
                        accepted_prediction_tokens=0, audio_tokens=0, reasoning_tokens=0, rejected_prediction_tokens=0
                    ),
                    prompt_tokens_details=PromptTokensDetails(audio_tokens=0, cached_tokens=0),
                ),
            )

            mock_chat_completion_create.return_value = completion

            component = OpenAIChatGenerator(
                api_key=Secret.from_token("test-api-key"), tools=tools[:1], tools_strict=True
            )
            response = component.run([ChatMessage.from_user("What's the weather like in Paris?")])

        # ensure that the tools are passed to the OpenAI API
        # tools_strict=True is expected to add "strict" and "additionalProperties": False to the spec
        function_spec = {**tools[0].tool_spec}
        function_spec["strict"] = True
        function_spec["parameters"]["additionalProperties"] = False
        assert mock_chat_completion_create.call_args[1]["tools"] == [{"type": "function", "function": function_spec}]

        assert len(response["replies"]) == 1
        message = response["replies"][0]

        # the reply carries only the tool call, no text content
        assert not message.texts
        assert not message.text

        assert message.tool_calls
        tool_call = message.tool_call
        assert isinstance(tool_call, ToolCall)
        assert tool_call.tool_name == "weather"
        assert tool_call.arguments == {"city": "Paris"}
        assert message.meta["finish_reason"] == "tool_calls"
        assert message.meta["usage"]["completion_tokens"] == 40
 662  
    def test_run_with_tools_and_response_format(self, tools, mock_parsed_chat_completion):
        """
        Test the run method with tools and response format
            When tools are used, the function call overrides the schema passed in response_format
        """
        # Patch the SDK's structured-output entry point; the test passes a Pydantic
        # model as response_format, so the generator is expected to go through parse().
        with patch("openai.resources.chat.completions.Completions.parse") as mock_chat_completion_parse:
            # Mocked response: the single choice carries a tool call rather than a
            # parsed CalendarEvent — the scenario where the model chooses to call a
            # function even though a response_format schema was supplied.
            completion = ParsedChatCompletion[CalendarEvent](
                id="foo",
                model="gpt-4",
                object="chat.completion",
                choices=[
                    ParsedChoice[CalendarEvent](
                        finish_reason="tool_calls",
                        logprobs=None,
                        index=0,
                        message=ParsedChatCompletionMessage[CalendarEvent](
                            role="assistant",
                            tool_calls=[
                                ParsedFunctionToolCall(
                                    id="123",
                                    type="function",
                                    function=ParsedFunction(name="weather", arguments='{"city": "Paris"}'),
                                )
                            ],
                        ),
                    )
                ],
                created=int(datetime.now().timestamp()),
                usage=CompletionUsage(
                    completion_tokens=40,
                    prompt_tokens=57,
                    total_tokens=97,
                    completion_tokens_details=CompletionTokensDetails(
                        accepted_prediction_tokens=0, audio_tokens=0, reasoning_tokens=0, rejected_prediction_tokens=0
                    ),
                    prompt_tokens_details=PromptTokensDetails(audio_tokens=0, cached_tokens=0),
                ),
            )
            mock_chat_completion_parse.return_value = completion

            # Only the first tool is registered, with strict schema checking enabled.
            component = OpenAIChatGenerator(
                api_key=Secret.from_token("test-api-key"), tools=tools[:1], tools_strict=True
            )
            response_with_format = component.run(
                [ChatMessage.from_user("What's the weather like in Paris?")],
                generation_kwargs={"response_format": CalendarEvent},
            )

        # The reply should expose only the tool call (no text content), with the
        # finish reason and token usage taken from the mocked completion above.
        assert len(response_with_format["replies"]) == 1
        message_with_format = response_with_format["replies"][0]
        assert not message_with_format.texts
        assert not message_with_format.text
        assert message_with_format.tool_calls
        tool_call = message_with_format.tool_call
        assert isinstance(tool_call, ToolCall)
        assert tool_call.tool_name == "weather"
        assert tool_call.arguments == {"city": "Paris"}
        assert message_with_format.meta["finish_reason"] == "tool_calls"
        assert message_with_format.meta["usage"]["completion_tokens"] == 40
 722  
 723      def test_run_with_tools_streaming(self, mock_chat_completion_chunk_with_tools, tools):
 724          streaming_callback_called = False
 725  
 726          def streaming_callback(chunk: StreamingChunk) -> None:
 727              nonlocal streaming_callback_called
 728              streaming_callback_called = True
 729  
 730          component = OpenAIChatGenerator(
 731              api_key=Secret.from_token("test-api-key"), streaming_callback=streaming_callback
 732          )
 733          chat_messages = [ChatMessage.from_user("What's the weather like in Paris?")]
 734          response = component.run(chat_messages, tools=tools)
 735  
 736          # check we called the streaming callback
 737          assert streaming_callback_called
 738  
 739          # check that the component still returns the correct response
 740          assert isinstance(response, dict)
 741          assert "replies" in response
 742          assert isinstance(response["replies"], list)
 743          assert len(response["replies"]) == 1
 744          assert [isinstance(reply, ChatMessage) for reply in response["replies"]]
 745  
 746          message = response["replies"][0]
 747  
 748          assert message.tool_calls
 749          tool_call = message.tool_call
 750          assert isinstance(tool_call, ToolCall)
 751          assert tool_call.tool_name == "weather"
 752          assert tool_call.arguments == {"city": "Paris"}
 753          assert message.meta["finish_reason"] == "tool_calls"
 754  
    def test_invalid_tool_call_json(self, tools, caplog):
        """
        A tool call whose arguments are not valid JSON is dropped from the reply and
        a warning is logged, while the rest of the message metadata is preserved.
        """
        caplog.set_level(logging.WARNING)

        with patch("openai.resources.chat.completions.Completions.create") as mock_create:
            # Mocked completion: the tool call's `arguments` string is not valid JSON.
            mock_create.return_value = ChatCompletion(
                id="test",
                model="gpt-5-mini",
                object="chat.completion",
                choices=[
                    Choice(
                        finish_reason="tool_calls",
                        index=0,
                        message=ChatCompletionMessage(
                            role="assistant",
                            tool_calls=[
                                ChatCompletionMessageFunctionToolCall(
                                    id="1",
                                    type="function",
                                    function=Function(name="weather", arguments='"invalid": "json"'),
                                )
                            ],
                        ),
                    )
                ],
                created=1234567890,
                usage=CompletionUsage(
                    completion_tokens=47,
                    prompt_tokens=540,
                    total_tokens=587,
                    completion_tokens_details=CompletionTokensDetails(
                        accepted_prediction_tokens=0, audio_tokens=0, reasoning_tokens=0, rejected_prediction_tokens=0
                    ),
                    prompt_tokens_details=PromptTokensDetails(audio_tokens=0, cached_tokens=0),
                ),
            )

            component = OpenAIChatGenerator(api_key=Secret.from_token("test-api-key"), tools=tools)
            response = component.run([ChatMessage.from_user("What's the weather in Paris?")])

        assert len(response["replies"]) == 1
        message = response["replies"][0]
        # The malformed tool call is discarded rather than surfaced as a ToolCall,
        # and the failure is reported through a warning log.
        assert len(message.tool_calls) == 0
        assert "OpenAI returned a malformed JSON string for tool call arguments" in caplog.text
        # finish_reason and usage still reflect the mocked completion above.
        assert message.meta["finish_reason"] == "tool_calls"
        assert message.meta["usage"]["completion_tokens"] == 47
 800  
 801      def test_run_with_response_format_and_streaming_pydantic_model(self, calendar_event_model):
 802          chat_messages = [
 803              ChatMessage.from_user("The marketing summit takes place on October12th at the Hilton Hotel downtown.")
 804          ]
 805          component = OpenAIChatGenerator(
 806              api_key=Secret.from_token("test-api-key"),
 807              generation_kwargs={"response_format": calendar_event_model},
 808              streaming_callback=print_streaming_chunk,
 809          )
 810          with pytest.raises(TypeError):
 811              component.run(chat_messages)
 812  
 813      @pytest.mark.skipif(
 814          not os.environ.get("OPENAI_API_KEY", None),
 815          reason="Export an env var called OPENAI_API_KEY containing the OpenAI API key to run this test.",
 816      )
 817      @pytest.mark.integration
 818      def test_live_run(self):
 819          chat_messages = [ChatMessage.from_user("What's the capital of France")]
 820          component = OpenAIChatGenerator(model="gpt-4.1-nano", generation_kwargs={"n": 1})
 821          results = component.run(chat_messages)
 822          assert len(results["replies"]) == 1
 823          message: ChatMessage = results["replies"][0]
 824          assert "Paris" in message.text
 825          assert "gpt-4.1-nano" in message.meta["model"]
 826          assert message.meta["finish_reason"] == "stop"
 827          assert message.meta["usage"]["prompt_tokens"] > 0
 828  
 829      @pytest.mark.skipif(
 830          not os.environ.get("OPENAI_API_KEY", None),
 831          reason="Export an env var called OPENAI_API_KEY containing the OpenAI API key to run this test.",
 832      )
 833      @pytest.mark.integration
 834      def test_live_run_with_response_format_pydantic_model(self, calendar_event_model):
 835          chat_messages = [
 836              ChatMessage.from_user("The marketing summit takes place on October12th at the Hilton Hotel downtown.")
 837          ]
 838          component = OpenAIChatGenerator(
 839              model="gpt-4.1-nano", generation_kwargs={"response_format": calendar_event_model}
 840          )
 841          results = component.run(chat_messages)
 842          assert len(results["replies"]) == 1
 843          message: ChatMessage = results["replies"][0]
 844          msg = json.loads(message.text)
 845          assert "Marketing Summit" in msg["event_name"]
 846          assert isinstance(msg["event_date"], str)
 847          assert isinstance(msg["event_location"], str)
 848  
 849      @pytest.mark.skipif(
 850          not os.environ.get("OPENAI_API_KEY", None),
 851          reason="Export an env var called OPENAI_API_KEY containing the OpenAI API key to run this test.",
 852      )
 853      @pytest.mark.integration
 854      def test_live_run_with_response_format_json_object(self):
 855          chat_messages = [
 856              ChatMessage.from_user(
 857                  'Answer in JSON: What\'s the capital of France? Please respond with a JSON object with the key "city". '
 858                  'For example: {"city": "Paris"}'
 859              )
 860          ]
 861          comp = OpenAIChatGenerator(model="gpt-4.1-nano", generation_kwargs={"response_format": {"type": "json_object"}})
 862          results = comp.run(chat_messages)
 863          assert len(results["replies"]) == 1
 864          message: ChatMessage = results["replies"][0]
 865          msg = json.loads(message.text)
 866          assert "paris" in msg["city"].lower()
 867          assert message.meta["finish_reason"] == "stop"
 868  
 869      @pytest.mark.skipif(
 870          not os.environ.get("OPENAI_API_KEY", None),
 871          reason="Export an env var called OPENAI_API_KEY containing the OpenAI API key to run this test.",
 872      )
 873      @pytest.mark.integration
 874      def test_live_run_with_response_format_json_object_streaming(self):
 875          streaming_callback_called = False
 876  
 877          def streaming_callback(chunk: StreamingChunk) -> None:
 878              nonlocal streaming_callback_called
 879              streaming_callback_called = True
 880  
 881          chat_messages = [
 882              ChatMessage.from_user(
 883                  'Answer in JSON: What\'s the capital of France? Please respond with a JSON object with the key "city". '
 884                  'For example: {"city": "Paris"}'
 885              )
 886          ]
 887          comp = OpenAIChatGenerator(
 888              model="gpt-4.1-nano",
 889              generation_kwargs={"response_format": {"type": "json_object"}},
 890              streaming_callback=streaming_callback,
 891          )
 892          results = comp.run(chat_messages)
 893          assert len(results["replies"]) == 1
 894          message: ChatMessage = results["replies"][0]
 895          msg = json.loads(message.text)
 896          assert "paris" in msg["city"].lower()
 897          assert message.meta["finish_reason"] == "stop"
 898          assert streaming_callback_called is True
 899  
 900      @pytest.mark.skipif(
 901          not os.environ.get("OPENAI_API_KEY", None),
 902          reason="Export an env var called OPENAI_API_KEY containing the OpenAI API key to run this test.",
 903      )
 904      @pytest.mark.integration
 905      def test_live_run_with_response_format_json_schema(self):
 906          response_schema = {
 907              "type": "json_schema",
 908              "json_schema": {
 909                  "name": "CapitalCity",
 910                  "strict": True,
 911                  "schema": {
 912                      "title": "CapitalCity",
 913                      "type": "object",
 914                      "properties": {
 915                          "city": {"title": "City", "type": "string"},
 916                          "country": {"title": "Country", "type": "string"},
 917                      },
 918                      "required": ["city", "country"],
 919                      "additionalProperties": False,
 920                  },
 921              },
 922          }
 923  
 924          chat_messages = [ChatMessage.from_user("What's the capital of France?")]
 925          comp = OpenAIChatGenerator(model="gpt-4.1-nano", generation_kwargs={"response_format": response_schema})
 926          results = comp.run(chat_messages)
 927          assert len(results["replies"]) == 1
 928          message: ChatMessage = results["replies"][0]
 929          msg = json.loads(message.text)
 930          assert "Paris" in msg["city"]
 931          assert isinstance(msg["country"], str)
 932          assert "France" in msg["country"]
 933          assert message.meta["finish_reason"] == "stop"
 934  
 935      @pytest.mark.skipif(
 936          not os.environ.get("OPENAI_API_KEY", None),
 937          reason="Export an env var called OPENAI_API_KEY containing the OpenAI API key to run this test.",
 938      )
 939      @pytest.mark.integration
 940      def test_live_run_with_response_format_json_schema_streaming(self):
 941          streaming_callback_called = False
 942  
 943          def streaming_callback(chunk: StreamingChunk) -> None:
 944              nonlocal streaming_callback_called
 945              streaming_callback_called = True
 946  
 947          response_schema = {
 948              "type": "json_schema",
 949              "json_schema": {
 950                  "name": "CapitalCity",
 951                  "strict": True,
 952                  "schema": {
 953                      "title": "CapitalCity",
 954                      "type": "object",
 955                      "properties": {
 956                          "city": {"title": "City", "type": "string"},
 957                          "country": {"title": "Country", "type": "string"},
 958                      },
 959                      "required": ["city", "country"],
 960                      "additionalProperties": False,
 961                  },
 962              },
 963          }
 964  
 965          chat_messages = [ChatMessage.from_user("What's the capital of France?")]
 966          comp = OpenAIChatGenerator(
 967              model="gpt-4.1-nano",
 968              generation_kwargs={"response_format": response_schema},
 969              streaming_callback=streaming_callback,
 970          )
 971          results = comp.run(chat_messages)
 972          assert len(results["replies"]) == 1
 973          message: ChatMessage = results["replies"][0]
 974          msg = json.loads(message.text)
 975          assert "Paris" in msg["city"]
 976          assert isinstance(msg["country"], str)
 977          assert "France" in msg["country"]
 978          assert message.meta["finish_reason"] == "stop"
 979          assert streaming_callback_called is True
 980  
 981      def test_run_with_wrong_model(self):
 982          mock_client = MagicMock()
 983          mock_client.chat.completions.create.side_effect = OpenAIError("Invalid model name")
 984  
 985          generator = OpenAIChatGenerator(api_key=Secret.from_token("test-api-key"), model="something-obviously-wrong")
 986  
 987          generator.client = mock_client
 988  
 989          with pytest.raises(OpenAIError):
 990              generator.run([ChatMessage.from_user("irrelevant")])
 991  
 992      @pytest.mark.skipif(
 993          not os.environ.get("OPENAI_API_KEY", None),
 994          reason="Export an env var called OPENAI_API_KEY containing the OpenAI API key to run this test.",
 995      )
 996      @pytest.mark.integration
 997      def test_live_run_streaming(self):
 998          class Callback:
 999              def __init__(self):
1000                  self.responses = ""
1001                  self.counter = 0
1002  
1003              def __call__(self, chunk: StreamingChunk) -> None:
1004                  self.counter += 1
1005                  self.responses += chunk.content if chunk.content else ""
1006  
1007          callback = Callback()
1008          component = OpenAIChatGenerator(
1009              model="gpt-4.1-nano",
1010              streaming_callback=callback,
1011              generation_kwargs={"stream_options": {"include_usage": True}},
1012          )
1013          results = component.run([ChatMessage.from_user("What's the capital of France?")])
1014  
1015          # Basic response checks
1016          assert "replies" in results
1017          assert len(results["replies"]) == 1
1018          message: ChatMessage = results["replies"][0]
1019          assert "Paris" in message.text
1020          assert isinstance(message.meta, dict)
1021  
1022          # Metadata checks
1023          metadata = message.meta
1024          assert "gpt-4.1-nano" in metadata["model"]
1025          assert metadata["finish_reason"] == "stop"
1026  
1027          # Usage information checks
1028          assert isinstance(metadata.get("usage"), dict), "meta.usage not a dict"
1029          usage = metadata["usage"]
1030          assert "prompt_tokens" in usage and usage["prompt_tokens"] > 0
1031          assert "completion_tokens" in usage and usage["completion_tokens"] > 0
1032  
1033          # Detailed token information checks
1034          assert isinstance(usage.get("completion_tokens_details"), dict), "usage.completion_tokens_details not a dict"
1035          assert isinstance(usage.get("prompt_tokens_details"), dict), "usage.prompt_tokens_details not a dict"
1036  
1037          # Streaming callback verification
1038          assert callback.counter > 1
1039          assert "Paris" in callback.responses
1040  
1041      @pytest.mark.skipif(
1042          not os.environ.get("OPENAI_API_KEY", None),
1043          reason="Export an env var called OPENAI_API_KEY containing the OpenAI API key to run this test.",
1044      )
1045      @pytest.mark.integration
1046      def test_live_run_with_tools_streaming(self, tools):
1047          chat_messages = [ChatMessage.from_user("What's the weather like in Paris and Berlin?")]
1048          component = OpenAIChatGenerator(
1049              model="gpt-4.1-nano",
1050              tools=tools,
1051              streaming_callback=print_streaming_chunk,
1052              generation_kwargs={"stream_options": {"include_usage": True}},
1053          )
1054          results = component.run(chat_messages)
1055          assert len(results["replies"]) == 1
1056          message = results["replies"][0]
1057  
1058          assert not message.texts
1059          assert not message.text
1060          assert message.tool_calls
1061          tool_calls = message.tool_calls
1062          assert len(tool_calls) == 2
1063  
1064          for tool_call in tool_calls:
1065              assert isinstance(tool_call, ToolCall)
1066              assert tool_call.tool_name == "weather"
1067  
1068          arguments = [tool_call.arguments for tool_call in tool_calls]
1069          # Check that both cities are present (case-insensitive, allowing for variations like "Paris, France")
1070          city_values = [arg["city"].lower() for arg in arguments]
1071          assert any("berlin" in city for city in city_values)
1072          assert any("paris" in city for city in city_values)
1073          assert message.meta["finish_reason"] == "tool_calls"
1074  
1075      def test_openai_chat_generator_with_toolset_initialization(self, tools, monkeypatch):
1076          """Test that the OpenAIChatGenerator can be initialized with a Toolset."""
1077          monkeypatch.setenv("OPENAI_API_KEY", "test-api-key")
1078          toolset = Toolset(tools)
1079          generator = OpenAIChatGenerator(tools=toolset)
1080          assert generator.tools == toolset
1081  
1082      def test_from_dict_with_toolset(self, tools, monkeypatch):
1083          """Test that the OpenAIChatGenerator can be deserialized from a dictionary with a Toolset."""
1084          monkeypatch.setenv("OPENAI_API_KEY", "test-api-key")
1085          toolset = Toolset(tools)
1086          component = OpenAIChatGenerator(tools=toolset)
1087          data = component.to_dict()
1088  
1089          deserialized_component = OpenAIChatGenerator.from_dict(data)
1090  
1091          assert isinstance(deserialized_component.tools, Toolset)
1092          assert len(deserialized_component.tools) == len(tools)
1093          assert all(isinstance(tool, Tool) for tool in deserialized_component.tools)
1094  
1095      @pytest.mark.skipif(
1096          not os.environ.get("OPENAI_API_KEY", None),
1097          reason="Export an env var called OPENAI_API_KEY containing the OpenAI API key to run this test.",
1098      )
1099      @pytest.mark.integration
1100      def test_live_run_with_toolset(self, tools):
1101          chat_messages = [ChatMessage.from_user("What's the weather like in Paris?")]
1102          toolset = Toolset(tools)
1103          component = OpenAIChatGenerator(model="gpt-4.1-nano", tools=toolset)
1104          results = component.run(chat_messages)
1105          assert len(results["replies"]) == 1
1106          message = results["replies"][0]
1107  
1108          assert not message.texts
1109          assert not message.text
1110          assert message.tool_calls
1111          tool_call = message.tool_call
1112          assert isinstance(tool_call, ToolCall)
1113          assert tool_call.tool_name == "weather"
1114          assert tool_call.arguments.keys() == {"city"}
1115          assert "Paris" in tool_call.arguments["city"]
1116  
1117      @pytest.mark.skipif(
1118          not os.environ.get("OPENAI_API_KEY", None),
1119          reason="Export an env var called OPENAI_API_KEY containing the OpenAI API key to run this test.",
1120      )
1121      @pytest.mark.integration
1122      def test_live_run_multimodal(self, test_files_path):
1123          image_path = test_files_path / "images" / "apple.jpg"
1124  
1125          # we resize the image to keep this test fast (around 1s) - increase the size in case of errors
1126          image_content = ImageContent.from_file_path(file_path=image_path, size=(100, 100), detail="low")
1127  
1128          chat_messages = [ChatMessage.from_user(content_parts=["What does this image show? Max 5 words", image_content])]
1129  
1130          generator = OpenAIChatGenerator(model="gpt-4.1-nano")
1131          results = generator.run(chat_messages)
1132  
1133          assert len(results["replies"]) == 1
1134          message: ChatMessage = results["replies"][0]
1135  
1136          assert message.text
1137          assert "apple" in message.text.lower()
1138  
1139          assert message.is_from(ChatRole.ASSISTANT)
1140          assert not message.tool_calls
1141          assert not message.tool_call_results
1142  
1143      @pytest.mark.skipif(
1144          not os.environ.get("OPENAI_API_KEY", None),
1145          reason="Export an env var called OPENAI_API_KEY containing the OpenAI API key to run this test.",
1146      )
1147      @pytest.mark.integration
1148      def test_live_run_with_file_content(self, test_files_path):
1149          pdf_path = test_files_path / "pdf" / "sample_pdf_3.pdf"
1150  
1151          file_content = FileContent.from_file_path(file_path=pdf_path)
1152  
1153          chat_messages = [
1154              ChatMessage.from_user(
1155                  content_parts=[file_content, "Is this document a paper about LLMs? Respond with 'yes' or 'no' only."]
1156              )
1157          ]
1158  
1159          generator = OpenAIChatGenerator(model="gpt-4.1-nano")
1160          results = generator.run(chat_messages)
1161  
1162          assert len(results["replies"]) == 1
1163          message: ChatMessage = results["replies"][0]
1164  
1165          assert message.is_from(ChatRole.ASSISTANT)
1166  
1167          assert message.text
1168          assert "no" in message.text.lower()
1169  
1170      def test_init_with_list_of_toolsets(self, monkeypatch, tools):
1171          """Test initialization with a list of Toolsets."""
1172          monkeypatch.setenv("OPENAI_API_KEY", "test-api-key")
1173  
1174          toolset1 = Toolset([tools[0]])
1175          toolset2 = Toolset([tools[1]])
1176  
1177          component = OpenAIChatGenerator(tools=[toolset1, toolset2])
1178  
1179          assert component.tools == [toolset1, toolset2]
1180          assert isinstance(component.tools, list)
1181          assert len(component.tools) == 2
1182          assert all(isinstance(ts, Toolset) for ts in component.tools)
1183  
1184      def test_serde_with_list_of_toolsets(self, monkeypatch, tools):
1185          """Test serialization and deserialization with a list of Toolsets."""
1186          monkeypatch.setenv("OPENAI_API_KEY", "test-api-key")
1187  
1188          toolset1 = Toolset([tools[0]])
1189          toolset2 = Toolset([tools[1]])
1190  
1191          component = OpenAIChatGenerator(tools=[toolset1, toolset2])
1192          data = component.to_dict()
1193  
1194          # Verify serialization preserves list[Toolset] structure
1195          tools_data = data["init_parameters"]["tools"]
1196          assert isinstance(tools_data, list)
1197          assert len(tools_data) == 2
1198          assert all(isinstance(ts, dict) for ts in tools_data)
1199          assert tools_data[0]["type"] == "haystack.tools.toolset.Toolset"
1200          assert tools_data[1]["type"] == "haystack.tools.toolset.Toolset"
1201  
1202          # Deserialize and verify
1203          deserialized = OpenAIChatGenerator.from_dict(data)
1204          assert isinstance(deserialized.tools, list)
1205          assert len(deserialized.tools) == 2
1206          assert all(isinstance(ts, Toolset) for ts in deserialized.tools)
1207  
1208      def test_warm_up_with_tools(self, monkeypatch):
1209          """Test that warm_up() calls warm_up on tools and is idempotent."""
1210          monkeypatch.setenv("OPENAI_API_KEY", "test-api-key")
1211  
1212          # Create a mock tool that tracks if warm_up() was called
1213          class MockTool(Tool):
1214              warm_up_call_count = 0  # Class variable to track calls
1215  
1216              def __init__(self):
1217                  super().__init__(
1218                      name="mock_tool",
1219                      description="A mock tool for testing",
1220                      parameters={"x": {"type": "string"}},
1221                      function=lambda x: x,
1222                  )
1223  
1224              def warm_up(self):
1225                  MockTool.warm_up_call_count += 1
1226  
1227          # Reset the class variable before test
1228          MockTool.warm_up_call_count = 0
1229          mock_tool = MockTool()
1230  
1231          # Create OpenAIChatGenerator with the mock tool
1232          component = OpenAIChatGenerator(tools=[mock_tool])
1233  
1234          # Verify initial state - warm_up not called yet
1235          assert MockTool.warm_up_call_count == 0
1236          assert not component._is_warmed_up
1237  
1238          # Call warm_up() on the generator
1239          component.warm_up()
1240  
1241          # Assert that the tool's warm_up() was called
1242          assert MockTool.warm_up_call_count == 1
1243          assert component._is_warmed_up
1244  
1245          # Call warm_up() again and verify it's idempotent (only warms up once)
1246          component.warm_up()
1247  
1248          # The tool's warm_up should still only have been called once
1249          assert MockTool.warm_up_call_count == 1
1250          assert component._is_warmed_up
1251  
1252      def test_warm_up_with_no_tools(self, monkeypatch):
1253          """Test that warm_up() works when no tools are provided."""
1254          monkeypatch.setenv("OPENAI_API_KEY", "test-api-key")
1255  
1256          component = OpenAIChatGenerator()
1257  
1258          # Verify initial state
1259          assert not component._is_warmed_up
1260          assert component.tools is None
1261  
1262          # Call warm_up() - should not raise an error
1263          component.warm_up()
1264  
1265          # Verify the component is warmed up
1266          assert component._is_warmed_up
1267  
1268          # Call warm_up() again - should be idempotent
1269          component.warm_up()
1270          assert component._is_warmed_up
1271  
1272      def test_warm_up_with_multiple_tools(self, monkeypatch):
1273          """Test that warm_up() works with multiple tools."""
1274          monkeypatch.setenv("OPENAI_API_KEY", "test-api-key")
1275  
1276          from haystack.tools import Tool
1277  
1278          # Track warm_up calls
1279          warm_up_calls = []
1280  
1281          class MockTool(Tool):
1282              def __init__(self, tool_name):
1283                  super().__init__(
1284                      name=tool_name,
1285                      description=f"Mock tool {tool_name}",
1286                      parameters={"type": "object", "properties": {"x": {"type": "string"}}, "required": ["x"]},
1287                      function=lambda x: f"{tool_name} result: {x}",
1288                  )
1289  
1290              def warm_up(self):
1291                  warm_up_calls.append(self.name)
1292  
1293          mock_tool1 = MockTool("tool1")
1294          mock_tool2 = MockTool("tool2")
1295  
1296          # Use a LIST of tools, not a Toolset
1297          component = OpenAIChatGenerator(tools=[mock_tool1, mock_tool2])
1298  
1299          # Call warm_up()
1300          component.warm_up()
1301  
1302          # Assert that both tools' warm_up() were called
1303          assert "tool1" in warm_up_calls
1304          assert "tool2" in warm_up_calls
1305          assert component._is_warmed_up
1306  
1307          # Track count
1308          call_count = len(warm_up_calls)
1309  
1310          # Verify idempotency
1311          component.warm_up()
1312          assert len(warm_up_calls) == call_count
1313  
1314  
@pytest.fixture
def chat_completion_chunks():
    """Simulated OpenAI stream: two parallel `weather` tool calls, a finish chunk, and a usage chunk."""

    # Metadata shared by every chunk in the stream.
    shared = {
        "id": "chatcmpl-BZdwjFecdcaQfCf7bn319vRp6fY8F",
        "created": 1747834733,
        "model": "gpt-5-mini",
        "object": "chat.completion.chunk",
        "service_tier": "default",
        "system_fingerprint": "fp_54eb4bd693",
    }

    def delta_chunk(delta, finish_reason=None):
        # Wrap a ChoiceDelta in a single-choice chunk carrying the shared metadata.
        return ChatCompletionChunk(
            choices=[chat_completion_chunk.Choice(delta=delta, finish_reason=finish_reason, index=0)], **shared
        )

    def tool_call_start(tc_index, call_id):
        # First chunk of a tool call: carries id, tool name, and empty arguments.
        return delta_chunk(
            ChoiceDelta(
                tool_calls=[
                    ChoiceDeltaToolCall(
                        index=tc_index,
                        id=call_id,
                        function=ChoiceDeltaToolCallFunction(arguments="", name="weather"),
                        type="function",
                    )
                ]
            )
        )

    def tool_call_args(tc_index, fragment):
        # Follow-up chunk streaming one fragment of the tool call's JSON arguments.
        return delta_chunk(
            ChoiceDelta(
                tool_calls=[
                    ChoiceDeltaToolCall(index=tc_index, function=ChoiceDeltaToolCallFunction(arguments=fragment))
                ]
            )
        )

    chunks = [delta_chunk(ChoiceDelta(role="assistant"))]

    # Tool call 0: weather({"city": "Paris"}) streamed in fragments.
    chunks.append(tool_call_start(0, "call_zcvlnVaTeJWRjLAFfYxX69z4"))
    chunks.extend(tool_call_args(0, piece) for piece in ('{"ci', 'ty": ', '"Paris', '"}'))

    # Tool call 1: weather({"city": "Berlin"}) streamed in fragments.
    chunks.append(tool_call_start(1, "call_C88m67V16CrETq6jbNXjdZI9"))
    chunks.extend(tool_call_args(1, piece) for piece in ('{"ci', 'ty": ', '"Berli', 'n"}'))

    # Terminal chunk with the finish reason, then the final usage-only chunk (empty choices).
    chunks.append(delta_chunk(ChoiceDelta(), finish_reason="tool_calls"))
    chunks.append(
        ChatCompletionChunk(
            choices=[],
            usage=CompletionUsage(
                completion_tokens=42,
                prompt_tokens=282,
                total_tokens=324,
                completion_tokens_details=CompletionTokensDetails(
                    accepted_prediction_tokens=0, audio_tokens=0, reasoning_tokens=0, rejected_prediction_tokens=0
                ),
                prompt_tokens_details=PromptTokensDetails(audio_tokens=0, cached_tokens=0),
            ),
            **shared,
        )
    )
    return chunks
1541  
1542  
@pytest.fixture
def chat_completion_chunk_delta_none():
    """A chunk whose choice delta is None, as some OpenAI-compatible providers emit."""
    only_choice = chat_completion_chunk.Choice(delta=ChoiceDelta(), index=0)
    chunk = ChatCompletionChunk(
        id="chatcmpl-BC1y4wqIhe17R8sv3lgLcWlB4tXCw",
        choices=[only_choice],
        created=1742207200,
        model="gpt-5-mini",
        object="chat.completion.chunk",
    )
    # pydantic rejects delta=None at construction time, so overwrite it afterwards
    only_choice.delta = None
    return chunk
1555  
1556  
@pytest.fixture
def streaming_chunks():
    """Expected Haystack StreamingChunks mirroring the `chat_completion_chunks` fixture."""

    def base_meta(tool_calls=None, finish_reason=None):
        # Meta dict as produced from a regular (non-usage) OpenAI chunk; `index` is the choice index,
        # which is always 0 in this stream.
        return {
            "model": "gpt-5-mini",
            "index": 0,
            "tool_calls": tool_calls,
            "finish_reason": finish_reason,
            "received_at": ANY,
            "usage": None,
        }

    def start_chunk(tc_index, call_id):
        # Chunk that opens a tool call: carries id, tool name, and start=True.
        openai_tc = ChoiceDeltaToolCall(
            index=tc_index,
            id=call_id,
            function=ChoiceDeltaToolCallFunction(arguments="", name="weather"),
            type="function",
        )
        return StreamingChunk(
            content="",
            meta=base_meta(tool_calls=[openai_tc]),
            index=tc_index,
            tool_calls=[ToolCallDelta(tool_name="weather", id=call_id, index=tc_index)],
            start=True,
        )

    def args_chunk(tc_index, fragment):
        # Chunk carrying one fragment of the tool call's JSON arguments.
        openai_tc = ChoiceDeltaToolCall(index=tc_index, function=ChoiceDeltaToolCallFunction(arguments=fragment))
        return StreamingChunk(
            content="",
            meta=base_meta(tool_calls=[openai_tc]),
            index=tc_index,
            tool_calls=[ToolCallDelta(arguments=fragment, index=tc_index)],
        )

    usage_payload = {
        "completion_tokens": 42,
        "prompt_tokens": 282,
        "total_tokens": 324,
        "completion_tokens_details": {
            "accepted_prediction_tokens": 0,
            "audio_tokens": 0,
            "reasoning_tokens": 0,
            "rejected_prediction_tokens": 0,
        },
        "prompt_tokens_details": {"audio_tokens": 0, "cached_tokens": 0},
    }

    chunks = [StreamingChunk(content="", meta=base_meta())]

    # Tool call 0: weather({"city": "Paris"}) streamed in fragments.
    chunks.append(start_chunk(0, "call_zcvlnVaTeJWRjLAFfYxX69z4"))
    chunks.extend(args_chunk(0, piece) for piece in ('{"ci', 'ty": ', '"Paris', '"}'))

    # Tool call 1: weather({"city": "Berlin"}) streamed in fragments.
    chunks.append(start_chunk(1, "call_C88m67V16CrETq6jbNXjdZI9"))
    chunks.extend(args_chunk(1, piece) for piece in ('{"ci', 'ty": ', '"Berli', 'n"}'))

    # Finish-reason chunk, then the usage-only chunk whose meta has no choice fields.
    chunks.append(
        StreamingChunk(content="", meta=base_meta(finish_reason="tool_calls"), finish_reason="tool_calls")
    )
    chunks.append(
        StreamingChunk(content="", meta={"model": "gpt-5-mini", "received_at": ANY, "usage": usage_payload})
    )
    return chunks
1749  
1750  
1751  class TestChatCompletionChunkConversion:
1752      def test_convert_chat_completion_chunk_to_streaming_chunk(self, chat_completion_chunks, streaming_chunks):
1753          previous_chunks = []
1754          for openai_chunk, haystack_chunk in zip(chat_completion_chunks, streaming_chunks, strict=True):
1755              stream_chunk = _convert_chat_completion_chunk_to_streaming_chunk(
1756                  chunk=openai_chunk, previous_chunks=previous_chunks
1757              )
1758              assert stream_chunk == haystack_chunk
1759              previous_chunks.append(stream_chunk)
1760  
1761      def test_convert_chat_completion_chunk_with_empty_tool_calls(self):
1762          # This can happen with some LLM providers where tool calls are not present but the pydantic models are still
1763          # initialized.
1764          chunk = ChatCompletionChunk(
1765              id="chatcmpl-BC1y4wqIhe17R8sv3lgLcWlB4tXCw",
1766              choices=[
1767                  chat_completion_chunk.Choice(
1768                      delta=chat_completion_chunk.ChoiceDelta(
1769                          tool_calls=[ChoiceDeltaToolCall(index=0, function=ChoiceDeltaToolCallFunction())]
1770                      ),
1771                      index=0,
1772                  )
1773              ],
1774              created=1742207200,
1775              model="gpt-5-mini",
1776              object="chat.completion.chunk",
1777          )
1778          result = _convert_chat_completion_chunk_to_streaming_chunk(chunk=chunk, previous_chunks=[])
1779          assert result.content == ""
1780          assert result.start is False
1781          assert result.tool_calls == [ToolCallDelta(index=0)]
1782          assert result.tool_call_result is None
1783          assert result.index == 0
1784          assert result.meta["model"] == "gpt-5-mini"
1785          assert result.meta["received_at"] is not None
1786  
1787      def test_convert_chat_completion_chunk_with_delta_none(self, chat_completion_chunk_delta_none):
1788          """
1789          Test that a chat completion chunk with a delta set to None is converted to a streaming chunk properly.
1790          This should not happen, but some OpenAI-compatible providers sometimes return a delta set to None.
1791          """
1792  
1793          result = _convert_chat_completion_chunk_to_streaming_chunk(
1794              chunk=chat_completion_chunk_delta_none, previous_chunks=[]
1795          )
1796  
1797          assert result.content == ""
1798          assert result.start is False
1799          assert result.tool_calls is None
1800          assert result.tool_call_result is None
1801          assert result.index == 0
1802          assert result.component_info is None
1803          assert result.finish_reason is None
1804          assert result.reasoning is None
1805  
1806          assert result.meta["model"] == "gpt-5-mini"
1807          assert result.meta["received_at"] is not None
1808          assert result.meta["index"] == 0
1809          assert result.meta["finish_reason"] is None
1810          assert result.meta["usage"] is None
1811          assert result.meta["tool_calls"] is None
1812  
1813      def test_handle_stream_response(self, chat_completion_chunks, chat_completion_chunk_delta_none):
1814          openai_chunks = [chat_completion_chunk_delta_none] + chat_completion_chunks
1815          comp = OpenAIChatGenerator(api_key=Secret.from_token("test-api-key"))
1816          result = comp._handle_stream_response(openai_chunks, callback=lambda _: None)[0]  # type: ignore
1817  
1818          assert not result.texts
1819          assert not result.text
1820  
1821          # Verify both tool calls were found and processed
1822          assert len(result.tool_calls) == 2
1823          assert result.tool_calls[0].id == "call_zcvlnVaTeJWRjLAFfYxX69z4"
1824          assert result.tool_calls[0].tool_name == "weather"
1825          assert result.tool_calls[0].arguments == {"city": "Paris"}
1826          assert result.tool_calls[1].id == "call_C88m67V16CrETq6jbNXjdZI9"
1827          assert result.tool_calls[1].tool_name == "weather"
1828          assert result.tool_calls[1].arguments == {"city": "Berlin"}
1829  
1830          # Verify meta information
1831          assert result.meta["model"] == "gpt-5-mini"
1832          assert result.meta["finish_reason"] == "tool_calls"
1833          assert result.meta["index"] == 0
1834          assert result.meta["completion_start_time"] is not None
1835          assert result.meta["usage"] == {
1836              "completion_tokens": 42,
1837              "prompt_tokens": 282,
1838              "total_tokens": 324,
1839              "completion_tokens_details": {
1840                  "accepted_prediction_tokens": 0,
1841                  "audio_tokens": 0,
1842                  "reasoning_tokens": 0,
1843                  "rejected_prediction_tokens": 0,
1844              },
1845              "prompt_tokens_details": {"audio_tokens": 0, "cached_tokens": 0},
1846          }
1847  
    def test_convert_usage_chunk_to_streaming_chunk(self):
        """A usage-only chunk (empty `choices`) converts to an empty StreamingChunk with meta preserved."""
        usage_chunk = ChatCompletionChunk(
            id="chatcmpl-BC1y4wqIhe17R8sv3lgLcWlB4tXCw",
            # Usage chunks arrive at the end of a stream with no choices at all.
            choices=[],
            created=1742207200,
            model="gpt-5-mini",
            object="chat.completion.chunk",
            service_tier="default",
            system_fingerprint="fp_06737a9306",
            usage=CompletionUsage(
                completion_tokens=8,
                prompt_tokens=13,
                total_tokens=21,
                completion_tokens_details=CompletionTokensDetails(
                    accepted_prediction_tokens=0, audio_tokens=0, reasoning_tokens=0, rejected_prediction_tokens=0
                ),
                prompt_tokens_details=PromptTokensDetails(audio_tokens=0, cached_tokens=0),
            ),
        )
        result = _convert_chat_completion_chunk_to_streaming_chunk(chunk=usage_chunk, previous_chunks=[])
        # With no choices there is no content, no start marker, and no tool-call data.
        assert result.content == ""
        assert result.start is False
        assert result.tool_calls is None
        assert result.tool_call_result is None
        assert result.meta["model"] == "gpt-5-mini"
        assert result.meta["received_at"] is not None