# test/components/generators/chat/test_openai_responses_conversion.py
   1  # SPDX-FileCopyrightText: 2022-present deepset GmbH <info@deepset.ai>
   2  #
   3  # SPDX-License-Identifier: Apache-2.0
   4  
   5  from unittest.mock import ANY
   6  
   7  import pytest
   8  from openai.types import Reasoning, ResponseFormatText
   9  from openai.types.responses import (
  10      FunctionTool,
  11      Response,
  12      ResponseCompletedEvent,
  13      ResponseContentPartAddedEvent,
  14      ResponseContentPartDoneEvent,
  15      ResponseCreatedEvent,
  16      ResponseFunctionCallArgumentsDeltaEvent,
  17      ResponseFunctionCallArgumentsDoneEvent,
  18      ResponseFunctionToolCall,
  19      ResponseInProgressEvent,
  20      ResponseOutputItemAddedEvent,
  21      ResponseOutputItemDoneEvent,
  22      ResponseOutputMessage,
  23      ResponseOutputText,
  24      ResponseReasoningItem,
  25      ResponseTextConfig,
  26      ResponseTextDeltaEvent,
  27      ResponseTextDoneEvent,
  28      ResponseUsage,
  29  )
  30  from openai.types.responses.response_usage import InputTokensDetails, OutputTokensDetails
  31  
  32  from haystack.components.generators.chat.openai_responses import (
  33      _convert_chat_message_to_responses_api_format,
  34      _convert_response_chunk_to_streaming_chunk,
  35      _convert_streaming_chunks_to_chat_message,
  36  )
  37  from haystack.dataclasses import (
  38      ChatMessage,
  39      ChatRole,
  40      FileContent,
  41      ImageContent,
  42      ReasoningContent,
  43      StreamingChunk,
  44      TextContent,
  45      ToolCall,
  46      ToolCallDelta,
  47      ToolCallResult,
  48  )
  49  
  50  
@pytest.fixture
def openai_responses_streaming_chunks_with_tool_call():
    """Return already-converted StreamingChunks for a Responses API stream that ends in a tool call.

    The sequence mirrors a real gpt-5-mini stream: response.created, an (empty) reasoning item
    opening and closing, a function-call item for the `weather` tool (name first, then the full
    arguments delta, then arguments.done), and finally response.completed with usage data.
    `received_at` is `unittest.mock.ANY` so equality checks ignore the wall-clock timestamp.
    Used to exercise `_convert_streaming_chunks_to_chat_message`.
    """
    return [
        # response.created — carries the initial Response payload (no output, no usage yet).
        StreamingChunk(
            content="",
            meta={
                "received_at": ANY,
                "response": {
                    "id": "resp_095b57053855eac100690491f4e22c8196ac124365e8c70424",
                    "created_at": 1761907188.0,
                    "model": "gpt-5-mini-2025-08-07",
                    "object": "response",
                    "output": [],
                    "tools": [
                        {
                            "name": "weather",
                            "parameters": {
                                "type": "object",
                                "properties": {"city": {"type": "string"}},
                                "required": ["city"],
                            },
                            "strict": False,
                            "type": "function",
                            "description": "useful to determine the weather in a given location",
                        }
                    ],
                    "reasoning": {"effort": "medium", "generate_summary": None, "summary": None},
                    "usage": None,
                },
                "sequence_number": 0,
                "type": "response.created",
            },
        ),
        # output_item.added for the reasoning item — empty reasoning text, start=True, index 0.
        StreamingChunk(
            content="",
            meta={"received_at": ANY},
            index=0,
            start=True,
            reasoning=ReasoningContent(
                reasoning_text="",
                extra={
                    "id": "rs_095b57053855eac100690491f54e308196878239be3ba6133c",
                    "summary": [],
                    "type": "reasoning",
                },
            ),
        ),
        # output_item.done closing the reasoning item (no new content).
        StreamingChunk(
            content="",
            meta={
                "item": {
                    "id": "rs_095b57053855eac100690491f54e308196878239be3ba6133c",
                    "summary": [],
                    "type": "reasoning",
                },
                "output_index": 0,
                "sequence_number": 3,
                "type": "response.output_item.done",
                "received_at": ANY,
            },
            index=0,
        ),
        # output_item.added for the function call — tool name known, arguments not yet streamed.
        StreamingChunk(
            content="",
            meta={"received_at": ANY},
            index=1,
            tool_calls=[
                ToolCallDelta(
                    index=1,
                    tool_name="weather",
                    arguments=None,
                    id="fc_095b57053855eac100690491f6a224819680e2f9c7cbc5a531",
                    extra={
                        "arguments": "",
                        "call_id": "call_OZZXFm7SLb4F3Xg8a9XVVCvv",
                        "id": "fc_095b57053855eac100690491f6a224819680e2f9c7cbc5a531",
                        "name": "weather",
                        "status": "in_progress",
                        "type": "function_call",
                    },
                )
            ],
            start=True,
        ),
        # function_call_arguments.delta — the full JSON arguments arrive in one delta here.
        StreamingChunk(
            content="",
            meta={"received_at": ANY},
            index=1,
            tool_calls=[
                ToolCallDelta(
                    index=1,
                    tool_name=None,
                    arguments='{"city":"Paris"}',
                    id="fc_095b57053855eac100690491f6a224819680e2f9c7cbc5a531",
                    extra={
                        "item_id": "fc_095b57053855eac100690491f6a224819680e2f9c7cbc5a531",
                        "output_index": 1,
                        "sequence_number": 5,
                        "type": "response.function_call_arguments.delta",
                        "obfuscation": "PySUcQ59ZZRkOm",
                    },
                )
            ],
        ),
        # function_call_arguments.done — arguments finalized (meta only, no ToolCallDelta).
        StreamingChunk(
            content="",
            meta={
                "received_at": ANY,
                "arguments": '{"city":"Paris"}',
                "item_id": "fc_095b57053855eac100690491f6a224819680e2f9c7cbc5a531",
                "name": "weather",
                "output_index": 1,
                "sequence_number": 10,
                "type": "response.function_call_arguments.done",
            },
            index=1,
        ),
        # response.completed — final Response with the complete output list and token usage;
        # finish_reason is mapped to "tool_calls".
        StreamingChunk(
            content="",
            meta={
                "received_at": ANY,
                "response": {
                    "id": "resp_095b57053855eac100690491f4e22c8196ac124365e8c70424",
                    "created_at": 1761907188.0,
                    "metadata": {},
                    "model": "gpt-5-mini-2025-08-07",
                    "object": "response",
                    "output": [
                        {
                            "id": "rs_095b57053855eac100690491f54e308196878239be3ba6133c",
                            "summary": [],
                            "type": "reasoning",
                        },
                        {
                            "arguments": '{"city":"Paris"}',
                            "call_id": "call_OZZXFm7SLb4F3Xg8a9XVVCvv",
                            "name": "weather",
                            "type": "function_call",
                            "id": "fc_095b57053855eac100690491f6a224819680e2f9c7cbc5a531",
                            "status": "completed",
                        },
                    ],
                    "tools": [
                        {
                            "name": "weather",
                            "parameters": {
                                "type": "object",
                                "properties": {"city": {"type": "string"}},
                                "required": ["city"],
                                "additionalProperties": False,
                            },
                            "strict": False,
                            "type": "function",
                            "description": "useful to determine the weather in a given location",
                        }
                    ],
                    "top_p": 1.0,
                    "reasoning": {"effort": "medium", "generate_summary": None, "summary": None},
                    "usage": {
                        "input_tokens": 62,
                        "input_tokens_details": {"cached_tokens": 0},
                        "output_tokens": 83,
                        "output_tokens_details": {"reasoning_tokens": 64},
                        "total_tokens": 145,
                    },
                    "store": True,
                },
                "sequence_number": 12,
                "type": "response.completed",
            },
            finish_reason="tool_calls",
        ),
    ]
 224  
 225  
 226  class TestConversionToStreamingChunks:
    def test_convert_streaming_chunks_to_chat_message_with_tool_call_empty_reasoning(
        self, openai_responses_streaming_chunks_with_tool_call
    ):
        """Accumulate a tool-call stream into one assistant ChatMessage.

        Checks that the empty reasoning item survives as a ReasoningContent (with its
        `summary` key dropped from `extra`), the streamed arguments deltas are parsed
        into a ToolCall with dict arguments and the `call_id` preserved in `extra`,
        and the final response.completed payload (minus `output`) becomes the
        message meta.
        """
        chat_message = _convert_streaming_chunks_to_chat_message(openai_responses_streaming_chunks_with_tool_call)
        # Compare against the dataclass internals directly (_role/_content/_meta) so
        # the whole message — including meta — is pinned in a single equality check.
        assert chat_message == ChatMessage(
            _role="assistant",
            _content=[
                # The reasoning item had no text; only its id/type are kept in extra.
                ReasoningContent(
                    reasoning_text="",
                    extra={"id": "rs_095b57053855eac100690491f54e308196878239be3ba6133c", "type": "reasoning"},
                ),
                # Arguments were streamed as a JSON string and must be parsed to a dict.
                ToolCall(
                    tool_name="weather",
                    arguments={"city": "Paris"},
                    id="fc_095b57053855eac100690491f6a224819680e2f9c7cbc5a531",
                    extra={"call_id": "call_OZZXFm7SLb4F3Xg8a9XVVCvv"},
                ),
            ],
            _name=None,
            # Meta mirrors the final Response payload, without the "output" list.
            _meta={
                "id": "resp_095b57053855eac100690491f4e22c8196ac124365e8c70424",
                "created_at": 1761907188.0,
                "metadata": {},
                "model": "gpt-5-mini-2025-08-07",
                "object": "response",
                "tools": [
                    {
                        "name": "weather",
                        "parameters": {
                            "type": "object",
                            "properties": {"city": {"type": "string"}},
                            "required": ["city"],
                            "additionalProperties": False,
                        },
                        "strict": False,
                        "type": "function",
                        "description": "useful to determine the weather in a given location",
                    }
                ],
                "top_p": 1.0,
                "reasoning": {"effort": "medium", "generate_summary": None, "summary": None},
                "usage": {
                    "input_tokens": 62,
                    "input_tokens_details": {"cached_tokens": 0},
                    "output_tokens": 83,
                    "output_tokens_details": {"reasoning_tokens": 64},
                    "total_tokens": 145,
                },
                "store": True,
            },
        )
 278  
 279      def test_convert_only_text(self):
 280          openai_chunks = [
 281              ResponseCreatedEvent(
 282                  response=Response(
 283                      id="resp_0a8811e62a95217b00690c5ff62c14819596eae387d116f285",
 284                      created_at=1762418678.0,
 285                      metadata={},
 286                      model="gpt-5-mini-2025-08-07",
 287                      object="response",
 288                      output=[],
 289                      parallel_tool_calls=True,
 290                      temperature=1.0,
 291                      tool_choice="auto",
 292                      tools=[],
 293                      top_p=1.0,
 294                      background=False,
 295                      reasoning=Reasoning(effort="medium", generate_summary=None, summary=None),
 296                      service_tier="auto",
 297                      status="in_progress",
 298                      text=ResponseTextConfig(format=ResponseFormatText(type="text"), verbosity="medium"),
 299                      top_logprobs=0,
 300                      truncation="disabled",
 301                      prompt_cache_retention=None,
 302                      store=True,
 303                  ),
 304                  sequence_number=0,
 305                  type="response.created",
 306              ),
 307              ResponseInProgressEvent(
 308                  response=Response(
 309                      id="resp_0a8811e62a95217b00690c5ff62c14819596eae387d116f285",
 310                      created_at=1762418678.0,
 311                      metadata={},
 312                      model="gpt-5-mini-2025-08-07",
 313                      object="response",
 314                      output=[],
 315                      parallel_tool_calls=True,
 316                      temperature=1.0,
 317                      tool_choice="auto",
 318                      tools=[],
 319                      top_p=1.0,
 320                      background=False,
 321                      reasoning=Reasoning(effort="medium", generate_summary=None, summary=None),
 322                      service_tier="auto",
 323                      status="in_progress",
 324                      text=ResponseTextConfig(format=ResponseFormatText(type="text"), verbosity="medium"),
 325                      top_logprobs=0,
 326                      truncation="disabled",
 327                      prompt_cache_retention=None,
 328                      store=True,
 329                  ),
 330                  sequence_number=1,
 331                  type="response.in_progress",
 332              ),
 333              ResponseOutputItemAddedEvent(
 334                  item=ResponseReasoningItem(
 335                      id="rs_0a8811e62a95217b00690c5ff70a308195a8207d7eb43f1d5b", summary=[], type="reasoning"
 336                  ),
 337                  output_index=0,
 338                  sequence_number=2,
 339                  type="response.output_item.added",
 340              ),
 341              ResponseOutputItemDoneEvent(
 342                  item=ResponseReasoningItem(
 343                      id="rs_0a8811e62a95217b00690c5ff70a308195a8207d7eb43f1d5b", summary=[], type="reasoning"
 344                  ),
 345                  output_index=0,
 346                  sequence_number=3,
 347                  type="response.output_item.done",
 348              ),
 349              ResponseOutputItemAddedEvent(
 350                  item=ResponseOutputMessage(
 351                      id="msg_0a8811e62a95217b00690c5ff88f6c8195b037e57d327a1ee0",
 352                      content=[],
 353                      role="assistant",
 354                      status="in_progress",
 355                      type="message",
 356                  ),
 357                  output_index=1,
 358                  sequence_number=4,
 359                  type="response.output_item.added",
 360              ),
 361              ResponseContentPartAddedEvent(
 362                  content_index=0,
 363                  item_id="msg_0a8811e62a95217b00690c5ff88f6c8195b037e57d327a1ee0",
 364                  output_index=1,
 365                  part=ResponseOutputText(annotations=[], text="", type="output_text", logprobs=[]),
 366                  sequence_number=5,
 367                  type="response.content_part.added",
 368              ),
 369              ResponseTextDeltaEvent(
 370                  content_index=0,
 371                  delta="Germany",
 372                  item_id="msg_0a8811e62a95217b00690c5ff88f6c8195b037e57d327a1ee0",
 373                  logprobs=[],
 374                  output_index=1,
 375                  sequence_number=6,
 376                  type="response.output_text.delta",
 377                  obfuscation="EV5gCoyiD",
 378              ),
 379              ResponseTextDeltaEvent(
 380                  content_index=0,
 381                  delta=":",
 382                  item_id="msg_0a8811e62a95217b00690c5ff88f6c8195b037e57d327a1ee0",
 383                  logprobs=[],
 384                  output_index=1,
 385                  sequence_number=7,
 386                  type="response.output_text.delta",
 387                  obfuscation="EkdNXp1EE2Cgj8z",
 388              ),
 389              ResponseTextDeltaEvent(
 390                  content_index=0,
 391                  delta=" Berlin",
 392                  item_id="msg_0a8811e62a95217b00690c5ff88f6c8195b037e57d327a1ee0",
 393                  logprobs=[],
 394                  output_index=1,
 395                  sequence_number=8,
 396                  type="response.output_text.delta",
 397                  obfuscation="1eS0q9aye",
 398              ),
 399              ResponseTextDeltaEvent(
 400                  content_index=0,
 401                  delta="\n",
 402                  item_id="msg_0a8811e62a95217b00690c5ff88f6c8195b037e57d327a1ee0",
 403                  logprobs=[],
 404                  output_index=1,
 405                  sequence_number=9,
 406                  type="response.output_text.delta",
 407                  obfuscation="H9Ict3F41DwGS4a",
 408              ),
 409              ResponseTextDeltaEvent(
 410                  content_index=0,
 411                  delta="France",
 412                  item_id="msg_0a8811e62a95217b00690c5ff88f6c8195b037e57d327a1ee0",
 413                  logprobs=[],
 414                  output_index=1,
 415                  sequence_number=10,
 416                  type="response.output_text.delta",
 417                  obfuscation="4vxrblWURx",
 418              ),
 419              ResponseTextDeltaEvent(
 420                  content_index=0,
 421                  delta=":",
 422                  item_id="msg_0a8811e62a95217b00690c5ff88f6c8195b037e57d327a1ee0",
 423                  logprobs=[],
 424                  output_index=1,
 425                  sequence_number=11,
 426                  type="response.output_text.delta",
 427                  obfuscation="B1CMJsNGhhqIz5K",
 428              ),
 429              ResponseTextDeltaEvent(
 430                  content_index=0,
 431                  delta=" Paris",
 432                  item_id="msg_0a8811e62a95217b00690c5ff88f6c8195b037e57d327a1ee0",
 433                  logprobs=[],
 434                  output_index=1,
 435                  sequence_number=12,
 436                  type="response.output_text.delta",
 437                  obfuscation="ojbz89bS7j",
 438              ),
 439              ResponseTextDoneEvent(
 440                  content_index=0,
 441                  item_id="msg_0a8811e62a95217b00690c5ff88f6c8195b037e57d327a1ee0",
 442                  logprobs=[],
 443                  output_index=1,
 444                  sequence_number=13,
 445                  text="Germany: Berlin\nFrance: Paris",
 446                  type="response.output_text.done",
 447              ),
 448              ResponseContentPartDoneEvent(
 449                  content_index=0,
 450                  item_id="msg_0a8811e62a95217b00690c5ff88f6c8195b037e57d327a1ee0",
 451                  output_index=1,
 452                  part=ResponseOutputText(
 453                      annotations=[], text="Germany: Berlin\nFrance: Paris", type="output_text", logprobs=[]
 454                  ),
 455                  sequence_number=14,
 456                  type="response.content_part.done",
 457              ),
 458              ResponseOutputItemDoneEvent(
 459                  item=ResponseOutputMessage(
 460                      id="msg_0a8811e62a95217b00690c5ff88f6c8195b037e57d327a1ee0",
 461                      content=[
 462                          ResponseOutputText(
 463                              annotations=[], text="Germany: Berlin\nFrance: Paris", type="output_text", logprobs=[]
 464                          )
 465                      ],
 466                      role="assistant",
 467                      status="completed",
 468                      type="message",
 469                  ),
 470                  output_index=1,
 471                  sequence_number=15,
 472                  type="response.output_item.done",
 473              ),
 474              ResponseCompletedEvent(
 475                  response=Response(
 476                      id="resp_0a8811e62a95217b00690c5ff62c14819596eae387d116f285",
 477                      created_at=1762418678.0,
 478                      error=None,
 479                      incomplete_details=None,
 480                      instructions=None,
 481                      metadata={},
 482                      model="gpt-5-mini-2025-08-07",
 483                      object="response",
 484                      output=[
 485                          ResponseReasoningItem(
 486                              id="rs_0a8811e62a95217b00690c5ff70a308195a8207d7eb43f1d5b", summary=[], type="reasoning"
 487                          ),
 488                          ResponseOutputMessage(
 489                              id="msg_0a8811e62a95217b00690c5ff88f6c8195b037e57d327a1ee0",
 490                              content=[
 491                                  ResponseOutputText(
 492                                      annotations=[],
 493                                      text="Germany: Berlin\nFrance: Paris",
 494                                      type="output_text",
 495                                      logprobs=[],
 496                                  )
 497                              ],
 498                              role="assistant",
 499                              status="completed",
 500                              type="message",
 501                          ),
 502                      ],
 503                      parallel_tool_calls=True,
 504                      temperature=1.0,
 505                      tool_choice="auto",
 506                      tools=[],
 507                      top_p=1.0,
 508                      background=False,
 509                      reasoning=Reasoning(effort="medium", generate_summary=None, summary=None),
 510                      safety_identifier=None,
 511                      service_tier="default",
 512                      status="completed",
 513                      text=ResponseTextConfig(format=ResponseFormatText(type="text"), verbosity="medium"),
 514                      top_logprobs=0,
 515                      truncation="disabled",
 516                      usage=ResponseUsage(
 517                          input_tokens=15,
 518                          input_tokens_details=InputTokensDetails(cached_tokens=0),
 519                          output_tokens=77,
 520                          output_tokens_details=OutputTokensDetails(reasoning_tokens=64),
 521                          total_tokens=92,
 522                      ),
 523                      prompt_cache_retention=None,
 524                      store=True,
 525                  ),
 526                  sequence_number=16,
 527                  type="response.completed",
 528              ),
 529          ]
 530          streaming_chunks = []
 531          for chunk in openai_chunks:
 532              streaming_chunk = _convert_response_chunk_to_streaming_chunk(chunk, previous_chunks=streaming_chunks)
 533              streaming_chunks.append(streaming_chunk)
 534  
 535          assert streaming_chunks == [
 536              StreamingChunk(
 537                  content="",
 538                  meta={
 539                      "received_at": ANY,
 540                      "response": {
 541                          "id": "resp_0a8811e62a95217b00690c5ff62c14819596eae387d116f285",
 542                          "created_at": 1762418678.0,
 543                          "metadata": {},
 544                          "model": "gpt-5-mini-2025-08-07",
 545                          "object": "response",
 546                          "output": [],
 547                          "parallel_tool_calls": True,
 548                          "temperature": 1.0,
 549                          "tool_choice": "auto",
 550                          "tools": [],
 551                          "top_p": 1.0,
 552                          "background": False,
 553                          "reasoning": {"effort": "medium", "generate_summary": None, "summary": None},
 554                          "service_tier": "auto",
 555                          "status": "in_progress",
 556                          "text": {"format": {"type": "text"}, "verbosity": "medium"},
 557                          "top_logprobs": 0,
 558                          "truncation": "disabled",
 559                          "prompt_cache_retention": None,
 560                          "store": True,
 561                      },
 562                      "sequence_number": 0,
 563                      "type": "response.created",
 564                  },
 565              ),
 566              StreamingChunk(
 567                  content="",
 568                  meta={
 569                      "received_at": ANY,
 570                      "response": {
 571                          "id": "resp_0a8811e62a95217b00690c5ff62c14819596eae387d116f285",
 572                          "created_at": 1762418678.0,
 573                          "metadata": {},
 574                          "model": "gpt-5-mini-2025-08-07",
 575                          "object": "response",
 576                          "output": [],
 577                          "parallel_tool_calls": True,
 578                          "temperature": 1.0,
 579                          "tool_choice": "auto",
 580                          "tools": [],
 581                          "top_p": 1.0,
 582                          "background": False,
 583                          "reasoning": {"effort": "medium", "generate_summary": None, "summary": None},
 584                          "service_tier": "auto",
 585                          "status": "in_progress",
 586                          "text": {"format": {"type": "text"}, "verbosity": "medium"},
 587                          "top_logprobs": 0,
 588                          "truncation": "disabled",
 589                          "prompt_cache_retention": None,
 590                          "store": True,
 591                      },
 592                      "sequence_number": 1,
 593                      "type": "response.in_progress",
 594                  },
 595              ),
 596              StreamingChunk(
 597                  content="",
 598                  meta={"received_at": ANY},
 599                  index=0,
 600                  start=True,
 601                  reasoning=ReasoningContent(
 602                      reasoning_text="",
 603                      extra={
 604                          "id": "rs_0a8811e62a95217b00690c5ff70a308195a8207d7eb43f1d5b",
 605                          "summary": [],
 606                          "type": "reasoning",
 607                      },
 608                  ),
 609              ),
 610              StreamingChunk(
 611                  content="",
 612                  meta={
 613                      "received_at": ANY,
 614                      "item": {
 615                          "id": "rs_0a8811e62a95217b00690c5ff70a308195a8207d7eb43f1d5b",
 616                          "summary": [],
 617                          "type": "reasoning",
 618                      },
 619                      "output_index": 0,
 620                      "sequence_number": 3,
 621                      "type": "response.output_item.done",
 622                  },
 623                  index=0,
 624              ),
 625              StreamingChunk(
 626                  content="",
 627                  meta={
 628                      "received_at": ANY,
 629                      "item": {
 630                          "id": "msg_0a8811e62a95217b00690c5ff88f6c8195b037e57d327a1ee0",
 631                          "content": [],
 632                          "role": "assistant",
 633                          "status": "in_progress",
 634                          "type": "message",
 635                      },
 636                      "output_index": 1,
 637                      "sequence_number": 4,
 638                      "type": "response.output_item.added",
 639                  },
 640                  index=1,
 641              ),
 642              StreamingChunk(
 643                  content="",
 644                  meta={
 645                      "received_at": ANY,
 646                      "content_index": 0,
 647                      "item_id": "msg_0a8811e62a95217b00690c5ff88f6c8195b037e57d327a1ee0",
 648                      "output_index": 1,
 649                      "part": {"annotations": [], "text": "", "type": "output_text", "logprobs": []},
 650                      "sequence_number": 5,
 651                      "type": "response.content_part.added",
 652                  },
 653                  index=1,
 654              ),
 655              StreamingChunk(
 656                  content="Germany",
 657                  meta={
 658                      "content_index": 0,
 659                      "delta": "Germany",
 660                      "item_id": "msg_0a8811e62a95217b00690c5ff88f6c8195b037e57d327a1ee0",
 661                      "logprobs": [],
 662                      "output_index": 1,
 663                      "sequence_number": 6,
 664                      "type": "response.output_text.delta",
 665                      "obfuscation": "EV5gCoyiD",
 666                      "received_at": ANY,
 667                  },
 668                  index=1,
 669                  start=True,
 670              ),
 671              StreamingChunk(
 672                  content=":",
 673                  meta={
 674                      "content_index": 0,
 675                      "delta": ":",
 676                      "item_id": "msg_0a8811e62a95217b00690c5ff88f6c8195b037e57d327a1ee0",
 677                      "logprobs": [],
 678                      "output_index": 1,
 679                      "sequence_number": 7,
 680                      "type": "response.output_text.delta",
 681                      "obfuscation": "EkdNXp1EE2Cgj8z",
 682                      "received_at": ANY,
 683                  },
 684                  index=1,
 685              ),
 686              StreamingChunk(
 687                  content=" Berlin",
 688                  meta={
 689                      "content_index": 0,
 690                      "delta": " Berlin",
 691                      "item_id": "msg_0a8811e62a95217b00690c5ff88f6c8195b037e57d327a1ee0",
 692                      "logprobs": [],
 693                      "output_index": 1,
 694                      "sequence_number": 8,
 695                      "type": "response.output_text.delta",
 696                      "obfuscation": "1eS0q9aye",
 697                      "received_at": ANY,
 698                  },
 699                  index=1,
 700              ),
 701              StreamingChunk(
 702                  content="\n",
 703                  meta={
 704                      "content_index": 0,
 705                      "delta": "\n",
 706                      "item_id": "msg_0a8811e62a95217b00690c5ff88f6c8195b037e57d327a1ee0",
 707                      "logprobs": [],
 708                      "output_index": 1,
 709                      "sequence_number": 9,
 710                      "type": "response.output_text.delta",
 711                      "obfuscation": "H9Ict3F41DwGS4a",
 712                      "received_at": ANY,
 713                  },
 714                  index=1,
 715              ),
 716              StreamingChunk(
 717                  content="France",
 718                  meta={
 719                      "content_index": 0,
 720                      "delta": "France",
 721                      "item_id": "msg_0a8811e62a95217b00690c5ff88f6c8195b037e57d327a1ee0",
 722                      "logprobs": [],
 723                      "output_index": 1,
 724                      "sequence_number": 10,
 725                      "type": "response.output_text.delta",
 726                      "obfuscation": "4vxrblWURx",
 727                      "received_at": ANY,
 728                  },
 729                  index=1,
 730              ),
 731              StreamingChunk(
 732                  content=":",
 733                  meta={
 734                      "content_index": 0,
 735                      "delta": ":",
 736                      "item_id": "msg_0a8811e62a95217b00690c5ff88f6c8195b037e57d327a1ee0",
 737                      "logprobs": [],
 738                      "output_index": 1,
 739                      "sequence_number": 11,
 740                      "type": "response.output_text.delta",
 741                      "obfuscation": "B1CMJsNGhhqIz5K",
 742                      "received_at": ANY,
 743                  },
 744                  index=1,
 745              ),
 746              StreamingChunk(
 747                  content=" Paris",
 748                  meta={
 749                      "content_index": 0,
 750                      "delta": " Paris",
 751                      "item_id": "msg_0a8811e62a95217b00690c5ff88f6c8195b037e57d327a1ee0",
 752                      "logprobs": [],
 753                      "output_index": 1,
 754                      "sequence_number": 12,
 755                      "type": "response.output_text.delta",
 756                      "obfuscation": "ojbz89bS7j",
 757                      "received_at": ANY,
 758                  },
 759                  index=1,
 760              ),
 761              StreamingChunk(
 762                  content="",
 763                  meta={
 764                      "received_at": ANY,
 765                      "content_index": 0,
 766                      "item_id": "msg_0a8811e62a95217b00690c5ff88f6c8195b037e57d327a1ee0",
 767                      "logprobs": [],
 768                      "output_index": 1,
 769                      "sequence_number": 13,
 770                      "text": "Germany: Berlin\nFrance: Paris",
 771                      "type": "response.output_text.done",
 772                  },
 773                  index=1,
 774              ),
 775              StreamingChunk(
 776                  content="",
 777                  meta={
 778                      "received_at": ANY,
 779                      "content_index": 0,
 780                      "item_id": "msg_0a8811e62a95217b00690c5ff88f6c8195b037e57d327a1ee0",
 781                      "output_index": 1,
 782                      "part": {
 783                          "annotations": [],
 784                          "text": "Germany: Berlin\nFrance: Paris",
 785                          "type": "output_text",
 786                          "logprobs": [],
 787                      },
 788                      "sequence_number": 14,
 789                      "type": "response.content_part.done",
 790                  },
 791                  index=1,
 792              ),
 793              StreamingChunk(
 794                  content="",
 795                  meta={
 796                      "received_at": ANY,
 797                      "item": {
 798                          "id": "msg_0a8811e62a95217b00690c5ff88f6c8195b037e57d327a1ee0",
 799                          "content": [
 800                              {
 801                                  "annotations": [],
 802                                  "text": "Germany: Berlin\nFrance: Paris",
 803                                  "type": "output_text",
 804                                  "logprobs": [],
 805                              }
 806                          ],
 807                          "role": "assistant",
 808                          "status": "completed",
 809                          "type": "message",
 810                      },
 811                      "output_index": 1,
 812                      "sequence_number": 15,
 813                      "type": "response.output_item.done",
 814                  },
 815                  index=1,
 816              ),
 817              StreamingChunk(
 818                  content="",
 819                  meta={
 820                      "received_at": ANY,
 821                      "response": {
 822                          "id": "resp_0a8811e62a95217b00690c5ff62c14819596eae387d116f285",
 823                          "created_at": 1762418678.0,
 824                          "error": None,
 825                          "incomplete_details": None,
 826                          "instructions": None,
 827                          "metadata": {},
 828                          "model": "gpt-5-mini-2025-08-07",
 829                          "object": "response",
 830                          "output": [
 831                              {
 832                                  "id": "rs_0a8811e62a95217b00690c5ff70a308195a8207d7eb43f1d5b",
 833                                  "summary": [],
 834                                  "type": "reasoning",
 835                              },
 836                              {
 837                                  "id": "msg_0a8811e62a95217b00690c5ff88f6c8195b037e57d327a1ee0",
 838                                  "content": [
 839                                      {
 840                                          "annotations": [],
 841                                          "text": "Germany: Berlin\nFrance: Paris",
 842                                          "type": "output_text",
 843                                          "logprobs": [],
 844                                      }
 845                                  ],
 846                                  "role": "assistant",
 847                                  "status": "completed",
 848                                  "type": "message",
 849                              },
 850                          ],
 851                          "parallel_tool_calls": True,
 852                          "temperature": 1.0,
 853                          "tool_choice": "auto",
 854                          "tools": [],
 855                          "top_p": 1.0,
 856                          "background": False,
 857                          "reasoning": {"effort": "medium", "generate_summary": None, "summary": None},
 858                          "safety_identifier": None,
 859                          "service_tier": "default",
 860                          "status": "completed",
 861                          "text": {"format": {"type": "text"}, "verbosity": "medium"},
 862                          "top_logprobs": 0,
 863                          "truncation": "disabled",
 864                          "usage": {
 865                              "input_tokens": 15,
 866                              "input_tokens_details": {"cached_tokens": 0},
 867                              "output_tokens": 77,
 868                              "output_tokens_details": {"reasoning_tokens": 64},
 869                              "total_tokens": 92,
 870                          },
 871                          "prompt_cache_retention": None,
 872                          "store": True,
 873                      },
 874                      "sequence_number": 16,
 875                      "type": "response.completed",
 876                  },
 877                  finish_reason="stop",
 878              ),
 879          ]
 880  
    def test_convert_only_function_call(self):
        """Converting a full Responses stream that contains only a reasoning item and one
        function call yields StreamingChunks with tool-call deltas and a final
        ``finish_reason="tool_calls"`` chunk.
        """
        # Raw Responses API events exactly as the SDK would stream them for a
        # single "weather" tool call (created -> reasoning item -> function call
        # item -> argument deltas -> arguments done -> completed).
        chunks = [
            ResponseCreatedEvent(
                response=Response(
                    id="resp_095b57053855eac100690491f4e22c8196ac124365e8c70424",
                    created_at=1761907188.0,
                    metadata={},
                    model="gpt-5-mini-2025-08-07",
                    object="response",
                    output=[],
                    parallel_tool_calls=True,
                    temperature=1.0,
                    tool_choice="auto",
                    tools=[
                        FunctionTool(
                            name="weather",
                            parameters={
                                "type": "object",
                                "properties": {"city": {"type": "string"}},
                                "required": ["city"],
                                "additionalProperties": False,
                            },
                            strict=False,
                            type="function",
                            description="useful to determine the weather in a given location",
                        )
                    ],
                    reasoning=Reasoning(effort="medium", generate_summary=None, summary=None),
                    usage=None,
                ),
                sequence_number=0,
                type="response.created",
            ),
            ResponseOutputItemAddedEvent(
                item=ResponseReasoningItem(
                    id="rs_095b57053855eac100690491f54e308196878239be3ba6133c", summary=[], type="reasoning"
                ),
                output_index=0,
                sequence_number=2,
                type="response.output_item.added",
            ),
            ResponseOutputItemDoneEvent(
                item=ResponseReasoningItem(
                    id="rs_095b57053855eac100690491f54e308196878239be3ba6133c", summary=[], type="reasoning"
                ),
                output_index=0,
                sequence_number=3,
                type="response.output_item.done",
            ),
            ResponseOutputItemAddedEvent(
                item=ResponseFunctionToolCall(
                    arguments="",
                    call_id="call_OZZXFm7SLb4F3Xg8a9XVVCvv",
                    name="weather",
                    type="function_call",
                    id="fc_095b57053855eac100690491f6a224819680e2f9c7cbc5a531",
                    status="in_progress",
                ),
                output_index=1,
                sequence_number=4,
                type="response.output_item.added",
            ),
            ResponseFunctionCallArgumentsDeltaEvent(
                delta='{"city":',
                item_id="fc_095b57053855eac100690491f6a224819680e2f9c7cbc5a531",
                output_index=1,
                sequence_number=5,
                type="response.function_call_arguments.delta",
                obfuscation="PySUcQ59ZZRkOm",
            ),
            ResponseFunctionCallArgumentsDeltaEvent(
                delta='"Paris"}',
                item_id="fc_095b57053855eac100690491f6a224819680e2f9c7cbc5a531",
                output_index=1,
                sequence_number=8,
                type="response.function_call_arguments.delta",
                obfuscation="INeMDAi1uAj",
            ),
            ResponseFunctionCallArgumentsDoneEvent(
                arguments='{"city":"Paris"}',
                item_id="fc_095b57053855eac100690491f6a224819680e2f9c7cbc5a531",
                name="weather",  # added name here because pydantic complains otherwise API returns a none here
                output_index=1,
                sequence_number=10,
                type="response.function_call_arguments.done",
            ),
            ResponseCompletedEvent(
                response=Response(
                    id="resp_095b57053855eac100690491f4e22c8196ac124365e8c70424",
                    created_at=1761907188.0,
                    metadata={},
                    model="gpt-5-mini-2025-08-07",
                    object="response",
                    output=[
                        ResponseReasoningItem(
                            id="rs_095b57053855eac100690491f54e308196878239be3ba6133c", summary=[], type="reasoning"
                        ),
                        ResponseFunctionToolCall(
                            arguments='{"city":"Paris"}',
                            call_id="call_OZZXFm7SLb4F3Xg8a9XVVCvv",
                            name="weather",
                            type="function_call",
                            id="fc_095b57053855eac100690491f6a224819680e2f9c7cbc5a531",
                            status="completed",
                        ),
                    ],
                    parallel_tool_calls=True,
                    temperature=1.0,
                    tool_choice="auto",
                    tools=[
                        FunctionTool(
                            name="weather",
                            parameters={
                                "type": "object",
                                "properties": {"city": {"type": "string"}},
                                "required": ["city"],
                                "additionalProperties": False,
                            },
                            strict=False,
                            type="function",
                            description="useful to determine the weather in a given location",
                        )
                    ],
                    top_p=1.0,
                    reasoning=Reasoning(effort="medium", generate_summary=None, summary=None),
                    usage=ResponseUsage(
                        input_tokens=62,
                        input_tokens_details=InputTokensDetails(cached_tokens=0),
                        output_tokens=83,
                        output_tokens_details=OutputTokensDetails(reasoning_tokens=64),
                        total_tokens=145,
                    ),
                    store=True,
                ),
                sequence_number=12,
                type="response.completed",
            ),
        ]

        # Convert events one at a time; the converter receives all previously
        # produced chunks so it can detect item starts and assign indices.
        streaming_chunks = []
        for chunk in chunks:
            streaming_chunk = _convert_response_chunk_to_streaming_chunk(chunk, previous_chunks=streaming_chunks)
            streaming_chunks.append(streaming_chunk)

        # Expected chunks: response.created passthrough, reasoning start/done
        # (index 0), tool-call start + two argument deltas + arguments done
        # (index 1), and the final completed chunk with finish_reason.
        assert streaming_chunks == [
            # TODO Unneeded streaming chunk
            StreamingChunk(
                content="",
                meta={
                    "received_at": ANY,
                    "response": {
                        "id": "resp_095b57053855eac100690491f4e22c8196ac124365e8c70424",
                        "created_at": 1761907188.0,
                        "metadata": {},
                        "model": "gpt-5-mini-2025-08-07",
                        "object": "response",
                        "output": [],
                        "parallel_tool_calls": True,
                        "temperature": 1.0,
                        "tool_choice": "auto",
                        "tools": [
                            {
                                "name": "weather",
                                "parameters": {
                                    "type": "object",
                                    "properties": {"city": {"type": "string"}},
                                    "required": ["city"],
                                    "additionalProperties": False,
                                },
                                "strict": False,
                                "type": "function",
                                "description": "useful to determine the weather in a given location",
                            }
                        ],
                        "reasoning": {"effort": "medium", "generate_summary": None, "summary": None},
                        "usage": None,
                    },
                    "sequence_number": 0,
                    "type": "response.created",
                },
            ),
            StreamingChunk(
                content="",
                meta={"received_at": ANY},
                index=0,
                start=True,
                reasoning=ReasoningContent(
                    reasoning_text="",
                    extra={
                        "id": "rs_095b57053855eac100690491f54e308196878239be3ba6133c",
                        "summary": [],
                        "type": "reasoning",
                    },
                ),
            ),
            StreamingChunk(
                content="",
                meta={
                    "item": {
                        "id": "rs_095b57053855eac100690491f54e308196878239be3ba6133c",
                        "summary": [],
                        "type": "reasoning",
                    },
                    "output_index": 0,
                    "sequence_number": 3,
                    "type": "response.output_item.done",
                    "received_at": ANY,
                },
                index=0,
            ),
            StreamingChunk(
                content="",
                meta={"received_at": ANY},
                index=1,
                tool_calls=[
                    ToolCallDelta(
                        index=1,
                        tool_name="weather",
                        arguments=None,
                        id="fc_095b57053855eac100690491f6a224819680e2f9c7cbc5a531",
                        extra={
                            "arguments": "",
                            "call_id": "call_OZZXFm7SLb4F3Xg8a9XVVCvv",
                            "id": "fc_095b57053855eac100690491f6a224819680e2f9c7cbc5a531",
                            "name": "weather",
                            "status": "in_progress",
                            "type": "function_call",
                        },
                    )
                ],
                start=True,
            ),
            StreamingChunk(
                content="",
                meta={"received_at": ANY},
                index=1,
                tool_calls=[
                    ToolCallDelta(
                        index=1,
                        tool_name=None,
                        arguments='{"city":',
                        id="fc_095b57053855eac100690491f6a224819680e2f9c7cbc5a531",
                        extra={
                            "item_id": "fc_095b57053855eac100690491f6a224819680e2f9c7cbc5a531",
                            "output_index": 1,
                            "sequence_number": 5,
                            "type": "response.function_call_arguments.delta",
                            "obfuscation": "PySUcQ59ZZRkOm",
                        },
                    )
                ],
            ),
            StreamingChunk(
                content="",
                meta={"received_at": ANY},
                index=1,
                tool_calls=[
                    ToolCallDelta(
                        index=1,
                        tool_name=None,
                        arguments='"Paris"}',
                        id="fc_095b57053855eac100690491f6a224819680e2f9c7cbc5a531",
                        extra={
                            "item_id": "fc_095b57053855eac100690491f6a224819680e2f9c7cbc5a531",
                            "output_index": 1,
                            "sequence_number": 8,
                            "type": "response.function_call_arguments.delta",
                            "obfuscation": "INeMDAi1uAj",
                        },
                    )
                ],
            ),
            StreamingChunk(
                content="",
                meta={
                    "received_at": ANY,
                    "arguments": '{"city":"Paris"}',
                    "item_id": "fc_095b57053855eac100690491f6a224819680e2f9c7cbc5a531",
                    "name": "weather",
                    "output_index": 1,
                    "sequence_number": 10,
                    "type": "response.function_call_arguments.done",
                },
                index=1,
            ),
            StreamingChunk(
                content="",
                meta={
                    "received_at": ANY,
                    "response": {
                        "id": "resp_095b57053855eac100690491f4e22c8196ac124365e8c70424",
                        "created_at": 1761907188.0,
                        "metadata": {},
                        "model": "gpt-5-mini-2025-08-07",
                        "object": "response",
                        "output": [
                            {
                                "id": "rs_095b57053855eac100690491f54e308196878239be3ba6133c",
                                "summary": [],
                                "type": "reasoning",
                            },
                            {
                                "arguments": '{"city":"Paris"}',
                                "call_id": "call_OZZXFm7SLb4F3Xg8a9XVVCvv",
                                "name": "weather",
                                "type": "function_call",
                                "id": "fc_095b57053855eac100690491f6a224819680e2f9c7cbc5a531",
                                "status": "completed",
                            },
                        ],
                        "parallel_tool_calls": True,
                        "temperature": 1.0,
                        "tool_choice": "auto",
                        "tools": [
                            {
                                "name": "weather",
                                "parameters": {
                                    "type": "object",
                                    "properties": {"city": {"type": "string"}},
                                    "required": ["city"],
                                    "additionalProperties": False,
                                },
                                "strict": False,
                                "type": "function",
                                "description": "useful to determine the weather in a given location",
                            }
                        ],
                        "top_p": 1.0,
                        "reasoning": {"effort": "medium", "generate_summary": None, "summary": None},
                        "usage": {
                            "input_tokens": 62,
                            "input_tokens_details": {"cached_tokens": 0},
                            "output_tokens": 83,
                            "output_tokens_details": {"reasoning_tokens": 64},
                            "total_tokens": 145,
                        },
                        "store": True,
                    },
                    "sequence_number": 12,
                    "type": "response.completed",
                },
                finish_reason="tool_calls",
            ),
        ]
1225  
1226  
1227  class TestResponseToChatMessage:
1228      def test_convert_system_message(self):
1229          message = ChatMessage.from_system("You are good assistant")
1230          assert _convert_chat_message_to_responses_api_format(message) == [
1231              {"role": "system", "content": "You are good assistant"}
1232          ]
1233  
1234      def test_convert_user_message(self):
1235          message = ChatMessage.from_user("I have a question")
1236          assert _convert_chat_message_to_responses_api_format(message) == [
1237              {"role": "user", "content": [{"type": "input_text", "text": "I have a question"}]}
1238          ]
1239  
    def test_convert_multimodal_user_message(self, base64_image_string):
        """A user message with text + image parts serializes text and a data-URL image.

        NOTE(review): unlike the sibling tests in this class, this test asserts
        ``ChatMessage.to_openai_dict_format()`` (Chat Completions shape, ``image_url``)
        rather than ``_convert_chat_message_to_responses_api_format`` — confirm this
        is intentional for this Responses-conversion test class.
        """
        message = ChatMessage.from_user(
            content_parts=[
                TextContent("I have a question"),
                ImageContent(base64_image=base64_image_string, detail="auto"),
            ]
        )
        assert message.to_openai_dict_format() == {
            "role": "user",
            "content": [
                {"type": "text", "text": "I have a question"},
                {
                    "type": "image_url",
                    "image_url": {"url": f"data:image/png;base64,{base64_image_string}", "detail": "auto"},
                },
            ],
        }

        # image content only should be supported as well
        message = ChatMessage.from_user(content_parts=[ImageContent(base64_image=base64_image_string, detail="auto")])
        assert message.to_openai_dict_format() == {
            "role": "user",
            "content": [
                {
                    "type": "image_url",
                    "image_url": {"url": f"data:image/png;base64,{base64_image_string}", "detail": "auto"},
                }
            ],
        }
1269  
1270      def test_convert_user_message_with_file_content(self, base64_pdf_string):
1271          message = ChatMessage.from_user(
1272              content_parts=[FileContent(base64_data=base64_pdf_string, mime_type="application/pdf", filename="test.pdf")]
1273          )
1274          assert _convert_chat_message_to_responses_api_format(message) == [
1275              {
1276                  "role": "user",
1277                  "content": [
1278                      {
1279                          "type": "input_file",
1280                          "filename": "test.pdf",
1281                          "file_data": f"data:application/pdf;base64,{base64_pdf_string}",
1282                      }
1283                  ],
1284              }
1285          ]
1286  
1287      def test_convert_user_message_with_file_content_no_filename(self, base64_pdf_string):
1288          message = ChatMessage.from_user(
1289              content_parts=[FileContent(base64_data=base64_pdf_string, mime_type="application/pdf")]
1290          )
1291          assert _convert_chat_message_to_responses_api_format(message) == [
1292              {
1293                  "role": "user",
1294                  "content": [
1295                      {
1296                          "type": "input_file",
1297                          "filename": "filename",
1298                          "file_data": f"data:application/pdf;base64,{base64_pdf_string}",
1299                      }
1300                  ],
1301              }
1302          ]
1303  
1304      def test_convert_assistant_message(self):
1305          message = ChatMessage.from_assistant(text="I have an answer", meta={"finish_reason": "stop"})
1306          assert _convert_chat_message_to_responses_api_format(message) == [
1307              {"role": "assistant", "content": "I have an answer"}
1308          ]
1309  
    def test_convert_assistant_message_w_tool_call(self):
        """An assistant message with text, reasoning, and a tool call converts into three
        Responses API items: a reasoning item (reasoning text as summary_text), a
        function_call item (arguments JSON-serialized, call_id taken from ToolCall.extra),
        and a plain assistant content dict.
        """
        chat_message = ChatMessage(
            _role=ChatRole.ASSISTANT,
            _content=[
                TextContent(text="I need to use the functions.weather tool."),
                ReasoningContent(
                    reasoning_text="I need to use the functions.weather tool.",
                    extra={"id": "rs_0d13efdd", "type": "reasoning"},
                ),
                ToolCall(
                    tool_name="weather",
                    arguments={"location": "Berlin"},
                    id="fc_0d13efdd",
                    extra={"call_id": "call_a82vwFAIzku9SmBuQuecQSRq"},
                ),
            ],
            _name=None,
            # some keys are removed to keep the test concise
            _meta={
                "id": "resp_0d13efdd97aa4",
                "created_at": 1761148307.0,
                "model": "gpt-5-mini-2025-08-07",
                "object": "response",
                "parallel_tool_calls": True,
                "temperature": 1.0,
                "tool_choice": "auto",
                "tools": [
                    {
                        "name": "weather",
                        "parameters": {
                            "type": "object",
                            "properties": {"location": {"type": "string"}},
                            "required": ["location"],
                            "additionalProperties": False,
                        },
                        "strict": False,
                        "type": "function",
                        "description": "A tool to get the weather",
                    }
                ],
                "top_p": 1.0,
                "reasoning": {"effort": "low", "summary": "detailed"},
                "usage": {"input_tokens": 59, "output_tokens": 19, "total_tokens": 78},
                "store": True,
            },
        )
        responses_api_format = _convert_chat_message_to_responses_api_format(chat_message)
        # Note: the reasoning item precedes the function call, and the assistant
        # text content comes last; meta is not part of the converted payload.
        assert responses_api_format == [
            {
                "id": "rs_0d13efdd",
                "type": "reasoning",
                "summary": [{"text": "I need to use the functions.weather tool.", "type": "summary_text"}],
            },
            {
                "type": "function_call",
                "name": "weather",
                "arguments": '{"location": "Berlin"}',
                "id": "fc_0d13efdd",
                "call_id": "call_a82vwFAIzku9SmBuQuecQSRq",
            },
            {"content": "I need to use the functions.weather tool.", "role": "assistant"},
        ]
1372  
1373      def test_convert_tool_message(self):
1374          tool_call_result = ChatMessage(
1375              _role=ChatRole.TOOL,
1376              _content=[
1377                  ToolCallResult(
1378                      result="result",
1379                      origin=ToolCall(
1380                          id="fc_0d13efdd",
1381                          tool_name="weather",
1382                          arguments={"location": "Berlin"},
1383                          extra={"call_id": "call_a82vwFAIzku9SmBuQuecQSRq"},
1384                      ),
1385                      error=False,
1386                  )
1387              ],
1388          )
1389  
1390          assert _convert_chat_message_to_responses_api_format(tool_call_result) == [
1391              {
1392                  "call_id": "call_a82vwFAIzku9SmBuQuecQSRq",
1393                  "output": [{"type": "input_text", "text": "result"}],
1394                  "type": "function_call_output",
1395              }
1396          ]
1397  
1398      def test_convert_tool_message_list_with_image(self, base64_image_string):
1399          tool_result = [
1400              TextContent(text="first result"),
1401              ImageContent(base64_image=base64_image_string, mime_type="image/png"),
1402          ]
1403          message = ChatMessage.from_tool(
1404              tool_result=tool_result,
1405              origin=ToolCall(
1406                  tool_name="mytool", arguments={}, id="123", extra={"call_id": "call_a82vwFAIzku9SmBuQuecQSRq"}
1407              ),
1408              error=False,
1409          )
1410  
1411          assert _convert_chat_message_to_responses_api_format(message) == [
1412              {
1413                  "call_id": "call_a82vwFAIzku9SmBuQuecQSRq",
1414                  "output": [
1415                      {"type": "input_text", "text": "first result"},
1416                      {"type": "input_image", "image_url": f"data:image/png;base64,{base64_image_string}"},
1417                  ],
1418                  "type": "function_call_output",
1419              }
1420          ]
1421  
1422      def test_convert_invalid(self):
1423          message = ChatMessage(_role=ChatRole.ASSISTANT, _content=[])
1424          with pytest.raises(ValueError):
1425              _convert_chat_message_to_responses_api_format(message)
1426  
1427          message = ChatMessage(
1428              _role=ChatRole.USER,
1429              _content=[
1430                  TextContent(text="I have an answer"),
1431                  ToolCallResult(
1432                      result="I have another answer",
1433                      origin=ToolCall(id="123", tool_name="mytool", arguments={"a": 1}),
1434                      error=False,
1435                  ),
1436              ],
1437          )
1438          with pytest.raises(ValueError):
1439              _convert_chat_message_to_responses_api_format(message)