diff --git a/.stats.yml b/.stats.yml
index 185585b675..fd4f271361 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,2 +1,2 @@
configured_endpoints: 68
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-8ff62fa1091460d68fbd36d72c17d91b709917bebf2983c9c4de5784bc384a2e.yml
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-1dbac0e95bdb5a89a0dd3d93265475a378214551b7d8c22862928e0d87ace94b.yml
diff --git a/api.md b/api.md
index 648d0f3708..48778cc57c 100644
--- a/api.md
+++ b/api.md
@@ -365,6 +365,7 @@ from openai.types.beta.threads.runs import (
RunStepDelta,
RunStepDeltaEvent,
RunStepDeltaMessageDelta,
+ RunStepInclude,
ToolCall,
ToolCallDelta,
ToolCallDeltaObject,
@@ -374,7 +375,7 @@ from openai.types.beta.threads.runs import (
Methods:
-- client.beta.threads.runs.steps.retrieve(step_id, \*, thread_id, run_id) -> RunStep
+- client.beta.threads.runs.steps.retrieve(step_id, \*, thread_id, run_id, \*\*params) -> RunStep
- client.beta.threads.runs.steps.list(run_id, \*, thread_id, \*\*params) -> SyncCursorPage[RunStep]
### Messages
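As a quick orientation to the updated `steps.retrieve` signature documented above, here is a minimal usage sketch. The IDs are placeholders and the client is assumed to pick up `OPENAI_API_KEY` from the environment.

```python
from openai import OpenAI

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

# Placeholder IDs; `include` is the new query parameter added in this change.
step = client.beta.threads.runs.steps.retrieve(
    "step_abc123",
    thread_id="thread_abc123",
    run_id="run_abc123",
    include=["step_details.tool_calls[*].file_search.results[*].content"],
)
print(step.step_details)
```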
diff --git a/src/openai/resources/beta/threads/runs/runs.py b/src/openai/resources/beta/threads/runs/runs.py
index cbfb9546f0..4f39912e62 100644
--- a/src/openai/resources/beta/threads/runs/runs.py
+++ b/src/openai/resources/beta/threads/runs/runs.py
@@ -3,7 +3,7 @@
from __future__ import annotations
import typing_extensions
-from typing import Union, Iterable, Optional, overload
+from typing import List, Union, Iterable, Optional, overload
from functools import partial
from typing_extensions import Literal
@@ -49,6 +49,7 @@
from .....types.beta.threads.run import Run
from .....types.beta.assistant_tool_param import AssistantToolParam
from .....types.beta.assistant_stream_event import AssistantStreamEvent
+from .....types.beta.threads.runs.run_step_include import RunStepInclude
from .....types.beta.assistant_tool_choice_option_param import AssistantToolChoiceOptionParam
from .....types.beta.assistant_response_format_option_param import AssistantResponseFormatOptionParam
@@ -74,6 +75,7 @@ def create(
thread_id: str,
*,
assistant_id: str,
+ include: List[RunStepInclude] | NotGiven = NOT_GIVEN,
additional_instructions: Optional[str] | NotGiven = NOT_GIVEN,
additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN,
instructions: Optional[str] | NotGiven = NOT_GIVEN,
@@ -104,6 +106,14 @@ def create(
[assistant](https://platform.openai.com/docs/api-reference/assistants) to use to
execute this run.
+ include: A list of additional fields to include in the response. Currently the only
+ supported value is `step_details.tool_calls[*].file_search.results[*].content`
+ to fetch the file search result content.
+
+ See the
+ [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings)
+ for more information.
+
additional_instructions: Appends additional instructions at the end of the instructions for the run. This
is useful for modifying the behavior on a per-run basis without overriding other
instructions.
@@ -206,6 +216,7 @@ def create(
*,
assistant_id: str,
stream: Literal[True],
+ include: List[RunStepInclude] | NotGiven = NOT_GIVEN,
additional_instructions: Optional[str] | NotGiven = NOT_GIVEN,
additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN,
instructions: Optional[str] | NotGiven = NOT_GIVEN,
@@ -239,6 +250,14 @@ def create(
events, terminating when the Run enters a terminal state with a `data: [DONE]`
message.
+ include: A list of additional fields to include in the response. Currently the only
+ supported value is `step_details.tool_calls[*].file_search.results[*].content`
+ to fetch the file search result content.
+
+ See the
+ [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings)
+ for more information.
+
additional_instructions: Appends additional instructions at the end of the instructions for the run. This
is useful for modifying the behavior on a per-run basis without overriding other
instructions.
@@ -337,6 +356,7 @@ def create(
*,
assistant_id: str,
stream: bool,
+ include: List[RunStepInclude] | NotGiven = NOT_GIVEN,
additional_instructions: Optional[str] | NotGiven = NOT_GIVEN,
additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN,
instructions: Optional[str] | NotGiven = NOT_GIVEN,
@@ -370,6 +390,14 @@ def create(
events, terminating when the Run enters a terminal state with a `data: [DONE]`
message.
+ include: A list of additional fields to include in the response. Currently the only
+ supported value is `step_details.tool_calls[*].file_search.results[*].content`
+ to fetch the file search result content.
+
+ See the
+ [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings)
+ for more information.
+
additional_instructions: Appends additional instructions at the end of the instructions for the run. This
is useful for modifying the behavior on a per-run basis without overriding other
instructions.
@@ -467,6 +495,7 @@ def create(
thread_id: str,
*,
assistant_id: str,
+ include: List[RunStepInclude] | NotGiven = NOT_GIVEN,
additional_instructions: Optional[str] | NotGiven = NOT_GIVEN,
additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN,
instructions: Optional[str] | NotGiven = NOT_GIVEN,
@@ -516,7 +545,11 @@ def create(
run_create_params.RunCreateParams,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform({"include": include}, run_create_params.RunCreateParams),
),
cast_to=Run,
stream=stream or False,
@@ -712,6 +745,7 @@ def create_and_poll(
self,
*,
assistant_id: str,
+ include: List[RunStepInclude] | NotGiven = NOT_GIVEN,
additional_instructions: Optional[str] | NotGiven = NOT_GIVEN,
additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN,
instructions: Optional[str] | NotGiven = NOT_GIVEN,
@@ -743,6 +777,7 @@ def create_and_poll(
run = self.create(
thread_id=thread_id,
assistant_id=assistant_id,
+ include=include,
additional_instructions=additional_instructions,
additional_messages=additional_messages,
instructions=instructions,
@@ -958,6 +993,7 @@ def stream(
self,
*,
assistant_id: str,
+ include: List[RunStepInclude] | NotGiven = NOT_GIVEN,
additional_instructions: Optional[str] | NotGiven = NOT_GIVEN,
additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN,
instructions: Optional[str] | NotGiven = NOT_GIVEN,
@@ -988,6 +1024,7 @@ def stream(
self,
*,
assistant_id: str,
+ include: List[RunStepInclude] | NotGiven = NOT_GIVEN,
additional_instructions: Optional[str] | NotGiven = NOT_GIVEN,
additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN,
instructions: Optional[str] | NotGiven = NOT_GIVEN,
@@ -1018,6 +1055,7 @@ def stream(
self,
*,
assistant_id: str,
+ include: List[RunStepInclude] | NotGiven = NOT_GIVEN,
additional_instructions: Optional[str] | NotGiven = NOT_GIVEN,
additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN,
instructions: Optional[str] | NotGiven = NOT_GIVEN,
@@ -1057,6 +1095,7 @@ def stream(
body=maybe_transform(
{
"assistant_id": assistant_id,
+ "include": include,
"additional_instructions": additional_instructions,
"additional_messages": additional_messages,
"instructions": instructions,
@@ -1387,6 +1426,7 @@ async def create(
thread_id: str,
*,
assistant_id: str,
+ include: List[RunStepInclude] | NotGiven = NOT_GIVEN,
additional_instructions: Optional[str] | NotGiven = NOT_GIVEN,
additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN,
instructions: Optional[str] | NotGiven = NOT_GIVEN,
@@ -1417,6 +1457,14 @@ async def create(
[assistant](https://platform.openai.com/docs/api-reference/assistants) to use to
execute this run.
+ include: A list of additional fields to include in the response. Currently the only
+ supported value is `step_details.tool_calls[*].file_search.results[*].content`
+ to fetch the file search result content.
+
+ See the
+ [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings)
+ for more information.
+
additional_instructions: Appends additional instructions at the end of the instructions for the run. This
is useful for modifying the behavior on a per-run basis without overriding other
instructions.
@@ -1519,6 +1567,7 @@ async def create(
*,
assistant_id: str,
stream: Literal[True],
+ include: List[RunStepInclude] | NotGiven = NOT_GIVEN,
additional_instructions: Optional[str] | NotGiven = NOT_GIVEN,
additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN,
instructions: Optional[str] | NotGiven = NOT_GIVEN,
@@ -1552,6 +1601,14 @@ async def create(
events, terminating when the Run enters a terminal state with a `data: [DONE]`
message.
+ include: A list of additional fields to include in the response. Currently the only
+ supported value is `step_details.tool_calls[*].file_search.results[*].content`
+ to fetch the file search result content.
+
+ See the
+ [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings)
+ for more information.
+
additional_instructions: Appends additional instructions at the end of the instructions for the run. This
is useful for modifying the behavior on a per-run basis without overriding other
instructions.
@@ -1650,6 +1707,7 @@ async def create(
*,
assistant_id: str,
stream: bool,
+ include: List[RunStepInclude] | NotGiven = NOT_GIVEN,
additional_instructions: Optional[str] | NotGiven = NOT_GIVEN,
additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN,
instructions: Optional[str] | NotGiven = NOT_GIVEN,
@@ -1683,6 +1741,14 @@ async def create(
events, terminating when the Run enters a terminal state with a `data: [DONE]`
message.
+ include: A list of additional fields to include in the response. Currently the only
+ supported value is `step_details.tool_calls[*].file_search.results[*].content`
+ to fetch the file search result content.
+
+ See the
+ [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings)
+ for more information.
+
additional_instructions: Appends additional instructions at the end of the instructions for the run. This
is useful for modifying the behavior on a per-run basis without overriding other
instructions.
@@ -1780,6 +1846,7 @@ async def create(
thread_id: str,
*,
assistant_id: str,
+ include: List[RunStepInclude] | NotGiven = NOT_GIVEN,
additional_instructions: Optional[str] | NotGiven = NOT_GIVEN,
additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN,
instructions: Optional[str] | NotGiven = NOT_GIVEN,
@@ -1810,6 +1877,7 @@ async def create(
body=await async_maybe_transform(
{
"assistant_id": assistant_id,
+ "include": include,
"additional_instructions": additional_instructions,
"additional_messages": additional_messages,
"instructions": instructions,
@@ -1829,7 +1897,11 @@ async def create(
run_create_params.RunCreateParams,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform({"include": include}, run_create_params.RunCreateParams),
),
cast_to=Run,
stream=stream or False,
@@ -2025,6 +2097,7 @@ async def create_and_poll(
self,
*,
assistant_id: str,
+ include: List[RunStepInclude] | NotGiven = NOT_GIVEN,
additional_instructions: Optional[str] | NotGiven = NOT_GIVEN,
additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN,
instructions: Optional[str] | NotGiven = NOT_GIVEN,
@@ -2056,6 +2129,7 @@ async def create_and_poll(
run = await self.create(
thread_id=thread_id,
assistant_id=assistant_id,
+ include=include,
additional_instructions=additional_instructions,
additional_messages=additional_messages,
instructions=instructions,
@@ -2303,6 +2377,7 @@ def stream(
self,
*,
assistant_id: str,
+ include: List[RunStepInclude] | NotGiven = NOT_GIVEN,
additional_instructions: Optional[str] | NotGiven = NOT_GIVEN,
additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN,
instructions: Optional[str] | NotGiven = NOT_GIVEN,
@@ -2333,6 +2408,7 @@ def stream(
self,
*,
assistant_id: str,
+ include: List[RunStepInclude] | NotGiven = NOT_GIVEN,
additional_instructions: Optional[str] | NotGiven = NOT_GIVEN,
additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN,
instructions: Optional[str] | NotGiven = NOT_GIVEN,
@@ -2374,6 +2450,7 @@ def stream(
body=maybe_transform(
{
"assistant_id": assistant_id,
+ "include": include,
"additional_instructions": additional_instructions,
"additional_messages": additional_messages,
"instructions": instructions,
diff --git a/src/openai/resources/beta/threads/runs/steps.py b/src/openai/resources/beta/threads/runs/steps.py
index 512008939c..3d2d40a3fb 100644
--- a/src/openai/resources/beta/threads/runs/steps.py
+++ b/src/openai/resources/beta/threads/runs/steps.py
@@ -2,23 +2,25 @@
from __future__ import annotations
+from typing import List
from typing_extensions import Literal
import httpx
from ..... import _legacy_response
from ....._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from ....._utils import maybe_transform
+from ....._utils import (
+ maybe_transform,
+ async_maybe_transform,
+)
from ....._compat import cached_property
from ....._resource import SyncAPIResource, AsyncAPIResource
from ....._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from .....pagination import SyncCursorPage, AsyncCursorPage
-from ....._base_client import (
- AsyncPaginator,
- make_request_options,
-)
-from .....types.beta.threads.runs import step_list_params
+from ....._base_client import AsyncPaginator, make_request_options
+from .....types.beta.threads.runs import step_list_params, step_retrieve_params
from .....types.beta.threads.runs.run_step import RunStep
+from .....types.beta.threads.runs.run_step_include import RunStepInclude
__all__ = ["Steps", "AsyncSteps"]
@@ -38,6 +40,7 @@ def retrieve(
*,
thread_id: str,
run_id: str,
+ include: List[RunStepInclude] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -49,6 +52,14 @@ def retrieve(
Retrieves a run step.
Args:
+ include: A list of additional fields to include in the response. Currently the only
+ supported value is `step_details.tool_calls[*].file_search.results[*].content`
+ to fetch the file search result content.
+
+ See the
+ [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings)
+ for more information.
+
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
@@ -67,7 +78,11 @@ def retrieve(
return self._get(
f"/threads/{thread_id}/runs/{run_id}/steps/{step_id}",
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform({"include": include}, step_retrieve_params.StepRetrieveParams),
),
cast_to=RunStep,
)
@@ -79,6 +94,7 @@ def list(
thread_id: str,
after: str | NotGiven = NOT_GIVEN,
before: str | NotGiven = NOT_GIVEN,
+ include: List[RunStepInclude] | NotGiven = NOT_GIVEN,
limit: int | NotGiven = NOT_GIVEN,
order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -102,6 +118,14 @@ def list(
ending with obj_foo, your subsequent call can include before=obj_foo in order to
fetch the previous page of the list.
+ include: A list of additional fields to include in the response. Currently the only
+ supported value is `step_details.tool_calls[*].file_search.results[*].content`
+ to fetch the file search result content.
+
+ See the
+ [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings)
+ for more information.
+
limit: A limit on the number of objects to be returned. Limit can range between 1 and
100, and the default is 20.
@@ -133,6 +157,7 @@ def list(
{
"after": after,
"before": before,
+ "include": include,
"limit": limit,
"order": order,
},
@@ -158,6 +183,7 @@ async def retrieve(
*,
thread_id: str,
run_id: str,
+ include: List[RunStepInclude] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -169,6 +195,14 @@ async def retrieve(
Retrieves a run step.
Args:
+ include: A list of additional fields to include in the response. Currently the only
+ supported value is `step_details.tool_calls[*].file_search.results[*].content`
+ to fetch the file search result content.
+
+ See the
+ [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings)
+ for more information.
+
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
@@ -187,7 +221,11 @@ async def retrieve(
return await self._get(
f"/threads/{thread_id}/runs/{run_id}/steps/{step_id}",
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform({"include": include}, step_retrieve_params.StepRetrieveParams),
),
cast_to=RunStep,
)
@@ -199,6 +237,7 @@ def list(
thread_id: str,
after: str | NotGiven = NOT_GIVEN,
before: str | NotGiven = NOT_GIVEN,
+ include: List[RunStepInclude] | NotGiven = NOT_GIVEN,
limit: int | NotGiven = NOT_GIVEN,
order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -222,6 +261,14 @@ def list(
ending with obj_foo, your subsequent call can include before=obj_foo in order to
fetch the previous page of the list.
+ include: A list of additional fields to include in the response. Currently the only
+ supported value is `step_details.tool_calls[*].file_search.results[*].content`
+ to fetch the file search result content.
+
+ See the
+ [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings)
+ for more information.
+
limit: A limit on the number of objects to be returned. Limit can range between 1 and
100, and the default is 20.
@@ -253,6 +300,7 @@ def list(
{
"after": after,
"before": before,
+ "include": include,
"limit": limit,
"order": order,
},
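On the list endpoint, `include` is simply merged into the existing pagination query parameters, for both the sync and async resources. A possible async usage sketch (placeholder IDs):

```python
import asyncio

from openai import AsyncOpenAI


async def main() -> None:
    client = AsyncOpenAI()  # assumes OPENAI_API_KEY is set in the environment

    # Placeholder IDs; `include` rides alongside the usual pagination params.
    async for step in client.beta.threads.runs.steps.list(
        run_id="run_abc123",
        thread_id="thread_abc123",
        include=["step_details.tool_calls[*].file_search.results[*].content"],
        limit=20,
        order="asc",
    ):
        print(step.id, step.status)


asyncio.run(main())
```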
diff --git a/src/openai/types/beta/file_search_tool.py b/src/openai/types/beta/file_search_tool.py
index 26ab1cb83f..4015b3da09 100644
--- a/src/openai/types/beta/file_search_tool.py
+++ b/src/openai/types/beta/file_search_tool.py
@@ -5,7 +5,21 @@
from ..._models import BaseModel
-__all__ = ["FileSearchTool", "FileSearch"]
+__all__ = ["FileSearchTool", "FileSearch", "FileSearchRankingOptions"]
+
+
+class FileSearchRankingOptions(BaseModel):
+ ranker: Optional[Literal["auto", "default_2024_08_21"]] = None
+ """The ranker to use for the file search.
+
+ If not specified, the `auto` ranker is used.
+ """
+
+ score_threshold: Optional[float] = None
+ """The score threshold for the file search.
+
+ The value must be a floating point number between 0 and 1.
+ """
class FileSearch(BaseModel):
@@ -17,7 +31,15 @@ class FileSearch(BaseModel):
Note that the file search tool may output fewer than `max_num_results` results.
See the
- [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/number-of-chunks-returned)
+ [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings)
+ for more information.
+ """
+
+ ranking_options: Optional[FileSearchRankingOptions] = None
+ """The ranking options for the file search.
+
+ See the
+ [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings)
for more information.
"""
diff --git a/src/openai/types/beta/file_search_tool_param.py b/src/openai/types/beta/file_search_tool_param.py
index 666719f8cd..97e651b0da 100644
--- a/src/openai/types/beta/file_search_tool_param.py
+++ b/src/openai/types/beta/file_search_tool_param.py
@@ -4,7 +4,21 @@
from typing_extensions import Literal, Required, TypedDict
-__all__ = ["FileSearchToolParam", "FileSearch"]
+__all__ = ["FileSearchToolParam", "FileSearch", "FileSearchRankingOptions"]
+
+
+class FileSearchRankingOptions(TypedDict, total=False):
+ ranker: Literal["auto", "default_2024_08_21"]
+ """The ranker to use for the file search.
+
+ If not specified, the `auto` ranker is used.
+ """
+
+ score_threshold: float
+ """The score threshold for the file search.
+
+ The value must be a floating point number between 0 and 1.
+ """
class FileSearch(TypedDict, total=False):
@@ -16,7 +30,15 @@ class FileSearch(TypedDict, total=False):
Note that the file search tool may output fewer than `max_num_results` results.
See the
- [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/number-of-chunks-returned)
+ [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings)
+ for more information.
+ """
+
+ ranking_options: FileSearchRankingOptions
+ """The ranking options for the file search.
+
+ See the
+ [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings)
for more information.
"""
diff --git a/src/openai/types/beta/threads/run_create_params.py b/src/openai/types/beta/threads/run_create_params.py
index d3e6d9c476..8bb73ddc78 100644
--- a/src/openai/types/beta/threads/run_create_params.py
+++ b/src/openai/types/beta/threads/run_create_params.py
@@ -2,11 +2,12 @@
from __future__ import annotations
-from typing import Union, Iterable, Optional
+from typing import List, Union, Iterable, Optional
from typing_extensions import Literal, Required, TypeAlias, TypedDict
from ...chat_model import ChatModel
from ..assistant_tool_param import AssistantToolParam
+from .runs.run_step_include import RunStepInclude
from .message_content_part_param import MessageContentPartParam
from ..code_interpreter_tool_param import CodeInterpreterToolParam
from ..assistant_tool_choice_option_param import AssistantToolChoiceOptionParam
@@ -32,6 +33,18 @@ class RunCreateParamsBase(TypedDict, total=False):
execute this run.
"""
+ include: List[RunStepInclude]
+ """A list of additional fields to include in the response.
+
+ Currently the only supported value is
+ `step_details.tool_calls[*].file_search.results[*].content` to fetch the file
+ search result content.
+
+ See the
+ [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings)
+ for more information.
+ """
+
additional_instructions: Optional[str]
"""Appends additional instructions at the end of the instructions for the run.
diff --git a/src/openai/types/beta/threads/runs/__init__.py b/src/openai/types/beta/threads/runs/__init__.py
index a312ce3df2..467d5d793d 100644
--- a/src/openai/types/beta/threads/runs/__init__.py
+++ b/src/openai/types/beta/threads/runs/__init__.py
@@ -6,9 +6,11 @@
from .tool_call import ToolCall as ToolCall
from .run_step_delta import RunStepDelta as RunStepDelta
from .tool_call_delta import ToolCallDelta as ToolCallDelta
+from .run_step_include import RunStepInclude as RunStepInclude
from .step_list_params import StepListParams as StepListParams
from .function_tool_call import FunctionToolCall as FunctionToolCall
from .run_step_delta_event import RunStepDeltaEvent as RunStepDeltaEvent
+from .step_retrieve_params import StepRetrieveParams as StepRetrieveParams
from .code_interpreter_logs import CodeInterpreterLogs as CodeInterpreterLogs
from .file_search_tool_call import FileSearchToolCall as FileSearchToolCall
from .tool_call_delta_object import ToolCallDeltaObject as ToolCallDeltaObject
diff --git a/src/openai/types/beta/threads/runs/file_search_tool_call.py b/src/openai/types/beta/threads/runs/file_search_tool_call.py
index 57c0ca9a90..da4d58dc37 100644
--- a/src/openai/types/beta/threads/runs/file_search_tool_call.py
+++ b/src/openai/types/beta/threads/runs/file_search_tool_call.py
@@ -1,17 +1,71 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+from typing import List, Optional
from typing_extensions import Literal
from ....._models import BaseModel
-__all__ = ["FileSearchToolCall"]
+__all__ = [
+ "FileSearchToolCall",
+ "FileSearch",
+ "FileSearchRankingOptions",
+ "FileSearchResult",
+ "FileSearchResultContent",
+]
+
+
+class FileSearchRankingOptions(BaseModel):
+ ranker: Literal["default_2024_08_21"]
+ """The ranker used for the file search."""
+
+ score_threshold: float
+ """The score threshold for the file search.
+
+ The value must be a floating point number between 0 and 1.
+ """
+
+
+class FileSearchResultContent(BaseModel):
+ text: Optional[str] = None
+ """The text content of the file."""
+
+ type: Optional[Literal["text"]] = None
+ """The type of the content."""
+
+
+class FileSearchResult(BaseModel):
+ file_id: str
+ """The ID of the file that result was found in."""
+
+ file_name: str
+ """The name of the file that result was found in."""
+
+ score: float
+ """The score of the result.
+
+ The value must be a floating point number between 0 and 1.
+ """
+
+ content: Optional[List[FileSearchResultContent]] = None
+ """The content of the result that was found.
+
+ The content is only included if requested via the include query parameter.
+ """
+
+
+class FileSearch(BaseModel):
+ ranking_options: Optional[FileSearchRankingOptions] = None
+ """The ranking options for the file search."""
+
+ results: Optional[List[FileSearchResult]] = None
+ """The results of the file search."""
class FileSearchToolCall(BaseModel):
id: str
"""The ID of the tool call object."""
- file_search: object
- """For now, this is always going to be an empty object."""
+ file_search: FileSearch
+ """The file search that was run, including its ranking options and, when requested via `include`, the results."""
type: Literal["file_search"]
diff --git a/src/openai/types/beta/threads/runs/run_step_include.py b/src/openai/types/beta/threads/runs/run_step_include.py
new file mode 100644
index 0000000000..8e76c1b716
--- /dev/null
+++ b/src/openai/types/beta/threads/runs/run_step_include.py
@@ -0,0 +1,7 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal, TypeAlias
+
+__all__ = ["RunStepInclude"]
+
+RunStepInclude: TypeAlias = Literal["step_details.tool_calls[*].file_search.results[*].content"]
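`RunStepInclude` is just a `Literal` alias, so it can be used to keep the rather long include string in one type-annotated place. A tiny sketch:

```python
from typing import List

from openai.types.beta.threads.runs import RunStepInclude

# The only value the Literal currently accepts.
FILE_SEARCH_CONTENT: RunStepInclude = "step_details.tool_calls[*].file_search.results[*].content"
INCLUDE: List[RunStepInclude] = [FILE_SEARCH_CONTENT]
```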
diff --git a/src/openai/types/beta/threads/runs/step_list_params.py b/src/openai/types/beta/threads/runs/step_list_params.py
index 606d444539..3931bd7e0c 100644
--- a/src/openai/types/beta/threads/runs/step_list_params.py
+++ b/src/openai/types/beta/threads/runs/step_list_params.py
@@ -2,8 +2,11 @@
from __future__ import annotations
+from typing import List
from typing_extensions import Literal, Required, TypedDict
+from .run_step_include import RunStepInclude
+
__all__ = ["StepListParams"]
@@ -28,6 +31,18 @@ class StepListParams(TypedDict, total=False):
of the list.
"""
+ include: List[RunStepInclude]
+ """A list of additional fields to include in the response.
+
+ Currently the only supported value is
+ `step_details.tool_calls[*].file_search.results[*].content` to fetch the file
+ search result content.
+
+ See the
+ [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings)
+ for more information.
+ """
+
limit: int
"""A limit on the number of objects to be returned.
diff --git a/src/openai/types/beta/threads/runs/step_retrieve_params.py b/src/openai/types/beta/threads/runs/step_retrieve_params.py
new file mode 100644
index 0000000000..22c1c049f4
--- /dev/null
+++ b/src/openai/types/beta/threads/runs/step_retrieve_params.py
@@ -0,0 +1,28 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List
+from typing_extensions import Required, TypedDict
+
+from .run_step_include import RunStepInclude
+
+__all__ = ["StepRetrieveParams"]
+
+
+class StepRetrieveParams(TypedDict, total=False):
+ thread_id: Required[str]
+
+ run_id: Required[str]
+
+ include: List[RunStepInclude]
+ """A list of additional fields to include in the response.
+
+ Currently the only supported value is
+ `step_details.tool_calls[*].file_search.results[*].content` to fetch the file
+ search result content.
+
+ See the
+ [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings)
+ for more information.
+ """
diff --git a/tests/api_resources/beta/threads/runs/test_steps.py b/tests/api_resources/beta/threads/runs/test_steps.py
index e6108d8dad..f5dc17e0b5 100644
--- a/tests/api_resources/beta/threads/runs/test_steps.py
+++ b/tests/api_resources/beta/threads/runs/test_steps.py
@@ -27,6 +27,16 @@ def test_method_retrieve(self, client: OpenAI) -> None:
)
assert_matches_type(RunStep, step, path=["response"])
+ @parametrize
+ def test_method_retrieve_with_all_params(self, client: OpenAI) -> None:
+ step = client.beta.threads.runs.steps.retrieve(
+ step_id="step_id",
+ thread_id="thread_id",
+ run_id="run_id",
+ include=["step_details.tool_calls[*].file_search.results[*].content"],
+ )
+ assert_matches_type(RunStep, step, path=["response"])
+
@parametrize
def test_raw_response_retrieve(self, client: OpenAI) -> None:
response = client.beta.threads.runs.steps.with_raw_response.retrieve(
@@ -89,10 +99,11 @@ def test_method_list(self, client: OpenAI) -> None:
@parametrize
def test_method_list_with_all_params(self, client: OpenAI) -> None:
step = client.beta.threads.runs.steps.list(
- "string",
- thread_id="string",
- after="string",
- before="string",
+ run_id="run_id",
+ thread_id="thread_id",
+ after="after",
+ before="before",
+ include=["step_details.tool_calls[*].file_search.results[*].content"],
limit=0,
order="asc",
)
@@ -151,6 +162,16 @@ async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None:
)
assert_matches_type(RunStep, step, path=["response"])
+ @parametrize
+ async def test_method_retrieve_with_all_params(self, async_client: AsyncOpenAI) -> None:
+ step = await async_client.beta.threads.runs.steps.retrieve(
+ step_id="step_id",
+ thread_id="thread_id",
+ run_id="run_id",
+ include=["step_details.tool_calls[*].file_search.results[*].content"],
+ )
+ assert_matches_type(RunStep, step, path=["response"])
+
@parametrize
async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None:
response = await async_client.beta.threads.runs.steps.with_raw_response.retrieve(
@@ -213,10 +234,11 @@ async def test_method_list(self, async_client: AsyncOpenAI) -> None:
@parametrize
async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None:
step = await async_client.beta.threads.runs.steps.list(
- "string",
- thread_id="string",
- after="string",
- before="string",
+ run_id="run_id",
+ thread_id="thread_id",
+ after="after",
+ before="before",
+ include=["step_details.tool_calls[*].file_search.results[*].content"],
limit=0,
order="asc",
)
diff --git a/tests/api_resources/beta/threads/test_runs.py b/tests/api_resources/beta/threads/test_runs.py
index 5d16bdb364..c8d70f5f89 100644
--- a/tests/api_resources/beta/threads/test_runs.py
+++ b/tests/api_resources/beta/threads/test_runs.py
@@ -33,9 +33,10 @@ def test_method_create_overload_1(self, client: OpenAI) -> None:
@parametrize
def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None:
run = client.beta.threads.runs.create(
- "string",
- assistant_id="string",
- additional_instructions="string",
+ thread_id="thread_id",
+ assistant_id="assistant_id",
+ include=["step_details.tool_calls[*].file_search.results[*].content"],
+ additional_instructions="additional_instructions",
additional_messages=[
{
"content": "string",
@@ -199,7 +200,8 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None:
"string",
assistant_id="string",
stream=True,
- additional_instructions="string",
+ include=["step_details.tool_calls[*].file_search.results[*].content"],
+ additional_instructions="additional_instructions",
additional_messages=[
{
"content": "string",
@@ -699,9 +701,10 @@ async def test_method_create_overload_1(self, async_client: AsyncOpenAI) -> None
@parametrize
async def test_method_create_with_all_params_overload_1(self, async_client: AsyncOpenAI) -> None:
run = await async_client.beta.threads.runs.create(
- "string",
- assistant_id="string",
- additional_instructions="string",
+ thread_id="thread_id",
+ assistant_id="assistant_id",
+ include=["step_details.tool_calls[*].file_search.results[*].content"],
+ additional_instructions="additional_instructions",
additional_messages=[
{
"content": "string",
@@ -865,7 +868,8 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn
"string",
assistant_id="string",
stream=True,
- additional_instructions="string",
+ include=["step_details.tool_calls[*].file_search.results[*].content"],
+ additional_instructions="additional_instructions",
additional_messages=[
{
"content": "string",