diff --git a/README.md b/README.md
index 7e4f0ae657..bb00e4fcdc 100644
--- a/README.md
+++ b/README.md
@@ -77,7 +77,7 @@ With an image URL:
 
 ```python
 prompt = "What is in this image?"
-img_url = "https://upload.wikimedia.org/wikipedia/commons/thumb/d/d5/2023_06_08_Raccoon1.jpg/1599px-2023_06_08_Raccoon1.jpg"
+img_url = "https://upload.wikimedia.org/wikipedia/commons/thumb/d/d5/2023_06_08_Raccoon1.jpg/640px-2023_06_08_Raccoon1.jpg"
 
 response = client.responses.create(
     model="gpt-5.2",
@@ -93,6 +93,8 @@ response = client.responses.create(
 )
 ```
 
+If the request fails with a `BadRequestError` containing `"invalid_value"` because the API could not download the image, either swap in a smaller image hosted at a URL you control or switch to the base64 example below, which uploads the image bytes directly rather than having them fetched from the web.
+
 With the image as a base64 encoded string:
 
 ```python
diff --git a/src/openai/_streaming.py b/src/openai/_streaming.py
index 61a742668a..2ebe840d8b 100644
--- a/src/openai/_streaming.py
+++ b/src/openai/_streaming.py
@@ -55,46 +55,40 @@ def __stream__(self) -> Iterator[_T]:
         process_data = self._client._process_response_data
         iterator = self._iter_events()
 
+        def _raise_streaming_error(data: object) -> None:
+            if not is_mapping(data):
+                return
+            error = data.get("error")
+            if not error:
+                return
+
+            message: str | None = None
+            if is_mapping(error):
+                message = error.get("message")
+            if not message or not isinstance(message, str):
+                message = "An error occurred during streaming"
+
+            raise APIError(
+                message=message,
+                request=self.response.request,
+                body=error,
+            )
+
         try:
             for sse in iterator:
                 if sse.data.startswith("[DONE]"):
                     break
 
                 # we have to special case the Assistants `thread.` events since we won't have an "event" key in the data
-                if sse.event and sse.event.startswith("thread."):
-                    data = sse.json()
-
-                    if sse.event == "error" and is_mapping(data) and data.get("error"):
-                        message = None
-                        error = data.get("error")
-                        if is_mapping(error):
-                            message = error.get("message")
-                        if not message or not isinstance(message, str):
-                            message = "An error occurred during streaming"
-
-                        raise APIError(
-                            message=message,
-                            request=self.response.request,
-                            body=data["error"],
-                        )
+                data = sse.json()
+                if sse.event == "error":
+                    _raise_streaming_error(data)
+                if sse.event and sse.event.startswith("thread."):
+                    _raise_streaming_error(data)
 
                     yield process_data(data={"data": data, "event": sse.event}, cast_to=cast_to, response=response)
                 else:
-                    data = sse.json()
-                    if is_mapping(data) and data.get("error"):
-                        message = None
-                        error = data.get("error")
-                        if is_mapping(error):
-                            message = error.get("message")
-                        if not message or not isinstance(message, str):
-                            message = "An error occurred during streaming"
-
-                        raise APIError(
-                            message=message,
-                            request=self.response.request,
-                            body=data["error"],
-                        )
-
+                    _raise_streaming_error(data)
                     yield process_data(data=data, cast_to=cast_to, response=response)
 
         finally:
@@ -158,46 +152,40 @@ async def __stream__(self) -> AsyncIterator[_T]:
         process_data = self._client._process_response_data
         iterator = self._iter_events()
 
+        def _raise_streaming_error(data: object) -> None:
+            if not is_mapping(data):
+                return
+            error = data.get("error")
+            if not error:
+                return
+
+            message: str | None = None
+            if is_mapping(error):
+                message = error.get("message")
+            if not message or not isinstance(message, str):
+                message = "An error occurred during streaming"
+
+            raise APIError(
+                message=message,
+                request=self.response.request,
+                body=error,
+            )
+
         try:
             async for sse in iterator:
                 if sse.data.startswith("[DONE]"):
                     break
 
                 # we have to special case the Assistants `thread.` events since we won't have an "event" key in the data
-                if sse.event and sse.event.startswith("thread."):
-                    data = sse.json()
-
-                    if sse.event == "error" and is_mapping(data) and data.get("error"):
-                        message = None
-                        error = data.get("error")
-                        if is_mapping(error):
-                            message = error.get("message")
-                        if not message or not isinstance(message, str):
-                            message = "An error occurred during streaming"
-
-                        raise APIError(
-                            message=message,
-                            request=self.response.request,
-                            body=data["error"],
-                        )
+                data = sse.json()
+                if sse.event == "error":
+                    _raise_streaming_error(data)
+                if sse.event and sse.event.startswith("thread."):
+                    _raise_streaming_error(data)
 
                     yield process_data(data={"data": data, "event": sse.event}, cast_to=cast_to, response=response)
                 else:
-                    data = sse.json()
-                    if is_mapping(data) and data.get("error"):
-                        message = None
-                        error = data.get("error")
-                        if is_mapping(error):
-                            message = error.get("message")
-                        if not message or not isinstance(message, str):
-                            message = "An error occurred during streaming"
-
-                        raise APIError(
-                            message=message,
-                            request=self.response.request,
-                            body=data["error"],
-                        )
-
+                    _raise_streaming_error(data)
                    yield process_data(data=data, cast_to=cast_to, response=response)
 
         finally:
@@ -223,6 +211,10 @@ async def close(self) -> None:
         """
         await self.response.aclose()
 
+    async def aclose(self) -> None:
+        """Alias for `close` so callers that expect an `aclose()` method still work."""
+        await self.close()
+
 
 class ServerSentEvent:
     def __init__(
diff --git a/src/openai/types/shared/chat_model.py b/src/openai/types/shared/chat_model.py
index 8223b81bef..8bbe813b7d 100644
--- a/src/openai/types/shared/chat_model.py
+++ b/src/openai/types/shared/chat_model.py
@@ -70,6 +70,7 @@
     "gpt-4-32k",
     "gpt-4-32k-0314",
     "gpt-4-32k-0613",
+    "gpt-audio-mini",
     "gpt-3.5-turbo",
     "gpt-3.5-turbo-16k",
     "gpt-3.5-turbo-0301",
diff --git a/tests/api_resources/test_responses.py b/tests/api_resources/test_responses.py
index a644b2c6b9..9bc7439728 100644
--- a/tests/api_resources/test_responses.py
+++ b/tests/api_resources/test_responses.py
@@ -32,7 +32,7 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None:
             background=True,
             context_management=[
                 {
-                    "type": "type",
+                    "type": "compaction",
                     "compact_threshold": 1000,
                 }
             ],
@@ -119,7 +119,7 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None:
             background=True,
             context_management=[
                 {
-                    "type": "type",
+                    "type": "compaction",
                     "compact_threshold": 1000,
                 }
             ],
@@ -440,7 +440,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: AsyncOpenAI) -> None:
             background=True,
             context_management=[
                 {
-                    "type": "type",
+                    "type": "compaction",
                     "compact_threshold": 1000,
                 }
             ],
@@ -527,7 +527,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: AsyncOpenAI) -> None:
             background=True,
             context_management=[
                 {
-                    "type": "type",
+                    "type": "compaction",
                     "compact_threshold": 1000,
                 }
             ],
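
On the README change: a minimal sketch of the base64 fallback the new paragraph points to. It mirrors the README's own base64 example (not shown in this hunk); the local filename `raccoon.jpg` is hypothetical, and `client`, `prompt`, and the `gpt-5.2` model name are taken from the hunk above.

```python
import base64

from openai import OpenAI

client = OpenAI()
prompt = "What is in this image?"

# Read the image from disk and base64-encode it, so the bytes travel inside
# the request body and the API never has to fetch a URL itself.
with open("raccoon.jpg", "rb") as f:  # hypothetical local copy of the image
    b64_image = base64.b64encode(f.read()).decode("utf-8")

response = client.responses.create(
    model="gpt-5.2",
    input=[
        {
            "role": "user",
            "content": [
                {"type": "input_text", "text": prompt},
                # A data: URL embeds the encoded bytes in place of a web URL.
                {"type": "input_image", "image_url": f"data:image/jpeg;base64,{b64_image}"},
            ],
        }
    ],
)
print(response.output_text)
```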
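
On the `_streaming.py` change: the patch folds two near-identical error blocks into one `_raise_streaming_error` helper. Below is a standalone sketch of that helper's contract for reviewers, with stand-in `APIError` and `is_mapping` definitions; the real SDK versions differ (for example, `APIError` takes an `httpx.Request`), so treat this as illustration only.

```python
from typing import Any, TypeGuard


class APIError(Exception):
    """Stand-in for openai.APIError; the real one takes an httpx.Request."""

    def __init__(self, message: str, request: Any, body: Any) -> None:
        super().__init__(message)
        self.request = request
        self.body = body


def is_mapping(obj: object) -> TypeGuard[dict[str, Any]]:
    """Stand-in for the SDK's is_mapping type guard."""
    return isinstance(obj, dict)


def _raise_streaming_error(data: object, request: Any = None) -> None:
    """Raise APIError iff the decoded SSE payload carries an `error` member."""
    if not is_mapping(data):
        return
    error = data.get("error")
    if not error:
        return

    message = None
    if is_mapping(error):
        message = error.get("message")
    if not message or not isinstance(message, str):
        message = "An error occurred during streaming"

    raise APIError(message=message, request=request, body=error)


# Payloads without an `error` member pass through silently ...
_raise_streaming_error("[DONE]")
_raise_streaming_error({"id": "chatcmpl-123"})

# ... while any truthy `error` raises, preferring the server's message.
try:
    _raise_streaming_error({"error": {"message": "overloaded"}})
except APIError as exc:
    assert str(exc) == "overloaded" and exc.body == {"message": "overloaded"}
```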
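
On the new `aclose` alias: async cleanup helpers in the standard library look up a method named exactly `aclose`, so `close` alone is not enough. A minimal illustration with a stand-in class rather than the SDK's actual stream type:

```python
import asyncio
from contextlib import aclosing  # calls `aclose()` on exit; Python 3.10+


class FakeAsyncStream:
    """Stand-in with the same close()/aclose() surface as the patched class."""

    def __init__(self) -> None:
        self.closed = False

    async def close(self) -> None:
        self.closed = True

    async def aclose(self) -> None:
        # Mirrors the patch: aclose simply delegates to close.
        await self.close()


async def main() -> None:
    stream = FakeAsyncStream()
    # Without the aclose alias, aclosing() would raise AttributeError on exit.
    async with aclosing(stream):
        pass
    assert stream.closed


asyncio.run(main())
```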
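
On the test updates: the `context_management` entries now use the literal `"compaction"` instead of the placeholder `"type"`. A hedged usage sketch assuming only the parameter shape the tests exercise; whether a given model accepts this parameter is not something the diff shows.

```python
from openai import OpenAI

client = OpenAI()

response = client.responses.create(
    model="gpt-5.2",
    input="Summarize our conversation so far.",
    context_management=[
        {
            "type": "compaction",       # the literal the updated tests use
            "compact_threshold": 1000,  # threshold value copied from the tests
        }
    ],
)
print(response.output_text)
```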