OpenAI to report when running out of funds (#139088)

This commit is contained in:
Paulus Schoutsen 2025-02-23 04:51:25 -05:00 committed by GitHub
parent 0797c3228b
commit 91668e99e3
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
2 changed files with 28 additions and 6 deletions

View File

@@ -287,6 +287,9 @@ class OpenAIConversationEntity(
         try:
             result = await client.chat.completions.create(**model_args)
+        except openai.RateLimitError as err:
+            LOGGER.error("Rate limited by OpenAI: %s", err)
+            raise HomeAssistantError("Rate limited or insufficient funds") from err
         except openai.OpenAIError as err:
             LOGGER.error("Error talking to OpenAI: %s", err)
             raise HomeAssistantError("Error talking to OpenAI") from err

View File

@@ -4,7 +4,7 @@ from collections.abc import Generator
 from unittest.mock import AsyncMock, patch

 from httpx import Response
-from openai import RateLimitError
+from openai import AuthenticationError, RateLimitError
 from openai.types.chat.chat_completion_chunk import (
     ChatCompletionChunk,
     Choice,
@@ -94,23 +94,42 @@ async def test_entity(
     )


+@pytest.mark.parametrize(
+    ("exception", "message"),
+    [
+        (
+            RateLimitError(
+                response=Response(status_code=429, request=""), body=None, message=None
+            ),
+            "Rate limited or insufficient funds",
+        ),
+        (
+            AuthenticationError(
+                response=Response(status_code=401, request=""), body=None, message=None
+            ),
+            "Error talking to OpenAI",
+        ),
+    ],
+)
 async def test_error_handling(
-    hass: HomeAssistant, mock_config_entry: MockConfigEntry, mock_init_component
+    hass: HomeAssistant,
+    mock_config_entry: MockConfigEntry,
+    mock_init_component,
+    exception,
+    message,
 ) -> None:
     """Test that we handle errors when calling completion API."""
     with patch(
         "openai.resources.chat.completions.AsyncCompletions.create",
         new_callable=AsyncMock,
-        side_effect=RateLimitError(
-            response=Response(status_code=None, request=""), body=None, message=None
-        ),
+        side_effect=exception,
     ):
         result = await conversation.async_converse(
             hass, "hello", None, Context(), agent_id=mock_config_entry.entry_id
         )

     assert result.response.response_type == intent.IntentResponseType.ERROR, result
-    assert result.response.error_code == "unknown", result
+    assert result.response.speech["plain"]["speech"] == message, result.response.speech


 async def test_conversation_agent(