Encrypted reasoning items support for OpenAI Conversation (#148279)

This commit is contained in:
Denis Shulyaka 2025-07-10 15:46:10 +07:00 committed by GitHub
parent c2bc4a990e
commit cbe2fbdc34
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
3 changed files with 23 additions and 5 deletions

View File

@@ -293,6 +293,7 @@ class OpenAIBaseLLMEntity(Entity):
"top_p": options.get(CONF_TOP_P, RECOMMENDED_TOP_P),
"temperature": options.get(CONF_TEMPERATURE, RECOMMENDED_TEMPERATURE),
"user": chat_log.conversation_id,
"store": False,
"stream": True,
}
if tools:
@@ -304,8 +305,7 @@ class OpenAIBaseLLMEntity(Entity):
CONF_REASONING_EFFORT, RECOMMENDED_REASONING_EFFORT
)
}
else:
model_args["store"] = False
model_args["include"] = ["reasoning.encrypted_content"]
try:
result = await client.responses.create(**model_args)

View File

@@ -5,7 +5,10 @@ from unittest.mock import patch
import pytest
from homeassistant.components.openai_conversation.const import DEFAULT_CONVERSATION_NAME
from homeassistant.components.openai_conversation.const import (
CONF_CHAT_MODEL,
DEFAULT_CONVERSATION_NAME,
)
from homeassistant.config_entries import ConfigSubentryData
from homeassistant.const import CONF_LLM_HASS_API
from homeassistant.core import HomeAssistant
@@ -59,6 +62,19 @@ def mock_config_entry_with_assist(
return mock_config_entry
@pytest.fixture
def mock_config_entry_with_reasoning_model(
    hass: HomeAssistant, mock_config_entry: MockConfigEntry
) -> MockConfigEntry:
    """Mock a config entry with assist enabled and a reasoning chat model."""
    # Update the existing subentry in place: keep the assist LLM API and
    # switch the chat model to a reasoning model ("o4-mini") so tests
    # exercise the reasoning/encrypted-content code path.
    hass.config_entries.async_update_subentry(
        mock_config_entry,
        next(iter(mock_config_entry.subentries.values())),
        data={CONF_LLM_HASS_API: llm.LLM_API_ASSIST, CONF_CHAT_MODEL: "o4-mini"},
    )
    return mock_config_entry
@pytest.fixture
async def mock_init_component(
hass: HomeAssistant, mock_config_entry: MockConfigEntry

View File

@@ -499,6 +499,7 @@ def create_reasoning_item(id: str, output_index: int) -> list[ResponseStreamEvent]:
summary=[],
type="reasoning",
status=None,
encrypted_content="AAA",
),
output_index=output_index,
sequence_number=0,
@@ -510,6 +511,7 @@ def create_reasoning_item(id: str, output_index: int) -> list[ResponseStreamEvent]:
summary=[],
type="reasoning",
status=None,
encrypted_content="AAABBB",
),
output_index=output_index,
sequence_number=0,
@@ -566,7 +568,7 @@ def create_web_search_item(id: str, output_index: int) -> list[ResponseStreamEvent]:
async def test_function_call(
hass: HomeAssistant,
mock_config_entry_with_assist: MockConfigEntry,
mock_config_entry_with_reasoning_model: MockConfigEntry,
mock_init_component,
mock_create_stream: AsyncMock,
mock_chat_log: MockChatLog, # noqa: F811
@@ -617,7 +619,7 @@ async def test_function_call(
"id": "rs_A",
"summary": [],
"type": "reasoning",
"encrypted_content": None,
"encrypted_content": "AAABBB",
}
assert result.response.response_type == intent.IntentResponseType.ACTION_DONE
# Don't test the prompt, as it's not deterministic