diff --git a/homeassistant/components/conversation/chat_log.py b/homeassistant/components/conversation/chat_log.py
index c78f41f3c5c..7580b878d04 100644
--- a/homeassistant/components/conversation/chat_log.py
+++ b/homeassistant/components/conversation/chat_log.py
@@ -395,7 +395,6 @@ class ChatLog:
         llm_context = llm.LLMContext(
             platform=conversing_domain,
             context=user_input.context,
-            user_prompt=user_input.text,
             language=user_input.language,
             assistant=DOMAIN,
             device_id=user_input.device_id,
diff --git a/homeassistant/components/mcp_server/http.py b/homeassistant/components/mcp_server/http.py
index bc8fdbd56c8..07284b29434 100644
--- a/homeassistant/components/mcp_server/http.py
+++ b/homeassistant/components/mcp_server/http.py
@@ -88,7 +88,6 @@ class ModelContextProtocolSSEView(HomeAssistantView):
         context = llm.LLMContext(
             platform=DOMAIN,
             context=self.context(request),
-            user_prompt=None,
             language="*",
             assistant=conversation.DOMAIN,
             device_id=None,
diff --git a/homeassistant/helpers/llm.py b/homeassistant/helpers/llm.py
index adf113e0f30..51b5510495f 100644
--- a/homeassistant/helpers/llm.py
+++ b/homeassistant/helpers/llm.py
@@ -160,11 +160,19 @@ class LLMContext:
     """Tool input to be processed."""
 
     platform: str
+    """Integration that is handling the LLM request."""
+
     context: Context | None
-    user_prompt: str | None
+    """Context of the LLM request."""
+
     language: str | None
+    """Language of the LLM request."""
+
     assistant: str | None
+    """Assistant domain that is handling the LLM request."""
+
     device_id: str | None
+    """Device that is making the request."""
 
 
 @dataclass(slots=True)
@@ -302,7 +310,7 @@ class IntentTool(Tool):
             platform=llm_context.platform,
             intent_type=self.name,
             slots=slots,
-            text_input=llm_context.user_prompt,
+            text_input=None,
             context=llm_context.context,
             language=llm_context.language,
             assistant=llm_context.assistant,
diff --git a/tests/components/anthropic/test_conversation.py b/tests/components/anthropic/test_conversation.py
index 3e01e91976d..6aadcf3eeb4 100644
--- a/tests/components/anthropic/test_conversation.py
+++ b/tests/components/anthropic/test_conversation.py
@@ -415,7 +415,6 @@ async def test_function_call(
         llm.LLMContext(
             platform="anthropic",
             context=context,
-            user_prompt="Please call the test function",
             language="en",
             assistant="conversation",
             device_id=None,
@@ -510,7 +509,6 @@ async def test_function_exception(
         llm.LLMContext(
             platform="anthropic",
             context=context,
-            user_prompt="Please call the test function",
             language="en",
             assistant="conversation",
             device_id=None,
diff --git a/tests/components/mcp/test_init.py b/tests/components/mcp/test_init.py
index 045fb99e181..00666e71d05 100644
--- a/tests/components/mcp/test_init.py
+++ b/tests/components/mcp/test_init.py
@@ -58,7 +58,6 @@ def create_llm_context() -> llm.LLMContext:
     return llm.LLMContext(
         platform="test_platform",
         context=Context(),
-        user_prompt="test_text",
         language="*",
         assistant="conversation",
         device_id=None,
diff --git a/tests/components/ollama/test_conversation.py b/tests/components/ollama/test_conversation.py
index 8e54018a14d..e83c2a3495f 100644
--- a/tests/components/ollama/test_conversation.py
+++ b/tests/components/ollama/test_conversation.py
@@ -284,7 +284,6 @@ async def test_function_call(
         llm.LLMContext(
             platform="ollama",
             context=context,
-            user_prompt="Please call the test function",
             language="en",
             assistant="conversation",
             device_id=None,
@@ -369,7 +368,6 @@ async def test_function_exception(
         llm.LLMContext(
             platform="ollama",
             context=context,
-            user_prompt="Please call the test function",
             language="en",
             assistant="conversation",
             device_id=None,
diff --git a/tests/helpers/test_llm.py b/tests/helpers/test_llm.py
index 1a9225c505b..98dee920bd9 100644
--- a/tests/helpers/test_llm.py
+++ b/tests/helpers/test_llm.py
@@ -36,7 +36,6 @@ def llm_context() -> llm.LLMContext:
     return llm.LLMContext(
         platform="",
         context=None,
-        user_prompt=None,
         language=None,
         assistant=None,
         device_id=None,
@@ -162,7 +161,6 @@ async def test_assist_api(
     llm_context = llm.LLMContext(
         platform="test_platform",
         context=test_context,
-        user_prompt="test_text",
         language="*",
         assistant="conversation",
         device_id=None,
@@ -237,7 +235,7 @@ async def test_assist_api(
             "area": {"value": "kitchen"},
             "floor": {"value": "ground_floor"},
         },
-        text_input="test_text",
+        text_input=None,
         context=test_context,
         language="*",
         assistant="conversation",
@@ -296,7 +294,7 @@ async def test_assist_api(
             "preferred_area_id": {"value": area.id},
             "preferred_floor_id": {"value": floor.floor_id},
         },
-        text_input="test_text",
+        text_input=None,
         context=test_context,
         language="*",
         assistant="conversation",
@@ -412,7 +410,6 @@ async def test_assist_api_prompt(
     llm_context = llm.LLMContext(
         platform="test_platform",
         context=context,
-        user_prompt="test_text",
         language="*",
         assistant="conversation",
         device_id=None,
@@ -760,7 +757,6 @@ async def test_script_tool(
     llm_context = llm.LLMContext(
         platform="test_platform",
         context=context,
-        user_prompt="test_text",
         language="*",
         assistant="conversation",
         device_id=None,
@@ -961,7 +957,6 @@ async def test_script_tool_name(hass: HomeAssistant) -> None:
     llm_context = llm.LLMContext(
         platform="test_platform",
         context=context,
-        user_prompt="test_text",
         language="*",
         assistant="conversation",
         device_id=None,
@@ -1241,7 +1236,6 @@ async def test_calendar_get_events_tool(hass: HomeAssistant) -> None:
     llm_context = llm.LLMContext(
         platform="test_platform",
         context=context,
-        user_prompt="test_text",
         language="*",
         assistant="conversation",
         device_id=None,
@@ -1344,7 +1338,6 @@ async def test_todo_get_items_tool(hass: HomeAssistant) -> None:
     llm_context = llm.LLMContext(
         platform="test_platform",
         context=context,
-        user_prompt="test_text",
         language="*",
         assistant="conversation",
         device_id=None,
@@ -1451,7 +1444,6 @@ async def test_no_tools_exposed(hass: HomeAssistant) -> None:
     llm_context = llm.LLMContext(
         platform="test_platform",
         context=context,
-        user_prompt="test_text",
         language="*",
         assistant="conversation",
         device_id=None,