Mirror of https://github.com/home-assistant/core.git (synced 2025-07-14 08:47:10 +00:00)
Fix quality loss for LLM conversation agent question answering (#142873)
* Fix a bug parsing a streaming response with no json
* Remove debug lines
* Fix quality loss for LLM conversation agent question answering
* Update tests
parent 8767599ad4
commit db043b26da
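Before the diff, a quick orientation on what the renamed tool looks like from a conversation agent's side. This is a minimal sketch, not code from the commit: the helper name answer_live_state_question and the prebuilt llm_context are assumptions, while llm.async_get_api, api.tools, api.async_call_tool, and llm.ToolInput mirror the calls exercised in the updated tests further down.

from homeassistant.core import HomeAssistant
from homeassistant.helpers import llm


async def answer_live_state_question(hass: HomeAssistant, llm_context: llm.LLMContext):
    """Sketch of an agent round-trip through the renamed GetLiveContext tool.

    Assumes ``llm_context`` was already built by the conversation agent.
    """
    api = await llm.async_get_api(hass, "assist", llm_context)

    # api.api_prompt now carries DYNAMIC_CONTEXT_PROMPT plus the
    # "Static Context:" overview, steering the model to call the tool
    # whenever a question needs live state.
    assert "GetLiveContext" in [tool.name for tool in api.tools]

    # When the model emits a tool call, the agent relays it verbatim.
    result = await api.async_call_tool(
        llm.ToolInput(tool_name="GetLiveContext", tool_args={})
    )
    if not result["success"]:
        return result["error"]
    # On success the payload carries the "Live Context:" YAML overview
    # (the tests below pin this down); it is fed back to the model.
    return result["result"]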
@@ -72,6 +72,19 @@ NO_ENTITIES_PROMPT = (
     "to their voice assistant in Home Assistant."
 )
 
+DYNAMIC_CONTEXT_PROMPT = """You ARE equipped to answer questions about the current state of
+the home using the `GetLiveContext` tool. This is a primary function. Do not state you lack the
+functionality if the question requires live data.
+If the user asks about device existence/type (e.g., "Do I have lights in the bedroom?"): Answer
+from the static context below.
+If the user asks about the CURRENT state, value, or mode (e.g., "Is the lock locked?",
+"Is the fan on?", "What mode is the thermostat in?", "What is the temperature outside?"):
+1. Recognize this requires live data.
+2. You MUST call `GetLiveContext`. This tool will provide the needed real-time information (like temperature from the local weather, lock status, etc.).
+3. Use the tool's response** to answer the user accurately (e.g., "The temperature outside is [value from tool].").
+For general knowledge questions not about the home: Answer truthfully from internal knowledge.
+"""
+
 
 @callback
 def async_render_no_api_prompt(hass: HomeAssistant) -> str:
@@ -495,6 +508,8 @@ class AssistAPI(API):
         ):
             prompt.append("This device is not able to start timers.")
 
+        prompt.append(DYNAMIC_CONTEXT_PROMPT)
+
         return prompt
 
     @callback
@@ -506,7 +521,7 @@ class AssistAPI(API):
 
         if exposed_entities and exposed_entities["entities"]:
             prompt.append(
-                "An overview of the areas and the devices in this smart home:"
+                "Static Context: An overview of the areas and the devices in this smart home:"
             )
             prompt.append(yaml_util.dump(list(exposed_entities["entities"].values())))
 
@@ -568,7 +583,7 @@ class AssistAPI(API):
             )
 
         if exposed_domains:
-            tools.append(GetHomeStateTool())
+            tools.append(GetLiveContextTool())
 
         return tools
 
@@ -1009,7 +1024,7 @@ class CalendarGetEventsTool(Tool):
         return {"success": True, "result": events}
 
 
-class GetHomeStateTool(Tool):
+class GetLiveContextTool(Tool):
     """Tool for getting the current state of exposed entities.
 
     This returns state for all entities that have been exposed to
@@ -1017,8 +1032,13 @@ class GetHomeStateTool(Tool):
     returns state for entities based on intent parameters.
     """
 
-    name = "get_home_state"
-    description = "Get the current state of all devices in the home. "
+    name = "GetLiveContext"
+    description = (
+        "Use this tool when the user asks a question about the CURRENT state, "
+        "value, or mode of a specific device, sensor, entity, or area in the "
+        "smart home, and the answer can be improved with real-time data not "
+        "available in the static device overview list. "
+    )
 
     async def async_call(
         self,
@@ -1036,7 +1056,7 @@ class GetHomeStateTool(Tool):
         if not exposed_entities["entities"]:
             return {"success": False, "error": NO_ENTITIES_PROMPT}
         prompt = [
-            "An overview of the areas and the devices in this smart home:",
+            "Live Context: An overview of the areas and the devices in this smart home:",
             yaml_util.dump(list(exposed_entities["entities"].values())),
         ]
         return {
@@ -185,13 +185,13 @@ async def test_assist_api(
 
     assert len(llm.async_get_apis(hass)) == 1
     api = await llm.async_get_api(hass, "assist", llm_context)
-    assert [tool.name for tool in api.tools] == ["get_home_state"]
+    assert [tool.name for tool in api.tools] == ["GetLiveContext"]
 
     # Match all
     intent_handler.platforms = None
 
     api = await llm.async_get_api(hass, "assist", llm_context)
-    assert [tool.name for tool in api.tools] == ["test_intent", "get_home_state"]
+    assert [tool.name for tool in api.tools] == ["test_intent", "GetLiveContext"]
 
     # Match specific domain
     intent_handler.platforms = {"light"}
@@ -579,7 +579,7 @@ async def test_assist_api_prompt(
             suggested_area="Test Area 2",
         )
     )
-    exposed_entities_prompt = """An overview of the areas and the devices in this smart home:
+    exposed_entities_prompt = """Live Context: An overview of the areas and the devices in this smart home:
 - names: '1'
   domain: light
   state: unavailable
@@ -627,7 +627,7 @@ async def test_assist_api_prompt(
   state: unavailable
   areas: Test Area 2
 """
-    stateless_exposed_entities_prompt = """An overview of the areas and the devices in this smart home:
+    stateless_exposed_entities_prompt = """Static Context: An overview of the areas and the devices in this smart home:
 - names: '1'
   domain: light
   areas: Test Area 2
@@ -673,17 +673,30 @@ async def test_assist_api_prompt(
         "When a user asks to turn on all devices of a specific type, "
         "ask user to specify an area, unless there is only one device of that type."
     )
+    dynamic_context_prompt = """You ARE equipped to answer questions about the current state of
+the home using the `GetLiveContext` tool. This is a primary function. Do not state you lack the
+functionality if the question requires live data.
+If the user asks about device existence/type (e.g., "Do I have lights in the bedroom?"): Answer
+from the static context below.
+If the user asks about the CURRENT state, value, or mode (e.g., "Is the lock locked?",
+"Is the fan on?", "What mode is the thermostat in?", "What is the temperature outside?"):
+1. Recognize this requires live data.
+2. You MUST call `GetLiveContext`. This tool will provide the needed real-time information (like temperature from the local weather, lock status, etc.).
+3. Use the tool's response** to answer the user accurately (e.g., "The temperature outside is [value from tool].").
+For general knowledge questions not about the home: Answer truthfully from internal knowledge.
+"""
     api = await llm.async_get_api(hass, "assist", llm_context)
     assert api.api_prompt == (
         f"""{first_part_prompt}
 {area_prompt}
 {no_timer_prompt}
+{dynamic_context_prompt}
 {stateless_exposed_entities_prompt}"""
     )
 
-    # Verify that the get_home_state tool returns the same results as the exposed_entities_prompt
+    # Verify that the GetLiveContext tool returns the same results as the exposed_entities_prompt
     result = await api.async_call_tool(
-        llm.ToolInput(tool_name="get_home_state", tool_args={})
+        llm.ToolInput(tool_name="GetLiveContext", tool_args={})
     )
     assert result == {
         "success": True,
@@ -701,6 +714,7 @@ async def test_assist_api_prompt(
         f"""{first_part_prompt}
 {area_prompt}
 {no_timer_prompt}
+{dynamic_context_prompt}
 {stateless_exposed_entities_prompt}"""
     )
 
@@ -716,6 +730,7 @@ async def test_assist_api_prompt(
         f"""{first_part_prompt}
 {area_prompt}
 {no_timer_prompt}
+{dynamic_context_prompt}
 {stateless_exposed_entities_prompt}"""
     )
 
@@ -727,6 +742,7 @@ async def test_assist_api_prompt(
     assert api.api_prompt == (
         f"""{first_part_prompt}
 {area_prompt}
+{dynamic_context_prompt}
 {stateless_exposed_entities_prompt}"""
     )
 
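For integrations that register their own tools against this helper, the rename also shows the pattern the new prompt leans on: a CamelCase name the model can call verbatim, a description that says when to call the tool rather than just what it returns, and an async_call that answers in the same success/error shape. Below is a rough sketch of a custom tool in that style; GetEnergyPricesTool and its data source are hypothetical, and the async_call parameter list is assumed from the Tool base class in homeassistant/helpers/llm.py at this point in core, so verify it against the file before copying.

from homeassistant.core import HomeAssistant
from homeassistant.helpers import llm


class GetEnergyPricesTool(llm.Tool):
    """Hypothetical tool following the GetLiveContext naming and description style."""

    # CamelCase, so the model can reference the tool exactly as written.
    name = "GetEnergyPrices"
    # Phrase the description as guidance on when to call the tool.
    description = (
        "Use this tool when the user asks about the CURRENT electricity price, "
        "and the answer needs real-time data not present in the static overview. "
    )

    async def async_call(
        self,
        hass: HomeAssistant,
        tool_input: llm.ToolInput,
        llm_context: llm.LLMContext,
    ) -> dict:
        """Return live data in the same success/error payload shape as core tools."""
        prices = hass.data.get("example_energy_prices")  # hypothetical data source
        if not prices:
            return {"success": False, "error": "No live price data is available."}
        return {"success": True, "result": prices}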