diff --git a/CODEOWNERS b/CODEOWNERS
index e90def993d2..b53e0a929bb 100644
--- a/CODEOWNERS
+++ b/CODEOWNERS
@@ -108,6 +108,8 @@ build.json @home-assistant/supervisor
 /tests/components/anova/ @Lash-L
 /homeassistant/components/anthemav/ @hyralex
 /tests/components/anthemav/ @hyralex
+/homeassistant/components/anthropic/ @Shulyaka
+/tests/components/anthropic/ @Shulyaka
 /homeassistant/components/aosmith/ @bdr99
 /tests/components/aosmith/ @bdr99
 /homeassistant/components/apache_kafka/ @bachya
diff --git a/homeassistant/components/anthropic/__init__.py b/homeassistant/components/anthropic/__init__.py
new file mode 100644
index 00000000000..aa6cf509fa1
--- /dev/null
+++ b/homeassistant/components/anthropic/__init__.py
@@ -0,0 +1,46 @@
+"""The Anthropic integration."""
+
+from __future__ import annotations
+
+import anthropic
+
+from homeassistant.config_entries import ConfigEntry
+from homeassistant.const import CONF_API_KEY, Platform
+from homeassistant.core import HomeAssistant
+from homeassistant.exceptions import ConfigEntryNotReady
+from homeassistant.helpers import config_validation as cv
+
+from .const import DOMAIN, LOGGER
+
+PLATFORMS = (Platform.CONVERSATION,)
+CONFIG_SCHEMA = cv.config_entry_only_config_schema(DOMAIN)
+
+type AnthropicConfigEntry = ConfigEntry[anthropic.AsyncClient]
+
+
+async def async_setup_entry(hass: HomeAssistant, entry: AnthropicConfigEntry) -> bool:
+    """Set up Anthropic from a config entry."""
+    client = anthropic.AsyncAnthropic(api_key=entry.data[CONF_API_KEY])
+    try:
+        await client.messages.create(
+            model="claude-3-haiku-20240307",
+            max_tokens=1,
+            messages=[{"role": "user", "content": "Hi"}],
+            timeout=10.0,
+        )
+    except anthropic.AuthenticationError as err:
+        LOGGER.error("Invalid API key: %s", err)
+        return False
+    except anthropic.AnthropicError as err:
+        raise ConfigEntryNotReady(err) from err
+
+    entry.runtime_data = client
+
+    await hass.config_entries.async_forward_entry_setups(entry, PLATFORMS)
+
+    return True
+
+
+async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
+    """Unload Anthropic."""
+    return await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
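A note on the pattern above: async_setup_entry validates the key with the cheapest possible request, a one-word user message capped at max_tokens=1 against the small Haiku model, and distinguishes a permanently bad key (setup fails) from transient errors (ConfigEntryNotReady, so Home Assistant retries). The same probe works outside Home Assistant; a minimal sketch, assuming the anthropic package is installed and ANTHROPIC_API_KEY is exported:

    # Minimal sketch of the key-validation probe used in async_setup_entry.
    # Assumes `pip install anthropic` and the ANTHROPIC_API_KEY env variable.
    import asyncio

    import anthropic


    async def check_key() -> bool:
        client = anthropic.AsyncAnthropic()  # reads ANTHROPIC_API_KEY from env
        try:
            # One short input message, one output token: the cheapest probe.
            await client.messages.create(
                model="claude-3-haiku-20240307",
                max_tokens=1,
                messages=[{"role": "user", "content": "Hi"}],
                timeout=10.0,
            )
        except anthropic.AuthenticationError:
            return False  # bad key: permanent, don't retry
        except anthropic.AnthropicError:
            raise  # transient problem: let the caller retry later
        return True


    if __name__ == "__main__":
        print(asyncio.run(check_key()))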
diff --git a/homeassistant/components/anthropic/config_flow.py b/homeassistant/components/anthropic/config_flow.py
new file mode 100644
index 00000000000..01e16ec5350
--- /dev/null
+++ b/homeassistant/components/anthropic/config_flow.py
@@ -0,0 +1,210 @@
+"""Config flow for Anthropic integration."""
+
+from __future__ import annotations
+
+import logging
+from types import MappingProxyType
+from typing import Any
+
+import anthropic
+import voluptuous as vol
+
+from homeassistant.config_entries import (
+    ConfigEntry,
+    ConfigFlow,
+    ConfigFlowResult,
+    OptionsFlow,
+)
+from homeassistant.const import CONF_API_KEY, CONF_LLM_HASS_API
+from homeassistant.core import HomeAssistant
+from homeassistant.helpers import llm
+from homeassistant.helpers.selector import (
+    NumberSelector,
+    NumberSelectorConfig,
+    SelectOptionDict,
+    SelectSelector,
+    SelectSelectorConfig,
+    TemplateSelector,
+)
+
+from .const import (
+    CONF_CHAT_MODEL,
+    CONF_MAX_TOKENS,
+    CONF_PROMPT,
+    CONF_RECOMMENDED,
+    CONF_TEMPERATURE,
+    DOMAIN,
+    RECOMMENDED_CHAT_MODEL,
+    RECOMMENDED_MAX_TOKENS,
+    RECOMMENDED_TEMPERATURE,
+)
+
+_LOGGER = logging.getLogger(__name__)
+
+STEP_USER_DATA_SCHEMA = vol.Schema(
+    {
+        vol.Required(CONF_API_KEY): str,
+    }
+)
+
+RECOMMENDED_OPTIONS = {
+    CONF_RECOMMENDED: True,
+    CONF_LLM_HASS_API: llm.LLM_API_ASSIST,
+    CONF_PROMPT: llm.DEFAULT_INSTRUCTIONS_PROMPT,
+}
+
+
+async def validate_input(hass: HomeAssistant, data: dict[str, Any]) -> None:
+    """Validate the user input allows us to connect.
+
+    Data has the keys from STEP_USER_DATA_SCHEMA with values provided by the user.
+    """
+    client = anthropic.AsyncAnthropic(api_key=data[CONF_API_KEY])
+    await client.messages.create(
+        model="claude-3-haiku-20240307",
+        max_tokens=1,
+        messages=[{"role": "user", "content": "Hi"}],
+        timeout=10.0,
+    )
+
+
+class AnthropicConfigFlow(ConfigFlow, domain=DOMAIN):
+    """Handle a config flow for Anthropic."""
+
+    VERSION = 1
+
+    async def async_step_user(
+        self, user_input: dict[str, Any] | None = None
+    ) -> ConfigFlowResult:
+        """Handle the initial step."""
+        errors = {}
+
+        if user_input is not None:
+            try:
+                await validate_input(self.hass, user_input)
+            except anthropic.APITimeoutError:
+                errors["base"] = "timeout_connect"
+            except anthropic.APIConnectionError:
+                errors["base"] = "cannot_connect"
+            except anthropic.APIStatusError as e:
+                if isinstance(e.body, dict):
+                    errors["base"] = e.body.get("error", {}).get("type", "unknown")
+                else:
+                    errors["base"] = "unknown"
+            except Exception:
+                _LOGGER.exception("Unexpected exception")
+                errors["base"] = "unknown"
+            else:
+                return self.async_create_entry(
+                    title="Claude",
+                    data=user_input,
+                    options=RECOMMENDED_OPTIONS,
+                )
+
+        return self.async_show_form(
+            step_id="user", data_schema=STEP_USER_DATA_SCHEMA, errors=errors or None
+        )
+
+    @staticmethod
+    def async_get_options_flow(
+        config_entry: ConfigEntry,
+    ) -> OptionsFlow:
+        """Create the options flow."""
+        return AnthropicOptionsFlow(config_entry)
+
+
+class AnthropicOptionsFlow(OptionsFlow):
+    """Anthropic config flow options handler."""
+
+    def __init__(self, config_entry: ConfigEntry) -> None:
+        """Initialize options flow."""
+        self.config_entry = config_entry
+        self.last_rendered_recommended = config_entry.options.get(
+            CONF_RECOMMENDED, False
+        )
+
+    async def async_step_init(
+        self, user_input: dict[str, Any] | None = None
+    ) -> ConfigFlowResult:
+        """Manage the options."""
+        options: dict[str, Any] | MappingProxyType[str, Any] = self.config_entry.options
+
+        if user_input is not None:
+            if user_input[CONF_RECOMMENDED] == self.last_rendered_recommended:
+                if user_input[CONF_LLM_HASS_API] == "none":
+                    user_input.pop(CONF_LLM_HASS_API)
+                return self.async_create_entry(title="", data=user_input)
+
+            # Re-render the options again, now with the recommended options shown/hidden
+            self.last_rendered_recommended = user_input[CONF_RECOMMENDED]
+
+            options = {
+                CONF_RECOMMENDED: user_input[CONF_RECOMMENDED],
+                CONF_PROMPT: user_input[CONF_PROMPT],
+                CONF_LLM_HASS_API: user_input[CONF_LLM_HASS_API],
+            }
+
+        suggested_values = options.copy()
+        if not suggested_values.get(CONF_PROMPT):
+            suggested_values[CONF_PROMPT] = llm.DEFAULT_INSTRUCTIONS_PROMPT
+
+        schema = self.add_suggested_values_to_schema(
+            vol.Schema(anthropic_config_option_schema(self.hass, options)),
+            suggested_values,
+        )
+
+        return self.async_show_form(
+            step_id="init",
+            data_schema=schema,
+        )
+
+
+def anthropic_config_option_schema(
+    hass: HomeAssistant,
+    options: dict[str, Any] | MappingProxyType[str, Any],
+) -> dict:
+    """Return a schema for Anthropic completion options."""
+    hass_apis: list[SelectOptionDict] = [
+        SelectOptionDict(
+            label="No control",
+            value="none",
+        )
+    ]
+    hass_apis.extend(
+        SelectOptionDict(
+            label=api.name,
+            value=api.id,
+        )
+        for api in llm.async_get_apis(hass)
+    )
+
+    schema = {
+        vol.Optional(CONF_PROMPT): TemplateSelector(),
+        vol.Optional(CONF_LLM_HASS_API, default="none"): SelectSelector(
+            SelectSelectorConfig(options=hass_apis)
+        ),
+        vol.Required(
+            CONF_RECOMMENDED, default=options.get(CONF_RECOMMENDED, False)
+        ): bool,
+    }
+
+    if options.get(CONF_RECOMMENDED):
+        return schema
+
+    schema.update(
+        {
+            vol.Optional(
+                CONF_CHAT_MODEL,
+                default=RECOMMENDED_CHAT_MODEL,
+            ): str,
+            vol.Optional(
+                CONF_MAX_TOKENS,
+                default=RECOMMENDED_MAX_TOKENS,
+            ): int,
+            vol.Optional(
+                CONF_TEMPERATURE,
+                default=RECOMMENDED_TEMPERATURE,
+            ): NumberSelector(NumberSelectorConfig(min=0, max=1, step=0.05)),
+        }
+    )
+    return schema
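Worth noting in the options flow above: async_step_init deliberately does not save when the "recommended" checkbox changed; it re-renders the form once (tracked via last_rendered_recommended) so the advanced fields can appear or disappear before anything is persisted. The schema itself is just a dict built conditionally. A simplified sketch of that idea with plain voluptuous, where str and float stand in for the Home Assistant selectors:

    # Sketch of the conditional options schema, using plain voluptuous.
    # Field names mirror const.py; the selector types are simplified.
    import voluptuous as vol

    CONF_RECOMMENDED = "recommended"
    CONF_PROMPT = "prompt"
    CONF_TEMPERATURE = "temperature"


    def build_schema(options: dict) -> vol.Schema:
        """Build the options schema; advanced fields only when not recommended."""
        schema: dict = {
            vol.Optional(CONF_PROMPT): str,
            vol.Required(
                CONF_RECOMMENDED, default=options.get(CONF_RECOMMENDED, False)
            ): bool,
        }
        if not options.get(CONF_RECOMMENDED):
            # Advanced field, mirroring chat_model/max_tokens/temperature.
            schema[vol.Optional(CONF_TEMPERATURE, default=1.0)] = vol.Coerce(float)
        return vol.Schema(schema)


    # Two fields with the preset on, three with it off.
    print(len(build_schema({CONF_RECOMMENDED: True}).schema))
    print(len(build_schema({CONF_RECOMMENDED: False}).schema))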
diff --git a/homeassistant/components/anthropic/const.py b/homeassistant/components/anthropic/const.py
new file mode 100644
index 00000000000..4ccf2c88faa
--- /dev/null
+++ b/homeassistant/components/anthropic/const.py
@@ -0,0 +1,15 @@
+"""Constants for the Anthropic integration."""
+
+import logging
+
+DOMAIN = "anthropic"
+LOGGER = logging.getLogger(__package__)
+
+CONF_RECOMMENDED = "recommended"
+CONF_PROMPT = "prompt"
+CONF_CHAT_MODEL = "chat_model"
+RECOMMENDED_CHAT_MODEL = "claude-3-5-sonnet-20240620"
+CONF_MAX_TOKENS = "max_tokens"
+RECOMMENDED_MAX_TOKENS = 1024
+CONF_TEMPERATURE = "temperature"
+RECOMMENDED_TEMPERATURE = 1.0
diff --git a/homeassistant/components/anthropic/conversation.py b/homeassistant/components/anthropic/conversation.py
new file mode 100644
index 00000000000..92a09ad8a10
--- /dev/null
+++ b/homeassistant/components/anthropic/conversation.py
@@ -0,0 +1,301 @@
+"""Conversation support for Anthropic."""
+
+from collections.abc import Callable
+import json
+from typing import Any, Literal, cast
+
+import anthropic
+from anthropic._types import NOT_GIVEN
+from anthropic.types import (
+    Message,
+    MessageParam,
+    TextBlock,
+    TextBlockParam,
+    ToolParam,
+    ToolResultBlockParam,
+    ToolUseBlock,
+    ToolUseBlockParam,
+)
+import voluptuous as vol
+from voluptuous_openapi import convert
+
+from homeassistant.components import conversation
+from homeassistant.components.conversation import trace
+from homeassistant.const import CONF_LLM_HASS_API, MATCH_ALL
+from homeassistant.core import HomeAssistant
+from homeassistant.exceptions import HomeAssistantError, TemplateError
+from homeassistant.helpers import device_registry as dr, intent, llm, template
+from homeassistant.helpers.entity_platform import AddEntitiesCallback
+from homeassistant.util import ulid
+
+from . import AnthropicConfigEntry
+from .const import (
+    CONF_CHAT_MODEL,
+    CONF_MAX_TOKENS,
+    CONF_PROMPT,
+    CONF_TEMPERATURE,
+    DOMAIN,
+    LOGGER,
+    RECOMMENDED_CHAT_MODEL,
+    RECOMMENDED_MAX_TOKENS,
+    RECOMMENDED_TEMPERATURE,
+)
+
+# Max number of back and forth with the LLM to generate a response
+MAX_TOOL_ITERATIONS = 10
+
+
+async def async_setup_entry(
+    hass: HomeAssistant,
+    config_entry: AnthropicConfigEntry,
+    async_add_entities: AddEntitiesCallback,
+) -> None:
+    """Set up conversation entities."""
+    agent = AnthropicConversationEntity(config_entry)
+    async_add_entities([agent])
+
+
+def _format_tool(
+    tool: llm.Tool, custom_serializer: Callable[[Any], Any] | None
+) -> ToolParam:
+    """Format tool specification."""
+    return ToolParam(
+        name=tool.name,
+        description=tool.description or "",
+        input_schema=convert(tool.parameters, custom_serializer=custom_serializer),
+    )
+
+
+def _message_convert(
+    message: Message,
+) -> MessageParam:
+    """Convert from class to TypedDict."""
+    param_content: list[TextBlockParam | ToolUseBlockParam] = []
+
+    for message_content in message.content:
+        if isinstance(message_content, TextBlock):
+            param_content.append(TextBlockParam(type="text", text=message_content.text))
+        elif isinstance(message_content, ToolUseBlock):
+            param_content.append(
+                ToolUseBlockParam(
+                    type="tool_use",
+                    id=message_content.id,
+                    name=message_content.name,
+                    input=message_content.input,
+                )
+            )
+
+    return MessageParam(role=message.role, content=param_content)
+
+
+class AnthropicConversationEntity(
+    conversation.ConversationEntity, conversation.AbstractConversationAgent
+):
+    """Anthropic conversation agent."""
+
+    _attr_has_entity_name = True
+    _attr_name = None
+
+    def __init__(self, entry: AnthropicConfigEntry) -> None:
+        """Initialize the agent."""
+        self.entry = entry
+        self.history: dict[str, list[MessageParam]] = {}
+        self._attr_unique_id = entry.entry_id
+        self._attr_device_info = dr.DeviceInfo(
+            identifiers={(DOMAIN, entry.entry_id)},
+            manufacturer="Anthropic",
+            model="Claude",
+            entry_type=dr.DeviceEntryType.SERVICE,
+        )
+        if self.entry.options.get(CONF_LLM_HASS_API):
+            self._attr_supported_features = (
+                conversation.ConversationEntityFeature.CONTROL
+            )
+
+    @property
+    def supported_languages(self) -> list[str] | Literal["*"]:
+        """Return a list of supported languages."""
+        return MATCH_ALL
+
+    async def async_process(
+        self, user_input: conversation.ConversationInput
+    ) -> conversation.ConversationResult:
+        """Process a sentence."""
+        options = self.entry.options
+        intent_response = intent.IntentResponse(language=user_input.language)
+        llm_api: llm.APIInstance | None = None
+        tools: list[ToolParam] | None = None
+        user_name: str | None = None
+        llm_context = llm.LLMContext(
+            platform=DOMAIN,
+            context=user_input.context,
+            user_prompt=user_input.text,
+            language=user_input.language,
+            assistant=conversation.DOMAIN,
+            device_id=user_input.device_id,
+        )
+
+        if options.get(CONF_LLM_HASS_API):
+            try:
+                llm_api = await llm.async_get_api(
+                    self.hass,
+                    options[CONF_LLM_HASS_API],
+                    llm_context,
+                )
+            except HomeAssistantError as err:
+                LOGGER.error("Error getting LLM API: %s", err)
+                intent_response.async_set_error(
+                    intent.IntentResponseErrorCode.UNKNOWN,
+                    f"Error preparing LLM API: {err}",
+                )
+                return conversation.ConversationResult(
+                    response=intent_response, conversation_id=user_input.conversation_id
+                )
+            tools = [
+                _format_tool(tool, llm_api.custom_serializer) for tool in llm_api.tools
+            ]
+
+        if user_input.conversation_id is None:
+            conversation_id = ulid.ulid_now()
+            messages = []
+
+        elif user_input.conversation_id in self.history:
+            conversation_id = user_input.conversation_id
+            messages = self.history[conversation_id]
+
+        else:
+            # Conversation IDs are ULIDs. We generate a new one if not provided.
+            # If an old ULID is passed in, we will generate a new one to indicate
+            # a new conversation was started. If the user picks their own, they
+            # want to track a conversation and we respect it.
+            try:
+                ulid.ulid_to_bytes(user_input.conversation_id)
+                conversation_id = ulid.ulid_now()
+            except ValueError:
+                conversation_id = user_input.conversation_id
+
+            messages = []
+
+        if (
+            user_input.context
+            and user_input.context.user_id
+            and (
+                user := await self.hass.auth.async_get_user(user_input.context.user_id)
+            )
+        ):
+            user_name = user.name
+
+        try:
+            prompt_parts = [
+                template.Template(
+                    llm.BASE_PROMPT
+                    + options.get(CONF_PROMPT, llm.DEFAULT_INSTRUCTIONS_PROMPT),
+                    self.hass,
+                ).async_render(
+                    {
+                        "ha_name": self.hass.config.location_name,
+                        "user_name": user_name,
+                        "llm_context": llm_context,
+                    },
+                    parse_result=False,
+                )
+            ]
+
+        except TemplateError as err:
+            LOGGER.error("Error rendering prompt: %s", err)
+            intent_response.async_set_error(
+                intent.IntentResponseErrorCode.UNKNOWN,
+                f"Sorry, I had a problem with my template: {err}",
+            )
+            return conversation.ConversationResult(
+                response=intent_response, conversation_id=conversation_id
+            )
+
+        if llm_api:
+            prompt_parts.append(llm_api.api_prompt)
+
+        prompt = "\n".join(prompt_parts)
+
+        # Create a copy of the variable because we attach it to the trace
+        messages = [*messages, MessageParam(role="user", content=user_input.text)]
+
+        LOGGER.debug("Prompt: %s", messages)
+        LOGGER.debug("Tools: %s", tools)
+        trace.async_conversation_trace_append(
+            trace.ConversationTraceEventType.AGENT_DETAIL,
+            {"system": prompt, "messages": messages},
+        )
+
+        client = self.entry.runtime_data
+
+        # To prevent infinite loops, we limit the number of iterations
+        for _iteration in range(MAX_TOOL_ITERATIONS):
+            try:
+                response = await client.messages.create(
+                    model=options.get(CONF_CHAT_MODEL, RECOMMENDED_CHAT_MODEL),
+                    messages=messages,
+                    tools=tools or NOT_GIVEN,
+                    max_tokens=options.get(CONF_MAX_TOKENS, RECOMMENDED_MAX_TOKENS),
+                    system=prompt,
+                    temperature=options.get(CONF_TEMPERATURE, RECOMMENDED_TEMPERATURE),
+                )
+            except anthropic.AnthropicError as err:
+                intent_response.async_set_error(
+                    intent.IntentResponseErrorCode.UNKNOWN,
+                    f"Sorry, I had a problem talking to Anthropic: {err}",
+                )
+                return conversation.ConversationResult(
+                    response=intent_response, conversation_id=conversation_id
+                )
+
+            LOGGER.debug("Response %s", response)
+
+            messages.append(_message_convert(response))
+
+            if response.stop_reason != "tool_use" or not llm_api:
+                break
+
+            tool_results: list[ToolResultBlockParam] = []
+            for tool_call in response.content:
+                if isinstance(tool_call, TextBlock):
+                    LOGGER.info(tool_call.text)
+
+                if not isinstance(tool_call, ToolUseBlock):
+                    continue
+
+                tool_input = llm.ToolInput(
+                    tool_name=tool_call.name,
+                    tool_args=cast(dict[str, Any], tool_call.input),
+                )
+                LOGGER.debug(
+                    "Tool call: %s(%s)", tool_input.tool_name, tool_input.tool_args
+                )
+
+                try:
+                    tool_response = await llm_api.async_call_tool(tool_input)
+                except (HomeAssistantError, vol.Invalid) as e:
+                    tool_response = {"error": type(e).__name__}
+                    if str(e):
+                        tool_response["error_text"] = str(e)
+
+                LOGGER.debug("Tool response: %s", tool_response)
+                tool_results.append(
+                    ToolResultBlockParam(
type="tool_result", + tool_use_id=tool_call.id, + content=json.dumps(tool_response), + ) + ) + + messages.append(MessageParam(role="user", content=tool_results)) + + self.history[conversation_id] = messages + + for content in response.content: + if isinstance(content, TextBlock): + intent_response.async_set_speech(content.text) + break + + return conversation.ConversationResult( + response=intent_response, conversation_id=conversation_id + ) diff --git a/homeassistant/components/anthropic/manifest.json b/homeassistant/components/anthropic/manifest.json new file mode 100644 index 00000000000..7d51c458e4d --- /dev/null +++ b/homeassistant/components/anthropic/manifest.json @@ -0,0 +1,12 @@ +{ + "domain": "anthropic", + "name": "Anthropic Conversation", + "after_dependencies": ["assist_pipeline", "intent"], + "codeowners": ["@Shulyaka"], + "config_flow": true, + "dependencies": ["conversation"], + "documentation": "https://www.home-assistant.io/integrations/anthropic", + "integration_type": "service", + "iot_class": "cloud_polling", + "requirements": ["anthropic==0.31.2"] +} diff --git a/homeassistant/components/anthropic/strings.json b/homeassistant/components/anthropic/strings.json new file mode 100644 index 00000000000..9550a1a6672 --- /dev/null +++ b/homeassistant/components/anthropic/strings.json @@ -0,0 +1,34 @@ +{ + "config": { + "step": { + "user": { + "data": { + "api_key": "[%key:common::config_flow::data::api_key%]" + } + } + }, + "error": { + "cannot_connect": "[%key:common::config_flow::error::cannot_connect%]", + "timeout_connect": "[%key:common::config_flow::error::timeout_connect%]", + "authentication_error": "[%key:common::config_flow::error::invalid_auth%]", + "unknown": "[%key:common::config_flow::error::unknown%]" + } + }, + "options": { + "step": { + "init": { + "data": { + "prompt": "Instructions", + "chat_model": "[%key:common::generic::model%]", + "max_tokens": "Maximum tokens to return in response", + "temperature": "Temperature", + "llm_hass_api": "[%key:common::config_flow::data::llm_hass_api%]", + "recommended": "Recommended model settings" + }, + "data_description": { + "prompt": "Instruct how the LLM should respond. This can be a template." 
diff --git a/homeassistant/components/anthropic/manifest.json b/homeassistant/components/anthropic/manifest.json
new file mode 100644
index 00000000000..7d51c458e4d
--- /dev/null
+++ b/homeassistant/components/anthropic/manifest.json
@@ -0,0 +1,12 @@
+{
+  "domain": "anthropic",
+  "name": "Anthropic Conversation",
+  "after_dependencies": ["assist_pipeline", "intent"],
+  "codeowners": ["@Shulyaka"],
+  "config_flow": true,
+  "dependencies": ["conversation"],
+  "documentation": "https://www.home-assistant.io/integrations/anthropic",
+  "integration_type": "service",
+  "iot_class": "cloud_polling",
+  "requirements": ["anthropic==0.31.2"]
+}
diff --git a/homeassistant/components/anthropic/strings.json b/homeassistant/components/anthropic/strings.json
new file mode 100644
index 00000000000..9550a1a6672
--- /dev/null
+++ b/homeassistant/components/anthropic/strings.json
@@ -0,0 +1,34 @@
+{
+  "config": {
+    "step": {
+      "user": {
+        "data": {
+          "api_key": "[%key:common::config_flow::data::api_key%]"
+        }
+      }
+    },
+    "error": {
+      "cannot_connect": "[%key:common::config_flow::error::cannot_connect%]",
+      "timeout_connect": "[%key:common::config_flow::error::timeout_connect%]",
+      "authentication_error": "[%key:common::config_flow::error::invalid_auth%]",
+      "unknown": "[%key:common::config_flow::error::unknown%]"
+    }
+  },
+  "options": {
+    "step": {
+      "init": {
+        "data": {
+          "prompt": "Instructions",
+          "chat_model": "[%key:common::generic::model%]",
+          "max_tokens": "Maximum tokens to return in response",
+          "temperature": "Temperature",
+          "llm_hass_api": "[%key:common::config_flow::data::llm_hass_api%]",
+          "recommended": "Recommended model settings"
+        },
+        "data_description": {
+          "prompt": "Instruct how the LLM should respond. This can be a template."
+        }
+      }
+    }
+  }
+}
diff --git a/homeassistant/generated/config_flows.py b/homeassistant/generated/config_flows.py
index 0c37cf9c412..67cffd25f28 100644
--- a/homeassistant/generated/config_flows.py
+++ b/homeassistant/generated/config_flows.py
@@ -53,6 +53,7 @@ FLOWS = {
     "androidtv_remote",
     "anova",
     "anthemav",
+    "anthropic",
     "aosmith",
     "apcupsd",
     "apple_tv",
diff --git a/homeassistant/generated/integrations.json b/homeassistant/generated/integrations.json
index 3cc3ea71df9..25a78e30017 100644
--- a/homeassistant/generated/integrations.json
+++ b/homeassistant/generated/integrations.json
@@ -315,6 +315,12 @@
       "config_flow": true,
       "iot_class": "local_push"
     },
+    "anthropic": {
+      "name": "Anthropic Conversation",
+      "integration_type": "service",
+      "config_flow": true,
+      "iot_class": "cloud_polling"
+    },
     "anwb_energie": {
       "name": "ANWB Energie",
       "integration_type": "virtual",
diff --git a/requirements_all.txt b/requirements_all.txt
index 0bf4b77e9d2..1df7cc12072 100644
--- a/requirements_all.txt
+++ b/requirements_all.txt
@@ -451,6 +451,9 @@ anova-wifi==0.17.0
 # homeassistant.components.anthemav
 anthemav==1.4.1
 
+# homeassistant.components.anthropic
+anthropic==0.31.2
+
 # homeassistant.components.weatherkit
 apple_weatherkit==1.1.2
diff --git a/requirements_test_all.txt b/requirements_test_all.txt
index f9670987b70..5831f7c23cf 100644
--- a/requirements_test_all.txt
+++ b/requirements_test_all.txt
@@ -424,6 +424,9 @@ anova-wifi==0.17.0
 # homeassistant.components.anthemav
 anthemav==1.4.1
 
+# homeassistant.components.anthropic
+anthropic==0.31.2
+
 # homeassistant.components.weatherkit
 apple_weatherkit==1.1.2
diff --git a/tests/components/anthropic/__init__.py b/tests/components/anthropic/__init__.py
new file mode 100644
index 00000000000..99d7a5785a8
--- /dev/null
+++ b/tests/components/anthropic/__init__.py
@@ -0,0 +1 @@
+"""Tests for the Anthropic integration."""
diff --git a/tests/components/anthropic/conftest.py b/tests/components/anthropic/conftest.py
new file mode 100644
index 00000000000..0a5ad5e5ac6
--- /dev/null
+++ b/tests/components/anthropic/conftest.py
@@ -0,0 +1,51 @@
+"""Tests helpers."""
+
+from unittest.mock import AsyncMock, patch
+
+import pytest
+
+from homeassistant.const import CONF_LLM_HASS_API
+from homeassistant.core import HomeAssistant
+from homeassistant.helpers import llm
+from homeassistant.setup import async_setup_component
+
+from tests.common import MockConfigEntry
+
+
+@pytest.fixture
+def mock_config_entry(hass):
+    """Mock a config entry."""
+    entry = MockConfigEntry(
+        title="Claude",
+        domain="anthropic",
+        data={
+            "api_key": "bla",
+        },
+    )
+    entry.add_to_hass(hass)
+    return entry
+
+
+@pytest.fixture
+def mock_config_entry_with_assist(hass, mock_config_entry):
+    """Mock a config entry with assist."""
+    hass.config_entries.async_update_entry(
+        mock_config_entry, options={CONF_LLM_HASS_API: llm.LLM_API_ASSIST}
+    )
+    return mock_config_entry
+
+
+@pytest.fixture
+async def mock_init_component(hass, mock_config_entry):
+    """Initialize integration."""
+    with patch(
+        "anthropic.resources.messages.AsyncMessages.create", new_callable=AsyncMock
+    ):
+        assert await async_setup_component(hass, "anthropic", {})
+        await hass.async_block_till_done()
+
+
+@pytest.fixture(autouse=True)
+async def setup_ha(hass: HomeAssistant) -> None:
+    """Set up Home Assistant."""
+    assert await async_setup_component(hass, "homeassistant", {})
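All of the fixtures above share one trick: patching anthropic.resources.messages.AsyncMessages.create rather than a client instance, which intercepts the call no matter which AsyncAnthropic object the integration constructs. A minimal sketch of that patch target outside Home Assistant (assumes pytest with pytest-asyncio; the Message construction mirrors the tests below):

    # Sketch: intercepting the Anthropic SDK at the resource class.
    # Assumes `pip install anthropic pytest pytest-asyncio`.
    from unittest.mock import AsyncMock, patch

    import anthropic
    from anthropic.types import Message, TextBlock, Usage
    import pytest


    @pytest.mark.asyncio
    async def test_patched_create() -> None:
        canned = Message(
            type="message",
            id="msg_1234567890ABCDEFGHIJKLMN",
            content=[TextBlock(type="text", text="Hello!")],
            model="claude-3-5-sonnet-20240620",
            role="assistant",
            stop_reason="end_turn",
            stop_sequence=None,
            usage=Usage(input_tokens=8, output_tokens=12),
        )
        # Patching the resource class catches every AsyncAnthropic instance.
        with patch(
            "anthropic.resources.messages.AsyncMessages.create",
            new_callable=AsyncMock,
            return_value=canned,
        ):
            client = anthropic.AsyncAnthropic(api_key="test")
            response = await client.messages.create(
                model="claude-3-5-sonnet-20240620",
                max_tokens=1024,
                messages=[{"role": "user", "content": "Hi"}],
            )
        assert response.content[0].text == "Hello!"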
diff --git a/tests/components/anthropic/snapshots/test_conversation.ambr b/tests/components/anthropic/snapshots/test_conversation.ambr
new file mode 100644
index 00000000000..e4dd7cd00bb
--- /dev/null
+++ b/tests/components/anthropic/snapshots/test_conversation.ambr
@@ -0,0 +1,34 @@
+# serializer version: 1
+# name: test_unknown_hass_api
+  dict({
+    'conversation_id': None,
+    'response': IntentResponse(
+      card=dict({
+      }),
+      error_code=<IntentResponseErrorCode.UNKNOWN: 'unknown'>,
+      failed_results=list([
+      ]),
+      intent=None,
+      intent_targets=list([
+      ]),
+      language='en',
+      matched_states=list([
+      ]),
+      reprompt=dict({
+      }),
+      response_type=<IntentResponseType.ERROR: 'error'>,
+      speech=dict({
+        'plain': dict({
+          'extra_data': None,
+          'speech': 'Error preparing LLM API: API non-existing not found',
+        }),
+      }),
+      speech_slots=dict({
+      }),
+      success_results=list([
+      ]),
+      unmatched_states=list([
+      ]),
+    ),
+  })
+# ---
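The .ambr file above is a syrupy snapshot: test_unknown_hass_api asserts `result == snapshot`, and syrupy serializes the whole ConversationResult on the first run. When the expected output legitimately changes, the file is typically regenerated by rerunning the test with syrupy's --snapshot-update flag rather than edited by hand.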
diff --git a/tests/components/anthropic/test_config_flow.py b/tests/components/anthropic/test_config_flow.py
new file mode 100644
index 00000000000..df27352b7b2
--- /dev/null
+++ b/tests/components/anthropic/test_config_flow.py
@@ -0,0 +1,239 @@
+"""Test the Anthropic config flow."""
+
+from unittest.mock import AsyncMock, patch
+
+from anthropic import (
+    APIConnectionError,
+    APIResponseValidationError,
+    APITimeoutError,
+    AuthenticationError,
+    BadRequestError,
+    InternalServerError,
+)
+from httpx import URL, Request, Response
+import pytest
+
+from homeassistant import config_entries
+from homeassistant.components.anthropic.config_flow import RECOMMENDED_OPTIONS
+from homeassistant.components.anthropic.const import (
+    CONF_CHAT_MODEL,
+    CONF_MAX_TOKENS,
+    CONF_PROMPT,
+    CONF_RECOMMENDED,
+    CONF_TEMPERATURE,
+    DOMAIN,
+    RECOMMENDED_CHAT_MODEL,
+    RECOMMENDED_MAX_TOKENS,
+)
+from homeassistant.const import CONF_LLM_HASS_API
+from homeassistant.core import HomeAssistant
+from homeassistant.data_entry_flow import FlowResultType
+
+from tests.common import MockConfigEntry
+
+
+async def test_form(hass: HomeAssistant) -> None:
+    """Test we get the form."""
+    # Pretend we already set up a config entry.
+    hass.config.components.add("anthropic")
+    MockConfigEntry(
+        domain=DOMAIN,
+        state=config_entries.ConfigEntryState.LOADED,
+    ).add_to_hass(hass)
+
+    result = await hass.config_entries.flow.async_init(
+        DOMAIN, context={"source": config_entries.SOURCE_USER}
+    )
+    assert result["type"] is FlowResultType.FORM
+    assert result["errors"] is None
+
+    with (
+        patch(
+            "homeassistant.components.anthropic.config_flow.anthropic.resources.messages.AsyncMessages.create",
+            new_callable=AsyncMock,
+        ),
+        patch(
+            "homeassistant.components.anthropic.async_setup_entry",
+            return_value=True,
+        ) as mock_setup_entry,
+    ):
+        result2 = await hass.config_entries.flow.async_configure(
+            result["flow_id"],
+            {
+                "api_key": "bla",
+            },
+        )
+        await hass.async_block_till_done()
+
+    assert result2["type"] is FlowResultType.CREATE_ENTRY
+    assert result2["data"] == {
+        "api_key": "bla",
+    }
+    assert result2["options"] == RECOMMENDED_OPTIONS
+    assert len(mock_setup_entry.mock_calls) == 1
+
+
+async def test_options(
+    hass: HomeAssistant, mock_config_entry, mock_init_component
+) -> None:
+    """Test the options form."""
+    options_flow = await hass.config_entries.options.async_init(
+        mock_config_entry.entry_id
+    )
+    options = await hass.config_entries.options.async_configure(
+        options_flow["flow_id"],
+        {
+            "prompt": "Speak like a pirate",
+            "max_tokens": 200,
+        },
+    )
+    await hass.async_block_till_done()
+    assert options["type"] is FlowResultType.CREATE_ENTRY
+    assert options["data"]["prompt"] == "Speak like a pirate"
+    assert options["data"]["max_tokens"] == 200
+    assert options["data"][CONF_CHAT_MODEL] == RECOMMENDED_CHAT_MODEL
+
+
+@pytest.mark.parametrize(
+    ("side_effect", "error"),
+    [
+        (APIConnectionError(request=None), "cannot_connect"),
+        (APITimeoutError(request=None), "timeout_connect"),
+        (
+            BadRequestError(
+                message="Your credit balance is too low to access the Claude API. Please go to Plans & Billing to upgrade or purchase credits.",
+                response=Response(
+                    status_code=400,
+                    request=Request(method="POST", url=URL()),
+                ),
+                body={"type": "error", "error": {"type": "invalid_request_error"}},
+            ),
+            "invalid_request_error",
+        ),
+        (
+            AuthenticationError(
+                message="invalid x-api-key",
+                response=Response(
+                    status_code=401,
+                    request=Request(method="POST", url=URL()),
+                ),
+                body={"type": "error", "error": {"type": "authentication_error"}},
+            ),
+            "authentication_error",
+        ),
+        (
+            InternalServerError(
+                message=None,
+                response=Response(
+                    status_code=500,
+                    request=Request(method="POST", url=URL()),
+                ),
+                body=None,
+            ),
+            "unknown",
+        ),
+        (
+            APIResponseValidationError(
+                response=Response(
+                    status_code=200,
+                    request=Request(method="POST", url=URL()),
+                ),
+                body=None,
+            ),
+            "unknown",
+        ),
+    ],
+)
+async def test_form_invalid_auth(hass: HomeAssistant, side_effect, error) -> None:
+    """Test we handle invalid auth."""
+    result = await hass.config_entries.flow.async_init(
+        DOMAIN, context={"source": config_entries.SOURCE_USER}
+    )
+
+    with patch(
+        "homeassistant.components.anthropic.config_flow.anthropic.resources.messages.AsyncMessages.create",
+        new_callable=AsyncMock,
+        side_effect=side_effect,
+    ):
+        result2 = await hass.config_entries.flow.async_configure(
+            result["flow_id"],
+            {
+                "api_key": "bla",
+            },
+        )
+
+    assert result2["type"] is FlowResultType.FORM
+    assert result2["errors"] == {"base": error}
+
+
+@pytest.mark.parametrize(
+    ("current_options", "new_options", "expected_options"),
+    [
+        (
+            {
+                CONF_RECOMMENDED: True,
+                CONF_LLM_HASS_API: "none",
+                CONF_PROMPT: "bla",
+            },
+            {
+                CONF_RECOMMENDED: False,
+                CONF_PROMPT: "Speak like a pirate",
+                CONF_TEMPERATURE: 0.3,
+            },
+            {
+                CONF_RECOMMENDED: False,
+                CONF_PROMPT: "Speak like a pirate",
+                CONF_TEMPERATURE: 0.3,
+                CONF_CHAT_MODEL: RECOMMENDED_CHAT_MODEL,
+                CONF_MAX_TOKENS: RECOMMENDED_MAX_TOKENS,
+            },
+        ),
+        (
+            {
+                CONF_RECOMMENDED: False,
+                CONF_PROMPT: "Speak like a pirate",
+                CONF_TEMPERATURE: 0.3,
+                CONF_CHAT_MODEL: RECOMMENDED_CHAT_MODEL,
+                CONF_MAX_TOKENS: RECOMMENDED_MAX_TOKENS,
+            },
+            {
+                CONF_RECOMMENDED: True,
+                CONF_LLM_HASS_API: "assist",
+                CONF_PROMPT: "",
+            },
+            {
+                CONF_RECOMMENDED: True,
+                CONF_LLM_HASS_API: "assist",
+                CONF_PROMPT: "",
+            },
+        ),
+    ],
+)
+async def test_options_switching(
+    hass: HomeAssistant,
+    mock_config_entry,
+    mock_init_component,
+    current_options,
+    new_options,
+    expected_options,
+) -> None:
+    """Test the options form."""
+    hass.config_entries.async_update_entry(mock_config_entry, options=current_options)
+    options_flow = await hass.config_entries.options.async_init(
+        mock_config_entry.entry_id
+    )
+    if current_options.get(CONF_RECOMMENDED) != new_options.get(CONF_RECOMMENDED):
+        options_flow = await hass.config_entries.options.async_configure(
+            options_flow["flow_id"],
+            {
+                **current_options,
+                CONF_RECOMMENDED: new_options[CONF_RECOMMENDED],
+            },
+        )
+    options = await hass.config_entries.options.async_configure(
+        options_flow["flow_id"],
+        new_options,
+    )
+    await hass.async_block_till_done()
+    assert options["type"] is FlowResultType.CREATE_ENTRY
+    assert options["data"] == expected_options
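Worth noting in test_form_invalid_auth above: for APIStatusError cases the flow derives the form error key straight from the error body, so "authentication_error" and "invalid_request_error" come from the API response itself, while anything malformed collapses to "unknown". The extraction is small enough to restate as a pure function (the same logic as in async_step_user, no Home Assistant imports):

    # The error-key extraction applied to anthropic.APIStatusError bodies,
    # restated as a pure function for illustration.
    from typing import Any


    def error_key(body: Any) -> str:
        """Map an Anthropic error body to a strings.json error key."""
        if isinstance(body, dict):
            return body.get("error", {}).get("type", "unknown")
        return "unknown"


    assert error_key({"error": {"type": "authentication_error"}}) == "authentication_error"
    assert error_key(None) == "unknown"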
diff --git a/tests/components/anthropic/test_conversation.py b/tests/components/anthropic/test_conversation.py
new file mode 100644
index 00000000000..65ede877281
--- /dev/null
+++ b/tests/components/anthropic/test_conversation.py
@@ -0,0 +1,487 @@
+"""Tests for the Anthropic integration."""
+
+from unittest.mock import AsyncMock, Mock, patch
+
+from anthropic import RateLimitError
+from anthropic.types import Message, TextBlock, ToolUseBlock, Usage
+from freezegun import freeze_time
+from httpx import URL, Request, Response
+from syrupy.assertion import SnapshotAssertion
+import voluptuous as vol
+
+from homeassistant.components import conversation
+from homeassistant.components.conversation import trace
+from homeassistant.const import CONF_LLM_HASS_API
+from homeassistant.core import Context, HomeAssistant
+from homeassistant.exceptions import HomeAssistantError
+from homeassistant.helpers import intent, llm
+from homeassistant.setup import async_setup_component
+from homeassistant.util import ulid
+
+from tests.common import MockConfigEntry
+
+
+async def test_entity(
+    hass: HomeAssistant,
+    mock_config_entry: MockConfigEntry,
+    mock_init_component,
+) -> None:
+    """Test entity properties."""
+    state = hass.states.get("conversation.claude")
+    assert state
+    assert state.attributes["supported_features"] == 0
+
+    hass.config_entries.async_update_entry(
+        mock_config_entry,
+        options={
+            **mock_config_entry.options,
+            CONF_LLM_HASS_API: "assist",
+        },
+    )
+    with patch(
+        "anthropic.resources.messages.AsyncMessages.create", new_callable=AsyncMock
+    ):
+        await hass.config_entries.async_reload(mock_config_entry.entry_id)
+
+    state = hass.states.get("conversation.claude")
+    assert state
+    assert (
+        state.attributes["supported_features"]
+        == conversation.ConversationEntityFeature.CONTROL
+    )
+
+
+async def test_error_handling(
+    hass: HomeAssistant, mock_config_entry: MockConfigEntry, mock_init_component
+) -> None:
+    """Test that we handle errors from the API."""
+    with patch(
+        "anthropic.resources.messages.AsyncMessages.create",
+        new_callable=AsyncMock,
+        side_effect=RateLimitError(
+            message=None,
+            response=Response(
+                status_code=429, request=Request(method="POST", url=URL())
+            ),
+            body=None,
+        ),
+    ):
+        result = await conversation.async_converse(
+            hass, "hello", None, Context(), agent_id="conversation.claude"
+        )
+
+    assert result.response.response_type == intent.IntentResponseType.ERROR, result
+    assert result.response.error_code == "unknown", result
+
+
+async def test_template_error(
+    hass: HomeAssistant, mock_config_entry: MockConfigEntry
+) -> None:
+    """Test that template error handling works."""
+    hass.config_entries.async_update_entry(
+        mock_config_entry,
+        options={
+            "prompt": "talk like a {% if True %}smarthome{% else %}pirate please.",
+        },
+    )
+    with patch(
+        "anthropic.resources.messages.AsyncMessages.create", new_callable=AsyncMock
+    ):
+        await hass.config_entries.async_setup(mock_config_entry.entry_id)
+        await hass.async_block_till_done()
+        result = await conversation.async_converse(
+            hass, "hello", None, Context(), agent_id="conversation.claude"
+        )
+
+    assert result.response.response_type == intent.IntentResponseType.ERROR, result
+    assert result.response.error_code == "unknown", result
+
+
+async def test_template_variables(
+    hass: HomeAssistant, mock_config_entry: MockConfigEntry
+) -> None:
+    """Test that template variables work."""
+    context = Context(user_id="12345")
+    mock_user = Mock()
+    mock_user.id = "12345"
+    mock_user.name = "Test User"
+
+    hass.config_entries.async_update_entry(
+        mock_config_entry,
+        options={
+            "prompt": (
+                "The user name is {{ user_name }}. "
+                "The user id is {{ llm_context.context.user_id }}."
+            ),
+        },
+    )
+    with (
+        patch(
+            "anthropic.resources.messages.AsyncMessages.create", new_callable=AsyncMock
+        ) as mock_create,
+        patch("homeassistant.auth.AuthManager.async_get_user", return_value=mock_user),
+    ):
+        await hass.config_entries.async_setup(mock_config_entry.entry_id)
+        await hass.async_block_till_done()
+        result = await conversation.async_converse(
+            hass, "hello", None, context, agent_id="conversation.claude"
+        )
+
+    assert (
+        result.response.response_type == intent.IntentResponseType.ACTION_DONE
+    ), result
+    assert "The user name is Test User." in mock_create.mock_calls[1][2]["system"]
+    assert "The user id is 12345." in mock_create.mock_calls[1][2]["system"]
+
+
+async def test_conversation_agent(
+    hass: HomeAssistant,
+    mock_config_entry: MockConfigEntry,
+    mock_init_component,
+) -> None:
+    """Test Anthropic Agent."""
+    agent = conversation.agent_manager.async_get_agent(hass, "conversation.claude")
+    assert agent.supported_languages == "*"
+
+
+@patch("homeassistant.components.anthropic.conversation.llm.AssistAPI._async_get_tools")
+async def test_function_call(
+    mock_get_tools,
+    hass: HomeAssistant,
+    mock_config_entry_with_assist: MockConfigEntry,
+    mock_init_component,
+) -> None:
+    """Test function call from the assistant."""
+    agent_id = "conversation.claude"
+    context = Context()
+
+    mock_tool = AsyncMock()
+    mock_tool.name = "test_tool"
+    mock_tool.description = "Test function"
+    mock_tool.parameters = vol.Schema(
+        {vol.Optional("param1", description="Test parameters"): str}
+    )
+    mock_tool.async_call.return_value = "Test response"
+
+    mock_get_tools.return_value = [mock_tool]
+
+    def completion_result(*args, messages, **kwargs):
+        for message in messages:
+            for content in message["content"]:
+                if not isinstance(content, str) and content["type"] == "tool_use":
+                    return Message(
+                        type="message",
+                        id="msg_1234567890ABCDEFGHIJKLMN",
+                        content=[
+                            TextBlock(
+                                type="text",
+                                text="I have successfully called the function",
+                            )
+                        ],
+                        model="claude-3-5-sonnet-20240620",
+                        role="assistant",
+                        stop_reason="end_turn",
+                        stop_sequence=None,
+                        usage=Usage(input_tokens=8, output_tokens=12),
+                    )
+
+        return Message(
+            type="message",
+            id="msg_1234567890ABCDEFGHIJKLMN",
+            content=[
+                TextBlock(type="text", text="Certainly, calling it now!"),
+                ToolUseBlock(
+                    type="tool_use",
+                    id="toolu_0123456789AbCdEfGhIjKlM",
+                    name="test_tool",
+                    input={"param1": "test_value"},
+                ),
+            ],
+            model="claude-3-5-sonnet-20240620",
+            role="assistant",
+            stop_reason="tool_use",
+            stop_sequence=None,
+            usage=Usage(input_tokens=8, output_tokens=12),
+        )
+
+    with (
+        patch(
+            "anthropic.resources.messages.AsyncMessages.create",
+            new_callable=AsyncMock,
+            side_effect=completion_result,
+        ) as mock_create,
+        freeze_time("2024-06-03 23:00:00"),
+    ):
+        result = await conversation.async_converse(
+            hass,
+            "Please call the test function",
+            None,
+            context,
+            agent_id=agent_id,
+        )
+
+    assert "Today's date is 2024-06-03." in mock_create.mock_calls[1][2]["system"]
+
+    assert result.response.response_type == intent.IntentResponseType.ACTION_DONE
+    assert mock_create.mock_calls[1][2]["messages"][2] == {
+        "role": "user",
+        "content": [
+            {
+                "content": '"Test response"',
+                "tool_use_id": "toolu_0123456789AbCdEfGhIjKlM",
+                "type": "tool_result",
+            }
+        ],
+    }
+    mock_tool.async_call.assert_awaited_once_with(
+        hass,
+        llm.ToolInput(
+            tool_name="test_tool",
+            tool_args={"param1": "test_value"},
+        ),
+        llm.LLMContext(
+            platform="anthropic",
+            context=context,
+            user_prompt="Please call the test function",
+            language="en",
+            assistant="conversation",
+            device_id=None,
+        ),
+    )
+
+    # Test Conversation tracing
+    traces = trace.async_get_traces()
+    assert traces
+    last_trace = traces[-1].as_dict()
+    trace_events = last_trace.get("events", [])
+    assert [event["event_type"] for event in trace_events] == [
+        trace.ConversationTraceEventType.ASYNC_PROCESS,
+        trace.ConversationTraceEventType.AGENT_DETAIL,
+        trace.ConversationTraceEventType.TOOL_CALL,
+    ]
+    # AGENT_DETAIL event contains the raw prompt passed to the model
+    detail_event = trace_events[1]
+    assert "Answer in plain text" in detail_event["data"]["system"]
+    assert "Today's date is 2024-06-03." in trace_events[1]["data"]["system"]
+
+    # Call it again, make sure we have updated prompt
+    with (
+        patch(
+            "anthropic.resources.messages.AsyncMessages.create",
+            new_callable=AsyncMock,
+            side_effect=completion_result,
+        ) as mock_create,
+        freeze_time("2024-06-04 23:00:00"),
+    ):
+        result = await conversation.async_converse(
+            hass,
+            "Please call the test function",
+            None,
+            context,
+            agent_id=agent_id,
+        )
+
+    assert "Today's date is 2024-06-04." in mock_create.mock_calls[1][2]["system"]
+    # The trace captured on the first call should still contain the old date
+    assert "Today's date is 2024-06-03." in trace_events[1]["data"]["system"]
+
+
+@patch("homeassistant.components.anthropic.conversation.llm.AssistAPI._async_get_tools")
+async def test_function_exception(
+    mock_get_tools,
+    hass: HomeAssistant,
+    mock_config_entry_with_assist: MockConfigEntry,
+    mock_init_component,
+) -> None:
+    """Test function call with exception."""
+    agent_id = "conversation.claude"
+    context = Context()
+
+    mock_tool = AsyncMock()
+    mock_tool.name = "test_tool"
+    mock_tool.description = "Test function"
+    mock_tool.parameters = vol.Schema(
+        {vol.Optional("param1", description="Test parameters"): str}
+    )
+    mock_tool.async_call.side_effect = HomeAssistantError("Test tool exception")
+
+    mock_get_tools.return_value = [mock_tool]
+
+    def completion_result(*args, messages, **kwargs):
+        for message in messages:
+            for content in message["content"]:
+                if not isinstance(content, str) and content["type"] == "tool_use":
+                    return Message(
+                        type="message",
+                        id="msg_1234567890ABCDEFGHIJKLMN",
+                        content=[
+                            TextBlock(
+                                type="text",
+                                text="There was an error calling the function",
+                            )
+                        ],
+                        model="claude-3-5-sonnet-20240620",
+                        role="assistant",
+                        stop_reason="end_turn",
+                        stop_sequence=None,
+                        usage=Usage(input_tokens=8, output_tokens=12),
+                    )
+
+        return Message(
+            type="message",
+            id="msg_1234567890ABCDEFGHIJKLMN",
+            content=[
+                TextBlock(type="text", text="Certainly, calling it now!"),
+                ToolUseBlock(
+                    type="tool_use",
+                    id="toolu_0123456789AbCdEfGhIjKlM",
+                    name="test_tool",
+                    input={"param1": "test_value"},
+                ),
+            ],
+            model="claude-3-5-sonnet-20240620",
+            role="assistant",
+            stop_reason="tool_use",
+            stop_sequence=None,
+            usage=Usage(input_tokens=8, output_tokens=12),
+        )
+
+    with patch(
+        "anthropic.resources.messages.AsyncMessages.create",
+        new_callable=AsyncMock,
+        side_effect=completion_result,
+    ) as mock_create:
+        result = await conversation.async_converse(
+            hass,
+            "Please call the test function",
+            None,
+            context,
+            agent_id=agent_id,
+        )
+
+    assert result.response.response_type == intent.IntentResponseType.ACTION_DONE
+    assert mock_create.mock_calls[1][2]["messages"][2] == {
+        "role": "user",
+        "content": [
+            {
+                "content": '{"error": "HomeAssistantError", "error_text": "Test tool exception"}',
+                "tool_use_id": "toolu_0123456789AbCdEfGhIjKlM",
+                "type": "tool_result",
+            }
+        ],
+    }
+    mock_tool.async_call.assert_awaited_once_with(
+        hass,
+        llm.ToolInput(
+            tool_name="test_tool",
+            tool_args={"param1": "test_value"},
+        ),
+        llm.LLMContext(
+            platform="anthropic",
+            context=context,
+            user_prompt="Please call the test function",
+            language="en",
+            assistant="conversation",
+            device_id=None,
+        ),
+    )
+
+
+async def test_assist_api_tools_conversion(
+    hass: HomeAssistant,
+    mock_config_entry_with_assist: MockConfigEntry,
+    mock_init_component,
+) -> None:
+    """Test that we are able to convert actual tools from Assist API."""
+    for component in (
+        "intent",
+        "todo",
+        "light",
+        "shopping_list",
+        "humidifier",
+        "climate",
+        "media_player",
+        "vacuum",
+        "cover",
+        "weather",
+    ):
+        assert await async_setup_component(hass, component, {})
+
+    agent_id = "conversation.claude"
+    with patch(
+        "anthropic.resources.messages.AsyncMessages.create",
+        new_callable=AsyncMock,
+        return_value=Message(
+            type="message",
+            id="msg_1234567890ABCDEFGHIJKLMN",
+            content=[TextBlock(type="text", text="Hello, how can I help you?")],
+            model="claude-3-5-sonnet-20240620",
+            role="assistant",
+            stop_reason="end_turn",
+            stop_sequence=None,
+            usage=Usage(input_tokens=8, output_tokens=12),
+        ),
+    ) as mock_create:
+        await conversation.async_converse(
+            hass, "hello", None, Context(), agent_id=agent_id
+        )
+
+    tools = mock_create.mock_calls[0][2]["tools"]
+    assert tools
+
+
+async def test_unknown_hass_api(
+    hass: HomeAssistant,
+    mock_config_entry: MockConfigEntry,
+    snapshot: SnapshotAssertion,
+    mock_init_component,
+) -> None:
+    """Test when we reference an API that no longer exists."""
+    hass.config_entries.async_update_entry(
+        mock_config_entry,
+        options={
+            **mock_config_entry.options,
+            CONF_LLM_HASS_API: "non-existing",
+        },
+    )
+
+    result = await conversation.async_converse(
+        hass, "hello", None, Context(), agent_id="conversation.claude"
+    )
+
+    assert result == snapshot
+
+
+@patch("anthropic.resources.messages.AsyncMessages.create", new_callable=AsyncMock)
+async def test_conversation_id(
+    mock_create,
+    hass: HomeAssistant,
+    mock_config_entry: MockConfigEntry,
+    mock_init_component,
+) -> None:
+    """Test conversation ID is honored."""
+    result = await conversation.async_converse(
+        hass, "hello", None, None, agent_id="conversation.claude"
+    )
+
+    conversation_id = result.conversation_id
+
+    result = await conversation.async_converse(
+        hass, "hello", conversation_id, None, agent_id="conversation.claude"
+    )
+
+    assert result.conversation_id == conversation_id
+
+    unknown_id = ulid.ulid()
+
+    result = await conversation.async_converse(
+        hass, "hello", unknown_id, None, agent_id="conversation.claude"
+    )
+
+    assert result.conversation_id != unknown_id
+
+    result = await conversation.async_converse(
+        hass, "hello", "koala", None, agent_id="conversation.claude"
+    )
+
+    assert result.conversation_id == "koala"
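test_conversation_id above pins down the ULID rule implemented in conversation.py: no ID means a fresh ULID, a known ID continues its history, an unknown-but-valid ULID is treated as stale and replaced, and a non-ULID string like "koala" is kept as a user-chosen key. The branch logic restated as a pure function, using the same homeassistant.util.ulid helpers as the integration (so it assumes a Home Assistant dev checkout):

    # The conversation-ID rule exercised by test_conversation_id, restated
    # as a pure function for illustration.
    from homeassistant.util import ulid


    def resolve_conversation_id(
        requested: str | None, history: dict[str, object]
    ) -> str:
        if requested is None:
            return ulid.ulid_now()  # brand-new conversation
        if requested in history:
            return requested  # continue a tracked conversation
        try:
            ulid.ulid_to_bytes(requested)
        except ValueError:
            return requested  # user-chosen id like "koala": respect it
        return ulid.ulid_now()  # stale ULID: start a fresh conversation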
diff --git a/tests/components/anthropic/test_init.py b/tests/components/anthropic/test_init.py
new file mode 100644
index 00000000000..ee87bb708d0
--- /dev/null
+++ b/tests/components/anthropic/test_init.py
@@ -0,0 +1,64 @@
+"""Tests for the Anthropic integration."""
+
+from unittest.mock import AsyncMock, patch
+
+from anthropic import (
+    APIConnectionError,
+    APITimeoutError,
+    AuthenticationError,
+    BadRequestError,
+)
+from httpx import URL, Request, Response
+import pytest
+
+from homeassistant.core import HomeAssistant
+from homeassistant.setup import async_setup_component
+
+from tests.common import MockConfigEntry
+
+
+@pytest.mark.parametrize(
+    ("side_effect", "error"),
+    [
+        (APIConnectionError(request=None), "Connection error"),
+        (APITimeoutError(request=None), "Request timed out"),
+        (
+            BadRequestError(
+                message="Your credit balance is too low to access the Claude API. Please go to Plans & Billing to upgrade or purchase credits.",
+                response=Response(
+                    status_code=400,
+                    request=Request(method="POST", url=URL()),
+                ),
+                body={"type": "error", "error": {"type": "invalid_request_error"}},
+            ),
+            "anthropic integration not ready yet: Your credit balance is too low to access the Claude API",
+        ),
+        (
+            AuthenticationError(
+                message="invalid x-api-key",
+                response=Response(
+                    status_code=401,
+                    request=Request(method="POST", url=URL()),
+                ),
+                body={"type": "error", "error": {"type": "authentication_error"}},
+            ),
+            "Invalid API key",
+        ),
+    ],
+)
+async def test_init_error(
+    hass: HomeAssistant,
+    mock_config_entry: MockConfigEntry,
+    caplog: pytest.LogCaptureFixture,
+    side_effect,
+    error,
+) -> None:
+    """Test initialization errors."""
+    with patch(
+        "anthropic.resources.messages.AsyncMessages.create",
+        new_callable=AsyncMock,
+        side_effect=side_effect,
+    ):
+        assert await async_setup_component(hass, "anthropic", {})
+        await hass.async_block_till_done()
+        assert error in caplog.text