diff --git a/homeassistant/components/openai_conversation/config_flow.py b/homeassistant/components/openai_conversation/config_flow.py
index c9f6e266055..469d36e28d8 100644
--- a/homeassistant/components/openai_conversation/config_flow.py
+++ b/homeassistant/components/openai_conversation/config_flow.py
@@ -145,6 +145,16 @@ def openai_config_option_schema(
     )
 
     return {
+        vol.Optional(
+            CONF_PROMPT,
+            description={"suggested_value": options.get(CONF_PROMPT)},
+            default=DEFAULT_PROMPT,
+        ): TemplateSelector(),
+        vol.Optional(
+            CONF_LLM_HASS_API,
+            description={"suggested_value": options.get(CONF_LLM_HASS_API)},
+            default="none",
+        ): SelectSelector(SelectSelectorConfig(options=apis)),
         vol.Optional(
             CONF_CHAT_MODEL,
             description={
@@ -153,16 +163,6 @@ def openai_config_option_schema(
             },
             default=DEFAULT_CHAT_MODEL,
         ): str,
-        vol.Optional(
-            CONF_LLM_HASS_API,
-            description={"suggested_value": options.get(CONF_LLM_HASS_API)},
-            default="none",
-        ): SelectSelector(SelectSelectorConfig(options=apis)),
-        vol.Optional(
-            CONF_PROMPT,
-            description={"suggested_value": options.get(CONF_PROMPT)},
-            default=DEFAULT_PROMPT,
-        ): TemplateSelector(),
         vol.Optional(
             CONF_MAX_TOKENS,
             description={"suggested_value": options.get(CONF_MAX_TOKENS)},
@@ -177,5 +177,5 @@ def openai_config_option_schema(
             CONF_TEMPERATURE,
             description={"suggested_value": options.get(CONF_TEMPERATURE)},
             default=DEFAULT_TEMPERATURE,
-        ): NumberSelector(NumberSelectorConfig(min=0, max=1, step=0.05)),
+        ): NumberSelector(NumberSelectorConfig(min=0, max=2, step=0.05)),
     }
diff --git a/homeassistant/components/openai_conversation/const.py b/homeassistant/components/openai_conversation/const.py
index 1e1fe27f547..c50b66c1320 100644
--- a/homeassistant/components/openai_conversation/const.py
+++ b/homeassistant/components/openai_conversation/const.py
@@ -23,10 +23,10 @@ An overview of the areas and the devices in this smart home:
 {%- endfor %}
 """
 CONF_CHAT_MODEL = "chat_model"
-DEFAULT_CHAT_MODEL = "gpt-3.5-turbo"
+DEFAULT_CHAT_MODEL = "gpt-4o"
 CONF_MAX_TOKENS = "max_tokens"
 DEFAULT_MAX_TOKENS = 150
 CONF_TOP_P = "top_p"
-DEFAULT_TOP_P = 1
+DEFAULT_TOP_P = 1.0
 CONF_TEMPERATURE = "temperature"
-DEFAULT_TEMPERATURE = 0.5
+DEFAULT_TEMPERATURE = 1.0
diff --git a/homeassistant/components/openai_conversation/strings.json b/homeassistant/components/openai_conversation/strings.json
index 6ab2ffb2855..01060afc7f1 100644
--- a/homeassistant/components/openai_conversation/strings.json
+++ b/homeassistant/components/openai_conversation/strings.json
@@ -17,12 +17,15 @@
     "step": {
       "init": {
         "data": {
-          "prompt": "Prompt Template",
+          "prompt": "Instructions",
           "chat_model": "[%key:common::generic::model%]",
           "max_tokens": "Maximum tokens to return in response",
           "temperature": "Temperature",
           "top_p": "Top P",
           "llm_hass_api": "[%key:common::config_flow::data::llm_hass_api%]"
+        },
+        "data_description": {
+          "prompt": "Instruct how the LLM should respond. This can be a template."
         }
       }
     }