mirror of
https://github.com/home-assistant/core.git
synced 2025-04-27 02:37:50 +00:00

* Allow llm API to render dynamic template prompt * Make rendering api prompt async so it can become a RAG * Fix test
17 lines
463 B
Python
"""Constants for the OpenAI Conversation integration."""
|
|
|
|
import logging
|
|
|
|
DOMAIN = "openai_conversation"
|
|
LOGGER = logging.getLogger(__package__)
|
|
CONF_PROMPT = "prompt"
|
|
DEFAULT_PROMPT = """Answer in plain text. Keep it simple and to the point."""
|
|
CONF_CHAT_MODEL = "chat_model"
|
|
DEFAULT_CHAT_MODEL = "gpt-4o"
|
|
CONF_MAX_TOKENS = "max_tokens"
|
|
DEFAULT_MAX_TOKENS = 150
|
|
CONF_TOP_P = "top_p"
|
|
DEFAULT_TOP_P = 1.0
|
|
CONF_TEMPERATURE = "temperature"
|
|
DEFAULT_TEMPERATURE = 1.0
|