diff --git a/source/_includes/asides/docs_sitemap.html b/source/_includes/asides/docs_sitemap.html
index 64c93a510ec..53eac73f1df 100644
--- a/source/_includes/asides/docs_sitemap.html
+++ b/source/_includes/asides/docs_sitemap.html
@@ -109,7 +109,7 @@
- {% icon "mdi:microphone" %} Voice assistants
+ {% icon "mdi:microphone" %} {% active_link /voice_control/ Voice assistants %}
{% if root == 'voice_control' or include.docs_index %}
- {% icon "mdi:comment-processing-outline" %} {% active_link /voice_control/ Assist up and running %}
diff --git a/source/voice_control/custom_sentences.markdown b/source/voice_control/custom_sentences.markdown
index 5cf3697a5cc..3a0bf1875c8 100644
--- a/source/voice_control/custom_sentences.markdown
+++ b/source/voice_control/custom_sentences.markdown
@@ -64,112 +64,9 @@ This is the easiest method to get started with custom sentences for automations.
8. Pick up your voice control device and speak the custom sentence.
- Your automation should now be triggered.
-## Setting up custom sentences in configuration.yaml
+### Setting up custom sentences in configuration.yaml
+To set up custom sentences in your configuration file, follow [this tutorial](/voice_control/custom_sentences_yaml/).
-Intents and sentences may be added in the [`conversation`](/integrations/conversation/) config in your `configuration.yaml` file:
-
-{% raw %}
-
-```yaml
-# Example configuration.yaml
-conversation:
- intents:
- HassTurnOn:
- - "activate [the] {name}"
-```
-
-{% endraw %}
-
-This extends the default English sentences for the `HassTurnOn` intent, allowing you to say "activate the kitchen lights" as well as "turn on the kitchen lights".
-
-New intents can also be added, with their responses and actions defined using the [`intent_script`](/integrations/intent_script/) integration:
-
-{% raw %}
-
-```yaml
-# Example configuration.yaml
-conversation:
- intents:
- YearOfVoice:
- - "how is the year of voice going"
-
-intent_script:
- YearOfVoice:
- speech:
- text: "Great! We're at over 40 languages and counting."
-```
-
-{% endraw %}
-
-Besides a text response, `intent_script` can trigger any `action` available in Home Assistant, such as calling a service or firing an event.
-
-## Setting up sentences in the config directory
-
-More advanced customization can be done in Home Assistant's `config` directory. YAML files in `config/custom_sentences/en`, for example, will be loaded when English sentences (language code `en`) are requested.
-
-The following example creates a new `SetVolume` intent that changes the volume on one of two media players:
-
-{% raw %}
-
-```yaml
-# Example config/custom_sentences/en/media.yaml
-language: "en"
-intents:
- SetVolume:
- data:
- - sentences:
- - "(set|change) {media_player} volume to {volume} [percent]"
- - "(set|change) [the] volume for {media_player} to {volume} [percent]"
-lists:
- media_player:
- values:
- - in: "living room"
- out: "media_player.living_room"
- - in: "bedroom"
- out: "media_player.bedroom"
- volume:
- range:
- from: 0
- to: 100
-```
-
-{% endraw %}
-
-As mentioned above, you can then use the `intent_script` integration to implement an action and provide a response for `SetVolume`:
-
-{% raw %}
-
-```yaml
-# Example configuration.yaml
-intent_script:
- SetVolume:
- action:
- service: "media_player.volume_set"
- data:
- entity_id: "{{ media_player }}"
- volume_level: "{{ volume / 100.0 }}"
- speech:
- text: "Volume changed to {{ volume }}"
-```
-
-{% endraw %}
-
-## Customizing responses
-
-Responses for existing intents can be customized as well in `config/custom_sentences/`:
-
-{% raw %}
-
-```yaml
-# Example config/custom_sentences/en/responses.yaml
-language: "en"
-responses:
- intents:
- HassTurnOn:
- default: "I have turned on the {{ slots.name }}"
-```
-
-{% endraw %}
## Related devices and installation tutorials
diff --git a/source/voice_control/custom_sentences_yaml.markdown b/source/voice_control/custom_sentences_yaml.markdown
new file mode 100644
index 00000000000..b9cb3af8ea1
--- /dev/null
+++ b/source/voice_control/custom_sentences_yaml.markdown
@@ -0,0 +1,125 @@
+---
+title: "Setting up custom sentences in configuration.yaml"
+related:
+ - docs: /voice_control/aliases/
+ title: Create aliases
+ - docs: /docs/scripts/#respond-to-a-conversation
+ title: Conversation response script action
+ - docs: /docs/automation/trigger/#sentence-trigger
+ title: Sentence triggers
+ - docs: /docs/automation/trigger/#sentence-wildcards
+ title: Sentence wildcards
+ - url: https://developers.home-assistant.io/docs/intent_builtin/
+ title: View existing intents
+---
+
+Intents and sentences may be added in the [`conversation`](/integrations/conversation/) config in your `configuration.yaml` file:
+
+{% raw %}
+
+```yaml
+# Example configuration.yaml
+conversation:
+ intents:
+ HassTurnOn:
+ - "activate [the] {name}"
+```
+
+{% endraw %}
+
+This extends the default English sentences for the `HassTurnOn` intent, allowing you to say "activate the kitchen lights" as well as "turn on the kitchen lights".
+
+New intents can also be added, with their responses and actions defined using the [`intent_script`](/integrations/intent_script/) integration:
+
+{% raw %}
+
+```yaml
+# Example configuration.yaml
+conversation:
+ intents:
+ YearOfVoice:
+ - "how is the year of voice going"
+
+intent_script:
+ YearOfVoice:
+ speech:
+ text: "Great! We're at over 40 languages and counting."
+```
+
+{% endraw %}
+
+Besides a text response, `intent_script` can trigger any `action` available in Home Assistant, such as calling a service or firing an event.
+
+## Setting up sentences in the config directory
+
+More advanced customization can be done in Home Assistant's `config` directory. YAML files in `config/custom_sentences/en`, for example, will be loaded when English sentences (language code `en`) are requested.
+
+The following example creates a new `SetVolume` intent that changes the volume on one of two media players:
+
+{% raw %}
+
+```yaml
+# Example config/custom_sentences/en/media.yaml
+language: "en"
+intents:
+ SetVolume:
+ data:
+ - sentences:
+ - "(set|change) {media_player} volume to {volume} [percent]"
+ - "(set|change) [the] volume for {media_player} to {volume} [percent]"
+lists:
+ media_player:
+ values:
+ - in: "living room"
+ out: "media_player.living_room"
+ - in: "bedroom"
+ out: "media_player.bedroom"
+ volume:
+ range:
+ from: 0
+ to: 100
+```
+
+{% endraw %}
+
+As mentioned above, you can then use the `intent_script` integration to implement an action and provide a response for `SetVolume`:
+
+{% raw %}
+
+```yaml
+# Example configuration.yaml
+intent_script:
+ SetVolume:
+ action:
+ service: "media_player.volume_set"
+ data:
+ entity_id: "{{ media_player }}"
+ volume_level: "{{ volume / 100.0 }}"
+ speech:
+ text: "Volume changed to {{ volume }}"
+```
+
+{% endraw %}
+
+## Customizing responses
+
+Responses for existing intents can be customized as well in `config/custom_sentences/`:
+
+{% raw %}
+
+```yaml
+# Example config/custom_sentences/en/responses.yaml
+language: "en"
+responses:
+ intents:
+ HassTurnOn:
+ default: "I have turned on the {{ slots.name }}"
+```
+
+{% endraw %}
+
+## Related devices and installation tutorials
+
+- [Custom sentences main page](/voice_control/custom_sentences/)
+- [$13 voice assistant for Home Assistant](/voice_control/thirteen-usd-voice-remote/)
+- [S3-BOX-3 voice assistant](/voice_control/s3_box_voice_assistant/)
diff --git a/source/voice_control/voice_remote_local_assistant.markdown b/source/voice_control/voice_remote_local_assistant.markdown
index efbcf494369..671c577e80f 100644
--- a/source/voice_control/voice_remote_local_assistant.markdown
+++ b/source/voice_control/voice_remote_local_assistant.markdown
@@ -28,10 +28,6 @@ In Home Assistant, the Assist pipelines are made up of various components that t
- For understanding, it needs to have a text-to-speech and speech-to-text software integrated.
- For running all together, it needs to have the Home Assistant Operating System running.
-### First, make sure Assist can run in your local setup
-
-Check our comparison table to be sure local setup is going to meet your expectations.
-
## Some options for speech-to-text and text-to-speech
There is a speech-to-text and text-to-speech option that runs entirely local. No data is sent to external servers for processing.