Improve script tracing (#48100)

* Improve script tracing

* Fix test
Erik Montnemery, 2021-03-22 13:15:45 +01:00, committed by GitHub
parent 3fb323b745
commit 272dffc384
3 changed files with 645 additions and 48 deletions
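
The diff threads trace_set_result() calls through the script step handlers (class _ScriptRun in homeassistant.helpers.script) so that each traced step records what it actually did: delay, wait state, service-call parameters, fired event, chosen branch. As a rough mental model, here is a minimal sketch of that helper, assuming the homeassistant.helpers.trace API of this period (one TraceElement per executed step plus a context-local stack of the nodes currently executing); the names and details are illustrative, not the verbatim implementation:

    # Illustrative sketch only, not the verbatim homeassistant.helpers.trace code.
    from contextvars import ContextVar

    _trace_stack = ContextVar("trace_stack", default=None)

    class TraceElement:
        """One traced step; the tests below inspect _result, _error and _variables."""

        def __init__(self, path):
            self.path = path          # e.g. "0", "1/condition", "0/repeat/sequence/0"
            self._result = None       # filled in by trace_set_result()
            self._error = None        # set when the step raises
            self._variables = None    # script variables in effect when the step ran

        def set_result(self, **kwargs):
            self._result = {**kwargs}

    def trace_set_result(**kwargs):
        """Record the keyword data as the result of the step currently executing."""
        stack = _trace_stack.get()
        if stack:                     # no-op when nothing is being traced
            stack[-1].set_result(**kwargs)

The step handlers call it with whatever is worth recording (trace_set_result(delay=..., done=...), trace_set_result(params=..., running_script=..., limit=...), and so on), and the tests then compare the resulting dicts.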


@@ -427,11 +427,12 @@ class _ScriptRun:
delay = delay.total_seconds()
self._changed()
trace_set_result(delay=delay, done=False)
try:
async with async_timeout.timeout(delay):
await self._stop.wait()
except asyncio.TimeoutError:
pass
trace_set_result(delay=delay, done=True)
async def _async_wait_template_step(self):
"""Handle a wait template."""
@@ -443,6 +444,7 @@ class _ScriptRun:
self._step_log("wait template", timeout)
self._variables["wait"] = {"remaining": timeout, "completed": False}
trace_set_result(wait=self._variables["wait"])
wait_template = self._action[CONF_WAIT_TEMPLATE]
wait_template.hass = self._hass
@@ -455,10 +457,9 @@
@callback
def async_script_wait(entity_id, from_s, to_s):
"""Handle script after template condition is true."""
self._variables["wait"] = {
"remaining": to_context.remaining if to_context else timeout,
"completed": True,
}
wait_var = self._variables["wait"]
wait_var["remaining"] = to_context.remaining if to_context else timeout
wait_var["completed"] = True
done.set()
to_context = None
@@ -475,10 +476,11 @@ class _ScriptRun:
async with async_timeout.timeout(timeout) as to_context:
await asyncio.wait(tasks, return_when=asyncio.FIRST_COMPLETED)
except asyncio.TimeoutError as ex:
self._variables["wait"]["remaining"] = 0.0
if not self._action.get(CONF_CONTINUE_ON_TIMEOUT, True):
self._log(_TIMEOUT_MSG)
trace_set_result(wait=self._variables["wait"], timeout=True)
raise _StopScript from ex
self._variables["wait"]["remaining"] = 0.0
finally:
for task in tasks:
task.cancel()
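
A detail worth noting in the wait hunks above: trace_set_result(wait=self._variables["wait"]) hands the trace a reference to the wait dict, so the handler now mutates that dict in place (via wait_var) instead of rebinding self._variables["wait"] to a fresh dict. Rebinding would leave the already-captured trace result stale. A standalone illustration of the aliasing, in plain Python with no Home Assistant imports:

    # Rebinding replaces the object; the captured result keeps the old values.
    variables = {"wait": {"remaining": 10, "completed": False}}
    captured = {"wait": variables["wait"]}          # like trace_set_result(wait=...)
    variables["wait"] = {"remaining": 3, "completed": True}
    assert captured["wait"] == {"remaining": 10, "completed": False}   # stale

    # Mutating in place keeps both references pointing at the same dict.
    variables = {"wait": {"remaining": 10, "completed": False}}
    captured = {"wait": variables["wait"]}
    wait_var = variables["wait"]
    wait_var["remaining"] = 3
    wait_var["completed"] = True
    assert captured["wait"] == {"remaining": 3, "completed": True}     # in sync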
@@ -539,6 +541,7 @@ class _ScriptRun:
else:
limit = SERVICE_CALL_LIMIT
trace_set_result(params=params, running_script=running_script, limit=limit)
service_task = self._hass.async_create_task(
self._hass.services.async_call(
**params,
@@ -567,6 +570,7 @@ class _ScriptRun:
async def _async_scene_step(self):
"""Activate the scene specified in the action."""
self._step_log("activate scene")
trace_set_result(scene=self._action[CONF_SCENE])
await self._hass.services.async_call(
scene.DOMAIN,
SERVICE_TURN_ON,
@@ -592,6 +596,7 @@ class _ScriptRun:
"Error rendering event data template: %s", ex, level=logging.ERROR
)
trace_set_result(event=self._action[CONF_EVENT], event_data=event_data)
self._hass.bus.async_fire(
self._action[CONF_EVENT], event_data, context=self._context
)
@@ -748,14 +753,14 @@ class _ScriptRun:
variables = {**self._variables}
self._variables["wait"] = {"remaining": timeout, "trigger": None}
trace_set_result(wait=self._variables["wait"])
done = asyncio.Event()
async def async_done(variables, context=None):
self._variables["wait"] = {
"remaining": to_context.remaining if to_context else timeout,
"trigger": variables["trigger"],
}
wait_var = self._variables["wait"]
wait_var["remaining"] = to_context.remaining if to_context else timeout
wait_var["trigger"] = variables["trigger"]
done.set()
def log_cb(level, msg, **kwargs):
@@ -782,10 +787,11 @@ class _ScriptRun:
async with async_timeout.timeout(timeout) as to_context:
await asyncio.wait(tasks, return_when=asyncio.FIRST_COMPLETED)
except asyncio.TimeoutError as ex:
self._variables["wait"]["remaining"] = 0.0
if not self._action.get(CONF_CONTINUE_ON_TIMEOUT, True):
self._log(_TIMEOUT_MSG)
trace_set_result(wait=self._variables["wait"], timeout=True)
raise _StopScript from ex
self._variables["wait"]["remaining"] = 0.0
finally:
for task in tasks:
task.cancel()


@@ -50,6 +50,18 @@ async def test_get_automation_trace(hass, hass_ws_client):
"action": {"event": "another_event"},
}
sun_action = {
"limit": 10,
"params": {
"domain": "test",
"service": "automation",
"service_data": {},
"target": {},
},
"running_script": False,
}
moon_action = {"event": "another_event", "event_data": {}}
assert await async_setup_component(
hass,
"automation",
@@ -94,7 +106,7 @@ async def test_get_automation_trace(hass, hass_ws_client):
assert len(trace["action_trace"]) == 1
assert len(trace["action_trace"]["action/0"]) == 1
assert trace["action_trace"]["action/0"][0]["error"]
assert "result" not in trace["action_trace"]["action/0"][0]
assert trace["action_trace"]["action/0"][0]["result"] == sun_action
assert trace["condition_trace"] == {}
assert trace["config"] == sun_config
assert trace["context"]
@@ -133,7 +145,7 @@ async def test_get_automation_trace(hass, hass_ws_client):
assert len(trace["action_trace"]) == 1
assert len(trace["action_trace"]["action/0"]) == 1
assert "error" not in trace["action_trace"]["action/0"][0]
assert "result" not in trace["action_trace"]["action/0"][0]
assert trace["action_trace"]["action/0"][0]["result"] == moon_action
assert len(trace["condition_trace"]) == 1
assert len(trace["condition_trace"]["condition/0"]) == 1
assert trace["condition_trace"]["condition/0"][0]["result"] == {"result": True}
@@ -212,7 +224,7 @@ async def test_get_automation_trace(hass, hass_ws_client):
assert len(trace["action_trace"]) == 1
assert len(trace["action_trace"]["action/0"]) == 1
assert "error" not in trace["action_trace"]["action/0"][0]
assert "result" not in trace["action_trace"]["action/0"][0]
assert trace["action_trace"]["action/0"][0]["result"] == moon_action
assert len(trace["condition_trace"]) == 1
assert len(trace["condition_trace"]["condition/0"]) == 1
assert trace["condition_trace"]["condition/0"][0]["result"] == {"result": True}


@@ -16,7 +16,8 @@ import voluptuous as vol
from homeassistant import exceptions
import homeassistant.components.scene as scene
from homeassistant.const import ATTR_ENTITY_ID, SERVICE_TURN_ON
from homeassistant.core import Context, CoreState, callback
from homeassistant.core import SERVICE_CALL_LIMIT, Context, CoreState, callback
from homeassistant.exceptions import ConditionError, ServiceNotFound
from homeassistant.helpers import config_validation as cv, script, trace
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.setup import async_setup_component
@@ -31,18 +32,59 @@ from tests.common import (
ENTITY_ID = "script.test"
@pytest.fixture(autouse=True)
def prepare_tracing():
"""Prepare tracing."""
trace.trace_get()
def compare_trigger_item(actual_trigger, expected_trigger):
"""Compare trigger data description."""
assert actual_trigger["description"] == expected_trigger["description"]
def compare_result_item(key, actual, expected):
"""Compare an item in the result dict."""
if key == "wait" and (expected.get("trigger") is not None):
assert "trigger" in actual
expected_trigger = expected.pop("trigger")
actual_trigger = actual.pop("trigger")
compare_trigger_item(actual_trigger, expected_trigger)
assert actual == expected
def assert_element(trace_element, expected_element, path):
"""Assert a trace element is as expected.
Note: Unused variable 'path' is passed to get helpful errors from pytest.
"""
for result_key, result in expected_element.get("result", {}).items():
expected_result = expected_element.get("result", {})
# Check that every item in expected_element is present and equal in trace_element
# The redundant set operation gives helpful errors from pytest
assert not set(expected_result) - set(trace_element._result or {})
for result_key, result in expected_result.items():
compare_result_item(result_key, trace_element._result[result_key], result)
assert trace_element._result[result_key] == result
# Check for unexpected items in trace_element
assert not set(trace_element._result or {}) - set(expected_result)
if "error_type" in expected_element:
assert isinstance(trace_element._error, expected_element["error_type"])
else:
assert trace_element._error is None
# Don't check variables when script starts
if trace_element.path == "0":
return
if "variables" in expected_element:
assert expected_element["variables"] == trace_element._variables
else:
assert not trace_element._variables
def assert_action_trace(expected):
"""Assert a trace condition sequence is as expected."""
@@ -82,9 +124,6 @@ async def test_firing_event_basic(hass, caplog):
{"alias": alias, "event": event, "event_data": {"hello": "world"}}
)
# Prepare tracing
trace.trace_get()
script_obj = script.Script(
hass,
sequence,
@@ -102,9 +141,12 @@ async def test_firing_event_basic(hass, caplog):
assert ".test_name:" in caplog.text
assert "Test Name: Running test script" in caplog.text
assert f"Executing step {alias}" in caplog.text
assert_action_trace(
{
"0": [{}],
"0": [
{"result": {"event": "test_event", "event_data": {"hello": "world"}}},
],
}
)
@@ -150,6 +192,24 @@ async def test_firing_event_template(hass):
"list2": ["yes", "yesyes"],
}
assert_action_trace(
{
"0": [
{
"result": {
"event": "test_event",
"event_data": {
"dict": {1: "yes", 2: "yesyes", 3: "yesyesyes"},
"dict2": {1: "yes", 2: "yesyes", 3: "yesyesyes"},
"list": ["yes", "yesyes"],
"list2": ["yes", "yesyes"],
},
}
}
],
}
)
async def test_calling_service_basic(hass, caplog):
"""Test the calling of a service."""
@@ -170,6 +230,25 @@ async def test_calling_service_basic(hass, caplog):
assert calls[0].data.get("hello") == "world"
assert f"Executing step {alias}" in caplog.text
assert_action_trace(
{
"0": [
{
"result": {
"limit": SERVICE_CALL_LIMIT,
"params": {
"domain": "test",
"service": "script",
"service_data": {"hello": "world"},
"target": {},
},
"running_script": False,
}
}
],
}
)
async def test_calling_service_template(hass):
"""Test the calling of a service."""
@@ -204,6 +283,25 @@ async def test_calling_service_template(hass):
assert calls[0].context is context
assert calls[0].data.get("hello") == "world"
assert_action_trace(
{
"0": [
{
"result": {
"limit": SERVICE_CALL_LIMIT,
"params": {
"domain": "test",
"service": "script",
"service_data": {"hello": "world"},
"target": {},
},
"running_script": False,
}
}
],
}
)
async def test_data_template_with_templated_key(hass):
"""Test the calling of a service with a data_template with a templated key."""
@@ -222,7 +320,26 @@ async def test_data_template_with_templated_key(hass):
assert len(calls) == 1
assert calls[0].context is context
assert "hello" in calls[0].data
assert calls[0].data.get("hello") == "world"
assert_action_trace(
{
"0": [
{
"result": {
"limit": SERVICE_CALL_LIMIT,
"params": {
"domain": "test",
"service": "script",
"service_data": {"hello": "world"},
"target": {},
},
"running_script": False,
}
}
],
}
)
async def test_multiple_runs_no_wait(hass):
@@ -315,6 +432,12 @@ async def test_activating_scene(hass, caplog):
assert calls[0].data.get(ATTR_ENTITY_ID) == "scene.hello"
assert f"Executing step {alias}" in caplog.text
assert_action_trace(
{
"0": [{"result": {"scene": "scene.hello"}}],
}
)
@pytest.mark.parametrize("count", [1, 3])
async def test_stop_no_wait(hass, count):
@@ -390,6 +513,12 @@ async def test_delay_basic(hass):
assert not script_obj.is_running
assert script_obj.last_action is None
assert_action_trace(
{
"0": [{"result": {"delay": 5.0, "done": True}}],
}
)
async def test_multiple_runs_delay(hass):
"""Test multiple runs with delay in script."""
@@ -454,6 +583,12 @@ async def test_delay_template_ok(hass):
assert not script_obj.is_running
assert_action_trace(
{
"0": [{"result": {"delay": 5.0, "done": True}}],
}
)
async def test_delay_template_invalid(hass, caplog):
"""Test the delay as a template that fails."""
@@ -481,6 +616,13 @@ async def test_delay_template_invalid(hass, caplog):
assert not script_obj.is_running
assert len(events) == 1
assert_action_trace(
{
"0": [{"result": {"event": "test_event", "event_data": {}}}],
"1": [{"error_type": script._StopScript}],
}
)
async def test_delay_template_complex_ok(hass):
"""Test the delay with a working complex template."""
@@ -501,6 +643,12 @@ async def test_delay_template_complex_ok(hass):
assert not script_obj.is_running
assert_action_trace(
{
"0": [{"result": {"delay": 5.0, "done": True}}],
}
)
async def test_delay_template_complex_invalid(hass, caplog):
"""Test the delay with a complex template that fails."""
@@ -528,6 +676,13 @@ async def test_delay_template_complex_invalid(hass, caplog):
assert not script_obj.is_running
assert len(events) == 1
assert_action_trace(
{
"0": [{"result": {"event": "test_event", "event_data": {}}}],
"1": [{"error_type": script._StopScript}],
}
)
async def test_cancel_delay(hass):
"""Test the cancelling while the delay is present."""
@@ -559,6 +714,12 @@ async def test_cancel_delay(hass):
assert not script_obj.is_running
assert len(events) == 0
assert_action_trace(
{
"0": [{"result": {"delay": 5.0, "done": False}}],
}
)
@pytest.mark.parametrize("action_type", ["template", "trigger"])
async def test_wait_basic(hass, action_type):
@@ -594,6 +755,28 @@ async def test_wait_basic(hass, action_type):
assert not script_obj.is_running
assert script_obj.last_action is None
if action_type == "template":
assert_action_trace(
{
"0": [{"result": {"wait": {"completed": True, "remaining": None}}}],
}
)
else:
assert_action_trace(
{
"0": [
{
"result": {
"wait": {
"trigger": {"description": "state of switch.test"},
"remaining": None,
}
}
}
],
}
)
async def test_wait_for_trigger_variables(hass):
"""Test variables are passed to wait_for_trigger action."""
@@ -672,6 +855,19 @@ async def test_wait_basic_times_out(hass, action_type):
assert timed_out
if action_type == "template":
assert_action_trace(
{
"0": [{"result": {"wait": {"completed": False, "remaining": None}}}],
}
)
else:
assert_action_trace(
{
"0": [{"result": {"wait": {"trigger": None, "remaining": None}}}],
}
)
@pytest.mark.parametrize("action_type", ["template", "trigger"])
async def test_multiple_runs_wait(hass, action_type):
@@ -769,6 +965,19 @@ async def test_cancel_wait(hass, action_type):
assert not script_obj.is_running
assert len(events) == 0
if action_type == "template":
assert_action_trace(
{
"0": [{"result": {"wait": {"completed": False, "remaining": None}}}],
}
)
else:
assert_action_trace(
{
"0": [{"result": {"wait": {"trigger": None, "remaining": None}}}],
}
)
async def test_wait_template_not_schedule(hass):
"""Test the wait template with correct condition."""
@@ -790,6 +999,19 @@ async def test_wait_template_not_schedule(hass):
assert not script_obj.is_running
assert len(events) == 2
assert_action_trace(
{
"0": [{"result": {"event": "test_event", "event_data": {}}}],
"1": [{"result": {"wait": {"completed": True, "remaining": None}}}],
"2": [
{
"result": {"event": "test_event", "event_data": {}},
"variables": {"wait": {"completed": True, "remaining": None}},
}
],
}
)
@pytest.mark.parametrize(
"timeout_param", [5, "{{ 5 }}", {"seconds": 5}, {"seconds": "{{ 5 }}"}]
@@ -839,6 +1061,21 @@ async def test_wait_timeout(hass, caplog, timeout_param, action_type):
assert len(events) == 1
assert "(timeout: 0:00:05)" in caplog.text
if action_type == "template":
variable_wait = {"wait": {"completed": False, "remaining": 0.0}}
else:
variable_wait = {"wait": {"trigger": None, "remaining": 0.0}}
expected_trace = {
"0": [{"result": variable_wait}],
"1": [
{
"result": {"event": "test_event", "event_data": {}},
"variables": variable_wait,
}
],
}
assert_action_trace(expected_trace)
@pytest.mark.parametrize(
"continue_on_timeout,n_events", [(False, 0), (True, 1), (None, 1)]
@@ -884,6 +1121,25 @@ async def test_wait_continue_on_timeout(
assert not script_obj.is_running
assert len(events) == n_events
if action_type == "template":
variable_wait = {"wait": {"completed": False, "remaining": 0.0}}
else:
variable_wait = {"wait": {"trigger": None, "remaining": 0.0}}
expected_trace = {
"0": [{"result": variable_wait}],
}
if continue_on_timeout is False:
expected_trace["0"][0]["result"]["timeout"] = True
expected_trace["0"][0]["error_type"] = script._StopScript
else:
expected_trace["1"] = [
{
"result": {"event": "test_event", "event_data": {}},
"variables": variable_wait,
}
]
assert_action_trace(expected_trace)
async def test_wait_template_variables_in(hass):
"""Test the wait template with input variables."""
@@ -908,6 +1164,12 @@ async def test_wait_template_variables_in(hass):
assert not script_obj.is_running
assert_action_trace(
{
"0": [{"result": {"wait": {"completed": True, "remaining": None}}}],
}
)
async def test_wait_template_with_utcnow(hass):
"""Test the wait template with utcnow."""
@@ -933,6 +1195,12 @@ async def test_wait_template_with_utcnow(hass):
await hass.async_block_till_done()
assert not script_obj.is_running
assert_action_trace(
{
"0": [{"result": {"wait": {"completed": True, "remaining": None}}}],
}
)
async def test_wait_template_with_utcnow_no_match(hass):
"""Test the wait template with utcnow that does not match."""
@@ -963,6 +1231,12 @@ async def test_wait_template_with_utcnow_no_match(hass):
assert timed_out
assert_action_trace(
{
"0": [{"result": {"wait": {"completed": False, "remaining": None}}}],
}
)
@pytest.mark.parametrize("mode", ["no_timeout", "timeout_finish", "timeout_not_finish"])
@pytest.mark.parametrize("action_type", ["template", "trigger"])
@@ -1056,6 +1330,12 @@ async def test_wait_for_trigger_bad(hass, caplog):
assert "Unknown error while setting up trigger" in caplog.text
assert_action_trace(
{
"0": [{"result": {"wait": {"trigger": None, "remaining": None}}}],
}
)
async def test_wait_for_trigger_generated_exception(hass, caplog):
"""Test bad wait_for_trigger."""
@@ -1082,6 +1362,12 @@ async def test_wait_for_trigger_generated_exception(hass, caplog):
assert "ValueError" in caplog.text
assert "something bad" in caplog.text
assert_action_trace(
{
"0": [{"result": {"wait": {"trigger": None, "remaining": None}}}],
}
)
async def test_condition_warning(hass, caplog):
"""Test warning on condition."""
@@ -1112,6 +1398,15 @@ async def test_condition_warning(hass, caplog):
assert len(events) == 1
assert_action_trace(
{
"0": [{"result": {"event": "test_event", "event_data": {}}}],
"1": [{"error_type": script._StopScript, "result": {"result": False}}],
"1/condition": [{"error_type": ConditionError}],
"1/condition/entity_id/0": [{"error_type": ConditionError}],
}
)
async def test_condition_basic(hass, caplog):
"""Test if we can use conditions in a script."""
@@ -1139,6 +1434,15 @@ async def test_condition_basic(hass, caplog):
caplog.clear()
assert len(events) == 2
assert_action_trace(
{
"0": [{"result": {"event": "test_event", "event_data": {}}}],
"1": [{"result": {"result": True}}],
"1/condition": [{"result": {"result": True}}],
"2": [{"result": {"event": "test_event", "event_data": {}}}],
}
)
hass.states.async_set("test.entity", "goodbye")
await script_obj.async_run(context=Context())
@@ -1147,6 +1451,14 @@ async def test_condition_basic(hass, caplog):
assert f"Test condition {alias}: False" in caplog.text
assert len(events) == 3
assert_action_trace(
{
"0": [{"result": {"event": "test_event", "event_data": {}}}],
"1": [{"error_type": script._StopScript, "result": {"result": False}}],
"1/condition": [{"result": {"result": False}}],
}
)
@patch("homeassistant.helpers.script.condition.async_from_config")
async def test_condition_created_once(async_from_config, hass):
@@ -1219,9 +1531,6 @@ async def test_repeat_count(hass, caplog, count):
}
)
# Prepare tracing
trace.trace_get()
script_obj = script.Script(hass, sequence, "Test Name", "test_domain")
await script_obj.async_run(context=Context())
@@ -1233,10 +1542,31 @@ async def test_repeat_count(hass, caplog, count):
assert event.data.get("index") == index + 1
assert event.data.get("last") == (index == count - 1)
assert caplog.text.count(f"Repeating {alias}") == count
first_index = max(1, count - script.ACTION_TRACE_NODE_MAX_LEN + 1)
last_index = count + 1
assert_action_trace(
{
"0": [{}],
"0/repeat/sequence/0": [{}] * min(count, script.ACTION_TRACE_NODE_MAX_LEN),
"0/repeat/sequence/0": [
{
"result": {
"event": "test_event",
"event_data": {
"first": index == 1,
"index": index,
"last": index == count,
},
},
"variables": {
"repeat": {
"first": index == 1,
"index": index,
"last": index == count,
}
},
}
for index in range(first_index, last_index)
],
}
)
@@ -1281,6 +1611,26 @@ async def test_repeat_condition_warning(hass, caplog, condition):
assert len(events) == count
expected_trace = {"0": [{}]}
if condition == "until":
expected_trace["0/repeat/sequence/0"] = [
{
"result": {"event": "test_event", "event_data": {}},
"variables": {"repeat": {"first": True, "index": 1}},
}
]
expected_trace["0/repeat"] = [
{
"result": {"result": None},
"variables": {"repeat": {"first": True, "index": 1}},
}
]
expected_trace[f"0/repeat/{condition}/0"] = [{"error_type": ConditionError}]
expected_trace[f"0/repeat/{condition}/0/entity_id/0"] = [
{"error_type": ConditionError}
]
assert_action_trace(expected_trace)
@pytest.mark.parametrize("condition", ["while", "until"])
@pytest.mark.parametrize("direct_template", [False, True])
@@ -1364,15 +1714,14 @@ async def test_repeat_var_in_condition(hass, condition):
sequence = {"repeat": {"sequence": {"event": event}}}
if condition == "while":
sequence["repeat"]["while"] = {
"condition": "template",
"value_template": "{{ repeat.index <= 2 }}",
}
value_template = "{{ repeat.index <= 2 }}"
else:
sequence["repeat"]["until"] = {
"condition": "template",
"value_template": "{{ repeat.index == 2 }}",
}
value_template = "{{ repeat.index == 2 }}"
sequence["repeat"][condition] = {
"condition": "template",
"value_template": value_template,
}
script_obj = script.Script(
hass, cv.SCRIPT_SCHEMA(sequence), "Test Name", "test_domain"
)
@@ -1385,6 +1734,63 @@ async def test_repeat_var_in_condition(hass, condition):
assert len(events) == 2
if condition == "while":
expected_trace = {
"0": [{}],
"0/repeat": [
{
"result": {"result": True},
"variables": {"repeat": {"first": True, "index": 1}},
},
{
"result": {"result": True},
"variables": {"repeat": {"first": False, "index": 2}},
},
{
"result": {"result": False},
"variables": {"repeat": {"first": False, "index": 3}},
},
],
"0/repeat/while/0": [
{"result": {"result": True}},
{"result": {"result": True}},
{"result": {"result": False}},
],
"0/repeat/sequence/0": [
{"result": {"event": "test_event", "event_data": {}}}
]
* 2,
}
else:
expected_trace = {
"0": [{}],
"0/repeat/sequence/0": [
{
"result": {"event": "test_event", "event_data": {}},
"variables": {"repeat": {"first": True, "index": 1}},
},
{
"result": {"event": "test_event", "event_data": {}},
"variables": {"repeat": {"first": False, "index": 2}},
},
],
"0/repeat": [
{
"result": {"result": False},
"variables": {"repeat": {"first": True, "index": 1}},
},
{
"result": {"result": True},
"variables": {"repeat": {"first": False, "index": 2}},
},
],
"0/repeat/until/0": [
{"result": {"result": False}},
{"result": {"result": True}},
],
}
assert_action_trace(expected_trace)
@pytest.mark.parametrize(
"variables,first_last,inside_x",
@@ -1486,6 +1892,49 @@ async def test_repeat_nested(hass, variables, first_last, inside_x):
"x": result[3],
}
event_data1 = {"repeat": None, "x": inside_x}
event_data2 = [
{"first": True, "index": 1, "last": False, "x": inside_x},
{"first": False, "index": 2, "last": True, "x": inside_x},
]
variable_repeat = [
{"repeat": {"first": True, "index": 1, "last": False}},
{"repeat": {"first": False, "index": 2, "last": True}},
]
expected_trace = {
"0": [{"result": {"event": "test_event", "event_data": event_data1}}],
"1": [{}],
"1/repeat/sequence/0": [
{
"result": {"event": "test_event", "event_data": event_data2[0]},
"variables": variable_repeat[0],
},
{
"result": {"event": "test_event", "event_data": event_data2[1]},
"variables": variable_repeat[1],
},
],
"1/repeat/sequence/1": [{}, {}],
"1/repeat/sequence/1/repeat/sequence/0": [
{"result": {"event": "test_event", "event_data": event_data2[0]}},
{
"result": {"event": "test_event", "event_data": event_data2[1]},
"variables": variable_repeat[1],
},
{
"result": {"event": "test_event", "event_data": event_data2[0]},
"variables": variable_repeat[0],
},
{"result": {"event": "test_event", "event_data": event_data2[1]}},
],
"1/repeat/sequence/2": [
{"result": {"event": "test_event", "event_data": event_data2[0]}},
{"result": {"event": "test_event", "event_data": event_data2[1]}},
],
"2": [{"result": {"event": "test_event", "event_data": event_data1}}],
}
assert_action_trace(expected_trace)
async def test_choose_warning(hass, caplog):
"""Test warning on choose."""
@@ -1578,9 +2027,6 @@ async def test_choose(hass, caplog, var, result):
}
)
# Prepare tracing
trace.trace_get()
script_obj = script.Script(hass, sequence, "Test Name", "test_domain")
await script_obj.async_run(MappingProxyType({"var": var}), Context())
@@ -1593,19 +2039,29 @@ async def test_choose(hass, caplog, var, result):
expected_choice = "default"
assert f"{alias}: {expected_choice}: Executing step {aliases[var]}" in caplog.text
expected_trace = {"0": [{}]}
if var >= 1:
expected_trace["0/choose/0"] = [{}]
expected_trace["0/choose/0/conditions/0"] = [{}]
if var >= 2:
expected_trace["0/choose/1"] = [{}]
expected_trace["0/choose/1/conditions/0"] = [{}]
if var == 1:
expected_trace["0/choose/0/sequence/0"] = [{}]
if var == 2:
expected_trace["0/choose/1/sequence/0"] = [{}]
expected_choice = var - 1
if var == 3:
expected_trace["0/default/sequence/0"] = [{}]
expected_choice = "default"
expected_trace = {"0": [{"result": {"choice": expected_choice}}]}
if var >= 1:
expected_trace["0/choose/0"] = [{"result": {"result": var == 1}}]
expected_trace["0/choose/0/conditions/0"] = [{"result": {"result": var == 1}}]
if var >= 2:
expected_trace["0/choose/1"] = [{"result": {"result": var == 2}}]
expected_trace["0/choose/1/conditions/0"] = [{"result": {"result": var == 2}}]
if var == 1:
expected_trace["0/choose/0/sequence/0"] = [
{"result": {"event": "test_event", "event_data": {"choice": "first"}}}
]
if var == 2:
expected_trace["0/choose/1/sequence/0"] = [
{"result": {"event": "test_event", "event_data": {"choice": "second"}}}
]
if var == 3:
expected_trace["0/default/sequence/0"] = [
{"result": {"event": "test_event", "event_data": {"choice": "default"}}}
]
assert_action_trace(expected_trace)
@@ -1668,6 +2124,25 @@ async def test_propagate_error_service_not_found(hass):
assert len(events) == 0
assert not script_obj.is_running
expected_trace = {
"0": [
{
"error_type": ServiceNotFound,
"result": {
"limit": 10,
"params": {
"domain": "test",
"service": "script",
"service_data": {},
"target": {},
},
"running_script": False,
},
}
],
}
assert_action_trace(expected_trace)
async def test_propagate_error_invalid_service_data(hass):
"""Test that a script aborts when we send invalid service data."""
@@ -1686,6 +2161,25 @@ async def test_propagate_error_invalid_service_data(hass):
assert len(calls) == 0
assert not script_obj.is_running
expected_trace = {
"0": [
{
"error_type": vol.MultipleInvalid,
"result": {
"limit": 10,
"params": {
"domain": "test",
"service": "script",
"service_data": {"text": 1},
"target": {},
},
"running_script": False,
},
}
],
}
assert_action_trace(expected_trace)
async def test_propagate_error_service_exception(hass):
"""Test that a script aborts when a service throws an exception."""
@@ -1708,6 +2202,25 @@ async def test_propagate_error_service_exception(hass):
assert len(events) == 0
assert not script_obj.is_running
expected_trace = {
"0": [
{
"error_type": ValueError,
"result": {
"limit": 10,
"params": {
"domain": "test",
"service": "script",
"service_data": {},
"target": {},
},
"running_script": False,
},
}
],
}
assert_action_trace(expected_trace)
async def test_referenced_entities(hass):
"""Test referenced entities."""
@@ -2151,6 +2664,11 @@ async def test_shutdown_at(hass, caplog):
assert not script_obj.is_running
assert "Stopping scripts running at shutdown: test script" in caplog.text
expected_trace = {
"0": [{"result": {"delay": 120.0, "done": False}}],
}
assert_action_trace(expected_trace)
async def test_shutdown_after(hass, caplog):
"""Test stopping scripts at shutdown."""
@@ -2182,6 +2700,11 @@ async def test_shutdown_after(hass, caplog):
in caplog.text
)
expected_trace = {
"0": [{"result": {"delay": 120.0, "done": False}}],
}
assert_action_trace(expected_trace)
async def test_update_logger(hass, caplog):
"""Test updating logger."""
@@ -2240,6 +2763,26 @@ async def test_update_logger(hass, caplog):
assert mock_calls[0].data["value"] == "value"
assert f"Executing step {alias}" in caplog.text
expected_trace = {
"0": [{}],
"1": [
{
"result": {
"limit": SERVICE_CALL_LIMIT,
"params": {
"domain": "test",
"service": "script",
"service_data": {"value": "value"},
"target": {},
},
"running_script": False,
},
"variables": {"variable": "value"},
}
],
}
assert_action_trace(expected_trace)
async def test_set_redefines_variable(hass, caplog):
"""Test setting variables based on their current value."""
@@ -2261,6 +2804,42 @@ async def test_set_redefines_variable(hass, caplog):
assert mock_calls[0].data["value"] == 1
assert mock_calls[1].data["value"] == 2
expected_trace = {
"0": [{}],
"1": [
{
"result": {
"limit": SERVICE_CALL_LIMIT,
"params": {
"domain": "test",
"service": "script",
"service_data": {"value": 1},
"target": {},
},
"running_script": False,
},
"variables": {"variable": "1"},
}
],
"2": [{}],
"3": [
{
"result": {
"limit": SERVICE_CALL_LIMIT,
"params": {
"domain": "test",
"service": "script",
"service_data": {"value": 2},
"target": {},
},
"running_script": False,
},
"variables": {"variable": 2},
}
],
}
assert_action_trace(expected_trace)
async def test_validate_action_config(hass):
"""Validate action config."""