Enhance check_config script with JSON output and fail on warnings (#152575)

Author: Benjamin Michaelis (committed by GitHub)
Date: 2025-10-19 12:55:55 -07:00
Parent: fd08c55b79
Commit: acead56bd5
2 changed files with 510 additions and 3 deletions
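The two new flags compose: --json replaces the human-readable report on stdout with a single JSON document, and --fail-on-warnings extends the non-zero exit code to warning-only results. As a quick orientation before the diff, here is a minimal sketch of driving the script programmatically, mirroring how the new tests below call it (module path per Home Assistant's layout; the config directory is illustrative):

from homeassistant.scripts import check_config

# New flags added by this commit: --json and --fail-on-warnings.
exit_code = check_config.run(["--config", "config", "--json", "--fail-on-warnings"])

# 0 = clean; 1 = errors present, or warnings present with --fail-on-warnings.
print("config OK" if exit_code == 0 else "config has problems")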

File: homeassistant/scripts/check_config.py

@@ -7,6 +7,7 @@ import asyncio
 from collections import OrderedDict
 from collections.abc import Callable, Mapping, Sequence
 from glob import glob
+import json
 import logging
 import os
 from typing import Any
@@ -82,17 +83,60 @@ def run(script_args: list) -> int:
     parser.add_argument(
         "-s", "--secrets", action="store_true", help="Show secret information"
     )
+    parser.add_argument("--json", action="store_true", help="Output JSON format")
+    parser.add_argument(
+        "--fail-on-warnings",
+        action="store_true",
+        help="Exit non-zero if warnings are present",
+    )
-    args, unknown = parser.parse_known_args()
+    args, unknown = parser.parse_known_args(script_args)
     if unknown:
         print(color("red", "Unknown arguments:", ", ".join(unknown)))
     config_dir = os.path.join(os.getcwd(), args.config)
-    print(color("bold", "Testing configuration at", config_dir))
+    if not args.json:
+        print(color("bold", "Testing configuration at", config_dir))
     res = check(config_dir, args.secrets)
+
+    # JSON output branch
+    if args.json:
+        json_object = {
+            "config_dir": config_dir,
+            "total_errors": sum(len(errors) for errors in res["except"].values()),
+            "total_warnings": sum(len(warnings) for warnings in res["warn"].values()),
+            "errors": res["except"],
+            "warnings": res["warn"],
+            "components": list(res["components"].keys()),
+        }
+
+        # Include secrets information if requested
+        if args.secrets:
+            # Build list of missing secrets (referenced but not found)
+            missing_secrets = [
+                key for key, val in res["secrets"].items() if val is None
+            ]
+            # Build list of used secrets (found and used)
+            used_secrets = [
+                key for key, val in res["secrets"].items() if val is not None
+            ]
+            json_object["secrets"] = {
+                "secret_files": res["secret_cache"],
+                "used_secrets": used_secrets,
+                "missing_secrets": missing_secrets,
+                "total_secrets": len(res["secrets"]),
+                "total_missing": len(missing_secrets),
+            }
+
+        print(json.dumps(json_object, indent=2))
+        # Determine exit code for JSON mode
+        return 1 if res["except"] or (args.fail_on_warnings and res["warn"]) else 0
+
     domain_info: list[str] = []
     if args.info:
         domain_info = args.info.split(",")
@@ -165,7 +209,8 @@ def run(script_args: list) -> int:
                 continue
             print(" -", skey + ":", sval)

-    return len(res["except"])
+    # Determine final exit code
+    return 1 if res["except"] or (args.fail_on_warnings and res["warn"]) else 0


 def check(config_dir, secrets=False):
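For reference, the report printed by the JSON branch above has the following shape. A sketch as a Python literal, with every value invented for illustration; the secrets block is attached only when -s/--secrets is passed:

# Illustrative --json report (all values invented):
example_report = {
    "config_dir": "/config",
    "total_errors": 1,
    "total_warnings": 1,
    "errors": {"light": ["example error message"]},
    "warnings": {"automation": ["example warning message"]},
    "components": ["homeassistant", "http", "light"],
    "secrets": {  # only present with -s/--secrets
        "secret_files": {"/config/secrets.yaml": {"api_key": "..."}},
        "used_secrets": ["api_key"],
        "missing_secrets": ["db_password"],
        "total_secrets": 2,
        "total_missing": 1,
    },
}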

File: tests/scripts/test_check_config.py

@@ -1,6 +1,8 @@
"""Test check_config script.""" """Test check_config script."""
import json
import logging import logging
import os
from unittest.mock import patch from unittest.mock import patch
import pytest import pytest
@@ -180,3 +182,463 @@ def test_bootstrap_error() -> None:
assert res["secrets"] == {} assert res["secrets"] == {}
assert res["warn"] == {} assert res["warn"] == {}
assert res["yaml_files"] == {} assert res["yaml_files"] == {}
+
+
+@pytest.mark.parametrize("hass_config_yaml", [BASE_CONFIG])
+@pytest.mark.usefixtures("mock_is_file", "mock_hass_config_yaml")
+def test_run_json_flag_only() -> None:
+    """Test that --json flag works independently."""
+    with (
+        patch("builtins.print") as mock_print,
+        patch.object(check_config, "check") as mock_check,
+    ):
+        mock_check.return_value = {
+            "except": {"domain1": ["error1", "error2"]},
+            "warn": {"domain2": ["warning1"]},
+            "components": {"homeassistant": {}, "light": {}, "http": {}},
+            "secrets": {},
+            "secret_cache": {},
+            "yaml_files": {},
+        }
+        exit_code = check_config.run(["--json"])
+
+        # Should exit with code 1 (1 domain with errors)
+        assert exit_code == 1
+
+        # Should have printed JSON
+        assert mock_print.call_count == 1
+        json_output = mock_print.call_args[0][0]
+
+        # Verify it's valid JSON
+        parsed_json = json.loads(json_output)
+
+        # Verify JSON structure
+        assert "config_dir" in parsed_json
+        assert "total_errors" in parsed_json
+        assert "total_warnings" in parsed_json
+        assert "errors" in parsed_json
+        assert "warnings" in parsed_json
+        assert "components" in parsed_json
+
+        # Verify JSON content
+        assert parsed_json["total_errors"] == 2  # 2 error messages
+        assert parsed_json["total_warnings"] == 1  # 1 warning message
+        assert parsed_json["errors"] == {"domain1": ["error1", "error2"]}
+        assert parsed_json["warnings"] == {"domain2": ["warning1"]}
+        assert set(parsed_json["components"]) == {"homeassistant", "light", "http"}
+
+
+@pytest.mark.parametrize("hass_config_yaml", [BASE_CONFIG])
+@pytest.mark.usefixtures("mock_is_file", "mock_hass_config_yaml")
+def test_run_fail_on_warnings_flag_only() -> None:
+    """Test that --fail-on-warnings flag works independently."""
+    # Test with warnings only
+    with patch.object(check_config, "check") as mock_check:
+        mock_check.return_value = {
+            "except": {},
+            "warn": {"light": ["warning message"]},
+            "components": {"homeassistant": {}},
+            "secrets": {},
+            "secret_cache": {},
+            "yaml_files": {},
+        }
+        exit_code = check_config.run(["--fail-on-warnings"])
+        assert exit_code == 1  # Should exit non-zero due to warnings
+
+    # Test with no warnings or errors
+    with patch.object(check_config, "check") as mock_check:
+        mock_check.return_value = {
+            "except": {},
+            "warn": {},
+            "components": {"homeassistant": {}},
+            "secrets": {},
+            "secret_cache": {},
+            "yaml_files": {},
+        }
+        exit_code = check_config.run(["--fail-on-warnings"])
+        assert exit_code == 0  # Should exit zero when no warnings/errors
+
+    # Test with both errors and warnings
+    with patch.object(check_config, "check") as mock_check:
+        mock_check.return_value = {
+            "except": {"domain1": ["error"]},
+            "warn": {"domain2": ["warning"]},
+            "components": {"homeassistant": {}},
+            "secrets": {},
+            "secret_cache": {},
+            "yaml_files": {},
+        }
+        exit_code = check_config.run(["--fail-on-warnings"])
+        assert exit_code == 1  # Errors and warnings both force a non-zero exit
+
+
+@pytest.mark.parametrize("hass_config_yaml", [BASE_CONFIG])
+@pytest.mark.usefixtures("mock_is_file", "mock_hass_config_yaml")
+def test_run_json_output_structure() -> None:
+    """Test JSON output contains all required fields with correct types."""
+    with (
+        patch("builtins.print") as mock_print,
+        patch.object(check_config, "check") as mock_check,
+    ):
+        mock_check.return_value = {
+            "except": {"domain1": ["error1", {"config": "bad"}]},
+            "warn": {"domain2": ["warning1", {"config": "deprecated"}]},
+            "components": {"homeassistant": {}, "light": {}, "automation": {}},
+            "secrets": {},
+            "secret_cache": {},
+            "yaml_files": {},
+        }
+        exit_code = check_config.run(["--json", "--config", "/test/path"])
+        json_output = mock_print.call_args[0][0]
+        parsed_json = json.loads(json_output)
+
+        # Should exit with code 1 due to errors
+        assert exit_code == 1
+
+        # Test all required fields are present
+        required_fields = [
+            "config_dir",
+            "total_errors",
+            "total_warnings",
+            "errors",
+            "warnings",
+            "components",
+        ]
+        for field in required_fields:
+            assert field in parsed_json, f"Missing required field: {field}"
+
+        # Test field types and values
+        assert isinstance(parsed_json["config_dir"], str)
+        assert isinstance(parsed_json["total_errors"], int)
+        assert isinstance(parsed_json["total_warnings"], int)
+        assert isinstance(parsed_json["errors"], dict)
+        assert isinstance(parsed_json["warnings"], dict)
+        assert isinstance(parsed_json["components"], list)
+
+        # Test counts are correct
+        assert parsed_json["total_errors"] == 2  # 2 items in domain1 list
+        assert parsed_json["total_warnings"] == 2  # 2 items in domain2 list
+
+        # Test components is a list of strings
+        assert all(isinstance(comp, str) for comp in parsed_json["components"])
+        assert set(parsed_json["components"]) == {
+            "homeassistant",
+            "light",
+            "automation",
+        }
+
+
+def test_run_exit_code_logic() -> None:
+    """Test exit code logic for all flag combinations."""
+    test_cases = [
+        # (errors, warnings, flags, expected_exit_code)
+        ({}, {}, [], 0),  # No errors, no warnings, no flags
+        ({}, {}, ["--json"], 0),  # No errors, no warnings, json only
+        (
+            {},
+            {},
+            ["--fail-on-warnings"],
+            0,
+        ),  # No errors, no warnings, fail-on-warnings only
+        (
+            {},
+            {},
+            ["--json", "--fail-on-warnings"],
+            0,
+        ),  # No errors, no warnings, both flags
+        (
+            {},
+            {"domain": ["warning"]},
+            [],
+            0,
+        ),  # Warnings only, no flags (backwards compatible)
+        ({}, {"domain": ["warning"]}, ["--json"], 0),  # Warnings only, json only
+        (
+            {},
+            {"domain": ["warning"]},
+            ["--fail-on-warnings"],
+            1,
+        ),  # Warnings only, fail-on-warnings
+        (
+            {},
+            {"domain": ["warning"]},
+            ["--json", "--fail-on-warnings"],
+            1,
+        ),  # Warnings only, both flags
+        ({"domain": ["error"]}, {}, [], 1),  # Errors only, no flags
+        ({"domain": ["error"]}, {}, ["--json"], 1),  # Errors only, json only
+        (
+            {"domain": ["error"]},
+            {},
+            ["--fail-on-warnings"],
+            1,
+        ),  # Errors only, fail-on-warnings
+        (
+            {"domain": ["error"]},
+            {},
+            ["--json", "--fail-on-warnings"],
+            1,
+        ),  # Errors only, both flags
+        ({"domain": ["error"]}, {"domain2": ["warning"]}, [], 1),  # Both, no flags
+        (
+            {"domain": ["error"]},
+            {"domain2": ["warning"]},
+            ["--json"],
+            1,
+        ),  # Both, json only
+        (
+            {"domain": ["error"]},
+            {"domain2": ["warning"]},
+            ["--fail-on-warnings"],
+            1,
+        ),  # Both, fail-on-warnings
+        (
+            {"domain": ["error"]},
+            {"domain2": ["warning"]},
+            ["--json", "--fail-on-warnings"],
+            1,
+        ),  # Both, both flags
+        ({"d1": ["e1"], "d2": ["e2"]}, {}, [], 1),  # Multiple error domains, no flags
+        (
+            {"d1": ["e1"], "d2": ["e2"]},
+            {"d3": ["w1"]},
+            ["--fail-on-warnings"],
+            1,
+        ),  # Multiple errors + warnings
+    ]
+
+    for errors, warnings, flags, expected_exit in test_cases:
+        with (
+            patch("builtins.print"),
+            patch.object(check_config, "check") as mock_check,
+        ):
+            mock_check.return_value = {
+                "except": errors,
+                "warn": warnings,
+                "components": {"homeassistant": {}},
+                "secrets": {},
+                "secret_cache": {},
+                "yaml_files": {},
+            }
+            exit_code = check_config.run(flags)
+            assert exit_code == expected_exit, (
+                f"Failed for errors={errors}, warnings={warnings}, flags={flags}. "
+                f"Expected {expected_exit}, got {exit_code}"
+            )
+
+
+@pytest.mark.parametrize("hass_config_yaml", [BASE_CONFIG])
+@pytest.mark.usefixtures("mock_is_file", "mock_hass_config_yaml")
+def test_run_human_readable_still_works() -> None:
+    """Test that human-readable output still works without JSON flag."""
+    with (
+        patch("builtins.print") as mock_print,
+        patch.object(check_config, "check") as mock_check,
+    ):
+        mock_check.return_value = {
+            "except": {},
+            "warn": {},
+            "components": {"homeassistant": {}},
+            "secrets": {},
+            "secret_cache": {},
+            "yaml_files": {},
+        }
+        check_config.run([])
+
+        # Should print the "Testing configuration at" message
+        printed_outputs = [
+            call[0][0] if call[0] else "" for call in mock_print.call_args_list
+        ]
+        testing_message_found = any(
+            "Testing configuration at" in output for output in printed_outputs
+        )
+        assert testing_message_found, (
+            "Human-readable 'Testing configuration at' message not found"
+        )
+
+
+def test_run_with_config_path() -> None:
+    """Test that config path is correctly included in JSON output."""
+    with (
+        patch("builtins.print") as mock_print,
+        patch.object(check_config, "check") as mock_check,
+    ):
+        mock_check.return_value = {
+            "except": {},
+            "warn": {},
+            "components": {"homeassistant": {}},
+            "secrets": {},
+            "secret_cache": {},
+            "yaml_files": {},
+        }
+        test_config_path = "/custom/config/path"
+        check_config.run(["--json", "--config", test_config_path])
+
+        json_output = mock_print.call_args[0][0]
+        parsed_json = json.loads(json_output)
+
+        # The config_dir should include the full path
+        expected_path = os.path.join(os.getcwd(), test_config_path)
+        assert parsed_json["config_dir"] == expected_path
+
+
+# Flag Interaction Tests
+def test_unknown_arguments_with_json() -> None:
+    """Test that unknown arguments are handled properly with JSON flag."""
+    with (
+        patch("builtins.print") as mock_print,
+        patch.object(check_config, "check") as mock_check,
+    ):
+        mock_check.return_value = {
+            "except": {},
+            "warn": {},
+            "components": {"homeassistant": {}},
+            "secrets": {},
+            "secret_cache": {},
+            "yaml_files": {},
+        }
+        check_config.run(["--json", "--unknown-flag", "value"])
+
+        # Should still print unknown argument warning AND JSON
+        assert mock_print.call_count == 2
+
+        # First call should be the unknown argument warning
+        unknown_warning = mock_print.call_args_list[0][0][0]
+        assert "Unknown arguments" in unknown_warning
+        assert "unknown-flag" in unknown_warning
+
+        # Second call should be valid JSON
+        json_output = mock_print.call_args_list[1][0][0]
+        parsed_json = json.loads(json_output)
+        assert "config_dir" in parsed_json
+
+
+@pytest.mark.parametrize("hass_config_yaml", [BASE_CONFIG])
+@pytest.mark.usefixtures("mock_is_file", "mock_hass_config_yaml")
+def test_info_flag_with_json() -> None:
+    """Test how --info flag interacts with --json."""
+    with (
+        patch("builtins.print") as mock_print,
+        patch.object(check_config, "check") as mock_check,
+    ):
+        mock_check.return_value = {
+            "except": {},
+            "warn": {},
+            "components": {"homeassistant": {}, "light": {"platform": "demo"}},
+            "secrets": {},
+            "secret_cache": {},
+            "yaml_files": {},
+        }
+        # Test --json with --info - JSON should take precedence
+        exit_code = check_config.run(["--json", "--info", "light"])
+        assert exit_code == 0
+        assert mock_print.call_count == 1
+
+        # Should be JSON output, not info output
+        json_output = json.loads(mock_print.call_args[0][0])
+        assert "config_dir" in json_output
+        assert "components" in json_output
+        assert "light" in json_output["components"]
+
+
+def test_config_flag_variations() -> None:
+    """Test different ways to specify config directory."""
+    test_cases = [
+        (["-c", "/test/path"], "/test/path"),
+        (["--config", "/test/path"], "/test/path"),
+        (["--json", "-c", "relative/path"], "relative/path"),
+        (["--config", ".", "--json"], "."),
+    ]
+
+    for flags, expected_config_part in test_cases:
+        with (
+            patch("builtins.print") as mock_print,
+            patch.object(check_config, "check") as mock_check,
+        ):
+            mock_check.return_value = {
+                "except": {},
+                "warn": {},
+                "components": {"homeassistant": {}},
+                "secrets": {},
+                "secret_cache": {},
+                "yaml_files": {},
+            }
+            check_config.run(flags)
+
+            if "--json" in flags:
+                json_output = json.loads(mock_print.call_args[0][0])
+                expected_full_path = os.path.join(os.getcwd(), expected_config_part)
+                assert json_output["config_dir"] == expected_full_path
+
+
+def test_multiple_config_flags() -> None:
+    """Test behavior with multiple config directory specifications."""
+    with (
+        patch("builtins.print") as mock_print,
+        patch.object(check_config, "check") as mock_check,
+    ):
+        mock_check.return_value = {
+            "except": {},
+            "warn": {},
+            "components": {"homeassistant": {}},
+            "secrets": {},
+            "secret_cache": {},
+            "yaml_files": {},
+        }
+        # Last config flag should win
+        check_config.run(
+            ["--json", "--config", "/first/path", "--config", "/second/path"]
+        )
+
+        json_output = json.loads(mock_print.call_args[0][0])
+        expected_path = os.path.join(os.getcwd(), "/second/path")
+        assert json_output["config_dir"] == expected_path
+
+
+def test_fail_on_warnings_with_json_combinations() -> None:
+    """Test --fail-on-warnings with --json in various scenarios."""
+    test_scenarios = [
+        # (errors, warnings, expected_exit_code)
+        ({}, {}, 0),
+        ({"domain1": ["error"]}, {}, 1),
+        ({}, {"domain1": ["warning"]}, 1),  # With --fail-on-warnings
+        ({"d1": ["e1"]}, {"d2": ["w1"]}, 1),  # Errors still take precedence
+        ({"d1": ["e1"], "d2": ["e2"]}, {"d3": ["w1"]}, 1),  # Multiple errors > warnings
+    ]
+
+    for errors, warnings, expected_exit in test_scenarios:
+        with (
+            patch("builtins.print") as mock_print,
+            patch.object(check_config, "check") as mock_check,
+        ):
+            mock_check.return_value = {
+                "except": errors,
+                "warn": warnings,
+                "components": {"homeassistant": {}},
+                "secrets": {},
+                "secret_cache": {},
+                "yaml_files": {},
+            }
+            exit_code = check_config.run(["--json", "--fail-on-warnings"])
+            assert exit_code == expected_exit
+
+            # Should still output valid JSON
+            json_output = json.loads(mock_print.call_args[0][0])
+            assert json_output["total_errors"] == sum(len(e) for e in errors.values())
+            assert json_output["total_warnings"] == sum(
+                len(w) for w in warnings.values()
+            )
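
Taken together, the flags make the script usable as a CI gate: parse the JSON report from stdout and propagate the exit code. A minimal sketch, assuming the hass entry point is installed and on PATH and an illustrative /config directory:

import json
import subprocess

# Run the checker in strict, machine-readable mode.
proc = subprocess.run(
    [
        "hass", "--script", "check_config",
        "--config", "/config", "--json", "--fail-on-warnings",
    ],
    capture_output=True,
    text=True,
)

# stdout is a single JSON document (unknown-argument warnings aside).
report = json.loads(proc.stdout)
print(f"{report['total_errors']} errors, {report['total_warnings']} warnings")

# Exit code is 1 on errors, and on warnings too because of --fail-on-warnings.
raise SystemExit(proc.returncode)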