Merge remote-tracking branch 'upstream/dev' into integration

J. Nick Koston 2025-07-18 07:58:14 -10:00
commit 10605e93cd
8 changed files with 838 additions and 10 deletions

View File

@@ -305,8 +305,7 @@ jobs:
 const { data: codeownersFile } = await github.rest.repos.getContent({
   owner,
   repo,
-  path: '.github/CODEOWNERS',
-  ref: context.payload.pull_request.head.sha
+  path: 'CODEOWNERS',
 });
 const codeownersContent = Buffer.from(codeownersFile.content, 'base64').toString('utf8');

View File

@@ -0,0 +1,264 @@
# This workflow automatically requests reviews from codeowners when:
# 1. A PR is opened, reopened, or synchronized (updated)
# 2. A PR is marked as ready for review
#
# It reads the CODEOWNERS file and matches all changed files in the PR against
# the codeowner patterns, then requests reviews from the appropriate owners
# while avoiding duplicate requests for users who have already been requested
# or have already reviewed the PR.
name: Request Codeowner Reviews
on:
# Needs to be pull_request_target to get write permissions
pull_request_target:
types: [opened, reopened, synchronize, ready_for_review]
permissions:
pull-requests: write
contents: read
jobs:
request-codeowner-reviews:
name: Run
if: ${{ !github.event.pull_request.draft }}
runs-on: ubuntu-latest
steps:
- name: Request reviews from component codeowners
uses: actions/github-script@v7.0.1
with:
script: |
const owner = context.repo.owner;
const repo = context.repo.repo;
const pr_number = context.payload.pull_request.number;
console.log(`Processing PR #${pr_number} for codeowner review requests`);
try {
// Get the list of changed files in this PR
const { data: files } = await github.rest.pulls.listFiles({
owner,
repo,
pull_number: pr_number
});
const changedFiles = files.map(file => file.filename);
console.log(`Found ${changedFiles.length} changed files`);
if (changedFiles.length === 0) {
console.log('No changed files found, skipping codeowner review requests');
return;
}
// Fetch CODEOWNERS file from root
const { data: codeownersFile } = await github.rest.repos.getContent({
owner,
repo,
path: 'CODEOWNERS',
ref: context.payload.pull_request.base.sha
});
const codeownersContent = Buffer.from(codeownersFile.content, 'base64').toString('utf8');
// Parse CODEOWNERS file to extract all patterns and their owners
const codeownersLines = codeownersContent.split('\n')
.map(line => line.trim())
.filter(line => line && !line.startsWith('#'));
const codeownersPatterns = [];
// Convert CODEOWNERS pattern to regex (robust glob handling)
function globToRegex(pattern) {
// Escape regex special characters except for glob wildcards
let regexStr = pattern
.replace(/([.+^=!:${}()|[\]\\])/g, '\\$1') // escape regex chars
.replace(/\*\*/g, '\x00') // stash globstar so the single-star rule can't rewrite its '.*'
.replace(/\*/g, '[^/]*') // single star: stays within one path segment
.replace(/\?/g, '.') // question mark
.replace(/\x00/g, '.*'); // globstar: may cross '/' separators
return new RegExp('^' + regexStr + '$');
}
// Helper function to create comment body
function createCommentBody(reviewersList, teamsList, matchedFileCount, isSuccessful = true) {
const reviewerMentions = reviewersList.map(r => `@${r}`);
const teamMentions = teamsList.map(t => `@${owner}/${t}`);
const allMentions = [...reviewerMentions, ...teamMentions].join(', ');
if (isSuccessful) {
return `👋 Hi there! I've automatically requested reviews from codeowners based on the files changed in this PR.\n\n${allMentions} - You've been requested to review this PR as codeowner(s) of ${matchedFileCount} file(s) that were modified. Thanks for your time! 🙏`;
} else {
return `👋 Hi there! This PR modifies ${matchedFileCount} file(s) with codeowners.\n\n${allMentions} - As codeowner(s) of the affected files, your review would be appreciated! 🙏\n\n_Note: Automatic review request may have failed, but you're still welcome to review._`;
}
}
for (const line of codeownersLines) {
const parts = line.split(/\s+/);
if (parts.length < 2) continue;
const pattern = parts[0];
const owners = parts.slice(1);
// Use robust glob-to-regex conversion
const regex = globToRegex(pattern);
codeownersPatterns.push({ pattern, regex, owners });
}
console.log(`Parsed ${codeownersPatterns.length} codeowner patterns`);
// Match changed files against CODEOWNERS patterns
const matchedOwners = new Set();
const matchedTeams = new Set();
const fileMatches = new Map(); // Track which files matched which patterns
for (const file of changedFiles) {
for (const { pattern, regex, owners } of codeownersPatterns) {
if (regex.test(file)) {
console.log(`File '${file}' matches pattern '${pattern}' with owners: ${owners.join(', ')}`);
if (!fileMatches.has(file)) {
fileMatches.set(file, []);
}
fileMatches.get(file).push({ pattern, owners });
// Add owners to the appropriate set (remove @ prefix)
for (const owner of owners) {
const cleanOwner = owner.startsWith('@') ? owner.slice(1) : owner;
if (cleanOwner.includes('/')) {
// Team mention (org/team-name)
const teamName = cleanOwner.split('/')[1];
matchedTeams.add(teamName);
} else {
// Individual user
matchedOwners.add(cleanOwner);
}
}
}
}
}
if (matchedOwners.size === 0 && matchedTeams.size === 0) {
console.log('No codeowners found for any changed files');
return;
}
// Remove the PR author from reviewers
const prAuthor = context.payload.pull_request.user.login;
matchedOwners.delete(prAuthor);
// Get current reviewers to avoid duplicate requests (but still mention them)
const { data: prData } = await github.rest.pulls.get({
owner,
repo,
pull_number: pr_number
});
const currentReviewers = new Set();
const currentTeams = new Set();
if (prData.requested_reviewers) {
prData.requested_reviewers.forEach(reviewer => {
currentReviewers.add(reviewer.login);
});
}
if (prData.requested_teams) {
prData.requested_teams.forEach(team => {
currentTeams.add(team.slug);
});
}
// Check for completed reviews to avoid re-requesting users who have already reviewed
const { data: reviews } = await github.rest.pulls.listReviews({
owner,
repo,
pull_number: pr_number
});
const reviewedUsers = new Set();
reviews.forEach(review => {
reviewedUsers.add(review.user.login);
});
// Remove only users who have already submitted reviews (not just requested reviewers)
reviewedUsers.forEach(reviewer => {
matchedOwners.delete(reviewer);
});
// For teams, we'll still remove already requested teams to avoid API errors
currentTeams.forEach(team => {
matchedTeams.delete(team);
});
const reviewersList = Array.from(matchedOwners);
const teamsList = Array.from(matchedTeams);
if (reviewersList.length === 0 && teamsList.length === 0) {
console.log('No eligible reviewers found (all may already be requested or reviewed)');
return;
}
const totalReviewers = reviewersList.length + teamsList.length;
console.log(`Requesting reviews from ${reviewersList.length} users and ${teamsList.length} teams for ${fileMatches.size} matched files`);
// Request reviews
try {
const requestParams = {
owner,
repo,
pull_number: pr_number
};
// Filter out users who are already requested reviewers for the API call
const newReviewers = reviewersList.filter(reviewer => !currentReviewers.has(reviewer));
const newTeams = teamsList.filter(team => !currentTeams.has(team));
if (newReviewers.length > 0) {
requestParams.reviewers = newReviewers;
}
if (newTeams.length > 0) {
requestParams.team_reviewers = newTeams;
}
// Only make the API call if there are new reviewers to request
if (newReviewers.length > 0 || newTeams.length > 0) {
await github.rest.pulls.requestReviewers(requestParams);
console.log(`Successfully requested reviews from ${newReviewers.length} new users and ${newTeams.length} new teams`);
} else {
console.log('All codeowners are already requested reviewers or have reviewed');
}
// Add a comment to the PR mentioning what happened (include all matched codeowners)
const commentBody = createCommentBody(reviewersList, teamsList, fileMatches.size, true);
await github.rest.issues.createComment({
owner,
repo,
issue_number: pr_number,
body: commentBody
});
} catch (error) {
if (error.status === 422) {
console.log('Some reviewers may already be requested or unavailable:', error.message);
// Try to add a comment even if review request failed
const commentBody = createCommentBody(reviewersList, teamsList, fileMatches.size, false);
try {
await github.rest.issues.createComment({
owner,
repo,
issue_number: pr_number,
body: commentBody
});
} catch (commentError) {
console.log('Failed to add comment:', commentError.message);
}
} else {
throw error;
}
}
} catch (error) {
console.log('Failed to process codeowner review requests:', error.message);
console.error(error);
}
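The glob conversion above is the crux of the file matching: escape regex metacharacters, rewrite the wildcards, anchor the result. The same logic can be sketched in Python for reference (the helper name and the NUL placeholder are choices made for this illustration, not part of the commit):

import re

def glob_to_regex(pattern: str) -> re.Pattern:
    """Convert a CODEOWNERS-style glob into an anchored regular expression."""
    regex_str = re.escape(pattern)                  # '*' -> r'\*', '?' -> r'\?', '.' -> r'\.'
    regex_str = regex_str.replace(r"\*\*", "\x00")  # stash globstar before the single-star rule
    regex_str = regex_str.replace(r"\*", "[^/]*")   # single star: stays within one path segment
    regex_str = regex_str.replace(r"\?", ".")       # '?': any single character
    regex_str = regex_str.replace("\x00", ".*")     # globstar: may cross '/' separators
    return re.compile("^" + regex_str + "$")

# A single star must not cross directory boundaries; globstar may.
assert glob_to_regex("esphome/components/*/sensor.py").match("esphome/components/dht/sensor.py")
assert not glob_to_regex("esphome/components/*/sensor.py").match("esphome/components/a/b/sensor.py")
assert glob_to_regex("esphome/**").match("esphome/components/dht/sensor.py")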

View File

@@ -0,0 +1,119 @@
# This workflow automatically notifies codeowners when an issue is labeled with component labels.
# It reads the CODEOWNERS file to find the maintainers for the labeled components
# and posts a comment mentioning them to ensure they're aware of the issue.
name: Notify Issue Codeowners
on:
issues:
types: [labeled]
permissions:
issues: write
contents: read
jobs:
notify-codeowners:
name: Run
if: ${{ startsWith(github.event.label.name, format('component{0} ', ':')) }}
runs-on: ubuntu-latest
steps:
- name: Notify codeowners for component issues
uses: actions/github-script@v7.0.1
with:
script: |
const owner = context.repo.owner;
const repo = context.repo.repo;
const issue_number = context.payload.issue.number;
const labelName = context.payload.label.name;
console.log(`Processing issue #${issue_number} with label: ${labelName}`);
// Extract component name from label
const componentName = labelName.replace('component: ', '');
console.log(`Component: ${componentName}`);
try {
// Fetch CODEOWNERS file from root
const { data: codeownersFile } = await github.rest.repos.getContent({
owner,
repo,
path: 'CODEOWNERS'
});
const codeownersContent = Buffer.from(codeownersFile.content, 'base64').toString('utf8');
// Parse CODEOWNERS file to extract component mappings
const codeownersLines = codeownersContent.split('\n')
.map(line => line.trim())
.filter(line => line && !line.startsWith('#'));
let componentOwners = null;
for (const line of codeownersLines) {
const parts = line.split(/\s+/);
if (parts.length < 2) continue;
const pattern = parts[0];
const owners = parts.slice(1);
// Look for component patterns: esphome/components/{component}/*
const componentMatch = pattern.match(/^esphome\/components\/([^\/]+)\/\*$/);
if (componentMatch && componentMatch[1] === componentName) {
componentOwners = owners;
break;
}
}
if (!componentOwners) {
console.log(`No codeowners found for component: ${componentName}`);
return;
}
console.log(`Found codeowners for '${componentName}': ${componentOwners.join(', ')}`);
// Separate users and teams
const userOwners = [];
const teamOwners = [];
for (const owner of componentOwners) {
const cleanOwner = owner.startsWith('@') ? owner.slice(1) : owner;
if (cleanOwner.includes('/')) {
// Team mention (org/team-name)
teamOwners.push(`@${cleanOwner}`);
} else {
// Individual user
userOwners.push(`@${cleanOwner}`);
}
}
// Remove issue author from mentions to avoid self-notification
const issueAuthor = context.payload.issue.user.login;
const filteredUserOwners = userOwners.filter(mention =>
mention !== `@${issueAuthor}`
);
const allMentions = [...filteredUserOwners, ...teamOwners];
if (allMentions.length === 0) {
console.log('No codeowners to notify (issue author is the only codeowner)');
return;
}
// Create comment body
const mentionString = allMentions.join(', ');
const commentBody = `👋 Hey ${mentionString}!\n\nThis issue has been labeled with \`component: ${componentName}\` and you've been identified as a codeowner of this component. Please take a look when you have a chance!\n\nThanks for maintaining this component! 🙏`;
// Post comment
await github.rest.issues.createComment({
owner,
repo,
issue_number: issue_number,
body: commentBody
});
console.log(`Successfully notified codeowners: ${mentionString}`);
} catch (error) {
console.log('Failed to process codeowner notifications:', error.message);
console.error(error);
}
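The component lookup reduces to a single anchored regex over the CODEOWNERS patterns. A minimal Python sketch of the same matching, with a hypothetical CODEOWNERS entry (the function name and the entry are illustrative):

import re

COMPONENT_PATTERN = re.compile(r"^esphome/components/([^/]+)/\*$")

def owners_for_component(codeowners_text: str, component: str) -> list[str] | None:
    """Return the owners listed for esphome/components/<component>/*, if any."""
    for line in codeowners_text.splitlines():
        line = line.strip()
        if not line or line.startswith("#"):
            continue  # skip blank lines and comments, as the workflow does
        pattern, *owners = line.split()
        if not owners:
            continue  # a pattern with no owners is ignored
        match = COMPONENT_PATTERN.match(pattern)
        if match and match.group(1) == component:
            return owners
    return None

assert owners_for_component("esphome/components/dht/* @alice @org/sensor-team", "dht") == ["@alice", "@org/sensor-team"]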

View File

@@ -192,7 +192,7 @@ class WidgetType:
 class NumberType(WidgetType):
     def get_max(self, config: dict):
-        return int(config[CONF_MAX_VALUE] or 100)
+        return int(config.get(CONF_MAX_VALUE, 100))

     def get_min(self, config: dict):
-        return int(config[CONF_MIN_VALUE] or 0)
+        return int(config.get(CONF_MIN_VALUE, 0))
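The change is behavioral, not cosmetic: `config[KEY] or default` raises KeyError when the key is absent and silently discards a legitimate value of 0, whereas `config.get(KEY, default)` falls back only when the key is truly missing. A quick illustration (assuming the stored value, when present, is a number):

CONF_MAX_VALUE = "max_value"

config = {CONF_MAX_VALUE: 0}
assert int(config[CONF_MAX_VALUE] or 100) == 100  # old form: an explicit 0 is lost
assert int(config.get(CONF_MAX_VALUE, 100)) == 0  # new form: an explicit 0 survives
assert int({}.get(CONF_MAX_VALUE, 100)) == 100    # default applies only when the key is absent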

View File

@@ -12,7 +12,7 @@ platformio==6.1.18 # When updating platformio, also update /docker/Dockerfile
 esptool==4.9.0
 click==8.1.7
 esphome-dashboard==20250514.0
-aioesphomeapi==36.0.1
+aioesphomeapi==37.0.0
 zeroconf==0.147.0
 puremagic==1.30
 ruamel.yaml==0.18.14 # dashboard_import

View File

@@ -1495,6 +1495,7 @@ def build_base_class(
     base_class_name: str,
     common_fields: list[descriptor.FieldDescriptorProto],
     messages: list[descriptor.DescriptorProto],
+    message_source_map: dict[str, int],
 ) -> tuple[str, str, str]:
     """Build the base class definition and implementation."""
     public_content = []
@@ -1511,7 +1512,7 @@
     # Determine if any message using this base class needs decoding
     needs_decode = any(
-        get_opt(msg, pb.source, SOURCE_BOTH) in (SOURCE_BOTH, SOURCE_CLIENT)
+        message_source_map.get(msg.name, SOURCE_BOTH) in (SOURCE_BOTH, SOURCE_CLIENT)
         for msg in messages
     )
@@ -1543,6 +1544,7 @@
 def generate_base_classes(
     base_class_groups: dict[str, list[descriptor.DescriptorProto]],
+    message_source_map: dict[str, int],
 ) -> tuple[str, str, str]:
     """Generate all base classes."""
     all_headers = []
@@ -1556,7 +1558,7 @@
         if common_fields:
             # Generate base class
             header, cpp, dump_cpp = build_base_class(
-                base_class_name, common_fields, messages
+                base_class_name, common_fields, messages, message_source_map
             )
             all_headers.append(header)
             all_cpp.append(cpp)
@@ -1567,6 +1569,7 @@
 def build_service_message_type(
     mt: descriptor.DescriptorProto,
+    message_source_map: dict[str, int],
 ) -> tuple[str, str] | None:
     """Builds the service message type."""
     snake = camel_to_snake(mt.name)
@@ -1574,7 +1577,7 @@
     if id_ is None:
         return None
-    source: int = get_opt(mt, pb.source, 0)
+    source: int = message_source_map.get(mt.name, SOURCE_BOTH)
     ifdef: str | None = get_opt(mt, pb.ifdef)
     log: bool = get_opt(mt, pb.log, True)
@@ -1714,7 +1717,9 @@ namespace api {
     # Generate base classes
     if base_class_fields:
-        base_headers, base_cpp, base_dump_cpp = generate_base_classes(base_class_groups)
+        base_headers, base_cpp, base_dump_cpp = generate_base_classes(
+            base_class_groups, message_source_map
+        )
         content += base_headers
         cpp += base_cpp
         dump_cpp += base_dump_cpp
@@ -1832,7 +1837,7 @@ static const char *const TAG = "api.service";
     cpp += "#endif\n\n"
     for mt in file.message_type:
-        obj = build_service_message_type(mt)
+        obj = build_service_message_type(mt, message_source_map)
         if obj is None:
             continue
         hout, cout = obj
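All of these hunks carry out one refactor: resolve each message's `source` option once into a name-to-source dictionary and thread that `message_source_map` through, instead of calling `get_opt(mt, pb.source, ...)` at every site. A rough sketch of how such a map could be built (`get_opt`, `pb.source`, and `SOURCE_BOTH` are the generator's own names; the builder function below is a hypothetical illustration):

def build_message_source_map(message_types, get_opt, pb, source_both: int) -> dict[str, int]:
    """Resolve each message's 'source' option once, keyed by message name."""
    return {mt.name: get_opt(mt, pb.source, source_both) for mt in message_types}

# Call sites then reduce to a dictionary lookup with the same default:
#   source = message_source_map.get(mt.name, SOURCE_BOTH)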

View File

@@ -0,0 +1,207 @@
esphome:
name: scheduler-retry-test
on_boot:
priority: -100
then:
- logger.log: "Starting scheduler retry tests"
# Run all tests sequentially with delays
- script.execute: run_all_tests
host:
api:
logger:
level: VERBOSE
globals:
- id: simple_retry_counter
type: int
initial_value: '0'
- id: backoff_retry_counter
type: int
initial_value: '0'
- id: immediate_done_counter
type: int
initial_value: '0'
- id: cancel_retry_counter
type: int
initial_value: '0'
- id: empty_name_retry_counter
type: int
initial_value: '0'
- id: script_retry_counter
type: int
initial_value: '0'
- id: multiple_same_name_counter
type: int
initial_value: '0'
sensor:
- platform: template
name: Test Sensor
id: test_sensor
lambda: return 1.0;
update_interval: never
script:
- id: run_all_tests
then:
# Test 1: Simple retry
- logger.log: "=== Test 1: Simple retry ==="
- lambda: |-
auto *component = id(test_sensor);
App.scheduler.set_retry(component, "simple_retry", 50, 3,
[](uint8_t retry_countdown) {
id(simple_retry_counter)++;
ESP_LOGI("test", "Simple retry attempt %d (countdown=%d)",
id(simple_retry_counter), retry_countdown);
if (id(simple_retry_counter) >= 2) {
ESP_LOGI("test", "Simple retry succeeded on attempt %d", id(simple_retry_counter));
return RetryResult::DONE;
}
return RetryResult::RETRY;
});
# Test 2: Backoff retry
- logger.log: "=== Test 2: Retry with backoff ==="
- lambda: |-
auto *component = id(test_sensor);
static uint32_t backoff_start_time = 0;
static uint32_t last_attempt_time = 0;
backoff_start_time = millis();
last_attempt_time = backoff_start_time;
App.scheduler.set_retry(component, "backoff_retry", 50, 4,
[](uint8_t retry_countdown) {
id(backoff_retry_counter)++;
uint32_t now = millis();
uint32_t interval = now - last_attempt_time;
last_attempt_time = now;
ESP_LOGI("test", "Backoff retry attempt %d (countdown=%d, interval=%dms)",
id(backoff_retry_counter), retry_countdown, interval);
if (id(backoff_retry_counter) == 1) {
ESP_LOGI("test", "First call was immediate");
} else if (id(backoff_retry_counter) == 2) {
ESP_LOGI("test", "Second call interval: %dms (expected ~50ms)", interval);
} else if (id(backoff_retry_counter) == 3) {
ESP_LOGI("test", "Third call interval: %dms (expected ~100ms)", interval);
} else if (id(backoff_retry_counter) == 4) {
ESP_LOGI("test", "Fourth call interval: %dms (expected ~200ms)", interval);
ESP_LOGI("test", "Backoff retry completed");
return RetryResult::DONE;
}
return RetryResult::RETRY;
}, 2.0f);
# Test 3: Immediate done
- logger.log: "=== Test 3: Immediate done ==="
- lambda: |-
auto *component = id(test_sensor);
App.scheduler.set_retry(component, "immediate_done", 50, 5,
[](uint8_t retry_countdown) {
id(immediate_done_counter)++;
ESP_LOGI("test", "Immediate done retry called (countdown=%d)", retry_countdown);
return RetryResult::DONE;
});
# Test 4: Cancel retry
- logger.log: "=== Test 4: Cancel retry ==="
- lambda: |-
auto *component = id(test_sensor);
App.scheduler.set_retry(component, "cancel_test", 25, 10,
[](uint8_t retry_countdown) {
id(cancel_retry_counter)++;
ESP_LOGI("test", "Cancel test retry attempt %d", id(cancel_retry_counter));
return RetryResult::RETRY;
});
// Cancel it after 100ms
App.scheduler.set_timeout(component, "cancel_timer", 100, []() {
bool cancelled = App.scheduler.cancel_retry(id(test_sensor), "cancel_test");
ESP_LOGI("test", "Retry cancellation result: %s", cancelled ? "true" : "false");
ESP_LOGI("test", "Cancel retry ran %d times before cancellation", id(cancel_retry_counter));
});
# Test 5: Empty name retry
- logger.log: "=== Test 5: Empty name retry ==="
- lambda: |-
auto *component = id(test_sensor);
App.scheduler.set_retry(component, "", 50, 5,
[](uint8_t retry_countdown) {
id(empty_name_retry_counter)++;
ESP_LOGI("test", "Empty name retry attempt %d", id(empty_name_retry_counter));
return RetryResult::RETRY;
});
// Try to cancel after 75ms
App.scheduler.set_timeout(component, "empty_cancel_timer", 75, []() {
bool cancelled = App.scheduler.cancel_retry(id(test_sensor), "");
ESP_LOGI("test", "Empty name retry cancel result: %s",
cancelled ? "true" : "false");
ESP_LOGI("test", "Empty name retry ran %d times", id(empty_name_retry_counter));
});
# Test 6: Component method
- logger.log: "=== Test 6: Component::set_retry method ==="
- lambda: |-
class TestRetryComponent : public Component {
public:
void test_retry() {
this->set_retry(50, 3,
[](uint8_t retry_countdown) {
id(script_retry_counter)++;
ESP_LOGI("test", "Component retry attempt %d", id(script_retry_counter));
if (id(script_retry_counter) >= 2) {
return RetryResult::DONE;
}
return RetryResult::RETRY;
}, 1.5f);
}
};
static TestRetryComponent test_component;
test_component.test_retry();
# Test 7: Multiple same name
- logger.log: "=== Test 7: Multiple retries with same name ==="
- lambda: |-
auto *component = id(test_sensor);
// Set first retry
App.scheduler.set_retry(component, "duplicate_retry", 100, 5,
[](uint8_t retry_countdown) {
id(multiple_same_name_counter) += 1;
ESP_LOGI("test", "First duplicate retry - should not run");
return RetryResult::RETRY;
});
// Set second retry with same name (should cancel first)
App.scheduler.set_retry(component, "duplicate_retry", 50, 3,
[](uint8_t retry_countdown) {
id(multiple_same_name_counter) += 10;
ESP_LOGI("test", "Second duplicate retry attempt (counter=%d)",
id(multiple_same_name_counter));
if (id(multiple_same_name_counter) >= 20) {
return RetryResult::DONE;
}
return RetryResult::RETRY;
});
# Wait for all tests to complete before reporting
- delay: 500ms
# Final report
- logger.log: "=== Retry Test Results ==="
- lambda: |-
ESP_LOGI("test", "Simple retry counter: %d (expected 2)", id(simple_retry_counter));
ESP_LOGI("test", "Backoff retry counter: %d (expected 4)", id(backoff_retry_counter));
ESP_LOGI("test", "Immediate done counter: %d (expected 1)", id(immediate_done_counter));
ESP_LOGI("test", "Cancel retry counter: %d (expected ~3-4)", id(cancel_retry_counter));
ESP_LOGI("test", "Empty name retry counter: %d (expected 1-2)", id(empty_name_retry_counter));
ESP_LOGI("test", "Component retry counter: %d (expected 2)", id(script_retry_counter));
ESP_LOGI("test", "Multiple same name counter: %d (expected 20+)", id(multiple_same_name_counter));
ESP_LOGI("test", "All retry tests completed");

View File

@@ -0,0 +1,234 @@
"""Test scheduler retry functionality."""
import asyncio
import re
import pytest
from .types import APIClientConnectedFactory, RunCompiledFunction
@pytest.mark.asyncio
async def test_scheduler_retry_test(
yaml_config: str,
run_compiled: RunCompiledFunction,
api_client_connected: APIClientConnectedFactory,
) -> None:
"""Test that scheduler retry functionality works correctly."""
# Track test progress
simple_retry_done = asyncio.Event()
backoff_retry_done = asyncio.Event()
immediate_done_done = asyncio.Event()
cancel_retry_done = asyncio.Event()
empty_name_retry_done = asyncio.Event()
component_retry_done = asyncio.Event()
multiple_name_done = asyncio.Event()
test_complete = asyncio.Event()
# Track retry counts
simple_retry_count = 0
backoff_retry_count = 0
immediate_done_count = 0
cancel_retry_count = 0
empty_name_retry_count = 0
component_retry_count = 0
multiple_name_count = 0
# Track specific test results
cancel_result = None
empty_cancel_result = None
backoff_intervals = []
def on_log_line(line: str) -> None:
nonlocal simple_retry_count, backoff_retry_count, immediate_done_count
nonlocal cancel_retry_count, empty_name_retry_count, component_retry_count
nonlocal multiple_name_count, cancel_result, empty_cancel_result
# Strip ANSI color codes
clean_line = re.sub(r"\x1b\[[0-9;]*m", "", line)
# Simple retry test
if "Simple retry attempt" in clean_line:
if match := re.search(r"Simple retry attempt (\d+)", clean_line):
simple_retry_count = int(match.group(1))
elif "Simple retry succeeded on attempt" in clean_line:
simple_retry_done.set()
# Backoff retry test
elif "Backoff retry attempt" in clean_line:
if match := re.search(
r"Backoff retry attempt (\d+).*interval=(\d+)ms", clean_line
):
backoff_retry_count = int(match.group(1))
interval = int(match.group(2))
if backoff_retry_count > 1: # Skip first (immediate) call
backoff_intervals.append(interval)
elif "Backoff retry completed" in clean_line:
backoff_retry_done.set()
# Immediate done test
elif "Immediate done retry called" in clean_line:
immediate_done_count += 1
immediate_done_done.set()
# Cancel retry test
elif "Cancel test retry attempt" in clean_line:
cancel_retry_count += 1
elif "Retry cancellation result:" in clean_line:
cancel_result = "true" in clean_line
cancel_retry_done.set()
# Empty name retry test
elif "Empty name retry attempt" in clean_line:
if match := re.search(r"Empty name retry attempt (\d+)", clean_line):
empty_name_retry_count = int(match.group(1))
elif "Empty name retry cancel result:" in clean_line:
empty_cancel_result = "true" in clean_line
elif "Empty name retry ran" in clean_line:
empty_name_retry_done.set()
# Component retry test
elif "Component retry attempt" in clean_line:
if match := re.search(r"Component retry attempt (\d+)", clean_line):
component_retry_count = int(match.group(1))
if component_retry_count >= 2:
component_retry_done.set()
# Multiple same name test
elif "Second duplicate retry attempt" in clean_line:
if match := re.search(r"counter=(\d+)", clean_line):
multiple_name_count = int(match.group(1))
if multiple_name_count >= 20:
multiple_name_done.set()
# Test completion
elif "All retry tests completed" in clean_line:
test_complete.set()
async with (
run_compiled(yaml_config, line_callback=on_log_line),
api_client_connected() as client,
):
# Verify we can connect
device_info = await client.device_info()
assert device_info is not None
assert device_info.name == "scheduler-retry-test"
# Wait for simple retry test
try:
await asyncio.wait_for(simple_retry_done.wait(), timeout=1.0)
except TimeoutError:
pytest.fail(
f"Simple retry test did not complete. Count: {simple_retry_count}"
)
assert simple_retry_count == 2, (
f"Expected 2 simple retry attempts, got {simple_retry_count}"
)
# Wait for backoff retry test
try:
await asyncio.wait_for(backoff_retry_done.wait(), timeout=3.0)
except TimeoutError:
pytest.fail(
f"Backoff retry test did not complete. Count: {backoff_retry_count}"
)
assert backoff_retry_count == 4, (
f"Expected 4 backoff retry attempts, got {backoff_retry_count}"
)
# Verify backoff intervals (allowing for timing variations)
assert len(backoff_intervals) >= 2, (
f"Expected at least 2 intervals, got {len(backoff_intervals)}"
)
if len(backoff_intervals) >= 3:
# First interval should be ~50ms
assert 30 <= backoff_intervals[0] <= 70, (
f"First interval {backoff_intervals[0]}ms not ~50ms"
)
# Second interval should be ~100ms (50ms * 2.0)
assert 80 <= backoff_intervals[1] <= 120, (
f"Second interval {backoff_intervals[1]}ms not ~100ms"
)
# Third interval should be ~200ms (100ms * 2.0)
assert 180 <= backoff_intervals[2] <= 220, (
f"Third interval {backoff_intervals[2]}ms not ~200ms"
)
# Wait for immediate done test
try:
await asyncio.wait_for(immediate_done_done.wait(), timeout=3.0)
except TimeoutError:
pytest.fail(
f"Immediate done test did not complete. Count: {immediate_done_count}"
)
assert immediate_done_count == 1, (
f"Expected 1 immediate done call, got {immediate_done_count}"
)
# Wait for cancel retry test
try:
await asyncio.wait_for(cancel_retry_done.wait(), timeout=2.0)
except TimeoutError:
pytest.fail(
f"Cancel retry test did not complete. Count: {cancel_retry_count}"
)
assert cancel_result is True, "Retry cancellation should have succeeded"
assert 2 <= cancel_retry_count <= 5, (
f"Expected 2-5 cancel retry attempts before cancellation, got {cancel_retry_count}"
)
# Wait for empty name retry test
try:
await asyncio.wait_for(empty_name_retry_done.wait(), timeout=1.0)
except TimeoutError:
pytest.fail(
f"Empty name retry test did not complete. Count: {empty_name_retry_count}"
)
# Empty name retry should run at least once before being cancelled
assert 1 <= empty_name_retry_count <= 2, (
f"Expected 1-2 empty name retry attempts, got {empty_name_retry_count}"
)
assert empty_cancel_result is True, (
"Empty name retry cancel should have succeeded"
)
# Wait for component retry test
try:
await asyncio.wait_for(component_retry_done.wait(), timeout=1.0)
except TimeoutError:
pytest.fail(
f"Component retry test did not complete. Count: {component_retry_count}"
)
assert component_retry_count >= 2, (
f"Expected at least 2 component retry attempts, got {component_retry_count}"
)
# Wait for multiple same name test
try:
await asyncio.wait_for(multiple_name_done.wait(), timeout=1.0)
except TimeoutError:
pytest.fail(
f"Multiple same name test did not complete. Count: {multiple_name_count}"
)
# Should be 20+ (only second retry should run)
assert multiple_name_count >= 20, (
f"Expected multiple name count >= 20 (second retry only), got {multiple_name_count}"
)
# Wait for test completion
try:
await asyncio.wait_for(test_complete.wait(), timeout=1.0)
except TimeoutError:
pytest.fail("Test did not complete within timeout")