
Commit d5e3feb

Alex Wang (wangyb-A) authored and committed
test: Add integration tests for wait and run in child context
Add integration tests:

- multiple waits
- simple execution
- run in child context with large data
1 parent 5583620 commit d5e3feb

7 files changed, +279 -0 lines changed


examples/examples-catalog.json

Lines changed: 33 additions & 0 deletions
@@ -56,6 +56,17 @@
       },
       "path": "./src/wait/wait.py"
     },
+    {
+      "name": "Multiple Wait",
+      "description": "Demonstrates multiple sequential wait operations.",
+      "handler": "multiple_wait.handler",
+      "integration": true,
+      "durableConfig": {
+        "RetentionPeriodInDays": 7,
+        "ExecutionTimeout": 300
+      },
+      "path": "./src/wait/multiple_wait.py"
+    },
     {
       "name": "Callback",
       "description": "Basic usage of context.create_callback() to create a callback for external systems",
@@ -154,6 +165,28 @@
         "ExecutionTimeout": 300
       },
       "path": "./src/wait_for_condition/wait_for_condition.py"
+    },
+    {
+      "name": "Run in Child Context Large Data",
+      "description": "Usage of context.run_in_child_context() to execute operations in isolated contexts with large data",
+      "handler": "run_in_child_context_large_data.handler",
+      "integration": true,
+      "durableConfig": {
+        "RetentionPeriodInDays": 7,
+        "ExecutionTimeout": 300
+      },
+      "path": "./src/run_in_child_context/run_in_child_context_large_data.py"
+    },
+    {
+      "name": "Simple Execution",
+      "description": "Simple execution without durable execution",
+      "handler": "simple_execution.handler",
+      "integration": true,
+      "durableConfig": {
+        "RetentionPeriodInDays": 7,
+        "ExecutionTimeout": 300
+      },
+      "path": "./src/simple_execution/simple_execution.py"
     }
   ]
 }
examples/src/run_in_child_context/run_in_child_context_large_data.py

Lines changed: 77 additions & 0 deletions
@@ -0,0 +1,77 @@
"""Test runInChildContext with large data exceeding individual step limits."""

from typing import Any

from aws_durable_execution_sdk_python.context import (
    DurableContext,
    durable_with_child_context,
)
from aws_durable_execution_sdk_python.execution import durable_execution


def generate_large_string(size_in_kb: int) -> str:
    """Generate a string of approximately the specified size in KB."""
    target_size = size_in_kb * 1024  # Convert KB to bytes
    base_string = "A" * 1000  # 1KB string
    repetitions = target_size // 1000
    remainder = target_size % 1000

    return base_string * repetitions + "A" * remainder


@durable_with_child_context
def large_data_processor(child_context: DurableContext) -> dict[str, Any]:
    """Process large data in child context."""
    # Generate data using a loop - each step returns ~50KB of data (under the step limit)
    step_results: list[str] = []
    step_sizes: list[int] = []

    for i in range(1, 6):  # 1 to 5
        step_result: str = child_context.step(
            lambda _: generate_large_string(50),  # 50KB
            name=f"generate-data-{i}",
        )

        step_results.append(step_result)
        step_sizes.append(len(step_result))

    # Concatenate all results - total should be ~250KB
    concatenated_result = "".join(step_results)

    return {
        "totalSize": len(concatenated_result),
        "sizeInKB": round(len(concatenated_result) / 1024),
        "data": concatenated_result,
        "stepSizes": step_sizes,
    }


@durable_execution
def handler(_event: Any, context: DurableContext) -> dict[str, Any]:
    """Handler demonstrating runInChildContext with large data."""
    # Use runInChildContext to handle large data that would exceed 256k step limit
    large_data_result: dict[str, Any] = context.run_in_child_context(
        large_data_processor(), name="large-data-processor"
    )

    # Add a wait after runInChildContext to test persistence across invocations
    context.wait(seconds=1, name="post-processing-wait")

    # Verify the data is still intact after the wait
    data_integrity_check = (
        len(large_data_result["data"]) == large_data_result["totalSize"]
        and len(large_data_result["data"]) > 0
    )

    return {
        "success": True,
        "message": "Successfully processed large data exceeding individual step limits using runInChildContext",
        "dataIntegrityCheck": data_integrity_check,
        "summary": {
            "totalDataSize": large_data_result["sizeInKB"],
            "stepsExecuted": 5,
            "childContextUsed": True,
            "waitExecuted": True,
            "dataPreservedAcrossWait": data_integrity_check,
        },
    }
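
For orientation, here is a stripped-down sketch of the pattern the example above exercises, using the same decorators and calls but with a trivial 1 KB payload in place of the 50 KB steps (illustrative only, not part of this commit):

from typing import Any

from aws_durable_execution_sdk_python.context import (
    DurableContext,
    durable_with_child_context,
)
from aws_durable_execution_sdk_python.execution import durable_execution


@durable_with_child_context
def child(child_context: DurableContext) -> dict[str, Any]:
    # A single small step executed inside the child context (illustrative payload).
    part: str = child_context.step(lambda _: "A" * 1024, name="one-kb-step")
    return {"size": len(part)}


@durable_execution
def handler(_event: Any, context: DurableContext) -> dict[str, Any]:
    # The child context's aggregated return value comes back as one result.
    return context.run_in_child_context(child(), name="child")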
examples/src/simple_execution/simple_execution.py

Lines changed: 18 additions & 0 deletions
@@ -0,0 +1,18 @@
"""Demonstrates handler execution without any durable operations."""

import json
import time
from typing import Any

from aws_durable_execution_sdk_python.context import DurableContext
from aws_durable_execution_sdk_python.execution import durable_execution


@durable_execution
def handler(event: Any, _context: DurableContext) -> dict[str, Any]:
    """Handler that executes without any durable operations."""
    return {
        "received": json.dumps(event),
        "timestamp": int(time.time() * 1000),  # milliseconds since epoch
        "message": "Handler completed successfully",
    }
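
As a plain-Python illustration of the payload shape the handler above returns (standalone, independent of the SDK, and not part of this commit):

import json
import time

# Mirrors the handler body without the durable_execution decorator.
event = {"userId": "test-user", "action": "simple-execution"}
preview = {
    "received": json.dumps(event),
    "timestamp": int(time.time() * 1000),  # milliseconds since epoch
    "message": "Handler completed successfully",
}

print(preview["received"])  # {"userId": "test-user", "action": "simple-execution"}

The exact "received" string above is what the simple_execution integration test later in this commit asserts against.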

examples/src/wait/multiple_wait.py

Lines changed: 18 additions & 0 deletions
@@ -0,0 +1,18 @@
"""Example demonstrating multiple sequential wait operations."""

from typing import Any

from aws_durable_execution_sdk_python.context import DurableContext
from aws_durable_execution_sdk_python.execution import durable_execution


@durable_execution
def handler(_event: Any, context: DurableContext) -> dict[str, Any]:
    """Handler demonstrating multiple sequential wait operations."""
    context.wait(seconds=5, name="wait-1")
    context.wait(seconds=5, name="wait-2")

    return {
        "completedWaits": 2,
        "finalStep": "done",
    }
Lines changed: 36 additions & 0 deletions
@@ -0,0 +1,36 @@
"""Tests for run_in_child_context_large_data."""

import pytest
from aws_durable_execution_sdk_python.execution import InvocationStatus

from src.run_in_child_context import run_in_child_context_large_data
from test.conftest import deserialize_operation_payload


@pytest.mark.example
@pytest.mark.durable_execution(
    handler=run_in_child_context_large_data.handler,
    lambda_function_name="run in child context large data",
)
def test_handle_large_data_exceeding_256k_limit_using_run_in_child_context(
    durable_runner,
):
    """Test handling large data exceeding 256k limit using runInChildContext."""
    with durable_runner:
        result = durable_runner.run(input=None, timeout=30)

    result_data = deserialize_operation_payload(result.result)

    # Verify the execution succeeded
    assert result.status is InvocationStatus.SUCCEEDED
    assert result_data["success"] is True

    # Verify large data was processed
    assert result_data["summary"]["totalDataSize"] > 240  # Should be ~250KB
    assert result_data["summary"]["stepsExecuted"] == 5
    assert result_data["summary"]["childContextUsed"] is True
    assert result_data["summary"]["waitExecuted"] is True
    assert result_data["summary"]["dataPreservedAcrossWait"] is True

    # Verify data integrity across wait
    assert result_data["dataIntegrityCheck"] is True
Lines changed: 40 additions & 0 deletions
@@ -0,0 +1,40 @@
"""Tests for simple_execution."""

import pytest
from aws_durable_execution_sdk_python.execution import InvocationStatus

from src.simple_execution import simple_execution
from test.conftest import deserialize_operation_payload


@pytest.mark.example
@pytest.mark.durable_execution(
    handler=simple_execution.handler,
    lambda_function_name="simple execution",
)
def test_execute_simple_handler_without_operations(durable_runner):
    """Test simple handler execution without operations."""
    test_payload = {
        "userId": "test-user",
        "action": "simple-execution",
    }

    with durable_runner:
        result = durable_runner.run(input=test_payload, timeout=10)

    result_data = deserialize_operation_payload(result.result)

    # Verify the result structure and content
    assert (
        result_data["received"]
        == '{"userId": "test-user", "action": "simple-execution"}'
    )
    assert result_data["message"] == "Handler completed successfully"
    assert isinstance(result_data["timestamp"], int)
    assert result_data["timestamp"] > 0

    # Should have no operations for simple execution
    assert len(result.operations) == 0

    # Verify no error occurred
    assert result.status is InvocationStatus.SUCCEEDED
Lines changed: 57 additions & 0 deletions
@@ -0,0 +1,57 @@
"""Tests for multiple_waits."""

import pytest
from aws_durable_execution_sdk_python.execution import InvocationStatus

from src.wait import multiple_wait
from test.conftest import deserialize_operation_payload


@pytest.mark.example
@pytest.mark.durable_execution(
    handler=multiple_wait.handler,
    lambda_function_name="multiple wait",
)
def test_multiple_sequential_wait_operations(durable_runner):
    """Test multiple sequential wait operations."""
    with durable_runner:
        result = durable_runner.run(input=None, timeout=20)

    assert result.status is InvocationStatus.SUCCEEDED

    # Verify the final result
    assert deserialize_operation_payload(result.result) == {
        "completedWaits": 2,
        "finalStep": "done",
    }

    # Verify operations were tracked
    operations = [op for op in result.operations if op.operation_type.value == "WAIT"]
    assert len(operations) == 2

    # Find the wait operations by name
    wait_1_ops = [
        op
        for op in operations
        if op.operation_type.value == "WAIT" and op.name == "wait-1"
    ]
    assert len(wait_1_ops) == 1
    first_wait = wait_1_ops[0]

    wait_2_ops = [
        op
        for op in operations
        if op.operation_type.value == "WAIT" and op.name == "wait-2"
    ]
    assert len(wait_2_ops) == 1
    second_wait = wait_2_ops[0]

    # Verify operation types and status
    assert first_wait.operation_type.value == "WAIT"
    assert first_wait.status.value == "SUCCEEDED"
    assert second_wait.operation_type.value == "WAIT"
    assert second_wait.status.value == "SUCCEEDED"

    # Verify wait details
    assert first_wait.scheduled_end_timestamp is not None
    assert second_wait.scheduled_end_timestamp is not None
