From 6afda405321a08cb2b923b98180ef0c33d0b22bb Mon Sep 17 00:00:00 2001 From: Frank Chen Date: Mon, 27 Apr 2026 15:55:12 -0700 Subject: [PATCH 1/6] move examples from testing library --- .github/workflows/deploy-examples.yml | 139 +++ .github/workflows/integration-tests.yml | 47 +- .github/workflows/update-sam-template.yml | 42 + .gitignore | 5 +- CONTRIBUTING.md | 41 + examples/cli.py | 485 +++++++++ examples/examples-catalog.json | 563 +++++++++++ examples/scripts/generate_sam_template.py | 106 ++ examples/src/__init__.py | 3 + examples/src/block_example/block_example.py | 47 + examples/src/callback/callback_concurrency.py | 51 + examples/src/callback/callback_heartbeat.py | 22 + examples/src/callback/callback_mixed_ops.py | 35 + examples/src/callback/callback_serdes.py | 76 ++ examples/src/callback/callback_simple.py | 22 + .../src/callback/callback_with_timeout.py | 23 + .../comprehensive_operations.py | 51 + examples/src/handler_error/handler_error.py | 13 + examples/src/hello_world.py | 62 ++ examples/src/logger_example/logger_example.py | 64 ++ examples/src/map/map_completion.py | 117 +++ examples/src/map/map_operations.py | 23 + examples/src/map/map_with_batch_serdes.py | 96 ++ examples/src/map/map_with_custom_serdes.py | 63 ++ .../src/map/map_with_failure_tolerance.py | 53 + examples/src/map/map_with_large_scale.py | 64 ++ examples/src/map/map_with_max_concurrency.py | 23 + examples/src/map/map_with_min_successful.py | 43 + .../no_replay_execution.py | 15 + examples/src/none_results/none_results.py | 31 + examples/src/parallel/parallel.py | 27 + .../src/parallel/parallel_first_successful.py | 27 + .../parallel/parallel_with_batch_serdes.py | 97 ++ .../parallel/parallel_with_custom_serdes.py | 60 ++ .../parallel_with_failure_tolerance.py | 59 ++ .../parallel/parallel_with_max_concurrency.py | 25 + examples/src/parallel/parallel_with_wait.py | 24 + .../run_in_child_context.py | 22 + .../run_in_child_context_large_data.py | 73 ++ 
.../run_in_child_context_step_failure.py | 50 + .../src/simple_execution/simple_execution.py | 18 + examples/src/step/step.py | 19 + examples/src/step/step_no_name.py | 11 + .../src/step/step_semantics_at_most_once.py | 18 + .../src/step/step_with_exponential_backoff.py | 27 + examples/src/step/step_with_name.py | 11 + examples/src/step/step_with_retry.py | 46 + examples/src/step/steps_with_retry.py | 81 ++ examples/src/wait/multiple_wait.py | 19 + examples/src/wait/wait.py | 11 + examples/src/wait/wait_with_name.py | 12 + .../wait_for_callback/wait_for_callback.py | 27 + .../wait_for_callback_anonymous.py | 20 + .../wait_for_callback_child.py | 42 + .../wait_for_callback_heartbeat.py | 33 + .../wait_for_callback_mixed_ops.py | 47 + .../wait_for_callback_multiple_invocations.py | 53 + .../wait_for_callback_nested.py | 66 ++ .../wait_for_callback_serdes.py | 90 ++ .../wait_for_callback_submitter_failure.py | 39 + ...or_callback_submitter_failure_catchable.py | 52 + .../wait_for_callback_timeout.py | 35 + .../wait_for_condition/wait_for_condition.py | 32 + examples/template.yaml | 928 ++++++++++++++++++ examples/test/README.md | 119 +++ examples/test/__init__.py | 1 + .../test/block_example/test_block_example.py | 104 ++ .../callback/test_callback_concurrency.py | 83 ++ .../test/callback/test_callback_heartbeat.py | 51 + .../test/callback/test_callback_mixed_ops.py | 49 + .../test/callback/test_callback_serdes.py | 60 ++ .../test/callback/test_callback_simple.py | 47 + .../test_comprehensive_operations.py | 94 ++ examples/test/conftest.py | 268 +++++ .../test/handler_error/test_handler_error.py | 32 + .../logger_example/test_logger_example.py | 35 + examples/test/map/test_map_completion.py | 32 + examples/test/map/test_map_operations.py | 42 + .../test/map/test_map_with_batch_serdes.py | 43 + .../test/map/test_map_with_custom_serdes.py | 48 + .../map/test_map_with_failure_tolerance.py | 52 + .../test/map/test_map_with_large_scale.py | 36 + 
.../test/map/test_map_with_max_concurrency.py | 37 + .../test/map/test_map_with_min_successful.py | 70 ++ .../test_no_replay_execution.py | 52 + .../test/none_results/test_none_results.py | 51 + examples/test/parallel/test_parallel.py | 38 + .../test_parallel_with_batch_serdes.py | 43 + .../test_parallel_with_custom_serdes.py | 46 + .../test_parallel_with_failure_tolerance.py | 49 + .../test_parallel_with_max_concurrency.py | 36 + .../test/parallel/test_parallel_with_wait.py | 47 + .../test_run_in_child_context.py | 27 + .../test_run_in_child_context_large_data.py | 36 + .../test_run_in_child_context_step_failure.py | 23 + .../simple_execution/test_simple_execution.py | 40 + examples/test/step/test_step.py | 24 + examples/test/step/test_step_permutations.py | 75 ++ .../step/test_step_semantics_at_most_once.py | 32 + examples/test/step/test_step_with_retry.py | 40 + examples/test/step/test_steps_with_retry.py | 52 + examples/test/test_hello_world.py | 24 + examples/test/wait/test_multiple_wait.py | 57 ++ examples/test/wait/test_wait.py | 27 + examples/test/wait/test_wait_permutations.py | 25 + .../test_wait_for_callback_anonymous.py | 39 + .../test_wait_for_callback_child.py | 73 ++ .../test_wait_for_callback_failure.py | 27 + .../test_wait_for_callback_heartbeat.py | 62 ++ .../test_wait_for_callback_mixed_ops.py | 52 + ..._wait_for_callback_multiple_invocations.py | 74 ++ .../test_wait_for_callback_nested.py | 101 ++ .../test_wait_for_callback_serdes.py | 66 ++ ...est_wait_for_callback_submitter_failure.py | 32 + ...or_callback_submitter_failure_catchable.py | 28 + .../test_wait_for_callback_success.py | 25 + .../test_wait_for_callback_timeout.py | 32 + .../test_wait_for_condition.py | 24 + pyproject.toml | 50 +- 119 files changed, 7414 insertions(+), 44 deletions(-) create mode 100644 .github/workflows/deploy-examples.yml create mode 100644 .github/workflows/update-sam-template.yml create mode 100755 examples/cli.py create mode 100644 
examples/examples-catalog.json create mode 100644 examples/scripts/generate_sam_template.py create mode 100644 examples/src/__init__.py create mode 100644 examples/src/block_example/block_example.py create mode 100644 examples/src/callback/callback_concurrency.py create mode 100644 examples/src/callback/callback_heartbeat.py create mode 100644 examples/src/callback/callback_mixed_ops.py create mode 100644 examples/src/callback/callback_serdes.py create mode 100644 examples/src/callback/callback_simple.py create mode 100644 examples/src/callback/callback_with_timeout.py create mode 100644 examples/src/comprehensive_operations/comprehensive_operations.py create mode 100644 examples/src/handler_error/handler_error.py create mode 100644 examples/src/hello_world.py create mode 100644 examples/src/logger_example/logger_example.py create mode 100644 examples/src/map/map_completion.py create mode 100644 examples/src/map/map_operations.py create mode 100644 examples/src/map/map_with_batch_serdes.py create mode 100644 examples/src/map/map_with_custom_serdes.py create mode 100644 examples/src/map/map_with_failure_tolerance.py create mode 100644 examples/src/map/map_with_large_scale.py create mode 100644 examples/src/map/map_with_max_concurrency.py create mode 100644 examples/src/map/map_with_min_successful.py create mode 100644 examples/src/no_replay_execution/no_replay_execution.py create mode 100644 examples/src/none_results/none_results.py create mode 100644 examples/src/parallel/parallel.py create mode 100644 examples/src/parallel/parallel_first_successful.py create mode 100644 examples/src/parallel/parallel_with_batch_serdes.py create mode 100644 examples/src/parallel/parallel_with_custom_serdes.py create mode 100644 examples/src/parallel/parallel_with_failure_tolerance.py create mode 100644 examples/src/parallel/parallel_with_max_concurrency.py create mode 100644 examples/src/parallel/parallel_with_wait.py create mode 100644 
examples/src/run_in_child_context/run_in_child_context.py create mode 100644 examples/src/run_in_child_context/run_in_child_context_large_data.py create mode 100644 examples/src/run_in_child_context/run_in_child_context_step_failure.py create mode 100644 examples/src/simple_execution/simple_execution.py create mode 100644 examples/src/step/step.py create mode 100644 examples/src/step/step_no_name.py create mode 100644 examples/src/step/step_semantics_at_most_once.py create mode 100644 examples/src/step/step_with_exponential_backoff.py create mode 100644 examples/src/step/step_with_name.py create mode 100644 examples/src/step/step_with_retry.py create mode 100644 examples/src/step/steps_with_retry.py create mode 100644 examples/src/wait/multiple_wait.py create mode 100644 examples/src/wait/wait.py create mode 100644 examples/src/wait/wait_with_name.py create mode 100644 examples/src/wait_for_callback/wait_for_callback.py create mode 100644 examples/src/wait_for_callback/wait_for_callback_anonymous.py create mode 100644 examples/src/wait_for_callback/wait_for_callback_child.py create mode 100644 examples/src/wait_for_callback/wait_for_callback_heartbeat.py create mode 100644 examples/src/wait_for_callback/wait_for_callback_mixed_ops.py create mode 100644 examples/src/wait_for_callback/wait_for_callback_multiple_invocations.py create mode 100644 examples/src/wait_for_callback/wait_for_callback_nested.py create mode 100644 examples/src/wait_for_callback/wait_for_callback_serdes.py create mode 100644 examples/src/wait_for_callback/wait_for_callback_submitter_failure.py create mode 100644 examples/src/wait_for_callback/wait_for_callback_submitter_failure_catchable.py create mode 100644 examples/src/wait_for_callback/wait_for_callback_timeout.py create mode 100644 examples/src/wait_for_condition/wait_for_condition.py create mode 100644 examples/template.yaml create mode 100644 examples/test/README.md create mode 100644 examples/test/__init__.py create mode 100644 
examples/test/block_example/test_block_example.py create mode 100644 examples/test/callback/test_callback_concurrency.py create mode 100644 examples/test/callback/test_callback_heartbeat.py create mode 100644 examples/test/callback/test_callback_mixed_ops.py create mode 100644 examples/test/callback/test_callback_serdes.py create mode 100644 examples/test/callback/test_callback_simple.py create mode 100644 examples/test/comprehensive_operations/test_comprehensive_operations.py create mode 100644 examples/test/conftest.py create mode 100644 examples/test/handler_error/test_handler_error.py create mode 100644 examples/test/logger_example/test_logger_example.py create mode 100644 examples/test/map/test_map_completion.py create mode 100644 examples/test/map/test_map_operations.py create mode 100644 examples/test/map/test_map_with_batch_serdes.py create mode 100644 examples/test/map/test_map_with_custom_serdes.py create mode 100644 examples/test/map/test_map_with_failure_tolerance.py create mode 100644 examples/test/map/test_map_with_large_scale.py create mode 100644 examples/test/map/test_map_with_max_concurrency.py create mode 100644 examples/test/map/test_map_with_min_successful.py create mode 100644 examples/test/no_replay_execution/test_no_replay_execution.py create mode 100644 examples/test/none_results/test_none_results.py create mode 100644 examples/test/parallel/test_parallel.py create mode 100644 examples/test/parallel/test_parallel_with_batch_serdes.py create mode 100644 examples/test/parallel/test_parallel_with_custom_serdes.py create mode 100644 examples/test/parallel/test_parallel_with_failure_tolerance.py create mode 100644 examples/test/parallel/test_parallel_with_max_concurrency.py create mode 100644 examples/test/parallel/test_parallel_with_wait.py create mode 100644 examples/test/run_in_child_context/test_run_in_child_context.py create mode 100644 examples/test/run_in_child_context/test_run_in_child_context_large_data.py create mode 100644 
examples/test/run_in_child_context/test_run_in_child_context_step_failure.py create mode 100644 examples/test/simple_execution/test_simple_execution.py create mode 100644 examples/test/step/test_step.py create mode 100644 examples/test/step/test_step_permutations.py create mode 100644 examples/test/step/test_step_semantics_at_most_once.py create mode 100644 examples/test/step/test_step_with_retry.py create mode 100644 examples/test/step/test_steps_with_retry.py create mode 100644 examples/test/test_hello_world.py create mode 100644 examples/test/wait/test_multiple_wait.py create mode 100644 examples/test/wait/test_wait.py create mode 100644 examples/test/wait/test_wait_permutations.py create mode 100644 examples/test/wait_for_callback/test_wait_for_callback_anonymous.py create mode 100644 examples/test/wait_for_callback/test_wait_for_callback_child.py create mode 100644 examples/test/wait_for_callback/test_wait_for_callback_failure.py create mode 100644 examples/test/wait_for_callback/test_wait_for_callback_heartbeat.py create mode 100644 examples/test/wait_for_callback/test_wait_for_callback_mixed_ops.py create mode 100644 examples/test/wait_for_callback/test_wait_for_callback_multiple_invocations.py create mode 100644 examples/test/wait_for_callback/test_wait_for_callback_nested.py create mode 100644 examples/test/wait_for_callback/test_wait_for_callback_serdes.py create mode 100644 examples/test/wait_for_callback/test_wait_for_callback_submitter_failure.py create mode 100644 examples/test/wait_for_callback/test_wait_for_callback_submitter_failure_catchable.py create mode 100644 examples/test/wait_for_callback/test_wait_for_callback_success.py create mode 100644 examples/test/wait_for_callback/test_wait_for_callback_timeout.py create mode 100644 examples/test/wait_for_condition/test_wait_for_condition.py diff --git a/.github/workflows/deploy-examples.yml b/.github/workflows/deploy-examples.yml new file mode 100644 index 00000000..0c6e6d07 --- /dev/null +++ 
b/.github/workflows/deploy-examples.yml @@ -0,0 +1,139 @@ +name: Deploy Python Examples + +on: + pull_request: + branches: [ "main", "development"] + paths: + - 'src/aws_durable_execution_sdk_python/**' + - 'examples/**' + - '.github/workflows/deploy-examples.yml' + workflow_dispatch: + +env: + AWS_REGION: us-west-2 + +permissions: + id-token: write + contents: read + +jobs: + setup: + runs-on: ubuntu-latest + outputs: + examples: ${{ steps.get-examples.outputs.examples }} + steps: + - uses: actions/checkout@v4 + + - name: Get examples from catalog + id: get-examples + working-directory: ./examples + run: | + echo "examples=$(jq -c '.examples | map(select(.integration == true))' examples-catalog.json)" >> $GITHUB_OUTPUT + + integration-test: + needs: setup + runs-on: ubuntu-latest + name: ${{ matrix.example.name }} + strategy: + matrix: + example: ${{ fromJson(needs.setup.outputs.examples) }} + fail-fast: false + + steps: + - uses: actions/checkout@v4 + + - name: Setup SSH Agent + uses: webfactory/ssh-agent@dc588b651fe13675774614f8e6a936a468676387 # v0.9.0 + with: + ssh-private-key: ${{ secrets.SDK_KEY }} + + - name: Setup Python + uses: actions/setup-python@v4 + with: + python-version: '3.13' + + - name: Configure AWS credentials + if: github.event_name != 'workflow_dispatch' || github.actor != 'nektos/act' + uses: aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: "${{ secrets.ACTIONS_INTEGRATION_ROLE_NAME }}" + role-session-name: pythonTestingLibraryGitHubIntegrationTest + aws-region: ${{ env.AWS_REGION }} + + - name: Install Hatch + run: pip install hatch + - name: Build examples + run: hatch run examples:build + + - name: Deploy Lambda function - ${{ matrix.example.name }} + id: deploy + env: + AWS_ACCOUNT_ID: ${{ secrets.AWS_ACCOUNT_ID }} + LAMBDA_ENDPOINT: ${{ secrets.LAMBDA_ENDPOINT_BETA }} + KMS_KEY_ARN: ${{ secrets.KMS_KEY_ARN }} + run: | + # Build function name + EXAMPLE_NAME_CLEAN=$(echo "${{ matrix.example.name }}" | sed 's/ //g') + if 
[ "${{ github.event_name }}" = "pull_request" ]; then + FUNCTION_NAME="${EXAMPLE_NAME_CLEAN}-Python-PR-${{ github.event.number }}" + else + FUNCTION_NAME="${EXAMPLE_NAME_CLEAN}-Python" + fi + + # Clean up existing function if present to avoid conflicts + echo "Cleaning up existing function if present..." + aws lambda delete-function \ + --function-name "$FUNCTION_NAME" \ + --endpoint-url "$LAMBDA_ENDPOINT" \ + --region "$AWS_REGION" 2>/dev/null || echo "No existing function to clean up" + + # Give AWS time to process the deletion + sleep 5 + + echo "Deploying ${{ matrix.example.name }} as $FUNCTION_NAME" + hatch run examples:deploy "${{ matrix.example.name }}" --function-name "$FUNCTION_NAME" + + # $LATEST is also a qualified version + QUALIFIED_FUNCTION_NAME="${FUNCTION_NAME}:\$LATEST" + + # Store both names for later steps + echo "FUNCTION_NAME=$FUNCTION_NAME" >> $GITHUB_ENV + echo "QUALIFIED_FUNCTION_NAME=$QUALIFIED_FUNCTION_NAME" >> $GITHUB_ENV + echo "VERSION=$VERSION" >> $GITHUB_ENV + echo "DEPLOYED_FUNCTION_NAME=$FUNCTION_NAME" >> $GITHUB_OUTPUT + echo "QUALIFIED_FUNCTION_NAME=$QUALIFIED_FUNCTION_NAME" >> $GITHUB_OUTPUT + + - name: Run Integration Tests - ${{ matrix.example.name }} + env: + AWS_REGION: ${{ env.AWS_REGION }} + LAMBDA_ENDPOINT: ${{ secrets.LAMBDA_ENDPOINT_BETA }} + QUALIFIED_FUNCTION_NAME: ${{ env.QUALIFIED_FUNCTION_NAME }} + LAMBDA_FUNCTION_TEST_NAME: ${{ matrix.example.name }} + run: | + echo "Running integration tests for ${{ matrix.example.name }}" + echo "Function name: ${{ steps.deploy.outputs.DEPLOYED_FUNCTION_NAME }}" + echo "Qualified function name: ${QUALIFIED_FUNCTION_NAME}" + echo "AWS Region: ${AWS_REGION}" + echo "Lambda Endpoint: ${LAMBDA_ENDPOINT}" + + # Convert example name to test name: "Hello World" -> "test_hello_world" + TEST_NAME="test_$(echo "${{ matrix.example.name }}" | tr '[:upper:]' '[:lower:]' | tr ' ' '_')" + echo "Test name: ${TEST_NAME}" + + # Run integration tests + hatch run test:examples-integration + + # Wait 
for function to be ready + echo "Waiting for function to be active..." + aws lambda wait function-active --function-name "$QUALIFIED_FUNCTION_NAME" --endpoint-url "$LAMBDA_ENDPOINT" --region "$AWS_REGION" + + # - name: Cleanup Lambda function + # if: always() + # env: + # LAMBDA_ENDPOINT: ${{ secrets.LAMBDA_ENDPOINT_BETA }} + # run: | + # echo "Deleting function: $FUNCTION_NAME" + # aws lambda delete-function \ + # --function-name "$FUNCTION_NAME" \ + # --endpoint-url "$LAMBDA_ENDPOINT" \ + # --region "${{ env.AWS_REGION }}" || echo "Function already deleted or doesn't exist" diff --git a/.github/workflows/integration-tests.yml b/.github/workflows/integration-tests.yml index 67b42ad3..1b3ae8f7 100644 --- a/.github/workflows/integration-tests.yml +++ b/.github/workflows/integration-tests.yml @@ -22,19 +22,6 @@ jobs: with: path: language-sdk - - name: Parse testing SDK branch from PR body - id: parse - run: python language-sdk/ops/parse_sdk_branch.py - env: - PR_BODY: ${{ github.event.pull_request.body }} - - - name: Checkout Testing SDK - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - with: - repository: aws/aws-durable-execution-sdk-python-testing - ref: ${{ steps.parse.outputs.testing_ref }} - path: testing-sdk - - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0 with: @@ -44,12 +31,9 @@ jobs: run: python -m pip install hatch==1.16.5 - name: Setup and run Testing SDK - working-directory: testing-sdk - env: - AWS_DURABLE_SDK_URL: file://${{ github.workspace }}/language-sdk + working-directory: language-sdk run: | - echo "Running Testing SDK tests against Language SDK PR changes..." - echo "Using Language SDK from: $AWS_DURABLE_SDK_URL" + echo "Running SDK tests against Language SDK PR changes..." 
hatch run -- test:pip install -e ../language-sdk hatch fmt --check hatch run types:check @@ -70,19 +54,6 @@ jobs: with: path: language-sdk - - name: Parse testing SDK branch from PR body - id: parse - run: python language-sdk/ops/parse_sdk_branch.py - env: - PR_BODY: ${{ github.event.pull_request.body }} - - - name: Checkout Testing SDK - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - with: - repository: aws/aws-durable-execution-sdk-python-testing - ref: ${{ steps.parse.outputs.testing_ref }} - path: testing-sdk - - name: Set up Python 3.13 uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0 with: @@ -95,17 +66,9 @@ jobs: role-session-name: languageSDKIntegrationTest aws-region: ${{ env.AWS_REGION }} - - name: Install Hatch and setup Testing SDK - working-directory: testing-sdk - env: - AWS_DURABLE_SDK_URL: file://${{ github.workspace }}/language-sdk - run: | - pip install hatch==1.16.5 - python -m pip install -e . - - name: Get integration examples id: get-examples - working-directory: testing-sdk/examples + working-directory: language-sdk/examples run: | echo "examples=$(jq -c '.examples | map(select(.integration == true)) | .[0:2]' examples-catalog.json)" >> $GITHUB_OUTPUT @@ -116,10 +79,10 @@ jobs: rm /tmp/awscliv2.zip sudo /tmp/aws/install --update rm -rf /tmp/aws/ + - name: Deploy and test examples - working-directory: testing-sdk + working-directory: language-sdk env: - AWS_DURABLE_SDK_URL: file://${{ github.workspace }}/language-sdk AWS_ACCOUNT_ID: ${{ secrets.AWS_ACCOUNT_ID }} LAMBDA_ENDPOINT: ${{ secrets.LAMBDA_ENDPOINT }} INVOKE_ACCOUNT_ID: ${{ secrets.INVOKE_ACCOUNT_ID }} diff --git a/.github/workflows/update-sam-template.yml b/.github/workflows/update-sam-template.yml new file mode 100644 index 00000000..abc03d4f --- /dev/null +++ b/.github/workflows/update-sam-template.yml @@ -0,0 +1,42 @@ +name: Update SAM Template + +on: + pull_request: + paths: + - "examples/**" + +permissions: + contents: write 
+ +concurrency: + group: ${{ github.head_ref }}-${{ github.run_id}}-sam-template + cancel-in-progress: true + +jobs: + update-template: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + token: ${{ secrets.GITHUB_TOKEN }} + ref: ${{ github.head_ref }} + + - name: Set up Python 3.13 + uses: actions/setup-python@v5 + with: + python-version: "3.13" + + - name: Generate SAM template + run: python examples/scripts/generate_sam_template.py + + - name: Commit and push changes + run: | + git config --local user.email "action@github.com" + git config --local user.name "GitHub Action" + git add . + if git diff --staged --quiet; then + echo "No changes to commit" + else + git commit -m "chore: update SAM template" --no-verify + git push + fi diff --git a/.gitignore b/.gitignore index bd355dc6..7d2b20ab 100644 --- a/.gitignore +++ b/.gitignore @@ -28,4 +28,7 @@ dist/ .idea -.kiro/ \ No newline at end of file +.kiro/ + +/examples/build/* +/examples/*.zip \ No newline at end of file diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 521aee99..fb1a4078 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -203,6 +203,47 @@ Mimic the package structure in the src/aws_durable_execution_sdk_python director Name your module so that src/mypackage/mymodule.py has a dedicated unit test file tests/mypackage/mymodule_test.py +## Examples and Deployment + +The project includes a unified CLI tool for managing examples, deployment, and AWS account setup: + +### Bootstrap AWS Account +```bash +# Set up IAM role and KMS key for durable functions +export AWS_ACCOUNT_ID=your-account-id +hatch run examples:bootstrap +``` + +### Build and Deploy Examples +```bash +# Build all examples with dependencies +hatch run examples:build + +# Generate SAM template for all examples +hatch run examples:generate-sam-template + +# List available examples +hatch run examples:list + +# Deploy specific example (when durable functions are available) +hatch run examples:deploy "Hello World" 
+``` + +### Other CLI Commands +```bash +# Invoke deployed function +hatch run examples:invoke function-name --payload '{}' + +# Get execution details +hatch run examples:get execution-arn + +# Get execution history +hatch run examples:history execution-arn + +# Clean build artifacts +hatch run examples:clean +``` + ## Coverage ``` hatch run test:cov diff --git a/examples/cli.py b/examples/cli.py new file mode 100755 index 00000000..d15fc916 --- /dev/null +++ b/examples/cli.py @@ -0,0 +1,485 @@ +#!/usr/bin/env python3 + +import argparse +import json +import logging +import os +import shutil +import sys +import time +import zipfile +from pathlib import Path + + +# Configure logging +logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s") +logger = logging.getLogger(__name__) + + +try: + import boto3 + from aws_durable_execution_sdk_python.lambda_service import LambdaClient +except ImportError: + sys.exit(1) + + +def load_catalog(): + """Load examples catalog.""" + catalog_path = Path(__file__).parent / "examples-catalog.json" + with open(catalog_path) as f: + return json.load(f) + + +def build_examples(): + """Build examples with SDK dependencies.""" + + build_dir = Path(__file__).parent / "build" + src_dir = Path(__file__).parent / "src" + + logger.info("Building examples...") + + # Clean and create build directory + if build_dir.exists(): + logger.info("Cleaning existing build directory") + shutil.rmtree(build_dir) + build_dir.mkdir() + + # Copy testing library from current environment + try: + import aws_durable_execution_sdk_python_testing + + sdk_path = Path(aws_durable_execution_sdk_python_testing.__file__).parent + logger.info("Copying SDK from %s", sdk_path) + shutil.copytree(sdk_path, build_dir / "aws_durable_execution_sdk_python_testing") + except (ImportError, OSError): + logger.exception("Failed to copy testing library") + return False + + # Copy testing SDK source + testing_src = ( + Path(__file__).parent.parent + / "src" + / 
"aws_durable_execution_sdk_python" + ) + logger.info("Copying SDK from %s", testing_src) + shutil.copytree(testing_src, build_dir / "aws_durable_execution_sdk_python") + + # Copy example functions + logger.info("Copying examples from %s", src_dir) + for file_path in src_dir.rglob("*"): + if file_path.is_file(): + shutil.copy2(file_path, build_dir / file_path.name) + + logger.info("Build completed successfully") + return True + + +def create_kms_key(kms_client, account_id): + """Create KMS key for durable functions encryption.""" + key_policy = { + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "Enable IAM User Permissions", + "Effect": "Allow", + "Principal": {"AWS": f"arn:aws:iam::{account_id}:root"}, + "Action": "kms:*", + "Resource": "*", + }, + { + "Sid": "Allow Lambda service", + "Effect": "Allow", + "Principal": {"Service": "lambda.amazonaws.com"}, + "Action": ["kms:Decrypt", "kms:Encrypt", "kms:CreateGrant"], + "Resource": "*", + }, + ], + } + + try: + response = kms_client.create_key( + Description="KMS key for Lambda Durable Functions environment variable encryption", + KeyUsage="ENCRYPT_DECRYPT", + KeySpec="SYMMETRIC_DEFAULT", + Policy=json.dumps(key_policy), + ) + + return response["KeyMetadata"]["Arn"] + + except (kms_client.exceptions.ClientError, KeyError): + return None + + +def bootstrap_account(): + """Bootstrap account with necessary IAM role and KMS key.""" + account_id = os.getenv("AWS_ACCOUNT_ID") + region = os.getenv("AWS_REGION", "us-west-2") + + if not account_id: + return False + + # Create KMS key first + kms_client = boto3.client("kms", region_name=region) + kms_key_arn = create_kms_key(kms_client, account_id) + if not kms_key_arn: + return False + + iam_client = boto3.client("iam", region_name=region) + role_name = "DurableFunctionsIntegrationTestRole" + + trust_policy = { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "Service": ["lambda.amazonaws.com", "devo.lambda.aws.internal"] + }, + 
"Action": "sts:AssumeRole", + } + ], + } + + lambda_policy = { + "Version": "2012-10-17", + "Statement": [ + { + "Action": [ + "lambda:CheckpointDurableExecution", + "lambda:GetDurableExecutionState", + ], + "Resource": "*", + "Effect": "Allow", + } + ], + } + + logs_policy = { + "Version": "2012-10-17", + "Statement": [ + { + "Action": [ + "logs:CreateLogGroup", + "logs:CreateLogStream", + "logs:PutLogEvents", + ], + "Resource": "*", + "Effect": "Allow", + } + ], + } + + kms_policy = { + "Version": "2012-10-17", + "Statement": [ + { + "Action": ["kms:CreateGrant", "kms:Decrypt", "kms:Encrypt"], + "Resource": kms_key_arn, + "Effect": "Allow", + } + ], + } + + try: + iam_client.create_role( + RoleName=role_name, + AssumeRolePolicyDocument=json.dumps(trust_policy), + Description="Role for AWS Durable Functions integration testing", + ) + + iam_client.put_role_policy( + RoleName=role_name, + PolicyName="LambdaPolicy", + PolicyDocument=json.dumps(lambda_policy), + ) + + iam_client.put_role_policy( + RoleName=role_name, + PolicyName="LogsPolicy", + PolicyDocument=json.dumps(logs_policy), + ) + + iam_client.put_role_policy( + RoleName=role_name, + PolicyName="DurableFunctionsLambdaStagingKMSPolicy", + PolicyDocument=json.dumps(kms_policy), + ) + + except iam_client.exceptions.EntityAlreadyExistsException: + pass + except iam_client.exceptions.ClientError: + return False + else: + return True + + return True + + +def create_deployment_package(example_name: str) -> Path: + """Create deployment package for example.""" + + build_dir = Path(__file__).parent / "build" + if not build_dir.exists() and not build_examples(): + msg = "Failed to build examples" + raise ValueError(msg) + + zip_path = Path(__file__).parent / f"{example_name}.zip" + with zipfile.ZipFile(zip_path, "w", zipfile.ZIP_DEFLATED) as zf: + # Add SDK dependencies + for file_path in build_dir.rglob("*"): + if file_path.is_file() and not file_path.is_relative_to(build_dir / "src"): + zf.write(file_path, 
file_path.relative_to(build_dir)) + + # Add example files at root level + src_dir = build_dir / "src" + for file_path in src_dir.rglob("*"): + if file_path.is_file(): + zf.write(file_path, file_path.relative_to(src_dir)) + + return zip_path + + +def get_aws_config(): + """Get AWS configuration from environment.""" + config = { + "region": os.getenv("AWS_REGION", "us-west-2"), + "lambda_endpoint": os.getenv("LAMBDA_ENDPOINT", "https://lambda.us-west-2.amazonaws.com"), + "account_id": os.getenv("AWS_ACCOUNT_ID"), + "kms_key_arn": os.getenv("KMS_KEY_ARN"), + } + + if not config["account_id"]: + msg = "Missing AWS_ACCOUNT_ID" + raise ValueError(msg) + + return config + + +def get_lambda_client(): + """Get configured Lambda client.""" + config = get_aws_config() + return boto3.client( + "lambda", + endpoint_url=config["lambda_endpoint"], + region_name=config["region"], + config=boto3.session.Config(parameter_validation=False), + ) + + +def retry_on_resource_conflict(func, *args, max_retries=5, **kwargs): + """Retry function on ResourceConflictException.""" + for attempt in range(max_retries): + try: + return func(*args, **kwargs) + except Exception as e: + if ( + hasattr(e, "response") + and e.response.get("Error", {}).get("Code") + == "ResourceConflictException" + and attempt < max_retries - 1 + ): + wait_time = 2**attempt # Exponential backoff + logger.info( + "ResourceConflictException on attempt %d, retrying in %ds...", + attempt + 1, + wait_time, + ) + time.sleep(wait_time) + continue + raise + return None + + +def deploy_function(example_name: str, function_name: str | None = None): + """Deploy function to AWS Lambda.""" + catalog = load_catalog() + + example_config = None + for example in catalog["examples"]: + if example["name"] == example_name: + example_config = example + break + + if not example_config: + logger.error("Example not found: '%s'", example_name) + list_examples() + return False + + if not function_name: + function_name = f"{example_name.replace(' 
', '')}-Python" + + handler_file = example_config["handler"].replace(".handler", "") + zip_path = create_deployment_package(handler_file) + config = get_aws_config() + lambda_client = get_lambda_client() + + role_arn = ( + f"arn:aws:iam::{config['account_id']}:role/DurableFunctionsIntegrationTestRole" + ) + + function_config = { + "FunctionName": function_name, + "Runtime": "python3.13", + "Role": role_arn, + "Handler": example_config["handler"], + "Description": example_config["description"], + "Timeout": 60, + "MemorySize": 128, + "Environment": { + "Variables": {"AWS_ENDPOINT_URL_LAMBDA": config["lambda_endpoint"]} + }, + "DurableConfig": example_config["durableConfig"], + "LoggingConfig": example_config.get("loggingConfig", {}), + } + + if config["kms_key_arn"]: + function_config["KMSKeyArn"] = config["kms_key_arn"] + + with open(zip_path, "rb") as f: + zip_content = f.read() + + try: + lambda_client.get_function(FunctionName=function_name) + retry_on_resource_conflict( + lambda_client.update_function_code, + FunctionName=function_name, + ZipFile=zip_content, + max_retries=8, + ) + retry_on_resource_conflict( + lambda_client.update_function_configuration, **function_config + ) + + except lambda_client.exceptions.ResourceNotFoundException: + lambda_client.create_function(**function_config, Code={"ZipFile": zip_content}) + + logger.info("Function deployed successfully! 
%s", function_name) + return True + + +def invoke_function(function_name: str, payload: str = "{}"): + """Invoke a deployed function.""" + lambda_client = get_lambda_client() + + try: + response = lambda_client.invoke(FunctionName=function_name, Payload=payload) + + result = json.loads(response["Payload"].read()) + + if "DurableExecutionArn" in result: + pass + + return result.get("DurableExecutionArn") + + except lambda_client.exceptions.ClientError: + return None + + +def get_execution(execution_arn: str): + """Get execution details.""" + lambda_client = get_lambda_client() + + try: + return lambda_client.get_durable_execution(DurableExecutionArn=execution_arn) + except lambda_client.exceptions.ClientError: + return None + + +def get_execution_history(execution_arn: str): + """Get execution history.""" + lambda_client = get_lambda_client() + + try: + return lambda_client.get_durable_execution_history( + DurableExecutionArn=execution_arn + ) + except lambda_client.exceptions.ClientError: + return None + + +def get_function_policy(function_name: str): + """Get function resource policy.""" + lambda_client = get_lambda_client() + + try: + response = lambda_client.get_policy(FunctionName=function_name) + return json.loads(response["Policy"]) + except lambda_client.exceptions.ResourceNotFoundException: + return None + except (lambda_client.exceptions.ClientError, json.JSONDecodeError): + return None + + +def list_examples(): + """List available examples.""" + catalog = load_catalog() + logger.info("Available examples:") + for example in catalog["examples"]: + logger.info(" - %s: %s", example["name"], example["description"]) + + +def main(): + """Main CLI function.""" + parser = argparse.ArgumentParser(description="Durable Functions Examples CLI") + subparsers = parser.add_subparsers(dest="command", help="Available commands") + + # Bootstrap command + subparsers.add_parser("bootstrap", help="Bootstrap account with necessary IAM role") + + # Build command + 
subparsers.add_parser("build", help="Build examples with dependencies") + + # List command + subparsers.add_parser("list", help="List available examples") + + # Deploy command + deploy_parser = subparsers.add_parser("deploy", help="Deploy an example") + deploy_parser.add_argument("example_name", help="Name of example to deploy") + deploy_parser.add_argument("--function-name", help="Custom function name") + + # Invoke command + invoke_parser = subparsers.add_parser("invoke", help="Invoke a deployed function") + invoke_parser.add_argument("function_name", help="Name of function to invoke") + invoke_parser.add_argument("--payload", default="{}", help="JSON payload to send") + + # Get command + get_parser = subparsers.add_parser("get", help="Get execution details") + get_parser.add_argument("execution_arn", help="Execution ARN") + + # Policy command + policy_parser = subparsers.add_parser("policy", help="Get function resource policy") + policy_parser.add_argument("function_name", help="Function name") + + # History command + history_parser = subparsers.add_parser("history", help="Get execution history") + history_parser.add_argument("execution_arn", help="Execution ARN") + + args = parser.parse_args() + + if not args.command: + parser.print_help() + return + + try: + if args.command == "bootstrap": + bootstrap_account() + elif args.command == "build": + build_examples() + elif args.command == "list": + list_examples() + elif args.command == "deploy": + deploy_function(args.example_name, args.function_name) + elif args.command == "invoke": + invoke_function(args.function_name, args.payload) + elif args.command == "policy": + get_function_policy(args.function_name) + elif args.command == "get": + get_execution(args.execution_arn) + elif args.command == "history": + get_execution_history(args.execution_arn) + except (KeyboardInterrupt, SystemExit): + sys.exit(1) + + +if __name__ == "__main__": + main() diff --git a/examples/examples-catalog.json 
b/examples/examples-catalog.json new file mode 100644 index 00000000..df8ea36e --- /dev/null +++ b/examples/examples-catalog.json @@ -0,0 +1,563 @@ +{ + "packageName": "DurableExecutionsPythonExamples-1.0", + "examples": [ + { + "name": "Hello World", + "description": "A simple hello world example with no durable operations", + "handler": "hello_world.handler", + "integration": true, + "durableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + }, + "path": "./src/hello_world.py" + }, + { + "name": "Basic Step", + "description": "Basic usage of context.step() to checkpoint a simple operation", + "handler": "step.handler", + "integration": true, + "durableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + }, + "path": "./src/step/step.py" + }, + { + "name": "Step with Name", + "description": "Step operation with explicit name parameter", + "handler": "step_with_name.handler", + "integration": true, + "durableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + }, + "path": "./src/step/step_with_name.py" + }, + { + "name": "Step with Retry", + "description": "Usage of context.step() with retry configuration for fault tolerance", + "handler": "step_with_retry.handler", + "integration": true, + "durableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + }, + "path": "./src/step/step_with_retry.py" + }, + { + "name": "Wait State", + "description": "Basic usage of context.wait() to pause execution", + "handler": "wait.handler", + "integration": true, + "durableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + }, + "path": "./src/wait/wait.py" + }, + { + "name": "Multiple Wait", + "description": "Usage of demonstrating multiple sequential wait operations.", + "handler": "multiple_wait.handler", + "integration": true, + "durableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + }, + "path": "./src/wait/multiple_wait.py" + }, + { + "name": "Callback", + "description": 
"Basic usage of context.create_callback() to create a callback for external systems", + "handler": "callback.handler", + "integration": true, + "durableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + }, + "path": "./src/callback/callback.py" + }, + { + "name": "Wait for Callback Success", + "description": "Usage of context.wait_for_callback() to wait for external system responses", + "handler": "wait_for_callback.handler", + "integration": true, + "durableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + }, + "path": "./src/wait_for_callback/wait_for_callback.py" + }, + { + "name": "Wait for Callback Failure", + "description": "Usage of context.wait_for_callback() to wait for external system responses", + "handler": "wait_for_callback.handler", + "integration": true, + "durableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + }, + "path": "./src/wait_for_callback/wait_for_callback.py" + }, + { + "name": "Wait For Callback Success Anonymous", + "description": "Usage of context.wait_for_callback() to wait for external system responses", + "handler": "wait_for_callback_anonymous.handler", + "integration": true, + "durableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + }, + "path": "./src/wait_for_callback/wait_for_callback_anonymous.py" + }, + { + "name": "Wait For Callback Heartbeat Sends", + "description": "Usage of context.wait_for_callback() to wait for external system responses", + "handler": "wait_for_callback_heartbeat.handler", + "integration": true, + "durableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + }, + "path": "./src/wait_for_callback/wait_for_callback_heartbeat.py" + }, + { + "name": "Wait For Callback With Child Context", + "description": "Usage of context.wait_for_callback() to wait for external system responses", + "handler": "wait_for_callback_child.handler", + "integration": true, + "durableConfig": { + "RetentionPeriodInDays": 7, + 
"ExecutionTimeout": 300 + }, + "path": "./src/wait_for_callback/wait_for_callback_child.py" + }, + { + "name": "Wait For Callback Mixed Ops", + "description": "Usage of context.wait_for_callback() to wait for external system responses", + "handler": "wait_for_callback_mixed_ops.handler", + "integration": true, + "durableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + }, + "path": "./src/wait_for_callback/wait_for_callback_mixed_ops.py" + }, + { + "name": "Wait For Callback Multiple Invocations", + "description": "Usage of context.wait_for_callback() to wait for external system responses", + "handler": "wait_for_callback_multiple_invocations.handler", + "integration": true, + "durableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + }, + "path": "./src/wait_for_callback/wait_for_callback_multiple_invocations.py" + }, + { + "name": "Wait For Callback Failing Submitter Catchable", + "description": "Usage of context.wait_for_callback() to wait for external system responses", + "handler": "wait_for_callback_submitter_failure_catchable.handler", + "integration": true, + "durableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + }, + "path": "./src/wait_for_callback/wait_for_callback_submitter_failure_catchable.py" + }, + { + "name": "Wait For Callback Submitter Failure", + "description": "Usage of context.wait_for_callback() to wait for external system responses", + "handler": "wait_for_callback_submitter_failure.handler", + "integration": true, + "durableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + }, + "path": "./src/wait_for_callback/wait_for_callback_submitter_failure.py" + }, + { + "name": "Wait For Callback Serdes", + "description": "Usage of context.wait_for_callback() to wait for external system responses", + "handler": "wait_for_callback_serdes.handler", + "integration": true, + "durableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + }, + "path": 
"./src/wait_for_callback/wait_for_callback_serdes.py" + }, + { + "name": "Wait For Callback Nested", + "description": "Usage of context.wait_for_callback() to wait for external system responses", + "handler": "wait_for_callback_nested.handler", + "integration": true, + "durableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + }, + "path": "./src/wait_for_callback/wait_for_callback_nested.py" + }, + { + "name": "Run in Child Context", + "description": "Usage of context.run_in_child_context() to execute operations in isolated contexts", + "handler": "run_in_child_context.handler", + "integration": true, + "durableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + }, + "path": "./src/run_in_child_context/run_in_child_context.py" + }, + { + "name": "Parallel Operations", + "description": "Executing multiple durable operations in parallel", + "handler": "parallel.handler", + "integration": true, + "durableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + }, + "path": "./src/parallel/parallel.py" + }, + { + "name": "Map Operations", + "description": "Processing collections using map-like durable operations", + "handler": "map_operations.handler", + "integration": true, + "durableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + }, + "path": "./src/map/map_operations.py" + }, + { + "name": "Map Large Scale", + "description": "Processing collections using map-like durable operations in large scale", + "handler": "map_with_large_scale.handler", + "integration": true, + "durableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + }, + "path": "./src/map/map_with_large_scale.py" + }, + { + "name": "Block Example", + "description": "Nested child contexts demonstrating block operations", + "handler": "block_example.handler", + "integration": true, + "durableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + }, + "path": "./src/block_example/block_example.py" + }, + { + 
"name": "Logger Example", + "description": "Demonstrating logger usage and enrichment in DurableContext", + "handler": "logger_example.handler", + "integration": true, + "durableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + }, + "loggingConfig": { + "ApplicationLogLevel": "INFO", + "LogFormat": "JSON" + }, + "path": "./src/logger_example/logger_example.py" + }, + { + "name": "Steps with Retry", + "description": "Multiple steps with retry logic in a polling pattern", + "handler": "steps_with_retry.handler", + "integration": true, + "durableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + }, + "path": "./src/step/steps_with_retry.py" + }, + { + "name": "Wait for Condition", + "description": "Polling pattern that waits for a condition to be met", + "handler": "wait_for_condition.handler", + "integration": true, + "durableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + }, + "path": "./src/wait_for_condition/wait_for_condition.py" + }, + { + "name": "Run in Child Context Large Data", + "description": "Usage of context.run_in_child_context() to execute operations in isolated contexts with large data", + "handler": "run_in_child_context_large_data.handler", + "integration": true, + "durableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + }, + "path": "./src/run_in_child_context/run_in_child_context_large_data.py" + }, + { + "name": "Simple Execution", + "description": "Simple execution without durable execution", + "handler": "simple_execution.handler", + "integration": true, + "durableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + }, + "path": "./src/simple_execution/simple_execution.py" + }, + { + "name": "Map with Max Concurrency", + "description": "Map operation with maxConcurrency limit", + "handler": "map_with_max_concurrency.handler", + "integration": true, + "durableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + }, + "path": 
"./src/map/map_with_max_concurrency.py" + }, + { + "name": "Map with Min Successful", + "description": "Map operation with min_successful completion config", + "handler": "map_with_min_successful.handler", + "integration": true, + "durableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + }, + "path": "./src/map/map_with_min_successful.py" + }, + { + "name": "Map with Failure Tolerance", + "description": "Map operation with failure tolerance", + "handler": "map_with_failure_tolerance.handler", + "integration": true, + "durableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + }, + "path": "./src/map/map_with_failure_tolerance.py" + }, + { + "name": "Map Completion Config", + "description": "Reproduces issue where map with minSuccessful loses failure count", + "handler": "map_completion.handler", + "integration": true, + "durableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + }, + "path": "./src/map/map_completion.py" + }, + { + "name": "Parallel with Max Concurrency", + "description": "Parallel operation with maxConcurrency limit", + "handler": "parallel_with_max_concurrency.handler", + "integration": true, + "durableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + }, + "path": "./src/parallel/parallel_with_max_concurrency.py" + }, + { + "name": "Parallel with Wait", + "description": "Parallel operation with wait operations in branches", + "handler": "parallel_with_wait.handler", + "integration": true, + "durableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + }, + "path": "./src/parallel/parallel_with_wait.py" + }, + { + "name": "Parallel with Failure Tolerance", + "description": "Parallel operation with failure tolerance", + "handler": "parallel_with_failure_tolerance.handler", + "integration": true, + "durableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + }, + "path": "./src/parallel/parallel_with_failure_tolerance.py" + }, + { + "name": "Map 
with Custom SerDes", + "description": "Map operation with custom item-level serialization", + "handler": "map_with_custom_serdes.handler", + "integration": true, + "durableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + }, + "path": "./src/map/map_with_custom_serdes.py" + }, + { + "name": "Map with Batch SerDes", + "description": "Map operation with custom batch-level serialization", + "handler": "map_with_batch_serdes.handler", + "integration": true, + "durableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + }, + "path": "./src/map/map_with_batch_serdes.py" + }, + { + "name": "Parallel with Custom SerDes", + "description": "Parallel operation with custom item-level serialization", + "handler": "parallel_with_custom_serdes.handler", + "integration": true, + "durableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + }, + "path": "./src/parallel/parallel_with_custom_serdes.py" + }, + { + "name": "Parallel with Batch SerDes", + "description": "Parallel operation with custom batch-level serialization", + "handler": "parallel_with_batch_serdes.handler", + "integration": true, + "durableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + }, + "path": "./src/parallel/parallel_with_batch_serdes.py" + }, + { + "name": "Handler Error", + "description": "Simple function with handler error", + "handler": "handler_error.handler", + "integration": true, + "durableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + }, + "path": "./src/handler_error/handler_error.py" + }, + { + "name": "None Results", + "description": "Test handling of step operations with undefined result after replay.", + "handler": "none_results.handler", + "integration": true, + "durableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + }, + "path": "./src/none_results/none_results.py" + }, + { + "name": "Callback Success", + "description": "Creating a callback ID for external systems to use", + 
"handler": "callback_simple.handler", + "integration": true, + "durableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + }, + "path": "./src/callback/callback_simple.py" + }, + { + "name": "Callback Success None", + "description": "Creating a callback ID for external systems to use", + "handler": "callback_simple.handler", + "integration": true, + "durableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + }, + "path": "./src/callback/callback_simple.py" + }, + { + "name": "Create Callback Heartbeat", + "description": "Demonstrates callback failure scenarios where the error propagates and is handled by framework", + "handler": "callback_heartbeat.handler", + "integration": true, + "durableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + }, + "path": "./src/callback/callback_heartbeat.py" + }, + { + "name": "Create Callback Mixed Operations", + "description": "Demonstrates createCallback mixed with steps, waits, and other operations", + "handler": "callback_mixed_ops.handler", + "integration": true, + "durableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + }, + "path": "./src/callback/callback_mixed_ops.py" + }, + { + "name": "Create Callback Custom Serdes", + "description": "Demonstrates createCallback with custom serialization/deserialization for Date objects", + "handler": "callback_serdes.handler", + "integration": true, + "durableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + }, + "path": "./src/callback/callback_serdes.py" + }, + { + "name": "No Replay Execution", + "description": "Execution with simples steps and without replay", + "handler": "no_replay_execution.handler", + "integration": true, + "durableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + }, + "path": "./src/no_replay_execution/no_replay_execution.py" + }, + { + "name": "Run In Child Context With Failing Step", + "description": "Demonstrates runInChildContext with a failing 
step followed by a successful wait", + "handler": "run_in_child_context_step_failure.handler", + "integration": true, + "durableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + }, + "path": "./src/run_in_child_context/run_in_child_context_step_failure.py" + }, + { + "name": "Comprehensive Operations", + "description": "Complex multi-operation example demonstrating all major operations", + "handler": "comprehensive_operations.handler", + "integration": true, + "durableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + }, + "path": "./src/comprehensive_operations/comprehensive_operations.py" + }, + { + "name": "Create Callback Concurrency", + "description": "Demonstrates multiple concurrent createCallback operations using context.parallel", + "handler": "callback_concurrency.handler", + "integration": true, + "durableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + }, + "path": "./src/callback/callback_concurrency.py", + "loggingConfig": { + "ApplicationLogLevel": "DEBUG", + "LogFormat": "JSON" + } + } + ] +} diff --git a/examples/scripts/generate_sam_template.py b/examples/scripts/generate_sam_template.py new file mode 100644 index 00000000..c4e631ce --- /dev/null +++ b/examples/scripts/generate_sam_template.py @@ -0,0 +1,106 @@ +#!/usr/bin/env python3 + +import json +from pathlib import Path + +import json + + +def load_catalog(): + """Load examples catalog.""" + catalog_path = Path(__file__).parent.parent / "examples-catalog.json" + with open(catalog_path) as f: + return json.load(f) + + +def generate_sam_template(): + """Generate SAM template for all examples.""" + catalog = load_catalog() + + template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Transform": "AWS::Serverless-2016-10-31", + "Globals": { + "Function": { + "Runtime": "python3.13", + "Timeout": 60, + "MemorySize": 128, + "Environment": { + "Variables": {"AWS_ENDPOINT_URL_LAMBDA": {"Ref": "LambdaEndpoint"}} + }, + } + }, + "Parameters": { 
+ "LambdaEndpoint": { + "Type": "String", + "Default": "https://lambda.us-west-2.amazonaws.com", + } + }, + "Resources": { + "DurableFunctionRole": { + "Type": "AWS::IAM::Role", + "Properties": { + "AssumeRolePolicyDocument": { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": {"Service": "lambda.amazonaws.com"}, + "Action": "sts:AssumeRole", + } + ], + }, + "ManagedPolicyArns": [ + "arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole" + ], + "Policies": [ + { + "PolicyName": "DurableExecutionPolicy", + "PolicyDocument": { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "lambda:CheckpointDurableExecution", + "lambda:GetDurableExecutionState", + ], + "Resource": "*", + } + ], + }, + } + ], + }, + } + }, + } + + for example in catalog["examples"]: + # Convert handler name to PascalCase (e.g., hello_world -> HelloWorld) + handler_base = example["handler"].replace(".handler", "") + function_name = "".join(word.capitalize() for word in handler_base.split("_")) + template["Resources"][function_name] = { + "Type": "AWS::Serverless::Function", + "Properties": { + "CodeUri": "build/", + "Handler": example["handler"], + "Description": example["description"], + "Role": {"Fn::GetAtt": ["DurableFunctionRole", "Arn"]}, + }, + } + + if "durableConfig" in example: + template["Resources"][function_name]["Properties"]["DurableConfig"] = ( + example["durableConfig"] + ) + + template_path = Path(__file__).parent.parent / "template.yaml" + with open(template_path, "w") as f: + json.dump(template, f, sort_keys=False, indent=2) + + print(f"Generated SAM template at {template_path}") + + +if __name__ == "__main__": + generate_sam_template() diff --git a/examples/src/__init__.py b/examples/src/__init__.py new file mode 100644 index 00000000..3f5aece5 --- /dev/null +++ b/examples/src/__init__.py @@ -0,0 +1,3 @@ +"""AWS Durable Functions Python Examples.""" + +__version__ = "0.1.0" diff --git 
a/examples/src/block_example/block_example.py b/examples/src/block_example/block_example.py new file mode 100644 index 00000000..6bcf9024 --- /dev/null +++ b/examples/src/block_example/block_example.py @@ -0,0 +1,47 @@ +"""Example demonstrating nested child contexts (blocks).""" + +from typing import Any + +from aws_durable_execution_sdk_python.context import ( + DurableContext, + durable_with_child_context, +) +from aws_durable_execution_sdk_python.execution import durable_execution +from aws_durable_execution_sdk_python.config import Duration + + +@durable_with_child_context +def nested_block(ctx: DurableContext) -> str: + """Nested block with its own child context.""" + # Wait in the nested block + ctx.wait(Duration.from_seconds(1)) + return "nested block result" + + +@durable_with_child_context +def parent_block(ctx: DurableContext) -> dict[str, str]: + """Parent block with nested operations.""" + # Nested step + nested_result: str = ctx.step( + lambda _: "nested step result", + name="nested_step", + ) + + # Nested block with its own child context + nested_block_result: str = ctx.run_in_child_context(nested_block()) + + return { + "nestedStep": nested_result, + "nestedBlock": nested_block_result, + } + + +@durable_execution +def handler(_event: Any, context: DurableContext) -> dict[str, str]: + """Handler demonstrating nested child contexts.""" + # Run parent block which contains nested operations + result: dict[str, str] = context.run_in_child_context( + parent_block(), name="parent_block" + ) + + return result diff --git a/examples/src/callback/callback_concurrency.py b/examples/src/callback/callback_concurrency.py new file mode 100644 index 00000000..173e808c --- /dev/null +++ b/examples/src/callback/callback_concurrency.py @@ -0,0 +1,51 @@ +"""Demonstrates multiple concurrent createCallback operations using context.parallel.""" + +from typing import Any + +from aws_durable_execution_sdk_python.config import CallbackConfig, Duration +from 
aws_durable_execution_sdk_python.context import DurableContext +from aws_durable_execution_sdk_python.execution import durable_execution + + +@durable_execution +def handler(_event: Any, context: DurableContext) -> dict[str, Any]: + """Handler demonstrating multiple concurrent callback operations.""" + + callback_config = CallbackConfig(timeout=Duration.from_seconds(30)) + + def callback_branch_1(ctx: DurableContext) -> str: + """First callback branch.""" + callback = ctx.create_callback( + name="api-call-1", + config=callback_config, + ) + return callback.result() + + def callback_branch_2(ctx: DurableContext) -> str: + """Second callback branch.""" + callback = ctx.create_callback( + name="api-call-2", + config=callback_config, + ) + return callback.result() + + def callback_branch_3(ctx: DurableContext) -> str: + """Third callback branch.""" + callback = ctx.create_callback( + name="api-call-3", + config=callback_config, + ) + return callback.result() + + parallel_results = context.parallel( + functions=[callback_branch_1, callback_branch_2, callback_branch_3], + name="parallel_callbacks", + ) + + # Extract results from parallel execution + results = parallel_results.get_results() + + return { + "results": results, + "allCompleted": True, + } diff --git a/examples/src/callback/callback_heartbeat.py b/examples/src/callback/callback_heartbeat.py new file mode 100644 index 00000000..b439d529 --- /dev/null +++ b/examples/src/callback/callback_heartbeat.py @@ -0,0 +1,22 @@ +from typing import TYPE_CHECKING, Any + +from aws_durable_execution_sdk_python.config import CallbackConfig, Duration +from aws_durable_execution_sdk_python.context import DurableContext +from aws_durable_execution_sdk_python.execution import durable_execution + + +if TYPE_CHECKING: + from aws_durable_execution_sdk_python.types import Callback + + +@durable_execution +def handler(_event: Any, context: DurableContext) -> str: + callback_config = CallbackConfig( + timeout=Duration.from_seconds(60), 
heartbeat_timeout=Duration.from_seconds(10) + ) + + callback: Callback[str] = context.create_callback( + name="heartbeat_callback", config=callback_config + ) + + return callback.result() diff --git a/examples/src/callback/callback_mixed_ops.py b/examples/src/callback/callback_mixed_ops.py new file mode 100644 index 00000000..089b17dd --- /dev/null +++ b/examples/src/callback/callback_mixed_ops.py @@ -0,0 +1,35 @@ +"""Demonstrates createCallback mixed with steps, waits, and other operations.""" + +import time +from typing import Any + +from aws_durable_execution_sdk_python.config import CallbackConfig, Duration +from aws_durable_execution_sdk_python.context import DurableContext +from aws_durable_execution_sdk_python.execution import durable_execution + + +@durable_execution +def handler(_event: Any, context: DurableContext) -> dict[str, Any]: + """Handler demonstrating createCallback mixed with other operations.""" + + step_result: dict[str, Any] = context.step( + lambda _: {"userId": 123, "name": "John Doe"}, + name="fetch-data", + ) + + callback_config = CallbackConfig(timeout=Duration.from_minutes(1)) + callback = context.create_callback( + name="process-user", + config=callback_config, + ) + + # Mix callback with step and wait operations + context.wait(Duration.from_seconds(1), name="initial-wait") + + callback_result = callback.result() + + return { + "stepResult": step_result, + "callbackResult": callback_result, + "completed": True, + } diff --git a/examples/src/callback/callback_serdes.py b/examples/src/callback/callback_serdes.py new file mode 100644 index 00000000..c624a797 --- /dev/null +++ b/examples/src/callback/callback_serdes.py @@ -0,0 +1,76 @@ +"""Demonstrates createCallback with custom serialization/deserialization for Date objects.""" + +import json +from datetime import datetime, timezone +from typing import Any, Optional + +from aws_durable_execution_sdk_python.config import CallbackConfig, Duration +from 
aws_durable_execution_sdk_python.context import DurableContext +from aws_durable_execution_sdk_python.execution import durable_execution +from aws_durable_execution_sdk_python.serdes import SerDes, SerDesContext + + +class CustomData: + """Data structure with datetime.""" + + def __init__(self, id: int, message: str, timestamp: datetime): + self.id = id + self.message = message + self.timestamp = timestamp + + def to_dict(self) -> dict[str, Any]: + """Convert to dictionary.""" + return { + "id": self.id, + "message": self.message, + "timestamp": self.timestamp.isoformat(), + } + + @staticmethod + def from_dict(data: dict[str, Any]) -> "CustomData": + """Create from dictionary.""" + return CustomData( + id=data["id"], + message=data["message"], + timestamp=datetime.fromisoformat(data["timestamp"].replace("Z", "+00:00")), + ) + + +class CustomDataSerDes(SerDes[CustomData]): + """Custom serializer for CustomData that handles datetime conversion.""" + + def serialize(self, value: Optional[CustomData], _: SerDesContext) -> Optional[str]: + """Serialize CustomData to JSON string.""" + if value is None: + return None + return json.dumps(value.to_dict()) + + def deserialize( + self, payload: Optional[str], _: SerDesContext + ) -> Optional[CustomData]: + """Deserialize JSON string to CustomData.""" + if payload is None: + return None + data = json.loads(payload) + return CustomData.from_dict(data) + + +@durable_execution +def handler(_event: Any, context: DurableContext) -> dict[str, Any]: + """Handler demonstrating createCallback with custom serdes.""" + callback_config = CallbackConfig( + timeout=Duration.from_seconds(30), + serdes=CustomDataSerDes(), + ) + + callback = context.create_callback( + name="custom-serdes-callback", + config=callback_config, + ) + + result: CustomData = callback.result() + + return { + "receivedData": result.to_dict(), + "isDateObject": isinstance(result.timestamp, datetime), + } diff --git a/examples/src/callback/callback_simple.py 
b/examples/src/callback/callback_simple.py new file mode 100644 index 00000000..063aad19 --- /dev/null +++ b/examples/src/callback/callback_simple.py @@ -0,0 +1,22 @@ +from typing import TYPE_CHECKING, Any + +from aws_durable_execution_sdk_python.config import CallbackConfig, Duration +from aws_durable_execution_sdk_python.context import DurableContext +from aws_durable_execution_sdk_python.execution import durable_execution + + +if TYPE_CHECKING: + from aws_durable_execution_sdk_python.types import Callback + + +@durable_execution +def handler(_event: Any, context: DurableContext) -> str: + callback_config = CallbackConfig( + timeout=Duration.from_seconds(120), heartbeat_timeout=Duration.from_seconds(60) + ) + + callback: Callback[str] = context.create_callback( + name="example_callback", config=callback_config + ) + + return callback.result() diff --git a/examples/src/callback/callback_with_timeout.py b/examples/src/callback/callback_with_timeout.py new file mode 100644 index 00000000..a3a2ac11 --- /dev/null +++ b/examples/src/callback/callback_with_timeout.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING, Any + +from aws_durable_execution_sdk_python.config import CallbackConfig, Duration +from aws_durable_execution_sdk_python.context import DurableContext +from aws_durable_execution_sdk_python.execution import durable_execution + + +if TYPE_CHECKING: + from aws_durable_execution_sdk_python.types import Callback + + +@durable_execution +def handler(_event: Any, context: DurableContext) -> str: + # Callback with custom timeout configuration + config = CallbackConfig( + timeout=Duration.from_seconds(60), heartbeat_timeout=Duration.from_seconds(30) + ) + + callback: Callback[str] = context.create_callback( + name="timeout_callback", config=config + ) + + return f"Callback created with 60s timeout: {callback.callback_id}" diff --git a/examples/src/comprehensive_operations/comprehensive_operations.py 
b/examples/src/comprehensive_operations/comprehensive_operations.py new file mode 100644 index 00000000..9f4c0bb2 --- /dev/null +++ b/examples/src/comprehensive_operations/comprehensive_operations.py @@ -0,0 +1,51 @@ +"""Complex multi-operation example demonstrating all major operations.""" + +from typing import Any + +from aws_durable_execution_sdk_python.context import DurableContext +from aws_durable_execution_sdk_python.execution import durable_execution +from aws_durable_execution_sdk_python.config import Duration + + +@durable_execution +def handler(event: dict[str, Any], context: DurableContext) -> dict[str, Any]: + """Comprehensive example demonstrating all major durable operations.""" + print(f"Starting comprehensive operations example with event: {event}") + + # Step 1: ctx.step - Simple step that returns a result + step1_result: str = context.step( + lambda _: "Step 1 completed successfully", + name="step1", + ) + + # Step 2: ctx.wait - Wait for 1 second + context.wait(Duration.from_seconds(1)) + + # Step 3: ctx.map - Map with 5 iterations returning numbers 1 to 5 + map_input = [1, 2, 3, 4, 5] + + map_results = context.map( + inputs=map_input, + func=lambda ctx, item, index, _: ctx.step( + lambda _: item, name=f"map-step-{index}" + ), + name="map-numbers", + ).to_dict() + + # Step 4: ctx.parallel - 3 branches, each returning a fruit name + + parallel_results = context.parallel( + functions=[ + lambda ctx: ctx.step(lambda _: "apple", name="fruit-step-1"), + lambda ctx: ctx.step(lambda _: "banana", name="fruit-step-2"), + lambda ctx: ctx.step(lambda _: "orange", name="fruit-step-3"), + ] + ).to_dict() + + # Final result combining all operations + return { + "step1": step1_result, + "waitCompleted": True, + "mapResults": map_results, + "parallelResults": parallel_results, + } diff --git a/examples/src/handler_error/handler_error.py b/examples/src/handler_error/handler_error.py new file mode 100644 index 00000000..c045838f --- /dev/null +++ 
b/examples/src/handler_error/handler_error.py @@ -0,0 +1,13 @@ +"""Demonstrates how handler-level errors are captured and structured in results.""" + +from typing import Any + +from aws_durable_execution_sdk_python.context import DurableContext +from aws_durable_execution_sdk_python.execution import durable_execution + + +@durable_execution +def handler(_event: Any, _context: DurableContext) -> None: + """Handler demonstrating handler-level error capture.""" + # Simulate a handler-level error that might occur in real applications + raise Exception("Intentional handler failure") diff --git a/examples/src/hello_world.py b/examples/src/hello_world.py new file mode 100644 index 00000000..d3e4905d --- /dev/null +++ b/examples/src/hello_world.py @@ -0,0 +1,62 @@ +"""Simple durable Lambda handler example. + +This example demonstrates: +- Step execution with logging +- Wait operations (pausing without consuming resources) +- Replay-aware logging +- Returning a response +""" + +from __future__ import annotations + +from typing import TYPE_CHECKING, Any + +from aws_durable_execution_sdk_python.config import Duration +from aws_durable_execution_sdk_python.context import DurableContext, durable_step +from aws_durable_execution_sdk_python.execution import durable_execution + +if TYPE_CHECKING: + from aws_durable_execution_sdk_python.types import StepContext + + +@durable_step +def step_1(step_context: StepContext) -> None: + """First step that logs a message.""" + step_context.logger.info("Hello from step1") + + +@durable_step +def step_2(step_context: StepContext, status_code: int) -> str: + """Second step that returns a message.""" + step_context.logger.info("Returning message with status code: %d", status_code) + return f"Hello from Durable Lambda! (status: {status_code})" + + +@durable_execution +def handler(event: Any, context: DurableContext) -> dict[str, Any]: + """Durable Lambda handler with steps, waits, and logging. 
+ + Args: + event: Lambda event input + context: Durable execution context + + Returns: + Response dictionary with statusCode and body + """ + # Execute Step #1 - logs a message + context.step(step_1()) + + # Pause for 10 seconds without consuming CPU cycles or incurring usage charges + # The execution will suspend here and resume after 10 seconds + context.wait(Duration.from_seconds(10)) + + context.logger.info("Waited for 10 seconds") + + # Execute Step #2 - returns a message with status code + message = context.step(step_2(status_code=200)) + + # Return response + return { + "statusCode": 200, + "body": message, + } diff --git a/examples/src/logger_example/logger_example.py b/examples/src/logger_example/logger_example.py new file mode 100644 index 00000000..7c62d934 --- /dev/null +++ b/examples/src/logger_example/logger_example.py @@ -0,0 +1,64 @@ +"""Example demonstrating logger usage in DurableContext.""" + +from typing import Any + +from aws_durable_execution_sdk_python.context import ( + DurableContext, + StepContext, + durable_with_child_context, + durable_step, +) +from aws_durable_execution_sdk_python.execution import durable_execution + + +@durable_with_child_context +def child_workflow(ctx: DurableContext) -> str: + """Child workflow with its own logging context.""" + # Child context logger has step_id populated with child context ID + ctx.logger.info("Running in child context") + + # Step in child context has nested step ID + child_result: str = ctx.step( + lambda _: "child-processed", + name="child_step", + ) + + ctx.logger.info("Child workflow completed", extra={"result": child_result}) + + return child_result + + +@durable_step +def my_step(step_context: StepContext, my_arg: int) -> str: + step_context.logger.info("Hello from my_step") + step_context.logger.warning("Warning from my_step", extra={"my_arg": my_arg}) + step_context.logger.error( + "Error from my_step", extra={"my_arg": my_arg, "type": "error"} + ) + return f"from my_step: {my_arg}" + + 
+@durable_execution +def handler(event: Any, context: DurableContext) -> str: + """Handler demonstrating logger usage.""" + # Top-level context logger: no step_id field + context.logger.info("Starting workflow", extra={"eventId": event.get("id")}) + + # Logger in steps - gets enriched with step ID and attempt number + result1: str = context.step( + lambda _: "processed", + name="process_data", + ) + + context.step(my_step(123)) + + context.logger.info("Step 1 completed", extra={"result": result1}) + + # Child contexts inherit the parent's logger and have their own step ID + result2: str = context.run_in_child_context(child_workflow(), name="child_workflow") + + context.logger.info( + "Workflow completed", extra={"result1": result1, "result2": result2} + ) + + return f"{result1}-{result2}" diff --git a/examples/src/map/map_completion.py b/examples/src/map/map_completion.py new file mode 100644 index 00000000..02db2387 --- /dev/null +++ b/examples/src/map/map_completion.py @@ -0,0 +1,117 @@ +"""Reproduces issue where map with minSuccessful loses failure count.""" + +from typing import Any + +from aws_durable_execution_sdk_python.config import ( + CompletionConfig, + MapConfig, + StepConfig, + Duration, +) +from aws_durable_execution_sdk_python.context import DurableContext +from aws_durable_execution_sdk_python.execution import durable_execution +from aws_durable_execution_sdk_python.retries import ( + RetryStrategyConfig, + create_retry_strategy, +) + + +@durable_execution +def handler(_event: Any, context: DurableContext) -> dict[str, Any]: + """Handler demonstrating map with completion config issue.""" + # Test data: Items 2 and 4 will fail (40% failure rate) + items = [ + {"id": 1, "shouldFail": False}, + {"id": 2, "shouldFail": True}, # Will fail + {"id": 3, "shouldFail": False}, + {"id": 4, "shouldFail": True}, # Will fail + {"id": 5, "shouldFail": False}, + ] + + # Fixed completion config that causes the issue + completion_config = CompletionConfig( + 
min_successful=2, + tolerated_failure_percentage=50, + ) + + context.logger.info( + f"Starting map with config: min_successful=2, tolerated_failure_percentage=50" + ) + context.logger.info( + f"Items pattern: {', '.join(['FAIL' if i['shouldFail'] else 'SUCCESS' for i in items])}" + ) + + def process_item( + ctx: DurableContext, item: dict[str, Any], index: int, _ + ) -> dict[str, Any]: + """Process each item in the map.""" + context.logger.info( + f"Processing item {item['id']} (index {index}), shouldFail: {item['shouldFail']}" + ) + + retry_config = RetryStrategyConfig( + max_attempts=2, + initial_delay=Duration.from_seconds(1), + max_delay=Duration.from_seconds(1), + ) + step_config = StepConfig(retry_strategy=create_retry_strategy(retry_config)) + + def step_function(_: DurableContext) -> dict[str, Any]: + """Step that processes or fails based on item.""" + if item["shouldFail"]: + raise Exception(f"Processing failed for item {item['id']}") + return { + "itemId": item["id"], + "processed": True, + "result": f"Item {item['id']} processed successfully", + } + + return ctx.step( + step_function, + name=f"process-item-{index}", + config=step_config, + ) + + config = MapConfig( + max_concurrency=3, + completion_config=completion_config, + ) + + results = context.map( + inputs=items, + func=process_item, + name="completion-config-items", + config=config, + ) + + context.logger.info("Map completed with results:") + context.logger.info(f"Total items processed: {results.total_count}") + context.logger.info(f"Successful items: {results.success_count}") + context.logger.info(f"Failed items: {results.failure_count}") + context.logger.info(f"Has failures: {results.has_failure}") + context.logger.info(f"Batch status: {results.status}") + context.logger.info(f"Completion reason: {results.completion_reason}") + + return { + "totalItems": results.total_count, + "successfulCount": results.success_count, + "failedCount": results.failure_count, + "hasFailures": results.has_failure, 
+ "batchStatus": str(results.status), + "completionReason": str(results.completion_reason), + "successfulItems": [ + { + "index": item.index, + "itemId": items[item.index]["id"], + } + for item in results.succeeded() + ], + "failedItems": [ + { + "index": item.index, + "itemId": items[item.index]["id"], + "error": str(item.error), + } + for item in results.failed() + ], + } diff --git a/examples/src/map/map_operations.py b/examples/src/map/map_operations.py new file mode 100644 index 00000000..a1ed45cd --- /dev/null +++ b/examples/src/map/map_operations.py @@ -0,0 +1,23 @@ +"""Example demonstrating map operations for processing collections durably.""" + +from typing import Any + +from aws_durable_execution_sdk_python.config import MapConfig +from aws_durable_execution_sdk_python.context import DurableContext +from aws_durable_execution_sdk_python.execution import durable_execution + + +@durable_execution +def handler(_event: Any, context: DurableContext) -> list[int]: + """Process a list of items using context.map().""" + items = [1, 2, 3, 4, 5] + + # Use context.map() to process items concurrently and extract results immediately + return context.map( + inputs=items, + func=lambda ctx, item, index, _: ctx.step( + lambda _: item * 2, name=f"map_item_{index}" + ), + name="map_operation", + config=MapConfig(max_concurrency=2), + ).get_results() diff --git a/examples/src/map/map_with_batch_serdes.py b/examples/src/map/map_with_batch_serdes.py new file mode 100644 index 00000000..798adfa9 --- /dev/null +++ b/examples/src/map/map_with_batch_serdes.py @@ -0,0 +1,96 @@ +"""Example demonstrating map with batch-level serdes.""" + +import json +from typing import Any + +from aws_durable_execution_sdk_python.concurrency.models import ( + BatchItem, + BatchItemStatus, + BatchResult, + CompletionReason, +) +from aws_durable_execution_sdk_python.config import MapConfig +from aws_durable_execution_sdk_python.context import DurableContext +from 
aws_durable_execution_sdk_python.execution import durable_execution +from aws_durable_execution_sdk_python.lambda_service import ErrorObject +from aws_durable_execution_sdk_python.serdes import JsonSerDes, SerDes, SerDesContext + + +class CustomBatchSerDes(SerDes[BatchResult]): + """Custom serializer for the entire BatchResult.""" + + def serialize(self, value: BatchResult, _: SerDesContext) -> str: + # Serialize BatchResult with custom metadata + + wrapped = { + "batch_metadata": { + "serializer": "CustomBatchSerDes", + "version": "2.0", + "total_items": len(value.get_results()), + }, + "success_count": value.success_count, + "failure_count": value.failure_count, + "results": value.get_results(), + "errors": [e.to_dict() if e else None for e in value.get_errors()], + } + return json.dumps(wrapped) + + def deserialize(self, payload: str, _: SerDesContext) -> BatchResult: + wrapped = json.loads(payload) + batch_items = [] + results = wrapped["results"] + errors = wrapped["errors"] + + for i, result in enumerate(results): + error = errors[i] if i < len(errors) else None + if error: + batch_items.append( + BatchItem( + index=i, + status=BatchItemStatus.FAILED, + result=None, + error=ErrorObject.from_dict(error) if error else None, + ) + ) + else: + batch_items.append( + BatchItem( + index=i, + status=BatchItemStatus.SUCCEEDED, + result=result, + error=None, + ) + ) + + # Infer completion reason (assume ALL_COMPLETED if all succeeded) + completion_reason = ( + CompletionReason.ALL_COMPLETED + if wrapped["failure_count"] == 0 + else CompletionReason.FAILURE_TOLERANCE_EXCEEDED + ) + + return BatchResult(all=batch_items, completion_reason=completion_reason) + + +@durable_execution +def handler(_event: Any, context: DurableContext) -> dict[str, Any]: + """Process items with custom batch-level serialization.""" + items = [10, 20, 30, 40] + + # Use custom serdes for the entire BatchResult, default JSON for individual items + config = MapConfig(serdes=CustomBatchSerDes(), 
item_serdes=JsonSerDes()) + + results = context.map( + inputs=items, + func=lambda ctx, item, index, _: ctx.step( + lambda _: item * 2, name=f"double_{index}" + ), + name="map_with_batch_serdes", + config=config, + ) + + return { + "success_count": results.success_count, + "results": results.get_results(), + "sum": sum(results.get_results()), + } diff --git a/examples/src/map/map_with_custom_serdes.py b/examples/src/map/map_with_custom_serdes.py new file mode 100644 index 00000000..5feebb3c --- /dev/null +++ b/examples/src/map/map_with_custom_serdes.py @@ -0,0 +1,63 @@ +"""Example demonstrating map with custom serdes.""" + +import json +from typing import Any + +from aws_durable_execution_sdk_python.config import MapConfig +from aws_durable_execution_sdk_python.context import DurableContext +from aws_durable_execution_sdk_python.execution import durable_execution +from aws_durable_execution_sdk_python.serdes import SerDes, SerDesContext + + +class CustomItemSerDes(SerDes[dict[str, Any]]): + """Custom serializer for individual items that adds metadata.""" + + def serialize(self, value: dict[str, Any], _: SerDesContext) -> str: + # Add custom metadata during serialization + wrapped = {"data": value, "serialized_by": "CustomItemSerDes", "version": "1.0"} + + return json.dumps(wrapped) + + def deserialize(self, payload: str, _: SerDesContext) -> dict[str, Any]: + wrapped = json.loads(payload) + # Extract the original data + return wrapped["data"] + + +@durable_execution +def handler(_event: Any, context: DurableContext) -> dict[str, Any]: + """Process items with custom item serialization. + + This example demonstrates using item_serdes to customize serialization + of individual item results, while using default serialization for the + overall BatchResult. 
+ """ + items = [ + {"id": 1, "name": "item1"}, + {"id": 2, "name": "item2"}, + {"id": 3, "name": "item3"}, + ] + + # Use custom serdes for individual items only + # The BatchResult will use default JSON serialization + config = MapConfig(item_serdes=CustomItemSerDes()) + + results = context.map( + inputs=items, + func=lambda ctx, item, index, _: ctx.step( + lambda _: { + "processed": item["name"], + "index": index, + "doubled_id": item["id"] * 2, + }, + name=f"process_{index}", + ), + name="map_with_custom_serdes", + config=config, + ) + + return { + "success_count": results.success_count, + "results": results.get_results(), + "processed_names": [r["processed"] for r in results.get_results()], + } diff --git a/examples/src/map/map_with_failure_tolerance.py b/examples/src/map/map_with_failure_tolerance.py new file mode 100644 index 00000000..dc01d152 --- /dev/null +++ b/examples/src/map/map_with_failure_tolerance.py @@ -0,0 +1,53 @@ +"""Example demonstrating map with failure tolerance.""" + +from typing import Any + +from aws_durable_execution_sdk_python.config import ( + CompletionConfig, + MapConfig, + StepConfig, +) +from aws_durable_execution_sdk_python.context import DurableContext +from aws_durable_execution_sdk_python.execution import durable_execution +from aws_durable_execution_sdk_python.retries import RetryStrategyConfig + + +@durable_execution +def handler(_event: Any, context: DurableContext) -> dict[str, Any]: + """Process items with failure tolerance.""" + items = list(range(1, 11)) # [1, 2, 3, ..., 10] + + # Tolerate up to 3 failures + config = MapConfig( + max_concurrency=5, + completion_config=CompletionConfig(tolerated_failure_count=3), + ) + + # Disable retries so failures happen immediately + step_config = StepConfig(retry_strategy=RetryStrategyConfig(max_attempts=1)) + + results = context.map( + inputs=items, + func=lambda ctx, item, index, _: ctx.step( + lambda _: _process_with_failures(item), + name=f"item_{index}", + config=step_config, + 
), + name="map_with_tolerance", + config=config, + ) + + return { + "success_count": results.success_count, + "failure_count": results.failure_count, + "succeeded": [item.result for item in results.succeeded()], + "failed_count": len(results.failed()), + "completion_reason": results.completion_reason.value, + } + + +def _process_with_failures(item: int) -> int: + """Process item - fails for items 3, 6, 9.""" + if item % 3 == 0: + raise ValueError(f"Item {item} failed") + return item * 2 diff --git a/examples/src/map/map_with_large_scale.py b/examples/src/map/map_with_large_scale.py new file mode 100644 index 00000000..91685169 --- /dev/null +++ b/examples/src/map/map_with_large_scale.py @@ -0,0 +1,64 @@ +"""Test map with 50 iterations, each returning 100KB data.""" + +from typing import Any + +from aws_durable_execution_sdk_python.config import MapConfig +from aws_durable_execution_sdk_python.context import DurableContext +from aws_durable_execution_sdk_python.execution import durable_execution +from aws_durable_execution_sdk_python.config import Duration + + +def generate_large_string(size_in_kb: int) -> str: + """Generate a string of approximately the specified size in KB.""" + return "A" * 1024 * size_in_kb + + +@durable_execution +def handler(_event: Any, context: DurableContext) -> dict[str, Any]: + """Handler demonstrating large scale map with substantial data.""" + # Create array of 50 items (more manageable for testing) + items = list(range(1, 51)) # 1 to 50 + + config = MapConfig(max_concurrency=10) # Process 10 items concurrently + data = generate_large_string(100) + results = context.map( + inputs=items, + func=lambda ctx, item, index, _: ctx.step( + lambda _: { + "itemId": item, + "index": index, + "dataSize": len(data), + "data": data, + "processed": True, + } + ), + name="large-scale-map", + config=config, + ) + + context.wait(Duration.from_seconds(1), name="wait1") + + # Process results immediately after map operation + # Note: After wait operations, 
the BatchResult may be summarized + final_results = results.get_results() + total_data_size = sum(result["dataSize"] for result in final_results) + all_items_processed = all(result["processed"] for result in final_results) + + total_size_in_mb = round(total_data_size / (1024 * 1024)) + + summary = { + "itemsProcessed": results.success_count, + "totalDataSizeMB": total_size_in_mb, + "totalDataSizeBytes": total_data_size, + "maxConcurrency": 10, + "averageItemSize": round(total_data_size / results.success_count), + "allItemsProcessed": all_items_processed, + } + + context.wait(Duration.from_seconds(1), name="wait2") + + return { + "success": True, + "message": "Successfully processed 50 items with substantial data using map", + "summary": summary, + } diff --git a/examples/src/map/map_with_max_concurrency.py b/examples/src/map/map_with_max_concurrency.py new file mode 100644 index 00000000..6289b3f8 --- /dev/null +++ b/examples/src/map/map_with_max_concurrency.py @@ -0,0 +1,23 @@ +"""Example demonstrating map with maxConcurrency limit.""" + +from typing import Any + +from aws_durable_execution_sdk_python.config import MapConfig +from aws_durable_execution_sdk_python.context import DurableContext +from aws_durable_execution_sdk_python.execution import durable_execution + + +@durable_execution +def handler(_event: Any, context: DurableContext) -> list[int]: + """Process items with concurrency limit of 3.""" + items = list(range(1, 11)) # [1, 2, 3, ..., 10] + + # Extract results immediately to avoid BatchResult serialization + return context.map( + inputs=items, + func=lambda ctx, item, index, _: ctx.step( + lambda _: item * 3, name=f"process_{index}" + ), + name="map_with_concurrency", + config=MapConfig(max_concurrency=3), + ).get_results() diff --git a/examples/src/map/map_with_min_successful.py b/examples/src/map/map_with_min_successful.py new file mode 100644 index 00000000..cc0fe5c9 --- /dev/null +++ b/examples/src/map/map_with_min_successful.py @@ -0,0 +1,43 @@ 
+"""Example demonstrating map with min_successful completion config.""" + +from typing import Any + +from aws_durable_execution_sdk_python.config import CompletionConfig, MapConfig +from aws_durable_execution_sdk_python.context import DurableContext +from aws_durable_execution_sdk_python.execution import durable_execution + + +@durable_execution +def handler(_event: Any, context: DurableContext) -> dict[str, Any]: + """Process items with min_successful threshold.""" + items = list(range(1, 11)) # [1, 2, 3, ..., 10] + + # Configure to complete when 6 items succeed + config = MapConfig( + max_concurrency=5, + completion_config=CompletionConfig(min_successful=6), + ) + + results = context.map( + inputs=items, + func=lambda ctx, item, index, _: ctx.step( + lambda _: _process_item(item), name=f"item_{index}" + ), + name="map_min_successful", + config=config, + ) + + return { + "success_count": results.success_count, + "failure_count": results.failure_count, + "total_count": results.total_count, + "results": results.get_results(), + "completion_reason": results.completion_reason.value, + } + + +def _process_item(item: int) -> int: + """Process item - fails for items 7, 8, 9.""" + if item in [7, 8, 9]: + raise ValueError(f"Item {item} failed") + return item * 2 diff --git a/examples/src/no_replay_execution/no_replay_execution.py b/examples/src/no_replay_execution/no_replay_execution.py new file mode 100644 index 00000000..a6eb6464 --- /dev/null +++ b/examples/src/no_replay_execution/no_replay_execution.py @@ -0,0 +1,15 @@ +"""Demonstrates step execution tracking when no replay occurs.""" + +from typing import Any + +from aws_durable_execution_sdk_python.context import DurableContext +from aws_durable_execution_sdk_python.execution import durable_execution + + +@durable_execution +def handler(_event: Any, context: DurableContext) -> dict[str, bool]: + """Handler demonstrating step execution without replay.""" + context.step(lambda _: "user-1", name="fetch-user-1") + 
context.step(lambda _: "user-2", name="fetch-user-2") + + return {"completed": True} diff --git a/examples/src/none_results/none_results.py b/examples/src/none_results/none_results.py new file mode 100644 index 00000000..9cf32606 --- /dev/null +++ b/examples/src/none_results/none_results.py @@ -0,0 +1,31 @@ +"""Demonstrates handling of operations that return undefined values during replay.""" + +from typing import Any + +from aws_durable_execution_sdk_python.context import ( + DurableContext, + durable_with_child_context, +) +from aws_durable_execution_sdk_python.execution import durable_execution +from aws_durable_execution_sdk_python.config import Duration + + +@durable_with_child_context +def parent_context(ctx: DurableContext) -> None: + """Parent context that returns None.""" + return None + + +@durable_execution +def handler(_event: Any, context: DurableContext) -> str: + """Handler demonstrating operations with undefined/None results.""" + context.step( + lambda _: None, + name="fetch-user", + ) + + context.run_in_child_context(parent_context(), name="parent") + + context.wait(Duration.from_seconds(1), name="wait") + + return "result" diff --git a/examples/src/parallel/parallel.py b/examples/src/parallel/parallel.py new file mode 100644 index 00000000..96fad57c --- /dev/null +++ b/examples/src/parallel/parallel.py @@ -0,0 +1,27 @@ +"""Example demonstrating parallel operations for concurrent execution.""" + +from typing import Any + +from aws_durable_execution_sdk_python.config import ParallelConfig +from aws_durable_execution_sdk_python.context import DurableContext +from aws_durable_execution_sdk_python.execution import durable_execution +from aws_durable_execution_sdk_python.config import Duration + + +@durable_execution +def handler(_event: Any, context: DurableContext) -> list[str]: + """Execute multiple operations in parallel using context.parallel().""" + + # Use context.parallel() to execute functions concurrently and extract results immediately + 
return context.parallel( + functions=[ + lambda ctx: ctx.step(lambda _: "task 1 completed", name="task1"), + lambda ctx: ctx.step(lambda _: "task 2 completed", name="task2"), + lambda ctx: ( + ctx.wait(Duration.from_seconds(1), name="wait_in_task3"), + "task 3 completed after wait", + )[1], + ], + name="parallel_operation", + config=ParallelConfig(max_concurrency=2), + ).get_results() diff --git a/examples/src/parallel/parallel_first_successful.py b/examples/src/parallel/parallel_first_successful.py new file mode 100644 index 00000000..984c7e0c --- /dev/null +++ b/examples/src/parallel/parallel_first_successful.py @@ -0,0 +1,27 @@ +from typing import Any + +from aws_durable_execution_sdk_python.config import CompletionConfig, ParallelConfig +from aws_durable_execution_sdk_python.context import DurableContext +from aws_durable_execution_sdk_python.execution import durable_execution + + +@durable_execution +def handler(_event: Any, context: DurableContext) -> str: + # Parallel execution with first_successful completion strategy + config = ParallelConfig(completion_config=CompletionConfig.first_successful()) + + functions = [ + lambda ctx: ctx.step(lambda _: "Task 1", name="task1"), + lambda ctx: ctx.step(lambda _: "Task 2", name="task2"), + lambda ctx: ctx.step(lambda _: "Task 3", name="task3"), + ] + + results = context.parallel( + functions, name="first_successful_parallel", config=config + ) + + # Extract the first successful result + first_result = ( + results.successful_results[0] if results.successful_results else "None" + ) + return f"First successful result: {first_result}" diff --git a/examples/src/parallel/parallel_with_batch_serdes.py b/examples/src/parallel/parallel_with_batch_serdes.py new file mode 100644 index 00000000..84014e01 --- /dev/null +++ b/examples/src/parallel/parallel_with_batch_serdes.py @@ -0,0 +1,97 @@ +"""Example demonstrating parallel with batch-level serdes.""" + +import json +from typing import Any + +from 
aws_durable_execution_sdk_python.concurrency.models import ( + BatchItem, + BatchItemStatus, + BatchResult, + CompletionReason, +) +from aws_durable_execution_sdk_python.config import ParallelConfig +from aws_durable_execution_sdk_python.context import DurableContext +from aws_durable_execution_sdk_python.execution import durable_execution +from aws_durable_execution_sdk_python.lambda_service import ErrorObject +from aws_durable_execution_sdk_python.serdes import JsonSerDes, SerDes, SerDesContext + + +class CustomBatchSerDes(SerDes[BatchResult]): + """Custom serializer for the entire BatchResult.""" + + def serialize(self, value: BatchResult, _: SerDesContext) -> str: + wrapped = { + "batch_metadata": { + "serializer": "CustomBatchSerDes", + "version": "2.0", + "total_branches": len(value.get_results()), + }, + "success_count": value.success_count, + "failure_count": value.failure_count, + "results": value.get_results(), + "errors": [e.to_dict() if e else None for e in value.get_errors()], + } + return json.dumps(wrapped) + + def deserialize(self, payload: str, _: SerDesContext) -> BatchResult: + wrapped = json.loads(payload) + # Reconstruct BatchResult from wrapped data + # Need to rebuild BatchItem list from results and errors + + batch_items = [] + results = wrapped["results"] + errors = wrapped["errors"] + + for i, result in enumerate(results): + error = errors[i] if i < len(errors) else None + if error: + batch_items.append( + BatchItem( + index=i, + status=BatchItemStatus.FAILED, + result=None, + error=ErrorObject.from_dict(error) if error else None, + ) + ) + else: + batch_items.append( + BatchItem( + index=i, + status=BatchItemStatus.SUCCEEDED, + result=result, + error=None, + ) + ) + + # Infer completion reason (assume ALL_COMPLETED if all succeeded) + completion_reason = ( + CompletionReason.ALL_COMPLETED + if wrapped["failure_count"] == 0 + else CompletionReason.FAILURE_TOLERANCE_EXCEEDED + ) + + return BatchResult(all=batch_items, 
completion_reason=completion_reason) + + +@durable_execution +def handler(_event: Any, context: DurableContext) -> dict[str, Any]: + """Execute parallel tasks with custom batch-level serialization.""" + + # Use custom serdes for the entire BatchResult, default JSON for individual functions + config = ParallelConfig(serdes=CustomBatchSerDes(), item_serdes=JsonSerDes()) + + results = context.parallel( + functions=[ + lambda ctx: ctx.step(lambda _: 100, name="branch1"), + lambda ctx: ctx.step(lambda _: 200, name="branch2"), + lambda ctx: ctx.step(lambda _: 300, name="branch3"), + ], + name="parallel_with_batch_serdes", + config=config, + ) + + return { + "success_count": results.success_count, + "results": results.get_results(), + "total": sum(results.get_results()), + } diff --git a/examples/src/parallel/parallel_with_custom_serdes.py b/examples/src/parallel/parallel_with_custom_serdes.py new file mode 100644 index 00000000..ec694d85 --- /dev/null +++ b/examples/src/parallel/parallel_with_custom_serdes.py @@ -0,0 +1,60 @@ +"""Example demonstrating parallel with custom serdes.""" + +import json +from typing import Any + +from aws_durable_execution_sdk_python.config import ParallelConfig +from aws_durable_execution_sdk_python.context import DurableContext +from aws_durable_execution_sdk_python.execution import durable_execution +from aws_durable_execution_sdk_python.serdes import SerDes, SerDesContext + + +class CustomItemSerDes(SerDes[dict[str, Any]]): + """Custom serializer for individual items that adds metadata.""" + + def serialize(self, value: dict[str, Any], _: SerDesContext) -> str: + # Add custom metadata during serialization + wrapped = {"data": value, "serialized_by": "CustomItemSerDes"} + + return json.dumps(wrapped) + + def deserialize(self, payload: str, _: SerDesContext) -> dict[str, Any]: + wrapped = json.loads(payload) + # Extract the original data + return wrapped["data"] + + +@durable_execution +def handler(_event: Any, context: DurableContext) -> 
@durable_execution
def handler(_event: Any, context: DurableContext) -> dict[str, Any]:
    """Run five parallel steps where two are expected to fail.

    A CompletionConfig tolerating up to two failures lets the batch complete
    successfully even though tasks 2 and 4 raise.
    """
    # Single attempt per step so each failure surfaces immediately.
    step_config = StepConfig(retry_strategy=RetryStrategyConfig(max_attempts=1))

    def make_success(task_num: int):
        # Bind task_num eagerly so each branch returns its own message.
        return lambda ctx: ctx.step(
            lambda _: f"success {task_num}",
            name=f"task{task_num}",
            config=step_config,
        )

    def make_failure(task_num: int):
        return lambda ctx: ctx.step(
            lambda _: _failing_task(task_num),
            name=f"task{task_num}",
            config=step_config,
        )

    branches = [
        make_success(1),
        make_failure(2),
        make_success(3),
        make_failure(4),
        make_success(5),
    ]

    results = context.parallel(
        functions=branches,
        name="parallel_with_tolerance",
        config=ParallelConfig(
            completion_config=CompletionConfig(tolerated_failure_count=2)
        ),
    )

    return {
        "success_count": results.success_count,
        "failure_count": results.failure_count,
        "succeeded": results.get_results(),
        "completion_reason": results.completion_reason.value,
    }
@durable_execution
def handler(_event: Any, context: DurableContext) -> str:
    """Run three wait operations concurrently, then return a status string."""

    def make_wait(seconds: int, wait_name: str):
        # Bind arguments eagerly so each branch waits its own duration.
        return lambda ctx: ctx.wait(Duration.from_seconds(seconds), name=wait_name)

    branches = [
        make_wait(1, "wait_1_second"),
        make_wait(2, "wait_2_seconds"),
        make_wait(5, "wait_5_seconds"),
    ]

    # Extract results so the BatchResult itself never needs serialization.
    context.parallel(functions=branches, name="parallel_waits").get_results()

    return "Completed waits"
def generate_large_string(size_in_kb: int) -> str:
    """Return a string of "A" characters totalling size_in_kb kilobytes."""
    byte_count = size_in_kb * 1024
    return "A" * byte_count
@durable_execution
def handler(_event: Any, context: DurableContext) -> dict[str, Any]:
    """Handler demonstrating runInChildContext with a failing step.

    The child context runs a step that always raises; its retries are
    exhausted, the error propagates out of run_in_child_context, and the
    handler catches it, records it, waits, and returns the outcome.

    Returns:
        A dict with "success" and (when the child failed as expected)
        the stringified error.
    """

    def child_with_failure(ctx: DurableContext) -> None:
        """Child context with a failing step."""

        retry_config = RetryStrategyConfig(
            max_attempts=3,
            initial_delay=Duration.from_seconds(1),
            max_delay=Duration.from_seconds(10),
            backoff_rate=2.0,
        )
        step_config = StepConfig(retry_strategy=create_retry_strategy(retry_config))

        def failing_step(_: DurableContext) -> None:
            """Step that always fails."""
            raise Exception("Step failed in child context")

        ctx.step(
            failing_step,
            name="failing-step",
            config=step_config,
        )

    # Fix: initialize result so the trailing `return result` cannot raise
    # NameError if the child context unexpectedly succeeds (previously
    # `result` was only bound inside the except branch).
    result: dict[str, Any] = {"success": False}

    try:
        context.run_in_child_context(
            child_with_failure,
            name="child-with-failure",
        )
    except Exception as error:
        # The failure is the expected outcome of this example.
        result = {"success": True, "error": str(error)}

    context.wait(Duration.from_seconds(1), name="wait-after-failure")

    return result
@durable_execution
def handler(_event: Any, context: DurableContext) -> str:
    """Run a single step configured with AT_MOST_ONCE_PER_RETRY semantics."""
    step_result = context.step(
        lambda _: "AT_MOST_ONCE_PER_RETRY semantics",
        name="at_most_once_step",
        config=StepConfig(step_semantics=StepSemantics.AT_MOST_ONCE_PER_RETRY),
    )
    return f"Result: {step_result}"
@durable_execution
def handler(_event: Any, context: DurableContext) -> str:
    """Run a step whose retries back off exponentially.

    Delays double from 1 second (backoff_rate=2.0) and are capped at
    10 seconds, for at most 3 attempts.
    """
    strategy = create_retry_strategy(
        RetryStrategyConfig(
            max_attempts=3,
            initial_delay=Duration.from_seconds(1),
            max_delay=Duration.from_seconds(10),
            backoff_rate=2.0,
        )
    )

    outcome = context.step(
        lambda _: "Step with exponential backoff",
        name="retry_step",
        config=StepConfig(retry_strategy=strategy),
    )
    return f"Result: {outcome}"
@durable_execution
def handler(_event: Any, context: DurableContext) -> str:
    """Run a flaky step under a retry strategy that retries RuntimeError.

    Returns:
        The success message from `unreliable_operation` once a retry
        attempt succeeds.
    """
    # Retry RuntimeError up to 3 total attempts.
    retry_config = RetryStrategyConfig(
        max_attempts=3,
        retryable_error_types=[RuntimeError],
    )

    result: str = context.step(
        unreliable_operation(),
        # Pass the strategy by keyword for consistency with the other
        # examples (it was previously passed positionally to StepConfig).
        config=StepConfig(retry_strategy=create_retry_strategy(retry_config)),
    )

    return result
@durable_execution
def handler(_event: Any, context: DurableContext) -> dict[str, Any]:
    """Perform two back-to-back 5-second durable waits, then report completion."""
    for wait_name in ("wait-1", "wait-2"):
        context.wait(Duration.from_seconds(5), name=wait_name)

    return {"completedWaits": 2, "finalStep": "done"}
@durable_execution
def handler(_event: Any, context: DurableContext) -> str:
    """Durably pause the execution for five seconds, then return a status."""
    pause = Duration.from_seconds(5)
    context.wait(pause)
    return "Wait completed"
@durable_execution
def handler(_event: Any, context: DurableContext) -> dict[str, Any]:
    """Wait for a callback whose submitter is an inline (unnamed) function.

    The lambda submitter simply sleeps for a second to stand in for kicking
    off external work; the execution then suspends until the callback arrives.
    """
    callback_result: str = context.wait_for_callback(
        lambda _callback_id, _context: time.sleep(1)
    )
    return {"callbackResult": callback_result, "completed": True}
@durable_execution
def handler(_event: Any, context: DurableContext) -> dict[str, Any]:
    """Run one callback at the top level, then another inside a child context."""

    def noop_submitter(_callback_id, _context) -> None:
        # No external call is made; completion comes from outside the handler.
        return None

    from_parent: str = context.wait_for_callback(
        noop_submitter, name="parent-callback-op"
    )

    from_child: dict[str, Any] = context.run_in_child_context(
        child_context_with_callback(), name="child-context-with-callback"
    )

    return {"parentResult": from_parent, "childContextResult": from_child}
@durable_execution
def handler(_event: Any, context: DurableContext) -> dict[str, Any]:
    """Interleave waits, steps, and a waitForCallback in one durable workflow."""
    context.wait(Duration.from_seconds(1), name="initial-wait")

    user_data: dict[str, Any] = context.step(
        lambda _: {"userId": 123, "name": "John Doe"},
        name="fetch-user-data",
    )

    def submitter(_callback_id, _context) -> None:
        """Stand in for kicking off external work that will call back later."""
        time.sleep(0.1)
        return None

    callback_payload: str = context.wait_for_callback(
        submitter,
        name="wait-for-callback",
    )

    context.wait(Duration.from_seconds(2), name="final-wait")

    completion: dict[str, Any] = context.step(
        lambda _: {"status": "completed", "timestamp": int(time.time() * 1000)},
        name="finalize-processing",
    )

    return {
        "stepResult": user_data,
        "callbackResult": callback_payload,
        "finalStep": completion,
        "workflowCompleted": True,
    }
@durable_execution
def handler(_event: Any, context: DurableContext) -> dict[str, Any]:
    """Spread two callbacks, a step, and two waits across several invocations."""

    def make_submitter(label: str):
        # Each submitter just logs the callback id it was handed.
        def submit(callback_id: str, _context) -> None:
            print(f"{label} callback submitted with ID: {callback_id}")
            return None

        return submit

    context.wait(Duration.from_seconds(1), name="wait-invocation-1")

    first_result: str = context.wait_for_callback(
        make_submitter("First"), name="first-callback"
    )

    processed: dict[str, Any] = context.step(
        lambda _: {"processed": True, "step": 1}, name="process-callback-data"
    )

    context.wait(Duration.from_seconds(1), name="wait-invocation-2")

    second_result: str = context.wait_for_callback(
        make_submitter("Second"), name="second-callback"
    )

    return {
        "firstCallback": first_result,
        "secondCallback": second_result,
        "stepResult": processed,
        "invocationCount": "multiple",
    }
@durable_execution
def handler(_event: Any, context: DurableContext) -> dict[str, Any]:
    """Top level of the nesting demo: one callback here, the rest delegated.

    The outer child context (and the inner one it spawns) each perform their
    own waitForCallback, exercising callbacks across three nesting levels.
    """
    top_level_callback: str = context.wait_for_callback(
        lambda _callback_id, _context: None, name="outer-callback-op"
    )

    delegated: dict[str, Any] = context.run_in_child_context(
        outer_child_context(), name="outer-child-context"
    )

    return {"outerCallback": top_level_callback, "nestedResults": delegated}
class CustomSerdes(SerDes[CustomData]):
    """Custom serialization/deserialization for CustomData.

    Serializes the datetime field to an ISO-8601 string (tagging the payload
    with "_serializedBy") and restores it to a datetime on deserialization.
    None passes through unchanged in both directions.
    """

    # Defined as instance methods (not staticmethods) for consistency with
    # the other SerDes implementations in these examples; the framework is
    # given an instance via WaitForCallbackConfig(serdes=CustomSerdes()).
    def serialize(self, data: CustomData, _=None) -> Optional[str]:
        """Serialize CustomData to a JSON string; None passes through."""
        if data is None:
            return None

        serialized_data = {
            "id": data["id"],
            "message": data["message"],
            # datetime is not JSON-serializable; store it as ISO-8601 text.
            "timestamp": data["timestamp"].isoformat(),
            "metadata": data["metadata"],
            "_serializedBy": "custom-serdes-v1",
        }
        return json.dumps(serialized_data)

    def deserialize(self, data_str: str, _=None) -> Optional[CustomData]:
        """Deserialize a JSON string to CustomData; None passes through."""
        if data_str is None:
            return None

        parsed = json.loads(data_str)
        return CustomData(
            id=parsed["id"],
            message=parsed["message"],
            # Normalize a trailing "Z" to an explicit UTC offset so
            # datetime.fromisoformat accepts it on older Python versions.
            timestamp=datetime.fromisoformat(
                parsed["timestamp"].replace("Z", "+00:00")
            ),
            metadata=CustomDataMetadata(
                version=parsed["metadata"]["version"],
                processed=parsed["metadata"]["processed"],
            ),
        )
@durable_execution
def handler(event: dict[str, Any], context: DurableContext) -> dict[str, Any]:
    """Demonstrate waitForCallback whose submitter is retried with backoff.

    The submitter always raises, so the configured retry strategy drives up
    to three attempts (1-second initial delay, capped at 1 second) before
    the operation fails.

    Returns:
        A dict with the callback result — reached only if the submitter
        somehow succeeds and the callback completes.
    """

    def submitter(callback_id: str, _context) -> None:
        """Submitter that always fails, to exercise the retry strategy."""
        print(f"Submitting callback to external system - callbackId: {callback_id}")
        raise Exception("Simulated submitter failure")

    config = WaitForCallbackConfig(
        timeout=Duration.from_seconds(10),
        heartbeat_timeout=Duration.from_seconds(20),
        retry_strategy=create_retry_strategy(
            config=RetryStrategyConfig(
                max_attempts=3,
                initial_delay=Duration.from_seconds(1),
                max_delay=Duration.from_seconds(1),
            )
        ),
    )

    result: str = context.wait_for_callback(
        submitter,
        name="retry-submitter-callback",
        config=config,
    )

    # Fix: the function is annotated to return a dict but previously fell off
    # the end (implicitly returning None); surface the result explicitly.
    return {"callbackResult": result, "success": True}
diff --git a/examples/src/wait_for_callback/wait_for_callback_timeout.py b/examples/src/wait_for_callback/wait_for_callback_timeout.py new file mode 100644 index 00000000..3c36a31b --- /dev/null +++ b/examples/src/wait_for_callback/wait_for_callback_timeout.py @@ -0,0 +1,35 @@ +"""Demonstrates waitForCallback timeout scenarios.""" + +from typing import Any + +from aws_durable_execution_sdk_python.config import Duration, WaitForCallbackConfig +from aws_durable_execution_sdk_python.context import DurableContext +from aws_durable_execution_sdk_python.execution import durable_execution + + +@durable_execution +def handler(_event: Any, context: DurableContext) -> dict[str, Any]: + """Handler demonstrating waitForCallback timeout.""" + + config = WaitForCallbackConfig( + timeout=Duration.from_seconds(1), heartbeat_timeout=Duration.from_seconds(2) + ) + + def submitter(_callback_id, _context) -> None: + """Submitter succeeds but callback never completes.""" + return None + + try: + result: str = context.wait_for_callback( + submitter, + config=config, + ) + return { + "callbackResult": result, + "success": True, + } + except Exception as error: + return { + "success": False, + "error": str(error), + } diff --git a/examples/src/wait_for_condition/wait_for_condition.py b/examples/src/wait_for_condition/wait_for_condition.py new file mode 100644 index 00000000..37befe6a --- /dev/null +++ b/examples/src/wait_for_condition/wait_for_condition.py @@ -0,0 +1,32 @@ +"""Example demonstrating wait-for-condition pattern.""" + +from typing import Any + +from aws_durable_execution_sdk_python.context import DurableContext +from aws_durable_execution_sdk_python.execution import durable_execution +from aws_durable_execution_sdk_python.config import Duration +from aws_durable_execution_sdk_python.waits import ( + WaitForConditionConfig, + WaitForConditionDecision, +) + + +@durable_execution +def handler(_event: Any, context: DurableContext) -> int: + """Handler demonstrating 
wait-for-condition pattern.""" + + def condition_function(state: int, _) -> int: + """Increment state by 1.""" + return state + 1 + + def wait_strategy(state: int, attempt: int) -> dict[str, Any]: + """Wait strategy that continues until state reaches 3.""" + if state >= 3: + return WaitForConditionDecision.stop_polling() + return WaitForConditionDecision.continue_waiting(Duration.from_seconds(1)) + + config = WaitForConditionConfig(wait_strategy=wait_strategy, initial_state=0) + + result = context.wait_for_condition(check=condition_function, config=config) + + return result diff --git a/examples/template.yaml b/examples/template.yaml new file mode 100644 index 00000000..5e2d5aef --- /dev/null +++ b/examples/template.yaml @@ -0,0 +1,928 @@ +{ + "AWSTemplateFormatVersion": "2010-09-09", + "Transform": "AWS::Serverless-2016-10-31", + "Globals": { + "Function": { + "Runtime": "python3.13", + "Timeout": 60, + "MemorySize": 128, + "Environment": { + "Variables": { + "AWS_ENDPOINT_URL_LAMBDA": { + "Ref": "LambdaEndpoint" + } + } + } + } + }, + "Parameters": { + "LambdaEndpoint": { + "Type": "String", + "Default": "https://lambda.us-west-2.amazonaws.com" + } + }, + "Resources": { + "DurableFunctionRole": { + "Type": "AWS::IAM::Role", + "Properties": { + "AssumeRolePolicyDocument": { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "Service": "lambda.amazonaws.com" + }, + "Action": "sts:AssumeRole" + } + ] + }, + "ManagedPolicyArns": [ + "arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole" + ], + "Policies": [ + { + "PolicyName": "DurableExecutionPolicy", + "PolicyDocument": { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "lambda:CheckpointDurableExecution", + "lambda:GetDurableExecutionState" + ], + "Resource": "*" + } + ] + } + } + ] + } + }, + "HelloWorld": { + "Type": "AWS::Serverless::Function", + "Properties": { + "CodeUri": "build/", + "Handler": "hello_world.handler", + 
"Description": "A simple hello world example with no durable operations", + "Role": { + "Fn::GetAtt": [ + "DurableFunctionRole", + "Arn" + ] + }, + "DurableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + } + } + }, + "Step": { + "Type": "AWS::Serverless::Function", + "Properties": { + "CodeUri": "build/", + "Handler": "step.handler", + "Description": "Basic usage of context.step() to checkpoint a simple operation", + "Role": { + "Fn::GetAtt": [ + "DurableFunctionRole", + "Arn" + ] + }, + "DurableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + } + } + }, + "StepWithName": { + "Type": "AWS::Serverless::Function", + "Properties": { + "CodeUri": "build/", + "Handler": "step_with_name.handler", + "Description": "Step operation with explicit name parameter", + "Role": { + "Fn::GetAtt": [ + "DurableFunctionRole", + "Arn" + ] + }, + "DurableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + } + } + }, + "StepWithRetry": { + "Type": "AWS::Serverless::Function", + "Properties": { + "CodeUri": "build/", + "Handler": "step_with_retry.handler", + "Description": "Usage of context.step() with retry configuration for fault tolerance", + "Role": { + "Fn::GetAtt": [ + "DurableFunctionRole", + "Arn" + ] + }, + "DurableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + } + } + }, + "Wait": { + "Type": "AWS::Serverless::Function", + "Properties": { + "CodeUri": "build/", + "Handler": "wait.handler", + "Description": "Basic usage of context.wait() to pause execution", + "Role": { + "Fn::GetAtt": [ + "DurableFunctionRole", + "Arn" + ] + }, + "DurableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + } + } + }, + "MultipleWait": { + "Type": "AWS::Serverless::Function", + "Properties": { + "CodeUri": "build/", + "Handler": "multiple_wait.handler", + "Description": "Demonstrates multiple sequential wait operations.", + "Role": { + "Fn::GetAtt": [ + "DurableFunctionRole", + "Arn" + ] + 
}, + "DurableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + } + } + }, + "Callback": { + "Type": "AWS::Serverless::Function", + "Properties": { + "CodeUri": "build/", + "Handler": "callback.handler", + "Description": "Basic usage of context.create_callback() to create a callback for external systems", + "Role": { + "Fn::GetAtt": [ + "DurableFunctionRole", + "Arn" + ] + }, + "DurableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + } + } + }, + "WaitForCallback": { + "Type": "AWS::Serverless::Function", + "Properties": { + "CodeUri": "build/", + "Handler": "wait_for_callback.handler", + "Description": "Usage of context.wait_for_callback() to wait for external system responses", + "Role": { + "Fn::GetAtt": [ + "DurableFunctionRole", + "Arn" + ] + }, + "DurableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + } + } + }, + "WaitForCallbackAnonymous": { + "Type": "AWS::Serverless::Function", + "Properties": { + "CodeUri": "build/", + "Handler": "wait_for_callback_anonymous.handler", + "Description": "Usage of context.wait_for_callback() to wait for external system responses", + "Role": { + "Fn::GetAtt": [ + "DurableFunctionRole", + "Arn" + ] + }, + "DurableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + } + } + }, + "WaitForCallbackHeartbeat": { + "Type": "AWS::Serverless::Function", + "Properties": { + "CodeUri": "build/", + "Handler": "wait_for_callback_heartbeat.handler", + "Description": "Usage of context.wait_for_callback() to wait for external system responses", + "Role": { + "Fn::GetAtt": [ + "DurableFunctionRole", + "Arn" + ] + }, + "DurableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + } + } + }, + "WaitForCallbackChild": { + "Type": "AWS::Serverless::Function", + "Properties": { + "CodeUri": "build/", + "Handler": "wait_for_callback_child.handler", + "Description": "Usage of context.wait_for_callback() to wait for external system responses", + "Role": { + 
"Fn::GetAtt": [ + "DurableFunctionRole", + "Arn" + ] + }, + "DurableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + } + } + }, + "WaitForCallbackMixedOps": { + "Type": "AWS::Serverless::Function", + "Properties": { + "CodeUri": "build/", + "Handler": "wait_for_callback_mixed_ops.handler", + "Description": "Usage of context.wait_for_callback() to wait for external system responses", + "Role": { + "Fn::GetAtt": [ + "DurableFunctionRole", + "Arn" + ] + }, + "DurableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + } + } + }, + "WaitForCallbackMultipleInvocations": { + "Type": "AWS::Serverless::Function", + "Properties": { + "CodeUri": "build/", + "Handler": "wait_for_callback_multiple_invocations.handler", + "Description": "Usage of context.wait_for_callback() to wait for external system responses", + "Role": { + "Fn::GetAtt": [ + "DurableFunctionRole", + "Arn" + ] + }, + "DurableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + } + } + }, + "WaitForCallbackSubmitterFailureCatchable": { + "Type": "AWS::Serverless::Function", + "Properties": { + "CodeUri": "build/", + "Handler": "wait_for_callback_submitter_failure_catchable.handler", + "Description": "Usage of context.wait_for_callback() to wait for external system responses", + "Role": { + "Fn::GetAtt": [ + "DurableFunctionRole", + "Arn" + ] + }, + "DurableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + } + } + }, + "WaitForCallbackSubmitterFailure": { + "Type": "AWS::Serverless::Function", + "Properties": { + "CodeUri": "build/", + "Handler": "wait_for_callback_submitter_failure.handler", + "Description": "Usage of context.wait_for_callback() to wait for external system responses", + "Role": { + "Fn::GetAtt": [ + "DurableFunctionRole", + "Arn" + ] + }, + "DurableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + } + } + }, + "WaitForCallbackSerdes": { + "Type": "AWS::Serverless::Function", + "Properties": { + "CodeUri": 
"build/", + "Handler": "wait_for_callback_serdes.handler", + "Description": "Usage of context.wait_for_callback() to wait for external system responses", + "Role": { + "Fn::GetAtt": [ + "DurableFunctionRole", + "Arn" + ] + }, + "DurableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + } + } + }, + "WaitForCallbackNested": { + "Type": "AWS::Serverless::Function", + "Properties": { + "CodeUri": "build/", + "Handler": "wait_for_callback_nested.handler", + "Description": "Usage of context.wait_for_callback() to wait for external system responses", + "Role": { + "Fn::GetAtt": [ + "DurableFunctionRole", + "Arn" + ] + }, + "DurableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + } + } + }, + "RunInChildContext": { + "Type": "AWS::Serverless::Function", + "Properties": { + "CodeUri": "build/", + "Handler": "run_in_child_context.handler", + "Description": "Usage of context.run_in_child_context() to execute operations in isolated contexts", + "Role": { + "Fn::GetAtt": [ + "DurableFunctionRole", + "Arn" + ] + }, + "DurableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + } + } + }, + "Parallel": { + "Type": "AWS::Serverless::Function", + "Properties": { + "CodeUri": "build/", + "Handler": "parallel.handler", + "Description": "Executing multiple durable operations in parallel", + "Role": { + "Fn::GetAtt": [ + "DurableFunctionRole", + "Arn" + ] + }, + "DurableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + } + } + }, + "MapOperations": { + "Type": "AWS::Serverless::Function", + "Properties": { + "CodeUri": "build/", + "Handler": "map_operations.handler", + "Description": "Processing collections using map-like durable operations", + "Role": { + "Fn::GetAtt": [ + "DurableFunctionRole", + "Arn" + ] + }, + "DurableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + } + } + }, + "MapWithLargeScale": { + "Type": "AWS::Serverless::Function", + "Properties": { + "CodeUri": "build/", + 
"Handler": "map_with_large_scale.handler", + "Description": "Processing collections using map-like durable operations in large scale", + "Role": { + "Fn::GetAtt": [ + "DurableFunctionRole", + "Arn" + ] + }, + "DurableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + } + } + }, + "BlockExample": { + "Type": "AWS::Serverless::Function", + "Properties": { + "CodeUri": "build/", + "Handler": "block_example.handler", + "Description": "Nested child contexts demonstrating block operations", + "Role": { + "Fn::GetAtt": [ + "DurableFunctionRole", + "Arn" + ] + }, + "DurableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + } + } + }, + "LoggerExample": { + "Type": "AWS::Serverless::Function", + "Properties": { + "CodeUri": "build/", + "Handler": "logger_example.handler", + "Description": "Demonstrating logger usage and enrichment in DurableContext", + "Role": { + "Fn::GetAtt": [ + "DurableFunctionRole", + "Arn" + ] + }, + "DurableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + } + } + }, + "StepsWithRetry": { + "Type": "AWS::Serverless::Function", + "Properties": { + "CodeUri": "build/", + "Handler": "steps_with_retry.handler", + "Description": "Multiple steps with retry logic in a polling pattern", + "Role": { + "Fn::GetAtt": [ + "DurableFunctionRole", + "Arn" + ] + }, + "DurableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + } + } + }, + "WaitForCondition": { + "Type": "AWS::Serverless::Function", + "Properties": { + "CodeUri": "build/", + "Handler": "wait_for_condition.handler", + "Description": "Polling pattern that waits for a condition to be met", + "Role": { + "Fn::GetAtt": [ + "DurableFunctionRole", + "Arn" + ] + }, + "DurableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + } + } + }, + "RunInChildContextLargeData": { + "Type": "AWS::Serverless::Function", + "Properties": { + "CodeUri": "build/", + "Handler": "run_in_child_context_large_data.handler", + "Description": 
"Usage of context.run_in_child_context() to execute operations in isolated contexts with large data", + "Role": { + "Fn::GetAtt": [ + "DurableFunctionRole", + "Arn" + ] + }, + "DurableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + } + } + }, + "SimpleExecution": { + "Type": "AWS::Serverless::Function", + "Properties": { + "CodeUri": "build/", + "Handler": "simple_execution.handler", + "Description": "Simple execution without durable execution", + "Role": { + "Fn::GetAtt": [ + "DurableFunctionRole", + "Arn" + ] + }, + "DurableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + } + } + }, + "MapWithMaxConcurrency": { + "Type": "AWS::Serverless::Function", + "Properties": { + "CodeUri": "build/", + "Handler": "map_with_max_concurrency.handler", + "Description": "Map operation with maxConcurrency limit", + "Role": { + "Fn::GetAtt": [ + "DurableFunctionRole", + "Arn" + ] + }, + "DurableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + } + } + }, + "MapWithMinSuccessful": { + "Type": "AWS::Serverless::Function", + "Properties": { + "CodeUri": "build/", + "Handler": "map_with_min_successful.handler", + "Description": "Map operation with min_successful completion config", + "Role": { + "Fn::GetAtt": [ + "DurableFunctionRole", + "Arn" + ] + }, + "DurableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + } + } + }, + "MapWithFailureTolerance": { + "Type": "AWS::Serverless::Function", + "Properties": { + "CodeUri": "build/", + "Handler": "map_with_failure_tolerance.handler", + "Description": "Map operation with failure tolerance", + "Role": { + "Fn::GetAtt": [ + "DurableFunctionRole", + "Arn" + ] + }, + "DurableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + } + } + }, + "MapCompletion": { + "Type": "AWS::Serverless::Function", + "Properties": { + "CodeUri": "build/", + "Handler": "map_completion.handler", + "Description": "Reproduces issue where map with minSuccessful loses 
failure count", + "Role": { + "Fn::GetAtt": [ + "DurableFunctionRole", + "Arn" + ] + }, + "DurableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + } + } + }, + "ParallelWithMaxConcurrency": { + "Type": "AWS::Serverless::Function", + "Properties": { + "CodeUri": "build/", + "Handler": "parallel_with_max_concurrency.handler", + "Description": "Parallel operation with maxConcurrency limit", + "Role": { + "Fn::GetAtt": [ + "DurableFunctionRole", + "Arn" + ] + }, + "DurableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + } + } + }, + "ParallelWithWait": { + "Type": "AWS::Serverless::Function", + "Properties": { + "CodeUri": "build/", + "Handler": "parallel_with_wait.handler", + "Description": "Parallel operation with wait operations in branches", + "Role": { + "Fn::GetAtt": [ + "DurableFunctionRole", + "Arn" + ] + }, + "DurableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + } + } + }, + "ParallelWithFailureTolerance": { + "Type": "AWS::Serverless::Function", + "Properties": { + "CodeUri": "build/", + "Handler": "parallel_with_failure_tolerance.handler", + "Description": "Parallel operation with failure tolerance", + "Role": { + "Fn::GetAtt": [ + "DurableFunctionRole", + "Arn" + ] + }, + "DurableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + } + } + }, + "MapWithCustomSerdes": { + "Type": "AWS::Serverless::Function", + "Properties": { + "CodeUri": "build/", + "Handler": "map_with_custom_serdes.handler", + "Description": "Map operation with custom item-level serialization", + "Role": { + "Fn::GetAtt": [ + "DurableFunctionRole", + "Arn" + ] + }, + "DurableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + } + } + }, + "MapWithBatchSerdes": { + "Type": "AWS::Serverless::Function", + "Properties": { + "CodeUri": "build/", + "Handler": "map_with_batch_serdes.handler", + "Description": "Map operation with custom batch-level serialization", + "Role": { + "Fn::GetAtt": [ + 
"DurableFunctionRole", + "Arn" + ] + }, + "DurableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + } + } + }, + "ParallelWithCustomSerdes": { + "Type": "AWS::Serverless::Function", + "Properties": { + "CodeUri": "build/", + "Handler": "parallel_with_custom_serdes.handler", + "Description": "Parallel operation with custom item-level serialization", + "Role": { + "Fn::GetAtt": [ + "DurableFunctionRole", + "Arn" + ] + }, + "DurableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + } + } + }, + "ParallelWithBatchSerdes": { + "Type": "AWS::Serverless::Function", + "Properties": { + "CodeUri": "build/", + "Handler": "parallel_with_batch_serdes.handler", + "Description": "Parallel operation with custom batch-level serialization", + "Role": { + "Fn::GetAtt": [ + "DurableFunctionRole", + "Arn" + ] + }, + "DurableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + } + } + }, + "HandlerError": { + "Type": "AWS::Serverless::Function", + "Properties": { + "CodeUri": "build/", + "Handler": "handler_error.handler", + "Description": "Simple function with handler error", + "Role": { + "Fn::GetAtt": [ + "DurableFunctionRole", + "Arn" + ] + }, + "DurableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + } + } + }, + "NoneResults": { + "Type": "AWS::Serverless::Function", + "Properties": { + "CodeUri": "build/", + "Handler": "none_results.handler", + "Description": "Test handling of step operations with undefined result after replay.", + "Role": { + "Fn::GetAtt": [ + "DurableFunctionRole", + "Arn" + ] + }, + "DurableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + } + } + }, + "CallbackSimple": { + "Type": "AWS::Serverless::Function", + "Properties": { + "CodeUri": "build/", + "Handler": "callback_simple.handler", + "Description": "Creating a callback ID for external systems to use", + "Role": { + "Fn::GetAtt": [ + "DurableFunctionRole", + "Arn" + ] + }, + "DurableConfig": { + 
"RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + } + } + }, + "CallbackHeartbeat": { + "Type": "AWS::Serverless::Function", + "Properties": { + "CodeUri": "build/", + "Handler": "callback_heartbeat.handler", + "Description": "Demonstrates callback failure scenarios where the error propagates and is handled by framework", + "Role": { + "Fn::GetAtt": [ + "DurableFunctionRole", + "Arn" + ] + }, + "DurableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + } + } + }, + "CallbackMixedOps": { + "Type": "AWS::Serverless::Function", + "Properties": { + "CodeUri": "build/", + "Handler": "callback_mixed_ops.handler", + "Description": "Demonstrates createCallback mixed with steps, waits, and other operations", + "Role": { + "Fn::GetAtt": [ + "DurableFunctionRole", + "Arn" + ] + }, + "DurableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + } + } + }, + "CallbackSerdes": { + "Type": "AWS::Serverless::Function", + "Properties": { + "CodeUri": "build/", + "Handler": "callback_serdes.handler", + "Description": "Demonstrates createCallback with custom serialization/deserialization for Date objects", + "Role": { + "Fn::GetAtt": [ + "DurableFunctionRole", + "Arn" + ] + }, + "DurableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + } + } + }, + "NoReplayExecution": { + "Type": "AWS::Serverless::Function", + "Properties": { + "CodeUri": "build/", + "Handler": "no_replay_execution.handler", + "Description": "Execution with simple steps and without replay", + "Role": { + "Fn::GetAtt": [ + "DurableFunctionRole", + "Arn" + ] + }, + "DurableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + } + } + }, + "RunInChildContextStepFailure": { + "Type": "AWS::Serverless::Function", + "Properties": { + "CodeUri": "build/", + "Handler": "run_in_child_context_step_failure.handler", + "Description": "Demonstrates runInChildContext with a failing step followed by a successful wait", + "Role": { + "Fn::GetAtt": [ + 
"DurableFunctionRole", + "Arn" + ] + }, + "DurableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + } + } + }, + "ComprehensiveOperations": { + "Type": "AWS::Serverless::Function", + "Properties": { + "CodeUri": "build/", + "Handler": "comprehensive_operations.handler", + "Description": "Complex multi-operation example demonstrating all major operations", + "Role": { + "Fn::GetAtt": [ + "DurableFunctionRole", + "Arn" + ] + }, + "DurableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + } + } + }, + "CallbackConcurrency": { + "Type": "AWS::Serverless::Function", + "Properties": { + "CodeUri": "build/", + "Handler": "callback_concurrency.handler", + "Description": "Demonstrates multiple concurrent createCallback operations using context.parallel", + "Role": { + "Fn::GetAtt": [ + "DurableFunctionRole", + "Arn" + ] + }, + "DurableConfig": { + "RetentionPeriodInDays": 7, + "ExecutionTimeout": 300 + } + } + } + } +} \ No newline at end of file diff --git a/examples/test/README.md b/examples/test/README.md new file mode 100644 index 00000000..61996b43 --- /dev/null +++ b/examples/test/README.md @@ -0,0 +1,119 @@ +# Integration Tests for Python Durable Execution SDK + +This directory contains integration tests for the Python Durable Execution SDK examples. Tests can run in two modes using pytest fixtures. 
+ +## Test Modes + +### Local Mode (Default) +Tests run against the in-memory `DurableFunctionTestRunner`: +- ✅ Fast execution (seconds) +- ✅ No AWS credentials needed +- ✅ Perfect for development +- ✅ Validates local runner behavior + +```bash +# Run all example tests locally (default) +hatch run test:examples + +# Run with explicit mode flag +pytest --runner-mode=local -m example examples/test/ + +# Run specific test +pytest --runner-mode=local -k test_hello_world examples/test/ +``` + +### Cloud Mode (Integration) +Tests run against actual AWS Lambda functions using `DurableFunctionCloudTestRunner`: +- ✅ Validates cloud deployment +- ✅ Tests real Lambda execution +- ✅ Verifies end-to-end behavior +- ⚠️ Requires deployed functions + +```bash +# Deploy function first +hatch run examples:deploy "hello world" --function-name HelloWorld-Test + +# Set environment variables for cloud testing +export AWS_REGION=us-west-2 +export LAMBDA_ENDPOINT=https://lambda.us-west-2.amazonaws.com +export QUALIFIED_FUNCTION_NAME="HelloWorld-Test:\$LATEST" +export LAMBDA_FUNCTION_TEST_NAME="hello world" + +# Run tests +pytest --runner-mode=cloud -k test_hello_world examples/test/ + +# Or using hatch +hatch run test:examples-integration -k test_hello_world +``` + +## Writing Tests + +Use the `durable_runner` pytest fixture with the `@pytest.mark.durable_execution` marker: + +```python +import pytest +from aws_durable_execution_sdk_python.execution import InvocationStatus +from examples.src import my_example + + +@pytest.mark.example +@pytest.mark.durable_execution( + handler=my_example.handler, + lambda_function_name="my example", +) +def test_my_example(durable_runner): + """Test my example in both local and cloud modes.""" + with durable_runner: + result = durable_runner.run(input={"test": "data"}, timeout=10) + + # Assertions work in both modes + assert result.status == InvocationStatus.SUCCEEDED + assert result.result == "expected output" + + # Optional mode-specific validations + 
if durable_runner.mode == "cloud": + # Cloud-specific assertions + pass +``` + +## Configuration + +### Environment Variables (Cloud Mode) +- `AWS_REGION` - AWS region for Lambda invocation (default: us-west-2) +- `LAMBDA_ENDPOINT` - Optional Lambda endpoint URL for testing +- `QUALIFIED_FUNCTION_NAME` - Deployed Lambda function ARN or qualified name (required for cloud mode) +- `LAMBDA_FUNCTION_TEST_NAME` - Lambda function name to match with test's `lambda_function_name` marker (required for cloud mode) + +### CLI Options +- `--runner-mode` - Test mode: `local` (default) or `cloud` + +### Pytest Markers +- `-m example` - Run only example tests +- `-k test_name` - Run tests matching pattern + +## CI/CD Integration + +Tests automatically run in CI/CD after deployment: + +1. `deploy-examples.yml` deploys functions +2. Integration tests run against deployed functions +3. Results reported in GitHub Actions + +See `.github/workflows/deploy-examples.yml` for details. + +## Troubleshooting + +### Timeout errors +**Problem**: `TimeoutError: Execution did not complete within 60s` + +**Solution**: Increase timeout in test: +```python +result = runner.run(input="test", timeout=120) # Increase to 120s +``` + +### Import errors +**Problem**: `ModuleNotFoundError: No module named 'aws_durable_execution_sdk_python_testing'` + +**Solution**: Install dependencies: +```bash +hatch run test:examples # Installs dependencies automatically diff --git a/examples/test/__init__.py b/examples/test/__init__.py new file mode 100644 index 00000000..46dbb824 --- /dev/null +++ b/examples/test/__init__.py @@ -0,0 +1 @@ +"""Integration tests for AWS Durable Functions Python Examples.""" diff --git a/examples/test/block_example/test_block_example.py b/examples/test/block_example/test_block_example.py new file mode 100644 index 00000000..7d648f65 --- /dev/null +++ b/examples/test/block_example/test_block_example.py @@ -0,0 +1,104 @@ +"""Tests for block_example.""" + +import pytest +from 
aws_durable_execution_sdk_python.execution import InvocationStatus + +from src.block_example import block_example +from test.conftest import deserialize_operation_payload + + +def _get_all_operations(operations): + """Recursively get all operations including nested ones.""" + all_ops = [] + for op in operations: + all_ops.append(op) + if hasattr(op, "child_operations") and op.child_operations: + all_ops.extend(_get_all_operations(op.child_operations)) + return all_ops + + +@pytest.mark.example +@pytest.mark.durable_execution( + handler=block_example.handler, + lambda_function_name="block example", +) +def test_block_example(durable_runner): + """Test block example with nested child contexts.""" + with durable_runner: + result = durable_runner.run(input="test", timeout=10) + + assert result.status is InvocationStatus.SUCCEEDED + + # Verify the final result structure + assert deserialize_operation_payload(result.result) == { + "nestedStep": "nested step result", + "nestedBlock": "nested block result", + } + + # Check for the parent block operation + parent_block_ops = [ + op + for op in result.operations + if op.operation_type.value == "CONTEXT" and op.name == "parent_block" + ] + assert len(parent_block_ops) == 1 + parent_block_op = parent_block_ops[0] + + # Verify parent block result + assert deserialize_operation_payload(parent_block_op.result) == { + "nestedStep": "nested step result", + "nestedBlock": "nested block result", + } + + # Verify parent block has 2 child operations + child_operations = parent_block_op.child_operations + assert len(child_operations) == 2 + + # First child should be a STEP with result "nested step result" + assert child_operations[0].operation_type.value == "STEP" + assert ( + deserialize_operation_payload(child_operations[0].result) + == "nested step result" + ) + + # Second child should be a CONTEXT with result "nested block result" + assert child_operations[1].operation_type.value == "CONTEXT" + assert ( + 
deserialize_operation_payload(child_operations[1].result) + == "nested block result" + ) + + # Check for nested step operation by name + nested_step_ops = [ + op + for op in result.operations + if op.operation_type.value == "STEP" and op.name == "nested_step" + ] + # Note: nested_step is inside parent_block, so it won't be at top level + # We need to search in child operations + all_ops = _get_all_operations(result.operations) + nested_step_ops = [ + op + for op in all_ops + if op.operation_type.value == "STEP" and op.name == "nested_step" + ] + assert len(nested_step_ops) == 1 + assert ( + deserialize_operation_payload(nested_step_ops[0].result) == "nested step result" + ) + + # Check for nested block operation by name + nested_block_ops = [ + op + for op in all_ops + if op.operation_type.value == "CONTEXT" and op.name == "nested_block" + ] + assert len(nested_block_ops) == 1 + assert ( + deserialize_operation_payload(nested_block_ops[0].result) + == "nested block result" + ) + + # Verify wait operation exists within nested context + wait_ops = [op for op in all_ops if op.operation_type.value == "WAIT"] + assert len(wait_ops) >= 1 diff --git a/examples/test/callback/test_callback_concurrency.py b/examples/test/callback/test_callback_concurrency.py new file mode 100644 index 00000000..e78b0f5d --- /dev/null +++ b/examples/test/callback/test_callback_concurrency.py @@ -0,0 +1,83 @@ +"""Tests for create_callback_concurrent.""" + +import json + +import pytest +from aws_durable_execution_sdk_python.execution import InvocationStatus + +from src.callback import callback_concurrency +from test.conftest import deserialize_operation_payload + + +@pytest.mark.example +@pytest.mark.durable_execution( + handler=callback_concurrency.handler, + lambda_function_name="Create Callback Concurrency", +) +def test_handle_multiple_concurrent_callback_operations(durable_runner): + """Test handling multiple concurrent callback operations.""" + with durable_runner: + # Start the execution 
(this will pause at the callbacks) + execution_arn = durable_runner.run_async(input=None, timeout=60) + + callback_id_1 = durable_runner.wait_for_callback( + execution_arn=execution_arn, name="api-call-1" + ) + callback_id_2 = durable_runner.wait_for_callback( + execution_arn=execution_arn, name="api-call-2" + ) + callback_id_3 = durable_runner.wait_for_callback( + execution_arn=execution_arn, name="api-call-3" + ) + + callback_result_2 = json.dumps( + { + "id": 2, + "data": "second", + } + ) + durable_runner.send_callback_success( + callback_id=callback_id_2, result=callback_result_2.encode() + ) + + callback_result_1 = json.dumps( + { + "id": 1, + "data": "first", + } + ) + durable_runner.send_callback_success( + callback_id=callback_id_1, result=callback_result_1.encode() + ) + + callback_result_3 = json.dumps( + { + "id": 3, + "data": "third", + } + ) + durable_runner.send_callback_success( + callback_id=callback_id_3, result=callback_result_3.encode() + ) + + result = durable_runner.wait_for_result(execution_arn=execution_arn) + + assert result.status is InvocationStatus.SUCCEEDED + + result_data = deserialize_operation_payload(result.result) + + assert result_data == { + "results": [callback_result_1, callback_result_2, callback_result_3], + "allCompleted": True, + } + + # Verify all callback operations were tracked + operations = result.get_context("parallel_callbacks") + + assert len(operations.child_operations) == 3 + + # Verify all operations are CALLBACK type + for op in operations.child_operations: + assert op.operation_type.value == "CONTEXT" + assert len(op.child_operations) == 1 + assert op.child_operations[0].operation_type.value == "CALLBACK" diff --git a/examples/test/callback/test_callback_heartbeat.py b/examples/test/callback/test_callback_heartbeat.py new file mode 100644 index 00000000..66a99e98 --- /dev/null +++ b/examples/test/callback/test_callback_heartbeat.py @@ -0,0 +1,51 @@ +"""Tests for create_callback_heartbeat.""" + +import pytest 
+from aws_durable_execution_sdk_python.execution import InvocationStatus +import time +import json +from src.callback import callback_heartbeat +from test.conftest import deserialize_operation_payload + + +@pytest.mark.example +@pytest.mark.durable_execution( + handler=callback_heartbeat.handler, + lambda_function_name="Create Callback Heartbeat", +) +def test_handle_callback_operations_with_failure_uncaught(durable_runner): + """Test handling callback operations with failure.""" + test_payload = {"shouldCatchError": False} + + heartbeat_interval = 5 + total_duration = 20 + num_heartbeats = total_duration // heartbeat_interval + + with durable_runner: + execution_arn = durable_runner.run_async(input=test_payload, timeout=30) + + callback_id = durable_runner.wait_for_callback(execution_arn=execution_arn) + + for i in range(num_heartbeats): + print( + f"Sending heartbeat {i + 1}/{num_heartbeats} at {(i + 1) * heartbeat_interval}s" + ) + durable_runner.send_callback_heartbeat(callback_id=callback_id) + time.sleep(heartbeat_interval) + + callback_result = json.dumps( + { + "status": "completed", + "data": "success after heartbeats", + } + ) + durable_runner.send_callback_success( + callback_id=callback_id, result=callback_result.encode() + ) + + result = durable_runner.wait_for_result(execution_arn=execution_arn) + assert result.status is InvocationStatus.SUCCEEDED + + # Assert the callback result is returned + result_data = deserialize_operation_payload(result.result) + assert result_data == callback_result diff --git a/examples/test/callback/test_callback_mixed_ops.py b/examples/test/callback/test_callback_mixed_ops.py new file mode 100644 index 00000000..f87c06cc --- /dev/null +++ b/examples/test/callback/test_callback_mixed_ops.py @@ -0,0 +1,49 @@ +"""Tests for create_callback_mixed_ops.""" + +import json +import time + +import pytest +from aws_durable_execution_sdk_python.execution import InvocationStatus + +from src.callback import callback_mixed_ops +from 
test.conftest import deserialize_operation_payload + + +@pytest.mark.example +@pytest.mark.durable_execution( + handler=callback_mixed_ops.handler, + lambda_function_name="Create Callback Mixed Operations", +) +def test_handle_callback_operations_mixed_with_other_operation_types(durable_runner): + """Test callback operations mixed with other operation types.""" + with durable_runner: + execution_arn = durable_runner.run_async(input=None, timeout=30) + callback_id = durable_runner.wait_for_callback(execution_arn=execution_arn) + callback_result = json.dumps( + { + "processed": True, + } + ) + durable_runner.send_callback_success( + callback_id=callback_id, result=callback_result.encode() + ) + result = durable_runner.wait_for_result(execution_arn=execution_arn) + + assert result.status is InvocationStatus.SUCCEEDED + + result_data = deserialize_operation_payload(result.result) + + assert result_data == { + "stepResult": {"userId": 123, "name": "John Doe"}, + "callbackResult": callback_result, + "completed": True, + } + + completed_operations = result.operations + assert len(completed_operations) == 3 + + operation_types = [op.operation_type.value for op in completed_operations] + assert "WAIT" in operation_types + assert "STEP" in operation_types + assert "CALLBACK" in operation_types diff --git a/examples/test/callback/test_callback_serdes.py b/examples/test/callback/test_callback_serdes.py new file mode 100644 index 00000000..b007782b --- /dev/null +++ b/examples/test/callback/test_callback_serdes.py @@ -0,0 +1,60 @@ +"""Tests for create_callback_serdes.""" + +import json +from datetime import datetime, timezone + +import pytest +from aws_durable_execution_sdk_python.execution import InvocationStatus + +from src.callback.callback_serdes import CustomData, CustomDataSerDes +from src.callback import callback_serdes +from test.conftest import deserialize_operation_payload + + +class CustomDataTestSerDes(CustomDataSerDes): + """Test version of CustomDataSerDes for use 
in tests.""" + + pass + + +@pytest.mark.example +@pytest.mark.durable_execution( + handler=callback_serdes.handler, + lambda_function_name="Create Callback Custom Serdes", +) +def test_handle_callback_operations_with_custom_serdes(durable_runner): + """Test callback operations with custom serdes.""" + with durable_runner: + # Start the execution (this will pause at the callback) + execution_arn = durable_runner.run_async(input=None, timeout=30) + + # Wait for callback and get callback_id + callback_id = durable_runner.wait_for_callback(execution_arn=execution_arn) + + # Send data that requires custom serialization + test_data = CustomData( + id=42, + message="Hello World", + timestamp=datetime(2025, 1, 1, 0, 0, 0, tzinfo=timezone.utc), + ) + + # Serialize the data using custom serdes for sending + serdes = CustomDataTestSerDes() + serialized_data = serdes.serialize(test_data, None) + + durable_runner.send_callback_success( + callback_id=callback_id, result=serialized_data.encode() + ) + + # Wait for the execution to complete + result = durable_runner.wait_for_result(execution_arn=execution_arn) + + assert result.status is InvocationStatus.SUCCEEDED + + result_data = deserialize_operation_payload(result.result) + + # Verify the result structure + assert result_data["receivedData"]["id"] == 42 + assert result_data["receivedData"]["message"] == "Hello World" + assert "2025-01-01T00:00:00" in result_data["receivedData"]["timestamp"] + assert result_data["isDateObject"] is True diff --git a/examples/test/callback/test_callback_simple.py b/examples/test/callback/test_callback_simple.py new file mode 100644 index 00000000..678e5425 --- /dev/null +++ b/examples/test/callback/test_callback_simple.py @@ -0,0 +1,47 @@ +"""Tests for callback example.""" + +import pytest +from aws_durable_execution_sdk_python.execution import InvocationStatus + +from src.callback import callback_simple +from test.conftest import deserialize_operation_payload + + +@pytest.mark.example 
+@pytest.mark.durable_execution( + handler=callback_simple.handler, + lambda_function_name="Callback Success", +) +def test_callback_success(durable_runner): + callback_result = "successful" + + with durable_runner: + execution_arn = durable_runner.run_async(input=None, timeout=30) + callback_id = durable_runner.wait_for_callback(execution_arn=execution_arn) + durable_runner.send_callback_success( + callback_id=callback_id, result=callback_result.encode() + ) + result = durable_runner.wait_for_result(execution_arn=execution_arn) + + assert result.status is InvocationStatus.SUCCEEDED + + result_data = deserialize_operation_payload(result.result) + assert result_data == callback_result + + +@pytest.mark.example +@pytest.mark.durable_execution( + handler=callback_simple.handler, + lambda_function_name="Callback Success None", +) +def test_callback_success_none_result(durable_runner): + with durable_runner: + execution_arn = durable_runner.run_async(input=None, timeout=30) + callback_id = durable_runner.wait_for_callback(execution_arn=execution_arn) + durable_runner.send_callback_success(callback_id=callback_id, result=b"") + result = durable_runner.wait_for_result(execution_arn=execution_arn) + + assert result.status is InvocationStatus.SUCCEEDED + + result_data = deserialize_operation_payload(result.result) + assert result_data is None diff --git a/examples/test/comprehensive_operations/test_comprehensive_operations.py b/examples/test/comprehensive_operations/test_comprehensive_operations.py new file mode 100644 index 00000000..4b84cc6c --- /dev/null +++ b/examples/test/comprehensive_operations/test_comprehensive_operations.py @@ -0,0 +1,94 @@ +"""Tests for comprehensive_operations.""" + +import pytest +from aws_durable_execution_sdk_python.execution import InvocationStatus + +from src.comprehensive_operations import comprehensive_operations +from test.conftest import deserialize_operation_payload + + +@pytest.mark.example +@pytest.mark.durable_execution( + 
handler=comprehensive_operations.handler, + lambda_function_name="Comprehensive Operations", +) +def test_execute_all_operations_successfully(durable_runner): + """Test that all operations execute successfully.""" + with durable_runner: + result = durable_runner.run(input={"message": "test"}, timeout=30) + + assert result.status is InvocationStatus.SUCCEEDED + + result_data = deserialize_operation_payload(result.result) + + assert result_data["step1"] == "Step 1 completed successfully" + assert result_data["waitCompleted"] is True + + # verify map results + map_results = result_data["mapResults"] + assert len(map_results["all"]) == 5 + assert [item["result"] for item in map_results["all"]] == [1, 2, 3, 4, 5] + assert map_results["completionReason"] == "ALL_COMPLETED" + + # verify parallel results + parallel_results = result_data["parallelResults"] + assert len(parallel_results["all"]) == 3 + assert [item["result"] for item in parallel_results["all"]] == [ + "apple", + "banana", + "orange", + ] + assert parallel_results["completionReason"] == "ALL_COMPLETED" + + # Get all operations including nested ones + all_ops = result.get_all_operations() + + # Verify step1 operation + step1_ops = [ + op for op in all_ops if op.operation_type.value == "STEP" and op.name == "step1" + ] + assert len(step1_ops) == 1 + step1_op = step1_ops[0] + assert ( + deserialize_operation_payload(step1_op.result) + == "Step 1 completed successfully" + ) + + # Verify wait operation (should be at index 1) + wait_op = result.operations[1] + assert wait_op.operation_type.value == "WAIT" + + # Verify individual map step operations exist with correct names + for i in range(5): + map_step_ops = [ + op + for op in all_ops + if op.operation_type.value == "STEP" and op.name == f"map-step-{i}" + ] + assert len(map_step_ops) == 1 + assert deserialize_operation_payload(map_step_ops[0].result) == i + 1 + + # Verify individual parallel step operations exist + fruit_step_1_ops = [ + op + for op in all_ops + 
if op.operation_type.value == "STEP" and op.name == "fruit-step-1" + ] + assert len(fruit_step_1_ops) == 1 + assert deserialize_operation_payload(fruit_step_1_ops[0].result) == "apple" + + fruit_step_2_ops = [ + op + for op in all_ops + if op.operation_type.value == "STEP" and op.name == "fruit-step-2" + ] + assert len(fruit_step_2_ops) == 1 + assert deserialize_operation_payload(fruit_step_2_ops[0].result) == "banana" + + fruit_step_3_ops = [ + op + for op in all_ops + if op.operation_type.value == "STEP" and op.name == "fruit-step-3" + ] + assert len(fruit_step_3_ops) == 1 + assert deserialize_operation_payload(fruit_step_3_ops[0].result) == "orange" diff --git a/examples/test/conftest.py b/examples/test/conftest.py new file mode 100644 index 00000000..679ba486 --- /dev/null +++ b/examples/test/conftest.py @@ -0,0 +1,268 @@ +"""Pytest configuration and fixtures for durable execution tests.""" + +import contextlib +import json +import logging +import os +import sys +from enum import StrEnum +from pathlib import Path +from typing import Any + +import pytest +from aws_durable_execution_sdk_python.lambda_service import ( + ErrorObject, + OperationPayload, +) +from aws_durable_execution_sdk_python.serdes import ExtendedTypeSerDes + +from aws_durable_execution_sdk_python_testing.runner import ( + DurableFunctionCloudTestRunner, + DurableFunctionTestResult, + DurableFunctionTestRunner, +) + + +# Add examples/src to Python path for imports +examples_src = Path(__file__).parent.parent / "src" +if str(examples_src) not in sys.path: + sys.path.insert(0, str(examples_src)) + + +logger = logging.getLogger(__name__) + + +def deserialize_operation_payload( + payload: OperationPayload | None, serdes: ExtendedTypeSerDes | None = None +) -> Any: + """Deserialize an operation payload using the provided or default serializer. + + This utility function helps test code deserialize operation results that are + returned as raw strings. 
It supports both the default ExtendedTypeSerDes and + custom serializers. + + Args: + payload: The operation payload string to deserialize, or None. + serdes: Optional custom serializer. If None, uses ExtendedTypeSerDes. + + Returns: + Deserialized result object, or None if payload is None. + """ + if not payload: + return None + + if serdes is None: + serdes = ExtendedTypeSerDes() + + try: + return serdes.deserialize(payload) + except Exception: + # Fallback to plain JSON for backwards compatibility + return json.loads(payload) + + +class RunnerMode(StrEnum): + """Runner mode for local or cloud execution.""" + + LOCAL = "local" + CLOUD = "cloud" + + +def pytest_addoption(parser): + """Add custom command line options for test execution.""" + parser.addoption( + "--runner-mode", + action="store", + default=RunnerMode.LOCAL, + choices=[RunnerMode.LOCAL, RunnerMode.CLOUD], + help="Test runner mode: local (in-memory) or cloud (deployed Lambda)", + ) + + +class TestRunnerAdapter: + """Adapter that provides consistent interface for both local and cloud runners. + + This adapter encapsulates the differences between local and cloud test runners: + - Local runner: Requires context manager for resource cleanup (scheduler thread) + - Cloud runner: No resource cleanup needed (stateless boto3 client) + + The adapter ensures proper resource management while providing a unified interface. 
+ """ + + def __init__( + self, + runner: DurableFunctionTestRunner | DurableFunctionCloudTestRunner, + mode: str, + ): + """Initialize the adapter.""" + self._runner: DurableFunctionTestRunner | DurableFunctionCloudTestRunner = ( + runner + ) + self._mode: str = mode + + def run( + self, + input: str | None = None, # noqa: A002 + timeout: int = 60, + ) -> DurableFunctionTestResult: + """Execute the durable function and return results.""" + return self._runner.run(input=input, timeout=timeout) + + def run_async( + self, + input: str | None = None, # noqa: A002 + timeout: int = 60, + ) -> str: + return self._runner.run_async(input=input, timeout=timeout) + + def send_callback_success( + self, callback_id: str, result: bytes | None = None + ) -> None: + self._runner.send_callback_success(callback_id=callback_id, result=result) + + def send_callback_failure( + self, callback_id: str, error: ErrorObject | None = None + ) -> None: + self._runner.send_callback_failure(callback_id=callback_id, error=error) + + def send_callback_heartbeat(self, callback_id: str) -> None: + self._runner.send_callback_heartbeat(callback_id=callback_id) + + def wait_for_result( + self, execution_arn: str, timeout: int = 60 + ) -> DurableFunctionTestResult: + return self._runner.wait_for_result( + execution_arn=execution_arn, timeout=timeout + ) + + def wait_for_callback( + self, execution_arn: str, name: str | None = None, timeout: int = 60 + ) -> str: + return self._runner.wait_for_callback( + execution_arn=execution_arn, name=name, timeout=timeout + ) + + @property + def mode(self) -> str: + """Get the runner mode (local or cloud).""" + return self._mode + + def __enter__(self): + """Context manager entry - only calls runner's __enter__ if it's a context manager.""" + if isinstance(self._runner, contextlib.AbstractContextManager): + self._runner.__enter__() + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + """Context manager exit - only calls runner's __exit__ if it's a 
context manager.""" + if isinstance(self._runner, contextlib.AbstractContextManager): + return self._runner.__exit__(exc_type, exc_val, exc_tb) + return None + + +@pytest.fixture +def durable_runner(request): + """Pytest fixture that provides a test runner based on configuration. + + Configuration for cloud mode: + Environment variables (required): + AWS_REGION: AWS region for Lambda invocation (default: us-west-2) + LAMBDA_ENDPOINT: Optional Lambda endpoint URL + PYTEST_FUNCTION_NAME_MAP: JSON mapping of example names to deployed function names + + CLI option: + --runner-mode=cloud (or local, default: local) + + Example: + AWS_REGION=us-west-2 \ + LAMBDA_ENDPOINT=https://lambda.us-west-2.amazonaws.com \ + PYTEST_FUNCTION_NAME_MAP='{"hello world":"HelloWorld:$LATEST"}' \ + pytest --runner-mode=cloud -k test_hello_world + + Usage in tests: + @pytest.mark.durable_execution( + handler=hello_world.handler, + lambda_function_name="hello world" + ) + def test_hello_world(durable_runner): + with durable_runner: + result = durable_runner.run(input="test", timeout=10) + assert result.status == InvocationStatus.SUCCEEDED + """ + # Get marker with test configuration + marker = request.node.get_closest_marker("durable_execution") + if not marker: + pytest.fail("Test must be marked with @pytest.mark.durable_execution") + + handler: Any = marker.kwargs.get("handler") + lambda_function_name: str | None = marker.kwargs.get("lambda_function_name") + + # Get runner mode from CLI option + runner_mode: str = request.config.getoption("--runner-mode") + + logger.info("Running test in %s mode", runner_mode.upper()) + + # Create appropriate runner + if runner_mode == RunnerMode.CLOUD: + # Get deployed function name and AWS config from environment + deployed_name = _get_deployed_function_name(request, lambda_function_name) + region = os.environ.get("AWS_REGION", "us-west-2") + lambda_endpoint = os.environ.get("LAMBDA_ENDPOINT") + + logger.info("Using AWS region: %s", region) + + # Create 
cloud runner (no cleanup needed) + runner = DurableFunctionCloudTestRunner( + function_name=deployed_name, + region=region, + lambda_endpoint=lambda_endpoint, + ) + else: + if not handler: + pytest.fail("handler is required for local mode tests") + # Create local runner (needs cleanup via context manager) + runner = DurableFunctionTestRunner(handler=handler) + + # Wrap in adapter and use context manager for proper cleanup + with TestRunnerAdapter(runner, runner_mode) as adapter: + yield adapter + + +def _get_deployed_function_name( + request: pytest.FixtureRequest, + lambda_function_name: str | None, +) -> str: + """Get the deployed function name from environment variables. + + Required environment variables: + - QUALIFIED_FUNCTION_NAME: The qualified function ARN (e.g., "MyFunction:$LATEST") + - LAMBDA_FUNCTION_TEST_NAME: The lambda function name to match against test markers + + Tests are skipped if the test's lambda_function_name doesn't match LAMBDA_FUNCTION_TEST_NAME. + """ + if not lambda_function_name: + pytest.fail("lambda_function_name is required for cloud mode tests") + + # Get from environment variables + function_arn = os.environ.get("QUALIFIED_FUNCTION_NAME") + env_function_name = os.environ.get("LAMBDA_FUNCTION_TEST_NAME") + + if not function_arn or not env_function_name: + pytest.fail( + "Cloud mode requires both QUALIFIED_FUNCTION_NAME and LAMBDA_FUNCTION_TEST_NAME environment variables\n" + 'Example: QUALIFIED_FUNCTION_NAME="MyFunction:$LATEST" LAMBDA_FUNCTION_TEST_NAME="hello world" pytest --runner-mode=cloud' + ) + + # Check if this test matches the function name (case-insensitive) + if lambda_function_name.lower() == env_function_name.lower(): + logger.info( + "Using function ARN: %s for lambda function: %s", + function_arn, + env_function_name, + ) + return function_arn + + # This test doesn't match the function name, skip it + pytest.skip( + f"Test '{lambda_function_name}' doesn't match LAMBDA_FUNCTION_TEST_NAME '{env_function_name}'" + ) 
diff --git a/examples/test/handler_error/test_handler_error.py b/examples/test/handler_error/test_handler_error.py new file mode 100644 index 00000000..fc1b2430 --- /dev/null +++ b/examples/test/handler_error/test_handler_error.py @@ -0,0 +1,32 @@ +"""Tests for handler_error.""" + +import pytest +from aws_durable_execution_sdk_python.execution import InvocationStatus + +from src.handler_error import handler_error + + +@pytest.mark.example +@pytest.mark.durable_execution( + handler=handler_error.handler, + lambda_function_name="handler error", +) +def test_handle_handler_errors_gracefully_and_capture_error_details(durable_runner): + """Test that handler errors are handled gracefully and error details are captured.""" + test_payload = {"test": "error-case"} + + with durable_runner: + result = durable_runner.run(input=test_payload, timeout=10) + + # Verify execution failed + assert result.status is InvocationStatus.FAILED + + # Check that error was captured in the result + error = result.error + assert error is not None + + assert error.message == "Intentional handler failure" + assert error.type == "Exception" + + # Verify no operations were completed due to early error + assert len(result.operations) == 0 diff --git a/examples/test/logger_example/test_logger_example.py b/examples/test/logger_example/test_logger_example.py new file mode 100644 index 00000000..2087e721 --- /dev/null +++ b/examples/test/logger_example/test_logger_example.py @@ -0,0 +1,35 @@ +"""Tests for logger_example.""" + +import pytest +from aws_durable_execution_sdk_python.execution import InvocationStatus +from aws_durable_execution_sdk_python.lambda_service import OperationType + +from src.logger_example import logger_example +from test.conftest import deserialize_operation_payload + + +@pytest.mark.example +@pytest.mark.durable_execution( + handler=logger_example.handler, + lambda_function_name="logger example", +) +def test_logger_example(durable_runner): + """Test logger example.""" + with 
durable_runner: + result = durable_runner.run(input={"id": "test-123"}, timeout=10) + + assert result.status is InvocationStatus.SUCCEEDED + assert deserialize_operation_payload(result.result) == "processed-child-processed" + + # Verify step operations exist (process_data at top level) + # Note: child_step is nested inside the CONTEXT operation, not at top level + step_ops = [ + op for op in result.operations if op.operation_type == OperationType.STEP + ] + assert len(step_ops) >= 1 + + # Verify context operation exists (child_workflow) + context_ops = [ + op for op in result.operations if op.operation_type.value == "CONTEXT" + ] + assert len(context_ops) >= 1 diff --git a/examples/test/map/test_map_completion.py b/examples/test/map/test_map_completion.py new file mode 100644 index 00000000..f7bd850a --- /dev/null +++ b/examples/test/map/test_map_completion.py @@ -0,0 +1,32 @@ +"""Tests for map_completion.""" + +import pytest + +from src.map import map_completion +from test.conftest import deserialize_operation_payload +from aws_durable_execution_sdk_python.execution import InvocationStatus + + +@pytest.mark.example +@pytest.mark.durable_execution( + handler=map_completion.handler, + lambda_function_name="Map Completion Config", +) +def test_reproduce_completion_config_behavior_with_detailed_logging(durable_runner): + """Demonstrates map behavior with minSuccessful and concurrent execution.""" + with durable_runner: + result = durable_runner.run(input=None, timeout=60) + + assert result.status is InvocationStatus.SUCCEEDED + + result_data = deserialize_operation_payload(result.result) + + # 5 items are processed 2 of them succeeded. We exit early because min_successful is 2. + # Additionally, failure_count shows 0 because failed items have retry strategies configured and are still retrying + # when execution completes. Failures aren't finalized until retries complete, so they don't appear in the failure_count. 
+ assert result_data["totalItems"] == 5 + assert result_data["successfulCount"] == 2 + assert result_data["failedCount"] == 0 + assert result_data["hasFailures"] is False + assert result_data["batchStatus"] == "BatchItemStatus.SUCCEEDED" + assert result_data["completionReason"] == "CompletionReason.MIN_SUCCESSFUL_REACHED" diff --git a/examples/test/map/test_map_operations.py b/examples/test/map/test_map_operations.py new file mode 100644 index 00000000..da8dc93f --- /dev/null +++ b/examples/test/map/test_map_operations.py @@ -0,0 +1,42 @@ +"""Tests for map_operations example.""" + +import pytest +from aws_durable_execution_sdk_python.execution import InvocationStatus +from aws_durable_execution_sdk_python.lambda_service import ( + OperationStatus, + OperationType, +) + +from src.map import map_operations +from test.conftest import deserialize_operation_payload + + +@pytest.mark.example +@pytest.mark.durable_execution( + handler=map_operations.handler, + lambda_function_name="map operations", +) +def test_map_operations(durable_runner): + """Test map_operations example using context.map().""" + with durable_runner: + result = durable_runner.run(input="test", timeout=10) + + assert result.status is InvocationStatus.SUCCEEDED + assert deserialize_operation_payload(result.result) == [2, 4, 6, 8, 10] + + # Get the map operation (CONTEXT type with MAP subtype) + map_op = result.get_context("map_operation") + assert map_op is not None + assert map_op.status is OperationStatus.SUCCEEDED + + # Verify all five child operations exist + assert len(map_op.child_operations) == 5 + + # Verify child operation names (SDK uses map-item-* format) + child_names = {op.name for op in map_op.child_operations} + expected_names = {f"map-item-{i}" for i in range(5)} + assert child_names == expected_names + + # Verify all children succeeded + for child in map_op.child_operations: + assert child.status is OperationStatus.SUCCEEDED diff --git a/examples/test/map/test_map_with_batch_serdes.py 
b/examples/test/map/test_map_with_batch_serdes.py new file mode 100644 index 00000000..b30a9bdb --- /dev/null +++ b/examples/test/map/test_map_with_batch_serdes.py @@ -0,0 +1,43 @@ +"""Tests for map with batch-level serdes.""" + +import pytest +from aws_durable_execution_sdk_python.execution import InvocationStatus +from aws_durable_execution_sdk_python.lambda_service import OperationStatus +from src.map import map_with_batch_serdes +from test.conftest import deserialize_operation_payload + + +@pytest.mark.example +@pytest.mark.durable_execution( + handler=map_with_batch_serdes.handler, + lambda_function_name="Map with Batch SerDes", +) +def test_map_with_batch_serdes(durable_runner): + """Test map with custom batch-level serialization.""" + with durable_runner: + result = durable_runner.run(input="test", timeout=10) + + assert result.status is InvocationStatus.SUCCEEDED + + result_data = deserialize_operation_payload(result.result) + + # Verify all items were processed + assert result_data["success_count"] == 4 + + # Verify results + results = result_data["results"] + assert len(results) == 4 + assert results == [20, 40, 60, 80] # [10*2, 20*2, 30*2, 40*2] + + # Verify sum + assert result_data["sum"] == 200 + + # Get the map operation + map_op = result.get_context("map_with_batch_serdes") + assert map_op is not None + assert map_op.status is OperationStatus.SUCCEEDED + + # Verify all 4 child operations exist and succeeded + assert len(map_op.child_operations) == 4 + for child in map_op.child_operations: + assert child.status is OperationStatus.SUCCEEDED diff --git a/examples/test/map/test_map_with_custom_serdes.py b/examples/test/map/test_map_with_custom_serdes.py new file mode 100644 index 00000000..c0d3d79f --- /dev/null +++ b/examples/test/map/test_map_with_custom_serdes.py @@ -0,0 +1,48 @@ +"""Tests for map with custom serdes.""" + +import pytest +from aws_durable_execution_sdk_python.execution import InvocationStatus +from 
aws_durable_execution_sdk_python.lambda_service import OperationStatus +from src.map import map_with_custom_serdes +from test.conftest import deserialize_operation_payload + + +@pytest.mark.example +@pytest.mark.durable_execution( + handler=map_with_custom_serdes.handler, + lambda_function_name="Map with Custom SerDes", +) +def test_map_with_custom_serdes(durable_runner): + """Test map with custom item serialization.""" + with durable_runner: + result = durable_runner.run(input="test", timeout=10) + + assert result.status is InvocationStatus.SUCCEEDED + + result_data = deserialize_operation_payload(result.result) + + # Verify all items were processed + assert result_data["success_count"] == 3 + + # Verify results were properly deserialized + results = result_data["results"] + assert len(results) == 3 + + # Verify the custom serdes worked (data was serialized and deserialized correctly) + processed_names = result_data["processed_names"] + assert processed_names == ["item1", "item2", "item3"] + + # Verify processing logic worked correctly + for i, r in enumerate(results): + assert r["index"] == i + assert r["doubled_id"] == (i + 1) * 2 # IDs are 1, 2, 3 + + # Get the map operation + map_op = result.get_context("map_with_custom_serdes") + assert map_op is not None + assert map_op.status is OperationStatus.SUCCEEDED + + # Verify all 3 child operations exist and succeeded + assert len(map_op.child_operations) == 3 + for child in map_op.child_operations: + assert child.status is OperationStatus.SUCCEEDED diff --git a/examples/test/map/test_map_with_failure_tolerance.py b/examples/test/map/test_map_with_failure_tolerance.py new file mode 100644 index 00000000..4cf06d1b --- /dev/null +++ b/examples/test/map/test_map_with_failure_tolerance.py @@ -0,0 +1,52 @@ +"""Tests for map with failure tolerance.""" + +import pytest +from aws_durable_execution_sdk_python.execution import InvocationStatus +from aws_durable_execution_sdk_python.lambda_service import OperationStatus +from 
src.map import map_with_failure_tolerance +from test.conftest import deserialize_operation_payload + + +@pytest.mark.example +@pytest.mark.durable_execution( + handler=map_with_failure_tolerance.handler, + lambda_function_name="Map with Failure Tolerance", +) +def test_map_with_failure_tolerance(durable_runner): + """Test map with failure tolerance.""" + with durable_runner: + result = durable_runner.run(input="test", timeout=10) + + assert result.status is InvocationStatus.SUCCEEDED + + result_data = deserialize_operation_payload(result.result) + + # Should have 7 successes and 3 failures (items 3, 6, 9 fail) + assert result_data["success_count"] == 7 + assert result_data["failure_count"] == 3 + assert result_data["failed_count"] == 3 + + # Verify successful results (items 1,2,4,5,7,8,10 multiplied by 2) + expected_results = [2, 4, 8, 10, 14, 16, 20] + assert set(result_data["succeeded"]) == set(expected_results) + + assert result_data["completion_reason"] == "ALL_COMPLETED" + + # Get the map operation + map_op = result.get_context("map_with_tolerance") + assert map_op is not None + assert map_op.status is OperationStatus.SUCCEEDED + + # Verify all 10 child operations exist + assert len(map_op.child_operations) == 10 + + # Count successes and failures + succeeded = [ + op for op in map_op.child_operations if op.status is OperationStatus.SUCCEEDED + ] + failed = [ + op for op in map_op.child_operations if op.status is OperationStatus.FAILED + ] + + assert len(succeeded) == 7 + assert len(failed) == 3 diff --git a/examples/test/map/test_map_with_large_scale.py b/examples/test/map/test_map_with_large_scale.py new file mode 100644 index 00000000..be3fb7ba --- /dev/null +++ b/examples/test/map/test_map_with_large_scale.py @@ -0,0 +1,36 @@ +"""Tests for map_large_scale.""" + +import pytest +from aws_durable_execution_sdk_python.execution import InvocationStatus + +from src.map import map_with_large_scale +from test.conftest import deserialize_operation_payload + + 
+@pytest.mark.example
+@pytest.mark.durable_execution(
+    handler=map_with_large_scale.handler,
+    lambda_function_name="map large scale",
+)
+def test_handle_50_items_with_100kb_each_using_map(durable_runner):
+    """Test handling 50 items with 100KB each using map."""
+    with durable_runner:
+        result = durable_runner.run(input=None, timeout=60)
+
+        result_data = deserialize_operation_payload(result.result)
+
+        # Verify the execution succeeded
+        assert result.status is InvocationStatus.SUCCEEDED
+        assert result_data["success"] is True
+
+        # Verify the expected number of items were processed (50 items)
+        assert result_data["summary"]["itemsProcessed"] == 50
+        assert result_data["summary"]["allItemsProcessed"] is True
+
+        # Verify data size expectations (~5MB total from 50 items × 100KB each)
+        assert result_data["summary"]["totalDataSizeMB"] > 4  # Should be ~5MB
+        assert result_data["summary"]["totalDataSizeMB"] < 6
+        assert result_data["summary"]["totalDataSizeBytes"] > 5000000  # ~5MB
+        assert result_data["summary"]["averageItemSize"] > 100000  # ~100KB per item
+        assert result_data["summary"]["maxConcurrency"] == 10
diff --git a/examples/test/map/test_map_with_max_concurrency.py b/examples/test/map/test_map_with_max_concurrency.py
new file mode 100644
index 00000000..3b6d5052
--- /dev/null
+++ b/examples/test/map/test_map_with_max_concurrency.py
@@ -0,0 +1,37 @@
+"""Tests for map with maxConcurrency."""
+
+import pytest
+from aws_durable_execution_sdk_python.execution import InvocationStatus
+from aws_durable_execution_sdk_python.lambda_service import OperationStatus
+from src.map import map_with_max_concurrency
+from test.conftest import deserialize_operation_payload
+
+
+@pytest.mark.example
+@pytest.mark.durable_execution(
+    handler=map_with_max_concurrency.handler,
+    lambda_function_name="Map with Max Concurrency",
+)
+def test_map_with_max_concurrency(durable_runner):
+    """Test map with maxConcurrency limit."""
+    with durable_runner:
+        result = 
durable_runner.run(input="test", timeout=10) + + assert result.status is InvocationStatus.SUCCEEDED + + results_list = deserialize_operation_payload(result.result) + assert len(results_list) == 10 + # Items 1-10 multiplied by 3 + assert results_list == [3, 6, 9, 12, 15, 18, 21, 24, 27, 30] + + # Get the map operation + map_op = result.get_context("map_with_concurrency") + assert map_op is not None + assert map_op.status is OperationStatus.SUCCEEDED + + # Verify all 10 child operations exist + assert len(map_op.child_operations) == 10 + + # Verify all children succeeded + for child in map_op.child_operations: + assert child.status is OperationStatus.SUCCEEDED diff --git a/examples/test/map/test_map_with_min_successful.py b/examples/test/map/test_map_with_min_successful.py new file mode 100644 index 00000000..c3a21772 --- /dev/null +++ b/examples/test/map/test_map_with_min_successful.py @@ -0,0 +1,70 @@ +"""Tests for map with min_successful.""" + +import pytest +from aws_durable_execution_sdk_python.execution import InvocationStatus +from aws_durable_execution_sdk_python.lambda_service import OperationStatus +from src.map import map_with_min_successful +from test.conftest import deserialize_operation_payload + + +@pytest.mark.example +@pytest.mark.durable_execution( + handler=map_with_min_successful.handler, + lambda_function_name="Map with Min Successful", +) +def test_map_with_min_successful(durable_runner): + """Test map with min_successful threshold.""" + with durable_runner: + result = durable_runner.run(input="test", timeout=10) + + assert result.status is InvocationStatus.SUCCEEDED + + result_data = deserialize_operation_payload(result.result) + + # With min_successful=6, operation completes after reaching 6 successes + # Due to concurrency (max_concurrency=5), some items may complete before check + # Items 1-6 succeed, item 10 succeeds, items 7-9 fail + # Depending on timing, we get 6 or 7 successes + assert result_data["success_count"] >= 6 + assert 
result_data["success_count"] <= 7 + + # Operation stops once min_successful is reached + # Items 7-9 (which would fail) are never processed + assert result_data["failure_count"] == 0 + assert result_data["total_count"] == 10 + + # Verify we got the expected successful results + # Items 1-6 always succeed (2, 4, 6, 8, 10, 12) + # Item 10 might also succeed (20) depending on timing + assert len(result_data["results"]) == result_data["success_count"] + for result_val in result_data["results"]: + assert result_val % 2 == 0 # All results should be even (item * 2) + assert result_val >= 2 and result_val <= 20 # Range: items 1-10 * 2 + assert result_val not in [14, 16, 18] # Items 7-9 should not be present + + # Completion reason should be MIN_SUCCESSFUL_REACHED + assert result_data["completion_reason"] == "MIN_SUCCESSFUL_REACHED" + + # Get the map operation + map_op = result.get_context("map_min_successful") + assert map_op is not None + assert map_op.status is OperationStatus.SUCCEEDED + + # All 10 operations may be started, but only some complete before min_successful + assert len(map_op.child_operations) == 10 + + # Count operations by status + succeeded = [ + op for op in map_op.child_operations if op.status is OperationStatus.SUCCEEDED + ] + failed = [ + op for op in map_op.child_operations if op.status is OperationStatus.FAILED + ] + started = [ + op for op in map_op.child_operations if op.status is OperationStatus.STARTED + ] + + # Should have 6-7 successes, 0 failures, and remaining in STARTED state + assert len(succeeded) == result_data["success_count"] + assert len(failed) == 0 + assert len(started) == 10 - result_data["success_count"] diff --git a/examples/test/no_replay_execution/test_no_replay_execution.py b/examples/test/no_replay_execution/test_no_replay_execution.py new file mode 100644 index 00000000..934e107a --- /dev/null +++ b/examples/test/no_replay_execution/test_no_replay_execution.py @@ -0,0 +1,52 @@ +"""Tests for no_replay_execution.""" + +import 
pytest +from aws_durable_execution_sdk_python.execution import InvocationStatus + +from src.no_replay_execution import no_replay_execution +from test.conftest import deserialize_operation_payload + + +@pytest.mark.example +@pytest.mark.durable_execution( + handler=no_replay_execution.handler, + lambda_function_name="No Replay Execution", +) +def test_handle_step_operations_when_no_replay_occurs(durable_runner): + """Test step operations when no replay occurs.""" + with durable_runner: + result = durable_runner.run(input=None, timeout=10) + + assert result.status is InvocationStatus.SUCCEEDED + + # Verify final result + assert deserialize_operation_payload(result.result) == {"completed": True} + + # Get step operations + user1_step_ops = [ + op + for op in result.operations + if op.operation_type.value == "STEP" and op.name == "fetch-user-1" + ] + assert len(user1_step_ops) == 1 + user1_step = user1_step_ops[0] + + user2_step_ops = [ + op + for op in result.operations + if op.operation_type.value == "STEP" and op.name == "fetch-user-2" + ] + assert len(user2_step_ops) == 1 + user2_step = user2_step_ops[0] + + # Verify first-time execution tracking (no replay) + assert user1_step.operation_type.value == "STEP" + assert user1_step.status.value == "SUCCEEDED" + assert deserialize_operation_payload(user1_step.result) == "user-1" + + assert user2_step.operation_type.value == "STEP" + assert user2_step.status.value == "SUCCEEDED" + assert deserialize_operation_payload(user2_step.result) == "user-2" + + # Verify both operations tracked + assert len(result.operations) == 2 diff --git a/examples/test/none_results/test_none_results.py b/examples/test/none_results/test_none_results.py new file mode 100644 index 00000000..75ae69fe --- /dev/null +++ b/examples/test/none_results/test_none_results.py @@ -0,0 +1,51 @@ +"""Tests for undefined_results.""" + +import pytest +from aws_durable_execution_sdk_python.execution import InvocationStatus + +from src.none_results import 
none_results +from test.conftest import deserialize_operation_payload + + +@pytest.mark.example +@pytest.mark.durable_execution( + handler=none_results.handler, + lambda_function_name="None Results", +) +def test_handle_step_operations_with_undefined_result_after_replay(durable_runner): + """Test handling of step operations with undefined result after replay.""" + with durable_runner: + result = durable_runner.run(input=None, timeout=10) + + assert result.status is InvocationStatus.SUCCEEDED + + # Verify execution completed successfully despite undefined operation results + assert deserialize_operation_payload(result.result) == "result" + + # Verify all operations were tracked even with undefined results + operations = result.operations + assert len(operations) == 3 # step + context + wait + + # Verify step operation with undefined result + step_ops = [ + op + for op in operations + if op.operation_type.value == "STEP" and op.name == "fetch-user" + ] + assert len(step_ops) == 1 + step_op = step_ops[0] + assert deserialize_operation_payload(step_op.result) is None + + # Verify child context operation with undefined result + context_ops = [ + op + for op in operations + if op.operation_type.value == "CONTEXT" and op.name == "parent" + ] + assert len(context_ops) == 1 + context_op = context_ops[0] + assert deserialize_operation_payload(context_op.result) is None + + # Verify wait operation completed normally + wait_op = operations[2] + assert wait_op.operation_type.value == "WAIT" diff --git a/examples/test/parallel/test_parallel.py b/examples/test/parallel/test_parallel.py new file mode 100644 index 00000000..184e8549 --- /dev/null +++ b/examples/test/parallel/test_parallel.py @@ -0,0 +1,38 @@ +"""Tests for parallel example.""" + +import pytest +from aws_durable_execution_sdk_python.execution import InvocationStatus +from aws_durable_execution_sdk_python.lambda_service import OperationStatus + +from src.parallel import parallel +from test.conftest import 
deserialize_operation_payload + + +@pytest.mark.example +@pytest.mark.durable_execution( + handler=parallel.handler, + lambda_function_name="Parallel Operations", +) +def test_parallel(durable_runner): + """Test parallel example using context.parallel().""" + with durable_runner: + result = durable_runner.run(input="test", timeout=10) + + assert result.status is InvocationStatus.SUCCEEDED + assert deserialize_operation_payload(result.result) == [ + "task 1 completed", + "task 2 completed", + "task 3 completed after wait", + ] + + # Get the parallel operation (CONTEXT type with PARALLEL subtype) + parallel_op = result.get_context("parallel_operation") + assert parallel_op is not None + assert parallel_op.status is OperationStatus.SUCCEEDED + + # Verify all three child operations exist + assert len(parallel_op.child_operations) == 3 + + # Verify all children succeeded + for child in parallel_op.child_operations: + assert child.status is OperationStatus.SUCCEEDED diff --git a/examples/test/parallel/test_parallel_with_batch_serdes.py b/examples/test/parallel/test_parallel_with_batch_serdes.py new file mode 100644 index 00000000..069428bb --- /dev/null +++ b/examples/test/parallel/test_parallel_with_batch_serdes.py @@ -0,0 +1,43 @@ +"""Tests for parallel with batch-level serdes.""" + +import pytest +from aws_durable_execution_sdk_python.execution import InvocationStatus +from aws_durable_execution_sdk_python.lambda_service import OperationStatus +from src.parallel import parallel_with_batch_serdes +from test.conftest import deserialize_operation_payload + + +@pytest.mark.example +@pytest.mark.durable_execution( + handler=parallel_with_batch_serdes.handler, + lambda_function_name="Parallel with Batch SerDes", +) +def test_parallel_with_batch_serdes(durable_runner): + """Test parallel with custom batch-level serialization.""" + with durable_runner: + result = durable_runner.run(input="test", timeout=10) + + assert result.status is InvocationStatus.SUCCEEDED + + 
result_data = deserialize_operation_payload(result.result) + + # Verify all branches succeeded + assert result_data["success_count"] == 3 + + # Verify results + results = result_data["results"] + assert len(results) == 3 + assert results == [100, 200, 300] + + # Verify total + assert result_data["total"] == 600 + + # Get the parallel operation + parallel_op = result.get_context("parallel_with_batch_serdes") + assert parallel_op is not None + assert parallel_op.status is OperationStatus.SUCCEEDED + + # Verify all 3 child operations exist and succeeded + assert len(parallel_op.child_operations) == 3 + for child in parallel_op.child_operations: + assert child.status is OperationStatus.SUCCEEDED diff --git a/examples/test/parallel/test_parallel_with_custom_serdes.py b/examples/test/parallel/test_parallel_with_custom_serdes.py new file mode 100644 index 00000000..548dd5f7 --- /dev/null +++ b/examples/test/parallel/test_parallel_with_custom_serdes.py @@ -0,0 +1,46 @@ +"""Tests for parallel with custom serdes.""" + +import pytest +from aws_durable_execution_sdk_python.execution import InvocationStatus +from aws_durable_execution_sdk_python.lambda_service import OperationStatus +from src.parallel import parallel_with_custom_serdes +from test.conftest import deserialize_operation_payload + + +@pytest.mark.example +@pytest.mark.durable_execution( + handler=parallel_with_custom_serdes.handler, + lambda_function_name="Parallel with Custom SerDes", +) +def test_parallel_with_custom_serdes(durable_runner): + """Test parallel with custom item serialization.""" + with durable_runner: + result = durable_runner.run(input="test", timeout=10) + + assert result.status is InvocationStatus.SUCCEEDED + + result_data = deserialize_operation_payload(result.result) + + # Verify all tasks succeeded + assert result_data["success_count"] == 3 + + # Verify results were properly deserialized + results = result_data["results"] + assert len(results) == 3 + + # Verify the custom serdes worked (data 
was serialized and deserialized correctly) + task_names = {r["task"] for r in results} + assert task_names == {"task1", "task2", "task3"} + + # Verify values were preserved through serialization + assert result_data["total_value"] == 600 # 100 + 200 + 300 + + # Get the parallel operation + parallel_op = result.get_context("parallel_with_custom_serdes") + assert parallel_op is not None + assert parallel_op.status is OperationStatus.SUCCEEDED + + # Verify all 3 child operations exist and succeeded + assert len(parallel_op.child_operations) == 3 + for child in parallel_op.child_operations: + assert child.status is OperationStatus.SUCCEEDED diff --git a/examples/test/parallel/test_parallel_with_failure_tolerance.py b/examples/test/parallel/test_parallel_with_failure_tolerance.py new file mode 100644 index 00000000..275e27b8 --- /dev/null +++ b/examples/test/parallel/test_parallel_with_failure_tolerance.py @@ -0,0 +1,49 @@ +"""Tests for parallel with failure tolerance.""" + +import pytest +from aws_durable_execution_sdk_python.execution import InvocationStatus +from aws_durable_execution_sdk_python.lambda_service import OperationStatus +from src.parallel import parallel_with_failure_tolerance +from test.conftest import deserialize_operation_payload + + +@pytest.mark.example +@pytest.mark.durable_execution( + handler=parallel_with_failure_tolerance.handler, + lambda_function_name="Parallel with Failure Tolerance", +) +def test_parallel_with_failure_tolerance(durable_runner): + """Test parallel with failure tolerance.""" + with durable_runner: + result = durable_runner.run(input="test", timeout=10) + + assert result.status is InvocationStatus.SUCCEEDED + + result_data = deserialize_operation_payload(result.result) + + # Should have 3 successes and 2 failures + assert result_data["success_count"] == 3 + assert result_data["failure_count"] == 2 + assert set(result_data["succeeded"]) == {"success 1", "success 3", "success 5"} + assert result_data["completion_reason"] == 
"ALL_COMPLETED" + + # Get the parallel operation + parallel_op = result.get_context("parallel_with_tolerance") + assert parallel_op is not None + assert parallel_op.status is OperationStatus.SUCCEEDED + + # Verify all 5 child operations exist + assert len(parallel_op.child_operations) == 5 + + # Count successes and failures + succeeded = [ + op + for op in parallel_op.child_operations + if op.status is OperationStatus.SUCCEEDED + ] + failed = [ + op for op in parallel_op.child_operations if op.status is OperationStatus.FAILED + ] + + assert len(succeeded) == 3 + assert len(failed) == 2 diff --git a/examples/test/parallel/test_parallel_with_max_concurrency.py b/examples/test/parallel/test_parallel_with_max_concurrency.py new file mode 100644 index 00000000..ce65bdef --- /dev/null +++ b/examples/test/parallel/test_parallel_with_max_concurrency.py @@ -0,0 +1,36 @@ +"""Tests for parallel with maxConcurrency.""" + +import pytest +from aws_durable_execution_sdk_python.execution import InvocationStatus +from aws_durable_execution_sdk_python.lambda_service import OperationStatus +from src.parallel import parallel_with_max_concurrency +from test.conftest import deserialize_operation_payload + + +@pytest.mark.example +@pytest.mark.durable_execution( + handler=parallel_with_max_concurrency.handler, + lambda_function_name="Parallel with Max Concurrency", +) +def test_parallel_with_max_concurrency(durable_runner): + """Test parallel with maxConcurrency limit.""" + with durable_runner: + result = durable_runner.run(input="test", timeout=10) + + assert result.status is InvocationStatus.SUCCEEDED + + results_list = deserialize_operation_payload(result.result) + assert len(results_list) == 5 + assert set(results_list) == {"task 1", "task 2", "task 3", "task 4", "task 5"} + + # Get the parallel operation + parallel_op = result.get_context("parallel_with_concurrency") + assert parallel_op is not None + assert parallel_op.status is OperationStatus.SUCCEEDED + + # Verify all 5 child 
operations exist + assert len(parallel_op.child_operations) == 5 + + # Verify all children succeeded + for child in parallel_op.child_operations: + assert child.status is OperationStatus.SUCCEEDED diff --git a/examples/test/parallel/test_parallel_with_wait.py b/examples/test/parallel/test_parallel_with_wait.py new file mode 100644 index 00000000..b1b9a154 --- /dev/null +++ b/examples/test/parallel/test_parallel_with_wait.py @@ -0,0 +1,47 @@ +"""Tests for parallel with wait operations.""" + +import pytest +from aws_durable_execution_sdk_python.execution import InvocationStatus +from aws_durable_execution_sdk_python.lambda_service import ( + OperationStatus, + OperationType, +) +from src.parallel import parallel_with_wait +from test.conftest import deserialize_operation_payload + + +@pytest.mark.example +@pytest.mark.durable_execution( + handler=parallel_with_wait.handler, + lambda_function_name="Parallel with Wait", +) +def test_parallel_with_wait(durable_runner): + """Test parallel with wait operations.""" + with durable_runner: + result = durable_runner.run(input="test", timeout=10) + + assert result.status is InvocationStatus.SUCCEEDED + assert deserialize_operation_payload(result.result) == "Completed waits" + + # Get the parallel operation + parallel_op = result.get_context("parallel_waits") + assert parallel_op is not None + assert parallel_op.status is OperationStatus.SUCCEEDED + + # Verify all 3 child operations exist + assert len(parallel_op.child_operations) == 3 + + # Each child should have a wait operation + wait_names = set() + for child in parallel_op.child_operations: + # Find wait operations in child + wait_ops = [ + op + for op in child.child_operations + if op.operation_type == OperationType.WAIT + ] + assert len(wait_ops) == 1 + wait_names.add(wait_ops[0].name) + + # Verify all expected wait operations exist + assert wait_names == {"wait_1_second", "wait_2_seconds", "wait_5_seconds"} diff --git 
a/examples/test/run_in_child_context/test_run_in_child_context.py b/examples/test/run_in_child_context/test_run_in_child_context.py new file mode 100644 index 00000000..61bf200e --- /dev/null +++ b/examples/test/run_in_child_context/test_run_in_child_context.py @@ -0,0 +1,27 @@ +"""Tests for run_in_child_context example.""" + +import pytest +from aws_durable_execution_sdk_python.execution import InvocationStatus + +from src.run_in_child_context import run_in_child_context +from test.conftest import deserialize_operation_payload + + +@pytest.mark.example +@pytest.mark.durable_execution( + handler=run_in_child_context.handler, + lambda_function_name="run in child context", +) +def test_run_in_child_context(durable_runner): + """Test run_in_child_context example.""" + with durable_runner: + result = durable_runner.run(input="test", timeout=10) + + assert result.status is InvocationStatus.SUCCEEDED + assert deserialize_operation_payload(result.result) == "Child context result: 10" + + # Verify child context operation exists + context_ops = [ + op for op in result.operations if op.operation_type.value == "CONTEXT" + ] + assert len(context_ops) >= 1 diff --git a/examples/test/run_in_child_context/test_run_in_child_context_large_data.py b/examples/test/run_in_child_context/test_run_in_child_context_large_data.py new file mode 100644 index 00000000..34697802 --- /dev/null +++ b/examples/test/run_in_child_context/test_run_in_child_context_large_data.py @@ -0,0 +1,36 @@ +"""Tests for run_in_child_context_large_data.""" + +import pytest +from aws_durable_execution_sdk_python.execution import InvocationStatus + +from src.run_in_child_context import run_in_child_context_large_data +from test.conftest import deserialize_operation_payload + + +@pytest.mark.example +@pytest.mark.durable_execution( + handler=run_in_child_context_large_data.handler, + lambda_function_name="run in child context large data", +) +def 
test_handle_large_data_exceeding_256k_limit_using_run_in_child_context( + durable_runner, +): + """Test handling large data exceeding 256k limit using runInChildContext.""" + with durable_runner: + result = durable_runner.run(input=None, timeout=30) + + result_data = deserialize_operation_payload(result.result) + + # Verify the execution succeeded + assert result.status is InvocationStatus.SUCCEEDED + assert result_data["success"] is True + + # Verify large data was processed + assert result_data["summary"]["totalDataSize"] > 240 # Should be ~250KB + assert result_data["summary"]["stepsExecuted"] == 5 + assert result_data["summary"]["childContextUsed"] is True + assert result_data["summary"]["waitExecuted"] is True + assert result_data["summary"]["dataPreservedAcrossWait"] is True + + # Verify data integrity across wait + assert result_data["dataIntegrityCheck"] is True diff --git a/examples/test/run_in_child_context/test_run_in_child_context_step_failure.py b/examples/test/run_in_child_context/test_run_in_child_context_step_failure.py new file mode 100644 index 00000000..52c1b8c1 --- /dev/null +++ b/examples/test/run_in_child_context/test_run_in_child_context_step_failure.py @@ -0,0 +1,23 @@ +"""Tests for run_in_child_context_failing_step.""" + +import pytest +from aws_durable_execution_sdk_python.execution import InvocationStatus + +from src.run_in_child_context import run_in_child_context_step_failure +from test.conftest import deserialize_operation_payload + + +@pytest.mark.example +@pytest.mark.durable_execution( + handler=run_in_child_context_step_failure.handler, + lambda_function_name="Run In Child Context With Failing Step", +) +def test_succeed_despite_failing_step_in_child_context(durable_runner): + """Test that execution succeeds despite failing step in child context.""" + with durable_runner: + result = durable_runner.run(input=None, timeout=30) + + assert result.status is InvocationStatus.SUCCEEDED + + result_data = 
deserialize_operation_payload(result.result) + assert result_data == {"success": True, "error": "Step failed in child context"} diff --git a/examples/test/simple_execution/test_simple_execution.py b/examples/test/simple_execution/test_simple_execution.py new file mode 100644 index 00000000..740cce48 --- /dev/null +++ b/examples/test/simple_execution/test_simple_execution.py @@ -0,0 +1,40 @@ +"""Tests for simple_execution.""" + +import pytest +from aws_durable_execution_sdk_python.execution import InvocationStatus + +from src.simple_execution import simple_execution +from test.conftest import deserialize_operation_payload + + +@pytest.mark.example +@pytest.mark.durable_execution( + handler=simple_execution.handler, + lambda_function_name="simple execution", +) +def test_execute_simple_handler_without_operations(durable_runner): + """Test simple handler execution without operations.""" + test_payload = { + "userId": "test-user", + "action": "simple-execution", + } + + with durable_runner: + result = durable_runner.run(input=test_payload, timeout=10) + + result_data = deserialize_operation_payload(result.result) + + # Verify the result structure and content + assert ( + result_data["received"] + == '{"userId": "test-user", "action": "simple-execution"}' + ) + assert result_data["message"] == "Handler completed successfully" + assert isinstance(result_data["timestamp"], int) + assert result_data["timestamp"] > 0 + + # Should have no operations for simple execution + assert len(result.operations) == 0 + + # Verify no error occurred + assert result.status is InvocationStatus.SUCCEEDED diff --git a/examples/test/step/test_step.py b/examples/test/step/test_step.py new file mode 100644 index 00000000..63d79299 --- /dev/null +++ b/examples/test/step/test_step.py @@ -0,0 +1,24 @@ +"""Tests for step example.""" + +import pytest +from aws_durable_execution_sdk_python.execution import InvocationStatus + +from src.step import step +from test.conftest import 
deserialize_operation_payload
+
+
+@pytest.mark.example
+@pytest.mark.durable_execution(
+    handler=step.handler,
+    lambda_function_name="Basic Step",
+)
+def test_step(durable_runner):
+    """Test basic step example."""
+    with durable_runner:
+        result = durable_runner.run(input="test", timeout=10)
+
+    assert result.status is InvocationStatus.SUCCEEDED
+    assert deserialize_operation_payload(result.result) == 8
+
+    step_result = result.get_step("add_numbers")
+    assert deserialize_operation_payload(step_result.result) == 8
diff --git a/examples/test/step/test_step_permutations.py b/examples/test/step/test_step_permutations.py
new file mode 100644
index 00000000..04a0a809
--- /dev/null
+++ b/examples/test/step/test_step_permutations.py
@@ -0,0 +1,75 @@
+"""Tests for step operation permutations."""
+
+import pytest
+from aws_durable_execution_sdk_python.execution import InvocationStatus
+from aws_durable_execution_sdk_python.lambda_service import OperationType
+
+from src.step import step_no_name, step_with_exponential_backoff, step_with_name
+from test.conftest import deserialize_operation_payload
+
+
+@pytest.mark.example
+@pytest.mark.durable_execution(
+    handler=step_no_name.handler,
+    lambda_function_name="step no name",
+)
+def test_step_no_name(durable_runner):
+    """Test step without explicit name."""
+    with durable_runner:
+        result = durable_runner.run(input="test", timeout=10)
+
+    assert result.status is InvocationStatus.SUCCEEDED
+    assert deserialize_operation_payload(result.result) == "Result: Step without name"
+
+    step_ops = [
+        op for op in result.operations if op.operation_type == OperationType.STEP
+    ]
+    assert len(step_ops) == 1
+    # Anonymous steps carry no recorded name (None or empty string)
+    assert step_ops[0].name is None or step_ops[0].name == ""
+
+
+@pytest.mark.example
+@pytest.mark.durable_execution(
+    handler=step_with_name.handler,
+    lambda_function_name="step with name",
+)
+def test_step_with_name(durable_runner):
+    """Test step with explicit name."""
+    
with durable_runner: + result = durable_runner.run(input="test", timeout=10) + + assert result.status is InvocationStatus.SUCCEEDED + assert ( + deserialize_operation_payload(result.result) + == "Result: Step with explicit name" + ) + + step_ops = [ + op for op in result.operations if op.operation_type == OperationType.STEP + ] + assert len(step_ops) == 1 + assert step_ops[0].name == "custom_step" + + +@pytest.mark.example +@pytest.mark.durable_execution( + handler=step_with_exponential_backoff.handler, + lambda_function_name="step with exponential backoff", +) +def test_step_with_exponential_backoff(durable_runner): + """Test step with exponential backoff retry strategy.""" + with durable_runner: + result = durable_runner.run(input="test", timeout=10) + + assert result.status is InvocationStatus.SUCCEEDED + assert ( + deserialize_operation_payload(result.result) + == "Result: Step with exponential backoff" + ) + + step_ops = [ + op for op in result.operations if op.operation_type == OperationType.STEP + ] + assert len(step_ops) == 1 + assert step_ops[0].name == "retry_step" diff --git a/examples/test/step/test_step_semantics_at_most_once.py b/examples/test/step/test_step_semantics_at_most_once.py new file mode 100644 index 00000000..a67892e2 --- /dev/null +++ b/examples/test/step/test_step_semantics_at_most_once.py @@ -0,0 +1,32 @@ +"""Tests for step_semantics_at_most_once example.""" + +import pytest +from aws_durable_execution_sdk_python.execution import InvocationStatus +from aws_durable_execution_sdk_python.lambda_service import OperationType + +from src.step import step_semantics_at_most_once +from test.conftest import deserialize_operation_payload + + +@pytest.mark.example +@pytest.mark.durable_execution( + handler=step_semantics_at_most_once.handler, + lambda_function_name="step semantics at most once", +) +def test_step_semantics_at_most_once(durable_runner): + """Test step with at-most-once semantics.""" + with durable_runner: + result = 
durable_runner.run(input="test", timeout=10) + + assert result.status is InvocationStatus.SUCCEEDED + assert ( + deserialize_operation_payload(result.result) + == "Result: AT_MOST_ONCE_PER_RETRY semantics" + ) + + # Verify step operation exists with correct name + step_ops = [ + op for op in result.operations if op.operation_type == OperationType.STEP + ] + assert len(step_ops) == 1 + assert step_ops[0].name == "at_most_once_step" diff --git a/examples/test/step/test_step_with_retry.py b/examples/test/step/test_step_with_retry.py new file mode 100644 index 00000000..bb6ba8be --- /dev/null +++ b/examples/test/step/test_step_with_retry.py @@ -0,0 +1,40 @@ +"""Tests for step_with_retry example.""" + +import pytest +from aws_durable_execution_sdk_python.execution import InvocationStatus +from aws_durable_execution_sdk_python.lambda_service import OperationType +from src.step import step_with_retry +from test.conftest import deserialize_operation_payload + + +@pytest.mark.example +@pytest.mark.durable_execution( + handler=step_with_retry.handler, + lambda_function_name="step with retry", +) +def test_step_with_retry(durable_runner): + """Test step with retry configuration. + + With counter-based deterministic behavior: + - Attempt 1: counter = 1 < 2 → raises RuntimeError ❌ + - Attempt 2: counter = 2 >= 2 → succeeds ✓ + + The function deterministically fails once then succeeds on the second attempt. 
+ """ + with durable_runner: + result = durable_runner.run(input="test", timeout=30) + + # With counter-based deterministic behavior, succeeds on attempt 2 + assert result.status is InvocationStatus.SUCCEEDED + assert deserialize_operation_payload(result.result) == "Operation succeeded" + + # Verify step operation exists with retry details + step_ops = [ + op for op in result.operations if op.operation_type == OperationType.STEP + ] + assert len(step_ops) == 1 + + # The step should have succeeded on attempt 2 (after 1 failure) + # Attempt numbering: 1 (initial attempt), 2 (first retry) + step_op = step_ops[0] + assert step_op.attempt == 2 # Succeeded on first retry (1-indexed: 2=first retry) diff --git a/examples/test/step/test_steps_with_retry.py b/examples/test/step/test_steps_with_retry.py new file mode 100644 index 00000000..17ad8dc8 --- /dev/null +++ b/examples/test/step/test_steps_with_retry.py @@ -0,0 +1,52 @@ +"""Tests for steps_with_retry.""" + +import pytest +from aws_durable_execution_sdk_python.execution import InvocationStatus +from aws_durable_execution_sdk_python.lambda_service import OperationType +from src.step import steps_with_retry +from test.conftest import deserialize_operation_payload + + +@pytest.mark.example +@pytest.mark.durable_execution( + handler=steps_with_retry.handler, + lambda_function_name="steps with retry", +) +def test_steps_with_retry(durable_runner): + """Test steps_with_retry pattern. + + With counter-based deterministic behavior: + - Poll 1, Attempt 1: counter = 1 → raises RuntimeError ❌ + - Poll 1, Attempt 2: counter = 2 → returns None + - Poll 2, Attempt 1: counter = 3 → returns item ✓ + + The function finds the item on poll 2 after 1 retry on poll 1. 
+ """ + with durable_runner: + result = durable_runner.run(input={"name": "test-item"}, timeout=30) + + assert result.status is InvocationStatus.SUCCEEDED + + # With counter-based deterministic behavior, finds item on poll 2 + result_data = deserialize_operation_payload(result.result) + assert isinstance(result_data, dict) + assert result_data.get("success") is True + assert result_data.get("pollsRequired") == 2 + assert "item" in result_data + assert result_data["item"]["id"] == "test-item" + + # Verify step operations exist + step_ops = [ + op for op in result.operations if op.operation_type == OperationType.STEP + ] + # Should have exactly 2 step operations (poll 1 and poll 2) + assert len(step_ops) == 2 + + # Poll 1: succeeded after 1 retry (returned None) + assert step_ops[0].name == "get_item_poll_1" + assert step_ops[0].result == "null" + assert step_ops[0].attempt == 2 # 1 retry occurred (1-indexed: 2=first retry) + + # Poll 2: succeeded immediately (returned item) + assert step_ops[1].name == "get_item_poll_2" + assert step_ops[1].attempt == 1 # No retries needed (1-indexed: 1=initial) diff --git a/examples/test/test_hello_world.py b/examples/test/test_hello_world.py new file mode 100644 index 00000000..f0a54468 --- /dev/null +++ b/examples/test/test_hello_world.py @@ -0,0 +1,24 @@ +"""Integration tests for hello world example.""" + +import pytest +from aws_durable_execution_sdk_python.execution import InvocationStatus + +from src import hello_world +from test.conftest import deserialize_operation_payload + + +@pytest.mark.example +@pytest.mark.durable_execution( + handler=hello_world.handler, + lambda_function_name="hello world", +) +def test_hello_world(durable_runner): + """Test hello world example.""" + with durable_runner: + result = durable_runner.run(input="test", timeout=30) + + assert result.status is InvocationStatus.SUCCEEDED + assert deserialize_operation_payload(result.result) == { + "statusCode": 200, + "body": "Hello from Durable Lambda! 
(status: 200)", + } diff --git a/examples/test/wait/test_multiple_wait.py b/examples/test/wait/test_multiple_wait.py new file mode 100644 index 00000000..40ecbc56 --- /dev/null +++ b/examples/test/wait/test_multiple_wait.py @@ -0,0 +1,57 @@ +"""Tests for multiple_waits.""" + +import pytest +from aws_durable_execution_sdk_python.execution import InvocationStatus + +from src.wait import multiple_wait +from test.conftest import deserialize_operation_payload + + +@pytest.mark.example +@pytest.mark.durable_execution( + handler=multiple_wait.handler, + lambda_function_name="multiple wait", +) +def test_multiple_sequential_wait_operations(durable_runner): + """Test multiple sequential wait operations.""" + with durable_runner: + result = durable_runner.run(input=None, timeout=20) + + assert result.status is InvocationStatus.SUCCEEDED + + # Verify the final result + assert deserialize_operation_payload(result.result) == { + "completedWaits": 2, + "finalStep": "done", + } + + # Verify operations were tracked + operations = [op for op in result.operations if op.operation_type.value == "WAIT"] + assert len(operations) == 2 + + # Find the wait operations by name + wait_1_ops = [ + op + for op in operations + if op.operation_type.value == "WAIT" and op.name == "wait-1" + ] + assert len(wait_1_ops) == 1 + first_wait = wait_1_ops[0] + + wait_2_ops = [ + op + for op in operations + if op.operation_type.value == "WAIT" and op.name == "wait-2" + ] + assert len(wait_2_ops) == 1 + second_wait = wait_2_ops[0] + + # Verify operation types and status + assert first_wait.operation_type.value == "WAIT" + assert first_wait.status.value == "SUCCEEDED" + assert second_wait.operation_type.value == "WAIT" + assert second_wait.status.value == "SUCCEEDED" + + # Verify wait details + assert first_wait.scheduled_end_timestamp is not None + assert second_wait.scheduled_end_timestamp is not None diff --git a/examples/test/wait/test_wait.py b/examples/test/wait/test_wait.py new file mode 100644 index 
00000000..66cd6279 --- /dev/null +++ b/examples/test/wait/test_wait.py @@ -0,0 +1,27 @@ +"""Tests for wait example.""" + +import pytest +from aws_durable_execution_sdk_python.execution import InvocationStatus + +from src.wait import wait +from test.conftest import deserialize_operation_payload + + +@pytest.mark.example +@pytest.mark.durable_execution( + handler=wait.handler, + lambda_function_name="Wait State", +) +def test_wait(durable_runner): + """Test wait example.""" + with durable_runner: + result = durable_runner.run(input="test", timeout=10) + + assert result.status is InvocationStatus.SUCCEEDED + assert deserialize_operation_payload(result.result) == "Wait completed" + + # Find the wait operation (it should be the only non-execution operation) + wait_ops = [op for op in result.operations if op.operation_type.value == "WAIT"] + assert len(wait_ops) == 1 + wait_op = wait_ops[0] + assert wait_op.scheduled_end_timestamp is not None diff --git a/examples/test/wait/test_wait_permutations.py b/examples/test/wait/test_wait_permutations.py new file mode 100644 index 00000000..bc39d211 --- /dev/null +++ b/examples/test/wait/test_wait_permutations.py @@ -0,0 +1,25 @@ +"""Tests for wait operation permutations.""" + +import pytest +from aws_durable_execution_sdk_python.execution import InvocationStatus + +from src.wait import wait_with_name +from test.conftest import deserialize_operation_payload + + +@pytest.mark.example +@pytest.mark.durable_execution( + handler=wait_with_name.handler, + lambda_function_name="wait with name", +) +def test_wait_with_name(durable_runner): + """Test wait with explicit name.""" + with durable_runner: + result = durable_runner.run(input="test", timeout=10) + + assert result.status is InvocationStatus.SUCCEEDED + assert deserialize_operation_payload(result.result) == "Wait with name completed" + + wait_ops = [op for op in result.operations if op.operation_type.value == "WAIT"] + assert len(wait_ops) == 1 + assert wait_ops[0].name == 
"custom_wait" diff --git a/examples/test/wait_for_callback/test_wait_for_callback_anonymous.py b/examples/test/wait_for_callback/test_wait_for_callback_anonymous.py new file mode 100644 index 00000000..d047da23 --- /dev/null +++ b/examples/test/wait_for_callback/test_wait_for_callback_anonymous.py @@ -0,0 +1,39 @@ +"""Tests for wait_for_callback_anonymous.""" + +import json + +import pytest +from aws_durable_execution_sdk_python.execution import InvocationStatus + +from src.wait_for_callback import wait_for_callback_anonymous +from test.conftest import deserialize_operation_payload + + +@pytest.mark.example +@pytest.mark.durable_execution( + handler=wait_for_callback_anonymous.handler, + lambda_function_name="Wait For Callback Success Anonymous", +) +def test_handle_basic_wait_for_callback_with_anonymous_submitter(durable_runner): + """Test basic waitForCallback with anonymous submitter.""" + with durable_runner: + execution_arn = durable_runner.run_async(input=None, timeout=30) + callback_id = durable_runner.wait_for_callback(execution_arn=execution_arn) + callback_result = json.dumps({"data": "callback_completed"}) + durable_runner.send_callback_success( + callback_id=callback_id, result=callback_result.encode() + ) + + result = durable_runner.wait_for_result(execution_arn=execution_arn) + + assert result.status is InvocationStatus.SUCCEEDED + + result_data = deserialize_operation_payload(result.result) + + assert result_data == { + "callbackResult": callback_result, + "completed": True, + } + + # Verify operations were tracked + assert len(result.operations) > 0 diff --git a/examples/test/wait_for_callback/test_wait_for_callback_child.py b/examples/test/wait_for_callback/test_wait_for_callback_child.py new file mode 100644 index 00000000..3016a364 --- /dev/null +++ b/examples/test/wait_for_callback/test_wait_for_callback_child.py @@ -0,0 +1,73 @@ +"""Tests for wait_for_callback_child_context.""" + +import json + +import pytest +from 
aws_durable_execution_sdk_python.execution import InvocationStatus + +from src.wait_for_callback import wait_for_callback_child +from test.conftest import deserialize_operation_payload + + +@pytest.mark.example +@pytest.mark.durable_execution( + handler=wait_for_callback_child.handler, + lambda_function_name="Wait For Callback With Child Context", +) +def test_handle_wait_for_callback_within_child_contexts(durable_runner): + """Test waitForCallback within child contexts.""" + test_payload = {"test": "child-context-callbacks"} + + with durable_runner: + execution_arn = durable_runner.run_async(input=test_payload, timeout=30) + # Wait for parent callback and get callback_id + parent_callback_id = durable_runner.wait_for_callback( + execution_arn=execution_arn + ) + # Send parent callback result + parent_callback_result = json.dumps({"parentData": "parent-completed"}) + durable_runner.send_callback_success( + callback_id=parent_callback_id, result=parent_callback_result.encode() + ) + # Wait for child callback and get callback_id + child_callback_id = durable_runner.wait_for_callback( + execution_arn=execution_arn, name="child-callback-op create callback id" + ) + # Send child callback result + child_callback_result = json.dumps({"childData": 42}) + durable_runner.send_callback_success( + callback_id=child_callback_id, result=child_callback_result.encode() + ) + # Wait for the execution to complete + result = durable_runner.wait_for_result(execution_arn=execution_arn) + + assert result.status is InvocationStatus.SUCCEEDED + result_data = deserialize_operation_payload(result.result) + assert result_data == { + "parentResult": parent_callback_result, + "childContextResult": { + "childResult": child_callback_result, + "childProcessed": True, + }, + } + + # Find the child context operation + child_context_ops = [ + op + for op in result.operations + if op.operation_type.value == "CONTEXT" + and op.name == "child-context-with-callback" + ] + assert len(child_context_ops) 
== 1 + child_context_op = child_context_ops[0] + + # Verify child operations are accessible + child_operations = child_context_op.child_operations + assert child_operations is not None + assert len(child_operations) == 2 # wait + waitForCallback + + all_ops = result.get_all_operations() + + # Verify completed operations count + completed_operations = [op for op in all_ops if op.status.value == "SUCCEEDED"] + assert len(completed_operations) == 8 diff --git a/examples/test/wait_for_callback/test_wait_for_callback_failure.py b/examples/test/wait_for_callback/test_wait_for_callback_failure.py new file mode 100644 index 00000000..ac8d52fb --- /dev/null +++ b/examples/test/wait_for_callback/test_wait_for_callback_failure.py @@ -0,0 +1,27 @@ +import pytest +from aws_durable_execution_sdk_python.execution import InvocationStatus +from aws_durable_execution_sdk_python.lambda_service import ErrorObject + +from src.wait_for_callback import wait_for_callback + + +@pytest.mark.example +@pytest.mark.durable_execution( + handler=wait_for_callback.handler, + lambda_function_name="Wait For Callback Failure", +) +def test_wait_for_callback_failure(durable_runner): + with durable_runner: + execution_arn = durable_runner.run_async(input="test", timeout=30) + callback_id = durable_runner.wait_for_callback(execution_arn=execution_arn) + durable_runner.send_callback_failure( + callback_id=callback_id, error=ErrorObject.from_message("my callback error") + ) + result = durable_runner.wait_for_result(execution_arn=execution_arn) + + assert result.status is InvocationStatus.FAILED + assert isinstance(result.error, ErrorObject) + assert result.error.to_dict() == { + "ErrorMessage": "my callback error", + "ErrorType": "CallableRuntimeError", + } diff --git a/examples/test/wait_for_callback/test_wait_for_callback_heartbeat.py b/examples/test/wait_for_callback/test_wait_for_callback_heartbeat.py new file mode 100644 index 00000000..bdbf6274 --- /dev/null +++ 
b/examples/test/wait_for_callback/test_wait_for_callback_heartbeat.py @@ -0,0 +1,62 @@ +"""Tests for wait_for_callback_heartbeat_sends.""" + +import json +import time + +import pytest +from aws_durable_execution_sdk_python.execution import InvocationStatus + +from src.wait_for_callback import wait_for_callback_heartbeat +from test.conftest import deserialize_operation_payload + + +@pytest.mark.example +@pytest.mark.durable_execution( + handler=wait_for_callback_heartbeat.handler, + lambda_function_name="Wait For Callback Heartbeat Sends", +) +def test_handle_wait_for_callback_heartbeat_scenarios_during_long_running_submitter( + durable_runner, +): + """Test waitForCallback heartbeat scenarios during long-running submitter execution.""" + + with durable_runner: + # Start the execution (this will pause at the callback) + execution_arn = durable_runner.run_async( + input={"input": "test_payload"}, timeout=60 + ) + + # Wait for callback and get callback_id + callback_id = durable_runner.wait_for_callback(execution_arn=execution_arn) + + # Send heartbeat to keep the callback alive during processing + durable_runner.send_callback_heartbeat(callback_id=callback_id) + + # Wait a bit more to simulate callback processing time + wait_time = 7.0 + time.sleep(wait_time) + + # Send another heartbeat + durable_runner.send_callback_heartbeat(callback_id=callback_id) + + # Finally complete the callback + callback_result = json.dumps({"processed": 1000}) + durable_runner.send_callback_success( + callback_id=callback_id, result=callback_result.encode() + ) + + # Wait for the execution to complete + result = durable_runner.wait_for_result(execution_arn=execution_arn) + + assert result.status is InvocationStatus.SUCCEEDED + + result_data = deserialize_operation_payload(result.result) + + assert result_data["callbackResult"] == callback_result + assert result_data["completed"] is True + + # Should have completed operations with successful callback + completed_operations = [ + op for op 
in result.operations if op.status.value == "SUCCEEDED" + ] + assert len(completed_operations) > 0 diff --git a/examples/test/wait_for_callback/test_wait_for_callback_mixed_ops.py b/examples/test/wait_for_callback/test_wait_for_callback_mixed_ops.py new file mode 100644 index 00000000..4f4f982a --- /dev/null +++ b/examples/test/wait_for_callback/test_wait_for_callback_mixed_ops.py @@ -0,0 +1,52 @@ +"""Tests for wait_for_callback_mixed_ops.""" + +import json + +import pytest +from aws_durable_execution_sdk_python.execution import InvocationStatus + +from src.wait_for_callback import wait_for_callback_mixed_ops +from test.conftest import deserialize_operation_payload + + +@pytest.mark.example +@pytest.mark.durable_execution( + handler=wait_for_callback_mixed_ops.handler, + lambda_function_name="Wait For Callback Mixed Ops", +) +def test_handle_wait_for_callback_mixed_with_steps_waits_and_other_operations( + durable_runner, +): + """Test waitForCallback mixed with steps, waits, and other operations.""" + with durable_runner: + # Start the execution (this will pause at the callback) + execution_arn = durable_runner.run_async(input=None, timeout=30) + + # Wait for callback and get callback_id + callback_id = durable_runner.wait_for_callback(execution_arn=execution_arn) + + # Complete the callback + callback_result = json.dumps({"processed": True}) + durable_runner.send_callback_success( + callback_id=callback_id, result=callback_result.encode() + ) + + # Wait for the execution to complete + result = durable_runner.wait_for_result(execution_arn=execution_arn) + + assert result.status is InvocationStatus.SUCCEEDED + + result_data = deserialize_operation_payload(result.result) + + # Verify all expected fields + assert result_data["stepResult"] == {"userId": 123, "name": "John Doe"} + assert result_data["callbackResult"] == callback_result + assert result_data["finalStep"]["status"] == "completed" + assert isinstance(result_data["finalStep"]["timestamp"], int) + assert 
result_data["workflowCompleted"] is True + + # Verify all operations were tracked - should have wait, step, waitForCallback (context + callback + submitter), wait, step + completed_operations = [ + op for op in result.get_all_operations() if op.status.value == "SUCCEEDED" + ] + assert len(completed_operations) == 7 diff --git a/examples/test/wait_for_callback/test_wait_for_callback_multiple_invocations.py b/examples/test/wait_for_callback/test_wait_for_callback_multiple_invocations.py new file mode 100644 index 00000000..8c297dc0 --- /dev/null +++ b/examples/test/wait_for_callback/test_wait_for_callback_multiple_invocations.py @@ -0,0 +1,74 @@ +"""Tests for wait_for_callback_multiple_invocations.""" + +import json +import time + +import pytest +from aws_durable_execution_sdk_python.execution import InvocationStatus + +from src.wait_for_callback import ( + wait_for_callback_multiple_invocations, +) +from test.conftest import deserialize_operation_payload + + +@pytest.mark.example +@pytest.mark.durable_execution( + handler=wait_for_callback_multiple_invocations.handler, + lambda_function_name="Wait For Callback Multiple Invocations", +) +def test_handle_multiple_invocations_tracking_with_wait_for_callback_operations( + durable_runner, +): + """Test multiple invocations tracking with waitForCallback operations.""" + test_payload = {"test": "multiple-invocations"} + + with durable_runner: + # Start the execution (this will pause at callbacks) + execution_arn = durable_runner.run_async(input=test_payload, timeout=60) + + # Wait for first callback and get callback_id + first_callback_id = durable_runner.wait_for_callback( + execution_arn=execution_arn + ) + + # Complete first callback + first_callback_result = json.dumps({"step": 1}) + durable_runner.send_callback_success( + callback_id=first_callback_id, result=first_callback_result.encode() + ) + + # Wait for second callback and get callback_id + second_callback_id = durable_runner.wait_for_callback( + 
execution_arn=execution_arn, name="second-callback create callback id" + ) + + # Complete second callback + second_callback_result = json.dumps({"step": 2}) + durable_runner.send_callback_success( + callback_id=second_callback_id, result=second_callback_result.encode() + ) + + # Wait for the execution to complete + result = durable_runner.wait_for_result(execution_arn=execution_arn) + + assert result.status is InvocationStatus.SUCCEEDED + + result_data = deserialize_operation_payload(result.result) + + assert result_data == { + "firstCallback": '{"step": 1}', + "secondCallback": '{"step": 2}', + "stepResult": {"processed": True, "step": 1}, + "invocationCount": "multiple", + } + + # Verify invocations were tracked - should be exactly 5 invocations + # Note: Check if Python SDK provides invocations tracking + if hasattr(result, "invocations"): + invocations = result.invocations + assert len(invocations) == 5 + + # Verify operations were executed + operations = result.operations + assert len(operations) > 4 # wait + callback + step + wait + callback operations diff --git a/examples/test/wait_for_callback/test_wait_for_callback_nested.py b/examples/test/wait_for_callback/test_wait_for_callback_nested.py new file mode 100644 index 00000000..2c1c941f --- /dev/null +++ b/examples/test/wait_for_callback/test_wait_for_callback_nested.py @@ -0,0 +1,101 @@ +"""Tests for wait_for_callback_nested.""" + +import json + +import pytest +from aws_durable_execution_sdk_python.execution import InvocationStatus + +from src.wait_for_callback import wait_for_callback_nested +from test.conftest import deserialize_operation_payload + + +@pytest.mark.example +@pytest.mark.durable_execution( + handler=wait_for_callback_nested.handler, + lambda_function_name="Wait For Callback Nested", +) +def test_handle_nested_wait_for_callback_operations_in_child_contexts(durable_runner): + """Test nested waitForCallback operations in child contexts.""" + with durable_runner: + # Start the execution (this 
will pause at callbacks) + execution_arn = durable_runner.run_async(input=None, timeout=60) + + # Complete outer callback first + outer_callback_id = durable_runner.wait_for_callback( + execution_arn=execution_arn + ) + outer_callback_result = json.dumps({"level": "outer-completed"}) + durable_runner.send_callback_success( + callback_id=outer_callback_id, result=outer_callback_result.encode() + ) + + # Complete inner callback + inner_callback_id = durable_runner.wait_for_callback( + execution_arn=execution_arn, name="inner-callback-op create callback id" + ) + inner_callback_result = json.dumps({"level": "inner-completed"}) + durable_runner.send_callback_success( + callback_id=inner_callback_id, result=inner_callback_result.encode() + ) + + # Complete nested callback + nested_callback_id = durable_runner.wait_for_callback( + execution_arn=execution_arn, name="nested-callback-op create callback id" + ) + nested_callback_result = json.dumps({"level": "nested-completed"}) + durable_runner.send_callback_success( + callback_id=nested_callback_id, result=nested_callback_result.encode() + ) + + # Wait for the execution to complete + result = durable_runner.wait_for_result(execution_arn=execution_arn) + + assert result.status is InvocationStatus.SUCCEEDED + + result_data = deserialize_operation_payload(result.result) + + assert result_data == { + "outerCallback": outer_callback_result, + "nestedResults": { + "innerCallback": inner_callback_result, + "deepNested": { + "nestedCallback": nested_callback_result, + "deepLevel": "inner-child", + }, + "level": "outer-child", + }, + } + + # Get all operations including nested ones + all_ops = result.get_all_operations() + + # Find the outer context operation + outer_context_ops = [ + op + for op in result.operations + if op.operation_type.value == "CONTEXT" and op.name == "outer-child-context" + ] + assert len(outer_context_ops) == 1 + outer_context_op = outer_context_ops[0] + + # Verify outer child operations hierarchy + 
outer_children = outer_context_op.child_operations + assert outer_children is not None + assert len(outer_children) == 2 # inner callback + inner context + + # Find the inner context operation + inner_context_ops = [ + op + for op in all_ops + if op.operation_type.value == "CONTEXT" and op.name == "inner-child-context" + ] + assert len(inner_context_ops) == 1 + inner_context_op = inner_context_ops[0] + + # Verify inner child operations hierarchy + inner_children = inner_context_op.child_operations + assert inner_children is not None + assert len(inner_children) == 2 # deep wait + nested callback + + # Should have tracked all operations + assert len(all_ops) == 12 diff --git a/examples/test/wait_for_callback/test_wait_for_callback_serdes.py b/examples/test/wait_for_callback/test_wait_for_callback_serdes.py new file mode 100644 index 00000000..1333f88d --- /dev/null +++ b/examples/test/wait_for_callback/test_wait_for_callback_serdes.py @@ -0,0 +1,66 @@ +"""Tests for wait_for_callback_serdes.""" + +import json +from datetime import datetime, timezone + +import pytest +from aws_durable_execution_sdk_python.execution import InvocationStatus + +from src.wait_for_callback import wait_for_callback_serdes +from src.wait_for_callback.wait_for_callback_serdes import CustomSerdes +from test.conftest import deserialize_operation_payload + + +@pytest.mark.example +@pytest.mark.durable_execution( + handler=wait_for_callback_serdes.handler, + lambda_function_name="Wait For Callback Serdes", +) +def test_handle_wait_for_callback_with_custom_serdes_configuration(durable_runner): + """Test waitForCallback with custom serdes configuration.""" + with durable_runner: + # Start the execution (this will pause at the callback) + execution_arn = durable_runner.run_async(input=None, timeout=30) + + # Wait for callback and get callback_id + callback_id = durable_runner.wait_for_callback(execution_arn=execution_arn) + + # Send data that requires custom serialization + test_data = { + "id": 42, 
+ "message": "Hello Custom Serdes", + "timestamp": datetime(2025, 6, 15, 12, 30, 45, tzinfo=timezone.utc), + "metadata": { + "version": "2.0.0", + "processed": True, + }, + } + + # Serialize the data using custom serdes for sending + custom_serdes = CustomSerdes() + serialized_data = custom_serdes.serialize(test_data) + durable_runner.send_callback_success( + callback_id=callback_id, result=serialized_data.encode() + ) + + # Wait for the execution to complete + result = durable_runner.wait_for_result(execution_arn=execution_arn) + + assert result.status is InvocationStatus.SUCCEEDED + + result_data = deserialize_operation_payload(result.result) + + # The result will always get stringified since it's the lambda response + # DateTime will be serialized to ISO string in the final result + assert result_data["receivedData"]["id"] == 42 + assert result_data["receivedData"]["message"] == "Hello Custom Serdes" + assert "2025-06-15T12:30:45" in result_data["receivedData"]["timestamp"] + assert result_data["receivedData"]["metadata"]["version"] == "2.0.0" + assert result_data["receivedData"]["metadata"]["processed"] is True + assert result_data["isDateObject"] is True + + # Should have completed operations with successful callback + completed_operations = [ + op for op in result.operations if op.status.value == "SUCCEEDED" + ] + assert len(completed_operations) > 0 diff --git a/examples/test/wait_for_callback/test_wait_for_callback_submitter_failure.py b/examples/test/wait_for_callback/test_wait_for_callback_submitter_failure.py new file mode 100644 index 00000000..e4463c8f --- /dev/null +++ b/examples/test/wait_for_callback/test_wait_for_callback_submitter_failure.py @@ -0,0 +1,32 @@ +"""Tests for wait_for_callback_submitter_retry_success.""" + +import json + +import pytest +from aws_durable_execution_sdk_python.execution import InvocationStatus + +from src.wait_for_callback import ( + wait_for_callback_submitter_failure, +) + + +@pytest.mark.example 
+@pytest.mark.durable_execution( + handler=wait_for_callback_submitter_failure.handler, + lambda_function_name="Wait For Callback Submitter Failure", +) +def test_fail_after_exhausting_retries_when_submitter_always_fails(durable_runner): + """Test that execution fails after exhausting retries when submitter always fails.""" + test_payload = {"shouldFail": True} + + with durable_runner: + execution_arn = durable_runner.run_async(input=test_payload, timeout=30) + result = durable_runner.wait_for_result(execution_arn=execution_arn) + + # Execution should fail after retries are exhausted + assert result.status is InvocationStatus.FAILED + + # Verify error details + error = result.error + assert error is not None + assert "Simulated submitter failure" in error.message diff --git a/examples/test/wait_for_callback/test_wait_for_callback_submitter_failure_catchable.py b/examples/test/wait_for_callback/test_wait_for_callback_submitter_failure_catchable.py new file mode 100644 index 00000000..b3458d06 --- /dev/null +++ b/examples/test/wait_for_callback/test_wait_for_callback_submitter_failure_catchable.py @@ -0,0 +1,28 @@ +"""Tests for wait_for_callback_failing_submitter.""" + +import pytest +from aws_durable_execution_sdk_python.execution import InvocationStatus + +from src.wait_for_callback import wait_for_callback_submitter_failure_catchable +from test.conftest import deserialize_operation_payload + + +@pytest.mark.example +@pytest.mark.durable_execution( + handler=wait_for_callback_submitter_failure_catchable.handler, + lambda_function_name="Wait For Callback Failing Submitter Catchable", +) +def test_handle_wait_for_callback_with_failing_submitter_function_errors( + durable_runner, +): + """Test waitForCallback with failing submitter function errors.""" + with durable_runner: + execution_arn = durable_runner.run_async(input=None, timeout=30) + result = durable_runner.wait_for_result(execution_arn=execution_arn) + + result_data = 
deserialize_operation_payload(result.result) + + assert result_data == { + "success": False, + "error": "Submitter failed", + } diff --git a/examples/test/wait_for_callback/test_wait_for_callback_success.py b/examples/test/wait_for_callback/test_wait_for_callback_success.py new file mode 100644 index 00000000..67217a25 --- /dev/null +++ b/examples/test/wait_for_callback/test_wait_for_callback_success.py @@ -0,0 +1,25 @@ +import pytest +from aws_durable_execution_sdk_python.execution import InvocationStatus + +from src.wait_for_callback import wait_for_callback +from test.conftest import deserialize_operation_payload + + +@pytest.mark.example +@pytest.mark.durable_execution( + handler=wait_for_callback.handler, + lambda_function_name="Wait For Callback Success", +) +def test_wait_for_callback_success(durable_runner): + with durable_runner: + execution_arn = durable_runner.run_async(input="test", timeout=30) + callback_id = durable_runner.wait_for_callback(execution_arn=execution_arn) + durable_runner.send_callback_success( + callback_id=callback_id, result="callback success".encode() + ) + result = durable_runner.wait_for_result(execution_arn=execution_arn) + assert result.status is InvocationStatus.SUCCEEDED + assert ( + deserialize_operation_payload(result.result) + == "External system result: callback success" + ) diff --git a/examples/test/wait_for_callback/test_wait_for_callback_timeout.py b/examples/test/wait_for_callback/test_wait_for_callback_timeout.py new file mode 100644 index 00000000..9b69796d --- /dev/null +++ b/examples/test/wait_for_callback/test_wait_for_callback_timeout.py @@ -0,0 +1,32 @@ +"""Tests for wait_for_callback_timeout.""" + +import pytest +from aws_durable_execution_sdk_python.execution import InvocationStatus + +from src.wait_for_callback import wait_for_callback_timeout +from test.conftest import deserialize_operation_payload + + +@pytest.mark.example +@pytest.mark.durable_execution( + handler=wait_for_callback_timeout.handler, + 
lambda_function_name="Wait For Callback Timeout", +) +def test_handle_wait_for_callback_timeout_scenarios(durable_runner): + """Test waitForCallback timeout scenarios.""" + test_payload = {"test": "timeout-scenario"} + + with durable_runner: + execution_arn = durable_runner.run_async(input=test_payload, timeout=2) + # Don't send callback - let it timeout + result = durable_runner.wait_for_result(execution_arn=execution_arn) + + # Handler catches the timeout error, so execution succeeds with error in result + assert result.status is InvocationStatus.SUCCEEDED + + result_data = deserialize_operation_payload(result.result) + + assert result_data["success"] is False + assert isinstance(result_data["error"], str) + assert len(result_data["error"]) > 0 + assert "Callback timed out: Callback.Timeout" == result_data["error"] diff --git a/examples/test/wait_for_condition/test_wait_for_condition.py b/examples/test/wait_for_condition/test_wait_for_condition.py new file mode 100644 index 00000000..589ca37b --- /dev/null +++ b/examples/test/wait_for_condition/test_wait_for_condition.py @@ -0,0 +1,24 @@ +"""Tests for wait_for_condition.""" + +import pytest +from aws_durable_execution_sdk_python.execution import InvocationStatus + +from src.wait_for_condition import wait_for_condition +from test.conftest import deserialize_operation_payload + + +@pytest.mark.example +@pytest.mark.durable_execution( + handler=wait_for_condition.handler, + lambda_function_name="wait for condition", +) +def test_wait_for_condition(durable_runner): + """Test wait_for_condition pattern.""" + pass + # TODO: fix bug in local runner so that local tests can pass + # with durable_runner: + # result = durable_runner.run(input="test", timeout=30) + + # assert result.status is InvocationStatus.SUCCEEDED + # # Should reach state 3 after 3 increments + # assert deserialize_operation_payload(result.result) == 3 diff --git a/pyproject.toml b/pyproject.toml index a77d3861..e4a687c0 100644 --- a/pyproject.toml +++ 
b/pyproject.toml @@ -35,16 +35,42 @@ packages = ["src/aws_durable_execution_sdk_python"] path = "src/aws_durable_execution_sdk_python/__about__.py" [tool.hatch.envs.test] -dependencies = ["coverage[toml]", "pytest", "pytest-cov"] +dependencies = [ + "coverage[toml]", + "pytest", + "pytest-cov", + "aws-durable-execution-sdk-python-testing>=1.0.0", +] [tool.hatch.envs.test.scripts] +examples = "pytest --runner-mode=local -m example examples/test/ -v" +examples-integration = "pytest --runner-mode=cloud -m example examples/test/ -v {args}" cov = "pytest --cov-report=term-missing --cov-config=pyproject.toml --cov=src/aws_durable_execution_sdk_python --cov-fail-under=98" [tool.hatch.envs.types] extra-dependencies = ["mypy>=1.0.0", "pytest", "boto3-stubs[lambda]"] + [tool.hatch.envs.types.scripts] check = "mypy --install-types --non-interactive {args:src/aws_durable_execution_sdk_python tests}" +[tool.hatch.envs.examples] +dependencies = [ + "aws-durable-execution-sdk-python-testing>=1.0.0", +] + +[tool.hatch.envs.examples.scripts] +cli = "python examples/cli.py {args}" +bootstrap = "python examples/cli.py bootstrap" +generate-sam-template = "python examples/scripts/generate_sam_template.py" +build = "python examples/cli.py build" +deploy = "python examples/cli.py deploy {args}" +invoke = "python examples/cli.py invoke {args}" +get = "python examples/cli.py get {args}" +history = "python examples/cli.py history {args}" +policy = "python examples/cli.py policy {args}" +list = "python examples/cli.py list" +clean = "rm -rf examples/build examples/.aws-sam examples/*.zip" + [tool.coverage.run] source_pkgs = ["aws_durable_execution_sdk_python"] branch = true @@ -84,3 +110,25 @@ lines-after-imports = 2 "SIM117", "TRY301", ] +"examples/test/**" = [ + "ARG001", + "ARG002", + "ARG005", + "S101", + "PLR2004", + "SIM117", + "TRY301", +] + +[tool.pytest.ini_options] +# Declare custom markers to avoid warnings with --strict-markers +markers = [ + # Used for test selection with -m 
example + "example: marks tests as example tests (deselect with '-m \"not example\"')", + # Used for configuration - passes handler and lambda_function_name to durable_runner fixture + "durable_execution: marks tests that use the durable_runner fixture (not used for test selection)", +] +# Default test discovery paths +testpaths = ["tests", "examples/test"] +# Default options for all test runs +addopts = "-v --strict-markers" From cf19176c48ed79ece7a87f0a79955abf171bde89 Mon Sep 17 00:00:00 2001 From: Frank Chen Date: Mon, 27 Apr 2026 18:40:26 -0700 Subject: [PATCH 2/6] fix format --- .github/workflows/integration-tests.yml | 4 ++-- examples/cli.py | 12 +++++++----- 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/.github/workflows/integration-tests.yml b/.github/workflows/integration-tests.yml index 1b3ae8f7..10ef6114 100644 --- a/.github/workflows/integration-tests.yml +++ b/.github/workflows/integration-tests.yml @@ -30,10 +30,10 @@ jobs: - name: Install Hatch run: python -m pip install hatch==1.16.5 - - name: Setup and run Testing SDK + - name: Setup and run tests working-directory: language-sdk run: | - echo "Running SDK tests against Language SDK PR changes..." + echo "Running SDK tests..." 
hatch run -- test:pip install -e ../language-sdk hatch fmt --check hatch run types:check diff --git a/examples/cli.py b/examples/cli.py index d15fc916..5e135a44 100755 --- a/examples/cli.py +++ b/examples/cli.py @@ -50,16 +50,16 @@ def build_examples(): sdk_path = Path(aws_durable_execution_sdk_python_testing.__file__).parent logger.info("Copying SDK from %s", sdk_path) - shutil.copytree(sdk_path, build_dir / "aws_durable_execution_sdk_python_testing") + shutil.copytree( + sdk_path, build_dir / "aws_durable_execution_sdk_python_testing" + ) except (ImportError, OSError): logger.exception("Failed to copy testing library") return False # Copy testing SDK source testing_src = ( - Path(__file__).parent.parent - / "src" - / "aws_durable_execution_sdk_python" + Path(__file__).parent.parent / "src" / "aws_durable_execution_sdk_python" ) logger.info("Copying SDK from %s", testing_src) shutil.copytree(testing_src, build_dir / "aws_durable_execution_sdk_python") @@ -243,7 +243,9 @@ def get_aws_config(): """Get AWS configuration from environment.""" config = { "region": os.getenv("AWS_REGION", "us-west-2"), - "lambda_endpoint": os.getenv("LAMBDA_ENDPOINT", "https://lambda.us-west-2.amazonaws.com"), + "lambda_endpoint": os.getenv( + "LAMBDA_ENDPOINT", "https://lambda.us-west-2.amazonaws.com" + ), "account_id": os.getenv("AWS_ACCOUNT_ID"), "kms_key_arn": os.getenv("KMS_KEY_ARN"), } From f39e942cc40d4aa7387c2bbe9d32de76d71ef8a8 Mon Sep 17 00:00:00 2001 From: Frank Chen Date: Mon, 27 Apr 2026 18:42:25 -0700 Subject: [PATCH 3/6] remove ssh key --- .github/workflows/deploy-examples.yml | 5 ----- 1 file changed, 5 deletions(-) diff --git a/.github/workflows/deploy-examples.yml b/.github/workflows/deploy-examples.yml index 0c6e6d07..b77807b5 100644 --- a/.github/workflows/deploy-examples.yml +++ b/.github/workflows/deploy-examples.yml @@ -42,11 +42,6 @@ jobs: steps: - uses: actions/checkout@v4 - - name: Setup SSH Agent - uses: 
webfactory/ssh-agent@dc588b651fe13675774614f8e6a936a468676387 # v0.9.0 - with: - ssh-private-key: ${{ secrets.SDK_KEY }} - - name: Setup Python uses: actions/setup-python@v4 with: From 74b93248946a854782d287355b232e2afeef8690 Mon Sep 17 00:00:00 2001 From: Frank Chen Date: Mon, 27 Apr 2026 18:48:45 -0700 Subject: [PATCH 4/6] remove endpoint --- .github/workflows/deploy-examples.yml | 4 ++-- .github/workflows/integration-tests.yml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/deploy-examples.yml b/.github/workflows/deploy-examples.yml index b77807b5..694e90f5 100644 --- a/.github/workflows/deploy-examples.yml +++ b/.github/workflows/deploy-examples.yml @@ -64,7 +64,7 @@ jobs: id: deploy env: AWS_ACCOUNT_ID: ${{ secrets.AWS_ACCOUNT_ID }} - LAMBDA_ENDPOINT: ${{ secrets.LAMBDA_ENDPOINT_BETA }} + LAMBDA_ENDPOINT: "https://lambda.us-west-2.amazonaws.com" KMS_KEY_ARN: ${{ secrets.KMS_KEY_ARN }} run: | # Build function name @@ -101,7 +101,7 @@ jobs: - name: Run Integration Tests - ${{ matrix.example.name }} env: AWS_REGION: ${{ env.AWS_REGION }} - LAMBDA_ENDPOINT: ${{ secrets.LAMBDA_ENDPOINT_BETA }} + LAMBDA_ENDPOINT: "https://lambda.us-west-2.amazonaws.com" QUALIFIED_FUNCTION_NAME: ${{ env.QUALIFIED_FUNCTION_NAME }} LAMBDA_FUNCTION_TEST_NAME: ${{ matrix.example.name }} run: | diff --git a/.github/workflows/integration-tests.yml b/.github/workflows/integration-tests.yml index 10ef6114..beed0fac 100644 --- a/.github/workflows/integration-tests.yml +++ b/.github/workflows/integration-tests.yml @@ -84,7 +84,7 @@ jobs: working-directory: language-sdk env: AWS_ACCOUNT_ID: ${{ secrets.AWS_ACCOUNT_ID }} - LAMBDA_ENDPOINT: ${{ secrets.LAMBDA_ENDPOINT }} + LAMBDA_ENDPOINT: "https://lambda.us-west-2.amazonaws.com" INVOKE_ACCOUNT_ID: ${{ secrets.INVOKE_ACCOUNT_ID }} KMS_KEY_ARN: ${{ secrets.KMS_KEY_ARN }} run: | From 57f8a0ba57bf2f64ec5b181fba99b812d44607dc Mon Sep 17 00:00:00 2001 From: Frank Chen Date: Mon, 27 Apr 2026 19:12:23 -0700 
Subject: [PATCH 5/6] remove cov --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index e4a687c0..8c7836a5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -129,6 +129,6 @@ markers = [ "durable_execution: marks tests that use the durable_runner fixture (not used for test selection)", ] # Default test discovery paths -testpaths = ["tests", "examples/test"] +testpaths = ["tests"] # Default options for all test runs addopts = "-v --strict-markers" From 4d0e7d62f889d1391fb66e72173db5c1276aefdf Mon Sep 17 00:00:00 2001 From: Frank Chen Date: Mon, 27 Apr 2026 19:19:43 -0700 Subject: [PATCH 6/6] fix hatch --- .github/workflows/integration-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.github/workflows/integration-tests.yml b/.github/workflows/integration-tests.yml index beed0fac..ba1f6ba4 100644 --- a/.github/workflows/integration-tests.yml +++ b/.github/workflows/integration-tests.yml @@ -66,6 +66,9 @@ jobs: role-session-name: languageSDKIntegrationTest aws-region: ${{ env.AWS_REGION }} + - name: Install Hatch + run: python -m pip install hatch==1.16.5 + - name: Get integration examples id: get-examples working-directory: language-sdk/examples