From df248bb127f1378f20fec875c69f9813cec26048 Mon Sep 17 00:00:00 2001 From: Rachel Hagerman <110480692+rlhagerm@users.noreply.github.com> Date: Wed, 12 Nov 2025 13:54:28 -0600 Subject: [PATCH 01/23] Updates to steering --- .gitignore | 1 + .kiro/settings/mcp.json | 40 + .kiro/steering/orchestration.md | 17 + dotnetv4/DotNetV4Examples.sln | 23 + .../SPECIFICATION copy.md | 118 +++ .../SPECIFICATION.md | 345 +++++-- steering_docs/dotnet-tech/scenario.md | 898 ++++++++++++++++++ 7 files changed, 1388 insertions(+), 54 deletions(-) create mode 100644 .kiro/settings/mcp.json create mode 100644 .kiro/steering/orchestration.md create mode 100644 scenarios/features/cloudwatch_logs_large_query/SPECIFICATION copy.md create mode 100644 steering_docs/dotnet-tech/scenario.md diff --git a/.gitignore b/.gitignore index 0b25f6593e2..d6f42f4afdd 100644 --- a/.gitignore +++ b/.gitignore @@ -38,3 +38,4 @@ kotlin/services/**/gradlew kotlin/services/**/gradlew.bat kotlin/services/**/.kotlin/ /.local/ +/.kiro/settings diff --git a/.kiro/settings/mcp.json b/.kiro/settings/mcp.json new file mode 100644 index 00000000000..42fb25a070f --- /dev/null +++ b/.kiro/settings/mcp.json @@ -0,0 +1,40 @@ +{ + "mcpServers": { + "awslabs.bedrock-kb-retrieval-mcp-server": { + "command": "uv", + "args": [ + "tool", + "run", + "--from", + "awslabs.bedrock-kb-retrieval-mcp-server@latest", + "awslabs.bedrock-kb-retrieval-mcp-server.exe" + ], + "env": { + "FASTMCP_LOG_LEVEL": "ERROR", + "AWS_PROFILE": "cex-ai-kb-access", + "AWS_REGION": "us-west-2" + }, + "disabled": false, + "autoApprove": [ + "QueryKnowledgeBases" + ], + "disabledTools": [ + "ListKnowledgeBases" + ] + }, + "aws-knowledge-mcp-server": { + "command": "uvx", + "args": [ + "mcp-proxy", + "--transport", + "streamablehttp", + "https://knowledge-mcp.global.api.aws" + ], + "disabled": false, + "autoApprove": [ + "aws___search_documentation", + "aws___read_documentation" + ] + } + } +} \ No newline at end of file diff --git 
a/.kiro/steering/orchestration.md b/.kiro/steering/orchestration.md new file mode 100644 index 00000000000..190c9c603f2 --- /dev/null +++ b/.kiro/steering/orchestration.md @@ -0,0 +1,17 @@ +# Code Generation Orchestration + +## Purpose +Define location of relevant steering docs that are outside of the .kiro directory. Refer to all steering docs in the ./steering_docs directory. Use the appropriate steering instructions for the requested language. Use the directories given below. + +- .NET: dotnet-tech +- Java: java-tech +- Kotlin: kotlin-tech + +## Code Generation +When a specification file is provided by the user, use that specification directly. Do not create your own spec or task breakdown. Follow the provided specification exactly and implement the requirements as described. + +If no specification is provided, then do not use separate steps for planning and tasks unless specifically asked to do so. Perform the tasks without stopping for user input. + + + + diff --git a/dotnetv4/DotNetV4Examples.sln b/dotnetv4/DotNetV4Examples.sln index e4e1cf6f809..cfa92fb639b 100644 --- a/dotnetv4/DotNetV4Examples.sln +++ b/dotnetv4/DotNetV4Examples.sln @@ -93,6 +93,14 @@ Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "CloudWatchScenario", "Cloud EndProject Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "CloudWatchActions", "CloudWatch\Actions\CloudWatchActions.csproj", "{EAF4A3B8-5CD0-48ED-B848-0EA6D451B8D3}" EndProject +Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "CloudWatchLogs", "CloudWatchLogs", "{A1B2C3D4-E5F6-7890-1234-567890ABCDEF}" +EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "CloudWatchLogsTests", "CloudWatchLogs\Tests\CloudWatchLogsTests.csproj", "{B2C3D4E5-F6A7-8901-2345-678901BCDEF0}" +EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "CloudWatchLogsScenarios", "CloudWatchLogs\Scenarios\CloudWatchLogsScenarios.csproj", "{C3D4E5F6-A7B8-9012-3456-789012CDEF01}" +EndProject 
+Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "CloudWatchLogsActions", "CloudWatchLogs\Actions\CloudWatchLogsActions.csproj", "{D4E5F6A7-B8C9-0123-4567-890123DEF012}" +EndProject Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "EC2", "EC2", "{9424FB14-B6DE-44CE-B675-AC2B57EC1E69}" EndProject Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "EC2Tests", "EC2\Tests\EC2Tests.csproj", "{C99A0F7C-9477-4985-90F6-8EED38ECAC10}" EndProject @@ -279,6 +287,18 @@ Global {EAF4A3B8-5CD0-48ED-B848-0EA6D451B8D3}.Debug|Any CPU.ActiveCfg = Debug|Any CPU {EAF4A3B8-5CD0-48ED-B848-0EA6D451B8D3}.Debug|Any CPU.Build.0 = Debug|Any CPU {EAF4A3B8-5CD0-48ED-B848-0EA6D451B8D3}.Release|Any CPU.ActiveCfg = Release|Any CPU {EAF4A3B8-5CD0-48ED-B848-0EA6D451B8D3}.Release|Any CPU.Build.0 = Release|Any CPU + {B2C3D4E5-F6A7-8901-2345-678901BCDEF0}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {B2C3D4E5-F6A7-8901-2345-678901BCDEF0}.Debug|Any CPU.Build.0 = Debug|Any CPU + {B2C3D4E5-F6A7-8901-2345-678901BCDEF0}.Release|Any CPU.ActiveCfg = Release|Any CPU + {B2C3D4E5-F6A7-8901-2345-678901BCDEF0}.Release|Any CPU.Build.0 = Release|Any CPU + {C3D4E5F6-A7B8-9012-3456-789012CDEF01}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {C3D4E5F6-A7B8-9012-3456-789012CDEF01}.Debug|Any CPU.Build.0 = Debug|Any CPU + {C3D4E5F6-A7B8-9012-3456-789012CDEF01}.Release|Any CPU.ActiveCfg = Release|Any CPU + {C3D4E5F6-A7B8-9012-3456-789012CDEF01}.Release|Any CPU.Build.0 = Release|Any CPU + {D4E5F6A7-B8C9-0123-4567-890123DEF012}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {D4E5F6A7-B8C9-0123-4567-890123DEF012}.Debug|Any CPU.Build.0 = Debug|Any CPU + {D4E5F6A7-B8C9-0123-4567-890123DEF012}.Release|Any CPU.ActiveCfg = Release|Any CPU + {D4E5F6A7-B8C9-0123-4567-890123DEF012}.Release|Any CPU.Build.0 = Release|Any CPU {C99A0F7C-9477-4985-90F6-8EED38ECAC10}.Debug|Any CPU.ActiveCfg = Debug|Any CPU {C99A0F7C-9477-4985-90F6-8EED38ECAC10}.Debug|Any CPU.Build.0 = Debug|Any CPU {C99A0F7C-9477-4985-90F6-8EED38ECAC10}.Release|Any CPU.ActiveCfg = Release|Any CPU @@ -392,6 +412,9 @@ Global {106FBE12-6FF7-40DC-9B3C-E5F67F335B32} = 
{CED87D19-7F82-4D67-8A30-3EE085D07E45} {565A9701-3D9C-49F8-86B7-D256A1D9E074} = {CED87D19-7F82-4D67-8A30-3EE085D07E45} {EAF4A3B8-5CD0-48ED-B848-0EA6D451B8D3} = {CED87D19-7F82-4D67-8A30-3EE085D07E45} + {B2C3D4E5-F6A7-8901-2345-678901BCDEF0} = {A1B2C3D4-E5F6-7890-1234-567890ABCDEF} + {C3D4E5F6-A7B8-9012-3456-789012CDEF01} = {A1B2C3D4-E5F6-7890-1234-567890ABCDEF} + {D4E5F6A7-B8C9-0123-4567-890123DEF012} = {A1B2C3D4-E5F6-7890-1234-567890ABCDEF} {C99A0F7C-9477-4985-90F6-8EED38ECAC10} = {9424FB14-B6DE-44CE-B675-AC2B57EC1E69} {6C167F25-F97F-4854-8CD8-A2D446B6799B} = {9424FB14-B6DE-44CE-B675-AC2B57EC1E69} {D95519CA-BD27-45AE-B83B-3FB02E7AE445} = {6C167F25-F97F-4854-8CD8-A2D446B6799B} diff --git a/scenarios/features/cloudwatch_logs_large_query/SPECIFICATION copy.md b/scenarios/features/cloudwatch_logs_large_query/SPECIFICATION copy.md new file mode 100644 index 00000000000..788d7859efc --- /dev/null +++ b/scenarios/features/cloudwatch_logs_large_query/SPECIFICATION copy.md @@ -0,0 +1,118 @@ +# CloudWatch Logs large query - Technical specification + +This document contains the technical specifications for _CloudWatch Logs large query_, +a feature scenario that showcases AWS services and SDKs. It is primarily intended for the AWS code +examples team to use while developing this example in additional languages. + +This document explains the following: + +- Deploying AWS resources. +- Adding sample data. +- Setting up a large query. + +For an introduction, see the [README.md](README.md). + +--- + +### Table of contents + +- [Architecture](#architecture) +- [User input](#user-input) +- [Common resources](#common-resources) +- [Building the queries](#building-the-queries) +- [Output](#output) +- [Metadata](#metadata) + +## Architecture + +- Amazon CloudWatch Logs group +- Amazon CloudWatch Logs stream + +--- + +## User input + +The example should allow the configuration of a query start date, query end date, and results limit.
It's up to you to decide how to allow this configuration. + +### Suggested variable names + +- `QUERY_START_DATE` - The oldest date that will be queried. +- `QUERY_END_DATE` - The newest date that will be queried. +- `QUERY_LIMIT` - The maximum number of results to return. CloudWatch has a maximum of 10,000. + +--- + +## Common resources + +This example has a set of common resources that are stored in the [resources](resources) folder. + +- [stack.yaml](resources/stack.yaml) is an AWS CloudFormation template containing the resources needed to run this example. +- [make-log-files.sh](resources/make-log-files.sh) is a bash script that creates log data. **Five minutes of logs, starting at the time of execution, will be created. Wait at least five minutes after running this script before attempting to query.** +- [put-log-events](resources/put-log-events.sh) is a bash script that ingests log data and uploads it to CloudWatch. + +--- + +## Building the queries + +### Building and waiting for single query + +The query itself is a "CloudWatch Logs Insights query syntax" string. The query must return the `@timestamp` field so follow-up queries can use that information. Here's a sample query string: `fields @timestamp, @message | sort @timestamp asc`. Notice it sorts in ascending order. You can sort in either `asc` or `desc`, but the recursive strategy described later will need to match accordingly. + +Queries are jobs. You can start a query with `StartQuery`, but it immediately returns the `queryId`. You must poll a query using `GetQueryResults` until the query has finished. For the purpose of this example, a query has "finished" when `GetQueryResults` has returned a status of one of "Complete", "Failed", "Cancelled", "Timeout", or "Unknown". + +`StartQuery` responds with an error if the query's start or end date occurs out of bounds of the log group creation date. The error message starts with "Query's end date and time". + +Start the query and wait for it to "finish". 
Store the `results`. If the count of the results is less than the configured LIMIT, return the results. If the results are greater than or equal to the limit, go to [Recursive queries](#recursive-queries). + +--- + +### Recursive queries + +If the result count from the previous step is 10000 (or the configured LIMIT), it is very likely that there are more results. **The example must do a binary search of the remaining logs**. To do this, get the date of the last log (earliest or latest, depending on sort order). Use that date as the start date of a new date range. The end date can remain the same. + +Split that date range in half, resulting in two new date ranges. Call your query function twice; once for each new date range. + +Concatenate the results of the first query with the results of the two new queries. + +The following pseudocode illustrates this. + +```pseudocode +func large_query(date_range): + query_results = get_query_results(date_range) + + if query_results.length < LIMIT + return query_results + else + date_range = [query_results.end, date_range.end] + d1, d2 = split(date_range) + return concat(query_results, large_query(d1), large_query(d2)) +``` + +## Output + +To illustrate the search, log the date ranges for each query made and the number of logs that were found. + +Example: + +``` +Starting a recursive query... +Query date range: 2023-12-22T19:08:42.000Z to 2023-12-22T19:13:41.994Z. Found 10000 logs. +Query date range: 2023-12-22T19:09:41.995Z to 2023-12-22T19:11:41.994Z. Found 10000 logs. +Query date range: 2023-12-22T19:11:41.995Z to 2023-12-22T19:13:41.994Z. Found 10000 logs. +Query date range: 2023-12-22T19:10:41.995Z to 2023-12-22T19:11:11.994Z. Found 5000 logs. +Query date range: 2023-12-22T19:11:11.995Z to 2023-12-22T19:11:41.994Z. Found 5000 logs. +Query date range: 2023-12-22T19:12:41.995Z to 2023-12-22T19:13:11.994Z. Found 5000 logs. +Query date range: 2023-12-22T19:13:11.995Z to 2023-12-22T19:13:41.994Z. Found 5000 logs. 
+Queries finished in 11.253 seconds. +Total logs found: 50000 +``` + +--- + +## Metadata + +| action / scenario | metadata file | metadata key | +| ----------------- | ----------------------------- | --------------------------------- | +| `GetQueryResults` | cloudwatch-logs_metadata.yaml | cloudwatch-logs_GetQueryResults | +| `StartQuery` | cloudwatch-logs_metadata.yaml | cloudwatch-logs_StartQuery | +| `Large Query` | cloudwatch-logs_metadata.yaml | cloudwatch-logs_Scenario_LargeQuery | diff --git a/scenarios/features/cloudwatch_logs_large_query/SPECIFICATION.md b/scenarios/features/cloudwatch_logs_large_query/SPECIFICATION.md index 788d7859efc..acb3406cb89 100644 --- a/scenarios/features/cloudwatch_logs_large_query/SPECIFICATION.md +++ b/scenarios/features/cloudwatch_logs_large_query/SPECIFICATION.md @@ -1,101 +1,270 @@ -# CloudWatch Logs large query - Technical specification +# CloudWatch Logs Large Query - Technical Specification -This document contains the technical specifications for _CloudWatch Logs large query_, -a feature scenario that showcases AWS services and SDKs. It is primarily intended for the AWS code -examples team to use while developing this example in additional languages. +## Overview -This document explains the following: +This feature scenario demonstrates how to perform large-scale queries on Amazon CloudWatch Logs using recursive binary search to retrieve more than the 10,000 result limit. The scenario showcases: -- Deploying AWS resources. -- Adding sample data. -- Setting up a large query. +1. Deploying CloudFormation resources (log group and stream) +2. Generating and ingesting 50,000 sample log entries +3. Performing recursive queries to retrieve all logs using binary search +4. Cleaning up resources For an introduction, see the [README.md](README.md). 
--- -### Table of contents +## Table of Contents -- [Architecture](#architecture) -- [User input](#user-input) -- [Common resources](#common-resources) -- [Building the queries](#building-the-queries) -- [Output](#output) +- [API Actions Used](#api-actions-used) +- [Resources](#resources) +- [Proposed Example Structure](#proposed-example-structure) +- [Implementation Details](#implementation-details) +- [Output Format](#output-format) +- [Errors](#errors) - [Metadata](#metadata) -## Architecture +--- + +## API Actions Used + +This scenario uses the following CloudWatch Logs API actions: + +- `StartQuery` - Initiates a CloudWatch Logs Insights query +- `GetQueryResults` - Retrieves results from a query, polling until complete -- Amazon CloudWatch Logs group -- Amazon CloudWatch Logs stream +This scenario uses the following CloudFormation API actions: + +- `CreateStack` - Deploys the CloudFormation template +- `DescribeStacks` - Checks stack status and retrieves outputs +- `DeleteStack` - Removes the CloudFormation stack --- -## User input +## Resources -The example should allow the configuration of a query start date, query end date, and results limit. It's up to you to decide how to allow this configuration. +### CloudFormation Template -### Suggested variable names +**Location**: `scenarios/features/cloudwatch_logs_large_query/resources/stack.yaml` -- `QUERY_START_DATE` - The oldest date that will be queried. -- `QUERY_END_DATE` - The newest date that will be queried. -- `QUERY_LIMIT` - The maximum number of results to return. CloudWatch has a maximum of 10,000. 
+**Resources Created**: +- CloudWatch Logs Log Group: `/workflows/cloudwatch-logs/large-query` +- CloudWatch Logs Log Stream: `stream1` ---- +**Stack Outputs**: None (resources use fixed names) + +### Sample Data Generation Scripts -## Common resources +**Script 1**: `scenarios/features/cloudwatch_logs_large_query/resources/make-log-files.sh` +- Creates 50,000 log entries divided into 5 JSON files (10,000 entries each) +- Generates timestamps spanning 5 minutes from execution time +- Outputs `QUERY_START_DATE` and `QUERY_END_DATE` environment variables +- Creates files: `file1.json`, `file2.json`, `file3.json`, `file4.json`, `file5.json` -This example has a set of common resources that are stored in the [resources](resources) folder. +**Script 2**: `scenarios/features/cloudwatch_logs_large_query/resources/put-log-events.sh` +- Uploads the generated JSON files to CloudWatch Logs +- Uses AWS CLI `put-log-events` command +- Targets log group: `/workflows/cloudwatch-logs/large-query` +- Targets log stream: `stream1` -- [stack.yaml](resources/stack.yaml) is an AWS CloudFormation template containing the resources needed to run this example. -- [make-log-files.sh](resources/make-log-files.sh) is a bash script that creates log data. **Five minutes of logs, starting at the time of execution, will be created. Wait at least five minutes after running this script before attempting to query.** -- [put-log-events](resources/put-log-events.sh) is a bash script that ingests log data and uploads it to CloudWatch. +**Python Alternative**: `scenarios/features/cloudwatch_logs_large_query/resources/create_logs.py` +- Python script that combines both generation and upload +- Creates 50,000 log entries and uploads them directly +- Returns start and end timestamps for query configuration +- Preferred for cross-platform compatibility --- -## Building the queries +## Proposed Example Structure + +### Phase 1: Setup + +**Purpose**: Deploy resources and generate sample data + +**Steps**: +1. 
Welcome message explaining the scenario +2. Prompt user: "Would you like to deploy the CloudFormation stack and generate sample logs? (y/n)" +3. If yes: + - Prompt for CloudFormation stack name (default: "CloudWatchLargeQueryStack") + - Deploy CloudFormation stack from `resources/stack.yaml` + - Wait for stack creation to complete (status: CREATE_COMPLETE) + - Execute log generation: + - **Option A** (Bash): Run `make-log-files.sh` then `put-log-events.sh` + - **Option B** (Python): Run `create_logs.py` (recommended for cross-platform) + - Capture `QUERY_START_DATE` and `QUERY_END_DATE` from script output + - Display message: "Sample logs created. Waiting 5 minutes for logs to be fully ingested..." + - Wait 5 minutes (300 seconds) for log ingestion +4. If no: + - Prompt user for existing log group name + - Prompt user for log stream name + - Prompt user for query start date (ISO 8601 format with milliseconds) + - Prompt user for query end date (ISO 8601 format with milliseconds) + +**Variables Set**: +- `stackName` - CloudFormation stack name +- `logGroupName` - Log group name (default: `/workflows/cloudwatch-logs/large-query`) +- `logStreamName` - Log stream name (default: `stream1`) +- `queryStartDate` - Start timestamp for query (milliseconds since epoch) +- `queryEndDate` - End timestamp for query (milliseconds since epoch) + +### Phase 2: Query Execution + +**Purpose**: Demonstrate recursive large query functionality + +**Steps**: +1. Display message: "Starting recursive query to retrieve all logs..." +2. Prompt user for query limit (default: 10000, max: 10000) +3. Set query string: `fields @timestamp, @message | sort @timestamp asc` +4. Execute recursive query function with: + - Log group name + - Query string + - Start date + - End date + - Limit +5. Display progress for each query executed (see [Output Format](#output-format)) +6. Display total execution time +7. Display total logs found +8. Prompt user: "Would you like to see a sample of the logs? 
(y/n)" +9. If yes, display first 10 log entries with timestamps and messages + +### Phase 3: Cleanup + +**Purpose**: Remove created resources + +**Steps**: +1. Prompt user: "Would you like to delete the CloudFormation stack and all resources? (y/n)" +2. If yes: + - Delete CloudFormation stack + - Wait for stack deletion to complete (status: DELETE_COMPLETE or stack not found) + - Display message: "Stack deleted successfully" +3. If no: + - Display message: "Resources will remain. You can delete them later through the AWS Console." + - Display stack name and log group name for reference -### Building and waiting for single query +--- -The query itself is a "CloudWatch Logs Insights query syntax" string. The query must return the `@timestamp` field so follow-up queries can use that information. Here's a sample query string: `fields @timestamp, @message | sort @timestamp asc`. Notice it sorts in ascending order. You can sort in either `asc` or `desc`, but the recursive strategy described later will need to match accordingly. +## Implementation Details -Queries are jobs. You can start a query with `StartQuery`, but it immediately returns the `queryId`. You must poll a query using `GetQueryResults` until the query has finished. For the purpose of this example, a query has "finished" when `GetQueryResults` has returned a status of one of "Complete", "Failed", "Cancelled", "Timeout", or "Unknown". +### CloudFormation Stack Deployment -`StartQuery` responds with an error if the query's start or end date occurs out of bounds of the log group creation date. The error message starts with "Query's end date and time". +**Deployment**: +``` +Stack Name: User-provided or default "CloudWatchLargeQueryStack" +Template: scenarios/features/cloudwatch_logs_large_query/resources/stack.yaml +Capabilities: None required (no IAM resources) +``` -Start the query and wait for it to "finish". Store the `results`. 
If the count of the results is less than the configured LIMIT, return the results. If the results are greater than or equal to the limit, go to [Recursive queries](#recursive-queries). +**Polling for Completion**: +- Poll `DescribeStacks` every 5-10 seconds +- Success: `StackStatus` = `CREATE_COMPLETE` +- Failure: `StackStatus` = `CREATE_FAILED`, `ROLLBACK_COMPLETE`, or `ROLLBACK_FAILED` +- Timeout: 5 minutes maximum wait time ---- +### Log Generation Execution -### Recursive queries +**Cross-Platform Considerations**: +- Bash scripts work on Linux, macOS, and Git Bash on Windows +- Python script is preferred for true cross-platform support +- Check for script availability before execution +- Handle script execution errors gracefully -If the result count from the previous step is 10000 (or the configured LIMIT), it is very likely that there are more results. **The example must do a binary search of the remaining logs**. To do this, get the date of the last log (earliest or latest, depending on sort order). Use that date as the start date of a new date range. The end date can remain the same. +**Capturing Output**: +- Parse stdout for `QUERY_START_DATE` and `QUERY_END_DATE` +- Convert timestamps to appropriate format for SDK +- Store timestamps for query configuration -Split that date range in half, resulting in two new date ranges. Call your query function twice; once for each new date range. +**Wait Time**: +- CloudWatch Logs requires time to ingest and index logs +- Minimum wait: 5 minutes (300 seconds) +- Display countdown or progress indicator during wait -Concatenate the results of the first query with the results of the two new queries. +### Building and Executing Queries + +**Query String**: +``` +fields @timestamp, @message | sort @timestamp asc +``` -The following pseudocode illustrates this. +**Important**: The query MUST return `@timestamp` field for recursive queries to work. 
-```pseudocode -func large_query(date_range): - query_results = get_query_results(date_range) +**StartQuery Parameters**: +- `logGroupName` - The log group to query +- `startTime` - Start of date range (seconds since epoch) +- `endTime` - End of date range (seconds since epoch) +- `queryString` - CloudWatch Logs Insights query syntax +- `limit` - Maximum results (default: 10000, max: 10000) - if query_results.length < LIMIT - return query_results - else - date_range = [query_results.end, date_range.end] - d1, d2 = split(date_range) - return concat(query_results, large_query(d1), large_query(d2)) +**GetQueryResults Polling**: +- Poll every 1-2 seconds +- Continue until status is one of: `Complete`, `Failed`, `Cancelled`, `Timeout`, `Unknown` +- Timeout after 60 seconds of polling + +**Error Handling**: +- If `StartQuery` returns error starting with "Query's end date and time", the date range is out of bounds +- Handle this by adjusting the date range or informing the user + +### Recursive Query Algorithm + +**Purpose**: Retrieve more than 10,000 results by splitting date ranges + +**Algorithm**: ``` +function LargeQuery(startDate, endDate, limit): + results = ExecuteQuery(startDate, endDate, limit) + + if results.count < limit: + return results + else: + // Get timestamp of last result + lastTimestamp = results[results.count - 1].timestamp + + // Calculate midpoint between last result and end date + midpoint = (lastTimestamp + endDate) / 2 + + // Query first half + results1 = LargeQuery(lastTimestamp, midpoint, limit) + + // Query second half + results2 = LargeQuery(midpoint, endDate, limit) + + // Combine results + return Concatenate(results, results1, results2) +``` + +**Key Points**: +- Use binary search to split remaining date range +- Recursively query each half +- Concatenate all results +- Log each query's date range and result count (see [Output Format](#output-format)) + +### Stack Deletion + +**Deletion**: +``` +Stack Name: Same as used during creation 
+``` + +**Polling for Completion**: +- Poll `DescribeStacks` every 5-10 seconds +- Success: Stack not found (ValidationError) or `StackStatus` = `DELETE_COMPLETE` +- Failure: `StackStatus` = `DELETE_FAILED` +- If `DELETE_FAILED`, optionally retry with force delete +- Timeout: 5 minutes maximum wait time + +--- -## Output +## Output Format -To illustrate the search, log the date ranges for each query made and the number of logs that were found. +### Query Progress Output -Example: +Display each query execution with the following format: ``` -Starting a recursive query... +Query date range: <start-date> to <end-date>. Found <count> logs. +``` + +**Example**: +``` +Starting recursive query... Query date range: 2023-12-22T19:08:42.000Z to 2023-12-22T19:13:41.994Z. Found 10000 logs. Query date range: 2023-12-22T19:09:41.995Z to 2023-12-22T19:11:41.994Z. Found 10000 logs. Query date range: 2023-12-22T19:11:41.995Z to 2023-12-22T19:13:41.994Z. Found 10000 logs. @@ -107,6 +276,74 @@ Queries finished in 11.253 seconds. Total logs found: 50000 ```
+``` + +--- + +## Errors + +### CloudFormation Errors + +| Error Code | Error Message Pattern | Handling Strategy | +|------------|----------------------|-------------------| +| `AlreadyExistsException` | Stack already exists | Prompt user for different stack name and retry | +| `ValidationError` | Template validation failed | Display error message and exit setup | +| `InsufficientCapabilitiesException` | Requires capabilities | Should not occur (template has no IAM resources) | + +### CloudWatch Logs Errors + +| Error Code | Error Message Pattern | Handling Strategy | +|------------|----------------------|-------------------| +| `InvalidParameterException` | "Query's end date and time" | Date range is out of bounds; inform user and adjust dates | +| `ResourceNotFoundException` | Log group not found | Verify log group exists; prompt user to run setup | +| `LimitExceededException` | Too many concurrent queries | Wait and retry after 5 seconds | +| `ServiceUnavailableException` | Service temporarily unavailable | Retry with exponential backoff (max 3 retries) | + +### Script Execution Errors + +| Error Type | Handling Strategy | +|------------|-------------------| +| Script not found | Display error message; provide manual instructions | +| Script execution failed | Display error output; allow user to retry or skip | +| Permission denied | Suggest making script executable (`chmod +x`) | +| AWS CLI not available | Inform user AWS CLI is required for bash scripts; suggest Python alternative | + +--- + +## User Input Variables + +### Required Variables + +| Variable Name | Description | Type | Default | Validation | +|--------------|-------------|------|---------|------------| +| `stackName` | CloudFormation stack name | String | "CloudWatchLargeQueryStack" | Must match pattern: `[a-zA-Z][-a-zA-Z0-9]*` | +| `queryStartDate` | Query start timestamp | Long/Integer | From script output | Milliseconds since epoch | +| `queryEndDate` | Query end timestamp | Long/Integer | 
From script output | Milliseconds since epoch | +| `queryLimit` | Maximum results per query | Integer | 10000 | Min: 1, Max: 10000 | + +### Optional Variables + +| Variable Name | Description | Type | Default | +|--------------|-------------|------|---------| +| `logGroupName` | Log group name (if not using stack) | String | "/workflows/cloudwatch-logs/large-query" | +| `logStreamName` | Log stream name (if not using stack) | String | "stream1" | + --- ## Metadata diff --git a/steering_docs/dotnet-tech/scenario.md b/steering_docs/dotnet-tech/scenario.md new file mode 100644 index 00000000000..271fac811af --- /dev/null +++ b/steering_docs/dotnet-tech/scenario.md @@ -0,0 +1,898 @@ +# .NET Feature Scenario Generation + +## Purpose +Generate feature scenarios that demonstrate complete workflows using multiple service operations in a guided, educational manner. Implementation must be based on the service SPECIFICATION.md file. + +## Requirements +- **Specification-Driven**: MUST read the `scenarios/features/{service_feature}/SPECIFICATION.md` +- **Interactive**: Use Console.WriteLine and Console.ReadLine for user input and guidance +- **Educational**: Break complex workflows into logical phases +- **Comprehensive**: Cover setup, demonstration, examination, and cleanup +- **Error Handling**: Graceful error handling with user-friendly messages +- **Wrapper Classes**: MUST use service wrapper classes for all operations +- **CloudFormation**: Deploy resources using CloudFormation stacks when specified +- **Namespaces**: MUST use file-level namespaces that match the project names +- **Using Statements**: MUST cleanup unused using statements + +## Project Structure + +Feature scenarios use a multi-project structure with separate projects for actions, scenarios, and tests: + +``` +dotnetv3/{Service}/ +├── {Service}.sln # Solution file +├── Actions/ +│ ├── {Service}Wrapper.cs # Wrapper class for service operations +│ ├── Hello{Service}.cs # Hello world example (optional) +│ └── 
{Service}Actions.csproj # Actions project file +├── Scenarios/ +│ ├── {Service}Workflow.cs # Main workflow/scenario file +│ ├── README.md # Scenario documentation +│ └── {Service}Scenario.csproj # Scenario project file (references Actions) +└── Tests/ + ├── {Service}WorkflowTests.cs # Unit tests for workflow + ├── Usings.cs # Global usings for tests + └── {Service}Tests.csproj # Test project file (references Scenarios) +``` + +## MANDATORY Pre-Implementation Steps + +### Step 1: Read Scenario Specification +**CRITICAL**: Always read `scenarios/features/{servicefeature}/SPECIFICATION.md` first to understand: +- **API Actions Used**: Exact operations to implement +- **Proposed Example Structure**: Setup, demonstration, examination, cleanup phases +- **Error Handling**: Specific error codes and handling requirements +- **Scenario Flow**: Step-by-step scenario description + +### Step 2: Extract Implementation Requirements +From the specification, identify: +- **Setup Phase**: What resources need to be created/configured +- **Demonstration Phase**: What operations to demonstrate +- **Examination Phase**: What data to display and how to filter/analyze +- **Cleanup Phase**: What resources to clean up and user options + +## Workflow Class Pattern + +### Implementation Pattern Based on SPECIFICATION.md + +```csharp +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +// snippet-start:[{Service}.dotnetv3.{Service}Workflow] +using Amazon.{Service}; +using Amazon.CloudFormation; +using Amazon.CloudFormation.Model; +using Microsoft.Extensions.DependencyInjection; +using Microsoft.Extensions.Hosting; +using Microsoft.Extensions.Logging; +using {Service}Actions; + +namespace {Service}Scenario; + +public class {Service}Workflow +{ + /* + Before running this .NET code example, set up your development environment, including your credentials. 
+ This .NET code example performs the following tasks for the {AWS Service} workflow: + + 1. Prepare the Application: + - {Setup step 1 from specification} + - {Setup step 2 from specification} + - Deploy the Cloud Formation template for resource creation. + - Store the outputs of the stack into variables for use in the scenario. + + 2. {Phase 2 Name}: + - {Phase 2 description from specification} + + 3. {Phase 3 Name}: + - {Phase 3 description from specification} + + 4. Clean up: + - Prompt the user for y/n answer if they want to destroy the stack and clean up all resources. + - Delete resources created during the workflow. + - Destroy the Cloud Formation stack and wait until the stack has been removed. + */ + + public static ILogger<{Service}Workflow> _logger = null!; + public static {Service}Wrapper _wrapper = null!; + public static IAmazonCloudFormation _amazonCloudFormation = null!; + + private static string _roleArn = null!; + private static string _targetArn = null!; + + public static bool _interactive = true; + private static string _stackName = "default-{service}-scenario-stack-name"; + private static string _stackResourcePath = "../../../../../../scenarios/features/{service_feature}/resources/cfn_template.yaml"; + + public static async Task Main(string[] args) + { + using var host = Host.CreateDefaultBuilder(args) + .ConfigureLogging(logging => + logging.AddFilter("System", LogLevel.Debug) + .AddFilter("Microsoft", LogLevel.Information) + .AddFilter("Microsoft", LogLevel.Trace)) + .ConfigureServices((_, services) => + services.AddAWSService() + .AddAWSService() + .AddTransient<{Service}Wrapper>() + ) + .Build(); + + if (_interactive) + { + _logger = LoggerFactory.Create(builder => { builder.AddConsole(); }) + .CreateLogger<{Service}Workflow>(); + + _wrapper = host.Services.GetRequiredService<{Service}Wrapper>(); + _amazonCloudFormation = host.Services.GetRequiredService(); + } + + Console.WriteLine(new string('-', 80)); + Console.WriteLine("Welcome to the 
{AWS Service Feature} Scenario."); + Console.WriteLine(new string('-', 80)); + + try + { + Console.WriteLine(new string('-', 80)); + var prepareSuccess = await PrepareApplication(); + Console.WriteLine(new string('-', 80)); + + if (prepareSuccess) + { + Console.WriteLine(new string('-', 80)); + await Phase2(); + Console.WriteLine(new string('-', 80)); + + Console.WriteLine(new string('-', 80)); + await Phase3(); + Console.WriteLine(new string('-', 80)); + } + + Console.WriteLine(new string('-', 80)); + await Cleanup(); + Console.WriteLine(new string('-', 80)); + } + catch (Exception ex) + { + _logger.LogError(ex, "There was a problem with the scenario, initiating cleanup..."); + _interactive = false; + await Cleanup(); + } + + Console.WriteLine("{AWS Service} scenario completed."); + } + + /// + /// Prepares the application by creating the necessary resources. + /// + /// True if the application was prepared successfully. + public static async Task PrepareApplication() + { + Console.WriteLine("Preparing the application..."); + try + { + // Prompt the user for required input (e.g., email, parameters) + Console.WriteLine("\nThis example creates resources in a CloudFormation stack."); + + var userInput = PromptUserForInput(); + + // Prompt the user for a name for the CloudFormation stack + _stackName = PromptUserForStackName(); + + // Deploy the CloudFormation stack + var deploySuccess = await DeployCloudFormationStack(_stackName, userInput); + + if (deploySuccess) + { + // Create additional resources if needed + Console.WriteLine("Application preparation complete."); + return true; + } + } + catch (Exception ex) + { + _logger.LogError(ex, "An error occurred while preparing the application."); + } + Console.WriteLine("Application preparation failed."); + return false; + } + + /// + /// Deploys the CloudFormation stack with the necessary resources. + /// + /// The name of the CloudFormation stack. + /// Parameter value for the stack. 
+ /// True if the stack was deployed successfully. + private static async Task DeployCloudFormationStack(string stackName, string parameter) + { + Console.WriteLine($"\nDeploying CloudFormation stack: {stackName}"); + + try + { + var request = new CreateStackRequest + { + StackName = stackName, + TemplateBody = await File.ReadAllTextAsync(_stackResourcePath), + Capabilities = { Capability.CAPABILITY_NAMED_IAM } + }; + + // If parameters are provided, set them + if (!string.IsNullOrWhiteSpace(parameter)) + { + request.Parameters = new List() + { + new() { ParameterKey = "parameterName", ParameterValue = parameter } + }; + } + + var response = await _amazonCloudFormation.CreateStackAsync(request); + + if (response.HttpStatusCode == System.Net.HttpStatusCode.OK) + { + Console.WriteLine($"CloudFormation stack creation started: {stackName}"); + + // Wait for the stack to be in CREATE_COMPLETE state + bool stackCreated = await WaitForStackCompletion(response.StackId); + + if (stackCreated) + { + // Retrieve the output values + var success = await GetStackOutputs(response.StackId); + return success; + } + else + { + _logger.LogError($"CloudFormation stack creation failed: {stackName}"); + return false; + } + } + else + { + _logger.LogError($"Failed to create CloudFormation stack: {stackName}"); + return false; + } + } + catch (AlreadyExistsException) + { + _logger.LogWarning($"CloudFormation stack '{stackName}' already exists. Please provide a unique name."); + var newStackName = PromptUserForStackName(); + return await DeployCloudFormationStack(newStackName, parameter); + } + catch (Exception ex) + { + _logger.LogError(ex, $"An error occurred while deploying the CloudFormation stack: {stackName}"); + return false; + } + } + + /// + /// Waits for the CloudFormation stack to be in the CREATE_COMPLETE state. + /// + /// The ID of the CloudFormation stack. + /// True if the stack was created successfully. 
+ private static async Task WaitForStackCompletion(string stackId) + { + int retryCount = 0; + const int maxRetries = 10; + const int retryDelay = 30000; // 30 seconds. + + while (retryCount < maxRetries) + { + var describeStacksRequest = new DescribeStacksRequest + { + StackName = stackId + }; + + var describeStacksResponse = await _amazonCloudFormation.DescribeStacksAsync(describeStacksRequest); + + if (describeStacksResponse.Stacks.Count > 0) + { + if (describeStacksResponse.Stacks[0].StackStatus == StackStatus.CREATE_COMPLETE) + { + Console.WriteLine("CloudFormation stack creation complete."); + return true; + } + if (describeStacksResponse.Stacks[0].StackStatus == StackStatus.CREATE_FAILED || + describeStacksResponse.Stacks[0].StackStatus == StackStatus.ROLLBACK_COMPLETE) + { + Console.WriteLine("CloudFormation stack creation failed."); + return false; + } + } + + Console.WriteLine("Waiting for CloudFormation stack creation to complete..."); + await Task.Delay(retryDelay); + retryCount++; + } + + _logger.LogError("Timed out waiting for CloudFormation stack creation to complete."); + return false; + } + + /// + /// Retrieves the output values from the CloudFormation stack. + /// + /// The ID of the CloudFormation stack. 
+ private static async Task GetStackOutputs(string stackId) + { + try + { + var describeStacksRequest = new DescribeStacksRequest { StackName = stackId }; + + var describeStacksResponse = + await _amazonCloudFormation.DescribeStacksAsync(describeStacksRequest); + + if (describeStacksResponse.Stacks.Count > 0) + { + var stack = describeStacksResponse.Stacks[0]; + _roleArn = GetStackOutputValue(stack, "RoleARN"); + _targetArn = GetStackOutputValue(stack, "TargetARN"); + return true; + } + else + { + _logger.LogError($"No stack found for stack outputs: {stackId}"); + return false; + } + } + catch (Exception ex) + { + _logger.LogError( + ex, $"Failed to retrieve CloudFormation stack outputs: {stackId}"); + return false; + } + } + + /// + /// Get an output value by key from a CloudFormation stack. + /// + /// The CloudFormation stack. + /// The key of the output. + /// The value as a string. + private static string GetStackOutputValue(Stack stack, string outputKey) + { + var output = stack.Outputs.First(o => o.OutputKey == outputKey); + var outputValue = output.OutputValue; + Console.WriteLine($"Stack output {outputKey}: {outputValue}"); + return outputValue; + } + + /// + /// Cleans up the resources created during the scenario. + /// + /// True if the cleanup was successful. + public static async Task Cleanup() + { + // Prompt the user to confirm cleanup. + var cleanup = !_interactive || GetYesNoResponse( + "Do you want to delete all resources created by this scenario? (y/n) "); + if (cleanup) + { + try + { + // Delete scenario-specific resources first + + // Destroy the CloudFormation stack and wait for it to be removed. 
+ var stackDeleteSuccess = await DeleteCloudFormationStack(_stackName, false); + + return stackDeleteSuccess; + } + catch (Exception ex) + { + _logger.LogError(ex, + "An error occurred while cleaning up the resources."); + return false; + } + } + _logger.LogInformation("{Service} scenario is complete."); + return true; + } + + /// + /// Delete the resources in the stack and wait for confirmation. + /// + /// The name of the stack. + /// True to force delete the stack. + /// True if successful. + private static async Task DeleteCloudFormationStack(string stackName, bool forceDelete) + { + var request = new DeleteStackRequest + { + StackName = stackName, + }; + + if (forceDelete) + { + request.DeletionMode = DeletionMode.FORCE_DELETE_STACK; + } + + await _amazonCloudFormation.DeleteStackAsync(request); + Console.WriteLine($"CloudFormation stack '{_stackName}' is being deleted. This may take a few minutes."); + + bool stackDeleted = await WaitForStackDeletion(_stackName, forceDelete); + + if (stackDeleted) + { + Console.WriteLine($"CloudFormation stack '{_stackName}' has been deleted."); + return true; + } + else + { + _logger.LogError($"Failed to delete CloudFormation stack '{_stackName}'."); + return false; + } + } + + /// + /// Wait for the stack to be deleted. + /// + /// The name of the stack. + /// True to force delete the stack. + /// True if successful. 
+ private static async Task WaitForStackDeletion(string stackName, bool forceDelete) + { + int retryCount = 0; + const int maxRetries = 10; + const int retryDelay = 30000; // 30 seconds + + while (retryCount < maxRetries) + { + var describeStacksRequest = new DescribeStacksRequest + { + StackName = stackName + }; + + try + { + var describeStacksResponse = await _amazonCloudFormation.DescribeStacksAsync(describeStacksRequest); + + if (describeStacksResponse.Stacks.Count == 0 || describeStacksResponse.Stacks[0].StackStatus == StackStatus.DELETE_COMPLETE) + { + return true; + } + if (!forceDelete && describeStacksResponse.Stacks[0].StackStatus == StackStatus.DELETE_FAILED) + { + // Try one time to force delete. + return await DeleteCloudFormationStack(stackName, true); + } + } + catch (AmazonCloudFormationException ex) when (ex.ErrorCode == "ValidationError") + { + // Stack does not exist, so it has been successfully deleted. + return true; + } + + Console.WriteLine($"Waiting for CloudFormation stack '{stackName}' to be deleted..."); + await Task.Delay(retryDelay); + retryCount++; + } + + _logger.LogError($"Timed out waiting for CloudFormation stack '{stackName}' to be deleted."); + return false; + } + + /// + /// Helper method to get a yes or no response from the user. + /// + /// The question string to print on the console. + /// True if the user responds with a yes. + private static bool GetYesNoResponse(string question) + { + Console.WriteLine(question); + var ynResponse = Console.ReadLine(); + var response = ynResponse != null && ynResponse.Equals("y", StringComparison.InvariantCultureIgnoreCase); + return response; + } + + /// + /// Prompt the user for a non-empty stack name. 
+ /// + /// The valid stack name + private static string PromptUserForStackName() + { + Console.WriteLine("Enter a name for the AWS Cloud Formation Stack: "); + if (_interactive) + { + string stackName = Console.ReadLine()!; + var regex = "[a-zA-Z][-a-zA-Z0-9]|arn:[-a-zA-Z0-9:/._+]"; + if (!Regex.IsMatch(stackName, regex)) + { + Console.WriteLine( + $"Invalid stack name. Please use a name that matches the pattern {regex}."); + return PromptUserForStackName(); + } + + return stackName; + } + // Used when running without user prompts. + return _stackName; + } + + /// + /// Prompt the user for required input. + /// + /// The user input value + private static string PromptUserForInput() + { + if (_interactive) + { + Console.WriteLine("Enter required input: "); + string input = Console.ReadLine()!; + // Add validation as needed + return input; + } + // Used when running without user prompts. + return ""; + } +} +// snippet-end:[{Service}.dotnetv3.{Service}Workflow] +``` + +## Project Files + +### Actions Project (.csproj) + +```xml + + + + Exe + net6.0 + enable + enable + + + + + + + + + + +``` + +### Scenarios Project (.csproj) + +```xml + + + + Exe + net6.0 + enable + enable + + + + + + + + + + + + + + +``` + +### Tests Project (.csproj) + +```xml + + + + net6.0 + enable + enable + false + + + + + + + + + + runtime; build; native; contentfiles; analyzers; buildtransitive + all + + + runtime; build; native; contentfiles; analyzers; buildtransitive + all + + + + + + PreserveNewest + testsettings.json + + + + + + + + +``` + +## Workflow Phase Structure (Based on Specification) + +### Prepare Application Phase +- **Read specification Setup section** for exact requirements +- Prompt user for required input (email, parameters, etc.) +- Prompt user for CloudFormation stack name +- Deploy CloudFormation stack with resources +- Wait for stack creation to complete +- Retrieve stack outputs (ARNs, IDs, etc.) +- Create additional resources if needed (schedule groups, etc.) 
+- Verify setup completion + +### Demonstration Phases +- **Follow specification phases** exactly +- Implement each phase as a separate method +- Use wrapper methods for all service operations +- Prompt user for input as specified +- Display progress and results +- Handle errors gracefully +- Allow user to proceed at their own pace + +### Cleanup Phase +- **Follow specification Cleanup section** guidance +- Prompt user to confirm cleanup +- Delete scenario-specific resources first +- Delete CloudFormation stack +- Wait for stack deletion to complete +- Handle deletion errors (retry with force delete if needed) +- Confirm completion + +## CloudFormation Integration + +### Stack Deployment +- Store CloudFormation template path in a constant +- Template should be in `scenarios/features/{service_feature}/resources/cfn_template.yaml` +- Use relative path from Scenarios project: `"../../../../../../scenarios/features/{service_feature}/resources/cfn_template.yaml"` +- Deploy stack with `CAPABILITY_NAMED_IAM` capability +- Pass user input as stack parameters +- Handle `AlreadyExistsException` by prompting for new stack name + +### Stack Output Retrieval +- Retrieve outputs after stack creation completes +- Store output values in static fields for use throughout workflow +- Common outputs: Role ARNs, Topic ARNs, Resource IDs +- Display output values to console for user visibility + +### Stack Deletion +- Delete stack during cleanup phase +- Wait for deletion to complete +- Handle `DELETE_FAILED` status by retrying with force delete +- Catch `ValidationError` exception (indicates stack already deleted) + +## User Interaction Patterns + +### Question Types +```csharp +// Yes/No questions +private static bool GetYesNoResponse(string question) +{ + Console.WriteLine(question); + var ynResponse = Console.ReadLine(); + var response = ynResponse != null && ynResponse.Equals("y", StringComparison.InvariantCultureIgnoreCase); + return response; +} + +// Text input with validation 
+private static string PromptUserForResourceName(string prompt) +{ + if (_interactive) + { + Console.WriteLine(prompt); + string resourceName = Console.ReadLine()!; + var regex = "[0-9a-zA-Z-_.]+"; + if (!Regex.IsMatch(resourceName, regex)) + { + Console.WriteLine($"Invalid resource name. Please use a name that matches the pattern {regex}."); + return PromptUserForResourceName(prompt); + } + return resourceName!; + } + // Used when running without user prompts. + return "resource-" + Guid.NewGuid(); +} + +// Numeric input +private static int PromptUserForInteger(string prompt) +{ + if (_interactive) + { + Console.WriteLine(prompt); + string stringResponse = Console.ReadLine()!; + if (string.IsNullOrWhiteSpace(stringResponse) || + !Int32.TryParse(stringResponse, out var intResponse)) + { + Console.WriteLine($"Invalid integer. "); + return PromptUserForInteger(prompt); + } + return intResponse!; + } + // Used when running without user prompts. + return 1; +} +``` + +### Information Display +```csharp +// Section separators +Console.WriteLine(new string('-', 80)); + +// Progress indicators +Console.WriteLine($"✓ Operation completed successfully"); +Console.WriteLine($"Waiting for operation to complete..."); + +// Formatted output +Console.WriteLine($"Found {count} items:"); +foreach (var item in items) +{ + Console.WriteLine($" - {item}"); +} +``` + +## Wrapper Class Pattern + +### Wrapper Class Structure +```csharp +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +// snippet-start:[{Service}.dotnetv3.{Service}Wrapper] +using Amazon.{Service}; +using Amazon.{Service}.Model; +using Microsoft.Extensions.Logging; + +namespace {Service}Actions; + +/// +/// Wrapper class for {AWS Service} operations. +/// +public class {Service}Wrapper +{ + private readonly IAmazon{Service} _amazon{Service}; + private readonly ILogger<{Service}Wrapper> _logger; + + /// + /// Constructor for the {Service}Wrapper class. 
+ /// + /// The injected {Service} client. + /// The injected logger. + public {Service}Wrapper(IAmazon{Service} amazon{Service}, ILogger<{Service}Wrapper> logger) + { + _amazon{Service} = amazon{Service}; + _logger = logger; + } + + // snippet-start:[{Service}.dotnetv3.OperationName] + /// + /// Description of what this operation does. + /// + /// Description of parameter. + /// Description of return value. + public async Task OperationAsync(string paramName) + { + try + { + var request = new OperationRequest + { + Parameter = paramName + }; + + var response = await _amazon{Service}.OperationAsync(request); + + Console.WriteLine($"Successfully performed operation."); + return true; + } + catch (ConflictException ex) + { + _logger.LogError($"Failed to perform operation due to a conflict. {ex.Message}"); + return false; + } + catch (ResourceNotFoundException ex) + { + _logger.LogError($"Resource not found: {ex.Message}"); + return false; + } + catch (Exception ex) + { + _logger.LogError($"An error occurred: {ex.Message}"); + return false; + } + } + // snippet-end:[{Service}.dotnetv3.OperationName] +} +// snippet-end:[{Service}.dotnetv3.{Service}Wrapper] +``` + +### Wrapper Method Guidelines +- Return `bool` for success/failure operations +- Return specific types for data retrieval operations +- Log errors using injected logger +- Display success messages to console +- Catch specific exceptions first, then general exceptions +- Include XML documentation for all public methods +- Use snippet tags for documentation extraction + +## Error Handling + +### Specification-Based Error Handling +The specification includes an "Errors" section with specific error codes and handling: + +```csharp +// Example error handling based on specification +try +{ + var response = await _wrapper.CreateResourceAsync(); + return response; +} +catch (ConflictException ex) +{ + // Handle as specified: Resource already exists + _logger.LogError($"Failed to create resource due to a conflict. 
{ex.Message}"); + return false; +} +catch (ResourceNotFoundException ex) +{ + // Handle as specified: Resource not found + _logger.LogError($"Resource not found: {ex.Message}"); + return true; // May return true if deletion was the goal +} +catch (Exception ex) +{ + _logger.LogError($"An error occurred: {ex.Message}"); + return false; +} +``` + +### Workflow Error Handling +- Wrap main workflow in try-catch block +- Log errors and initiate cleanup on failure +- Set `_interactive = false` to skip prompts during error cleanup +- Ensure cleanup runs in finally block or after error + +## Feature Scenario Requirements + +### MUST HAVE +- ✅ Read and implement based on `scenarios/features/{service_feature}/SPECIFICATION.md` +- ✅ Use multi-project structure (Actions, Scenarios, Tests) +- ✅ Deploy CloudFormation stack for resource creation +- ✅ Retrieve and use stack outputs +- ✅ Use wrapper classes for all AWS operations +- ✅ Implement proper cleanup with stack deletion +- ✅ Break workflow into logical phases per specification +- ✅ Include error handling per specification +- ✅ Support non-interactive mode for testing +- ✅ Use file-level namespaces +- ✅ Include snippet tags for documentation + +### Implementation Workflow + +1. **Read Specification**: Study `scenarios/features/{service_feature}/SPECIFICATION.md` +2. **Create Project Structure**: Set up Actions, Scenarios, and Tests projects +3. **Implement Wrapper**: Create wrapper class with all required operations +4. **Implement Workflow**: Create workflow class with phases from specification +5. **Add CloudFormation**: Integrate stack deployment and deletion +6. **Add User Interaction**: Implement prompts and validation +7. **Test**: Create unit tests for workflow methods +8. 
**Document**: Add README.md with scenario description + +### Specification Sections to Implement +- **API Actions Used**: All operations must be in wrapper class +- **Proposed example structure**: Maps to workflow phases +- **Setup**: CloudFormation deployment and resource creation +- **Demonstration**: Core service operations +- **Examination**: Data analysis and display +- **Cleanup**: Resource and stack deletion +- **Errors**: Specific error handling strategies \ No newline at end of file From d2e2335ddfea4d5a2cf9421a225fc95d03359089 Mon Sep 17 00:00:00 2001 From: Rachel Hagerman <110480692+rlhagerm@users.noreply.github.com> Date: Thu, 13 Nov 2025 09:52:12 -0600 Subject: [PATCH 02/23] Adding project files --- .../Actions/CloudWatchLogsActions.csproj | 18 + .../Actions/CloudWatchLogsWrapper.cs | 148 ++++ .../LargeQuery/CloudWatchLogsLargeQuery.sln | 31 + dotnetv4/CloudWatchLogs/LargeQuery/README.md | 69 ++ .../Scenarios/CloudWatchLogsScenario.csproj | 22 + .../Scenarios/LargeQueryWorkflow.cs | 662 ++++++++++++++++++ .../LargeQuery/Scenarios/README.md | 109 +++ .../Tests/CloudWatchLogsTests.csproj | 31 + .../Tests/LargeQueryWorkflowTests.cs | 143 ++++ .../CloudWatchLogs/LargeQuery/Tests/Usings.cs | 4 + dotnetv4/CloudWatchLogs/README.md | 35 + .../resources/create_logs.py | 70 ++ steering_docs/dotnet-tech/scenario.md | 33 +- 13 files changed, 1363 insertions(+), 12 deletions(-) create mode 100644 dotnetv4/CloudWatchLogs/LargeQuery/Actions/CloudWatchLogsActions.csproj create mode 100644 dotnetv4/CloudWatchLogs/LargeQuery/Actions/CloudWatchLogsWrapper.cs create mode 100644 dotnetv4/CloudWatchLogs/LargeQuery/CloudWatchLogsLargeQuery.sln create mode 100644 dotnetv4/CloudWatchLogs/LargeQuery/README.md create mode 100644 dotnetv4/CloudWatchLogs/LargeQuery/Scenarios/CloudWatchLogsScenario.csproj create mode 100644 dotnetv4/CloudWatchLogs/LargeQuery/Scenarios/LargeQueryWorkflow.cs create mode 100644 dotnetv4/CloudWatchLogs/LargeQuery/Scenarios/README.md create mode 100644 
dotnetv4/CloudWatchLogs/LargeQuery/Tests/CloudWatchLogsTests.csproj create mode 100644 dotnetv4/CloudWatchLogs/LargeQuery/Tests/LargeQueryWorkflowTests.cs create mode 100644 dotnetv4/CloudWatchLogs/LargeQuery/Tests/Usings.cs create mode 100644 dotnetv4/CloudWatchLogs/README.md create mode 100644 scenarios/features/cloudwatch_logs_large_query/resources/create_logs.py diff --git a/dotnetv4/CloudWatchLogs/LargeQuery/Actions/CloudWatchLogsActions.csproj b/dotnetv4/CloudWatchLogs/LargeQuery/Actions/CloudWatchLogsActions.csproj new file mode 100644 index 00000000000..3aa4085c546 --- /dev/null +++ b/dotnetv4/CloudWatchLogs/LargeQuery/Actions/CloudWatchLogsActions.csproj @@ -0,0 +1,18 @@ + + + + Exe + net8.0 + enable + enable + + + + + + + + + + + diff --git a/dotnetv4/CloudWatchLogs/LargeQuery/Actions/CloudWatchLogsWrapper.cs b/dotnetv4/CloudWatchLogs/LargeQuery/Actions/CloudWatchLogsWrapper.cs new file mode 100644 index 00000000000..98e5c1fcc14 --- /dev/null +++ b/dotnetv4/CloudWatchLogs/LargeQuery/Actions/CloudWatchLogsWrapper.cs @@ -0,0 +1,148 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +// snippet-start:[CloudWatchLogs.dotnetv3.CloudWatchLogsWrapper] +using Amazon.CloudWatchLogs; +using Amazon.CloudWatchLogs.Model; +using Microsoft.Extensions.Logging; + +namespace CloudWatchLogsActions; + +/// +/// Wrapper class for Amazon CloudWatch Logs operations. +/// +public class CloudWatchLogsWrapper +{ + private readonly IAmazonCloudWatchLogs _amazonCloudWatchLogs; + private readonly ILogger _logger; + + /// + /// Constructor for the CloudWatchLogsWrapper class. + /// + /// The injected CloudWatch Logs client. + /// The injected logger. 
+ public CloudWatchLogsWrapper(IAmazonCloudWatchLogs amazonCloudWatchLogs, ILogger logger) + { + _amazonCloudWatchLogs = amazonCloudWatchLogs; + _logger = logger; + } + + // snippet-start:[CloudWatchLogs.dotnetv3.StartQuery] + /// + /// Starts a CloudWatch Logs Insights query. + /// + /// The name of the log group to query. + /// The CloudWatch Logs Insights query string. + /// The start time for the query (seconds since epoch). + /// The end time for the query (seconds since epoch). + /// The maximum number of results to return. + /// The query ID if successful, null otherwise. + public async Task StartQueryAsync( + string logGroupName, + string queryString, + long startTime, + long endTime, + int limit = 10000) + { + try + { + var request = new StartQueryRequest + { + LogGroupName = logGroupName, + QueryString = queryString, + StartTime = startTime, + EndTime = endTime, + Limit = limit + }; + + var response = await _amazonCloudWatchLogs.StartQueryAsync(request); + return response.QueryId; + } + catch (InvalidParameterException ex) + { + _logger.LogError($"Invalid parameter for query: {ex.Message}"); + return null; + } + catch (ResourceNotFoundException ex) + { + _logger.LogError($"Log group not found: {ex.Message}"); + return null; + } + catch (Exception ex) + { + _logger.LogError($"An error occurred while starting query: {ex.Message}"); + return null; + } + } + // snippet-end:[CloudWatchLogs.dotnetv3.StartQuery] + + // snippet-start:[CloudWatchLogs.dotnetv3.GetQueryResults] + /// + /// Gets the results of a CloudWatch Logs Insights query. + /// + /// The ID of the query. + /// The query results response. 
+ public async Task GetQueryResultsAsync(string queryId) + { + try + { + var request = new GetQueryResultsRequest + { + QueryId = queryId + }; + + var response = await _amazonCloudWatchLogs.GetQueryResultsAsync(request); + return response; + } + catch (ResourceNotFoundException ex) + { + _logger.LogError($"Query not found: {ex.Message}"); + return null; + } + catch (Exception ex) + { + _logger.LogError($"An error occurred while getting query results: {ex.Message}"); + return null; + } + } + // snippet-end:[CloudWatchLogs.dotnetv3.GetQueryResults] + + // snippet-start:[CloudWatchLogs.dotnetv3.PutLogEvents] + /// + /// Puts log events to a CloudWatch Logs log stream. + /// + /// The name of the log group. + /// The name of the log stream. + /// The list of log events to put. + /// True if successful, false otherwise. + public async Task PutLogEventsAsync( + string logGroupName, + string logStreamName, + List logEvents) + { + try + { + var request = new PutLogEventsRequest + { + LogGroupName = logGroupName, + LogStreamName = logStreamName, + LogEvents = logEvents + }; + + await _amazonCloudWatchLogs.PutLogEventsAsync(request); + return true; + } + catch (ResourceNotFoundException ex) + { + _logger.LogError($"Log group or stream not found: {ex.Message}"); + return false; + } + catch (Exception ex) + { + _logger.LogError($"An error occurred while putting log events: {ex.Message}"); + return false; + } + } + // snippet-end:[CloudWatchLogs.dotnetv3.PutLogEvents] +} +// snippet-end:[CloudWatchLogs.dotnetv3.CloudWatchLogsWrapper] diff --git a/dotnetv4/CloudWatchLogs/LargeQuery/CloudWatchLogsLargeQuery.sln b/dotnetv4/CloudWatchLogs/LargeQuery/CloudWatchLogsLargeQuery.sln new file mode 100644 index 00000000000..eb27a092342 --- /dev/null +++ b/dotnetv4/CloudWatchLogs/LargeQuery/CloudWatchLogsLargeQuery.sln @@ -0,0 +1,31 @@ + +Microsoft Visual Studio Solution File, Format Version 12.00 +# Visual Studio Version 17 +VisualStudioVersion = 17.0.31903.59 +MinimumVisualStudioVersion 
= 10.0.40219.1 +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "CloudWatchLogsActions", "Actions\CloudWatchLogsActions.csproj", "{A1B2C3D4-E5F6-4A5B-8C9D-0E1F2A3B4C5D}" +EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "CloudWatchLogsScenario", "Scenarios\CloudWatchLogsScenario.csproj", "{B2C3D4E5-F6A7-5B6C-9D0E-1F2A3B4C5D6E}" +EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "CloudWatchLogsTests", "Tests\CloudWatchLogsTests.csproj", "{C3D4E5F6-A7B8-6C7D-0E1F-2A3B4C5D6E7F}" +EndProject +Global + GlobalSection(SolutionConfigurationPlatforms) = preSolution + Debug|Any CPU = Debug|Any CPU + Release|Any CPU = Release|Any CPU + EndGlobalSection + GlobalSection(ProjectConfigurationPlatforms) = postSolution + {A1B2C3D4-E5F6-4A5B-8C9D-0E1F2A3B4C5D}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {A1B2C3D4-E5F6-4A5B-8C9D-0E1F2A3B4C5D}.Debug|Any CPU.Build.0 = Debug|Any CPU + {A1B2C3D4-E5F6-4A5B-8C9D-0E1F2A3B4C5D}.Release|Any CPU.ActiveCfg = Release|Any CPU + {A1B2C3D4-E5F6-4A5B-8C9D-0E1F2A3B4C5D}.Release|Any CPU.Build.0 = Release|Any CPU + {B2C3D4E5-F6A7-5B6C-9D0E-1F2A3B4C5D6E}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {B2C3D4E5-F6A7-5B6C-9D0E-1F2A3B4C5D6E}.Debug|Any CPU.Build.0 = Debug|Any CPU + {B2C3D4E5-F6A7-5B6C-9D0E-1F2A3B4C5D6E}.Release|Any CPU.ActiveCfg = Release|Any CPU + {B2C3D4E5-F6A7-5B6C-9D0E-1F2A3B4C5D6E}.Release|Any CPU.Build.0 = Release|Any CPU + {C3D4E5F6-A7B8-6C7D-0E1F-2A3B4C5D6E7F}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {C3D4E5F6-A7B8-6C7D-0E1F-2A3B4C5D6E7F}.Debug|Any CPU.Build.0 = Debug|Any CPU + {C3D4E5F6-A7B8-6C7D-0E1F-2A3B4C5D6E7F}.Release|Any CPU.ActiveCfg = Release|Any CPU + {C3D4E5F6-A7B8-6C7D-0E1F-2A3B4C5D6E7F}.Release|Any CPU.Build.0 = Release|Any CPU + EndGlobalSection +EndGlobal diff --git a/dotnetv4/CloudWatchLogs/LargeQuery/README.md b/dotnetv4/CloudWatchLogs/LargeQuery/README.md new file mode 100644 index 00000000000..ccb87751d1d --- /dev/null +++ b/dotnetv4/CloudWatchLogs/LargeQuery/README.md @@ -0,0 +1,69 @@ 
+# CloudWatch Logs Large Query Example + +This folder contains a .NET feature scenario that demonstrates how to perform large-scale queries on Amazon CloudWatch Logs using recursive binary search to retrieve more than the 10,000 result limit. + +## Project Structure + +``` +LargeQuery/ +├── Actions/ +│ ├── CloudWatchLogsWrapper.cs # Wrapper class for CloudWatch Logs operations +│ └── CloudWatchLogsActions.csproj # Actions project file +├── Scenarios/ +│ ├── LargeQueryWorkflow.cs # Main workflow implementation +│ ├── README.md # Detailed scenario documentation +│ └── CloudWatchLogsScenario.csproj # Scenario project file +├── Tests/ +│ ├── LargeQueryWorkflowTests.cs # Unit tests +│ ├── Usings.cs # Global usings +│ └── CloudWatchLogsTests.csproj # Test project file +└── CloudWatchLogsLargeQuery.sln # Solution file +``` + +## What This Example Demonstrates + +- Deploying AWS resources using CloudFormation +- Generating and ingesting large volumes of log data +- Performing CloudWatch Logs Insights queries +- Using recursive binary search to retrieve more than 10,000 results +- Cleaning up resources after completion + +## Running the Example + +1. Navigate to the solution directory: + ``` + cd dotnetv4/CloudWatchLogs/LargeQuery + ``` + +2. Build the solution: + ``` + dotnet build + ``` + +3. Run the scenario: + ``` + dotnet run --project Scenarios/CloudWatchLogsScenario.csproj + ``` + +4. Run the tests: + ``` + dotnet test + ``` + +## Prerequisites + +- .NET 8.0 or later +- AWS credentials configured +- Python 3.x (for log generation) +- Permissions for CloudWatch Logs and CloudFormation + +## Related Resources + +- [CloudWatch Logs Documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/) +- [CloudWatch Logs Insights Query Syntax](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CWL_QuerySyntax.html) +- [AWS SDK for .NET](https://aws.amazon.com/sdk-for-net/) + +--- + +Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+SPDX-License-Identifier: Apache-2.0 diff --git a/dotnetv4/CloudWatchLogs/LargeQuery/Scenarios/CloudWatchLogsScenario.csproj b/dotnetv4/CloudWatchLogs/LargeQuery/Scenarios/CloudWatchLogsScenario.csproj new file mode 100644 index 00000000000..1047d133f69 --- /dev/null +++ b/dotnetv4/CloudWatchLogs/LargeQuery/Scenarios/CloudWatchLogsScenario.csproj @@ -0,0 +1,22 @@ + + + + Exe + net8.0 + enable + enable + + + + + + + + + + + + + + + diff --git a/dotnetv4/CloudWatchLogs/LargeQuery/Scenarios/LargeQueryWorkflow.cs b/dotnetv4/CloudWatchLogs/LargeQuery/Scenarios/LargeQueryWorkflow.cs new file mode 100644 index 00000000000..9d3ea05bfe8 --- /dev/null +++ b/dotnetv4/CloudWatchLogs/LargeQuery/Scenarios/LargeQueryWorkflow.cs @@ -0,0 +1,662 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +// snippet-start:[CloudWatchLogs.dotnetv3.LargeQueryWorkflow] +using System.Diagnostics; +using System.Text.RegularExpressions; +using Amazon.CloudFormation; +using Amazon.CloudFormation.Model; +using Amazon.CloudWatchLogs; +using Amazon.CloudWatchLogs.Model; +using CloudWatchLogsActions; +using Microsoft.Extensions.DependencyInjection; +using Microsoft.Extensions.Hosting; +using Microsoft.Extensions.Logging; + +namespace CloudWatchLogsScenario; + +public class LargeQueryWorkflow +{ + /* + Before running this .NET code example, set up your development environment, including your credentials. + This .NET code example performs the following tasks for the CloudWatch Logs Large Query workflow: + + 1. Prepare the Application: + - Prompt the user to deploy CloudFormation stack and generate sample logs. + - Deploy the CloudFormation template for resource creation. + - Generate 50,000 sample log entries using a Python script. + - Wait 5 minutes for logs to be fully ingested. + + 2. Execute Large Query: + - Perform recursive queries to retrieve all logs using binary search. + - Display progress for each query executed. 
+ - Show total execution time and logs found. + + 3. Clean up: + - Prompt the user to delete the CloudFormation stack and all resources. + - Destroy the CloudFormation stack and wait until removed. + */ + + public static ILogger _logger = null!; + public static CloudWatchLogsWrapper _wrapper = null!; + public static IAmazonCloudFormation _amazonCloudFormation = null!; + + private static string _logGroupName = "/workflows/cloudwatch-logs/large-query"; + private static string _logStreamName = "stream1"; + private static long _queryStartDate; + private static long _queryEndDate; + + public static bool _interactive = true; + private static string _stackName = "CloudWatchLargeQueryStack"; + private static string _stackResourcePath = "../../../../../../scenarios/features/cloudwatch_logs_large_query/resources/stack.yaml"; + private static string _pythonScriptPath = "../../../../../../scenarios/features/cloudwatch_logs_large_query/resources/create_logs.py"; + + public static async Task Main(string[] args) + { + using var host = Host.CreateDefaultBuilder(args) + .ConfigureLogging(logging => + logging.AddFilter("System", LogLevel.Debug) + .AddFilter("Microsoft", LogLevel.Information)) + .ConfigureServices((_, services) => + services.AddAWSService() + .AddAWSService() + .AddTransient() + ) + .Build(); + + if (_interactive) + { + _logger = LoggerFactory.Create(builder => { builder.AddConsole(); }) + .CreateLogger(); + + _wrapper = host.Services.GetRequiredService(); + _amazonCloudFormation = host.Services.GetRequiredService(); + } + + Console.WriteLine(new string('-', 80)); + Console.WriteLine("Welcome to the CloudWatch Logs Large Query Scenario."); + Console.WriteLine(new string('-', 80)); + Console.WriteLine("This scenario demonstrates how to perform large-scale queries on"); + Console.WriteLine("CloudWatch Logs using recursive binary search to retrieve more than"); + Console.WriteLine("the 10,000 result limit."); + Console.WriteLine(); + + try + { + Console.WriteLine(new 
string('-', 80)); + var prepareSuccess = await PrepareApplication(); + Console.WriteLine(new string('-', 80)); + + if (prepareSuccess) + { + Console.WriteLine(new string('-', 80)); + await ExecuteLargeQuery(); + Console.WriteLine(new string('-', 80)); + } + + Console.WriteLine(new string('-', 80)); + await Cleanup(); + Console.WriteLine(new string('-', 80)); + } + catch (Exception ex) + { + _logger.LogError(ex, "There was a problem with the scenario, initiating cleanup..."); + _interactive = false; + await Cleanup(); + } + + Console.WriteLine("CloudWatch Logs Large Query scenario completed."); + } + + /// + /// Prepares the application by creating the necessary resources. + /// + /// True if the application was prepared successfully. + public static async Task PrepareApplication() + { + Console.WriteLine("Preparing the application..."); + Console.WriteLine(); + + try + { + var deployStack = !_interactive || GetYesNoResponse( + "Would you like to deploy the CloudFormation stack and generate sample logs? (y/n) "); + + if (deployStack) + { + _stackName = PromptUserForStackName(); + + var deploySuccess = await DeployCloudFormationStack(_stackName); + + if (deploySuccess) + { + Console.WriteLine(); + Console.WriteLine("Generating 50,000 sample log entries..."); + var generateSuccess = await GenerateSampleLogs(); + + if (generateSuccess) + { + Console.WriteLine(); + Console.WriteLine("Sample logs created. 
Waiting 5 minutes for logs to be fully ingested..."); + await WaitWithCountdown(300); + + Console.WriteLine("Application preparation complete."); + return true; + } + } + } + else + { + _logGroupName = PromptUserForInput("Enter the log group name: ", _logGroupName); + _logStreamName = PromptUserForInput("Enter the log stream name: ", _logStreamName); + + var startDateMs = PromptUserForLong("Enter the query start date (milliseconds since epoch): "); + var endDateMs = PromptUserForLong("Enter the query end date (milliseconds since epoch): "); + + _queryStartDate = startDateMs / 1000; + _queryEndDate = endDateMs / 1000; + + Console.WriteLine("Application preparation complete."); + return true; + } + } + catch (Exception ex) + { + _logger.LogError(ex, "An error occurred while preparing the application."); + } + + Console.WriteLine("Application preparation failed."); + return false; + } + + /// + /// Deploys the CloudFormation stack with the necessary resources. + /// + /// The name of the CloudFormation stack. + /// True if the stack was deployed successfully. 
private static async Task<bool> DeployCloudFormationStack(string stackName)
{
    Console.WriteLine($"\nDeploying CloudFormation stack: {stackName}");

    try
    {
        var request = new CreateStackRequest
        {
            StackName = stackName,
            TemplateBody = await File.ReadAllTextAsync(_stackResourcePath)
        };

        var response = await _amazonCloudFormation.CreateStackAsync(request);

        if (response.HttpStatusCode == System.Net.HttpStatusCode.OK)
        {
            Console.WriteLine($"CloudFormation stack creation started: {stackName}");

            // Block until the stack reaches a terminal status.
            bool stackCreated = await WaitForStackCompletion(response.StackId);

            if (stackCreated)
            {
                Console.WriteLine("CloudFormation stack created successfully.");
                return true;
            }
            else
            {
                _logger.LogError($"CloudFormation stack creation failed: {stackName}");
                return false;
            }
        }
        else
        {
            _logger.LogError($"Failed to create CloudFormation stack: {stackName}");
            return false;
        }
    }
    catch (AlreadyExistsException)
    {
        _logger.LogWarning($"CloudFormation stack '{stackName}' already exists. Please provide a unique name.");
        var newStackName = PromptUserForStackName();

        // Guard against unbounded recursion: in non-interactive mode (or when the
        // user re-enters the same name) PromptUserForStackName returns the same
        // value, and retrying with it would just raise AlreadyExistsException again.
        if (string.Equals(newStackName, stackName, StringComparison.Ordinal))
        {
            _logger.LogError($"Cannot deploy: stack name '{stackName}' is already in use.");
            return false;
        }

        return await DeployCloudFormationStack(newStackName);
    }
    catch (Exception ex)
    {
        _logger.LogError(ex, $"An error occurred while deploying the CloudFormation stack: {stackName}");
        return false;
    }
}

/// <summary>
/// Waits for the CloudFormation stack to be in the CREATE_COMPLETE state.
/// </summary>
/// <param name="stackId">The ID of the CloudFormation stack.</param>
/// <returns>True if the stack was created successfully.</returns>
private static async Task<bool> WaitForStackCompletion(string stackId)
{
    const int maxAttempts = 30;
    const int pollIntervalMs = 10000;

    // Poll DescribeStacks until the stack reaches a terminal status or we
    // exhaust the retry budget (30 checks at 10 seconds each).
    for (var attempt = 0; attempt < maxAttempts; attempt++)
    {
        var describeResponse = await _amazonCloudFormation.DescribeStacksAsync(
            new DescribeStacksRequest
            {
                StackName = stackId
            });

        if (describeResponse.Stacks.Count > 0)
        {
            var status = describeResponse.Stacks[0].StackStatus;

            if (status == StackStatus.CREATE_COMPLETE)
            {
                return true;
            }

            if (status == StackStatus.CREATE_FAILED ||
                status == StackStatus.ROLLBACK_COMPLETE)
            {
                return false;
            }
        }

        Console.WriteLine("Waiting for CloudFormation stack creation to complete...");
        await Task.Delay(pollIntervalMs);
    }

    _logger.LogError("Timed out waiting for CloudFormation stack creation to complete.");
    return false;
}

/// <summary>
/// Generates sample logs using a Python script.
/// </summary>
/// <returns>True if logs were generated successfully.</returns>
private static async Task<bool> GenerateSampleLogs()
{
    try
    {
        if (!File.Exists(_pythonScriptPath))
        {
            _logger.LogError($"Python script not found at: {_pythonScriptPath}");
            Console.WriteLine("Please run the script manually from:");
            Console.WriteLine($"    {_pythonScriptPath}");
            return false;
        }

        var processStartInfo = new ProcessStartInfo
        {
            FileName = "python",
            Arguments = _pythonScriptPath,
            RedirectStandardOutput = true,
            RedirectStandardError = true,
            UseShellExecute = false,
            CreateNoWindow = true
        };

        using var process = Process.Start(processStartInfo);
        if (process == null)
        {
            _logger.LogError("Failed to start Python process.");
            return false;
        }

        // Read both redirected streams concurrently. Draining stdout to
        // completion before touching stderr can deadlock when the child fills
        // the stderr pipe buffer while we are still blocked on stdout.
        var outputTask = process.StandardOutput.ReadToEndAsync();
        var errorTask = process.StandardError.ReadToEndAsync();
        await Task.WhenAll(outputTask, errorTask);
        await process.WaitForExitAsync();

        var output = await outputTask;
        var error = await errorTask;

        if (process.ExitCode != 0)
        {
            _logger.LogError($"Python script failed: {error}");
            return false;
        }

        // The script reports the ingestion window in milliseconds since epoch;
        // the query start/end fields are kept in seconds, so convert here.
        var startMatch = Regex.Match(output, @"QUERY_START_DATE=(\d+)");
        var endMatch = Regex.Match(output, @"QUERY_END_DATE=(\d+)");

        if (startMatch.Success && endMatch.Success)
        {
            _queryStartDate = long.Parse(startMatch.Groups[1].Value) / 1000;
            _queryEndDate = long.Parse(endMatch.Groups[1].Value) / 1000;

            Console.WriteLine($"Query start date: {DateTimeOffset.FromUnixTimeSeconds(_queryStartDate):yyyy-MM-ddTHH:mm:ss.fffZ}");
            Console.WriteLine($"Query end date: {DateTimeOffset.FromUnixTimeSeconds(_queryEndDate):yyyy-MM-ddTHH:mm:ss.fffZ}");
            return true;
        }

        _logger.LogError("Failed to parse timestamps from script output.");
        return false;
    }
    catch (Exception ex)
    {
        _logger.LogError(ex, "An error occurred while generating sample logs.");
        return false;
    }
}

/// <summary>
/// Executes the large query workflow.
/// </summary>
public static async Task ExecuteLargeQuery()
{
    Console.WriteLine("Starting recursive query to retrieve all logs...");
    Console.WriteLine();

    // CloudWatch Logs Insights caps a single query at 10,000 results.
    var queryLimit = PromptUserForInteger("Enter the query limit (default 10000, max 10000): ", 10000);
    queryLimit = Math.Min(queryLimit, 10000);

    const string queryString = "fields @timestamp, @message | sort @timestamp asc";

    // Time the full recursive retrieval end to end.
    var timer = Stopwatch.StartNew();
    var logs = await PerformLargeQuery(_logGroupName, queryString, _queryStartDate, _queryEndDate, queryLimit);
    timer.Stop();

    Console.WriteLine();
    Console.WriteLine($"Queries finished in {timer.Elapsed.TotalSeconds:F3} seconds.");
    Console.WriteLine($"Total logs found: {logs.Count}");
    Console.WriteLine();

    var showSample = !_interactive || GetYesNoResponse("Would you like to see a sample of the logs? (y/n) ");
    if (!showSample)
    {
        return;
    }

    Console.WriteLine();
    Console.WriteLine($"Sample logs (first 10 of {logs.Count}):");
    for (var index = 0; index < logs.Count && index < 10; index++)
    {
        var timestamp = logs[index].Find(f => f.Field == "@timestamp")?.Value ?? "N/A";
        var message = logs[index].Find(f => f.Field == "@message")?.Value ?? "N/A";
        Console.WriteLine($"[{timestamp}] {message}");
    }
}

/// <summary>
/// Performs a large query using recursive binary search.
/// </summary>
/// <param name="logGroupName">The log group to query.</param>
/// <param name="queryString">The CloudWatch Logs Insights query string.</param>
/// <param name="startTime">Range start, in seconds since the epoch.</param>
/// <param name="endTime">Range end, in seconds since the epoch.</param>
/// <param name="limit">Maximum results per single query (at most 10,000).</param>
/// <returns>All result rows found within the range.</returns>
private static async Task<List<List<ResultField>>> PerformLargeQuery(
    string logGroupName,
    string queryString,
    long startTime,
    long endTime,
    int limit)
{
    var queryId = await _wrapper.StartQueryAsync(logGroupName, queryString, startTime, endTime, limit);
    if (queryId == null)
    {
        return new List<List<ResultField>>();
    }

    var results = await PollQueryResults(queryId);
    if (results == null || results.Count == 0)
    {
        return new List<List<ResultField>>();
    }

    var startDate = DateTimeOffset.FromUnixTimeSeconds(startTime).ToString("yyyy-MM-ddTHH:mm:ss.fffZ");
    var endDate = DateTimeOffset.FromUnixTimeSeconds(endTime).ToString("yyyy-MM-ddTHH:mm:ss.fffZ");
    Console.WriteLine($"Query date range: {startDate} to {endDate}. Found {results.Count} logs.");

    // Fewer results than the limit means the whole range was covered.
    if (results.Count < limit)
    {
        return results;
    }

    // The limit was hit: the range holds more logs than one query can return.
    // Split the remainder of the range (after the last returned row) in half
    // and recurse into both halves.
    var lastTimestamp = results[results.Count - 1].Find(f => f.Field == "@timestamp")?.Value;
    if (lastTimestamp == null)
    {
        return results;
    }

    var lastTime = DateTimeOffset.Parse(lastTimestamp).ToUnixTimeSeconds();

    // Guard against infinite recursion: times are truncated to whole seconds,
    // so if the last returned row falls at (or past) the end of the range the
    // sub-ranges would be identical to the current one and never shrink.
    // NOTE(review): rows sharing the boundary second can also appear twice in
    // the combined results — confirm whether callers must de-duplicate.
    if (lastTime >= endTime)
    {
        _logger.LogWarning("Cannot subdivide the query range further; logs beyond the limit in the final second may be missing.");
        return results;
    }

    var midpoint = (lastTime + endTime) / 2;

    var firstHalf = await PerformLargeQuery(logGroupName, queryString, lastTime, midpoint, limit);
    var secondHalf = await PerformLargeQuery(logGroupName, queryString, midpoint, endTime, limit);

    var allResults = new List<List<ResultField>>(results);
    allResults.AddRange(firstHalf);
    allResults.AddRange(secondHalf);

    return allResults;
}

/// <summary>
/// Polls for query results until complete.
/// </summary>
/// <param name="queryId">The ID of the running query.</param>
/// <returns>The query results when complete, or null on failure or timeout.</returns>
private static async Task<List<List<ResultField>>?> PollQueryResults(string queryId)
{
    int retryCount = 0;
    const int maxRetries = 60;
    const int retryDelay = 1000; // Poll once per second, for up to one minute.

    while (retryCount < maxRetries)
    {
        var response = await _wrapper.GetQueryResultsAsync(queryId);
        if (response == null)
        {
            return null;
        }

        if (response.Status == QueryStatus.Complete)
        {
            return response.Results;
        }

        // Any terminal non-success status means the query will never finish;
        // stop polling immediately instead of burning the retry budget.
        if (response.Status == QueryStatus.Failed ||
            response.Status == QueryStatus.Cancelled ||
            response.Status == QueryStatus.Timeout ||
            response.Status == QueryStatus.Unknown)
        {
            _logger.LogError($"Query failed with status: {response.Status}");
            return null;
        }

        await Task.Delay(retryDelay);
        retryCount++;
    }

    _logger.LogError("Timed out waiting for query results.");
    return null;
}

/// <summary>
/// Cleans up the resources created during the scenario.
/// </summary>
/// <returns>True if cleanup succeeded, or if the user declined cleanup.</returns>
public static async Task<bool> Cleanup()
{
    // In non-interactive mode, always clean up without prompting.
    var cleanup = !_interactive || GetYesNoResponse(
        "Do you want to delete the CloudFormation stack and all resources? (y/n) ");

    if (cleanup)
    {
        try
        {
            // Start with a normal (non-forced) delete; WaitForStackDeletion
            // escalates to a force delete if the first attempt fails.
            var stackDeleteSuccess = await DeleteCloudFormationStack(_stackName, false);
            return stackDeleteSuccess;
        }
        catch (Exception ex)
        {
            _logger.LogError(ex, "An error occurred while cleaning up the resources.");
            return false;
        }
    }

    Console.WriteLine($"Resources will remain. Stack name: {_stackName}, Log group: {_logGroupName}");
    _logger.LogInformation("CloudWatch Logs Large Query scenario is complete.");
    return true;
}

/// <summary>
/// Deletes the CloudFormation stack and waits for confirmation.
/// </summary>
/// <param name="stackName">The name of the stack to delete.</param>
/// <param name="forceDelete">When true, requests FORCE_DELETE_STACK mode.</param>
/// <returns>True if the stack was deleted.</returns>
private static async Task<bool> DeleteCloudFormationStack(string stackName, bool forceDelete)
{
    var request = new DeleteStackRequest
    {
        StackName = stackName,
    };

    if (forceDelete)
    {
        request.DeletionMode = DeletionMode.FORCE_DELETE_STACK;
    }

    await _amazonCloudFormation.DeleteStackAsync(request);
    Console.WriteLine($"CloudFormation stack '{stackName}' is being deleted. This may take a few minutes.");

    // Block until the deletion reaches a terminal state (or is escalated).
    bool stackDeleted = await WaitForStackDeletion(stackName, forceDelete);

    if (stackDeleted)
    {
        Console.WriteLine($"CloudFormation stack '{stackName}' has been deleted.");
        return true;
    }
    else
    {
        _logger.LogError($"Failed to delete CloudFormation stack '{stackName}'.");
        return false;
    }
}

/// <summary>
/// Waits for the stack to be deleted.
/// </summary>
/// <param name="stackName">The name of the stack being deleted.</param>
/// <param name="forceDelete">True if this wait follows a forced delete; prevents
/// a second escalation (the mutual recursion with DeleteCloudFormationStack is
/// bounded by this flag).</param>
/// <returns>True if the stack no longer exists.</returns>
private static async Task<bool> WaitForStackDeletion(string stackName, bool forceDelete)
{
    int retryCount = 0;
    const int maxRetries = 30;
    const int retryDelay = 10000;

    while (retryCount < maxRetries)
    {
        var describeStacksRequest = new DescribeStacksRequest
        {
            StackName = stackName
        };

        try
        {
            var describeStacksResponse = await _amazonCloudFormation.DescribeStacksAsync(describeStacksRequest);

            if (describeStacksResponse.Stacks.Count == 0 ||
                describeStacksResponse.Stacks[0].StackStatus == StackStatus.DELETE_COMPLETE)
            {
                return true;
            }

            // A normal delete that fails is retried once with force-delete mode.
            if (!forceDelete && describeStacksResponse.Stacks[0].StackStatus == StackStatus.DELETE_FAILED)
            {
                return await DeleteCloudFormationStack(stackName, true);
            }
        }
        catch (AmazonCloudFormationException ex) when (ex.ErrorCode == "ValidationError")
        {
            // DescribeStacks raises ValidationError for a stack that no longer
            // exists, which is exactly the outcome we are waiting for.
            return true;
        }

        Console.WriteLine($"Waiting for CloudFormation stack '{stackName}' to be deleted...");
        await Task.Delay(retryDelay);
        retryCount++;
    }

    _logger.LogError($"Timed out waiting for CloudFormation stack '{stackName}' to be deleted.");
    return false;
}

/// <summary>
/// Waits with a countdown display.
/// </summary>
/// <param name="seconds">The number of seconds to wait.</param>
private static async Task WaitWithCountdown(int seconds)
{
    for (int i = seconds; i > 0; i--)
    {
        // Carriage return redraws the same console line each second.
        Console.Write($"\rWaiting: {i} seconds remaining...    ");
        await Task.Delay(1000);
    }
    Console.WriteLine("\rWait complete.                        ");
}

/// <summary>
/// Helper method to get a yes or no response from the user.
+ /// + private static bool GetYesNoResponse(string question) + { + Console.WriteLine(question); + var ynResponse = Console.ReadLine(); + var response = ynResponse != null && ynResponse.Equals("y", StringComparison.InvariantCultureIgnoreCase); + return response; + } + + /// + /// Prompts the user for a stack name. + /// + private static string PromptUserForStackName() + { + Console.WriteLine($"Enter a name for the CloudFormation stack (default: {_stackName}): "); + if (_interactive) + { + string? input = Console.ReadLine(); + if (!string.IsNullOrWhiteSpace(input)) + { + var regex = "[a-zA-Z][-a-zA-Z0-9]*"; + if (!Regex.IsMatch(input, regex)) + { + Console.WriteLine($"Invalid stack name. Using default: {_stackName}"); + return _stackName; + } + return input; + } + } + return _stackName; + } + + /// + /// Prompts the user for input with a default value. + /// + private static string PromptUserForInput(string prompt, string defaultValue) + { + if (_interactive) + { + Console.Write(prompt); + string? input = Console.ReadLine(); + return string.IsNullOrWhiteSpace(input) ? defaultValue : input; + } + return defaultValue; + } + + /// + /// Prompts the user for an integer value. + /// + private static int PromptUserForInteger(string prompt, int defaultValue) + { + if (_interactive) + { + Console.Write(prompt); + string? input = Console.ReadLine(); + if (string.IsNullOrWhiteSpace(input) || !int.TryParse(input, out var result)) + { + return defaultValue; + } + return result; + } + return defaultValue; + } + + /// + /// Prompts the user for a long value. + /// + private static long PromptUserForLong(string prompt) + { + if (_interactive) + { + Console.Write(prompt); + string? 
input = Console.ReadLine(); + if (long.TryParse(input, out var result)) + { + return result; + } + } + return 0; + } +} +// snippet-end:[CloudWatchLogs.dotnetv3.LargeQueryWorkflow] diff --git a/dotnetv4/CloudWatchLogs/LargeQuery/Scenarios/README.md b/dotnetv4/CloudWatchLogs/LargeQuery/Scenarios/README.md new file mode 100644 index 00000000000..c5d35a8317f --- /dev/null +++ b/dotnetv4/CloudWatchLogs/LargeQuery/Scenarios/README.md @@ -0,0 +1,109 @@ +# CloudWatch Logs Large Query Workflow + +## Overview + +This example demonstrates how to perform large-scale queries on Amazon CloudWatch Logs using recursive binary search to retrieve more than the 10,000 result limit. The workflow showcases how to use CloudWatch Logs Insights queries with a recursive algorithm to fetch all matching log entries. + +## Workflow Steps + +This workflow demonstrates the following steps and tasks: + +1. **Prepare the Application** + - Prompts the user to deploy a CloudFormation stack and generate sample logs + - Deploys the CloudFormation template to create a log group and log stream + - Executes a Python script to generate 50,000 sample log entries + - Waits 5 minutes for logs to be fully ingested and indexed + +2. **Execute Large Query** + - Prompts the user for query parameters (limit) + - Performs recursive queries using binary search to retrieve all logs + - Displays progress for each query executed with date ranges and result counts + - Shows total execution time and total logs found + - Optionally displays a sample of the retrieved logs + +3. **Clean Up** + - Prompts the user to confirm deletion of resources + - Deletes the CloudFormation stack + - Waits for stack deletion to complete + +## ⚠ Important + +* Running this code might result in charges to your AWS account. +* Running the tests might result in charges to your AWS account. +* We recommend that you grant your code least privilege. At most, grant only the minimum permissions required to perform the task. 
For more information, see [Grant least privilege](https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#grant-least-privilege). +* This code is not tested in every AWS Region. For more information, see [AWS Regional Services](https://aws.amazon.com/about-aws/global-infrastructure/regional-product-services). + +## Scenario + +### Prerequisites + +Before running this workflow, ensure you have: + +- An AWS account with proper permissions to use Amazon CloudWatch Logs and AWS CloudFormation +- Python 3.x installed (for log generation script) +- AWS credentials configured + +### AWS Services Used + +This workflow uses the following AWS services: + +- Amazon CloudWatch Logs +- AWS CloudFormation + +### Resources + +The feature scenario deploys an AWS CloudFormation stack with the required resources: + +- CloudWatch Logs Log Group: `/workflows/cloudwatch-logs/large-query` +- CloudWatch Logs Log Stream: `stream1` + +### Instructions + +After the example compiles, you can run it from the command line. To do so, navigate to the folder that contains the .sln file and run the following command: + +``` +dotnet run --project Scenarios/CloudWatchLogsScenario.csproj +``` + +Alternatively, you can run the example from within your IDE. + +This starts an interactive scenario that walks you through: + +1. Deploying a CloudFormation stack with CloudWatch Logs resources +2. Generating 50,000 sample log entries +3. Performing recursive queries to retrieve all logs +4. Cleaning up resources + +## How the Recursive Query Works + +The recursive query algorithm uses binary search to retrieve more than the 10,000 result limit: + +1. Execute a query with the specified date range +2. If results < limit, return the results +3. 
If results >= limit: + - Get the timestamp of the last result + - Calculate the midpoint between the last result and the end date + - Recursively query the first half (last result to midpoint) + - Recursively query the second half (midpoint to end date) + - Concatenate all results + +This approach efficiently retrieves all matching logs by splitting the date range whenever the result limit is reached. + +## CloudWatch Logs Actions + +The workflow covers the following CloudWatch Logs API actions: + +- [`StartQuery`](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_StartQuery.html) - Initiates a CloudWatch Logs Insights query +- [`GetQueryResults`](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_GetQueryResults.html) - Retrieves results from a query +- [`PutLogEvents`](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutLogEvents.html) - Uploads log events to a log stream + +## Additional Resources + +* [CloudWatch Logs User Guide](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/WhatIsCloudWatchLogs.html) +* [CloudWatch Logs Insights Query Syntax](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CWL_QuerySyntax.html) +* [CloudWatch Logs API Reference](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/Welcome.html) + +--- + +Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+SPDX-License-Identifier: Apache-2.0 diff --git a/dotnetv4/CloudWatchLogs/LargeQuery/Tests/CloudWatchLogsTests.csproj b/dotnetv4/CloudWatchLogs/LargeQuery/Tests/CloudWatchLogsTests.csproj new file mode 100644 index 00000000000..f08fb4a50ef --- /dev/null +++ b/dotnetv4/CloudWatchLogs/LargeQuery/Tests/CloudWatchLogsTests.csproj @@ -0,0 +1,31 @@ + + + + net8.0 + enable + enable + false + true + + + + + + + + + + runtime; build; native; contentfiles; analyzers; buildtransitive + all + + + runtime; build; native; contentfiles; analyzers; buildtransitive + all + + + + + + + + diff --git a/dotnetv4/CloudWatchLogs/LargeQuery/Tests/LargeQueryWorkflowTests.cs b/dotnetv4/CloudWatchLogs/LargeQuery/Tests/LargeQueryWorkflowTests.cs new file mode 100644 index 00000000000..0951c9b2549 --- /dev/null +++ b/dotnetv4/CloudWatchLogs/LargeQuery/Tests/LargeQueryWorkflowTests.cs @@ -0,0 +1,143 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +using Amazon.CloudWatchLogs; +using Amazon.CloudWatchLogs.Model; +using CloudWatchLogsActions; +using Microsoft.Extensions.Logging; +using Moq; + +namespace CloudWatchLogsTests; + +public class LargeQueryWorkflowTests +{ + private readonly Mock _mockCloudWatchLogs; + private readonly Mock> _mockLogger; + private readonly CloudWatchLogsWrapper _wrapper; + + public LargeQueryWorkflowTests() + { + _mockCloudWatchLogs = new Mock(); + _mockLogger = new Mock>(); + _wrapper = new CloudWatchLogsWrapper(_mockCloudWatchLogs.Object, _mockLogger.Object); + } + + [Fact] + public async Task StartQueryAsync_Success_ReturnsQueryId() + { + // Arrange + var expectedQueryId = "test-query-id-123"; + _mockCloudWatchLogs + .Setup(x => x.StartQueryAsync(It.IsAny(), default)) + .ReturnsAsync(new StartQueryResponse { QueryId = expectedQueryId }); + + // Act + var result = await _wrapper.StartQueryAsync( + "/test/log-group", + "fields @timestamp, @message", + 1000, + 2000, + 10000); + + // Assert + 
Assert.Equal(expectedQueryId, result); + } + + [Fact] + public async Task StartQueryAsync_InvalidParameter_ReturnsNull() + { + // Arrange + _mockCloudWatchLogs + .Setup(x => x.StartQueryAsync(It.IsAny(), default)) + .ThrowsAsync(new InvalidParameterException("Invalid parameter")); + + // Act + var result = await _wrapper.StartQueryAsync( + "/test/log-group", + "fields @timestamp, @message", + 1000, + 2000, + 10000); + + // Assert + Assert.Null(result); + } + + [Fact] + public async Task GetQueryResultsAsync_Success_ReturnsResults() + { + // Arrange + var expectedResponse = new GetQueryResultsResponse + { + Status = QueryStatus.Complete, + Results = new List> + { + new List + { + new ResultField { Field = "@timestamp", Value = "2023-01-01T00:00:00.000Z" }, + new ResultField { Field = "@message", Value = "Test message" } + } + } + }; + + _mockCloudWatchLogs + .Setup(x => x.GetQueryResultsAsync(It.IsAny(), default)) + .ReturnsAsync(expectedResponse); + + // Act + var result = await _wrapper.GetQueryResultsAsync("test-query-id"); + + // Assert + Assert.NotNull(result); + Assert.Equal(QueryStatus.Complete, result.Status); + Assert.Single(result.Results); + } + + [Fact] + public async Task PutLogEventsAsync_Success_ReturnsTrue() + { + // Arrange + _mockCloudWatchLogs + .Setup(x => x.PutLogEventsAsync(It.IsAny(), default)) + .ReturnsAsync(new PutLogEventsResponse()); + + var logEvents = new List + { + new InputLogEvent + { + Timestamp = DateTime.UtcNow, + Message = "Test log message" + } + }; + + // Act + var result = await _wrapper.PutLogEventsAsync("/test/log-group", "test-stream", logEvents); + + // Assert + Assert.True(result); + } + + [Fact] + public async Task PutLogEventsAsync_ResourceNotFound_ReturnsFalse() + { + // Arrange + _mockCloudWatchLogs + .Setup(x => x.PutLogEventsAsync(It.IsAny(), default)) + .ThrowsAsync(new ResourceNotFoundException("Log group not found")); + + var logEvents = new List + { + new InputLogEvent + { + Timestamp = DateTime.UtcNow, + 
Message = "Test log message" + } + }; + + // Act + var result = await _wrapper.PutLogEventsAsync("/test/log-group", "test-stream", logEvents); + + // Assert + Assert.False(result); + } +} diff --git a/dotnetv4/CloudWatchLogs/LargeQuery/Tests/Usings.cs b/dotnetv4/CloudWatchLogs/LargeQuery/Tests/Usings.cs new file mode 100644 index 00000000000..4cb6a55926e --- /dev/null +++ b/dotnetv4/CloudWatchLogs/LargeQuery/Tests/Usings.cs @@ -0,0 +1,4 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +global using Xunit; diff --git a/dotnetv4/CloudWatchLogs/README.md b/dotnetv4/CloudWatchLogs/README.md new file mode 100644 index 00000000000..d5aec8c2d7e --- /dev/null +++ b/dotnetv4/CloudWatchLogs/README.md @@ -0,0 +1,35 @@ +# CloudWatch Logs Examples for .NET + +This folder contains examples for Amazon CloudWatch Logs using the AWS SDK for .NET. + +## Examples + +### Feature Scenarios + +- **[LargeQuery](LargeQuery/)** - Demonstrates how to perform large-scale queries on CloudWatch Logs using recursive binary search to retrieve more than the 10,000 result limit. + +## Running the Examples + +Each example includes its own README with specific instructions. Generally, you can: + +1. Navigate to the example directory +2. Build the solution: `dotnet build` +3. Run the example: `dotnet run --project Scenarios/{ProjectName}.csproj` +4. Run tests: `dotnet test` + +## Prerequisites + +- .NET 8.0 or later +- AWS credentials configured +- Appropriate AWS permissions for CloudWatch Logs + +## Additional Resources + +- [CloudWatch Logs Documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/) +- [AWS SDK for .NET Documentation](https://docs.aws.amazon.com/sdk-for-net/) +- [CloudWatch Logs API Reference](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/) + +--- + +Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+SPDX-License-Identifier: Apache-2.0 diff --git a/scenarios/features/cloudwatch_logs_large_query/resources/create_logs.py b/scenarios/features/cloudwatch_logs_large_query/resources/create_logs.py new file mode 100644 index 00000000000..882bbdc1eb7 --- /dev/null +++ b/scenarios/features/cloudwatch_logs_large_query/resources/create_logs.py @@ -0,0 +1,70 @@ +#!/usr/bin/env python3 +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +""" +Script to generate and upload 50,000 sample log entries to CloudWatch Logs. +This script creates logs spanning 5 minutes and uploads them in batches. +""" + +import boto3 +import time +from datetime import datetime + +LOG_GROUP_NAME = "/workflows/cloudwatch-logs/large-query" +LOG_STREAM_NAME = "stream1" +TOTAL_ENTRIES = 50000 +ENTRIES_PER_BATCH = 10000 +FIVE_MINUTES_MS = 5 * 60 * 1000 + + +def main(): + """Generate and upload log entries to CloudWatch Logs.""" + client = boto3.client('logs') + + # Calculate timestamps + start_time_ms = int(time.time() * 1000) + timestamp_increment = FIVE_MINUTES_MS // TOTAL_ENTRIES + + print(f"Generating {TOTAL_ENTRIES} log entries...") + print(f"QUERY_START_DATE={start_time_ms}") + + entry_count = 0 + current_timestamp = start_time_ms + + # Generate and upload logs in batches + num_batches = TOTAL_ENTRIES // ENTRIES_PER_BATCH + + for batch_num in range(num_batches): + log_events = [] + + for i in range(ENTRIES_PER_BATCH): + log_events.append({ + 'timestamp': current_timestamp, + 'message': f'Entry {entry_count}' + }) + + entry_count += 1 + current_timestamp += timestamp_increment + + # Upload batch + try: + client.put_log_events( + logGroupName=LOG_GROUP_NAME, + logStreamName=LOG_STREAM_NAME, + logEvents=log_events + ) + print(f"Uploaded batch {batch_num + 1}/{num_batches}") + except Exception as e: + print(f"Error uploading batch {batch_num + 1}: {e}") + return 1 + + end_time_ms = current_timestamp - timestamp_increment + 
print(f"QUERY_END_DATE={end_time_ms}") + print(f"Successfully uploaded {TOTAL_ENTRIES} log entries") + + return 0 + + +if __name__ == "__main__": + exit(main()) diff --git a/steering_docs/dotnet-tech/scenario.md b/steering_docs/dotnet-tech/scenario.md index 271fac811af..8e88034ce29 100644 --- a/steering_docs/dotnet-tech/scenario.md +++ b/steering_docs/dotnet-tech/scenario.md @@ -3,6 +3,12 @@ ## Purpose Generate feature scenarios that demonstrate complete workflows using multiple service operations in a guided, educational manner. Implementation must be based on the service SPECIFICATION.md file. +## Target Directory +**IMPORTANT**: All new feature scenarios MUST be created in the `dotnetv4` directory, NOT `dotnetv3`. + +- **New scenarios**: `dotnetv4/{Service}/` +- **Legacy examples**: `dotnetv3/{Service}/` (Must NOT add new examples here) + ## Requirements - **Specification-Driven**: MUST read the `scenarios/features/{service_feature}/SPECIFICATION.md` - **Interactive**: Use Console.WriteLine and Console.ReadLine for user input and guidance @@ -19,7 +25,7 @@ Generate feature scenarios that demonstrate complete workflows using multiple se Feature scenarios use a multi-project structure with separate projects for actions, scenarios, and tests: ``` -dotnetv3/{Service}/ +dotnetv4/{Service}/ ├── {Service}.sln # Solution file ├── Actions/ │ ├── {Service}Wrapper.cs # Wrapper class for service operations @@ -35,6 +41,8 @@ dotnetv3/{Service}/ └── {Service}Tests.csproj # Test project file (references Scenarios) ``` +**Note**: Use `dotnetv4` for all new feature scenarios. The `dotnetv3` directory is for legacy examples only. 
+ ## MANDATORY Pre-Implementation Steps ### Step 1: Read Scenario Specification @@ -538,16 +546,16 @@ public class {Service}Workflow Exe - net6.0 + net8.0 enable enable - - - + + + @@ -585,23 +593,24 @@ public class {Service}Workflow - net6.0 + net8.0 enable enable false + true - - - - + + + + - + runtime; build; native; contentfiles; analyzers; buildtransitive all - + runtime; build; native; contentfiles; analyzers; buildtransitive all From 249a5351ca5e9fcf78446a397e5a44f07d01d096 Mon Sep 17 00:00:00 2001 From: Rachel Hagerman <110480692+rlhagerm@users.noreply.github.com> Date: Tue, 18 Nov 2025 14:35:29 -0600 Subject: [PATCH 03/23] Adding stack deployment --- .kiro/settings/mcp.json | 14 ++ .../Actions/CloudWatchLogsActions.csproj | 12 +- .../Actions/CloudWatchLogsWrapper.cs | 16 +- dotnetv4/CloudWatchLogs/LargeQuery/README.md | 1 - .../Scenarios/CloudWatchLogsScenario.csproj | 10 +- .../Scenarios/LargeQueryWorkflow.cs | 170 ++++++++++++------ .../Tests/CloudWatchLogsTests.csproj | 15 +- .../resources/stack.yaml | 2 +- steering_docs/dotnet-tech/scenario.md | 43 ++--- 9 files changed, 175 insertions(+), 108 deletions(-) diff --git a/.kiro/settings/mcp.json b/.kiro/settings/mcp.json index 42fb25a070f..d9c0a7bbd0c 100644 --- a/.kiro/settings/mcp.json +++ b/.kiro/settings/mcp.json @@ -35,6 +35,20 @@ "aws___search_documentation", "aws___read_documentation" ] + }, + "codeloom-mcp": { + "disabled": false, + "command": "code-loom-mcp", + "args": [], + "env": {}, + "transportType": "stdio", + "autoApprove": [ + "loomer", + "search_aws_docs", + "read_aws_docs", + "query_knowledge_bases", + "list_knowledge_bases" + ] } } } \ No newline at end of file diff --git a/dotnetv4/CloudWatchLogs/LargeQuery/Actions/CloudWatchLogsActions.csproj b/dotnetv4/CloudWatchLogs/LargeQuery/Actions/CloudWatchLogsActions.csproj index 3aa4085c546..4934ade0dec 100644 --- a/dotnetv4/CloudWatchLogs/LargeQuery/Actions/CloudWatchLogsActions.csproj +++ 
b/dotnetv4/CloudWatchLogs/LargeQuery/Actions/CloudWatchLogsActions.csproj @@ -1,18 +1,18 @@ - Exe + Library net8.0 enable enable - - - - - + + + + + diff --git a/dotnetv4/CloudWatchLogs/LargeQuery/Actions/CloudWatchLogsWrapper.cs b/dotnetv4/CloudWatchLogs/LargeQuery/Actions/CloudWatchLogsWrapper.cs index 98e5c1fcc14..eb09c41cfd1 100644 --- a/dotnetv4/CloudWatchLogs/LargeQuery/Actions/CloudWatchLogsWrapper.cs +++ b/dotnetv4/CloudWatchLogs/LargeQuery/Actions/CloudWatchLogsWrapper.cs @@ -1,7 +1,7 @@ // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 -// snippet-start:[CloudWatchLogs.dotnetv3.CloudWatchLogsWrapper] +// snippet-start:[CloudWatchLogs.dotnetv4.CloudWatchLogsWrapper] using Amazon.CloudWatchLogs; using Amazon.CloudWatchLogs.Model; using Microsoft.Extensions.Logging; @@ -27,7 +27,7 @@ public CloudWatchLogsWrapper(IAmazonCloudWatchLogs amazonCloudWatchLogs, ILogger _logger = logger; } - // snippet-start:[CloudWatchLogs.dotnetv3.StartQuery] + // snippet-start:[CloudWatchLogs.dotnetv4.StartQuery] /// /// Starts a CloudWatch Logs Insights query. /// @@ -74,9 +74,9 @@ public CloudWatchLogsWrapper(IAmazonCloudWatchLogs amazonCloudWatchLogs, ILogger return null; } } - // snippet-end:[CloudWatchLogs.dotnetv3.StartQuery] + // snippet-end:[CloudWatchLogs.dotnetv4.StartQuery] - // snippet-start:[CloudWatchLogs.dotnetv3.GetQueryResults] + // snippet-start:[CloudWatchLogs.dotnetv4.GetQueryResults] /// /// Gets the results of a CloudWatch Logs Insights query. /// @@ -105,9 +105,9 @@ public CloudWatchLogsWrapper(IAmazonCloudWatchLogs amazonCloudWatchLogs, ILogger return null; } } - // snippet-end:[CloudWatchLogs.dotnetv3.GetQueryResults] + // snippet-end:[CloudWatchLogs.dotnetv4.GetQueryResults] - // snippet-start:[CloudWatchLogs.dotnetv3.PutLogEvents] + // snippet-start:[CloudWatchLogs.dotnetv4.PutLogEvents] /// /// Puts log events to a CloudWatch Logs log stream. 
/// @@ -143,6 +143,6 @@ public async Task PutLogEventsAsync( return false; } } - // snippet-end:[CloudWatchLogs.dotnetv3.PutLogEvents] + // snippet-end:[CloudWatchLogs.dotnetv4.PutLogEvents] } -// snippet-end:[CloudWatchLogs.dotnetv3.CloudWatchLogsWrapper] +// snippet-end:[CloudWatchLogs.dotnetv4.CloudWatchLogsWrapper] diff --git a/dotnetv4/CloudWatchLogs/LargeQuery/README.md b/dotnetv4/CloudWatchLogs/LargeQuery/README.md index ccb87751d1d..c530567eac2 100644 --- a/dotnetv4/CloudWatchLogs/LargeQuery/README.md +++ b/dotnetv4/CloudWatchLogs/LargeQuery/README.md @@ -54,7 +54,6 @@ LargeQuery/ - .NET 8.0 or later - AWS credentials configured -- Python 3.x (for log generation) - Permissions for CloudWatch Logs and CloudFormation ## Related Resources diff --git a/dotnetv4/CloudWatchLogs/LargeQuery/Scenarios/CloudWatchLogsScenario.csproj b/dotnetv4/CloudWatchLogs/LargeQuery/Scenarios/CloudWatchLogsScenario.csproj index 1047d133f69..fcf70daf1c3 100644 --- a/dotnetv4/CloudWatchLogs/LargeQuery/Scenarios/CloudWatchLogsScenario.csproj +++ b/dotnetv4/CloudWatchLogs/LargeQuery/Scenarios/CloudWatchLogsScenario.csproj @@ -8,11 +8,11 @@ - - - - - + + + + + diff --git a/dotnetv4/CloudWatchLogs/LargeQuery/Scenarios/LargeQueryWorkflow.cs b/dotnetv4/CloudWatchLogs/LargeQuery/Scenarios/LargeQueryWorkflow.cs index 9d3ea05bfe8..7f7a27e73ed 100644 --- a/dotnetv4/CloudWatchLogs/LargeQuery/Scenarios/LargeQueryWorkflow.cs +++ b/dotnetv4/CloudWatchLogs/LargeQuery/Scenarios/LargeQueryWorkflow.cs @@ -1,7 +1,7 @@ // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 -// snippet-start:[CloudWatchLogs.dotnetv3.LargeQueryWorkflow] +// snippet-start:[CloudWatchLogs.dotnetv4.LargeQueryWorkflow] using System.Diagnostics; using System.Text.RegularExpressions; using Amazon.CloudFormation; @@ -24,7 +24,7 @@ public class LargeQueryWorkflow 1. Prepare the Application: - Prompt the user to deploy CloudFormation stack and generate sample logs. 
- Deploy the CloudFormation template for resource creation. - - Generate 50,000 sample log entries using a Python script. + - Generate 50,000 sample log entries using CloudWatch Logs API. - Wait 5 minutes for logs to be fully ingested. 2. Execute Large Query: @@ -48,8 +48,7 @@ public class LargeQueryWorkflow public static bool _interactive = true; private static string _stackName = "CloudWatchLargeQueryStack"; - private static string _stackResourcePath = "../../../../../../scenarios/features/cloudwatch_logs_large_query/resources/stack.yaml"; - private static string _pythonScriptPath = "../../../../../../scenarios/features/cloudwatch_logs_large_query/resources/create_logs.py"; + private static string _stackResourcePath = "../../../../../../../scenarios/features/cloudwatch_logs_large_query/resources/stack.yaml"; public static async Task Main(string[] args) { @@ -147,8 +146,8 @@ public static async Task PrepareApplication() } else { - _logGroupName = PromptUserForInput("Enter the log group name: ", _logGroupName); - _logStreamName = PromptUserForInput("Enter the log stream name: ", _logStreamName); + _logGroupName = PromptUserForInput("Enter the log group name ", _logGroupName); + _logStreamName = PromptUserForInput("Enter the log stream name ", _logStreamName); var startDateMs = PromptUserForLong("Enter the query start date (milliseconds since epoch): "); var endDateMs = PromptUserForLong("Enter the query end date (milliseconds since epoch): "); @@ -267,65 +266,65 @@ private static async Task WaitForStackCompletion(string stackId) } /// - /// Generates sample logs using a Python script. + /// Generates sample logs directly using CloudWatch Logs API. + /// Creates 50,000 log entries spanning 5 minutes. /// /// True if logs were generated successfully. 
private static async Task GenerateSampleLogs() { + const int totalEntries = 50000; + const int entriesPerBatch = 10000; + const int fiveMinutesMs = 5 * 60 * 1000; + try { - if (!File.Exists(_pythonScriptPath)) - { - _logger.LogError($"Python script not found at: {_pythonScriptPath}"); - Console.WriteLine("Please run the script manually from:"); - Console.WriteLine($" {_pythonScriptPath}"); - return false; - } + // Calculate timestamps + var startTimeMs = DateTimeOffset.UtcNow.ToUnixTimeMilliseconds(); + var timestampIncrement = fiveMinutesMs / totalEntries; - var processStartInfo = new ProcessStartInfo - { - FileName = "python", - Arguments = _pythonScriptPath, - RedirectStandardOutput = true, - RedirectStandardError = true, - UseShellExecute = false, - CreateNoWindow = true - }; + Console.WriteLine($"Generating {totalEntries} log entries..."); - using var process = Process.Start(processStartInfo); - if (process == null) + var entryCount = 0; + var currentTimestamp = startTimeMs; + var numBatches = totalEntries / entriesPerBatch; + + // Generate and upload logs in batches + for (int batchNum = 0; batchNum < numBatches; batchNum++) { - _logger.LogError("Failed to start Python process."); - return false; - } + var logEvents = new List(); - var output = await process.StandardOutput.ReadToEndAsync(); - var error = await process.StandardError.ReadToEndAsync(); - await process.WaitForExitAsync(); + for (int i = 0; i < entriesPerBatch; i++) + { + logEvents.Add(new InputLogEvent + { + Timestamp = DateTimeOffset.FromUnixTimeMilliseconds(currentTimestamp).UtcDateTime, + Message = $"Entry {entryCount}" + }); - if (process.ExitCode != 0) - { - _logger.LogError($"Python script failed: {error}"); - return false; + entryCount++; + currentTimestamp += timestampIncrement; + } + + // Upload batch + var success = await _wrapper.PutLogEventsAsync(_logGroupName, _logStreamName, logEvents); + if (!success) + { + _logger.LogError($"Failed to upload batch {batchNum + 1}/{numBatches}"); + 
return false; + } + + Console.WriteLine($"Uploaded batch {batchNum + 1}/{numBatches}"); } - var startMatch = Regex.Match(output, @"QUERY_START_DATE=(\d+)"); - var endMatch = Regex.Match(output, @"QUERY_END_DATE=(\d+)"); + // Set query date range (convert milliseconds to seconds for query API) + _queryStartDate = startTimeMs / 1000; + _queryEndDate = (currentTimestamp - timestampIncrement) / 1000; - if (startMatch.Success && endMatch.Success) - { - _queryStartDate = long.Parse(startMatch.Groups[1].Value) / 1000; - _queryEndDate = long.Parse(endMatch.Groups[1].Value) / 1000; + Console.WriteLine($"Query start date: {DateTimeOffset.FromUnixTimeSeconds(_queryStartDate):yyyy-MM-ddTHH:mm:ss.fffZ}"); + Console.WriteLine($"Query end date: {DateTimeOffset.FromUnixTimeSeconds(_queryEndDate):yyyy-MM-ddTHH:mm:ss.fffZ}"); + Console.WriteLine($"Successfully uploaded {totalEntries} log entries"); - Console.WriteLine($"Query start date: {DateTimeOffset.FromUnixTimeSeconds(_queryStartDate):yyyy-MM-ddTHH:mm:ss.fffZ}"); - Console.WriteLine($"Query end date: {DateTimeOffset.FromUnixTimeSeconds(_queryEndDate):yyyy-MM-ddTHH:mm:ss.fffZ}"); - return true; - } - else - { - _logger.LogError("Failed to parse timestamps from script output."); - return false; - } + return true; } catch (Exception ex) { @@ -342,7 +341,7 @@ public static async Task ExecuteLargeQuery() Console.WriteLine("Starting recursive query to retrieve all logs..."); Console.WriteLine(); - var queryLimit = PromptUserForInteger("Enter the query limit (default 10000, max 10000): ", 10000); + var queryLimit = PromptUserForInteger("Enter the query limit (max 10000) ", 10000); if (queryLimit > 10000) queryLimit = 10000; var queryString = "fields @timestamp, @message | sort @timestamp asc"; @@ -407,17 +406,70 @@ private static async Task>> PerformLargeQuery( return results; } + // Parse the timestamp - CloudWatch returns ISO 8601 format with milliseconds var lastTime = DateTimeOffset.Parse(lastTimestamp).ToUnixTimeSeconds(); + + // 
Check if there's any time range left to query + if (lastTime >= endTime) + { + return results; + } + + // Calculate midpoint between last result and end time var midpoint = (lastTime + endTime) / 2; + + // Ensure we have enough range to split + if (midpoint <= lastTime || midpoint >= endTime) + { + // Range too small to split, just query the remaining range + var remainingResults = await PerformLargeQuery(logGroupName, queryString, lastTime, endTime, limit); + + var allResults = new List>(results); + // Skip the first result if it's a duplicate of the last result from previous query + if (remainingResults.Count > 0) + { + var firstTimestamp = remainingResults[0].Find(f => f.Field == "@timestamp")?.Value; + if (firstTimestamp == lastTimestamp) + { + remainingResults.RemoveAt(0); + } + } + allResults.AddRange(remainingResults); + return allResults; + } + // Split the remaining range in half var results1 = await PerformLargeQuery(logGroupName, queryString, lastTime, midpoint, limit); var results2 = await PerformLargeQuery(logGroupName, queryString, midpoint, endTime, limit); - var allResults = new List>(results); - allResults.AddRange(results1); - allResults.AddRange(results2); + var combinedResults = new List>(results); + + // Remove duplicate from results1 if it matches the last result + if (results1.Count > 0) + { + var firstTimestamp1 = results1[0].Find(f => f.Field == "@timestamp")?.Value; + if (firstTimestamp1 == lastTimestamp) + { + results1.RemoveAt(0); + } + } + + combinedResults.AddRange(results1); + + // Remove duplicate from results2 if it matches the last result from results1 + if (results2.Count > 0 && results1.Count > 0) + { + var lastTimestamp1 = results1[results1.Count - 1].Find(f => f.Field == "@timestamp")?.Value; + var firstTimestamp2 = results2[0].Find(f => f.Field == "@timestamp")?.Value; + if (firstTimestamp2 == lastTimestamp1) + { + results2.RemoveAt(0); + } + } + + combinedResults.AddRange(results2); - return allResults; + return 
combinedResults; } /// @@ -592,9 +644,9 @@ private static bool GetYesNoResponse(string question) /// private static string PromptUserForStackName() { - Console.WriteLine($"Enter a name for the CloudFormation stack (default: {_stackName}): "); if (_interactive) { + Console.Write($"Enter a name for the CloudFormation stack (press Enter for default '{_stackName}'): "); string? input = Console.ReadLine(); if (!string.IsNullOrWhiteSpace(input)) { @@ -617,7 +669,7 @@ private static string PromptUserForInput(string prompt, string defaultValue) { if (_interactive) { - Console.Write(prompt); + Console.Write($"{prompt}(press Enter for default '{defaultValue}'): "); string? input = Console.ReadLine(); return string.IsNullOrWhiteSpace(input) ? defaultValue : input; } @@ -631,7 +683,7 @@ private static int PromptUserForInteger(string prompt, int defaultValue) { if (_interactive) { - Console.Write(prompt); + Console.Write($"{prompt}(press Enter for default '{defaultValue}'): "); string? input = Console.ReadLine(); if (string.IsNullOrWhiteSpace(input) || !int.TryParse(input, out var result)) { @@ -659,4 +711,4 @@ private static long PromptUserForLong(string prompt) return 0; } } -// snippet-end:[CloudWatchLogs.dotnetv3.LargeQueryWorkflow] +// snippet-end:[CloudWatchLogs.dotnetv4.LargeQueryWorkflow] diff --git a/dotnetv4/CloudWatchLogs/LargeQuery/Tests/CloudWatchLogsTests.csproj b/dotnetv4/CloudWatchLogs/LargeQuery/Tests/CloudWatchLogsTests.csproj index f08fb4a50ef..8222f0ee7fa 100644 --- a/dotnetv4/CloudWatchLogs/LargeQuery/Tests/CloudWatchLogsTests.csproj +++ b/dotnetv4/CloudWatchLogs/LargeQuery/Tests/CloudWatchLogsTests.csproj @@ -6,19 +6,20 @@ enable false true + $(NoWarn);NETSDK1206 - - - - - - + + + + + + runtime; build; native; contentfiles; analyzers; buildtransitive all - + runtime; build; native; contentfiles; analyzers; buildtransitive all diff --git a/scenarios/features/cloudwatch_logs_large_query/resources/stack.yaml 
b/scenarios/features/cloudwatch_logs_large_query/resources/stack.yaml index ed9f451193d..25937630e31 100644 --- a/scenarios/features/cloudwatch_logs_large_query/resources/stack.yaml +++ b/scenarios/features/cloudwatch_logs_large_query/resources/stack.yaml @@ -2,7 +2,7 @@ Resources: LargeQueryLogGroup: Type: AWS::Logs::LogGroup Properties: - LogGroupName: /workflows/cloudwatch-logs/large-query + LogGroupName: /workflows/cloudwatch-logs/large-query12 LargeQueryLogGroupStream1: Type: AWS::Logs::LogStream Properties: diff --git a/steering_docs/dotnet-tech/scenario.md b/steering_docs/dotnet-tech/scenario.md index 8e88034ce29..51cb61865d7 100644 --- a/steering_docs/dotnet-tech/scenario.md +++ b/steering_docs/dotnet-tech/scenario.md @@ -67,7 +67,7 @@ From the specification, identify: // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 -// snippet-start:[{Service}.dotnetv3.{Service}Workflow] +// snippet-start:[{Service}.dotnetv4.{Service}Workflow] using Amazon.{Service}; using Amazon.CloudFormation; using Amazon.CloudFormation.Model; @@ -534,7 +534,7 @@ public class {Service}Workflow return ""; } } -// snippet-end:[{Service}.dotnetv3.{Service}Workflow] +// snippet-end:[{Service}.dotnetv4.{Service}Workflow] ``` ## Project Files @@ -552,10 +552,10 @@ public class {Service}Workflow - - - - + + + + @@ -574,10 +574,10 @@ public class {Service}Workflow - - - - + + + + @@ -598,19 +598,20 @@ public class {Service}Workflow enable false true + $(NoWarn);NETSDK1206 - - - - - - + + + + + + runtime; build; native; contentfiles; analyzers; buildtransitive all - + runtime; build; native; contentfiles; analyzers; buildtransitive all @@ -758,7 +759,7 @@ foreach (var item in items) // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
// SPDX-License-Identifier: Apache-2.0 -// snippet-start:[{Service}.dotnetv3.{Service}Wrapper] +// snippet-start:[{Service}.dotnetv4.{Service}Wrapper] using Amazon.{Service}; using Amazon.{Service}.Model; using Microsoft.Extensions.Logging; @@ -784,7 +785,7 @@ public class {Service}Wrapper _logger = logger; } - // snippet-start:[{Service}.dotnetv3.OperationName] + // snippet-start:[{Service}.dotnetv4.OperationName] /// /// Description of what this operation does. /// @@ -820,9 +821,9 @@ public class {Service}Wrapper return false; } } - // snippet-end:[{Service}.dotnetv3.OperationName] + // snippet-end:[{Service}.dotnetv4.OperationName] } -// snippet-end:[{Service}.dotnetv3.{Service}Wrapper] +// snippet-end:[{Service}.dotnetv4.{Service}Wrapper] ``` ### Wrapper Method Guidelines From ee9bfc110625ceb10fbac36ce7b572f855425292 Mon Sep 17 00:00:00 2001 From: Rachel Hagerman <110480692+rlhagerm@users.noreply.github.com> Date: Wed, 19 Nov 2025 14:14:05 -0600 Subject: [PATCH 04/23] Update LargeQueryWorkflow.cs --- .../Scenarios/LargeQueryWorkflow.cs | 131 ++++++++++-------- 1 file changed, 76 insertions(+), 55 deletions(-) diff --git a/dotnetv4/CloudWatchLogs/LargeQuery/Scenarios/LargeQueryWorkflow.cs b/dotnetv4/CloudWatchLogs/LargeQuery/Scenarios/LargeQueryWorkflow.cs index 7f7a27e73ed..cbb87b5b4e1 100644 --- a/dotnetv4/CloudWatchLogs/LargeQuery/Scenarios/LargeQueryWorkflow.cs +++ b/dotnetv4/CloudWatchLogs/LargeQuery/Scenarios/LargeQueryWorkflow.cs @@ -393,83 +393,104 @@ private static async Task>> PerformLargeQuery( var startDate = DateTimeOffset.FromUnixTimeSeconds(startTime).ToString("yyyy-MM-ddTHH:mm:ss.fffZ"); var endDate = DateTimeOffset.FromUnixTimeSeconds(endTime).ToString("yyyy-MM-ddTHH:mm:ss.fffZ"); - Console.WriteLine($"Query date range: {startDate} to {endDate}. Found {results.Count} logs."); + Console.WriteLine($"Query date range: {startDate} ({startTime}s) to {endDate} ({endTime}s). 
Found {results.Count} logs."); if (results.Count < limit) { + Console.WriteLine($" -> Returning {results.Count} logs (less than limit of {limit})"); return results; } - var lastTimestamp = results[results.Count - 1].Find(f => f.Field == "@timestamp")?.Value; - if (lastTimestamp == null) + Console.WriteLine($" -> Hit limit of {limit}. Need to split and recurse."); + + // Get the timestamp of the last log (sorted to find the actual last one) + var lastLogTimestamp = GetLastLogTimestamp(results); + if (lastLogTimestamp == null) { + Console.WriteLine($" -> No timestamp found in results. Returning {results.Count} logs."); return results; } - // Parse the timestamp - CloudWatch returns ISO 8601 format with milliseconds - var lastTime = DateTimeOffset.Parse(lastTimestamp).ToUnixTimeSeconds(); + Console.WriteLine($" -> Last log timestamp: {lastLogTimestamp}"); + + // Parse the timestamp and add 1 millisecond to avoid querying the same log again + var lastLogDate = DateTimeOffset.Parse(lastLogTimestamp + " +0000"); + Console.WriteLine($" -> Last log as DateTimeOffset: {lastLogDate:yyyy-MM-ddTHH:mm:ss.fffZ} ({lastLogDate.ToUnixTimeSeconds()}s)"); + + var offsetLastLogDate = lastLogDate.AddMilliseconds(1); + Console.WriteLine($" -> Offset timestamp (last + 1ms): {offsetLastLogDate:yyyy-MM-ddTHH:mm:ss.fffZ} ({offsetLastLogDate.ToUnixTimeSeconds()}s)"); + // Convert back to seconds for the API + var offsetLastLogTime = offsetLastLogDate.ToUnixTimeSeconds(); + + Console.WriteLine($" -> Comparing: offsetLastLogTime={offsetLastLogTime}s vs endTime={endTime}s"); + Console.WriteLine($" -> End time as date: {DateTimeOffset.FromUnixTimeSeconds(endTime):yyyy-MM-ddTHH:mm:ss.fffZ}"); + // Check if there's any time range left to query - if (lastTime >= endTime) + if (offsetLastLogTime >= endTime) { + Console.WriteLine($" -> No time range left to query. 
Offset time ({offsetLastLogTime}s) >= end time ({endTime}s)"); return results; } - // Calculate midpoint between last result and end time - var midpoint = (lastTime + endTime) / 2; - - // Ensure we have enough range to split - if (midpoint <= lastTime || midpoint >= endTime) - { - // Range too small to split, just query the remaining range - var remainingResults = await PerformLargeQuery(logGroupName, queryString, lastTime, endTime, limit); - - var allResults = new List>(results); - // Skip the first result if it's a duplicate of the last result from previous query - if (remainingResults.Count > 0) - { - var firstTimestamp = remainingResults[0].Find(f => f.Field == "@timestamp")?.Value; - if (firstTimestamp == lastTimestamp) - { - remainingResults.RemoveAt(0); - } - } - allResults.AddRange(remainingResults); - return allResults; - } - - // Split the remaining range in half - var results1 = await PerformLargeQuery(logGroupName, queryString, lastTime, midpoint, limit); - var results2 = await PerformLargeQuery(logGroupName, queryString, midpoint, endTime, limit); - - var combinedResults = new List>(results); + // Split the remaining date range in half + var (range1Start, range1End, range2Start, range2End) = SplitDateRange(offsetLastLogTime, endTime); - // Remove duplicate from results1 if it matches the last result - if (results1.Count > 0) - { - var firstTimestamp1 = results1[0].Find(f => f.Field == "@timestamp")?.Value; - if (firstTimestamp1 == lastTimestamp) - { - results1.RemoveAt(0); - } - } + var range1StartDate = DateTimeOffset.FromUnixTimeSeconds(range1Start).ToString("yyyy-MM-ddTHH:mm:ss.fffZ"); + var range1EndDate = DateTimeOffset.FromUnixTimeSeconds(range1End).ToString("yyyy-MM-ddTHH:mm:ss.fffZ"); + var range2StartDate = DateTimeOffset.FromUnixTimeSeconds(range2Start).ToString("yyyy-MM-ddTHH:mm:ss.fffZ"); + var range2EndDate = DateTimeOffset.FromUnixTimeSeconds(range2End).ToString("yyyy-MM-ddTHH:mm:ss.fffZ"); - combinedResults.AddRange(results1); + 
Console.WriteLine($" -> Splitting remaining range:"); + Console.WriteLine($" Range 1: {range1StartDate} ({range1Start}s) to {range1EndDate} ({range1End}s)"); + Console.WriteLine($" Range 2: {range2StartDate} ({range2Start}s) to {range2EndDate} ({range2End}s)"); + + // Query both halves recursively + Console.WriteLine($" -> Querying range 1..."); + var results1 = await PerformLargeQuery(logGroupName, queryString, range1Start, range1End, limit); + Console.WriteLine($" -> Range 1 returned {results1.Count} logs"); - // Remove duplicate from results2 if it matches the last result from results1 - if (results2.Count > 0 && results1.Count > 0) + Console.WriteLine($" -> Querying range 2..."); + var results2 = await PerformLargeQuery(logGroupName, queryString, range2Start, range2End, limit); + Console.WriteLine($" -> Range 2 returned {results2.Count} logs"); + + // Combine all results + var allResults = new List>(results); + allResults.AddRange(results1); + allResults.AddRange(results2); + + Console.WriteLine($" -> Combined total: {allResults.Count} logs ({results.Count} + {results1.Count} + {results2.Count})"); + + return allResults; + } + + /// + /// Gets the timestamp string of the most recent log from a list of logs. + /// Sorts timestamps to find the actual last one. + /// + private static string? GetLastLogTimestamp(List> logs) + { + var timestamps = logs + .Select(log => log.Find(f => f.Field == "@timestamp")?.Value) + .Where(t => !string.IsNullOrEmpty(t)) + .OrderBy(t => t) + .ToList(); + + if (timestamps.Count == 0) { - var lastTimestamp1 = results1[results1.Count - 1].Find(f => f.Field == "@timestamp")?.Value; - var firstTimestamp2 = results2[0].Find(f => f.Field == "@timestamp")?.Value; - if (firstTimestamp2 == lastTimestamp1) - { - results2.RemoveAt(0); - } + return null; } - - combinedResults.AddRange(results2); - return combinedResults; + return timestamps[timestamps.Count - 1]; + } + + /// + /// Splits a date range in half. 
+ /// + private static (long range1Start, long range1End, long range2Start, long range2End) SplitDateRange(long startTime, long endTime) + { + var midpoint = startTime + (endTime - startTime) / 2; + return (startTime, midpoint, midpoint, endTime); } /// From 32cdede99d61d58abc73026f154b0a87b6fd7ff8 Mon Sep 17 00:00:00 2001 From: Rachel Hagerman <110480692+rlhagerm@users.noreply.github.com> Date: Wed, 19 Nov 2025 14:38:17 -0600 Subject: [PATCH 05/23] duplicate fixes. --- .../Scenarios/LargeQueryWorkflow.cs | 67 ++++++++++++++++++- 1 file changed, 65 insertions(+), 2 deletions(-) diff --git a/dotnetv4/CloudWatchLogs/LargeQuery/Scenarios/LargeQueryWorkflow.cs b/dotnetv4/CloudWatchLogs/LargeQuery/Scenarios/LargeQueryWorkflow.cs index cbb87b5b4e1..61c1d572a6b 100644 --- a/dotnetv4/CloudWatchLogs/LargeQuery/Scenarios/LargeQueryWorkflow.cs +++ b/dotnetv4/CloudWatchLogs/LargeQuery/Scenarios/LargeQueryWorkflow.cs @@ -353,6 +353,27 @@ public static async Task ExecuteLargeQuery() Console.WriteLine(); Console.WriteLine($"Queries finished in {stopwatch.Elapsed.TotalSeconds:F3} seconds."); Console.WriteLine($"Total logs found: {allResults.Count}"); + + // Check for duplicates + Console.WriteLine(); + Console.WriteLine("Checking for duplicate logs..."); + var duplicates = FindDuplicateLogs(allResults); + if (duplicates.Count > 0) + { + Console.WriteLine($"WARNING: Found {duplicates.Count} duplicate log entries!"); + Console.WriteLine("Duplicate entries (showing first 10):"); + foreach (var dup in duplicates.Take(10)) + { + Console.WriteLine($" [{dup.Timestamp}] {dup.Message} (appears {dup.Count} times)"); + } + + var uniqueCount = allResults.Count - duplicates.Sum(d => d.Count - 1); + Console.WriteLine($"Unique logs: {uniqueCount}"); + } + else + { + Console.WriteLine("No duplicates found. All logs are unique."); + } Console.WriteLine(); var viewSample = !_interactive || GetYesNoResponse("Would you like to see a sample of the logs? 
(y/n) "); @@ -420,8 +441,14 @@ private static async Task>> PerformLargeQuery( var offsetLastLogDate = lastLogDate.AddMilliseconds(1); Console.WriteLine($" -> Offset timestamp (last + 1ms): {offsetLastLogDate:yyyy-MM-ddTHH:mm:ss.fffZ} ({offsetLastLogDate.ToUnixTimeSeconds()}s)"); - // Convert back to seconds for the API + // Convert to seconds, but round UP to the next second to avoid overlapping with logs in the same second + // This ensures we don't re-query logs that share the same second as the last log var offsetLastLogTime = offsetLastLogDate.ToUnixTimeSeconds(); + if (offsetLastLogDate.Millisecond > 0) + { + offsetLastLogTime++; // Move to the next full second + Console.WriteLine($" -> Adjusted to next full second: {offsetLastLogTime}s ({DateTimeOffset.FromUnixTimeSeconds(offsetLastLogTime):yyyy-MM-ddTHH:mm:ss.fffZ})"); + } Console.WriteLine($" -> Comparing: offsetLastLogTime={offsetLastLogTime}s vs endTime={endTime}s"); Console.WriteLine($" -> End time as date: {DateTimeOffset.FromUnixTimeSeconds(endTime):yyyy-MM-ddTHH:mm:ss.fffZ}"); @@ -486,11 +513,13 @@ private static async Task>> PerformLargeQuery( /// /// Splits a date range in half. + /// Range 2 starts at midpoint + 1 second to avoid overlap. /// private static (long range1Start, long range1End, long range2Start, long range2End) SplitDateRange(long startTime, long endTime) { var midpoint = startTime + (endTime - startTime) / 2; - return (startTime, midpoint, midpoint, endTime); + // Range 2 starts at midpoint + 1 to avoid querying the same second twice + return (startTime, midpoint, midpoint + 1, endTime); } /// @@ -731,5 +760,39 @@ private static long PromptUserForLong(string prompt) } return 0; } + + /// + /// Finds duplicate log entries based on timestamp and message. 
+ /// + private static List<(string Timestamp, string Message, int Count)> FindDuplicateLogs(List> logs) + { + var logSignatures = new Dictionary(); + + foreach (var log in logs) + { + var timestamp = log.Find(f => f.Field == "@timestamp")?.Value ?? ""; + var message = log.Find(f => f.Field == "@message")?.Value ?? ""; + var signature = $"{timestamp}|{message}"; + + if (logSignatures.ContainsKey(signature)) + { + logSignatures[signature]++; + } + else + { + logSignatures[signature] = 1; + } + } + + return logSignatures + .Where(kvp => kvp.Value > 1) + .Select(kvp => + { + var parts = kvp.Key.Split('|'); + return (Timestamp: parts[0], Message: parts[1], Count: kvp.Value); + }) + .OrderByDescending(x => x.Count) + .ToList(); + } } // snippet-end:[CloudWatchLogs.dotnetv4.LargeQueryWorkflow] From 76c2bb15572edfa45a2796a0bda13ecac25d5a35 Mon Sep 17 00:00:00 2001 From: Rachel Hagerman <110480692+rlhagerm@users.noreply.github.com> Date: Thu, 20 Nov 2025 08:13:16 -0600 Subject: [PATCH 06/23] Updating tests and metadata --- .../metadata/cloudwatch-logs_metadata.yaml | 36 ++++ dotnetv3/CloudWatchLogs/README.md | 24 ++- dotnetv4/CloudWatchLogs/LargeQuery/README.md | 95 ++++++++++- .../Scenarios/LargeQueryWorkflow.cs | 42 ++++- .../Tests/LargeQueryWorkflowTests.cs | 158 ++++-------------- .../SPECIFICATION.md | 42 +++-- steering_docs/dotnet-tech/scenario.md | 84 +++++++++- 7 files changed, 335 insertions(+), 146 deletions(-) diff --git a/.doc_gen/metadata/cloudwatch-logs_metadata.yaml b/.doc_gen/metadata/cloudwatch-logs_metadata.yaml index beac1ce1a8a..dd7dd43291d 100644 --- a/.doc_gen/metadata/cloudwatch-logs_metadata.yaml +++ b/.doc_gen/metadata/cloudwatch-logs_metadata.yaml @@ -288,6 +288,14 @@ cloudwatch-logs_PutSubscriptionFilter: cloudwatch-logs: {PutSubscriptionFilter} cloudwatch-logs_GetQueryResults: languages: + .NET: + versions: + - sdk_version: 3 + github: dotnetv4/CloudWatchLogs/LargeQuery + excerpts: + - description: + snippet_tags: + - 
CloudWatchLogs.dotnetv4.GetQueryResults JavaScript: versions: - sdk_version: 3 @@ -306,8 +314,28 @@ cloudwatch-logs_GetQueryResults: - python.example_code.cloudwatch_logs.get_query_results services: cloudwatch-logs: {GetQueryResults} +cloudwatch-logs_PutLogEvents: + languages: + .NET: + versions: + - sdk_version: 3 + github: dotnetv4/CloudWatchLogs/LargeQuery + excerpts: + - description: + snippet_tags: + - CloudWatchLogs.dotnetv4.PutLogEvents + services: + cloudwatch-logs: {PutLogEvents} cloudwatch-logs_StartQuery: languages: + .NET: + versions: + - sdk_version: 3 + github: dotnetv4/CloudWatchLogs/LargeQuery + excerpts: + - description: + snippet_tags: + - CloudWatchLogs.dotnetv4.StartQuery JavaScript: versions: - sdk_version: 3 @@ -332,6 +360,14 @@ cloudwatch-logs_Scenario_BigQuery: synopsis: use &CWL; to query more than 10,000 records. category: Scenarios languages: + .NET: + versions: + - sdk_version: 3 + github: dotnetv4/CloudWatchLogs/LargeQuery + excerpts: + - description: This is the main workflow that demonstrates the large query scenario. + snippet_tags: + - CloudWatchLogs.dotnetv4.LargeQueryWorkflow JavaScript: versions: - sdk_version: 3 diff --git a/dotnetv3/CloudWatchLogs/README.md b/dotnetv3/CloudWatchLogs/README.md index 33a4867e9dd..47dbb4d874a 100644 --- a/dotnetv3/CloudWatchLogs/README.md +++ b/dotnetv3/CloudWatchLogs/README.md @@ -41,6 +41,16 @@ Code excerpts that show you how to call individual service functions. 
- [DeleteLogGroup](DeleteLogGroupExample/DeleteLogGroup.cs#L6) - [DescribeExportTasks](DescribeExportTasksExample/DescribeExportTasks.cs#L6) - [DescribeLogGroups](DescribeLogGroupsExample/DescribeLogGroups.cs#L6) +- [GetQueryResults](../../dotnetv4/CloudWatchLogs/LargeQuery/Actions/CloudWatchLogsWrapper.cs#L79) +- [PutLogEvents](../../dotnetv4/CloudWatchLogs/LargeQuery/Actions/CloudWatchLogsWrapper.cs#L110) +- [StartQuery](../../dotnetv4/CloudWatchLogs/LargeQuery/Actions/CloudWatchLogsWrapper.cs#L30) + +### Scenarios + +Code examples that show you how to accomplish a specific task by calling multiple +functions within the same service. + +- [Run a large query](../../dotnetv4/CloudWatchLogs/LargeQuery/Scenarios/LargeQueryWorkflow.cs) @@ -73,6 +83,18 @@ Alternatively, you can run the example from within your IDE. +#### Run a large query + +This example shows you how to use CloudWatch Logs to query more than 10,000 records. + + + + + + + + + ### Tests ⚠ Running tests might result in charges to your AWS account. @@ -99,4 +121,4 @@ in the `dotnetv3` folder. Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -SPDX-License-Identifier: Apache-2.0 \ No newline at end of file +SPDX-License-Identifier: Apache-2.0 diff --git a/dotnetv4/CloudWatchLogs/LargeQuery/README.md b/dotnetv4/CloudWatchLogs/LargeQuery/README.md index c530567eac2..bfdd6fdbf3c 100644 --- a/dotnetv4/CloudWatchLogs/LargeQuery/README.md +++ b/dotnetv4/CloudWatchLogs/LargeQuery/README.md @@ -2,6 +2,17 @@ This folder contains a .NET feature scenario that demonstrates how to perform large-scale queries on Amazon CloudWatch Logs using recursive binary search to retrieve more than the 10,000 result limit. +## Overview + +CloudWatch Logs Insights queries have a maximum result limit of 10,000 records per query. This example demonstrates how to overcome this limitation by using a recursive binary search algorithm that splits the time range into smaller segments when the limit is reached. 
+ +The scenario performs the following steps: + +1. **Setup**: Deploys a CloudFormation stack with a log group and log stream +2. **Data Generation**: Creates and uploads 50,000 sample log entries +3. **Query Execution**: Performs recursive queries to retrieve all logs using binary search +4. **Cleanup**: Removes all created resources + ## Project Structure ``` @@ -14,7 +25,7 @@ LargeQuery/ │ ├── README.md # Detailed scenario documentation │ └── CloudWatchLogsScenario.csproj # Scenario project file ├── Tests/ -│ ├── LargeQueryWorkflowTests.cs # Unit tests +│ ├── LargeQueryWorkflowTests.cs # Integration tests │ ├── Usings.cs # Global usings │ └── CloudWatchLogsTests.csproj # Test project file └── CloudWatchLogsLargeQuery.sln # Solution file @@ -23,13 +34,16 @@ LargeQuery/ ## What This Example Demonstrates - Deploying AWS resources using CloudFormation -- Generating and ingesting large volumes of log data -- Performing CloudWatch Logs Insights queries +- Generating and ingesting large volumes of log data using PutLogEvents +- Performing CloudWatch Logs Insights queries with StartQuery and GetQueryResults - Using recursive binary search to retrieve more than 10,000 results +- Handling timestamp precision for accurate query splitting - Cleaning up resources after completion ## Running the Example +### Interactive Mode + 1. Navigate to the solution directory: ``` cd dotnetv4/CloudWatchLogs/LargeQuery @@ -45,22 +59,87 @@ LargeQuery/ dotnet run --project Scenarios/CloudWatchLogsScenario.csproj ``` -4. Run the tests: - ``` - dotnet test - ``` +4. Follow the prompts to: + - Deploy the CloudFormation stack + - Generate sample logs + - Execute the recursive query + - View sample results + - Clean up resources + +### Non-Interactive Mode (Testing) + +Run the integration tests to execute the scenario without user prompts: + +``` +dotnet test +``` + +The test verifies that the scenario completes without errors and successfully retrieves all 50,000 log entries. 
## Prerequisites - .NET 8.0 or later - AWS credentials configured -- Permissions for CloudWatch Logs and CloudFormation +- Permissions for: + - CloudWatch Logs (CreateLogGroup, CreateLogStream, PutLogEvents, StartQuery, GetQueryResults, DeleteLogGroup) + - CloudFormation (CreateStack, DescribeStacks, DeleteStack) + +## How It Works + +### Recursive Query Algorithm + +The key to retrieving more than 10,000 results is the recursive binary search algorithm: + +1. Execute a query with the full date range +2. If results < 10,000, return them (we have all logs in this range) +3. If results = 10,000, there may be more logs: + - Get the timestamp of the last result + - Calculate the midpoint between the last timestamp and end date + - Recursively query the first half (last timestamp to midpoint) + - Recursively query the second half (midpoint to end date) + - Combine all results + +This approach ensures all logs are retrieved by progressively narrowing the time ranges until each segment contains fewer than 10,000 results. + +### Timestamp Precision + +The algorithm uses millisecond precision for timestamps to ensure accurate splitting and prevent duplicate or missing log entries. Each query adjusts the start time by 1 millisecond to avoid overlapping results. + +## Expected Output + +When running the scenario, you'll see output similar to: + +``` +-------------------------------------------------------------------------------- +Welcome to the CloudWatch Logs Large Query Scenario. +-------------------------------------------------------------------------------- +Preparing the application... +Deploying CloudFormation stack: CloudWatchLargeQueryStack +CloudFormation stack creation started: CloudWatchLargeQueryStack +Waiting for CloudFormation stack creation to complete... +CloudFormation stack creation complete. +Stack output RoleARN: arn:aws:iam::123456789012:role/... +Generating 50,000 sample log entries... 
+Batch 1/5: Created 10,000 log entries +Batch 2/5: Created 10,000 log entries +... +Waiting 5 minutes for logs to be fully ingested... +-------------------------------------------------------------------------------- +Starting recursive query to retrieve all logs... +Query date range: 2024-01-15T10:00:00.000Z to 2024-01-15T10:05:00.000Z. Found 10000 logs. +Query date range: 2024-01-15T10:02:30.000Z to 2024-01-15T10:03:45.000Z. Found 10000 logs. +... +Queries finished in 8.234 seconds. +Total logs found: 50000 +-------------------------------------------------------------------------------- +``` ## Related Resources - [CloudWatch Logs Documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/) - [CloudWatch Logs Insights Query Syntax](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CWL_QuerySyntax.html) - [AWS SDK for .NET](https://aws.amazon.com/sdk-for-net/) +- [CloudWatch Logs API Reference](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/) --- diff --git a/dotnetv4/CloudWatchLogs/LargeQuery/Scenarios/LargeQueryWorkflow.cs b/dotnetv4/CloudWatchLogs/LargeQuery/Scenarios/LargeQueryWorkflow.cs index 61c1d572a6b..82d2e1864f6 100644 --- a/dotnetv4/CloudWatchLogs/LargeQuery/Scenarios/LargeQueryWorkflow.cs +++ b/dotnetv4/CloudWatchLogs/LargeQuery/Scenarios/LargeQueryWorkflow.cs @@ -47,7 +47,7 @@ public class LargeQueryWorkflow private static long _queryEndDate; public static bool _interactive = true; - private static string _stackName = "CloudWatchLargeQueryStack"; + public static string _stackName = "CloudWatchLargeQueryStack"; private static string _stackResourcePath = "../../../../../../../scenarios/features/cloudwatch_logs_large_query/resources/stack.yaml"; public static async Task Main(string[] args) @@ -107,6 +107,46 @@ public static async Task Main(string[] args) Console.WriteLine("CloudWatch Logs Large Query scenario completed."); } + /// + /// Runs the scenario workflow. Used for testing. 
+ /// + public static async Task RunScenario() + { + Console.WriteLine(new string('-', 80)); + Console.WriteLine("Welcome to the CloudWatch Logs Large Query Scenario."); + Console.WriteLine(new string('-', 80)); + Console.WriteLine("This scenario demonstrates how to perform large-scale queries on"); + Console.WriteLine("CloudWatch Logs using recursive binary search to retrieve more than"); + Console.WriteLine("the 10,000 result limit."); + Console.WriteLine(); + + try + { + Console.WriteLine(new string('-', 80)); + var prepareSuccess = await PrepareApplication(); + Console.WriteLine(new string('-', 80)); + + if (prepareSuccess) + { + Console.WriteLine(new string('-', 80)); + await ExecuteLargeQuery(); + Console.WriteLine(new string('-', 80)); + } + + Console.WriteLine(new string('-', 80)); + await Cleanup(); + Console.WriteLine(new string('-', 80)); + } + catch (Exception ex) + { + _logger.LogError(ex, "There was a problem with the scenario, initiating cleanup..."); + _interactive = false; + await Cleanup(); + } + + Console.WriteLine("CloudWatch Logs Large Query scenario completed."); + } + /// /// Prepares the application by creating the necessary resources. /// diff --git a/dotnetv4/CloudWatchLogs/LargeQuery/Tests/LargeQueryWorkflowTests.cs b/dotnetv4/CloudWatchLogs/LargeQuery/Tests/LargeQueryWorkflowTests.cs index 0951c9b2549..d01e00aed81 100644 --- a/dotnetv4/CloudWatchLogs/LargeQuery/Tests/LargeQueryWorkflowTests.cs +++ b/dotnetv4/CloudWatchLogs/LargeQuery/Tests/LargeQueryWorkflowTests.cs @@ -1,143 +1,55 @@ // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 +using Amazon.CloudFormation; using Amazon.CloudWatchLogs; -using Amazon.CloudWatchLogs.Model; using CloudWatchLogsActions; +using CloudWatchLogsScenario; using Microsoft.Extensions.Logging; using Moq; namespace CloudWatchLogsTests; +/// +/// Integration tests for the CloudWatch Logs Large Query workflow. 
+/// public class LargeQueryWorkflowTests { - private readonly Mock _mockCloudWatchLogs; - private readonly Mock> _mockLogger; - private readonly CloudWatchLogsWrapper _wrapper; - - public LargeQueryWorkflowTests() - { - _mockCloudWatchLogs = new Mock(); - _mockLogger = new Mock>(); - _wrapper = new CloudWatchLogsWrapper(_mockCloudWatchLogs.Object, _mockLogger.Object); - } - - [Fact] - public async Task StartQueryAsync_Success_ReturnsQueryId() - { - // Arrange - var expectedQueryId = "test-query-id-123"; - _mockCloudWatchLogs - .Setup(x => x.StartQueryAsync(It.IsAny(), default)) - .ReturnsAsync(new StartQueryResponse { QueryId = expectedQueryId }); - - // Act - var result = await _wrapper.StartQueryAsync( - "/test/log-group", - "fields @timestamp, @message", - 1000, - 2000, - 10000); - - // Assert - Assert.Equal(expectedQueryId, result); - } - - [Fact] - public async Task StartQueryAsync_InvalidParameter_ReturnsNull() - { - // Arrange - _mockCloudWatchLogs - .Setup(x => x.StartQueryAsync(It.IsAny(), default)) - .ThrowsAsync(new InvalidParameterException("Invalid parameter")); - - // Act - var result = await _wrapper.StartQueryAsync( - "/test/log-group", - "fields @timestamp, @message", - 1000, - 2000, - 10000); - - // Assert - Assert.Null(result); - } - + /// + /// Verifies the scenario with an integration test. No errors should be logged. + /// + /// Async task. 
[Fact] - public async Task GetQueryResultsAsync_Success_ReturnsResults() + [Trait("Category", "Integration")] + public async Task TestScenarioIntegration() { // Arrange - var expectedResponse = new GetQueryResultsResponse - { - Status = QueryStatus.Complete, - Results = new List> - { - new List - { - new ResultField { Field = "@timestamp", Value = "2023-01-01T00:00:00.000Z" }, - new ResultField { Field = "@message", Value = "Test message" } - } - } - }; + LargeQueryWorkflow._interactive = false; - _mockCloudWatchLogs - .Setup(x => x.GetQueryResultsAsync(It.IsAny(), default)) - .ReturnsAsync(expectedResponse); + var loggerScenarioMock = new Mock>(); + loggerScenarioMock.Setup(logger => logger.Log( + It.Is(logLevel => logLevel == LogLevel.Error), + It.IsAny(), + It.Is((@object, @type) => true), + It.IsAny(), + It.IsAny>())); // Act - var result = await _wrapper.GetQueryResultsAsync("test-query-id"); - - // Assert - Assert.NotNull(result); - Assert.Equal(QueryStatus.Complete, result.Status); - Assert.Single(result.Results); - } - - [Fact] - public async Task PutLogEventsAsync_Success_ReturnsTrue() - { - // Arrange - _mockCloudWatchLogs - .Setup(x => x.PutLogEventsAsync(It.IsAny(), default)) - .ReturnsAsync(new PutLogEventsResponse()); - - var logEvents = new List - { - new InputLogEvent - { - Timestamp = DateTime.UtcNow, - Message = "Test log message" - } - }; - - // Act - var result = await _wrapper.PutLogEventsAsync("/test/log-group", "test-stream", logEvents); - - // Assert - Assert.True(result); - } - - [Fact] - public async Task PutLogEventsAsync_ResourceNotFound_ReturnsFalse() - { - // Arrange - _mockCloudWatchLogs - .Setup(x => x.PutLogEventsAsync(It.IsAny(), default)) - .ThrowsAsync(new ResourceNotFoundException("Log group not found")); - - var logEvents = new List - { - new InputLogEvent - { - Timestamp = DateTime.UtcNow, - Message = "Test log message" - } - }; - - // Act - var result = await _wrapper.PutLogEventsAsync("/test/log-group", "test-stream", 
logEvents); - - // Assert - Assert.False(result); + LargeQueryWorkflow._logger = loggerScenarioMock.Object; + LargeQueryWorkflow._wrapper = new CloudWatchLogsWrapper( + new AmazonCloudWatchLogsClient(), + new Mock>().Object); + LargeQueryWorkflow._amazonCloudFormation = new AmazonCloudFormationClient(); + + await LargeQueryWorkflow.RunScenario(); + + // Assert no errors logged + loggerScenarioMock.Verify(logger => logger.Log( + It.Is(logLevel => logLevel == LogLevel.Error), + It.IsAny(), + It.Is((@object, @type) => true), + It.IsAny(), + It.IsAny>()), + Times.Never); } } diff --git a/scenarios/features/cloudwatch_logs_large_query/SPECIFICATION.md b/scenarios/features/cloudwatch_logs_large_query/SPECIFICATION.md index acb3406cb89..b3e23572c2d 100644 --- a/scenarios/features/cloudwatch_logs_large_query/SPECIFICATION.md +++ b/scenarios/features/cloudwatch_logs_large_query/SPECIFICATION.md @@ -2,12 +2,18 @@ ## Overview -This feature scenario demonstrates how to perform large-scale queries on Amazon CloudWatch Logs using recursive binary search to retrieve more than the 10,000 result limit. The scenario showcases: +This feature scenario demonstrates how to perform large-scale queries on Amazon CloudWatch Logs using recursive binary search to retrieve more than the 10,000 result limit. + +**Important**: This is a complete, self-contained scenario that handles all setup and cleanup automatically. The scenario includes: 1. Deploying CloudFormation resources (log group and stream) 2. Generating and ingesting 50,000 sample log entries 3. Performing recursive queries to retrieve all logs using binary search -4. Cleaning up resources +4. Cleaning up all resources + +**The scenario must be runnable in both interactive and non-interactive modes** to support: +- Interactive mode: User runs the scenario manually with prompts +- Non-interactive mode: Automated integration tests run the scenario without user input For an introduction, see the [README.md](README.md). 
@@ -78,33 +84,40 @@ This scenario uses the following CloudFormation API actions: ### Phase 1: Setup -**Purpose**: Deploy resources and generate sample data +**Purpose**: Deploy resources and generate sample data as part of the scenario -**Steps**: +**Interactive Mode Steps**: 1. Welcome message explaining the scenario 2. Prompt user: "Would you like to deploy the CloudFormation stack and generate sample logs? (y/n)" 3. If yes: - Prompt for CloudFormation stack name (default: "CloudWatchLargeQueryStack") - Deploy CloudFormation stack from `resources/stack.yaml` - Wait for stack creation to complete (status: CREATE_COMPLETE) - - Execute log generation: - - **Option A** (Bash): Run `make-log-files.sh` then `put-log-events.sh` - - **Option B** (Python): Run `create_logs.py` (recommended for cross-platform) - - Capture `QUERY_START_DATE` and `QUERY_END_DATE` from script output + - Generate logs directly using CloudWatch Logs API: + - Create 50,000 log entries with timestamps spanning 5 minutes + - Upload in batches of 10,000 entries using PutLogEvents + - Display progress for each batch uploaded + - Capture start and end timestamps for query configuration - Display message: "Sample logs created. Waiting 5 minutes for logs to be fully ingested..." - - Wait 5 minutes (300 seconds) for log ingestion + - Wait 5 minutes (300 seconds) for log ingestion with countdown display 4. 
If no: - Prompt user for existing log group name - Prompt user for log stream name - Prompt user for query start date (ISO 8601 format with milliseconds) - Prompt user for query end date (ISO 8601 format with milliseconds) +**Non-Interactive Mode Behavior**: +- Automatically deploys stack with default name +- Automatically generates 50,000 sample logs +- Waits 5 minutes for log ingestion +- Uses default values for all configuration + **Variables Set**: - `stackName` - CloudFormation stack name - `logGroupName` - Log group name (default: `/workflows/cloudwatch-logs/large-query`) - `logStreamName` - Log stream name (default: `stream1`) -- `queryStartDate` - Start timestamp for query (milliseconds since epoch) -- `queryEndDate` - End timestamp for query (milliseconds since epoch) +- `queryStartDate` - Start timestamp for query (seconds since epoch) +- `queryEndDate` - End timestamp for query (seconds since epoch) ### Phase 2: Query Execution @@ -130,7 +143,7 @@ This scenario uses the following CloudFormation API actions: **Purpose**: Remove created resources -**Steps**: +**Interactive Mode Steps**: 1. Prompt user: "Would you like to delete the CloudFormation stack and all resources? (y/n)" 2. If yes: - Delete CloudFormation stack @@ -140,6 +153,11 @@ This scenario uses the following CloudFormation API actions: - Display message: "Resources will remain. You can delete them later through the AWS Console." - Display stack name and log group name for reference +**Non-Interactive Mode Behavior**: +- Automatically deletes the CloudFormation stack +- Waits for deletion to complete +- Ensures cleanup happens even if errors occur during the scenario + --- ## Implementation Details diff --git a/steering_docs/dotnet-tech/scenario.md b/steering_docs/dotnet-tech/scenario.md index 51cb61865d7..2bbb42c41ef 100644 --- a/steering_docs/dotnet-tech/scenario.md +++ b/steering_docs/dotnet-tech/scenario.md @@ -895,9 +895,91 @@ catch (Exception ex) 4. 
**Implement Workflow**: Create workflow class with phases from specification 5. **Add CloudFormation**: Integrate stack deployment and deletion 6. **Add User Interaction**: Implement prompts and validation -7. **Test**: Create unit tests for workflow methods +7. **Test**: Create integration tests for workflow methods 8. **Document**: Add README.md with scenario description +## Integration Tests + +### Single Integration Test Pattern + +Integration tests should use a single test method that verifies no errors are logged: + +```csharp +/// +/// Verifies the scenario with an integration test. No errors should be logged. +/// +/// Async task. +[Fact] +[Trait("Category", "Integration")] +public async Task TestScenarioIntegration() +{ + // Arrange + {Service}Workflow._interactive = false; + + var loggerScenarioMock = new Mock>(); + loggerScenarioMock.Setup(logger => logger.Log( + It.Is(logLevel => logLevel == LogLevel.Error), + It.IsAny(), + It.Is((@object, @type) => true), + It.IsAny(), + It.IsAny>())); + + // Act + {Service}Workflow._logger = loggerScenarioMock.Object; + {Service}Workflow._wrapper = new {Service}Wrapper( + new Amazon{Service}Client(), + new Mock>().Object); + {Service}Workflow._amazonCloudFormation = new AmazonCloudFormationClient(); + + await {Service}Workflow.RunScenario(); + + // Assert no errors logged + loggerScenarioMock.Verify(logger => logger.Log( + It.Is(logLevel => logLevel == LogLevel.Error), + It.IsAny(), + It.Is((@object, @type) => true), + It.IsAny(), + It.IsAny>()), + Times.Never); +} +``` + +### RunScenario Method + +The workflow must include a public RunScenario method for testing: + +```csharp +/// +/// Runs the scenario workflow. Used for testing. 
+/// +public static async Task RunScenario() +{ + Console.WriteLine(new string('-', 80)); + Console.WriteLine("Welcome to the {Service} Scenario."); + Console.WriteLine(new string('-', 80)); + + try + { + var prepareSuccess = await PrepareApplication(); + + if (prepareSuccess) + { + await ExecutePhase2(); + } + + await Cleanup(); + } + catch (Exception ex) + { + _logger.LogError(ex, "There was a problem with the scenario, initiating cleanup..."); + _interactive = false; + await Cleanup(); + } + + Console.WriteLine("Scenario completed."); +} +``` + ### Specification Sections to Implement - **API Actions Used**: All operations must be in wrapper class - **Proposed example structure**: Maps to workflow phases From ec0f943795d57cc7c09eafa0cbadc161958b204b Mon Sep 17 00:00:00 2001 From: Rachel Hagerman <110480692+rlhagerm@users.noreply.github.com> Date: Thu, 20 Nov 2025 08:18:31 -0600 Subject: [PATCH 07/23] Update README and main solution file --- dotnetv4/DotNetV4Examples.sln | 6 +++--- scenarios/features/cloudwatch_logs_large_query/README.md | 1 + 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/dotnetv4/DotNetV4Examples.sln b/dotnetv4/DotNetV4Examples.sln index cfa92fb639b..0b44f37b700 100644 --- a/dotnetv4/DotNetV4Examples.sln +++ b/dotnetv4/DotNetV4Examples.sln @@ -95,11 +95,11 @@ Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "CloudWatchActions", "CloudW EndProject Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "CloudWatchLogs", "CloudWatchLogs", "{A1B2C3D4-E5F6-7890-1234-567890ABCDEF}" EndProject -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "CloudWatchLogsTests", "CloudWatchLogs\Tests\CloudWatchLogsTests.csproj", "{B2C3D4E5-F6A7-8901-2345-678901BCDEFG}" +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "CloudWatchLogsTests", "CloudWatchLogs\LargeQuery\Tests\CloudWatchLogsTests.csproj", "{B2C3D4E5-F6A7-8901-2345-678901BCDEFG}" EndProject -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "CloudWatchLogsScenarios", 
"CloudWatchLogs\Scenarios\CloudWatchLogsScenarios.csproj", "{C3D4E5F6-A7B8-9012-3456-789012CDEFGH}" +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "CloudWatchLogsScenario", "CloudWatchLogs\LargeQuery\Scenarios\CloudWatchLogsScenario.csproj", "{C3D4E5F6-A7B8-9012-3456-789012CDEFGH}" EndProject -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "CloudWatchLogsActions", "CloudWatchLogs\Actions\CloudWatchLogsActions.csproj", "{D4E5F6A7-B8C9-0123-4567-890123DEFGHI}" +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "CloudWatchLogsActions", "CloudWatchLogs\LargeQuery\Actions\CloudWatchLogsActions.csproj", "{D4E5F6A7-B8C9-0123-4567-890123DEFGHI}" EndProject Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "EC2", "EC2", "{9424FB14-B6DE-44CE-B675-AC2B57EC1E69}" EndProject diff --git a/scenarios/features/cloudwatch_logs_large_query/README.md b/scenarios/features/cloudwatch_logs_large_query/README.md index 1b76e5758c6..17d487f3e3f 100644 --- a/scenarios/features/cloudwatch_logs_large_query/README.md +++ b/scenarios/features/cloudwatch_logs_large_query/README.md @@ -47,6 +47,7 @@ A lot of logs are needed to make a robust example. 
If you happen to have a log g This example is implemented in the following languages: +- [.NET](../../../dotnetv4/CloudWatchLogs/LargeQuery/README.md) - [JavaScript](../../../javascriptv3/example_code/cloudwatch-logs/scenarios/large-query/README.md) - [Python](../../../python/example_code/cloudwatch-logs/scenarios/large-query/README.md) From b53da479f2df96927afdd62ef00d939fd0e5040d Mon Sep 17 00:00:00 2001 From: Rachel Hagerman <110480692+rlhagerm@users.noreply.github.com> Date: Wed, 12 Nov 2025 13:54:28 -0600 Subject: [PATCH 08/23] Updates to steering --- .gitignore | 1 + .kiro/settings/mcp.json | 40 + .kiro/steering/orchestration.md | 17 + dotnetv4/DotNetV4Examples.sln | 23 + .../SPECIFICATION copy.md | 118 +++ .../SPECIFICATION.md | 345 +++++-- steering_docs/dotnet-tech/scenario.md | 898 ++++++++++++++++++ 7 files changed, 1388 insertions(+), 54 deletions(-) create mode 100644 .kiro/settings/mcp.json create mode 100644 .kiro/steering/orchestration.md create mode 100644 scenarios/features/cloudwatch_logs_large_query/SPECIFICATION copy.md create mode 100644 steering_docs/dotnet-tech/scenario.md diff --git a/.gitignore b/.gitignore index 0b25f6593e2..d6f42f4afdd 100644 --- a/.gitignore +++ b/.gitignore @@ -38,3 +38,4 @@ kotlin/services/**/gradlew kotlin/services/**/gradlew.bat kotlin/services/**/.kotlin/ /.local/ +/.kiro/settings diff --git a/.kiro/settings/mcp.json b/.kiro/settings/mcp.json new file mode 100644 index 00000000000..42fb25a070f --- /dev/null +++ b/.kiro/settings/mcp.json @@ -0,0 +1,40 @@ +{ + "mcpServers": { + "awslabs.bedrock-kb-retrieval-mcp-server": { + "command": "uv", + "args": [ + "tool", + "run", + "--from", + "awslabs.bedrock-kb-retrieval-mcp-server@latest", + "awslabs.bedrock-kb-retrieval-mcp-server.exe" + ], + "env": { + "FASTMCP_LOG_LEVEL": "ERROR", + "AWS_PROFILE": "cex-ai-kb-access", + "AWS_REGION": "us-west-2" + }, + "disabled": false, + "autoApprove": [ + "QueryKnowledgeBases" + ], + "disabledTools": [ + "ListKnowledgeBases" + ] + }, + 
"aws-knowledge-mcp-server": { + "command": "uvx", + "args": [ + "mcp-proxy", + "--transport", + "streamablehttp", + "https://knowledge-mcp.global.api.aws" + ], + "disabled": false, + "autoApprove": [ + "aws___search_documentation", + "aws___read_documentation" + ] + } + } +} \ No newline at end of file diff --git a/.kiro/steering/orchestration.md b/.kiro/steering/orchestration.md new file mode 100644 index 00000000000..190c9c603f2 --- /dev/null +++ b/.kiro/steering/orchestration.md @@ -0,0 +1,17 @@ +# Code Generation Orchestration + +## Purpose +Define location of relevant steering docs that are outside of the .kiro directory. Refer to all steering docs in the ./steering_docs directory. Use the appropriate steering instructions for the requested language. Use the directories given below. + +- .NET: dotnet-tech +- Java: java-tech +- Kotlin: kotlin-tech + +## Code Generation +When a specification file is provided by the user, use that specification directly. Do not create your own spec or task breakdown. Follow the provided specification exactly and implement the requirements as described. + +If no specification is provided, then do not use separate steps for planning and tasks unless specifically asked to do so. Perform the tasks without stopping for user input. 
+ + + + diff --git a/dotnetv4/DotNetV4Examples.sln b/dotnetv4/DotNetV4Examples.sln index e4e1cf6f809..cfa92fb639b 100644 --- a/dotnetv4/DotNetV4Examples.sln +++ b/dotnetv4/DotNetV4Examples.sln @@ -93,6 +93,14 @@ Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "CloudWatchScenario", "Cloud EndProject Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "CloudWatchActions", "CloudWatch\Actions\CloudWatchActions.csproj", "{EAF4A3B8-5CD0-48ED-B848-0EA6D451B8D3}" EndProject +Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "CloudWatchLogs", "CloudWatchLogs", "{A1B2C3D4-E5F6-7890-1234-567890ABCDEF}" +EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "CloudWatchLogsTests", "CloudWatchLogs\Tests\CloudWatchLogsTests.csproj", "{B2C3D4E5-F6A7-8901-2345-678901BCDEFG}" +EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "CloudWatchLogsScenarios", "CloudWatchLogs\Scenarios\CloudWatchLogsScenarios.csproj", "{C3D4E5F6-A7B8-9012-3456-789012CDEFGH}" +EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "CloudWatchLogsActions", "CloudWatchLogs\Actions\CloudWatchLogsActions.csproj", "{D4E5F6A7-B8C9-0123-4567-890123DEFGHI}" +EndProject Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "EC2", "EC2", "{9424FB14-B6DE-44CE-B675-AC2B57EC1E69}" EndProject Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "EC2Tests", "EC2\Tests\EC2Tests.csproj", "{C99A0F7C-9477-4985-90F6-8EED38ECAC10}" @@ -279,6 +287,18 @@ Global {EAF4A3B8-5CD0-48ED-B848-0EA6D451B8D3}.Debug|Any CPU.Build.0 = Debug|Any CPU {EAF4A3B8-5CD0-48ED-B848-0EA6D451B8D3}.Release|Any CPU.ActiveCfg = Release|Any CPU {EAF4A3B8-5CD0-48ED-B848-0EA6D451B8D3}.Release|Any CPU.Build.0 = Release|Any CPU + {B2C3D4E5-F6A7-8901-2345-678901BCDEFG}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {B2C3D4E5-F6A7-8901-2345-678901BCDEFG}.Debug|Any CPU.Build.0 = Debug|Any CPU + {B2C3D4E5-F6A7-8901-2345-678901BCDEFG}.Release|Any CPU.ActiveCfg = Release|Any CPU + {B2C3D4E5-F6A7-8901-2345-678901BCDEFG}.Release|Any 
CPU.Build.0 = Release|Any CPU + {C3D4E5F6-A7B8-9012-3456-789012CDEFGH}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {C3D4E5F6-A7B8-9012-3456-789012CDEFGH}.Debug|Any CPU.Build.0 = Debug|Any CPU + {C3D4E5F6-A7B8-9012-3456-789012CDEFGH}.Release|Any CPU.ActiveCfg = Release|Any CPU + {C3D4E5F6-A7B8-9012-3456-789012CDEFGH}.Release|Any CPU.Build.0 = Release|Any CPU + {D4E5F6A7-B8C9-0123-4567-890123DEFGHI}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {D4E5F6A7-B8C9-0123-4567-890123DEFGHI}.Debug|Any CPU.Build.0 = Debug|Any CPU + {D4E5F6A7-B8C9-0123-4567-890123DEFGHI}.Release|Any CPU.ActiveCfg = Release|Any CPU + {D4E5F6A7-B8C9-0123-4567-890123DEFGHI}.Release|Any CPU.Build.0 = Release|Any CPU {C99A0F7C-9477-4985-90F6-8EED38ECAC10}.Debug|Any CPU.ActiveCfg = Debug|Any CPU {C99A0F7C-9477-4985-90F6-8EED38ECAC10}.Debug|Any CPU.Build.0 = Debug|Any CPU {C99A0F7C-9477-4985-90F6-8EED38ECAC10}.Release|Any CPU.ActiveCfg = Release|Any CPU @@ -392,6 +412,9 @@ Global {106FBE12-6FF7-40DC-9B3C-E5F67F335B32} = {CED87D19-7F82-4D67-8A30-3EE085D07E45} {565A9701-3D9C-49F8-86B7-D256A1D9E074} = {CED87D19-7F82-4D67-8A30-3EE085D07E45} {EAF4A3B8-5CD0-48ED-B848-0EA6D451B8D3} = {CED87D19-7F82-4D67-8A30-3EE085D07E45} + {B2C3D4E5-F6A7-8901-2345-678901BCDEFG} = {A1B2C3D4-E5F6-7890-1234-567890ABCDEF} + {C3D4E5F6-A7B8-9012-3456-789012CDEFGH} = {A1B2C3D4-E5F6-7890-1234-567890ABCDEF} + {D4E5F6A7-B8C9-0123-4567-890123DEFGHI} = {A1B2C3D4-E5F6-7890-1234-567890ABCDEF} {C99A0F7C-9477-4985-90F6-8EED38ECAC10} = {9424FB14-B6DE-44CE-B675-AC2B57EC1E69} {6C167F25-F97F-4854-8CD8-A2D446B6799B} = {9424FB14-B6DE-44CE-B675-AC2B57EC1E69} {D95519CA-BD27-45AE-B83B-3FB02E7AE445} = {6C167F25-F97F-4854-8CD8-A2D446B6799B} diff --git a/scenarios/features/cloudwatch_logs_large_query/SPECIFICATION copy.md b/scenarios/features/cloudwatch_logs_large_query/SPECIFICATION copy.md new file mode 100644 index 00000000000..788d7859efc --- /dev/null +++ b/scenarios/features/cloudwatch_logs_large_query/SPECIFICATION copy.md @@ -0,0 +1,118 @@ +# 
CloudWatch Logs large query - Technical specification + +This document contains the technical specifications for _CloudWatch Logs large query_, +a feature scenario that showcases AWS services and SDKs. It is primarily intended for the AWS code +examples team to use while developing this example in additional languages. + +This document explains the following: + +- Deploying AWS resources. +- Adding sample data. +- Setting up a large query. + +For an introduction, see the [README.md](README.md). + +--- + +### Table of contents + +- [Architecture](#architecture) +- [User input](#user-input) +- [Common resources](#common-resources) +- [Building the queries](#building-the-queries) +- [Output](#output) +- [Metadata](#metadata) + +## Architecture + +- Amazon CloudWatch Logs group +- Amazon CloudWatch Logs stream + +--- + +## User input + +The example should allow the configuration of a query start date, query end date, and results limit. It's up to you to decide how to allow this configuration. + +### Suggested variable names + +- `QUERY_START_DATE` - The oldest date that will be queried. +- `QUERY_END_DATE` - The newest date that will be queried. +- `QUERY_LIMIT` - The maximum number of results to return. CloudWatch has a maximum of 10,000. + +--- + +## Common resources + +This example has a set of common resources that are stored in the [resources](resources) folder. + +- [stack.yaml](resources/stack.yaml) is an AWS CloudFormation template containing the resources needed to run this example. +- [make-log-files.sh](resources/make-log-files.sh) is a bash script that creates log data. **Five minutes of logs, starting at the time of execution, will be created. Wait at least five minutes after running this script before attempting to query.** +- [put-log-events](resources/put-log-events.sh) is a bash script that ingests log data and uploads it to CloudWatch. 
+ +--- + +## Building the queries + +### Building and waiting for single query + +The query itself is a "CloudWatch Logs Insights query syntax" string. The query must return the `@timestamp` field so follow-up queries can use that information. Here's a sample query string: `fields @timestamp, @message | sort @timestamp asc`. Notice it sorts in ascending order. You can sort in either `asc` or `desc`, but the recursive strategy described later will need to match accordingly. + +Queries are jobs. You can start a query with `StartQuery`, but it immediately returns the `queryId`. You must poll a query using `GetQueryResults` until the query has finished. For the purpose of this example, a query has "finished" when `GetQueryResults` has returned a status of one of "Complete", "Failed", "Cancelled", "Timeout", or "Unknown". + +`StartQuery` responds with an error if the query's start or end date occurs out of bounds of the log group creation date. The error message starts with "Query's end date and time". + +Start the query and wait for it to "finish". Store the `results`. If the count of the results is less than the configured LIMIT, return the results. If the results are greater than or equal to the limit, go to [Recursive queries](#recursive-queries). + +--- + +### Recursive queries + +If the result count from the previous step is 10000 (or the configured LIMIT), it is very likely that there are more results. **The example must do a binary search of the remaining logs**. To do this, get the date of the last log (earliest or latest, depending on sort order). Use that date as the start date of a new date range. The end date can remain the same. + +Split that date range in half, resulting in two new date ranges. Call your query function twice; once for each new date range. + +Concatenate the results of the first query with the results of the two new queries. + +The following pseudocode illustrates this. 
+ +```pseudocode +func large_query(date_range): + query_results = get_query_results(date_range) + + if query_results.length < LIMIT + return query_results + else + date_range = [query_results.end, date_range.end] + d1, d2 = split(date_range) + return concat(query_results, large_query(d1), large_query(d2)) +``` + +## Output + +To illustrate the search, log the date ranges for each query made and the number of logs that were found. + +Example: + +``` +Starting a recursive query... +Query date range: 2023-12-22T19:08:42.000Z to 2023-12-22T19:13:41.994Z. Found 10000 logs. +Query date range: 2023-12-22T19:09:41.995Z to 2023-12-22T19:11:41.994Z. Found 10000 logs. +Query date range: 2023-12-22T19:11:41.995Z to 2023-12-22T19:13:41.994Z. Found 10000 logs. +Query date range: 2023-12-22T19:10:41.995Z to 2023-12-22T19:11:11.994Z. Found 5000 logs. +Query date range: 2023-12-22T19:11:11.995Z to 2023-12-22T19:11:41.994Z. Found 5000 logs. +Query date range: 2023-12-22T19:12:41.995Z to 2023-12-22T19:13:11.994Z. Found 5000 logs. +Query date range: 2023-12-22T19:13:11.995Z to 2023-12-22T19:13:41.994Z. Found 5000 logs. +Queries finished in 11.253 seconds. 
+Total logs found: 50000 +``` + +--- + +## Metadata + +| action / scenario | metadata file | metadata key | +| ----------------- | ----------------------------- | --------------------------------- | +| `GetQueryResults` | cloudwatch-logs_metadata.yaml | cloudwatch-logs_GetQueryResults | +| `StartQuery` | cloudwatch-logs_metadata.yaml | cloudwatch-logs_StartQuery | +| `Large Query` | cloudwatch-logs_metadata.yaml | cloudwatch-logs_Scenario_LargeQuery | diff --git a/scenarios/features/cloudwatch_logs_large_query/SPECIFICATION.md b/scenarios/features/cloudwatch_logs_large_query/SPECIFICATION.md index 788d7859efc..acb3406cb89 100644 --- a/scenarios/features/cloudwatch_logs_large_query/SPECIFICATION.md +++ b/scenarios/features/cloudwatch_logs_large_query/SPECIFICATION.md @@ -1,101 +1,270 @@ -# CloudWatch Logs large query - Technical specification +# CloudWatch Logs Large Query - Technical Specification -This document contains the technical specifications for _CloudWatch Logs large query_, -a feature scenario that showcases AWS services and SDKs. It is primarily intended for the AWS code -examples team to use while developing this example in additional languages. +## Overview -This document explains the following: +This feature scenario demonstrates how to perform large-scale queries on Amazon CloudWatch Logs using recursive binary search to retrieve more than the 10,000 result limit. The scenario showcases: -- Deploying AWS resources. -- Adding sample data. -- Setting up a large query. +1. Deploying CloudFormation resources (log group and stream) +2. Generating and ingesting 50,000 sample log entries +3. Performing recursive queries to retrieve all logs using binary search +4. Cleaning up resources For an introduction, see the [README.md](README.md). 
--- -### Table of contents +## Table of Contents -- [Architecture](#architecture) -- [User input](#user-input) -- [Common resources](#common-resources) -- [Building the queries](#building-the-queries) -- [Output](#output) +- [API Actions Used](#api-actions-used) +- [Resources](#resources) +- [Proposed Example Structure](#proposed-example-structure) +- [Implementation Details](#implementation-details) +- [Output Format](#output-format) +- [Errors](#errors) - [Metadata](#metadata) -## Architecture +--- + +## API Actions Used + +This scenario uses the following CloudWatch Logs API actions: + +- `StartQuery` - Initiates a CloudWatch Logs Insights query +- `GetQueryResults` - Retrieves results from a query, polling until complete -- Amazon CloudWatch Logs group -- Amazon CloudWatch Logs stream +This scenario uses the following CloudFormation API actions: + +- `CreateStack` - Deploys the CloudFormation template +- `DescribeStacks` - Checks stack status and retrieves outputs +- `DeleteStack` - Removes the CloudFormation stack --- -## User input +## Resources -The example should allow the configuration of a query start date, query end date, and results limit. It's up to you to decide how to allow this configuration. +### CloudFormation Template -### Suggested variable names +**Location**: `scenarios/features/cloudwatch_logs_large_query/resources/stack.yaml` -- `QUERY_START_DATE` - The oldest date that will be queried. -- `QUERY_END_DATE` - The newest date that will be queried. -- `QUERY_LIMIT` - The maximum number of results to return. CloudWatch has a maximum of 10,000. 
+**Resources Created**: +- CloudWatch Logs Log Group: `/workflows/cloudwatch-logs/large-query` +- CloudWatch Logs Log Stream: `stream1` ---- +**Stack Outputs**: None (resources use fixed names) + +### Sample Data Generation Scripts -## Common resources +**Script 1**: `scenarios/features/cloudwatch_logs_large_query/resources/make-log-files.sh` +- Creates 50,000 log entries divided into 5 JSON files (10,000 entries each) +- Generates timestamps spanning 5 minutes from execution time +- Outputs `QUERY_START_DATE` and `QUERY_END_DATE` environment variables +- Creates files: `file1.json`, `file2.json`, `file3.json`, `file4.json`, `file5.json` -This example has a set of common resources that are stored in the [resources](resources) folder. +**Script 2**: `scenarios/features/cloudwatch_logs_large_query/resources/put-log-events.sh` +- Uploads the generated JSON files to CloudWatch Logs +- Uses AWS CLI `put-log-events` command +- Targets log group: `/workflows/cloudwatch-logs/large-query` +- Targets log stream: `stream1` -- [stack.yaml](resources/stack.yaml) is an AWS CloudFormation template containing the resources needed to run this example. -- [make-log-files.sh](resources/make-log-files.sh) is a bash script that creates log data. **Five minutes of logs, starting at the time of execution, will be created. Wait at least five minutes after running this script before attempting to query.** -- [put-log-events](resources/put-log-events.sh) is a bash script that ingests log data and uploads it to CloudWatch. +**Python Alternative**: `scenarios/features/cloudwatch_logs_large_query/resources/create_logs.py` +- Python script that combines both generation and upload +- Creates 50,000 log entries and uploads them directly +- Returns start and end timestamps for query configuration +- Preferred for cross-platform compatibility --- -## Building the queries +## Proposed Example Structure + +### Phase 1: Setup + +**Purpose**: Deploy resources and generate sample data + +**Steps**: +1. 
Welcome message explaining the scenario +2. Prompt user: "Would you like to deploy the CloudFormation stack and generate sample logs? (y/n)" +3. If yes: + - Prompt for CloudFormation stack name (default: "CloudWatchLargeQueryStack") + - Deploy CloudFormation stack from `resources/stack.yaml` + - Wait for stack creation to complete (status: CREATE_COMPLETE) + - Execute log generation: + - **Option A** (Bash): Run `make-log-files.sh` then `put-log-events.sh` + - **Option B** (Python): Run `create_logs.py` (recommended for cross-platform) + - Capture `QUERY_START_DATE` and `QUERY_END_DATE` from script output + - Display message: "Sample logs created. Waiting 5 minutes for logs to be fully ingested..." + - Wait 5 minutes (300 seconds) for log ingestion +4. If no: + - Prompt user for existing log group name + - Prompt user for log stream name + - Prompt user for query start date (ISO 8601 format with milliseconds) + - Prompt user for query end date (ISO 8601 format with milliseconds) + +**Variables Set**: +- `stackName` - CloudFormation stack name +- `logGroupName` - Log group name (default: `/workflows/cloudwatch-logs/large-query`) +- `logStreamName` - Log stream name (default: `stream1`) +- `queryStartDate` - Start timestamp for query (milliseconds since epoch) +- `queryEndDate` - End timestamp for query (milliseconds since epoch) + +### Phase 2: Query Execution + +**Purpose**: Demonstrate recursive large query functionality + +**Steps**: +1. Display message: "Starting recursive query to retrieve all logs..." +2. Prompt user for query limit (default: 10000, max: 10000) +3. Set query string: `fields @timestamp, @message | sort @timestamp asc` +4. Execute recursive query function with: + - Log group name + - Query string + - Start date + - End date + - Limit +5. Display progress for each query executed (see [Output Format](#output-format)) +6. Display total execution time +7. Display total logs found +8. Prompt user: "Would you like to see a sample of the logs? 
(y/n)" +9. If yes, display first 10 log entries with timestamps and messages + +### Phase 3: Cleanup + +**Purpose**: Remove created resources + +**Steps**: +1. Prompt user: "Would you like to delete the CloudFormation stack and all resources? (y/n)" +2. If yes: + - Delete CloudFormation stack + - Wait for stack deletion to complete (status: DELETE_COMPLETE or stack not found) + - Display message: "Stack deleted successfully" +3. If no: + - Display message: "Resources will remain. You can delete them later through the AWS Console." + - Display stack name and log group name for reference -### Building and waiting for single query +--- -The query itself is a "CloudWatch Logs Insights query syntax" string. The query must return the `@timestamp` field so follow-up queries can use that information. Here's a sample query string: `fields @timestamp, @message | sort @timestamp asc`. Notice it sorts in ascending order. You can sort in either `asc` or `desc`, but the recursive strategy described later will need to match accordingly. +## Implementation Details -Queries are jobs. You can start a query with `StartQuery`, but it immediately returns the `queryId`. You must poll a query using `GetQueryResults` until the query has finished. For the purpose of this example, a query has "finished" when `GetQueryResults` has returned a status of one of "Complete", "Failed", "Cancelled", "Timeout", or "Unknown". +### CloudFormation Stack Deployment -`StartQuery` responds with an error if the query's start or end date occurs out of bounds of the log group creation date. The error message starts with "Query's end date and time". +**Deployment**: +``` +Stack Name: User-provided or default "CloudWatchLargeQueryStack" +Template: scenarios/features/cloudwatch_logs_large_query/resources/stack.yaml +Capabilities: None required (no IAM resources) +``` -Start the query and wait for it to "finish". Store the `results`. 
If the count of the results is less than the configured LIMIT, return the results. If the results are greater than or equal to the limit, go to [Recursive queries](#recursive-queries). +**Polling for Completion**: +- Poll `DescribeStacks` every 5-10 seconds +- Success: `StackStatus` = `CREATE_COMPLETE` +- Failure: `StackStatus` = `CREATE_FAILED`, `ROLLBACK_COMPLETE`, or `ROLLBACK_FAILED` +- Timeout: 5 minutes maximum wait time ---- +### Log Generation Execution -### Recursive queries +**Cross-Platform Considerations**: +- Bash scripts work on Linux, macOS, and Git Bash on Windows +- Python script is preferred for true cross-platform support +- Check for script availability before execution +- Handle script execution errors gracefully -If the result count from the previous step is 10000 (or the configured LIMIT), it is very likely that there are more results. **The example must do a binary search of the remaining logs**. To do this, get the date of the last log (earliest or latest, depending on sort order). Use that date as the start date of a new date range. The end date can remain the same. +**Capturing Output**: +- Parse stdout for `QUERY_START_DATE` and `QUERY_END_DATE` +- Convert timestamps to appropriate format for SDK +- Store timestamps for query configuration -Split that date range in half, resulting in two new date ranges. Call your query function twice; once for each new date range. +**Wait Time**: +- CloudWatch Logs requires time to ingest and index logs +- Minimum wait: 5 minutes (300 seconds) +- Display countdown or progress indicator during wait -Concatenate the results of the first query with the results of the two new queries. +### Building and Executing Queries + +**Query String**: +``` +fields @timestamp, @message | sort @timestamp asc +``` -The following pseudocode illustrates this. +**Important**: The query MUST return `@timestamp` field for recursive queries to work. 
-```pseudocode -func large_query(date_range): - query_results = get_query_results(date_range) +**StartQuery Parameters**: +- `logGroupName` - The log group to query +- `startTime` - Start of date range (seconds since epoch) +- `endTime` - End of date range (seconds since epoch) +- `queryString` - CloudWatch Logs Insights query syntax +- `limit` - Maximum results (default: 10000, max: 10000) - if query_results.length < LIMIT - return query_results - else - date_range = [query_results.end, date_range.end] - d1, d2 = split(date_range) - return concat(query_results, large_query(d1), large_query(d2)) +**GetQueryResults Polling**: +- Poll every 1-2 seconds +- Continue until status is one of: `Complete`, `Failed`, `Cancelled`, `Timeout`, `Unknown` +- Timeout after 60 seconds of polling + +**Error Handling**: +- If `StartQuery` returns error starting with "Query's end date and time", the date range is out of bounds +- Handle this by adjusting the date range or informing the user + +### Recursive Query Algorithm + +**Purpose**: Retrieve more than 10,000 results by splitting date ranges + +**Algorithm**: ``` +function LargeQuery(startDate, endDate, limit): + results = ExecuteQuery(startDate, endDate, limit) + + if results.count < limit: + return results + else: + // Get timestamp of last result + lastTimestamp = results[results.count - 1].timestamp + + // Calculate midpoint between last result and end date + midpoint = (lastTimestamp + endDate) / 2 + + // Query first half + results1 = LargeQuery(lastTimestamp, midpoint, limit) + + // Query second half + results2 = LargeQuery(midpoint, endDate, limit) + + // Combine results + return Concatenate(results, results1, results2) +``` + +**Key Points**: +- Use binary search to split remaining date range +- Recursively query each half +- Concatenate all results +- Log each query's date range and result count (see [Output Format](#output-format)) + +### Stack Deletion + +**Deletion**: +``` +Stack Name: Same as used during creation 
+``` + +**Polling for Completion**: +- Poll `DescribeStacks` every 5-10 seconds +- Success: Stack not found (ValidationError) or `StackStatus` = `DELETE_COMPLETE` +- Failure: `StackStatus` = `DELETE_FAILED` +- If `DELETE_FAILED`, optionally retry with force delete +- Timeout: 5 minutes maximum wait time + +--- -## Output +## Output Format -To illustrate the search, log the date ranges for each query made and the number of logs that were found. +### Query Progress Output -Example: +Display each query execution with the following format: ``` -Starting a recursive query... +Query date range: <startDate> to <endDate>. Found <count> logs. +``` + +**Example**: +``` +Starting recursive query... Query date range: 2023-12-22T19:08:42.000Z to 2023-12-22T19:13:41.994Z. Found 10000 logs. Query date range: 2023-12-22T19:09:41.995Z to 2023-12-22T19:11:41.994Z. Found 10000 logs. Query date range: 2023-12-22T19:11:41.995Z to 2023-12-22T19:13:41.994Z. Found 10000 logs. @@ -107,6 +276,74 @@ Queries finished in 11.253 seconds. Total logs found: 50000 ``` +### Summary Output + +After all queries complete, display: +- Total execution time (in seconds with 3 decimal places) +- Total number of logs found + +### Sample Logs Output + +If user chooses to view sample logs, display first 10 entries: + +``` +Sample logs (first 10 of 50000): +[2023-12-22T19:08:42.000Z] Entry 0 +[2023-12-22T19:08:42.006Z] Entry 1 +[2023-12-22T19:08:42.012Z] Entry 2 +... 
+``` + +--- + +## Errors + +### CloudFormation Errors + +| Error Code | Error Message Pattern | Handling Strategy | +|------------|----------------------|-------------------| +| `AlreadyExistsException` | Stack already exists | Prompt user for different stack name and retry | +| `ValidationError` | Template validation failed | Display error message and exit setup | +| `InsufficientCapabilitiesException` | Requires capabilities | Should not occur (template has no IAM resources) | + +### CloudWatch Logs Errors + +| Error Code | Error Message Pattern | Handling Strategy | +|------------|----------------------|-------------------| +| `InvalidParameterException` | "Query's end date and time" | Date range is out of bounds; inform user and adjust dates | +| `ResourceNotFoundException` | Log group not found | Verify log group exists; prompt user to run setup | +| `LimitExceededException` | Too many concurrent queries | Wait and retry after 5 seconds | +| `ServiceUnavailableException` | Service temporarily unavailable | Retry with exponential backoff (max 3 retries) | + +### Script Execution Errors + +| Error Type | Handling Strategy | +|------------|-------------------| +| Script not found | Display error message; provide manual instructions | +| Script execution failed | Display error output; allow user to retry or skip | +| Permission denied | Suggest making script executable (`chmod +x`) | +| AWS CLI not available | Inform user AWS CLI is required for bash scripts; suggest Python alternative | + +--- + +## User Input Variables + +### Required Variables + +| Variable Name | Description | Type | Default | Validation | +|--------------|-------------|------|---------|------------| +| `stackName` | CloudFormation stack name | String | "CloudWatchLargeQueryStack" | Must match pattern: `[a-zA-Z][-a-zA-Z0-9]*` | +| `queryStartDate` | Query start timestamp | Long/Integer | From script output | Milliseconds since epoch | +| `queryEndDate` | Query end timestamp | Long/Integer | 
From script output | Milliseconds since epoch | +| `queryLimit` | Maximum results per query | Integer | 10000 | Min: 1, Max: 10000 | + +### Optional Variables + +| Variable Name | Description | Type | Default | +|--------------|-------------|------|---------| +| `logGroupName` | Log group name (if not using stack) | String | "/workflows/cloudwatch-logs/large-query" | +| `logStreamName` | Log stream name (if not using stack) | String | "stream1" | + --- ## Metadata diff --git a/steering_docs/dotnet-tech/scenario.md b/steering_docs/dotnet-tech/scenario.md new file mode 100644 index 00000000000..271fac811af --- /dev/null +++ b/steering_docs/dotnet-tech/scenario.md @@ -0,0 +1,898 @@ +# .NET Feature Scenario Generation + +## Purpose +Generate feature scenarios that demonstrate complete workflows using multiple service operations in a guided, educational manner. Implementation must be based on the service SPECIFICATION.md file. + +## Requirements +- **Specification-Driven**: MUST read the `scenarios/features/{service_feature}/SPECIFICATION.md` +- **Interactive**: Use Console.WriteLine and Console.ReadLine for user input and guidance +- **Educational**: Break complex workflows into logical phases +- **Comprehensive**: Cover setup, demonstration, examination, and cleanup +- **Error Handling**: Graceful error handling with user-friendly messages +- **Wrapper Classes**: MUST use service wrapper classes for all operations +- **CloudFormation**: Deploy resources using CloudFormation stacks when specified +- **Namespaces**: MUST use file-level namespaces that match the project names +- **Using Statements**: MUST cleanup unused using statements + +## Project Structure + +Feature scenarios use a multi-project structure with separate projects for actions, scenarios, and tests: + +``` +dotnetv3/{Service}/ +├── {Service}.sln # Solution file +├── Actions/ +│ ├── {Service}Wrapper.cs # Wrapper class for service operations +│ ├── Hello{Service}.cs # Hello world example (optional) +│ └── 
{Service}Actions.csproj # Actions project file +├── Scenarios/ +│ ├── {Service}Workflow.cs # Main workflow/scenario file +│ ├── README.md # Scenario documentation +│ └── {Service}Scenario.csproj # Scenario project file (references Actions) +└── Tests/ + ├── {Service}WorkflowTests.cs # Unit tests for workflow + ├── Usings.cs # Global usings for tests + └── {Service}Tests.csproj # Test project file (references Scenarios) +``` + +## MANDATORY Pre-Implementation Steps + +### Step 1: Read Scenario Specification +**CRITICAL**: Always read `scenarios/features/{servicefeature}/SPECIFICATION.md` first to understand: +- **API Actions Used**: Exact operations to implement +- **Proposed Example Structure**: Setup, demonstration, examination, cleanup phases +- **Error Handling**: Specific error codes and handling requirements +- **Scenario Flow**: Step-by-step scenario description + +### Step 2: Extract Implementation Requirements +From the specification, identify: +- **Setup Phase**: What resources need to be created/configured +- **Demonstration Phase**: What operations to demonstrate +- **Examination Phase**: What data to display and how to filter/analyze +- **Cleanup Phase**: What resources to clean up and user options + +## Workflow Class Pattern + +### Implementation Pattern Based on SPECIFICATION.md + +```csharp +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +// snippet-start:[{Service}.dotnetv3.{Service}Workflow] +using Amazon.{Service}; +using Amazon.CloudFormation; +using Amazon.CloudFormation.Model; +using Microsoft.Extensions.DependencyInjection; +using Microsoft.Extensions.Hosting; +using Microsoft.Extensions.Logging; +using {Service}Actions; + +namespace {Service}Scenario; + +public class {Service}Workflow +{ + /* + Before running this .NET code example, set up your development environment, including your credentials. 
+ This .NET code example performs the following tasks for the {AWS Service} workflow: + + 1. Prepare the Application: + - {Setup step 1 from specification} + - {Setup step 2 from specification} + - Deploy the Cloud Formation template for resource creation. + - Store the outputs of the stack into variables for use in the scenario. + + 2. {Phase 2 Name}: + - {Phase 2 description from specification} + + 3. {Phase 3 Name}: + - {Phase 3 description from specification} + + 4. Clean up: + - Prompt the user for y/n answer if they want to destroy the stack and clean up all resources. + - Delete resources created during the workflow. + - Destroy the Cloud Formation stack and wait until the stack has been removed. + */ + + public static ILogger<{Service}Workflow> _logger = null!; + public static {Service}Wrapper _wrapper = null!; + public static IAmazonCloudFormation _amazonCloudFormation = null!; + + private static string _roleArn = null!; + private static string _targetArn = null!; + + public static bool _interactive = true; + private static string _stackName = "default-{service}-scenario-stack-name"; + private static string _stackResourcePath = "../../../../../../scenarios/features/{service_feature}/resources/cfn_template.yaml"; + + public static async Task Main(string[] args) + { + using var host = Host.CreateDefaultBuilder(args) + .ConfigureLogging(logging => + logging.AddFilter("System", LogLevel.Debug) + .AddFilter("Microsoft", LogLevel.Information) + .AddFilter("Microsoft", LogLevel.Trace)) + .ConfigureServices((_, services) => + services.AddAWSService<IAmazon{Service}>() + .AddAWSService<IAmazonCloudFormation>() + .AddTransient<{Service}Wrapper>() + ) + .Build(); + + if (_interactive) + { + _logger = LoggerFactory.Create(builder => { builder.AddConsole(); }) + .CreateLogger<{Service}Workflow>(); + + _wrapper = host.Services.GetRequiredService<{Service}Wrapper>(); + _amazonCloudFormation = host.Services.GetRequiredService<IAmazonCloudFormation>(); + } + + Console.WriteLine(new string('-', 80)); + Console.WriteLine("Welcome to the 
{AWS Service Feature} Scenario."); + Console.WriteLine(new string('-', 80)); + + try + { + Console.WriteLine(new string('-', 80)); + var prepareSuccess = await PrepareApplication(); + Console.WriteLine(new string('-', 80)); + + if (prepareSuccess) + { + Console.WriteLine(new string('-', 80)); + await Phase2(); + Console.WriteLine(new string('-', 80)); + + Console.WriteLine(new string('-', 80)); + await Phase3(); + Console.WriteLine(new string('-', 80)); + } + + Console.WriteLine(new string('-', 80)); + await Cleanup(); + Console.WriteLine(new string('-', 80)); + } + catch (Exception ex) + { + _logger.LogError(ex, "There was a problem with the scenario, initiating cleanup..."); + _interactive = false; + await Cleanup(); + } + + Console.WriteLine("{AWS Service} scenario completed."); + } + + /// + /// Prepares the application by creating the necessary resources. + /// + /// True if the application was prepared successfully. + public static async Task PrepareApplication() + { + Console.WriteLine("Preparing the application..."); + try + { + // Prompt the user for required input (e.g., email, parameters) + Console.WriteLine("\nThis example creates resources in a CloudFormation stack."); + + var userInput = PromptUserForInput(); + + // Prompt the user for a name for the CloudFormation stack + _stackName = PromptUserForStackName(); + + // Deploy the CloudFormation stack + var deploySuccess = await DeployCloudFormationStack(_stackName, userInput); + + if (deploySuccess) + { + // Create additional resources if needed + Console.WriteLine("Application preparation complete."); + return true; + } + } + catch (Exception ex) + { + _logger.LogError(ex, "An error occurred while preparing the application."); + } + Console.WriteLine("Application preparation failed."); + return false; + } + + /// + /// Deploys the CloudFormation stack with the necessary resources. + /// + /// The name of the CloudFormation stack. + /// Parameter value for the stack. 
+ /// True if the stack was deployed successfully. + private static async Task DeployCloudFormationStack(string stackName, string parameter) + { + Console.WriteLine($"\nDeploying CloudFormation stack: {stackName}"); + + try + { + var request = new CreateStackRequest + { + StackName = stackName, + TemplateBody = await File.ReadAllTextAsync(_stackResourcePath), + Capabilities = { Capability.CAPABILITY_NAMED_IAM } + }; + + // If parameters are provided, set them + if (!string.IsNullOrWhiteSpace(parameter)) + { + request.Parameters = new List() + { + new() { ParameterKey = "parameterName", ParameterValue = parameter } + }; + } + + var response = await _amazonCloudFormation.CreateStackAsync(request); + + if (response.HttpStatusCode == System.Net.HttpStatusCode.OK) + { + Console.WriteLine($"CloudFormation stack creation started: {stackName}"); + + // Wait for the stack to be in CREATE_COMPLETE state + bool stackCreated = await WaitForStackCompletion(response.StackId); + + if (stackCreated) + { + // Retrieve the output values + var success = await GetStackOutputs(response.StackId); + return success; + } + else + { + _logger.LogError($"CloudFormation stack creation failed: {stackName}"); + return false; + } + } + else + { + _logger.LogError($"Failed to create CloudFormation stack: {stackName}"); + return false; + } + } + catch (AlreadyExistsException) + { + _logger.LogWarning($"CloudFormation stack '{stackName}' already exists. Please provide a unique name."); + var newStackName = PromptUserForStackName(); + return await DeployCloudFormationStack(newStackName, parameter); + } + catch (Exception ex) + { + _logger.LogError(ex, $"An error occurred while deploying the CloudFormation stack: {stackName}"); + return false; + } + } + + /// + /// Waits for the CloudFormation stack to be in the CREATE_COMPLETE state. + /// + /// The ID of the CloudFormation stack. + /// True if the stack was created successfully. 
+ private static async Task WaitForStackCompletion(string stackId) + { + int retryCount = 0; + const int maxRetries = 10; + const int retryDelay = 30000; // 30 seconds. + + while (retryCount < maxRetries) + { + var describeStacksRequest = new DescribeStacksRequest + { + StackName = stackId + }; + + var describeStacksResponse = await _amazonCloudFormation.DescribeStacksAsync(describeStacksRequest); + + if (describeStacksResponse.Stacks.Count > 0) + { + if (describeStacksResponse.Stacks[0].StackStatus == StackStatus.CREATE_COMPLETE) + { + Console.WriteLine("CloudFormation stack creation complete."); + return true; + } + if (describeStacksResponse.Stacks[0].StackStatus == StackStatus.CREATE_FAILED || + describeStacksResponse.Stacks[0].StackStatus == StackStatus.ROLLBACK_COMPLETE) + { + Console.WriteLine("CloudFormation stack creation failed."); + return false; + } + } + + Console.WriteLine("Waiting for CloudFormation stack creation to complete..."); + await Task.Delay(retryDelay); + retryCount++; + } + + _logger.LogError("Timed out waiting for CloudFormation stack creation to complete."); + return false; + } + + /// + /// Retrieves the output values from the CloudFormation stack. + /// + /// The ID of the CloudFormation stack. 
+ private static async Task GetStackOutputs(string stackId) + { + try + { + var describeStacksRequest = new DescribeStacksRequest { StackName = stackId }; + + var describeStacksResponse = + await _amazonCloudFormation.DescribeStacksAsync(describeStacksRequest); + + if (describeStacksResponse.Stacks.Count > 0) + { + var stack = describeStacksResponse.Stacks[0]; + _roleArn = GetStackOutputValue(stack, "RoleARN"); + _targetArn = GetStackOutputValue(stack, "TargetARN"); + return true; + } + else + { + _logger.LogError($"No stack found for stack outputs: {stackId}"); + return false; + } + } + catch (Exception ex) + { + _logger.LogError( + ex, $"Failed to retrieve CloudFormation stack outputs: {stackId}"); + return false; + } + } + + /// + /// Get an output value by key from a CloudFormation stack. + /// + /// The CloudFormation stack. + /// The key of the output. + /// The value as a string. + private static string GetStackOutputValue(Stack stack, string outputKey) + { + var output = stack.Outputs.First(o => o.OutputKey == outputKey); + var outputValue = output.OutputValue; + Console.WriteLine($"Stack output {outputKey}: {outputValue}"); + return outputValue; + } + + /// + /// Cleans up the resources created during the scenario. + /// + /// True if the cleanup was successful. + public static async Task Cleanup() + { + // Prompt the user to confirm cleanup. + var cleanup = !_interactive || GetYesNoResponse( + "Do you want to delete all resources created by this scenario? (y/n) "); + if (cleanup) + { + try + { + // Delete scenario-specific resources first + + // Destroy the CloudFormation stack and wait for it to be removed. 
+ var stackDeleteSuccess = await DeleteCloudFormationStack(_stackName, false); + + return stackDeleteSuccess; + } + catch (Exception ex) + { + _logger.LogError(ex, + "An error occurred while cleaning up the resources."); + return false; + } + } + _logger.LogInformation("{Service} scenario is complete."); + return true; + } + + /// + /// Delete the resources in the stack and wait for confirmation. + /// + /// The name of the stack. + /// True to force delete the stack. + /// True if successful. + private static async Task DeleteCloudFormationStack(string stackName, bool forceDelete) + { + var request = new DeleteStackRequest + { + StackName = stackName, + }; + + if (forceDelete) + { + request.DeletionMode = DeletionMode.FORCE_DELETE_STACK; + } + + await _amazonCloudFormation.DeleteStackAsync(request); + Console.WriteLine($"CloudFormation stack '{_stackName}' is being deleted. This may take a few minutes."); + + bool stackDeleted = await WaitForStackDeletion(_stackName, forceDelete); + + if (stackDeleted) + { + Console.WriteLine($"CloudFormation stack '{_stackName}' has been deleted."); + return true; + } + else + { + _logger.LogError($"Failed to delete CloudFormation stack '{_stackName}'."); + return false; + } + } + + /// + /// Wait for the stack to be deleted. + /// + /// The name of the stack. + /// True to force delete the stack. + /// True if successful. 
+ private static async Task WaitForStackDeletion(string stackName, bool forceDelete) + { + int retryCount = 0; + const int maxRetries = 10; + const int retryDelay = 30000; // 30 seconds + + while (retryCount < maxRetries) + { + var describeStacksRequest = new DescribeStacksRequest + { + StackName = stackName + }; + + try + { + var describeStacksResponse = await _amazonCloudFormation.DescribeStacksAsync(describeStacksRequest); + + if (describeStacksResponse.Stacks.Count == 0 || describeStacksResponse.Stacks[0].StackStatus == StackStatus.DELETE_COMPLETE) + { + return true; + } + if (!forceDelete && describeStacksResponse.Stacks[0].StackStatus == StackStatus.DELETE_FAILED) + { + // Try one time to force delete. + return await DeleteCloudFormationStack(stackName, true); + } + } + catch (AmazonCloudFormationException ex) when (ex.ErrorCode == "ValidationError") + { + // Stack does not exist, so it has been successfully deleted. + return true; + } + + Console.WriteLine($"Waiting for CloudFormation stack '{stackName}' to be deleted..."); + await Task.Delay(retryDelay); + retryCount++; + } + + _logger.LogError($"Timed out waiting for CloudFormation stack '{stackName}' to be deleted."); + return false; + } + + /// + /// Helper method to get a yes or no response from the user. + /// + /// The question string to print on the console. + /// True if the user responds with a yes. + private static bool GetYesNoResponse(string question) + { + Console.WriteLine(question); + var ynResponse = Console.ReadLine(); + var response = ynResponse != null && ynResponse.Equals("y", StringComparison.InvariantCultureIgnoreCase); + return response; + } + + /// + /// Prompt the user for a non-empty stack name. 
+ /// + /// The valid stack name + private static string PromptUserForStackName() + { + Console.WriteLine("Enter a name for the AWS Cloud Formation Stack: "); + if (_interactive) + { + string stackName = Console.ReadLine()!; + var regex = "[a-zA-Z][-a-zA-Z0-9]|arn:[-a-zA-Z0-9:/._+]"; + if (!Regex.IsMatch(stackName, regex)) + { + Console.WriteLine( + $"Invalid stack name. Please use a name that matches the pattern {regex}."); + return PromptUserForStackName(); + } + + return stackName; + } + // Used when running without user prompts. + return _stackName; + } + + /// + /// Prompt the user for required input. + /// + /// The user input value + private static string PromptUserForInput() + { + if (_interactive) + { + Console.WriteLine("Enter required input: "); + string input = Console.ReadLine()!; + // Add validation as needed + return input; + } + // Used when running without user prompts. + return ""; + } +} +// snippet-end:[{Service}.dotnetv3.{Service}Workflow] +``` + +## Project Files + +### Actions Project (.csproj) + +```xml + + + + Exe + net6.0 + enable + enable + + + + + + + + + + +``` + +### Scenarios Project (.csproj) + +```xml + + + + Exe + net6.0 + enable + enable + + + + + + + + + + + + + + +``` + +### Tests Project (.csproj) + +```xml + + + + net6.0 + enable + enable + false + + + + + + + + + + runtime; build; native; contentfiles; analyzers; buildtransitive + all + + + runtime; build; native; contentfiles; analyzers; buildtransitive + all + + + + + + PreserveNewest + testsettings.json + + + + + + + + +``` + +## Workflow Phase Structure (Based on Specification) + +### Prepare Application Phase +- **Read specification Setup section** for exact requirements +- Prompt user for required input (email, parameters, etc.) +- Prompt user for CloudFormation stack name +- Deploy CloudFormation stack with resources +- Wait for stack creation to complete +- Retrieve stack outputs (ARNs, IDs, etc.) +- Create additional resources if needed (schedule groups, etc.) 
+- Verify setup completion + +### Demonstration Phases +- **Follow specification phases** exactly +- Implement each phase as a separate method +- Use wrapper methods for all service operations +- Prompt user for input as specified +- Display progress and results +- Handle errors gracefully +- Allow user to proceed at their own pace + +### Cleanup Phase +- **Follow specification Cleanup section** guidance +- Prompt user to confirm cleanup +- Delete scenario-specific resources first +- Delete CloudFormation stack +- Wait for stack deletion to complete +- Handle deletion errors (retry with force delete if needed) +- Confirm completion + +## CloudFormation Integration + +### Stack Deployment +- Store CloudFormation template path in a constant +- Template should be in `scenarios/features/{service_feature}/resources/cfn_template.yaml` +- Use relative path from Scenarios project: `"../../../../../../scenarios/features/{service_feature}/resources/cfn_template.yaml"` +- Deploy stack with `CAPABILITY_NAMED_IAM` capability +- Pass user input as stack parameters +- Handle `AlreadyExistsException` by prompting for new stack name + +### Stack Output Retrieval +- Retrieve outputs after stack creation completes +- Store output values in static fields for use throughout workflow +- Common outputs: Role ARNs, Topic ARNs, Resource IDs +- Display output values to console for user visibility + +### Stack Deletion +- Delete stack during cleanup phase +- Wait for deletion to complete +- Handle `DELETE_FAILED` status by retrying with force delete +- Catch `ValidationError` exception (indicates stack already deleted) + +## User Interaction Patterns + +### Question Types +```csharp +// Yes/No questions +private static bool GetYesNoResponse(string question) +{ + Console.WriteLine(question); + var ynResponse = Console.ReadLine(); + var response = ynResponse != null && ynResponse.Equals("y", StringComparison.InvariantCultureIgnoreCase); + return response; +} + +// Text input with validation 
+private static string PromptUserForResourceName(string prompt) +{ + if (_interactive) + { + Console.WriteLine(prompt); + string resourceName = Console.ReadLine()!; + var regex = "[0-9a-zA-Z-_.]+"; + if (!Regex.IsMatch(resourceName, regex)) + { + Console.WriteLine($"Invalid resource name. Please use a name that matches the pattern {regex}."); + return PromptUserForResourceName(prompt); + } + return resourceName!; + } + // Used when running without user prompts. + return "resource-" + Guid.NewGuid(); +} + +// Numeric input +private static int PromptUserForInteger(string prompt) +{ + if (_interactive) + { + Console.WriteLine(prompt); + string stringResponse = Console.ReadLine()!; + if (string.IsNullOrWhiteSpace(stringResponse) || + !Int32.TryParse(stringResponse, out var intResponse)) + { + Console.WriteLine($"Invalid integer. "); + return PromptUserForInteger(prompt); + } + return intResponse!; + } + // Used when running without user prompts. + return 1; +} +``` + +### Information Display +```csharp +// Section separators +Console.WriteLine(new string('-', 80)); + +// Progress indicators +Console.WriteLine($"✓ Operation completed successfully"); +Console.WriteLine($"Waiting for operation to complete..."); + +// Formatted output +Console.WriteLine($"Found {count} items:"); +foreach (var item in items) +{ + Console.WriteLine($" - {item}"); +} +``` + +## Wrapper Class Pattern + +### Wrapper Class Structure +```csharp +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +// snippet-start:[{Service}.dotnetv3.{Service}Wrapper] +using Amazon.{Service}; +using Amazon.{Service}.Model; +using Microsoft.Extensions.Logging; + +namespace {Service}Actions; + +/// +/// Wrapper class for {AWS Service} operations. +/// +public class {Service}Wrapper +{ + private readonly IAmazon{Service} _amazon{Service}; + private readonly ILogger<{Service}Wrapper> _logger; + + /// + /// Constructor for the {Service}Wrapper class. 
+ /// + /// The injected {Service} client. + /// The injected logger. + public {Service}Wrapper(IAmazon{Service} amazon{Service}, ILogger<{Service}Wrapper> logger) + { + _amazon{Service} = amazon{Service}; + _logger = logger; + } + + // snippet-start:[{Service}.dotnetv3.OperationName] + /// + /// Description of what this operation does. + /// + /// Description of parameter. + /// Description of return value. + public async Task OperationAsync(string paramName) + { + try + { + var request = new OperationRequest + { + Parameter = paramName + }; + + var response = await _amazon{Service}.OperationAsync(request); + + Console.WriteLine($"Successfully performed operation."); + return true; + } + catch (ConflictException ex) + { + _logger.LogError($"Failed to perform operation due to a conflict. {ex.Message}"); + return false; + } + catch (ResourceNotFoundException ex) + { + _logger.LogError($"Resource not found: {ex.Message}"); + return false; + } + catch (Exception ex) + { + _logger.LogError($"An error occurred: {ex.Message}"); + return false; + } + } + // snippet-end:[{Service}.dotnetv3.OperationName] +} +// snippet-end:[{Service}.dotnetv3.{Service}Wrapper] +``` + +### Wrapper Method Guidelines +- Return `bool` for success/failure operations +- Return specific types for data retrieval operations +- Log errors using injected logger +- Display success messages to console +- Catch specific exceptions first, then general exceptions +- Include XML documentation for all public methods +- Use snippet tags for documentation extraction + +## Error Handling + +### Specification-Based Error Handling +The specification includes an "Errors" section with specific error codes and handling: + +```csharp +// Example error handling based on specification +try +{ + var response = await _wrapper.CreateResourceAsync(); + return response; +} +catch (ConflictException ex) +{ + // Handle as specified: Resource already exists + _logger.LogError($"Failed to create resource due to a conflict. 
{ex.Message}"); + return false; +} +catch (ResourceNotFoundException ex) +{ + // Handle as specified: Resource not found + _logger.LogError($"Resource not found: {ex.Message}"); + return true; // May return true if deletion was the goal +} +catch (Exception ex) +{ + _logger.LogError($"An error occurred: {ex.Message}"); + return false; +} +``` + +### Workflow Error Handling +- Wrap main workflow in try-catch block +- Log errors and initiate cleanup on failure +- Set `_interactive = false` to skip prompts during error cleanup +- Ensure cleanup runs in finally block or after error + +## Feature Scenario Requirements + +### MUST HAVE +- ✅ Read and implement based on `scenarios/features/{service_feature}/SPECIFICATION.md` +- ✅ Use multi-project structure (Actions, Scenarios, Tests) +- ✅ Deploy CloudFormation stack for resource creation +- ✅ Retrieve and use stack outputs +- ✅ Use wrapper classes for all AWS operations +- ✅ Implement proper cleanup with stack deletion +- ✅ Break workflow into logical phases per specification +- ✅ Include error handling per specification +- ✅ Support non-interactive mode for testing +- ✅ Use file-level namespaces +- ✅ Include snippet tags for documentation + +### Implementation Workflow + +1. **Read Specification**: Study `scenarios/features/{service_feature}/SPECIFICATION.md` +2. **Create Project Structure**: Set up Actions, Scenarios, and Tests projects +3. **Implement Wrapper**: Create wrapper class with all required operations +4. **Implement Workflow**: Create workflow class with phases from specification +5. **Add CloudFormation**: Integrate stack deployment and deletion +6. **Add User Interaction**: Implement prompts and validation +7. **Test**: Create unit tests for workflow methods +8. 
**Document**: Add README.md with scenario description + +### Specification Sections to Implement +- **API Actions Used**: All operations must be in wrapper class +- **Proposed example structure**: Maps to workflow phases +- **Setup**: CloudFormation deployment and resource creation +- **Demonstration**: Core service operations +- **Examination**: Data analysis and display +- **Cleanup**: Resource and stack deletion +- **Errors**: Specific error handling strategies \ No newline at end of file From f2c71ae3036b6cc1ad1ae91d9d6dd56574600d4c Mon Sep 17 00:00:00 2001 From: Rachel Hagerman <110480692+rlhagerm@users.noreply.github.com> Date: Thu, 13 Nov 2025 09:52:12 -0600 Subject: [PATCH 09/23] Adding project files --- .../Actions/CloudWatchLogsActions.csproj | 18 + .../Actions/CloudWatchLogsWrapper.cs | 148 ++++ .../LargeQuery/CloudWatchLogsLargeQuery.sln | 31 + dotnetv4/CloudWatchLogs/LargeQuery/README.md | 69 ++ .../Scenarios/CloudWatchLogsScenario.csproj | 22 + .../Scenarios/LargeQueryWorkflow.cs | 662 ++++++++++++++++++ .../LargeQuery/Scenarios/README.md | 109 +++ .../Tests/CloudWatchLogsTests.csproj | 31 + .../Tests/LargeQueryWorkflowTests.cs | 143 ++++ .../CloudWatchLogs/LargeQuery/Tests/Usings.cs | 4 + dotnetv4/CloudWatchLogs/README.md | 35 + .../resources/create_logs.py | 70 ++ steering_docs/dotnet-tech/scenario.md | 33 +- 13 files changed, 1363 insertions(+), 12 deletions(-) create mode 100644 dotnetv4/CloudWatchLogs/LargeQuery/Actions/CloudWatchLogsActions.csproj create mode 100644 dotnetv4/CloudWatchLogs/LargeQuery/Actions/CloudWatchLogsWrapper.cs create mode 100644 dotnetv4/CloudWatchLogs/LargeQuery/CloudWatchLogsLargeQuery.sln create mode 100644 dotnetv4/CloudWatchLogs/LargeQuery/README.md create mode 100644 dotnetv4/CloudWatchLogs/LargeQuery/Scenarios/CloudWatchLogsScenario.csproj create mode 100644 dotnetv4/CloudWatchLogs/LargeQuery/Scenarios/LargeQueryWorkflow.cs create mode 100644 dotnetv4/CloudWatchLogs/LargeQuery/Scenarios/README.md create mode 100644 
dotnetv4/CloudWatchLogs/LargeQuery/Tests/CloudWatchLogsTests.csproj create mode 100644 dotnetv4/CloudWatchLogs/LargeQuery/Tests/LargeQueryWorkflowTests.cs create mode 100644 dotnetv4/CloudWatchLogs/LargeQuery/Tests/Usings.cs create mode 100644 dotnetv4/CloudWatchLogs/README.md create mode 100644 scenarios/features/cloudwatch_logs_large_query/resources/create_logs.py diff --git a/dotnetv4/CloudWatchLogs/LargeQuery/Actions/CloudWatchLogsActions.csproj b/dotnetv4/CloudWatchLogs/LargeQuery/Actions/CloudWatchLogsActions.csproj new file mode 100644 index 00000000000..3aa4085c546 --- /dev/null +++ b/dotnetv4/CloudWatchLogs/LargeQuery/Actions/CloudWatchLogsActions.csproj @@ -0,0 +1,18 @@ + + + + Exe + net8.0 + enable + enable + + + + + + + + + + + diff --git a/dotnetv4/CloudWatchLogs/LargeQuery/Actions/CloudWatchLogsWrapper.cs b/dotnetv4/CloudWatchLogs/LargeQuery/Actions/CloudWatchLogsWrapper.cs new file mode 100644 index 00000000000..98e5c1fcc14 --- /dev/null +++ b/dotnetv4/CloudWatchLogs/LargeQuery/Actions/CloudWatchLogsWrapper.cs @@ -0,0 +1,148 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +// snippet-start:[CloudWatchLogs.dotnetv3.CloudWatchLogsWrapper] +using Amazon.CloudWatchLogs; +using Amazon.CloudWatchLogs.Model; +using Microsoft.Extensions.Logging; + +namespace CloudWatchLogsActions; + +/// +/// Wrapper class for Amazon CloudWatch Logs operations. +/// +public class CloudWatchLogsWrapper +{ + private readonly IAmazonCloudWatchLogs _amazonCloudWatchLogs; + private readonly ILogger _logger; + + /// + /// Constructor for the CloudWatchLogsWrapper class. + /// + /// The injected CloudWatch Logs client. + /// The injected logger. 
+ public CloudWatchLogsWrapper(IAmazonCloudWatchLogs amazonCloudWatchLogs, ILogger logger) + { + _amazonCloudWatchLogs = amazonCloudWatchLogs; + _logger = logger; + } + + // snippet-start:[CloudWatchLogs.dotnetv3.StartQuery] + /// + /// Starts a CloudWatch Logs Insights query. + /// + /// The name of the log group to query. + /// The CloudWatch Logs Insights query string. + /// The start time for the query (seconds since epoch). + /// The end time for the query (seconds since epoch). + /// The maximum number of results to return. + /// The query ID if successful, null otherwise. + public async Task StartQueryAsync( + string logGroupName, + string queryString, + long startTime, + long endTime, + int limit = 10000) + { + try + { + var request = new StartQueryRequest + { + LogGroupName = logGroupName, + QueryString = queryString, + StartTime = startTime, + EndTime = endTime, + Limit = limit + }; + + var response = await _amazonCloudWatchLogs.StartQueryAsync(request); + return response.QueryId; + } + catch (InvalidParameterException ex) + { + _logger.LogError($"Invalid parameter for query: {ex.Message}"); + return null; + } + catch (ResourceNotFoundException ex) + { + _logger.LogError($"Log group not found: {ex.Message}"); + return null; + } + catch (Exception ex) + { + _logger.LogError($"An error occurred while starting query: {ex.Message}"); + return null; + } + } + // snippet-end:[CloudWatchLogs.dotnetv3.StartQuery] + + // snippet-start:[CloudWatchLogs.dotnetv3.GetQueryResults] + /// + /// Gets the results of a CloudWatch Logs Insights query. + /// + /// The ID of the query. + /// The query results response. 
+ public async Task GetQueryResultsAsync(string queryId) + { + try + { + var request = new GetQueryResultsRequest + { + QueryId = queryId + }; + + var response = await _amazonCloudWatchLogs.GetQueryResultsAsync(request); + return response; + } + catch (ResourceNotFoundException ex) + { + _logger.LogError($"Query not found: {ex.Message}"); + return null; + } + catch (Exception ex) + { + _logger.LogError($"An error occurred while getting query results: {ex.Message}"); + return null; + } + } + // snippet-end:[CloudWatchLogs.dotnetv3.GetQueryResults] + + // snippet-start:[CloudWatchLogs.dotnetv3.PutLogEvents] + /// + /// Puts log events to a CloudWatch Logs log stream. + /// + /// The name of the log group. + /// The name of the log stream. + /// The list of log events to put. + /// True if successful, false otherwise. + public async Task PutLogEventsAsync( + string logGroupName, + string logStreamName, + List logEvents) + { + try + { + var request = new PutLogEventsRequest + { + LogGroupName = logGroupName, + LogStreamName = logStreamName, + LogEvents = logEvents + }; + + await _amazonCloudWatchLogs.PutLogEventsAsync(request); + return true; + } + catch (ResourceNotFoundException ex) + { + _logger.LogError($"Log group or stream not found: {ex.Message}"); + return false; + } + catch (Exception ex) + { + _logger.LogError($"An error occurred while putting log events: {ex.Message}"); + return false; + } + } + // snippet-end:[CloudWatchLogs.dotnetv3.PutLogEvents] +} +// snippet-end:[CloudWatchLogs.dotnetv3.CloudWatchLogsWrapper] diff --git a/dotnetv4/CloudWatchLogs/LargeQuery/CloudWatchLogsLargeQuery.sln b/dotnetv4/CloudWatchLogs/LargeQuery/CloudWatchLogsLargeQuery.sln new file mode 100644 index 00000000000..eb27a092342 --- /dev/null +++ b/dotnetv4/CloudWatchLogs/LargeQuery/CloudWatchLogsLargeQuery.sln @@ -0,0 +1,31 @@ + +Microsoft Visual Studio Solution File, Format Version 12.00 +# Visual Studio Version 17 +VisualStudioVersion = 17.0.31903.59 +MinimumVisualStudioVersion 
= 10.0.40219.1 +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "CloudWatchLogsActions", "Actions\CloudWatchLogsActions.csproj", "{A1B2C3D4-E5F6-4A5B-8C9D-0E1F2A3B4C5D}" +EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "CloudWatchLogsScenario", "Scenarios\CloudWatchLogsScenario.csproj", "{B2C3D4E5-F6A7-5B6C-9D0E-1F2A3B4C5D6E}" +EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "CloudWatchLogsTests", "Tests\CloudWatchLogsTests.csproj", "{C3D4E5F6-A7B8-6C7D-0E1F-2A3B4C5D6E7F}" +EndProject +Global + GlobalSection(SolutionConfigurationPlatforms) = preSolution + Debug|Any CPU = Debug|Any CPU + Release|Any CPU = Release|Any CPU + EndGlobalSection + GlobalSection(ProjectConfigurationPlatforms) = postSolution + {A1B2C3D4-E5F6-4A5B-8C9D-0E1F2A3B4C5D}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {A1B2C3D4-E5F6-4A5B-8C9D-0E1F2A3B4C5D}.Debug|Any CPU.Build.0 = Debug|Any CPU + {A1B2C3D4-E5F6-4A5B-8C9D-0E1F2A3B4C5D}.Release|Any CPU.ActiveCfg = Release|Any CPU + {A1B2C3D4-E5F6-4A5B-8C9D-0E1F2A3B4C5D}.Release|Any CPU.Build.0 = Release|Any CPU + {B2C3D4E5-F6A7-5B6C-9D0E-1F2A3B4C5D6E}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {B2C3D4E5-F6A7-5B6C-9D0E-1F2A3B4C5D6E}.Debug|Any CPU.Build.0 = Debug|Any CPU + {B2C3D4E5-F6A7-5B6C-9D0E-1F2A3B4C5D6E}.Release|Any CPU.ActiveCfg = Release|Any CPU + {B2C3D4E5-F6A7-5B6C-9D0E-1F2A3B4C5D6E}.Release|Any CPU.Build.0 = Release|Any CPU + {C3D4E5F6-A7B8-6C7D-0E1F-2A3B4C5D6E7F}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {C3D4E5F6-A7B8-6C7D-0E1F-2A3B4C5D6E7F}.Debug|Any CPU.Build.0 = Debug|Any CPU + {C3D4E5F6-A7B8-6C7D-0E1F-2A3B4C5D6E7F}.Release|Any CPU.ActiveCfg = Release|Any CPU + {C3D4E5F6-A7B8-6C7D-0E1F-2A3B4C5D6E7F}.Release|Any CPU.Build.0 = Release|Any CPU + EndGlobalSection +EndGlobal diff --git a/dotnetv4/CloudWatchLogs/LargeQuery/README.md b/dotnetv4/CloudWatchLogs/LargeQuery/README.md new file mode 100644 index 00000000000..ccb87751d1d --- /dev/null +++ b/dotnetv4/CloudWatchLogs/LargeQuery/README.md @@ -0,0 +1,69 @@ 
+# CloudWatch Logs Large Query Example + +This folder contains a .NET feature scenario that demonstrates how to perform large-scale queries on Amazon CloudWatch Logs using recursive binary search to retrieve more than the 10,000 result limit. + +## Project Structure + +``` +LargeQuery/ +├── Actions/ +│ ├── CloudWatchLogsWrapper.cs # Wrapper class for CloudWatch Logs operations +│ └── CloudWatchLogsActions.csproj # Actions project file +├── Scenarios/ +│ ├── LargeQueryWorkflow.cs # Main workflow implementation +│ ├── README.md # Detailed scenario documentation +│ └── CloudWatchLogsScenario.csproj # Scenario project file +├── Tests/ +│ ├── LargeQueryWorkflowTests.cs # Unit tests +│ ├── Usings.cs # Global usings +│ └── CloudWatchLogsTests.csproj # Test project file +└── CloudWatchLogsLargeQuery.sln # Solution file +``` + +## What This Example Demonstrates + +- Deploying AWS resources using CloudFormation +- Generating and ingesting large volumes of log data +- Performing CloudWatch Logs Insights queries +- Using recursive binary search to retrieve more than 10,000 results +- Cleaning up resources after completion + +## Running the Example + +1. Navigate to the solution directory: + ``` + cd dotnetv4/CloudWatchLogs/LargeQuery + ``` + +2. Build the solution: + ``` + dotnet build + ``` + +3. Run the scenario: + ``` + dotnet run --project Scenarios/CloudWatchLogsScenario.csproj + ``` + +4. Run the tests: + ``` + dotnet test + ``` + +## Prerequisites + +- .NET 8.0 or later +- AWS credentials configured +- Python 3.x (for log generation) +- Permissions for CloudWatch Logs and CloudFormation + +## Related Resources + +- [CloudWatch Logs Documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/) +- [CloudWatch Logs Insights Query Syntax](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CWL_QuerySyntax.html) +- [AWS SDK for .NET](https://aws.amazon.com/sdk-for-net/) + +--- + +Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+SPDX-License-Identifier: Apache-2.0 diff --git a/dotnetv4/CloudWatchLogs/LargeQuery/Scenarios/CloudWatchLogsScenario.csproj b/dotnetv4/CloudWatchLogs/LargeQuery/Scenarios/CloudWatchLogsScenario.csproj new file mode 100644 index 00000000000..1047d133f69 --- /dev/null +++ b/dotnetv4/CloudWatchLogs/LargeQuery/Scenarios/CloudWatchLogsScenario.csproj @@ -0,0 +1,22 @@ + + + + Exe + net8.0 + enable + enable + + + + + + + + + + + + + + + diff --git a/dotnetv4/CloudWatchLogs/LargeQuery/Scenarios/LargeQueryWorkflow.cs b/dotnetv4/CloudWatchLogs/LargeQuery/Scenarios/LargeQueryWorkflow.cs new file mode 100644 index 00000000000..9d3ea05bfe8 --- /dev/null +++ b/dotnetv4/CloudWatchLogs/LargeQuery/Scenarios/LargeQueryWorkflow.cs @@ -0,0 +1,662 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +// snippet-start:[CloudWatchLogs.dotnetv3.LargeQueryWorkflow] +using System.Diagnostics; +using System.Text.RegularExpressions; +using Amazon.CloudFormation; +using Amazon.CloudFormation.Model; +using Amazon.CloudWatchLogs; +using Amazon.CloudWatchLogs.Model; +using CloudWatchLogsActions; +using Microsoft.Extensions.DependencyInjection; +using Microsoft.Extensions.Hosting; +using Microsoft.Extensions.Logging; + +namespace CloudWatchLogsScenario; + +public class LargeQueryWorkflow +{ + /* + Before running this .NET code example, set up your development environment, including your credentials. + This .NET code example performs the following tasks for the CloudWatch Logs Large Query workflow: + + 1. Prepare the Application: + - Prompt the user to deploy CloudFormation stack and generate sample logs. + - Deploy the CloudFormation template for resource creation. + - Generate 50,000 sample log entries using a Python script. + - Wait 5 minutes for logs to be fully ingested. + + 2. Execute Large Query: + - Perform recursive queries to retrieve all logs using binary search. + - Display progress for each query executed. 
+ - Show total execution time and logs found. + + 3. Clean up: + - Prompt the user to delete the CloudFormation stack and all resources. + - Destroy the CloudFormation stack and wait until removed. + */ + + public static ILogger _logger = null!; + public static CloudWatchLogsWrapper _wrapper = null!; + public static IAmazonCloudFormation _amazonCloudFormation = null!; + + private static string _logGroupName = "/workflows/cloudwatch-logs/large-query"; + private static string _logStreamName = "stream1"; + private static long _queryStartDate; + private static long _queryEndDate; + + public static bool _interactive = true; + private static string _stackName = "CloudWatchLargeQueryStack"; + private static string _stackResourcePath = "../../../../../../scenarios/features/cloudwatch_logs_large_query/resources/stack.yaml"; + private static string _pythonScriptPath = "../../../../../../scenarios/features/cloudwatch_logs_large_query/resources/create_logs.py"; + + public static async Task Main(string[] args) + { + using var host = Host.CreateDefaultBuilder(args) + .ConfigureLogging(logging => + logging.AddFilter("System", LogLevel.Debug) + .AddFilter("Microsoft", LogLevel.Information)) + .ConfigureServices((_, services) => + services.AddAWSService() + .AddAWSService() + .AddTransient() + ) + .Build(); + + if (_interactive) + { + _logger = LoggerFactory.Create(builder => { builder.AddConsole(); }) + .CreateLogger(); + + _wrapper = host.Services.GetRequiredService(); + _amazonCloudFormation = host.Services.GetRequiredService(); + } + + Console.WriteLine(new string('-', 80)); + Console.WriteLine("Welcome to the CloudWatch Logs Large Query Scenario."); + Console.WriteLine(new string('-', 80)); + Console.WriteLine("This scenario demonstrates how to perform large-scale queries on"); + Console.WriteLine("CloudWatch Logs using recursive binary search to retrieve more than"); + Console.WriteLine("the 10,000 result limit."); + Console.WriteLine(); + + try + { + Console.WriteLine(new 
string('-', 80)); + var prepareSuccess = await PrepareApplication(); + Console.WriteLine(new string('-', 80)); + + if (prepareSuccess) + { + Console.WriteLine(new string('-', 80)); + await ExecuteLargeQuery(); + Console.WriteLine(new string('-', 80)); + } + + Console.WriteLine(new string('-', 80)); + await Cleanup(); + Console.WriteLine(new string('-', 80)); + } + catch (Exception ex) + { + _logger.LogError(ex, "There was a problem with the scenario, initiating cleanup..."); + _interactive = false; + await Cleanup(); + } + + Console.WriteLine("CloudWatch Logs Large Query scenario completed."); + } + + /// + /// Prepares the application by creating the necessary resources. + /// + /// True if the application was prepared successfully. + public static async Task PrepareApplication() + { + Console.WriteLine("Preparing the application..."); + Console.WriteLine(); + + try + { + var deployStack = !_interactive || GetYesNoResponse( + "Would you like to deploy the CloudFormation stack and generate sample logs? (y/n) "); + + if (deployStack) + { + _stackName = PromptUserForStackName(); + + var deploySuccess = await DeployCloudFormationStack(_stackName); + + if (deploySuccess) + { + Console.WriteLine(); + Console.WriteLine("Generating 50,000 sample log entries..."); + var generateSuccess = await GenerateSampleLogs(); + + if (generateSuccess) + { + Console.WriteLine(); + Console.WriteLine("Sample logs created. 
Waiting 5 minutes for logs to be fully ingested..."); + await WaitWithCountdown(300); + + Console.WriteLine("Application preparation complete."); + return true; + } + } + } + else + { + _logGroupName = PromptUserForInput("Enter the log group name: ", _logGroupName); + _logStreamName = PromptUserForInput("Enter the log stream name: ", _logStreamName); + + var startDateMs = PromptUserForLong("Enter the query start date (milliseconds since epoch): "); + var endDateMs = PromptUserForLong("Enter the query end date (milliseconds since epoch): "); + + _queryStartDate = startDateMs / 1000; + _queryEndDate = endDateMs / 1000; + + Console.WriteLine("Application preparation complete."); + return true; + } + } + catch (Exception ex) + { + _logger.LogError(ex, "An error occurred while preparing the application."); + } + + Console.WriteLine("Application preparation failed."); + return false; + } + + /// + /// Deploys the CloudFormation stack with the necessary resources. + /// + /// The name of the CloudFormation stack. + /// True if the stack was deployed successfully. 
+ private static async Task DeployCloudFormationStack(string stackName) + { + Console.WriteLine($"\nDeploying CloudFormation stack: {stackName}"); + + try + { + var request = new CreateStackRequest + { + StackName = stackName, + TemplateBody = await File.ReadAllTextAsync(_stackResourcePath) + }; + + var response = await _amazonCloudFormation.CreateStackAsync(request); + + if (response.HttpStatusCode == System.Net.HttpStatusCode.OK) + { + Console.WriteLine($"CloudFormation stack creation started: {stackName}"); + + bool stackCreated = await WaitForStackCompletion(response.StackId); + + if (stackCreated) + { + Console.WriteLine("CloudFormation stack created successfully."); + return true; + } + else + { + _logger.LogError($"CloudFormation stack creation failed: {stackName}"); + return false; + } + } + else + { + _logger.LogError($"Failed to create CloudFormation stack: {stackName}"); + return false; + } + } + catch (AlreadyExistsException) + { + _logger.LogWarning($"CloudFormation stack '{stackName}' already exists. Please provide a unique name."); + var newStackName = PromptUserForStackName(); + return await DeployCloudFormationStack(newStackName); + } + catch (Exception ex) + { + _logger.LogError(ex, $"An error occurred while deploying the CloudFormation stack: {stackName}"); + return false; + } + } + + /// + /// Waits for the CloudFormation stack to be in the CREATE_COMPLETE state. + /// + /// The ID of the CloudFormation stack. + /// True if the stack was created successfully. 
+ private static async Task WaitForStackCompletion(string stackId) + { + int retryCount = 0; + const int maxRetries = 30; + const int retryDelay = 10000; + + while (retryCount < maxRetries) + { + var describeStacksRequest = new DescribeStacksRequest + { + StackName = stackId + }; + + var describeStacksResponse = await _amazonCloudFormation.DescribeStacksAsync(describeStacksRequest); + + if (describeStacksResponse.Stacks.Count > 0) + { + if (describeStacksResponse.Stacks[0].StackStatus == StackStatus.CREATE_COMPLETE) + { + return true; + } + if (describeStacksResponse.Stacks[0].StackStatus == StackStatus.CREATE_FAILED || + describeStacksResponse.Stacks[0].StackStatus == StackStatus.ROLLBACK_COMPLETE) + { + return false; + } + } + + Console.WriteLine("Waiting for CloudFormation stack creation to complete..."); + await Task.Delay(retryDelay); + retryCount++; + } + + _logger.LogError("Timed out waiting for CloudFormation stack creation to complete."); + return false; + } + + /// + /// Generates sample logs using a Python script. + /// + /// True if logs were generated successfully. 
+ private static async Task GenerateSampleLogs() + { + try + { + if (!File.Exists(_pythonScriptPath)) + { + _logger.LogError($"Python script not found at: {_pythonScriptPath}"); + Console.WriteLine("Please run the script manually from:"); + Console.WriteLine($" {_pythonScriptPath}"); + return false; + } + + var processStartInfo = new ProcessStartInfo + { + FileName = "python", + Arguments = _pythonScriptPath, + RedirectStandardOutput = true, + RedirectStandardError = true, + UseShellExecute = false, + CreateNoWindow = true + }; + + using var process = Process.Start(processStartInfo); + if (process == null) + { + _logger.LogError("Failed to start Python process."); + return false; + } + + var output = await process.StandardOutput.ReadToEndAsync(); + var error = await process.StandardError.ReadToEndAsync(); + await process.WaitForExitAsync(); + + if (process.ExitCode != 0) + { + _logger.LogError($"Python script failed: {error}"); + return false; + } + + var startMatch = Regex.Match(output, @"QUERY_START_DATE=(\d+)"); + var endMatch = Regex.Match(output, @"QUERY_END_DATE=(\d+)"); + + if (startMatch.Success && endMatch.Success) + { + _queryStartDate = long.Parse(startMatch.Groups[1].Value) / 1000; + _queryEndDate = long.Parse(endMatch.Groups[1].Value) / 1000; + + Console.WriteLine($"Query start date: {DateTimeOffset.FromUnixTimeSeconds(_queryStartDate):yyyy-MM-ddTHH:mm:ss.fffZ}"); + Console.WriteLine($"Query end date: {DateTimeOffset.FromUnixTimeSeconds(_queryEndDate):yyyy-MM-ddTHH:mm:ss.fffZ}"); + return true; + } + else + { + _logger.LogError("Failed to parse timestamps from script output."); + return false; + } + } + catch (Exception ex) + { + _logger.LogError(ex, "An error occurred while generating sample logs."); + return false; + } + } + + /// + /// Executes the large query workflow. 
+ /// + public static async Task ExecuteLargeQuery() + { + Console.WriteLine("Starting recursive query to retrieve all logs..."); + Console.WriteLine(); + + var queryLimit = PromptUserForInteger("Enter the query limit (default 10000, max 10000): ", 10000); + if (queryLimit > 10000) queryLimit = 10000; + + var queryString = "fields @timestamp, @message | sort @timestamp asc"; + + var stopwatch = Stopwatch.StartNew(); + var allResults = await PerformLargeQuery(_logGroupName, queryString, _queryStartDate, _queryEndDate, queryLimit); + stopwatch.Stop(); + + Console.WriteLine(); + Console.WriteLine($"Queries finished in {stopwatch.Elapsed.TotalSeconds:F3} seconds."); + Console.WriteLine($"Total logs found: {allResults.Count}"); + Console.WriteLine(); + + var viewSample = !_interactive || GetYesNoResponse("Would you like to see a sample of the logs? (y/n) "); + if (viewSample) + { + Console.WriteLine(); + Console.WriteLine($"Sample logs (first 10 of {allResults.Count}):"); + for (int i = 0; i < Math.Min(10, allResults.Count); i++) + { + var timestamp = allResults[i].Find(f => f.Field == "@timestamp")?.Value ?? "N/A"; + var message = allResults[i].Find(f => f.Field == "@message")?.Value ?? "N/A"; + Console.WriteLine($"[{timestamp}] {message}"); + } + } + } + + /// + /// Performs a large query using recursive binary search. 
+ /// + private static async Task>> PerformLargeQuery( + string logGroupName, + string queryString, + long startTime, + long endTime, + int limit) + { + var queryId = await _wrapper.StartQueryAsync(logGroupName, queryString, startTime, endTime, limit); + if (queryId == null) + { + return new List>(); + } + + var results = await PollQueryResults(queryId); + if (results == null || results.Count == 0) + { + return new List>(); + } + + var startDate = DateTimeOffset.FromUnixTimeSeconds(startTime).ToString("yyyy-MM-ddTHH:mm:ss.fffZ"); + var endDate = DateTimeOffset.FromUnixTimeSeconds(endTime).ToString("yyyy-MM-ddTHH:mm:ss.fffZ"); + Console.WriteLine($"Query date range: {startDate} to {endDate}. Found {results.Count} logs."); + + if (results.Count < limit) + { + return results; + } + + var lastTimestamp = results[results.Count - 1].Find(f => f.Field == "@timestamp")?.Value; + if (lastTimestamp == null) + { + return results; + } + + var lastTime = DateTimeOffset.Parse(lastTimestamp).ToUnixTimeSeconds(); + var midpoint = (lastTime + endTime) / 2; + + var results1 = await PerformLargeQuery(logGroupName, queryString, lastTime, midpoint, limit); + var results2 = await PerformLargeQuery(logGroupName, queryString, midpoint, endTime, limit); + + var allResults = new List>(results); + allResults.AddRange(results1); + allResults.AddRange(results2); + + return allResults; + } + + /// + /// Polls for query results until complete. 
+ /// + private static async Task>?> PollQueryResults(string queryId) + { + int retryCount = 0; + const int maxRetries = 60; + const int retryDelay = 1000; + + while (retryCount < maxRetries) + { + var response = await _wrapper.GetQueryResultsAsync(queryId); + if (response == null) + { + return null; + } + + if (response.Status == QueryStatus.Complete) + { + return response.Results; + } + + if (response.Status == QueryStatus.Failed || + response.Status == QueryStatus.Cancelled || + response.Status == QueryStatus.Timeout || + response.Status == QueryStatus.Unknown) + { + _logger.LogError($"Query failed with status: {response.Status}"); + return null; + } + + await Task.Delay(retryDelay); + retryCount++; + } + + _logger.LogError("Timed out waiting for query results."); + return null; + } + + /// + /// Cleans up the resources created during the scenario. + /// + public static async Task Cleanup() + { + var cleanup = !_interactive || GetYesNoResponse( + "Do you want to delete the CloudFormation stack and all resources? (y/n) "); + + if (cleanup) + { + try + { + var stackDeleteSuccess = await DeleteCloudFormationStack(_stackName, false); + return stackDeleteSuccess; + } + catch (Exception ex) + { + _logger.LogError(ex, "An error occurred while cleaning up the resources."); + return false; + } + } + + Console.WriteLine($"Resources will remain. Stack name: {_stackName}, Log group: {_logGroupName}"); + _logger.LogInformation("CloudWatch Logs Large Query scenario is complete."); + return true; + } + + /// + /// Deletes the CloudFormation stack and waits for confirmation. + /// + private static async Task DeleteCloudFormationStack(string stackName, bool forceDelete) + { + var request = new DeleteStackRequest + { + StackName = stackName, + }; + + if (forceDelete) + { + request.DeletionMode = DeletionMode.FORCE_DELETE_STACK; + } + + await _amazonCloudFormation.DeleteStackAsync(request); + Console.WriteLine($"CloudFormation stack '{stackName}' is being deleted. 
This may take a few minutes."); + + bool stackDeleted = await WaitForStackDeletion(stackName, forceDelete); + + if (stackDeleted) + { + Console.WriteLine($"CloudFormation stack '{stackName}' has been deleted."); + return true; + } + else + { + _logger.LogError($"Failed to delete CloudFormation stack '{stackName}'."); + return false; + } + } + + /// + /// Waits for the stack to be deleted. + /// + private static async Task WaitForStackDeletion(string stackName, bool forceDelete) + { + int retryCount = 0; + const int maxRetries = 30; + const int retryDelay = 10000; + + while (retryCount < maxRetries) + { + var describeStacksRequest = new DescribeStacksRequest + { + StackName = stackName + }; + + try + { + var describeStacksResponse = await _amazonCloudFormation.DescribeStacksAsync(describeStacksRequest); + + if (describeStacksResponse.Stacks.Count == 0 || + describeStacksResponse.Stacks[0].StackStatus == StackStatus.DELETE_COMPLETE) + { + return true; + } + + if (!forceDelete && describeStacksResponse.Stacks[0].StackStatus == StackStatus.DELETE_FAILED) + { + return await DeleteCloudFormationStack(stackName, true); + } + } + catch (AmazonCloudFormationException ex) when (ex.ErrorCode == "ValidationError") + { + return true; + } + + Console.WriteLine($"Waiting for CloudFormation stack '{stackName}' to be deleted..."); + await Task.Delay(retryDelay); + retryCount++; + } + + _logger.LogError($"Timed out waiting for CloudFormation stack '{stackName}' to be deleted."); + return false; + } + + /// + /// Waits with a countdown display. + /// + private static async Task WaitWithCountdown(int seconds) + { + for (int i = seconds; i > 0; i--) + { + Console.Write($"\rWaiting: {i} seconds remaining... "); + await Task.Delay(1000); + } + Console.WriteLine("\rWait complete. "); + } + + /// + /// Helper method to get a yes or no response from the user. 
+ /// + private static bool GetYesNoResponse(string question) + { + Console.WriteLine(question); + var ynResponse = Console.ReadLine(); + var response = ynResponse != null && ynResponse.Equals("y", StringComparison.InvariantCultureIgnoreCase); + return response; + } + + /// + /// Prompts the user for a stack name. + /// + private static string PromptUserForStackName() + { + Console.WriteLine($"Enter a name for the CloudFormation stack (default: {_stackName}): "); + if (_interactive) + { + string? input = Console.ReadLine(); + if (!string.IsNullOrWhiteSpace(input)) + { + var regex = "[a-zA-Z][-a-zA-Z0-9]*"; + if (!Regex.IsMatch(input, regex)) + { + Console.WriteLine($"Invalid stack name. Using default: {_stackName}"); + return _stackName; + } + return input; + } + } + return _stackName; + } + + /// + /// Prompts the user for input with a default value. + /// + private static string PromptUserForInput(string prompt, string defaultValue) + { + if (_interactive) + { + Console.Write(prompt); + string? input = Console.ReadLine(); + return string.IsNullOrWhiteSpace(input) ? defaultValue : input; + } + return defaultValue; + } + + /// + /// Prompts the user for an integer value. + /// + private static int PromptUserForInteger(string prompt, int defaultValue) + { + if (_interactive) + { + Console.Write(prompt); + string? input = Console.ReadLine(); + if (string.IsNullOrWhiteSpace(input) || !int.TryParse(input, out var result)) + { + return defaultValue; + } + return result; + } + return defaultValue; + } + + /// + /// Prompts the user for a long value. + /// + private static long PromptUserForLong(string prompt) + { + if (_interactive) + { + Console.Write(prompt); + string? 
input = Console.ReadLine(); + if (long.TryParse(input, out var result)) + { + return result; + } + } + return 0; + } +} +// snippet-end:[CloudWatchLogs.dotnetv3.LargeQueryWorkflow] diff --git a/dotnetv4/CloudWatchLogs/LargeQuery/Scenarios/README.md b/dotnetv4/CloudWatchLogs/LargeQuery/Scenarios/README.md new file mode 100644 index 00000000000..c5d35a8317f --- /dev/null +++ b/dotnetv4/CloudWatchLogs/LargeQuery/Scenarios/README.md @@ -0,0 +1,109 @@ +# CloudWatch Logs Large Query Workflow + +## Overview + +This example demonstrates how to perform large-scale queries on Amazon CloudWatch Logs using recursive binary search to retrieve more than the 10,000 result limit. The workflow showcases how to use CloudWatch Logs Insights queries with a recursive algorithm to fetch all matching log entries. + +## Workflow Steps + +This workflow demonstrates the following steps and tasks: + +1. **Prepare the Application** + - Prompts the user to deploy a CloudFormation stack and generate sample logs + - Deploys the CloudFormation template to create a log group and log stream + - Executes a Python script to generate 50,000 sample log entries + - Waits 5 minutes for logs to be fully ingested and indexed + +2. **Execute Large Query** + - Prompts the user for query parameters (limit) + - Performs recursive queries using binary search to retrieve all logs + - Displays progress for each query executed with date ranges and result counts + - Shows total execution time and total logs found + - Optionally displays a sample of the retrieved logs + +3. **Clean Up** + - Prompts the user to confirm deletion of resources + - Deletes the CloudFormation stack + - Waits for stack deletion to complete + +## ⚠ Important + +* Running this code might result in charges to your AWS account. +* Running the tests might result in charges to your AWS account. +* We recommend that you grant your code least privilege. At most, grant only the minimum permissions required to perform the task. 
For more information, see [Grant least privilege](https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#grant-least-privilege). +* This code is not tested in every AWS Region. For more information, see [AWS Regional Services](https://aws.amazon.com/about-aws/global-infrastructure/regional-product-services). + +## Scenario + +### Prerequisites + +Before running this workflow, ensure you have: + +- An AWS account with proper permissions to use Amazon CloudWatch Logs and AWS CloudFormation +- Python 3.x installed (for log generation script) +- AWS credentials configured + +### AWS Services Used + +This workflow uses the following AWS services: + +- Amazon CloudWatch Logs +- AWS CloudFormation + +### Resources + +The feature scenario deploys an AWS CloudFormation stack with the required resources: + +- CloudWatch Logs Log Group: `/workflows/cloudwatch-logs/large-query` +- CloudWatch Logs Log Stream: `stream1` + +### Instructions + +After the example compiles, you can run it from the command line. To do so, navigate to the folder that contains the .sln file and run the following command: + +``` +dotnet run --project Scenarios/CloudWatchLogsScenario.csproj +``` + +Alternatively, you can run the example from within your IDE. + +This starts an interactive scenario that walks you through: + +1. Deploying a CloudFormation stack with CloudWatch Logs resources +2. Generating 50,000 sample log entries +3. Performing recursive queries to retrieve all logs +4. Cleaning up resources + +## How the Recursive Query Works + +The recursive query algorithm uses binary search to retrieve more than the 10,000 result limit: + +1. Execute a query with the specified date range +2. If results < limit, return the results +3. 
If results >= limit: + - Get the timestamp of the last result + - Calculate the midpoint between the last result and the end date + - Recursively query the first half (last result to midpoint) + - Recursively query the second half (midpoint to end date) + - Concatenate all results + +This approach efficiently retrieves all matching logs by splitting the date range whenever the result limit is reached. + +## CloudWatch Logs Actions + +The workflow covers the following CloudWatch Logs API actions: + +- [`StartQuery`](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_StartQuery.html) - Initiates a CloudWatch Logs Insights query +- [`GetQueryResults`](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_GetQueryResults.html) - Retrieves results from a query +- [`PutLogEvents`](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutLogEvents.html) - Uploads log events to a log stream + +## Additional Resources + +* [CloudWatch Logs User Guide](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/WhatIsCloudWatchLogs.html) +* [CloudWatch Logs Insights Query Syntax](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CWL_QuerySyntax.html) +* [CloudWatch Logs API Reference](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/Welcome.html) + +--- + +Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+SPDX-License-Identifier: Apache-2.0 diff --git a/dotnetv4/CloudWatchLogs/LargeQuery/Tests/CloudWatchLogsTests.csproj b/dotnetv4/CloudWatchLogs/LargeQuery/Tests/CloudWatchLogsTests.csproj new file mode 100644 index 00000000000..f08fb4a50ef --- /dev/null +++ b/dotnetv4/CloudWatchLogs/LargeQuery/Tests/CloudWatchLogsTests.csproj @@ -0,0 +1,31 @@ + + + + net8.0 + enable + enable + false + true + + + + + + + + + + runtime; build; native; contentfiles; analyzers; buildtransitive + all + + + runtime; build; native; contentfiles; analyzers; buildtransitive + all + + + + + + + + diff --git a/dotnetv4/CloudWatchLogs/LargeQuery/Tests/LargeQueryWorkflowTests.cs b/dotnetv4/CloudWatchLogs/LargeQuery/Tests/LargeQueryWorkflowTests.cs new file mode 100644 index 00000000000..0951c9b2549 --- /dev/null +++ b/dotnetv4/CloudWatchLogs/LargeQuery/Tests/LargeQueryWorkflowTests.cs @@ -0,0 +1,143 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +using Amazon.CloudWatchLogs; +using Amazon.CloudWatchLogs.Model; +using CloudWatchLogsActions; +using Microsoft.Extensions.Logging; +using Moq; + +namespace CloudWatchLogsTests; + +public class LargeQueryWorkflowTests +{ + private readonly Mock _mockCloudWatchLogs; + private readonly Mock> _mockLogger; + private readonly CloudWatchLogsWrapper _wrapper; + + public LargeQueryWorkflowTests() + { + _mockCloudWatchLogs = new Mock(); + _mockLogger = new Mock>(); + _wrapper = new CloudWatchLogsWrapper(_mockCloudWatchLogs.Object, _mockLogger.Object); + } + + [Fact] + public async Task StartQueryAsync_Success_ReturnsQueryId() + { + // Arrange + var expectedQueryId = "test-query-id-123"; + _mockCloudWatchLogs + .Setup(x => x.StartQueryAsync(It.IsAny(), default)) + .ReturnsAsync(new StartQueryResponse { QueryId = expectedQueryId }); + + // Act + var result = await _wrapper.StartQueryAsync( + "/test/log-group", + "fields @timestamp, @message", + 1000, + 2000, + 10000); + + // Assert + 
Assert.Equal(expectedQueryId, result); + } + + [Fact] + public async Task StartQueryAsync_InvalidParameter_ReturnsNull() + { + // Arrange + _mockCloudWatchLogs + .Setup(x => x.StartQueryAsync(It.IsAny(), default)) + .ThrowsAsync(new InvalidParameterException("Invalid parameter")); + + // Act + var result = await _wrapper.StartQueryAsync( + "/test/log-group", + "fields @timestamp, @message", + 1000, + 2000, + 10000); + + // Assert + Assert.Null(result); + } + + [Fact] + public async Task GetQueryResultsAsync_Success_ReturnsResults() + { + // Arrange + var expectedResponse = new GetQueryResultsResponse + { + Status = QueryStatus.Complete, + Results = new List> + { + new List + { + new ResultField { Field = "@timestamp", Value = "2023-01-01T00:00:00.000Z" }, + new ResultField { Field = "@message", Value = "Test message" } + } + } + }; + + _mockCloudWatchLogs + .Setup(x => x.GetQueryResultsAsync(It.IsAny(), default)) + .ReturnsAsync(expectedResponse); + + // Act + var result = await _wrapper.GetQueryResultsAsync("test-query-id"); + + // Assert + Assert.NotNull(result); + Assert.Equal(QueryStatus.Complete, result.Status); + Assert.Single(result.Results); + } + + [Fact] + public async Task PutLogEventsAsync_Success_ReturnsTrue() + { + // Arrange + _mockCloudWatchLogs + .Setup(x => x.PutLogEventsAsync(It.IsAny(), default)) + .ReturnsAsync(new PutLogEventsResponse()); + + var logEvents = new List + { + new InputLogEvent + { + Timestamp = DateTime.UtcNow, + Message = "Test log message" + } + }; + + // Act + var result = await _wrapper.PutLogEventsAsync("/test/log-group", "test-stream", logEvents); + + // Assert + Assert.True(result); + } + + [Fact] + public async Task PutLogEventsAsync_ResourceNotFound_ReturnsFalse() + { + // Arrange + _mockCloudWatchLogs + .Setup(x => x.PutLogEventsAsync(It.IsAny(), default)) + .ThrowsAsync(new ResourceNotFoundException("Log group not found")); + + var logEvents = new List + { + new InputLogEvent + { + Timestamp = DateTime.UtcNow, + 
Message = "Test log message" + } + }; + + // Act + var result = await _wrapper.PutLogEventsAsync("/test/log-group", "test-stream", logEvents); + + // Assert + Assert.False(result); + } +} diff --git a/dotnetv4/CloudWatchLogs/LargeQuery/Tests/Usings.cs b/dotnetv4/CloudWatchLogs/LargeQuery/Tests/Usings.cs new file mode 100644 index 00000000000..4cb6a55926e --- /dev/null +++ b/dotnetv4/CloudWatchLogs/LargeQuery/Tests/Usings.cs @@ -0,0 +1,4 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +global using Xunit; diff --git a/dotnetv4/CloudWatchLogs/README.md b/dotnetv4/CloudWatchLogs/README.md new file mode 100644 index 00000000000..d5aec8c2d7e --- /dev/null +++ b/dotnetv4/CloudWatchLogs/README.md @@ -0,0 +1,35 @@ +# CloudWatch Logs Examples for .NET + +This folder contains examples for Amazon CloudWatch Logs using the AWS SDK for .NET. + +## Examples + +### Feature Scenarios + +- **[LargeQuery](LargeQuery/)** - Demonstrates how to perform large-scale queries on CloudWatch Logs using recursive binary search to retrieve more than the 10,000 result limit. + +## Running the Examples + +Each example includes its own README with specific instructions. Generally, you can: + +1. Navigate to the example directory +2. Build the solution: `dotnet build` +3. Run the example: `dotnet run --project Scenarios/{ProjectName}.csproj` +4. Run tests: `dotnet test` + +## Prerequisites + +- .NET 8.0 or later +- AWS credentials configured +- Appropriate AWS permissions for CloudWatch Logs + +## Additional Resources + +- [CloudWatch Logs Documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/) +- [AWS SDK for .NET Documentation](https://docs.aws.amazon.com/sdk-for-net/) +- [CloudWatch Logs API Reference](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/) + +--- + +Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+SPDX-License-Identifier: Apache-2.0 diff --git a/scenarios/features/cloudwatch_logs_large_query/resources/create_logs.py b/scenarios/features/cloudwatch_logs_large_query/resources/create_logs.py new file mode 100644 index 00000000000..882bbdc1eb7 --- /dev/null +++ b/scenarios/features/cloudwatch_logs_large_query/resources/create_logs.py @@ -0,0 +1,70 @@ +#!/usr/bin/env python3 +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +""" +Script to generate and upload 50,000 sample log entries to CloudWatch Logs. +This script creates logs spanning 5 minutes and uploads them in batches. +""" + +import boto3 +import time +from datetime import datetime + +LOG_GROUP_NAME = "/workflows/cloudwatch-logs/large-query" +LOG_STREAM_NAME = "stream1" +TOTAL_ENTRIES = 50000 +ENTRIES_PER_BATCH = 10000 +FIVE_MINUTES_MS = 5 * 60 * 1000 + + +def main(): + """Generate and upload log entries to CloudWatch Logs.""" + client = boto3.client('logs') + + # Calculate timestamps + start_time_ms = int(time.time() * 1000) + timestamp_increment = FIVE_MINUTES_MS // TOTAL_ENTRIES + + print(f"Generating {TOTAL_ENTRIES} log entries...") + print(f"QUERY_START_DATE={start_time_ms}") + + entry_count = 0 + current_timestamp = start_time_ms + + # Generate and upload logs in batches + num_batches = TOTAL_ENTRIES // ENTRIES_PER_BATCH + + for batch_num in range(num_batches): + log_events = [] + + for i in range(ENTRIES_PER_BATCH): + log_events.append({ + 'timestamp': current_timestamp, + 'message': f'Entry {entry_count}' + }) + + entry_count += 1 + current_timestamp += timestamp_increment + + # Upload batch + try: + client.put_log_events( + logGroupName=LOG_GROUP_NAME, + logStreamName=LOG_STREAM_NAME, + logEvents=log_events + ) + print(f"Uploaded batch {batch_num + 1}/{num_batches}") + except Exception as e: + print(f"Error uploading batch {batch_num + 1}: {e}") + return 1 + + end_time_ms = current_timestamp - timestamp_increment + 
print(f"QUERY_END_DATE={end_time_ms}") + print(f"Successfully uploaded {TOTAL_ENTRIES} log entries") + + return 0 + + +if __name__ == "__main__": + exit(main()) diff --git a/steering_docs/dotnet-tech/scenario.md b/steering_docs/dotnet-tech/scenario.md index 271fac811af..8e88034ce29 100644 --- a/steering_docs/dotnet-tech/scenario.md +++ b/steering_docs/dotnet-tech/scenario.md @@ -3,6 +3,12 @@ ## Purpose Generate feature scenarios that demonstrate complete workflows using multiple service operations in a guided, educational manner. Implementation must be based on the service SPECIFICATION.md file. +## Target Directory +**IMPORTANT**: All new feature scenarios MUST be created in the `dotnetv4` directory, NOT `dotnetv3`. + +- **New scenarios**: `dotnetv4/{Service}/` +- **Legacy examples**: `dotnetv3/{Service}/` (Must NOT add new examples here) + ## Requirements - **Specification-Driven**: MUST read the `scenarios/features/{service_feature}/SPECIFICATION.md` - **Interactive**: Use Console.WriteLine and Console.ReadLine for user input and guidance @@ -19,7 +25,7 @@ Generate feature scenarios that demonstrate complete workflows using multiple se Feature scenarios use a multi-project structure with separate projects for actions, scenarios, and tests: ``` -dotnetv3/{Service}/ +dotnetv4/{Service}/ ├── {Service}.sln # Solution file ├── Actions/ │ ├── {Service}Wrapper.cs # Wrapper class for service operations @@ -35,6 +41,8 @@ dotnetv3/{Service}/ └── {Service}Tests.csproj # Test project file (references Scenarios) ``` +**Note**: Use `dotnetv4` for all new feature scenarios. The `dotnetv3` directory is for legacy examples only. 
+ ## MANDATORY Pre-Implementation Steps ### Step 1: Read Scenario Specification @@ -538,16 +546,16 @@ public class {Service}Workflow Exe - net6.0 + net8.0 enable enable - - - + + + @@ -585,23 +593,24 @@ public class {Service}Workflow - net6.0 + net8.0 enable enable false + true - - - - + + + + - + runtime; build; native; contentfiles; analyzers; buildtransitive all - + runtime; build; native; contentfiles; analyzers; buildtransitive all From 2fb5c701b1c0de657fd52e2395e26a3f6e23c77b Mon Sep 17 00:00:00 2001 From: Rachel Hagerman <110480692+rlhagerm@users.noreply.github.com> Date: Tue, 18 Nov 2025 14:35:29 -0600 Subject: [PATCH 10/23] Adding stack deployment --- .kiro/settings/mcp.json | 14 ++ .../Actions/CloudWatchLogsActions.csproj | 12 +- .../Actions/CloudWatchLogsWrapper.cs | 16 +- dotnetv4/CloudWatchLogs/LargeQuery/README.md | 1 - .../Scenarios/CloudWatchLogsScenario.csproj | 10 +- .../Scenarios/LargeQueryWorkflow.cs | 170 ++++++++++++------ .../Tests/CloudWatchLogsTests.csproj | 15 +- .../resources/stack.yaml | 2 +- steering_docs/dotnet-tech/scenario.md | 43 ++--- 9 files changed, 175 insertions(+), 108 deletions(-) diff --git a/.kiro/settings/mcp.json b/.kiro/settings/mcp.json index 42fb25a070f..d9c0a7bbd0c 100644 --- a/.kiro/settings/mcp.json +++ b/.kiro/settings/mcp.json @@ -35,6 +35,20 @@ "aws___search_documentation", "aws___read_documentation" ] + }, + "codeloom-mcp": { + "disabled": false, + "command": "code-loom-mcp", + "args": [], + "env": {}, + "transportType": "stdio", + "autoApprove": [ + "loomer", + "search_aws_docs", + "read_aws_docs", + "query_knowledge_bases", + "list_knowledge_bases" + ] } } } \ No newline at end of file diff --git a/dotnetv4/CloudWatchLogs/LargeQuery/Actions/CloudWatchLogsActions.csproj b/dotnetv4/CloudWatchLogs/LargeQuery/Actions/CloudWatchLogsActions.csproj index 3aa4085c546..4934ade0dec 100644 --- a/dotnetv4/CloudWatchLogs/LargeQuery/Actions/CloudWatchLogsActions.csproj +++ 
b/dotnetv4/CloudWatchLogs/LargeQuery/Actions/CloudWatchLogsActions.csproj @@ -1,18 +1,18 @@ - Exe + Library net8.0 enable enable - - - - - + + + + + diff --git a/dotnetv4/CloudWatchLogs/LargeQuery/Actions/CloudWatchLogsWrapper.cs b/dotnetv4/CloudWatchLogs/LargeQuery/Actions/CloudWatchLogsWrapper.cs index 98e5c1fcc14..eb09c41cfd1 100644 --- a/dotnetv4/CloudWatchLogs/LargeQuery/Actions/CloudWatchLogsWrapper.cs +++ b/dotnetv4/CloudWatchLogs/LargeQuery/Actions/CloudWatchLogsWrapper.cs @@ -1,7 +1,7 @@ // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 -// snippet-start:[CloudWatchLogs.dotnetv3.CloudWatchLogsWrapper] +// snippet-start:[CloudWatchLogs.dotnetv4.CloudWatchLogsWrapper] using Amazon.CloudWatchLogs; using Amazon.CloudWatchLogs.Model; using Microsoft.Extensions.Logging; @@ -27,7 +27,7 @@ public CloudWatchLogsWrapper(IAmazonCloudWatchLogs amazonCloudWatchLogs, ILogger _logger = logger; } - // snippet-start:[CloudWatchLogs.dotnetv3.StartQuery] + // snippet-start:[CloudWatchLogs.dotnetv4.StartQuery] /// /// Starts a CloudWatch Logs Insights query. /// @@ -74,9 +74,9 @@ public CloudWatchLogsWrapper(IAmazonCloudWatchLogs amazonCloudWatchLogs, ILogger return null; } } - // snippet-end:[CloudWatchLogs.dotnetv3.StartQuery] + // snippet-end:[CloudWatchLogs.dotnetv4.StartQuery] - // snippet-start:[CloudWatchLogs.dotnetv3.GetQueryResults] + // snippet-start:[CloudWatchLogs.dotnetv4.GetQueryResults] /// /// Gets the results of a CloudWatch Logs Insights query. /// @@ -105,9 +105,9 @@ public CloudWatchLogsWrapper(IAmazonCloudWatchLogs amazonCloudWatchLogs, ILogger return null; } } - // snippet-end:[CloudWatchLogs.dotnetv3.GetQueryResults] + // snippet-end:[CloudWatchLogs.dotnetv4.GetQueryResults] - // snippet-start:[CloudWatchLogs.dotnetv3.PutLogEvents] + // snippet-start:[CloudWatchLogs.dotnetv4.PutLogEvents] /// /// Puts log events to a CloudWatch Logs log stream. 
/// @@ -143,6 +143,6 @@ public async Task PutLogEventsAsync( return false; } } - // snippet-end:[CloudWatchLogs.dotnetv3.PutLogEvents] + // snippet-end:[CloudWatchLogs.dotnetv4.PutLogEvents] } -// snippet-end:[CloudWatchLogs.dotnetv3.CloudWatchLogsWrapper] +// snippet-end:[CloudWatchLogs.dotnetv4.CloudWatchLogsWrapper] diff --git a/dotnetv4/CloudWatchLogs/LargeQuery/README.md b/dotnetv4/CloudWatchLogs/LargeQuery/README.md index ccb87751d1d..c530567eac2 100644 --- a/dotnetv4/CloudWatchLogs/LargeQuery/README.md +++ b/dotnetv4/CloudWatchLogs/LargeQuery/README.md @@ -54,7 +54,6 @@ LargeQuery/ - .NET 8.0 or later - AWS credentials configured -- Python 3.x (for log generation) - Permissions for CloudWatch Logs and CloudFormation ## Related Resources diff --git a/dotnetv4/CloudWatchLogs/LargeQuery/Scenarios/CloudWatchLogsScenario.csproj b/dotnetv4/CloudWatchLogs/LargeQuery/Scenarios/CloudWatchLogsScenario.csproj index 1047d133f69..fcf70daf1c3 100644 --- a/dotnetv4/CloudWatchLogs/LargeQuery/Scenarios/CloudWatchLogsScenario.csproj +++ b/dotnetv4/CloudWatchLogs/LargeQuery/Scenarios/CloudWatchLogsScenario.csproj @@ -8,11 +8,11 @@ - - - - - + + + + + diff --git a/dotnetv4/CloudWatchLogs/LargeQuery/Scenarios/LargeQueryWorkflow.cs b/dotnetv4/CloudWatchLogs/LargeQuery/Scenarios/LargeQueryWorkflow.cs index 9d3ea05bfe8..7f7a27e73ed 100644 --- a/dotnetv4/CloudWatchLogs/LargeQuery/Scenarios/LargeQueryWorkflow.cs +++ b/dotnetv4/CloudWatchLogs/LargeQuery/Scenarios/LargeQueryWorkflow.cs @@ -1,7 +1,7 @@ // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 -// snippet-start:[CloudWatchLogs.dotnetv3.LargeQueryWorkflow] +// snippet-start:[CloudWatchLogs.dotnetv4.LargeQueryWorkflow] using System.Diagnostics; using System.Text.RegularExpressions; using Amazon.CloudFormation; @@ -24,7 +24,7 @@ public class LargeQueryWorkflow 1. Prepare the Application: - Prompt the user to deploy CloudFormation stack and generate sample logs. 
- Deploy the CloudFormation template for resource creation. - - Generate 50,000 sample log entries using a Python script. + - Generate 50,000 sample log entries using CloudWatch Logs API. - Wait 5 minutes for logs to be fully ingested. 2. Execute Large Query: @@ -48,8 +48,7 @@ public class LargeQueryWorkflow public static bool _interactive = true; private static string _stackName = "CloudWatchLargeQueryStack"; - private static string _stackResourcePath = "../../../../../../scenarios/features/cloudwatch_logs_large_query/resources/stack.yaml"; - private static string _pythonScriptPath = "../../../../../../scenarios/features/cloudwatch_logs_large_query/resources/create_logs.py"; + private static string _stackResourcePath = "../../../../../../../scenarios/features/cloudwatch_logs_large_query/resources/stack.yaml"; public static async Task Main(string[] args) { @@ -147,8 +146,8 @@ public static async Task PrepareApplication() } else { - _logGroupName = PromptUserForInput("Enter the log group name: ", _logGroupName); - _logStreamName = PromptUserForInput("Enter the log stream name: ", _logStreamName); + _logGroupName = PromptUserForInput("Enter the log group name ", _logGroupName); + _logStreamName = PromptUserForInput("Enter the log stream name ", _logStreamName); var startDateMs = PromptUserForLong("Enter the query start date (milliseconds since epoch): "); var endDateMs = PromptUserForLong("Enter the query end date (milliseconds since epoch): "); @@ -267,65 +266,65 @@ private static async Task WaitForStackCompletion(string stackId) } /// - /// Generates sample logs using a Python script. + /// Generates sample logs directly using CloudWatch Logs API. + /// Creates 50,000 log entries spanning 5 minutes. /// /// True if logs were generated successfully. 
private static async Task GenerateSampleLogs() { + const int totalEntries = 50000; + const int entriesPerBatch = 10000; + const int fiveMinutesMs = 5 * 60 * 1000; + try { - if (!File.Exists(_pythonScriptPath)) - { - _logger.LogError($"Python script not found at: {_pythonScriptPath}"); - Console.WriteLine("Please run the script manually from:"); - Console.WriteLine($" {_pythonScriptPath}"); - return false; - } + // Calculate timestamps + var startTimeMs = DateTimeOffset.UtcNow.ToUnixTimeMilliseconds(); + var timestampIncrement = fiveMinutesMs / totalEntries; - var processStartInfo = new ProcessStartInfo - { - FileName = "python", - Arguments = _pythonScriptPath, - RedirectStandardOutput = true, - RedirectStandardError = true, - UseShellExecute = false, - CreateNoWindow = true - }; + Console.WriteLine($"Generating {totalEntries} log entries..."); - using var process = Process.Start(processStartInfo); - if (process == null) + var entryCount = 0; + var currentTimestamp = startTimeMs; + var numBatches = totalEntries / entriesPerBatch; + + // Generate and upload logs in batches + for (int batchNum = 0; batchNum < numBatches; batchNum++) { - _logger.LogError("Failed to start Python process."); - return false; - } + var logEvents = new List(); - var output = await process.StandardOutput.ReadToEndAsync(); - var error = await process.StandardError.ReadToEndAsync(); - await process.WaitForExitAsync(); + for (int i = 0; i < entriesPerBatch; i++) + { + logEvents.Add(new InputLogEvent + { + Timestamp = DateTimeOffset.FromUnixTimeMilliseconds(currentTimestamp).UtcDateTime, + Message = $"Entry {entryCount}" + }); - if (process.ExitCode != 0) - { - _logger.LogError($"Python script failed: {error}"); - return false; + entryCount++; + currentTimestamp += timestampIncrement; + } + + // Upload batch + var success = await _wrapper.PutLogEventsAsync(_logGroupName, _logStreamName, logEvents); + if (!success) + { + _logger.LogError($"Failed to upload batch {batchNum + 1}/{numBatches}"); + 
return false; + } + + Console.WriteLine($"Uploaded batch {batchNum + 1}/{numBatches}"); } - var startMatch = Regex.Match(output, @"QUERY_START_DATE=(\d+)"); - var endMatch = Regex.Match(output, @"QUERY_END_DATE=(\d+)"); + // Set query date range (convert milliseconds to seconds for query API) + _queryStartDate = startTimeMs / 1000; + _queryEndDate = (currentTimestamp - timestampIncrement) / 1000; - if (startMatch.Success && endMatch.Success) - { - _queryStartDate = long.Parse(startMatch.Groups[1].Value) / 1000; - _queryEndDate = long.Parse(endMatch.Groups[1].Value) / 1000; + Console.WriteLine($"Query start date: {DateTimeOffset.FromUnixTimeSeconds(_queryStartDate):yyyy-MM-ddTHH:mm:ss.fffZ}"); + Console.WriteLine($"Query end date: {DateTimeOffset.FromUnixTimeSeconds(_queryEndDate):yyyy-MM-ddTHH:mm:ss.fffZ}"); + Console.WriteLine($"Successfully uploaded {totalEntries} log entries"); - Console.WriteLine($"Query start date: {DateTimeOffset.FromUnixTimeSeconds(_queryStartDate):yyyy-MM-ddTHH:mm:ss.fffZ}"); - Console.WriteLine($"Query end date: {DateTimeOffset.FromUnixTimeSeconds(_queryEndDate):yyyy-MM-ddTHH:mm:ss.fffZ}"); - return true; - } - else - { - _logger.LogError("Failed to parse timestamps from script output."); - return false; - } + return true; } catch (Exception ex) { @@ -342,7 +341,7 @@ public static async Task ExecuteLargeQuery() Console.WriteLine("Starting recursive query to retrieve all logs..."); Console.WriteLine(); - var queryLimit = PromptUserForInteger("Enter the query limit (default 10000, max 10000): ", 10000); + var queryLimit = PromptUserForInteger("Enter the query limit (max 10000) ", 10000); if (queryLimit > 10000) queryLimit = 10000; var queryString = "fields @timestamp, @message | sort @timestamp asc"; @@ -407,17 +406,70 @@ private static async Task>> PerformLargeQuery( return results; } + // Parse the timestamp - CloudWatch returns ISO 8601 format with milliseconds var lastTime = DateTimeOffset.Parse(lastTimestamp).ToUnixTimeSeconds(); + + // 
Check if there's any time range left to query + if (lastTime >= endTime) + { + return results; + } + + // Calculate midpoint between last result and end time var midpoint = (lastTime + endTime) / 2; + + // Ensure we have enough range to split + if (midpoint <= lastTime || midpoint >= endTime) + { + // Range too small to split, just query the remaining range + var remainingResults = await PerformLargeQuery(logGroupName, queryString, lastTime, endTime, limit); + + var allResults = new List>(results); + // Skip the first result if it's a duplicate of the last result from previous query + if (remainingResults.Count > 0) + { + var firstTimestamp = remainingResults[0].Find(f => f.Field == "@timestamp")?.Value; + if (firstTimestamp == lastTimestamp) + { + remainingResults.RemoveAt(0); + } + } + allResults.AddRange(remainingResults); + return allResults; + } + // Split the remaining range in half var results1 = await PerformLargeQuery(logGroupName, queryString, lastTime, midpoint, limit); var results2 = await PerformLargeQuery(logGroupName, queryString, midpoint, endTime, limit); - var allResults = new List>(results); - allResults.AddRange(results1); - allResults.AddRange(results2); + var combinedResults = new List>(results); + + // Remove duplicate from results1 if it matches the last result + if (results1.Count > 0) + { + var firstTimestamp1 = results1[0].Find(f => f.Field == "@timestamp")?.Value; + if (firstTimestamp1 == lastTimestamp) + { + results1.RemoveAt(0); + } + } + + combinedResults.AddRange(results1); + + // Remove duplicate from results2 if it matches the last result from results1 + if (results2.Count > 0 && results1.Count > 0) + { + var lastTimestamp1 = results1[results1.Count - 1].Find(f => f.Field == "@timestamp")?.Value; + var firstTimestamp2 = results2[0].Find(f => f.Field == "@timestamp")?.Value; + if (firstTimestamp2 == lastTimestamp1) + { + results2.RemoveAt(0); + } + } + + combinedResults.AddRange(results2); - return allResults; + return 
combinedResults; } /// @@ -592,9 +644,9 @@ private static bool GetYesNoResponse(string question) /// private static string PromptUserForStackName() { - Console.WriteLine($"Enter a name for the CloudFormation stack (default: {_stackName}): "); if (_interactive) { + Console.Write($"Enter a name for the CloudFormation stack (press Enter for default '{_stackName}'): "); string? input = Console.ReadLine(); if (!string.IsNullOrWhiteSpace(input)) { @@ -617,7 +669,7 @@ private static string PromptUserForInput(string prompt, string defaultValue) { if (_interactive) { - Console.Write(prompt); + Console.Write($"{prompt}(press Enter for default '{defaultValue}'): "); string? input = Console.ReadLine(); return string.IsNullOrWhiteSpace(input) ? defaultValue : input; } @@ -631,7 +683,7 @@ private static int PromptUserForInteger(string prompt, int defaultValue) { if (_interactive) { - Console.Write(prompt); + Console.Write($"{prompt}(press Enter for default '{defaultValue}'): "); string? input = Console.ReadLine(); if (string.IsNullOrWhiteSpace(input) || !int.TryParse(input, out var result)) { @@ -659,4 +711,4 @@ private static long PromptUserForLong(string prompt) return 0; } } -// snippet-end:[CloudWatchLogs.dotnetv3.LargeQueryWorkflow] +// snippet-end:[CloudWatchLogs.dotnetv4.LargeQueryWorkflow] diff --git a/dotnetv4/CloudWatchLogs/LargeQuery/Tests/CloudWatchLogsTests.csproj b/dotnetv4/CloudWatchLogs/LargeQuery/Tests/CloudWatchLogsTests.csproj index f08fb4a50ef..8222f0ee7fa 100644 --- a/dotnetv4/CloudWatchLogs/LargeQuery/Tests/CloudWatchLogsTests.csproj +++ b/dotnetv4/CloudWatchLogs/LargeQuery/Tests/CloudWatchLogsTests.csproj @@ -6,19 +6,20 @@ enable false true + $(NoWarn);NETSDK1206 - - - - - - + + + + + + runtime; build; native; contentfiles; analyzers; buildtransitive all - + runtime; build; native; contentfiles; analyzers; buildtransitive all diff --git a/scenarios/features/cloudwatch_logs_large_query/resources/stack.yaml 
b/scenarios/features/cloudwatch_logs_large_query/resources/stack.yaml index ed9f451193d..25937630e31 100644 --- a/scenarios/features/cloudwatch_logs_large_query/resources/stack.yaml +++ b/scenarios/features/cloudwatch_logs_large_query/resources/stack.yaml @@ -2,7 +2,7 @@ Resources: LargeQueryLogGroup: Type: AWS::Logs::LogGroup Properties: - LogGroupName: /workflows/cloudwatch-logs/large-query + LogGroupName: /workflows/cloudwatch-logs/large-query LargeQueryLogGroupStream1: Type: AWS::Logs::LogStream Properties: diff --git a/steering_docs/dotnet-tech/scenario.md b/steering_docs/dotnet-tech/scenario.md index 8e88034ce29..51cb61865d7 100644 --- a/steering_docs/dotnet-tech/scenario.md +++ b/steering_docs/dotnet-tech/scenario.md @@ -67,7 +67,7 @@ From the specification, identify: // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 -// snippet-start:[{Service}.dotnetv3.{Service}Workflow] +// snippet-start:[{Service}.dotnetv4.{Service}Workflow] using Amazon.{Service}; using Amazon.CloudFormation; using Amazon.CloudFormation.Model; @@ -534,7 +534,7 @@ public class {Service}Workflow return ""; } } -// snippet-end:[{Service}.dotnetv3.{Service}Workflow] +// snippet-end:[{Service}.dotnetv4.{Service}Workflow] ``` ## Project Files @@ -552,10 +552,10 @@ public class {Service}Workflow - - - - + + + + @@ -574,10 +574,10 @@ public class {Service}Workflow - - - - + + + + @@ -598,19 +598,20 @@ public class {Service}Workflow enable false true + $(NoWarn);NETSDK1206 - - - - - - + + + + + + runtime; build; native; contentfiles; analyzers; buildtransitive all - + runtime; build; native; contentfiles; analyzers; buildtransitive all @@ -758,7 +759,7 @@ foreach (var item in items) // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 -// snippet-start:[{Service}.dotnetv3.{Service}Wrapper] +// snippet-start:[{Service}.dotnetv4.{Service}Wrapper] using Amazon.{Service}; using Amazon.{Service}.Model; using Microsoft.Extensions.Logging; @@ -784,7 +785,7 @@ public class {Service}Wrapper _logger = logger; } - // snippet-start:[{Service}.dotnetv3.OperationName] + // snippet-start:[{Service}.dotnetv4.OperationName] /// /// Description of what this operation does. /// @@ -820,9 +821,9 @@ public class {Service}Wrapper return false; } } - // snippet-end:[{Service}.dotnetv3.OperationName] + // snippet-end:[{Service}.dotnetv4.OperationName] } -// snippet-end:[{Service}.dotnetv3.{Service}Wrapper] +// snippet-end:[{Service}.dotnetv4.{Service}Wrapper] ``` ### Wrapper Method Guidelines From b78afbf1bebf3b654bbe9cf3e6acd7abacb19894 Mon Sep 17 00:00:00 2001 From: Rachel Hagerman <110480692+rlhagerm@users.noreply.github.com> Date: Wed, 19 Nov 2025 14:14:05 -0600 Subject: [PATCH 11/23] Update LargeQueryWorkflow.cs --- .../Scenarios/LargeQueryWorkflow.cs | 131 ++++++++++-------- 1 file changed, 76 insertions(+), 55 deletions(-) diff --git a/dotnetv4/CloudWatchLogs/LargeQuery/Scenarios/LargeQueryWorkflow.cs b/dotnetv4/CloudWatchLogs/LargeQuery/Scenarios/LargeQueryWorkflow.cs index 7f7a27e73ed..cbb87b5b4e1 100644 --- a/dotnetv4/CloudWatchLogs/LargeQuery/Scenarios/LargeQueryWorkflow.cs +++ b/dotnetv4/CloudWatchLogs/LargeQuery/Scenarios/LargeQueryWorkflow.cs @@ -393,83 +393,104 @@ private static async Task>> PerformLargeQuery( var startDate = DateTimeOffset.FromUnixTimeSeconds(startTime).ToString("yyyy-MM-ddTHH:mm:ss.fffZ"); var endDate = DateTimeOffset.FromUnixTimeSeconds(endTime).ToString("yyyy-MM-ddTHH:mm:ss.fffZ"); - Console.WriteLine($"Query date range: {startDate} to {endDate}. Found {results.Count} logs."); + Console.WriteLine($"Query date range: {startDate} ({startTime}s) to {endDate} ({endTime}s). 
Found {results.Count} logs."); if (results.Count < limit) { + Console.WriteLine($" -> Returning {results.Count} logs (less than limit of {limit})"); return results; } - var lastTimestamp = results[results.Count - 1].Find(f => f.Field == "@timestamp")?.Value; - if (lastTimestamp == null) + Console.WriteLine($" -> Hit limit of {limit}. Need to split and recurse."); + + // Get the timestamp of the last log (sorted to find the actual last one) + var lastLogTimestamp = GetLastLogTimestamp(results); + if (lastLogTimestamp == null) { + Console.WriteLine($" -> No timestamp found in results. Returning {results.Count} logs."); return results; } - // Parse the timestamp - CloudWatch returns ISO 8601 format with milliseconds - var lastTime = DateTimeOffset.Parse(lastTimestamp).ToUnixTimeSeconds(); + Console.WriteLine($" -> Last log timestamp: {lastLogTimestamp}"); + + // Parse the timestamp and add 1 millisecond to avoid querying the same log again + var lastLogDate = DateTimeOffset.Parse(lastLogTimestamp + " +0000"); + Console.WriteLine($" -> Last log as DateTimeOffset: {lastLogDate:yyyy-MM-ddTHH:mm:ss.fffZ} ({lastLogDate.ToUnixTimeSeconds()}s)"); + + var offsetLastLogDate = lastLogDate.AddMilliseconds(1); + Console.WriteLine($" -> Offset timestamp (last + 1ms): {offsetLastLogDate:yyyy-MM-ddTHH:mm:ss.fffZ} ({offsetLastLogDate.ToUnixTimeSeconds()}s)"); + // Convert back to seconds for the API + var offsetLastLogTime = offsetLastLogDate.ToUnixTimeSeconds(); + + Console.WriteLine($" -> Comparing: offsetLastLogTime={offsetLastLogTime}s vs endTime={endTime}s"); + Console.WriteLine($" -> End time as date: {DateTimeOffset.FromUnixTimeSeconds(endTime):yyyy-MM-ddTHH:mm:ss.fffZ}"); + // Check if there's any time range left to query - if (lastTime >= endTime) + if (offsetLastLogTime >= endTime) { + Console.WriteLine($" -> No time range left to query. 
Offset time ({offsetLastLogTime}s) >= end time ({endTime}s)"); return results; } - // Calculate midpoint between last result and end time - var midpoint = (lastTime + endTime) / 2; - - // Ensure we have enough range to split - if (midpoint <= lastTime || midpoint >= endTime) - { - // Range too small to split, just query the remaining range - var remainingResults = await PerformLargeQuery(logGroupName, queryString, lastTime, endTime, limit); - - var allResults = new List>(results); - // Skip the first result if it's a duplicate of the last result from previous query - if (remainingResults.Count > 0) - { - var firstTimestamp = remainingResults[0].Find(f => f.Field == "@timestamp")?.Value; - if (firstTimestamp == lastTimestamp) - { - remainingResults.RemoveAt(0); - } - } - allResults.AddRange(remainingResults); - return allResults; - } - - // Split the remaining range in half - var results1 = await PerformLargeQuery(logGroupName, queryString, lastTime, midpoint, limit); - var results2 = await PerformLargeQuery(logGroupName, queryString, midpoint, endTime, limit); - - var combinedResults = new List>(results); + // Split the remaining date range in half + var (range1Start, range1End, range2Start, range2End) = SplitDateRange(offsetLastLogTime, endTime); - // Remove duplicate from results1 if it matches the last result - if (results1.Count > 0) - { - var firstTimestamp1 = results1[0].Find(f => f.Field == "@timestamp")?.Value; - if (firstTimestamp1 == lastTimestamp) - { - results1.RemoveAt(0); - } - } + var range1StartDate = DateTimeOffset.FromUnixTimeSeconds(range1Start).ToString("yyyy-MM-ddTHH:mm:ss.fffZ"); + var range1EndDate = DateTimeOffset.FromUnixTimeSeconds(range1End).ToString("yyyy-MM-ddTHH:mm:ss.fffZ"); + var range2StartDate = DateTimeOffset.FromUnixTimeSeconds(range2Start).ToString("yyyy-MM-ddTHH:mm:ss.fffZ"); + var range2EndDate = DateTimeOffset.FromUnixTimeSeconds(range2End).ToString("yyyy-MM-ddTHH:mm:ss.fffZ"); - combinedResults.AddRange(results1); + 
Console.WriteLine($" -> Splitting remaining range:"); + Console.WriteLine($" Range 1: {range1StartDate} ({range1Start}s) to {range1EndDate} ({range1End}s)"); + Console.WriteLine($" Range 2: {range2StartDate} ({range2Start}s) to {range2EndDate} ({range2End}s)"); + + // Query both halves recursively + Console.WriteLine($" -> Querying range 1..."); + var results1 = await PerformLargeQuery(logGroupName, queryString, range1Start, range1End, limit); + Console.WriteLine($" -> Range 1 returned {results1.Count} logs"); - // Remove duplicate from results2 if it matches the last result from results1 - if (results2.Count > 0 && results1.Count > 0) + Console.WriteLine($" -> Querying range 2..."); + var results2 = await PerformLargeQuery(logGroupName, queryString, range2Start, range2End, limit); + Console.WriteLine($" -> Range 2 returned {results2.Count} logs"); + + // Combine all results + var allResults = new List>(results); + allResults.AddRange(results1); + allResults.AddRange(results2); + + Console.WriteLine($" -> Combined total: {allResults.Count} logs ({results.Count} + {results1.Count} + {results2.Count})"); + + return allResults; + } + + /// + /// Gets the timestamp string of the most recent log from a list of logs. + /// Sorts timestamps to find the actual last one. + /// + private static string? GetLastLogTimestamp(List> logs) + { + var timestamps = logs + .Select(log => log.Find(f => f.Field == "@timestamp")?.Value) + .Where(t => !string.IsNullOrEmpty(t)) + .OrderBy(t => t) + .ToList(); + + if (timestamps.Count == 0) { - var lastTimestamp1 = results1[results1.Count - 1].Find(f => f.Field == "@timestamp")?.Value; - var firstTimestamp2 = results2[0].Find(f => f.Field == "@timestamp")?.Value; - if (firstTimestamp2 == lastTimestamp1) - { - results2.RemoveAt(0); - } + return null; } - - combinedResults.AddRange(results2); - return combinedResults; + return timestamps[timestamps.Count - 1]; + } + + /// + /// Splits a date range in half. 
+ /// + private static (long range1Start, long range1End, long range2Start, long range2End) SplitDateRange(long startTime, long endTime) + { + var midpoint = startTime + (endTime - startTime) / 2; + return (startTime, midpoint, midpoint, endTime); } /// From 260e6156a1a5dfd7ae0952af4065288c7e0d9d8a Mon Sep 17 00:00:00 2001 From: Rachel Hagerman <110480692+rlhagerm@users.noreply.github.com> Date: Wed, 19 Nov 2025 14:38:17 -0600 Subject: [PATCH 12/23] duplicate fixes. --- .../Scenarios/LargeQueryWorkflow.cs | 67 ++++++++++++++++++- 1 file changed, 65 insertions(+), 2 deletions(-) diff --git a/dotnetv4/CloudWatchLogs/LargeQuery/Scenarios/LargeQueryWorkflow.cs b/dotnetv4/CloudWatchLogs/LargeQuery/Scenarios/LargeQueryWorkflow.cs index cbb87b5b4e1..61c1d572a6b 100644 --- a/dotnetv4/CloudWatchLogs/LargeQuery/Scenarios/LargeQueryWorkflow.cs +++ b/dotnetv4/CloudWatchLogs/LargeQuery/Scenarios/LargeQueryWorkflow.cs @@ -353,6 +353,27 @@ public static async Task ExecuteLargeQuery() Console.WriteLine(); Console.WriteLine($"Queries finished in {stopwatch.Elapsed.TotalSeconds:F3} seconds."); Console.WriteLine($"Total logs found: {allResults.Count}"); + + // Check for duplicates + Console.WriteLine(); + Console.WriteLine("Checking for duplicate logs..."); + var duplicates = FindDuplicateLogs(allResults); + if (duplicates.Count > 0) + { + Console.WriteLine($"WARNING: Found {duplicates.Count} duplicate log entries!"); + Console.WriteLine("Duplicate entries (showing first 10):"); + foreach (var dup in duplicates.Take(10)) + { + Console.WriteLine($" [{dup.Timestamp}] {dup.Message} (appears {dup.Count} times)"); + } + + var uniqueCount = allResults.Count - duplicates.Sum(d => d.Count - 1); + Console.WriteLine($"Unique logs: {uniqueCount}"); + } + else + { + Console.WriteLine("No duplicates found. All logs are unique."); + } Console.WriteLine(); var viewSample = !_interactive || GetYesNoResponse("Would you like to see a sample of the logs? 
(y/n) "); @@ -420,8 +441,14 @@ private static async Task>> PerformLargeQuery( var offsetLastLogDate = lastLogDate.AddMilliseconds(1); Console.WriteLine($" -> Offset timestamp (last + 1ms): {offsetLastLogDate:yyyy-MM-ddTHH:mm:ss.fffZ} ({offsetLastLogDate.ToUnixTimeSeconds()}s)"); - // Convert back to seconds for the API + // Convert to seconds, but round UP to the next second to avoid overlapping with logs in the same second + // This ensures we don't re-query logs that share the same second as the last log var offsetLastLogTime = offsetLastLogDate.ToUnixTimeSeconds(); + if (offsetLastLogDate.Millisecond > 0) + { + offsetLastLogTime++; // Move to the next full second + Console.WriteLine($" -> Adjusted to next full second: {offsetLastLogTime}s ({DateTimeOffset.FromUnixTimeSeconds(offsetLastLogTime):yyyy-MM-ddTHH:mm:ss.fffZ})"); + } Console.WriteLine($" -> Comparing: offsetLastLogTime={offsetLastLogTime}s vs endTime={endTime}s"); Console.WriteLine($" -> End time as date: {DateTimeOffset.FromUnixTimeSeconds(endTime):yyyy-MM-ddTHH:mm:ss.fffZ}"); @@ -486,11 +513,13 @@ private static async Task>> PerformLargeQuery( /// /// Splits a date range in half. + /// Range 2 starts at midpoint + 1 second to avoid overlap. /// private static (long range1Start, long range1End, long range2Start, long range2End) SplitDateRange(long startTime, long endTime) { var midpoint = startTime + (endTime - startTime) / 2; - return (startTime, midpoint, midpoint, endTime); + // Range 2 starts at midpoint + 1 to avoid querying the same second twice + return (startTime, midpoint, midpoint + 1, endTime); } /// @@ -731,5 +760,39 @@ private static long PromptUserForLong(string prompt) } return 0; } + + /// + /// Finds duplicate log entries based on timestamp and message. 
+ /// + private static List<(string Timestamp, string Message, int Count)> FindDuplicateLogs(List> logs) + { + var logSignatures = new Dictionary(); + + foreach (var log in logs) + { + var timestamp = log.Find(f => f.Field == "@timestamp")?.Value ?? ""; + var message = log.Find(f => f.Field == "@message")?.Value ?? ""; + var signature = $"{timestamp}|{message}"; + + if (logSignatures.ContainsKey(signature)) + { + logSignatures[signature]++; + } + else + { + logSignatures[signature] = 1; + } + } + + return logSignatures + .Where(kvp => kvp.Value > 1) + .Select(kvp => + { + var parts = kvp.Key.Split('|'); + return (Timestamp: parts[0], Message: parts[1], Count: kvp.Value); + }) + .OrderByDescending(x => x.Count) + .ToList(); + } } // snippet-end:[CloudWatchLogs.dotnetv4.LargeQueryWorkflow] From dda63cf5d8a7ed85324ab26bf5602ecb94befc98 Mon Sep 17 00:00:00 2001 From: Rachel Hagerman <110480692+rlhagerm@users.noreply.github.com> Date: Thu, 20 Nov 2025 08:13:16 -0600 Subject: [PATCH 13/23] Updating tests and metadata --- .../metadata/cloudwatch-logs_metadata.yaml | 36 ++++ dotnetv3/CloudWatchLogs/README.md | 24 ++- dotnetv4/CloudWatchLogs/LargeQuery/README.md | 95 ++++++++++- .../Scenarios/LargeQueryWorkflow.cs | 42 ++++- .../Tests/LargeQueryWorkflowTests.cs | 158 ++++-------------- .../SPECIFICATION.md | 42 +++-- steering_docs/dotnet-tech/scenario.md | 84 +++++++++- 7 files changed, 335 insertions(+), 146 deletions(-) diff --git a/.doc_gen/metadata/cloudwatch-logs_metadata.yaml b/.doc_gen/metadata/cloudwatch-logs_metadata.yaml index beac1ce1a8a..dd7dd43291d 100644 --- a/.doc_gen/metadata/cloudwatch-logs_metadata.yaml +++ b/.doc_gen/metadata/cloudwatch-logs_metadata.yaml @@ -288,6 +288,14 @@ cloudwatch-logs_PutSubscriptionFilter: cloudwatch-logs: {PutSubscriptionFilter} cloudwatch-logs_GetQueryResults: languages: + .NET: + versions: + - sdk_version: 4 + github: dotnetv4/CloudWatchLogs/LargeQuery + excerpts: + - description: + snippet_tags: + -
CloudWatchLogs.dotnetv4.GetQueryResults JavaScript: versions: - sdk_version: 3 @@ -306,8 +314,28 @@ cloudwatch-logs_GetQueryResults: - python.example_code.cloudwatch_logs.get_query_results services: cloudwatch-logs: {GetQueryResults} +cloudwatch-logs_PutLogEvents: + languages: + .NET: + versions: + - sdk_version: 4 + github: dotnetv4/CloudWatchLogs/LargeQuery + excerpts: + - description: + snippet_tags: + - CloudWatchLogs.dotnetv4.PutLogEvents + services: + cloudwatch-logs: {PutLogEvents} cloudwatch-logs_StartQuery: languages: + .NET: + versions: + - sdk_version: 4 + github: dotnetv4/CloudWatchLogs/LargeQuery + excerpts: + - description: + snippet_tags: + - CloudWatchLogs.dotnetv4.StartQuery JavaScript: versions: - sdk_version: 3 @@ -332,6 +360,14 @@ cloudwatch-logs_GetQueryResults: synopsis: use &CWL; to query more than 10,000 records. category: Scenarios languages: + .NET: + versions: + - sdk_version: 4 + github: dotnetv4/CloudWatchLogs/LargeQuery + excerpts: + - description: This is the main workflow that demonstrates the large query scenario. + snippet_tags: + - CloudWatchLogs.dotnetv4.LargeQueryWorkflow JavaScript: versions: - sdk_version: 3 diff --git a/dotnetv3/CloudWatchLogs/README.md b/dotnetv3/CloudWatchLogs/README.md index 33a4867e9dd..47dbb4d874a 100644 --- a/dotnetv3/CloudWatchLogs/README.md +++ b/dotnetv3/CloudWatchLogs/README.md @@ -41,6 +41,16 @@ Code excerpts that show you how to call individual service functions.
- [DeleteLogGroup](DeleteLogGroupExample/DeleteLogGroup.cs#L6) - [DescribeExportTasks](DescribeExportTasksExample/DescribeExportTasks.cs#L6) - [DescribeLogGroups](DescribeLogGroupsExample/DescribeLogGroups.cs#L6) +- [GetQueryResults](../../dotnetv4/CloudWatchLogs/LargeQuery/Actions/CloudWatchLogsWrapper.cs#L79) +- [PutLogEvents](../../dotnetv4/CloudWatchLogs/LargeQuery/Actions/CloudWatchLogsWrapper.cs#L110) +- [StartQuery](../../dotnetv4/CloudWatchLogs/LargeQuery/Actions/CloudWatchLogsWrapper.cs#L30) + +### Scenarios + +Code examples that show you how to accomplish a specific task by calling multiple +functions within the same service. + +- [Run a large query](../../dotnetv4/CloudWatchLogs/LargeQuery/Scenarios/LargeQueryWorkflow.cs) @@ -73,6 +83,18 @@ Alternatively, you can run the example from within your IDE. +#### Run a large query + +This example shows you how to use CloudWatch Logs to query more than 10,000 records. + + + + + + + + + ### Tests ⚠ Running tests might result in charges to your AWS account. @@ -99,4 +121,4 @@ in the `dotnetv3` folder. Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -SPDX-License-Identifier: Apache-2.0 \ No newline at end of file +SPDX-License-Identifier: Apache-2.0 diff --git a/dotnetv4/CloudWatchLogs/LargeQuery/README.md b/dotnetv4/CloudWatchLogs/LargeQuery/README.md index c530567eac2..bfdd6fdbf3c 100644 --- a/dotnetv4/CloudWatchLogs/LargeQuery/README.md +++ b/dotnetv4/CloudWatchLogs/LargeQuery/README.md @@ -2,6 +2,17 @@ This folder contains a .NET feature scenario that demonstrates how to perform large-scale queries on Amazon CloudWatch Logs using recursive binary search to retrieve more than the 10,000 result limit. +## Overview + +CloudWatch Logs Insights queries have a maximum result limit of 10,000 records per query. This example demonstrates how to overcome this limitation by using a recursive binary search algorithm that splits the time range into smaller segments when the limit is reached. 
+ +The scenario performs the following steps: + +1. **Setup**: Deploys a CloudFormation stack with a log group and log stream +2. **Data Generation**: Creates and uploads 50,000 sample log entries +3. **Query Execution**: Performs recursive queries to retrieve all logs using binary search +4. **Cleanup**: Removes all created resources + ## Project Structure ``` @@ -14,7 +25,7 @@ LargeQuery/ │ ├── README.md # Detailed scenario documentation │ └── CloudWatchLogsScenario.csproj # Scenario project file ├── Tests/ -│ ├── LargeQueryWorkflowTests.cs # Unit tests +│ ├── LargeQueryWorkflowTests.cs # Integration tests │ ├── Usings.cs # Global usings │ └── CloudWatchLogsTests.csproj # Test project file └── CloudWatchLogsLargeQuery.sln # Solution file @@ -23,13 +34,16 @@ LargeQuery/ ## What This Example Demonstrates - Deploying AWS resources using CloudFormation -- Generating and ingesting large volumes of log data -- Performing CloudWatch Logs Insights queries +- Generating and ingesting large volumes of log data using PutLogEvents +- Performing CloudWatch Logs Insights queries with StartQuery and GetQueryResults - Using recursive binary search to retrieve more than 10,000 results +- Handling timestamp precision for accurate query splitting - Cleaning up resources after completion ## Running the Example +### Interactive Mode + 1. Navigate to the solution directory: ``` cd dotnetv4/CloudWatchLogs/LargeQuery @@ -45,22 +59,87 @@ LargeQuery/ dotnet run --project Scenarios/CloudWatchLogsScenario.csproj ``` -4. Run the tests: - ``` - dotnet test - ``` +4. Follow the prompts to: + - Deploy the CloudFormation stack + - Generate sample logs + - Execute the recursive query + - View sample results + - Clean up resources + +### Non-Interactive Mode (Testing) + +Run the integration tests to execute the scenario without user prompts: + +``` +dotnet test +``` + +The test verifies that the scenario completes without errors and successfully retrieves all 50,000 log entries. 
## Prerequisites - .NET 8.0 or later - AWS credentials configured -- Permissions for CloudWatch Logs and CloudFormation +- Permissions for: + - CloudWatch Logs (CreateLogGroup, CreateLogStream, PutLogEvents, StartQuery, GetQueryResults, DeleteLogGroup) + - CloudFormation (CreateStack, DescribeStacks, DeleteStack) + +## How It Works + +### Recursive Query Algorithm + +The key to retrieving more than 10,000 results is the recursive binary search algorithm: + +1. Execute a query with the full date range +2. If results < 10,000, return them (we have all logs in this range) +3. If results = 10,000, there may be more logs: + - Get the timestamp of the last result + - Calculate the midpoint between the last timestamp and end date + - Recursively query the first half (last timestamp to midpoint) + - Recursively query the second half (midpoint to end date) + - Combine all results + +This approach ensures all logs are retrieved by progressively narrowing the time ranges until each segment contains fewer than 10,000 results. + +### Timestamp Precision + +The algorithm uses millisecond precision for timestamps to ensure accurate splitting and prevent duplicate or missing log entries. Each query adjusts the start time by 1 millisecond to avoid overlapping results. + +## Expected Output + +When running the scenario, you'll see output similar to: + +``` +-------------------------------------------------------------------------------- +Welcome to the CloudWatch Logs Large Query Scenario. +-------------------------------------------------------------------------------- +Preparing the application... +Deploying CloudFormation stack: CloudWatchLargeQueryStack +CloudFormation stack creation started: CloudWatchLargeQueryStack +Waiting for CloudFormation stack creation to complete... +CloudFormation stack creation complete. +Stack output RoleARN: arn:aws:iam::123456789012:role/... +Generating 50,000 sample log entries... 
+Batch 1/5: Created 10,000 log entries +Batch 2/5: Created 10,000 log entries +... +Waiting 5 minutes for logs to be fully ingested... +-------------------------------------------------------------------------------- +Starting recursive query to retrieve all logs... +Query date range: 2024-01-15T10:00:00.000Z to 2024-01-15T10:05:00.000Z. Found 10000 logs. +Query date range: 2024-01-15T10:02:30.000Z to 2024-01-15T10:03:45.000Z. Found 10000 logs. +... +Queries finished in 8.234 seconds. +Total logs found: 50000 +-------------------------------------------------------------------------------- +``` ## Related Resources - [CloudWatch Logs Documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/) - [CloudWatch Logs Insights Query Syntax](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CWL_QuerySyntax.html) - [AWS SDK for .NET](https://aws.amazon.com/sdk-for-net/) +- [CloudWatch Logs API Reference](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/) --- diff --git a/dotnetv4/CloudWatchLogs/LargeQuery/Scenarios/LargeQueryWorkflow.cs b/dotnetv4/CloudWatchLogs/LargeQuery/Scenarios/LargeQueryWorkflow.cs index 61c1d572a6b..82d2e1864f6 100644 --- a/dotnetv4/CloudWatchLogs/LargeQuery/Scenarios/LargeQueryWorkflow.cs +++ b/dotnetv4/CloudWatchLogs/LargeQuery/Scenarios/LargeQueryWorkflow.cs @@ -47,7 +47,7 @@ public class LargeQueryWorkflow private static long _queryEndDate; public static bool _interactive = true; - private static string _stackName = "CloudWatchLargeQueryStack"; + public static string _stackName = "CloudWatchLargeQueryStack"; private static string _stackResourcePath = "../../../../../../../scenarios/features/cloudwatch_logs_large_query/resources/stack.yaml"; public static async Task Main(string[] args) @@ -107,6 +107,46 @@ public static async Task Main(string[] args) Console.WriteLine("CloudWatch Logs Large Query scenario completed."); } + /// + /// Runs the scenario workflow. Used for testing. 
+ /// + public static async Task RunScenario() + { + Console.WriteLine(new string('-', 80)); + Console.WriteLine("Welcome to the CloudWatch Logs Large Query Scenario."); + Console.WriteLine(new string('-', 80)); + Console.WriteLine("This scenario demonstrates how to perform large-scale queries on"); + Console.WriteLine("CloudWatch Logs using recursive binary search to retrieve more than"); + Console.WriteLine("the 10,000 result limit."); + Console.WriteLine(); + + try + { + Console.WriteLine(new string('-', 80)); + var prepareSuccess = await PrepareApplication(); + Console.WriteLine(new string('-', 80)); + + if (prepareSuccess) + { + Console.WriteLine(new string('-', 80)); + await ExecuteLargeQuery(); + Console.WriteLine(new string('-', 80)); + } + + Console.WriteLine(new string('-', 80)); + await Cleanup(); + Console.WriteLine(new string('-', 80)); + } + catch (Exception ex) + { + _logger.LogError(ex, "There was a problem with the scenario, initiating cleanup..."); + _interactive = false; + await Cleanup(); + } + + Console.WriteLine("CloudWatch Logs Large Query scenario completed."); + } + /// /// Prepares the application by creating the necessary resources. /// diff --git a/dotnetv4/CloudWatchLogs/LargeQuery/Tests/LargeQueryWorkflowTests.cs b/dotnetv4/CloudWatchLogs/LargeQuery/Tests/LargeQueryWorkflowTests.cs index 0951c9b2549..d01e00aed81 100644 --- a/dotnetv4/CloudWatchLogs/LargeQuery/Tests/LargeQueryWorkflowTests.cs +++ b/dotnetv4/CloudWatchLogs/LargeQuery/Tests/LargeQueryWorkflowTests.cs @@ -1,143 +1,55 @@ // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 +using Amazon.CloudFormation; using Amazon.CloudWatchLogs; -using Amazon.CloudWatchLogs.Model; using CloudWatchLogsActions; +using CloudWatchLogsScenario; using Microsoft.Extensions.Logging; using Moq; namespace CloudWatchLogsTests; +/// +/// Integration tests for the CloudWatch Logs Large Query workflow. 
+/// public class LargeQueryWorkflowTests { - private readonly Mock _mockCloudWatchLogs; - private readonly Mock> _mockLogger; - private readonly CloudWatchLogsWrapper _wrapper; - - public LargeQueryWorkflowTests() - { - _mockCloudWatchLogs = new Mock(); - _mockLogger = new Mock>(); - _wrapper = new CloudWatchLogsWrapper(_mockCloudWatchLogs.Object, _mockLogger.Object); - } - - [Fact] - public async Task StartQueryAsync_Success_ReturnsQueryId() - { - // Arrange - var expectedQueryId = "test-query-id-123"; - _mockCloudWatchLogs - .Setup(x => x.StartQueryAsync(It.IsAny(), default)) - .ReturnsAsync(new StartQueryResponse { QueryId = expectedQueryId }); - - // Act - var result = await _wrapper.StartQueryAsync( - "/test/log-group", - "fields @timestamp, @message", - 1000, - 2000, - 10000); - - // Assert - Assert.Equal(expectedQueryId, result); - } - - [Fact] - public async Task StartQueryAsync_InvalidParameter_ReturnsNull() - { - // Arrange - _mockCloudWatchLogs - .Setup(x => x.StartQueryAsync(It.IsAny(), default)) - .ThrowsAsync(new InvalidParameterException("Invalid parameter")); - - // Act - var result = await _wrapper.StartQueryAsync( - "/test/log-group", - "fields @timestamp, @message", - 1000, - 2000, - 10000); - - // Assert - Assert.Null(result); - } - + /// + /// Verifies the scenario with an integration test. No errors should be logged. + /// + /// Async task. 
[Fact] - public async Task GetQueryResultsAsync_Success_ReturnsResults() + [Trait("Category", "Integration")] + public async Task TestScenarioIntegration() { // Arrange - var expectedResponse = new GetQueryResultsResponse - { - Status = QueryStatus.Complete, - Results = new List> - { - new List - { - new ResultField { Field = "@timestamp", Value = "2023-01-01T00:00:00.000Z" }, - new ResultField { Field = "@message", Value = "Test message" } - } - } - }; + LargeQueryWorkflow._interactive = false; - _mockCloudWatchLogs - .Setup(x => x.GetQueryResultsAsync(It.IsAny(), default)) - .ReturnsAsync(expectedResponse); + var loggerScenarioMock = new Mock>(); + loggerScenarioMock.Setup(logger => logger.Log( + It.Is(logLevel => logLevel == LogLevel.Error), + It.IsAny(), + It.Is((@object, @type) => true), + It.IsAny(), + It.IsAny>())); // Act - var result = await _wrapper.GetQueryResultsAsync("test-query-id"); - - // Assert - Assert.NotNull(result); - Assert.Equal(QueryStatus.Complete, result.Status); - Assert.Single(result.Results); - } - - [Fact] - public async Task PutLogEventsAsync_Success_ReturnsTrue() - { - // Arrange - _mockCloudWatchLogs - .Setup(x => x.PutLogEventsAsync(It.IsAny(), default)) - .ReturnsAsync(new PutLogEventsResponse()); - - var logEvents = new List - { - new InputLogEvent - { - Timestamp = DateTime.UtcNow, - Message = "Test log message" - } - }; - - // Act - var result = await _wrapper.PutLogEventsAsync("/test/log-group", "test-stream", logEvents); - - // Assert - Assert.True(result); - } - - [Fact] - public async Task PutLogEventsAsync_ResourceNotFound_ReturnsFalse() - { - // Arrange - _mockCloudWatchLogs - .Setup(x => x.PutLogEventsAsync(It.IsAny(), default)) - .ThrowsAsync(new ResourceNotFoundException("Log group not found")); - - var logEvents = new List - { - new InputLogEvent - { - Timestamp = DateTime.UtcNow, - Message = "Test log message" - } - }; - - // Act - var result = await _wrapper.PutLogEventsAsync("/test/log-group", "test-stream", 
logEvents); - - // Assert - Assert.False(result); + LargeQueryWorkflow._logger = loggerScenarioMock.Object; + LargeQueryWorkflow._wrapper = new CloudWatchLogsWrapper( + new AmazonCloudWatchLogsClient(), + new Mock>().Object); + LargeQueryWorkflow._amazonCloudFormation = new AmazonCloudFormationClient(); + + await LargeQueryWorkflow.RunScenario(); + + // Assert no errors logged + loggerScenarioMock.Verify(logger => logger.Log( + It.Is(logLevel => logLevel == LogLevel.Error), + It.IsAny(), + It.Is((@object, @type) => true), + It.IsAny(), + It.IsAny>()), + Times.Never); } } diff --git a/scenarios/features/cloudwatch_logs_large_query/SPECIFICATION.md b/scenarios/features/cloudwatch_logs_large_query/SPECIFICATION.md index acb3406cb89..b3e23572c2d 100644 --- a/scenarios/features/cloudwatch_logs_large_query/SPECIFICATION.md +++ b/scenarios/features/cloudwatch_logs_large_query/SPECIFICATION.md @@ -2,12 +2,18 @@ ## Overview -This feature scenario demonstrates how to perform large-scale queries on Amazon CloudWatch Logs using recursive binary search to retrieve more than the 10,000 result limit. The scenario showcases: +This feature scenario demonstrates how to perform large-scale queries on Amazon CloudWatch Logs using recursive binary search to retrieve more than the 10,000 result limit. + +**Important**: This is a complete, self-contained scenario that handles all setup and cleanup automatically. The scenario includes: 1. Deploying CloudFormation resources (log group and stream) 2. Generating and ingesting 50,000 sample log entries 3. Performing recursive queries to retrieve all logs using binary search -4. Cleaning up resources +4. Cleaning up all resources + +**The scenario must be runnable in both interactive and non-interactive modes** to support: +- Interactive mode: User runs the scenario manually with prompts +- Non-interactive mode: Automated integration tests run the scenario without user input For an introduction, see the [README.md](README.md). 
@@ -78,33 +84,40 @@ This scenario uses the following CloudFormation API actions: ### Phase 1: Setup -**Purpose**: Deploy resources and generate sample data +**Purpose**: Deploy resources and generate sample data as part of the scenario -**Steps**: +**Interactive Mode Steps**: 1. Welcome message explaining the scenario 2. Prompt user: "Would you like to deploy the CloudFormation stack and generate sample logs? (y/n)" 3. If yes: - Prompt for CloudFormation stack name (default: "CloudWatchLargeQueryStack") - Deploy CloudFormation stack from `resources/stack.yaml` - Wait for stack creation to complete (status: CREATE_COMPLETE) - - Execute log generation: - - **Option A** (Bash): Run `make-log-files.sh` then `put-log-events.sh` - - **Option B** (Python): Run `create_logs.py` (recommended for cross-platform) - - Capture `QUERY_START_DATE` and `QUERY_END_DATE` from script output + - Generate logs directly using CloudWatch Logs API: + - Create 50,000 log entries with timestamps spanning 5 minutes + - Upload in batches of 10,000 entries using PutLogEvents + - Display progress for each batch uploaded + - Capture start and end timestamps for query configuration - Display message: "Sample logs created. Waiting 5 minutes for logs to be fully ingested..." - - Wait 5 minutes (300 seconds) for log ingestion + - Wait 5 minutes (300 seconds) for log ingestion with countdown display 4. 
If no: - Prompt user for existing log group name - Prompt user for log stream name - Prompt user for query start date (ISO 8601 format with milliseconds) - Prompt user for query end date (ISO 8601 format with milliseconds) +**Non-Interactive Mode Behavior**: +- Automatically deploys stack with default name +- Automatically generates 50,000 sample logs +- Waits 5 minutes for log ingestion +- Uses default values for all configuration + **Variables Set**: - `stackName` - CloudFormation stack name - `logGroupName` - Log group name (default: `/workflows/cloudwatch-logs/large-query`) - `logStreamName` - Log stream name (default: `stream1`) -- `queryStartDate` - Start timestamp for query (milliseconds since epoch) -- `queryEndDate` - End timestamp for query (milliseconds since epoch) +- `queryStartDate` - Start timestamp for query (seconds since epoch) +- `queryEndDate` - End timestamp for query (seconds since epoch) ### Phase 2: Query Execution @@ -130,7 +143,7 @@ This scenario uses the following CloudFormation API actions: **Purpose**: Remove created resources -**Steps**: +**Interactive Mode Steps**: 1. Prompt user: "Would you like to delete the CloudFormation stack and all resources? (y/n)" 2. If yes: - Delete CloudFormation stack @@ -140,6 +153,11 @@ This scenario uses the following CloudFormation API actions: - Display message: "Resources will remain. You can delete them later through the AWS Console." - Display stack name and log group name for reference +**Non-Interactive Mode Behavior**: +- Automatically deletes the CloudFormation stack +- Waits for deletion to complete +- Ensures cleanup happens even if errors occur during the scenario + --- ## Implementation Details diff --git a/steering_docs/dotnet-tech/scenario.md b/steering_docs/dotnet-tech/scenario.md index 51cb61865d7..2bbb42c41ef 100644 --- a/steering_docs/dotnet-tech/scenario.md +++ b/steering_docs/dotnet-tech/scenario.md @@ -895,9 +895,91 @@ catch (Exception ex) 4. 
**Implement Workflow**: Create workflow class with phases from specification 5. **Add CloudFormation**: Integrate stack deployment and deletion 6. **Add User Interaction**: Implement prompts and validation -7. **Test**: Create unit tests for workflow methods +7. **Test**: Create integration tests for workflow methods 8. **Document**: Add README.md with scenario description +## Integration Tests + +### Single Integration Test Pattern + +Integration tests should use a single test method that verifies no errors are logged: + +```csharp +/// +/// Verifies the scenario with an integration test. No errors should be logged. +/// +/// Async task. +[Fact] +[Trait("Category", "Integration")] +public async Task TestScenarioIntegration() +{ + // Arrange + {Service}Workflow._interactive = false; + + var loggerScenarioMock = new Mock>(); + loggerScenarioMock.Setup(logger => logger.Log( + It.Is(logLevel => logLevel == LogLevel.Error), + It.IsAny(), + It.Is((@object, @type) => true), + It.IsAny(), + It.IsAny>())); + + // Act + {Service}Workflow._logger = loggerScenarioMock.Object; + {Service}Workflow._wrapper = new {Service}Wrapper( + new Amazon{Service}Client(), + new Mock>().Object); + {Service}Workflow._amazonCloudFormation = new AmazonCloudFormationClient(); + + await {Service}Workflow.RunScenario(); + + // Assert no errors logged + loggerScenarioMock.Verify(logger => logger.Log( + It.Is(logLevel => logLevel == LogLevel.Error), + It.IsAny(), + It.Is((@object, @type) => true), + It.IsAny(), + It.IsAny>()), + Times.Never); +} +``` + +### RunScenario Method + +The workflow must include a public RunScenario method for testing: + +```csharp +/// +/// Runs the scenario workflow. Used for testing. 
+/// +public static async Task RunScenario() +{ + Console.WriteLine(new string('-', 80)); + Console.WriteLine("Welcome to the {Service} Scenario."); + Console.WriteLine(new string('-', 80)); + + try + { + var prepareSuccess = await PrepareApplication(); + + if (prepareSuccess) + { + await ExecutePhase2(); + } + + await Cleanup(); + } + catch (Exception ex) + { + _logger.LogError(ex, "There was a problem with the scenario, initiating cleanup..."); + _interactive = false; + await Cleanup(); + } + + Console.WriteLine("Scenario completed."); +} +``` + ### Specification Sections to Implement - **API Actions Used**: All operations must be in wrapper class - **Proposed example structure**: Maps to workflow phases From bf477a973b35252ecf500407e0d5cda52a1a6964 Mon Sep 17 00:00:00 2001 From: Rachel Hagerman <110480692+rlhagerm@users.noreply.github.com> Date: Thu, 20 Nov 2025 08:18:31 -0600 Subject: [PATCH 14/23] Update README and main solution file --- dotnetv4/DotNetV4Examples.sln | 6 +++--- scenarios/features/cloudwatch_logs_large_query/README.md | 1 + 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/dotnetv4/DotNetV4Examples.sln b/dotnetv4/DotNetV4Examples.sln index cfa92fb639b..0b44f37b700 100644 --- a/dotnetv4/DotNetV4Examples.sln +++ b/dotnetv4/DotNetV4Examples.sln @@ -95,11 +95,11 @@ Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "CloudWatchActions", "CloudW EndProject Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "CloudWatchLogs", "CloudWatchLogs", "{A1B2C3D4-E5F6-7890-1234-567890ABCDEF}" EndProject -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "CloudWatchLogsTests", "CloudWatchLogs\Tests\CloudWatchLogsTests.csproj", "{B2C3D4E5-F6A7-8901-2345-678901BCDEFG}" +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "CloudWatchLogsTests", "CloudWatchLogs\LargeQuery\Tests\CloudWatchLogsTests.csproj", "{B2C3D4E5-F6A7-8901-2345-678901BCDEFG}" EndProject -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "CloudWatchLogsScenarios", 
"CloudWatchLogs\Scenarios\CloudWatchLogsScenarios.csproj", "{C3D4E5F6-A7B8-9012-3456-789012CDEFGH}" +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "CloudWatchLogsScenario", "CloudWatchLogs\LargeQuery\Scenarios\CloudWatchLogsScenario.csproj", "{C3D4E5F6-A7B8-9012-3456-789012CDEFGH}" EndProject -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "CloudWatchLogsActions", "CloudWatchLogs\Actions\CloudWatchLogsActions.csproj", "{D4E5F6A7-B8C9-0123-4567-890123DEFGHI}" +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "CloudWatchLogsActions", "CloudWatchLogs\LargeQuery\Actions\CloudWatchLogsActions.csproj", "{D4E5F6A7-B8C9-0123-4567-890123DEFGHI}" EndProject Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "EC2", "EC2", "{9424FB14-B6DE-44CE-B675-AC2B57EC1E69}" EndProject diff --git a/scenarios/features/cloudwatch_logs_large_query/README.md b/scenarios/features/cloudwatch_logs_large_query/README.md index 1b76e5758c6..17d487f3e3f 100644 --- a/scenarios/features/cloudwatch_logs_large_query/README.md +++ b/scenarios/features/cloudwatch_logs_large_query/README.md @@ -47,6 +47,7 @@ A lot of logs are needed to make a robust example. If you happen to have a log g This example is implemented in the following languages: +- [.NET](../../../dotnetv4/CloudWatchLogs/LargeQuery/README.md) - [JavaScript](../../../javascriptv3/example_code/cloudwatch-logs/scenarios/large-query/README.md) - [Python](../../../python/example_code/cloudwatch-logs/scenarios/large-query/README.md) From c3f920f4e1378de68d03d020f4574e3dc7e6ce97 Mon Sep 17 00:00:00 2001 From: Rachel Hagerman <110480692+rlhagerm@users.noreply.github.com> Date: Thu, 20 Nov 2025 09:10:28 -0600 Subject: [PATCH 15/23] Fix formatting. 
--- .../Actions/CloudWatchLogsWrapper.cs | 2 +- .../Scenarios/LargeQueryWorkflow.cs | 24 +++++++++---------- .../Tests/LargeQueryWorkflowTests.cs | 2 +- .../CloudWatchLogs/LargeQuery/Tests/Usings.cs | 2 +- 4 files changed, 15 insertions(+), 15 deletions(-) diff --git a/dotnetv4/CloudWatchLogs/LargeQuery/Actions/CloudWatchLogsWrapper.cs b/dotnetv4/CloudWatchLogs/LargeQuery/Actions/CloudWatchLogsWrapper.cs index eb09c41cfd1..a1fd885cdc7 100644 --- a/dotnetv4/CloudWatchLogs/LargeQuery/Actions/CloudWatchLogsWrapper.cs +++ b/dotnetv4/CloudWatchLogs/LargeQuery/Actions/CloudWatchLogsWrapper.cs @@ -145,4 +145,4 @@ public async Task PutLogEventsAsync( } // snippet-end:[CloudWatchLogs.dotnetv4.PutLogEvents] } -// snippet-end:[CloudWatchLogs.dotnetv4.CloudWatchLogsWrapper] +// snippet-end:[CloudWatchLogs.dotnetv4.CloudWatchLogsWrapper] \ No newline at end of file diff --git a/dotnetv4/CloudWatchLogs/LargeQuery/Scenarios/LargeQueryWorkflow.cs b/dotnetv4/CloudWatchLogs/LargeQuery/Scenarios/LargeQueryWorkflow.cs index 82d2e1864f6..c602ab9d41b 100644 --- a/dotnetv4/CloudWatchLogs/LargeQuery/Scenarios/LargeQueryWorkflow.cs +++ b/dotnetv4/CloudWatchLogs/LargeQuery/Scenarios/LargeQueryWorkflow.cs @@ -393,7 +393,7 @@ public static async Task ExecuteLargeQuery() Console.WriteLine(); Console.WriteLine($"Queries finished in {stopwatch.Elapsed.TotalSeconds:F3} seconds."); Console.WriteLine($"Total logs found: {allResults.Count}"); - + // Check for duplicates Console.WriteLine(); Console.WriteLine("Checking for duplicate logs..."); @@ -406,7 +406,7 @@ public static async Task ExecuteLargeQuery() { Console.WriteLine($" [{dup.Timestamp}] {dup.Message} (appears {dup.Count} times)"); } - + var uniqueCount = allResults.Count - duplicates.Sum(d => d.Count - 1); Console.WriteLine($"Unique logs: {uniqueCount}"); } @@ -477,10 +477,10 @@ private static async Task>> PerformLargeQuery( // Parse the timestamp and add 1 millisecond to avoid querying the same log again var lastLogDate = 
DateTimeOffset.Parse(lastLogTimestamp + " +0000"); Console.WriteLine($" -> Last log as DateTimeOffset: {lastLogDate:yyyy-MM-ddTHH:mm:ss.fffZ} ({lastLogDate.ToUnixTimeSeconds()}s)"); - + var offsetLastLogDate = lastLogDate.AddMilliseconds(1); Console.WriteLine($" -> Offset timestamp (last + 1ms): {offsetLastLogDate:yyyy-MM-ddTHH:mm:ss.fffZ} ({offsetLastLogDate.ToUnixTimeSeconds()}s)"); - + // Convert to seconds, but round UP to the next second to avoid overlapping with logs in the same second // This ensures we don't re-query logs that share the same second as the last log var offsetLastLogTime = offsetLastLogDate.ToUnixTimeSeconds(); @@ -489,7 +489,7 @@ private static async Task>> PerformLargeQuery( offsetLastLogTime++; // Move to the next full second Console.WriteLine($" -> Adjusted to next full second: {offsetLastLogTime}s ({DateTimeOffset.FromUnixTimeSeconds(offsetLastLogTime):yyyy-MM-ddTHH:mm:ss.fffZ})"); } - + Console.WriteLine($" -> Comparing: offsetLastLogTime={offsetLastLogTime}s vs endTime={endTime}s"); Console.WriteLine($" -> End time as date: {DateTimeOffset.FromUnixTimeSeconds(endTime):yyyy-MM-ddTHH:mm:ss.fffZ}"); @@ -502,12 +502,12 @@ private static async Task>> PerformLargeQuery( // Split the remaining date range in half var (range1Start, range1End, range2Start, range2End) = SplitDateRange(offsetLastLogTime, endTime); - + var range1StartDate = DateTimeOffset.FromUnixTimeSeconds(range1Start).ToString("yyyy-MM-ddTHH:mm:ss.fffZ"); var range1EndDate = DateTimeOffset.FromUnixTimeSeconds(range1End).ToString("yyyy-MM-ddTHH:mm:ss.fffZ"); var range2StartDate = DateTimeOffset.FromUnixTimeSeconds(range2Start).ToString("yyyy-MM-ddTHH:mm:ss.fffZ"); var range2EndDate = DateTimeOffset.FromUnixTimeSeconds(range2End).ToString("yyyy-MM-ddTHH:mm:ss.fffZ"); - + Console.WriteLine($" -> Splitting remaining range:"); Console.WriteLine($" Range 1: {range1StartDate} ({range1Start}s) to {range1EndDate} ({range1End}s)"); Console.WriteLine($" Range 2: {range2StartDate} 
({range2Start}s) to {range2EndDate} ({range2End}s)"); @@ -516,7 +516,7 @@ private static async Task>> PerformLargeQuery( Console.WriteLine($" -> Querying range 1..."); var results1 = await PerformLargeQuery(logGroupName, queryString, range1Start, range1End, limit); Console.WriteLine($" -> Range 1 returned {results1.Count} logs"); - + Console.WriteLine($" -> Querying range 2..."); var results2 = await PerformLargeQuery(logGroupName, queryString, range2Start, range2End, limit); Console.WriteLine($" -> Range 2 returned {results2.Count} logs"); @@ -807,13 +807,13 @@ private static long PromptUserForLong(string prompt) private static List<(string Timestamp, string Message, int Count)> FindDuplicateLogs(List> logs) { var logSignatures = new Dictionary(); - + foreach (var log in logs) { var timestamp = log.Find(f => f.Field == "@timestamp")?.Value ?? ""; var message = log.Find(f => f.Field == "@message")?.Value ?? ""; var signature = $"{timestamp}|{message}"; - + if (logSignatures.ContainsKey(signature)) { logSignatures[signature]++; @@ -823,7 +823,7 @@ private static long PromptUserForLong(string prompt) logSignatures[signature] = 1; } } - + return logSignatures .Where(kvp => kvp.Value > 1) .Select(kvp => @@ -835,4 +835,4 @@ private static long PromptUserForLong(string prompt) .ToList(); } } -// snippet-end:[CloudWatchLogs.dotnetv4.LargeQueryWorkflow] +// snippet-end:[CloudWatchLogs.dotnetv4.LargeQueryWorkflow] \ No newline at end of file diff --git a/dotnetv4/CloudWatchLogs/LargeQuery/Tests/LargeQueryWorkflowTests.cs b/dotnetv4/CloudWatchLogs/LargeQuery/Tests/LargeQueryWorkflowTests.cs index d01e00aed81..161e61f7fe3 100644 --- a/dotnetv4/CloudWatchLogs/LargeQuery/Tests/LargeQueryWorkflowTests.cs +++ b/dotnetv4/CloudWatchLogs/LargeQuery/Tests/LargeQueryWorkflowTests.cs @@ -52,4 +52,4 @@ public async Task TestScenarioIntegration() It.IsAny>()), Times.Never); } -} +} \ No newline at end of file diff --git a/dotnetv4/CloudWatchLogs/LargeQuery/Tests/Usings.cs 
b/dotnetv4/CloudWatchLogs/LargeQuery/Tests/Usings.cs index 4cb6a55926e..24f9d54e547 100644 --- a/dotnetv4/CloudWatchLogs/LargeQuery/Tests/Usings.cs +++ b/dotnetv4/CloudWatchLogs/LargeQuery/Tests/Usings.cs @@ -1,4 +1,4 @@ // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 -global using Xunit; +global using Xunit; \ No newline at end of file From 4c47d34b8f3171ec8e60c9aff6dea04e0c0514ae Mon Sep 17 00:00:00 2001 From: Rachel Hagerman <110480692+rlhagerm@users.noreply.github.com> Date: Thu, 20 Nov 2025 09:14:42 -0600 Subject: [PATCH 16/23] Update .gitignore --- .gitignore | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index d6f42f4afdd..657ad41f507 100644 --- a/.gitignore +++ b/.gitignore @@ -38,4 +38,6 @@ kotlin/services/**/gradlew kotlin/services/**/gradlew.bat kotlin/services/**/.kotlin/ /.local/ -/.kiro/settings +.kiro/settings/ +.kiro/steering/ + From a989b8e34f77f89e460ce9d4a508b611b704eaa5 Mon Sep 17 00:00:00 2001 From: Rachel Hagerman <110480692+rlhagerm@users.noreply.github.com> Date: Thu, 20 Nov 2025 09:28:19 -0600 Subject: [PATCH 17/23] Updates to metadata --- .../metadata/cloudwatch-logs_metadata.yaml | 8 +- .kiro/settings/mcp.json | 54 ---------- .kiro/steering/orchestration.md | 17 --- .tools/readmes/config.py | 1 + dotnetv3/CloudWatchLogs/README.md | 22 ---- dotnetv4/CloudWatchLogs/README.md | 102 ++++++++++++++---- 6 files changed, 88 insertions(+), 116 deletions(-) delete mode 100644 .kiro/settings/mcp.json delete mode 100644 .kiro/steering/orchestration.md diff --git a/.doc_gen/metadata/cloudwatch-logs_metadata.yaml b/.doc_gen/metadata/cloudwatch-logs_metadata.yaml index dd7dd43291d..7f208a71409 100644 --- a/.doc_gen/metadata/cloudwatch-logs_metadata.yaml +++ b/.doc_gen/metadata/cloudwatch-logs_metadata.yaml @@ -290,7 +290,7 @@ cloudwatch-logs_GetQueryResults: languages: .NET: versions: - - sdk_version: 3 + - sdk_version: 4 github: 
dotnetv4/CloudWatchLogs/LargeQuery excerpts: - description: @@ -318,7 +318,7 @@ cloudwatch-logs_PutLogEvents: languages: .NET: versions: - - sdk_version: 3 + - sdk_version: 4 github: dotnetv4/CloudWatchLogs/LargeQuery excerpts: - description: @@ -330,7 +330,7 @@ cloudwatch-logs_StartQuery: languages: .NET: versions: - - sdk_version: 3 + - sdk_version: 4 github: dotnetv4/CloudWatchLogs/LargeQuery excerpts: - description: @@ -362,7 +362,7 @@ cloudwatch-logs_Scenario_BigQuery: languages: .NET: versions: - - sdk_version: 3 + - sdk_version: 4 github: dotnetv4/CloudWatchLogs/LargeQuery excerpts: - description: This is the main workflow that demonstrates the large query scenario. diff --git a/.kiro/settings/mcp.json b/.kiro/settings/mcp.json deleted file mode 100644 index d9c0a7bbd0c..00000000000 --- a/.kiro/settings/mcp.json +++ /dev/null @@ -1,54 +0,0 @@ -{ - "mcpServers": { - "awslabs.bedrock-kb-retrieval-mcp-server": { - "command": "uv", - "args": [ - "tool", - "run", - "--from", - "awslabs.bedrock-kb-retrieval-mcp-server@latest", - "awslabs.bedrock-kb-retrieval-mcp-server.exe" - ], - "env": { - "FASTMCP_LOG_LEVEL": "ERROR", - "AWS_PROFILE": "cex-ai-kb-access", - "AWS_REGION": "us-west-2" - }, - "disabled": false, - "autoApprove": [ - "QueryKnowledgeBases" - ], - "disabledTools": [ - "ListKnowledgeBases" - ] - }, - "aws-knowledge-mcp-server": { - "command": "uvx", - "args": [ - "mcp-proxy", - "--transport", - "streamablehttp", - "https://knowledge-mcp.global.api.aws" - ], - "disabled": false, - "autoApprove": [ - "aws___search_documentation", - "aws___read_documentation" - ] - }, - "codeloom-mcp": { - "disabled": false, - "command": "code-loom-mcp", - "args": [], - "env": {}, - "transportType": "stdio", - "autoApprove": [ - "loomer", - "search_aws_docs", - "read_aws_docs", - "query_knowledge_bases", - "list_knowledge_bases" - ] - } - } -} \ No newline at end of file diff --git a/.kiro/steering/orchestration.md b/.kiro/steering/orchestration.md deleted file mode 100644 
index 190c9c603f2..00000000000 --- a/.kiro/steering/orchestration.md +++ /dev/null @@ -1,17 +0,0 @@ -# Code Generation Orchestration - -## Purpose -Define location of relevant steering docs that are outside of the .kiro directory. Refer to all steering docs in the ./steering_docs directory. Use the appropriate steering instructions for the requested language. Use the directories given below. - -- .NET: dotnet-tech -- Java: java-tech -- Kotlin: kotlin-tech - -## Code Generation -When a specification file is provided by the user, use that specification directly. Do not create your own spec or task breakdown. Follow the provided specification exactly and implement the requirements as described. - -If no specification is provided, then do not use separate steps for planning and tasks unless specifically asked to do so. Perform the tasks without stopping for user input. - - - - diff --git a/.tools/readmes/config.py b/.tools/readmes/config.py index 35bd076a6ac..25e4b7cff7d 100644 --- a/.tools/readmes/config.py +++ b/.tools/readmes/config.py @@ -129,6 +129,7 @@ "auto-scaling": "dotnetv4/AutoScaling", "cloudformation": "dotnetv4/CloudFormation", "cloudwatch": "dotnetv4/CloudWatch", + "cloudwatch-logs": "dotnetv4/CloudWatchLogs", "cognito-identity-provider": "dotnetv4/Cognito", "ec2": "dotnetv4/EC2", "ecs": "dotnetv4/ECS", diff --git a/dotnetv3/CloudWatchLogs/README.md b/dotnetv3/CloudWatchLogs/README.md index 47dbb4d874a..21e3d28fc10 100644 --- a/dotnetv3/CloudWatchLogs/README.md +++ b/dotnetv3/CloudWatchLogs/README.md @@ -41,16 +41,6 @@ Code excerpts that show you how to call individual service functions. 
- [DeleteLogGroup](DeleteLogGroupExample/DeleteLogGroup.cs#L6) - [DescribeExportTasks](DescribeExportTasksExample/DescribeExportTasks.cs#L6) - [DescribeLogGroups](DescribeLogGroupsExample/DescribeLogGroups.cs#L6) -- [GetQueryResults](../../dotnetv4/CloudWatchLogs/LargeQuery/Actions/CloudWatchLogsWrapper.cs#L79) -- [PutLogEvents](../../dotnetv4/CloudWatchLogs/LargeQuery/Actions/CloudWatchLogsWrapper.cs#L110) -- [StartQuery](../../dotnetv4/CloudWatchLogs/LargeQuery/Actions/CloudWatchLogsWrapper.cs#L30) - -### Scenarios - -Code examples that show you how to accomplish a specific task by calling multiple -functions within the same service. - -- [Run a large query](../../dotnetv4/CloudWatchLogs/LargeQuery/Scenarios/LargeQueryWorkflow.cs) @@ -83,18 +73,6 @@ Alternatively, you can run the example from within your IDE. -#### Run a large query - -This example shows you how to use CloudWatch Logs to query more than 10,000 records. - - - - - - - - - ### Tests ⚠ Running tests might result in charges to your AWS account. diff --git a/dotnetv4/CloudWatchLogs/README.md b/dotnetv4/CloudWatchLogs/README.md index d5aec8c2d7e..bb3400d77e7 100644 --- a/dotnetv4/CloudWatchLogs/README.md +++ b/dotnetv4/CloudWatchLogs/README.md @@ -1,35 +1,99 @@ -# CloudWatch Logs Examples for .NET +# CloudWatch Logs code examples for the SDK for .NET (v4) -This folder contains examples for Amazon CloudWatch Logs using the AWS SDK for .NET. +## Overview -## Examples +Shows how to use the AWS SDK for .NET (v4) to work with Amazon CloudWatch Logs. -### Feature Scenarios + + -- **[LargeQuery](LargeQuery/)** - Demonstrates how to perform large-scale queries on CloudWatch Logs using recursive binary search to retrieve more than the 10,000 result limit. +_CloudWatch Logs monitor, store, and access your log files from Amazon Elastic Compute Cloud instances, AWS CloudTrail, or other sources._ -## Running the Examples +## ⚠ Important -Each example includes its own README with specific instructions. 
Generally, you can: +* Running this code might result in charges to your AWS account. For more details, see [AWS Pricing](https://aws.amazon.com/pricing/) and [Free Tier](https://aws.amazon.com/free/). +* Running the tests might result in charges to your AWS account. +* We recommend that you grant your code least privilege. At most, grant only the minimum permissions required to perform the task. For more information, see [Grant least privilege](https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#grant-least-privilege). +* This code is not tested in every AWS Region. For more information, see [AWS Regional Services](https://aws.amazon.com/about-aws/global-infrastructure/regional-product-services). -1. Navigate to the example directory -2. Build the solution: `dotnet build` -3. Run the example: `dotnet run --project Scenarios/{ProjectName}.csproj` -4. Run tests: `dotnet test` + + -## Prerequisites +## Code examples -- .NET 8.0 or later -- AWS credentials configured -- Appropriate AWS permissions for CloudWatch Logs +### Prerequisites -## Additional Resources +For prerequisites, see the [README](../README.md#Prerequisites) in the `dotnetv4` folder. -- [CloudWatch Logs Documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/) -- [AWS SDK for .NET Documentation](https://docs.aws.amazon.com/sdk-for-net/) -- [CloudWatch Logs API Reference](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/) + + + + +### Single actions + +Code excerpts that show you how to call individual service functions. + +- [GetQueryResults](LargeQuery/Actions/CloudWatchLogsWrapper.cs#L79) +- [PutLogEvents](LargeQuery/Actions/CloudWatchLogsWrapper.cs#L110) +- [StartQuery](LargeQuery/Actions/CloudWatchLogsWrapper.cs#L30) + +### Scenarios + +Code examples that show you how to accomplish a specific task by calling multiple +functions within the same service. 
+ +- [Run a large query](LargeQuery/Scenarios/LargeQueryWorkflow.cs) + + + + + +## Run the examples + +### Instructions + + + + + + + +#### Run a large query + +This example shows you how to use CloudWatch Logs to query more than 10,000 records. + + + + + + + + + +### Tests + +⚠ Running tests might result in charges to your AWS account. + + +To find instructions for running these tests, see the [README](../README.md#Tests) +in the `dotnetv4` folder. + + + + + + +## Additional resources + +- [CloudWatch Logs User Guide](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/WhatIsCloudWatchLogs.html) +- [CloudWatch Logs API Reference](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/Welcome.html) +- [SDK for .NET (v4) CloudWatch Logs reference](https://docs.aws.amazon.com/sdkfornet/v4/apidocs/items/Cloudwatch-logs/NCloudwatch-logs.html) + + + --- Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + SPDX-License-Identifier: Apache-2.0 From ca499a1748d68142a9f681b9852fb04a71a5ecec Mon Sep 17 00:00:00 2001 From: Rachel Hagerman <110480692+rlhagerm@users.noreply.github.com> Date: Thu, 20 Nov 2025 10:11:41 -0600 Subject: [PATCH 18/23] File cleanup. 
--- .../SPECIFICATION copy.md | 118 ------------------ .../resources/create_logs.py | 70 ----------- .../resources/stack.yaml | 2 +- 3 files changed, 1 insertion(+), 189 deletions(-) delete mode 100644 scenarios/features/cloudwatch_logs_large_query/SPECIFICATION copy.md delete mode 100644 scenarios/features/cloudwatch_logs_large_query/resources/create_logs.py diff --git a/scenarios/features/cloudwatch_logs_large_query/SPECIFICATION copy.md b/scenarios/features/cloudwatch_logs_large_query/SPECIFICATION copy.md deleted file mode 100644 index 788d7859efc..00000000000 --- a/scenarios/features/cloudwatch_logs_large_query/SPECIFICATION copy.md +++ /dev/null @@ -1,118 +0,0 @@ -# CloudWatch Logs large query - Technical specification - -This document contains the technical specifications for _CloudWatch Logs large query_, -a feature scenario that showcases AWS services and SDKs. It is primarily intended for the AWS code -examples team to use while developing this example in additional languages. - -This document explains the following: - -- Deploying AWS resources. -- Adding sample data. -- Setting up a large query. - -For an introduction, see the [README.md](README.md). - ---- - -### Table of contents - -- [Architecture](#architecture) -- [User input](#user-input) -- [Common resources](#common-resources) -- [Building the queries](#building-the-queries) -- [Output](#output) -- [Metadata](#metadata) - -## Architecture - -- Amazon CloudWatch Logs group -- Amazon CloudWatch Logs stream - ---- - -## User input - -The example should allow the configuration of a query start date, query end date, and results limit. It's up to you to decide how to allow this configuration. - -### Suggested variable names - -- `QUERY_START_DATE` - The oldest date that will be queried. -- `QUERY_END_DATE` - The newest date that will be queried. -- `QUERY_LIMIT` - The maximum number of results to return. CloudWatch has a maximum of 10,000. 
- ---- - -## Common resources - -This example has a set of common resources that are stored in the [resources](resources) folder. - -- [stack.yaml](resources/stack.yaml) is an AWS CloudFormation template containing the resources needed to run this example. -- [make-log-files.sh](resources/make-log-files.sh) is a bash script that creates log data. **Five minutes of logs, starting at the time of execution, will be created. Wait at least five minutes after running this script before attempting to query.** -- [put-log-events](resources/put-log-events.sh) is a bash script that ingests log data and uploads it to CloudWatch. - ---- - -## Building the queries - -### Building and waiting for single query - -The query itself is a "CloudWatch Logs Insights query syntax" string. The query must return the `@timestamp` field so follow-up queries can use that information. Here's a sample query string: `fields @timestamp, @message | sort @timestamp asc`. Notice it sorts in ascending order. You can sort in either `asc` or `desc`, but the recursive strategy described later will need to match accordingly. - -Queries are jobs. You can start a query with `StartQuery`, but it immediately returns the `queryId`. You must poll a query using `GetQueryResults` until the query has finished. For the purpose of this example, a query has "finished" when `GetQueryResults` has returned a status of one of "Complete", "Failed", "Cancelled", "Timeout", or "Unknown". - -`StartQuery` responds with an error if the query's start or end date occurs out of bounds of the log group creation date. The error message starts with "Query's end date and time". - -Start the query and wait for it to "finish". Store the `results`. If the count of the results is less than the configured LIMIT, return the results. If the results are greater than or equal to the limit, go to [Recursive queries](#recursive-queries). 
- ---- - -### Recursive queries - -If the result count from the previous step is 10000 (or the configured LIMIT), it is very likely that there are more results. **The example must do a binary search of the remaining logs**. To do this, get the date of the last log (earliest or latest, depending on sort order). Use that date as the start date of a new date range. The end date can remain the same. - -Split that date range in half, resulting in two new date ranges. Call your query function twice; once for each new date range. - -Concatenate the results of the first query with the results of the two new queries. - -The following pseudocode illustrates this. - -```pseudocode -func large_query(date_range): - query_results = get_query_results(date_range) - - if query_results.length < LIMIT - return query_results - else - date_range = [query_results.end, date_range.end] - d1, d2 = split(date_range) - return concat(query_results, large_query(d1), large_query(d2)) -``` - -## Output - -To illustrate the search, log the date ranges for each query made and the number of logs that were found. - -Example: - -``` -Starting a recursive query... -Query date range: 2023-12-22T19:08:42.000Z to 2023-12-22T19:13:41.994Z. Found 10000 logs. -Query date range: 2023-12-22T19:09:41.995Z to 2023-12-22T19:11:41.994Z. Found 10000 logs. -Query date range: 2023-12-22T19:11:41.995Z to 2023-12-22T19:13:41.994Z. Found 10000 logs. -Query date range: 2023-12-22T19:10:41.995Z to 2023-12-22T19:11:11.994Z. Found 5000 logs. -Query date range: 2023-12-22T19:11:11.995Z to 2023-12-22T19:11:41.994Z. Found 5000 logs. -Query date range: 2023-12-22T19:12:41.995Z to 2023-12-22T19:13:11.994Z. Found 5000 logs. -Query date range: 2023-12-22T19:13:11.995Z to 2023-12-22T19:13:41.994Z. Found 5000 logs. -Queries finished in 11.253 seconds. 
-Total logs found: 50000 -``` - ---- - -## Metadata - -| action / scenario | metadata file | metadata key | -| ----------------- | ----------------------------- | --------------------------------- | -| `GetQueryResults` | cloudwatch-logs_metadata.yaml | cloudwatch-logs_GetQueryResults | -| `StartQuery` | cloudwatch-logs_metadata.yaml | cloudwatch-logs_StartQuery | -| `Large Query` | cloudwatch-logs_metadata.yaml | cloudwatch-logs_Scenario_LargeQuery | diff --git a/scenarios/features/cloudwatch_logs_large_query/resources/create_logs.py b/scenarios/features/cloudwatch_logs_large_query/resources/create_logs.py deleted file mode 100644 index 882bbdc1eb7..00000000000 --- a/scenarios/features/cloudwatch_logs_large_query/resources/create_logs.py +++ /dev/null @@ -1,70 +0,0 @@ -#!/usr/bin/env python3 -# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -# SPDX-License-Identifier: Apache-2.0 - -""" -Script to generate and upload 50,000 sample log entries to CloudWatch Logs. -This script creates logs spanning 5 minutes and uploads them in batches. 
-""" - -import boto3 -import time -from datetime import datetime - -LOG_GROUP_NAME = "/workflows/cloudwatch-logs/large-query" -LOG_STREAM_NAME = "stream1" -TOTAL_ENTRIES = 50000 -ENTRIES_PER_BATCH = 10000 -FIVE_MINUTES_MS = 5 * 60 * 1000 - - -def main(): - """Generate and upload log entries to CloudWatch Logs.""" - client = boto3.client('logs') - - # Calculate timestamps - start_time_ms = int(time.time() * 1000) - timestamp_increment = FIVE_MINUTES_MS // TOTAL_ENTRIES - - print(f"Generating {TOTAL_ENTRIES} log entries...") - print(f"QUERY_START_DATE={start_time_ms}") - - entry_count = 0 - current_timestamp = start_time_ms - - # Generate and upload logs in batches - num_batches = TOTAL_ENTRIES // ENTRIES_PER_BATCH - - for batch_num in range(num_batches): - log_events = [] - - for i in range(ENTRIES_PER_BATCH): - log_events.append({ - 'timestamp': current_timestamp, - 'message': f'Entry {entry_count}' - }) - - entry_count += 1 - current_timestamp += timestamp_increment - - # Upload batch - try: - client.put_log_events( - logGroupName=LOG_GROUP_NAME, - logStreamName=LOG_STREAM_NAME, - logEvents=log_events - ) - print(f"Uploaded batch {batch_num + 1}/{num_batches}") - except Exception as e: - print(f"Error uploading batch {batch_num + 1}: {e}") - return 1 - - end_time_ms = current_timestamp - timestamp_increment - print(f"QUERY_END_DATE={end_time_ms}") - print(f"Successfully uploaded {TOTAL_ENTRIES} log entries") - - return 0 - - -if __name__ == "__main__": - exit(main()) diff --git a/scenarios/features/cloudwatch_logs_large_query/resources/stack.yaml b/scenarios/features/cloudwatch_logs_large_query/resources/stack.yaml index 25937630e31..ed9f451193d 100644 --- a/scenarios/features/cloudwatch_logs_large_query/resources/stack.yaml +++ b/scenarios/features/cloudwatch_logs_large_query/resources/stack.yaml @@ -2,7 +2,7 @@ Resources: LargeQueryLogGroup: Type: AWS::Logs::LogGroup Properties: - LogGroupName: /workflows/cloudwatch-logs/large-query12 + LogGroupName: 
/workflows/cloudwatch-logs/large-query LargeQueryLogGroupStream1: Type: AWS::Logs::LogStream Properties: From 2730b8868e8b43a40f9bc0be4f190785c30c98a0 Mon Sep 17 00:00:00 2001 From: Rachel Hagerman <110480692+rlhagerm@users.noreply.github.com> Date: Thu, 20 Nov 2025 10:12:42 -0600 Subject: [PATCH 19/23] File cleanup. --- .../LargeQuery/Scenarios/README.md | 109 ------------------ 1 file changed, 109 deletions(-) delete mode 100644 dotnetv4/CloudWatchLogs/LargeQuery/Scenarios/README.md diff --git a/dotnetv4/CloudWatchLogs/LargeQuery/Scenarios/README.md b/dotnetv4/CloudWatchLogs/LargeQuery/Scenarios/README.md deleted file mode 100644 index c5d35a8317f..00000000000 --- a/dotnetv4/CloudWatchLogs/LargeQuery/Scenarios/README.md +++ /dev/null @@ -1,109 +0,0 @@ -# CloudWatch Logs Large Query Workflow - -## Overview - -This example demonstrates how to perform large-scale queries on Amazon CloudWatch Logs using recursive binary search to retrieve more than the 10,000 result limit. The workflow showcases how to use CloudWatch Logs Insights queries with a recursive algorithm to fetch all matching log entries. - -## Workflow Steps - -This workflow demonstrates the following steps and tasks: - -1. **Prepare the Application** - - Prompts the user to deploy a CloudFormation stack and generate sample logs - - Deploys the CloudFormation template to create a log group and log stream - - Executes a Python script to generate 50,000 sample log entries - - Waits 5 minutes for logs to be fully ingested and indexed - -2. **Execute Large Query** - - Prompts the user for query parameters (limit) - - Performs recursive queries using binary search to retrieve all logs - - Displays progress for each query executed with date ranges and result counts - - Shows total execution time and total logs found - - Optionally displays a sample of the retrieved logs - -3. 
**Clean Up** - - Prompts the user to confirm deletion of resources - - Deletes the CloudFormation stack - - Waits for stack deletion to complete - -## ⚠ Important - -* Running this code might result in charges to your AWS account. -* Running the tests might result in charges to your AWS account. -* We recommend that you grant your code least privilege. At most, grant only the minimum permissions required to perform the task. For more information, see [Grant least privilege](https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#grant-least-privilege). -* This code is not tested in every AWS Region. For more information, see [AWS Regional Services](https://aws.amazon.com/about-aws/global-infrastructure/regional-product-services). - -## Scenario - -### Prerequisites - -Before running this workflow, ensure you have: - -- An AWS account with proper permissions to use Amazon CloudWatch Logs and AWS CloudFormation -- Python 3.x installed (for log generation script) -- AWS credentials configured - -### AWS Services Used - -This workflow uses the following AWS services: - -- Amazon CloudWatch Logs -- AWS CloudFormation - -### Resources - -The feature scenario deploys an AWS CloudFormation stack with the required resources: - -- CloudWatch Logs Log Group: `/workflows/cloudwatch-logs/large-query` -- CloudWatch Logs Log Stream: `stream1` - -### Instructions - -After the example compiles, you can run it from the command line. To do so, navigate to the folder that contains the .sln file and run the following command: - -``` -dotnet run --project Scenarios/CloudWatchLogsScenario.csproj -``` - -Alternatively, you can run the example from within your IDE. - -This starts an interactive scenario that walks you through: - -1. Deploying a CloudFormation stack with CloudWatch Logs resources -2. Generating 50,000 sample log entries -3. Performing recursive queries to retrieve all logs -4. 
Cleaning up resources - -## How the Recursive Query Works - -The recursive query algorithm uses binary search to retrieve more than the 10,000 result limit: - -1. Execute a query with the specified date range -2. If results < limit, return the results -3. If results >= limit: - - Get the timestamp of the last result - - Calculate the midpoint between the last result and the end date - - Recursively query the first half (last result to midpoint) - - Recursively query the second half (midpoint to end date) - - Concatenate all results - -This approach efficiently retrieves all matching logs by splitting the date range whenever the result limit is reached. - -## CloudWatch Logs Actions - -The workflow covers the following CloudWatch Logs API actions: - -- [`StartQuery`](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_StartQuery.html) - Initiates a CloudWatch Logs Insights query -- [`GetQueryResults`](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_GetQueryResults.html) - Retrieves results from a query -- [`PutLogEvents`](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutLogEvents.html) - Uploads log events to a log stream - -## Additional Resources - -* [CloudWatch Logs User Guide](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/WhatIsCloudWatchLogs.html) -* [CloudWatch Logs Insights Query Syntax](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CWL_QuerySyntax.html) -* [CloudWatch Logs API Reference](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/Welcome.html) - ---- - -Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -SPDX-License-Identifier: Apache-2.0 From 9ed5bd26c36d77b26c59977805b69d4386157a46 Mon Sep 17 00:00:00 2001 From: Rachel Hagerman <110480692+rlhagerm@users.noreply.github.com> Date: Thu, 20 Nov 2025 10:32:07 -0600 Subject: [PATCH 20/23] Solution file cleanup. 
--- dotnetv4/DotNetV4Examples.sln | 39 ++++++++++++++--------------------- 1 file changed, 15 insertions(+), 24 deletions(-) diff --git a/dotnetv4/DotNetV4Examples.sln b/dotnetv4/DotNetV4Examples.sln index 0b44f37b700..62c2fd11911 100644 --- a/dotnetv4/DotNetV4Examples.sln +++ b/dotnetv4/DotNetV4Examples.sln @@ -63,7 +63,6 @@ Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Command_R_InvokeModelWithRe EndProject Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Command_R_InvokeModel", "Bedrock-runtime\Models\CohereCommand\Command_R_InvokeModel\Command_R_InvokeModel.csproj", "{6FCC8A6C-A172-4AAF-A0FC-66C3BD9E8716}" EndProject - Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "AnthropicClaude", "AnthropicClaude", "{6FF2EDB6-D1B8-4EE0-B1F0-2BCE66972E39}" EndProject Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "InvokeModelWithResponseStream", "Bedrock-runtime\Models\AnthropicClaude\InvokeModelWithResponseStream\InvokeModelWithResponseStream.csproj", "{345DA0D1-C762-49EF-9953-6F4D57CB7FC7}" @@ -76,7 +75,6 @@ Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Converse", "Bedrock-runtime EndProject Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "AmazonTitanText", "AmazonTitanText", "{74979310-8A92-47DC-B5CA-EFA7970E1202}" EndProject - Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "BedrockRuntimeActions", "Bedrock-runtime\Actions\BedrockRuntimeActions.csproj", "{05E93A3E-CFA0-4980-8EE5-CD25C7ED766D}" EndProject Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "CloudFormation", "CloudFormation", "{5FBEAD92-9234-4824-9320-2052D236C9CD}" @@ -95,11 +93,11 @@ Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "CloudWatchActions", "CloudW EndProject Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "CloudWatchLogs", "CloudWatchLogs", "{A1B2C3D4-E5F6-7890-1234-567890ABCDEF}" EndProject -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "CloudWatchLogsTests", "CloudWatchLogs\LargeQuery\Tests\CloudWatchLogsTests.csproj", 
"{B2C3D4E5-F6A7-8901-2345-678901BCDEFG}" +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "CloudWatchLogsTests", "CloudWatchLogs\LargeQuery\Tests\CloudWatchLogsTests.csproj", "{B1A4BF4B-D2AE-4E7D-AAA2-5D77877848B0}" EndProject -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "CloudWatchLogsScenario", "CloudWatchLogs\LargeQuery\Scenarios\CloudWatchLogsScenario.csproj", "{C3D4E5F6-A7B8-9012-3456-789012CDEFGH}" +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "CloudWatchLogsScenario", "CloudWatchLogs\LargeQuery\Scenarios\CloudWatchLogsScenario.csproj", "{EEDAB42C-1106-42C9-9601-D44F21B475DE}" EndProject -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "CloudWatchLogsActions", "CloudWatchLogs\LargeQuery\Actions\CloudWatchLogsActions.csproj", "{D4E5F6A7-B8C9-0123-4567-890123DEFGHI}" +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "CloudWatchLogsActions", "CloudWatchLogs\LargeQuery\Actions\CloudWatchLogsActions.csproj", "{7781E31F-CABB-484A-AD52-EBC02D2EB274}" EndProject Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "EC2", "EC2", "{9424FB14-B6DE-44CE-B675-AC2B57EC1E69}" EndProject @@ -245,7 +243,6 @@ Global {6FCC8A6C-A172-4AAF-A0FC-66C3BD9E8716}.Debug|Any CPU.Build.0 = Debug|Any CPU {6FCC8A6C-A172-4AAF-A0FC-66C3BD9E8716}.Release|Any CPU.ActiveCfg = Release|Any CPU {6FCC8A6C-A172-4AAF-A0FC-66C3BD9E8716}.Release|Any CPU.Build.0 = Release|Any CPU - {345DA0D1-C762-49EF-9953-6F4D57CB7FC7}.Debug|Any CPU.ActiveCfg = Debug|Any CPU {345DA0D1-C762-49EF-9953-6F4D57CB7FC7}.Debug|Any CPU.Build.0 = Debug|Any CPU {345DA0D1-C762-49EF-9953-6F4D57CB7FC7}.Release|Any CPU.ActiveCfg = Release|Any CPU @@ -262,7 +259,6 @@ Global {874C7405-ED8D-477D-9362-0C69CF56F213}.Debug|Any CPU.Build.0 = Debug|Any CPU {874C7405-ED8D-477D-9362-0C69CF56F213}.Release|Any CPU.ActiveCfg = Release|Any CPU {874C7405-ED8D-477D-9362-0C69CF56F213}.Release|Any CPU.Build.0 = Release|Any CPU - {05E93A3E-CFA0-4980-8EE5-CD25C7ED766D}.Debug|Any CPU.ActiveCfg = Debug|Any CPU 
{05E93A3E-CFA0-4980-8EE5-CD25C7ED766D}.Debug|Any CPU.Build.0 = Debug|Any CPU {05E93A3E-CFA0-4980-8EE5-CD25C7ED766D}.Release|Any CPU.ActiveCfg = Release|Any CPU @@ -287,18 +283,18 @@ Global {EAF4A3B8-5CD0-48ED-B848-0EA6D451B8D3}.Debug|Any CPU.Build.0 = Debug|Any CPU {EAF4A3B8-5CD0-48ED-B848-0EA6D451B8D3}.Release|Any CPU.ActiveCfg = Release|Any CPU {EAF4A3B8-5CD0-48ED-B848-0EA6D451B8D3}.Release|Any CPU.Build.0 = Release|Any CPU - {B2C3D4E5-F6A7-8901-2345-678901BCDEFG}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {B2C3D4E5-F6A7-8901-2345-678901BCDEFG}.Debug|Any CPU.Build.0 = Debug|Any CPU - {B2C3D4E5-F6A7-8901-2345-678901BCDEFG}.Release|Any CPU.ActiveCfg = Release|Any CPU - {B2C3D4E5-F6A7-8901-2345-678901BCDEFG}.Release|Any CPU.Build.0 = Release|Any CPU - {C3D4E5F6-A7B8-9012-3456-789012CDEFGH}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {C3D4E5F6-A7B8-9012-3456-789012CDEFGH}.Debug|Any CPU.Build.0 = Debug|Any CPU - {C3D4E5F6-A7B8-9012-3456-789012CDEFGH}.Release|Any CPU.ActiveCfg = Release|Any CPU - {C3D4E5F6-A7B8-9012-3456-789012CDEFGH}.Release|Any CPU.Build.0 = Release|Any CPU - {D4E5F6A7-B8C9-0123-4567-890123DEFGHI}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {D4E5F6A7-B8C9-0123-4567-890123DEFGHI}.Debug|Any CPU.Build.0 = Debug|Any CPU - {D4E5F6A7-B8C9-0123-4567-890123DEFGHI}.Release|Any CPU.ActiveCfg = Release|Any CPU - {D4E5F6A7-B8C9-0123-4567-890123DEFGHI}.Release|Any CPU.Build.0 = Release|Any CPU + {B1A4BF4B-D2AE-4E7D-AAA2-5D77877848B0}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {B1A4BF4B-D2AE-4E7D-AAA2-5D77877848B0}.Debug|Any CPU.Build.0 = Debug|Any CPU + {B1A4BF4B-D2AE-4E7D-AAA2-5D77877848B0}.Release|Any CPU.ActiveCfg = Release|Any CPU + {B1A4BF4B-D2AE-4E7D-AAA2-5D77877848B0}.Release|Any CPU.Build.0 = Release|Any CPU + {EEDAB42C-1106-42C9-9601-D44F21B475DE}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {EEDAB42C-1106-42C9-9601-D44F21B475DE}.Debug|Any CPU.Build.0 = Debug|Any CPU + {EEDAB42C-1106-42C9-9601-D44F21B475DE}.Release|Any CPU.ActiveCfg = Release|Any CPU + 
{EEDAB42C-1106-42C9-9601-D44F21B475DE}.Release|Any CPU.Build.0 = Release|Any CPU + {7781E31F-CABB-484A-AD52-EBC02D2EB274}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {7781E31F-CABB-484A-AD52-EBC02D2EB274}.Debug|Any CPU.Build.0 = Debug|Any CPU + {7781E31F-CABB-484A-AD52-EBC02D2EB274}.Release|Any CPU.ActiveCfg = Release|Any CPU + {7781E31F-CABB-484A-AD52-EBC02D2EB274}.Release|Any CPU.Build.0 = Release|Any CPU {C99A0F7C-9477-4985-90F6-8EED38ECAC10}.Debug|Any CPU.ActiveCfg = Debug|Any CPU {C99A0F7C-9477-4985-90F6-8EED38ECAC10}.Debug|Any CPU.Build.0 = Debug|Any CPU {C99A0F7C-9477-4985-90F6-8EED38ECAC10}.Release|Any CPU.ActiveCfg = Release|Any CPU @@ -398,23 +394,18 @@ Global {81EA8494-176C-4178-A1C3-6FA3B1222B74} = {39EAAA32-53A8-4641-873C-976FD5963360} {085F3A30-A788-48D6-8067-74D71C29A941} = {39EAAA32-53A8-4641-873C-976FD5963360} {6FCC8A6C-A172-4AAF-A0FC-66C3BD9E8716} = {39EAAA32-53A8-4641-873C-976FD5963360} - {6FF2EDB6-D1B8-4EE0-B1F0-2BCE66972E39} = {4429C078-35C8-4E2B-9C7B-F0C619741B67} {345DA0D1-C762-49EF-9953-6F4D57CB7FC7} = {6FF2EDB6-D1B8-4EE0-B1F0-2BCE66972E39} {C95689B5-C0A1-4C1F-9E97-369D3D397930} = {6FF2EDB6-D1B8-4EE0-B1F0-2BCE66972E39} {8551C158-60B4-4594-8B1D-5BE851F90EE4} = {6FF2EDB6-D1B8-4EE0-B1F0-2BCE66972E39} {874C7405-ED8D-477D-9362-0C69CF56F213} = {6FF2EDB6-D1B8-4EE0-B1F0-2BCE66972E39} {74979310-8A92-47DC-B5CA-EFA7970E1202} = {4429C078-35C8-4E2B-9C7B-F0C619741B67} - {05E93A3E-CFA0-4980-8EE5-CD25C7ED766D} = {D859B39C-9106-4D3D-8C57-11B15FA8106B} {AAFC86EB-49D7-4FD8-8C79-C42C129EB75A} = {5FBEAD92-9234-4824-9320-2052D236C9CD} {98A11016-DD41-4848-A848-51D703951A91} = {5FBEAD92-9234-4824-9320-2052D236C9CD} {106FBE12-6FF7-40DC-9B3C-E5F67F335B32} = {CED87D19-7F82-4D67-8A30-3EE085D07E45} {565A9701-3D9C-49F8-86B7-D256A1D9E074} = {CED87D19-7F82-4D67-8A30-3EE085D07E45} {EAF4A3B8-5CD0-48ED-B848-0EA6D451B8D3} = {CED87D19-7F82-4D67-8A30-3EE085D07E45} - {B2C3D4E5-F6A7-8901-2345-678901BCDEFG} = {A1B2C3D4-E5F6-7890-1234-567890ABCDEF} - 
{C3D4E5F6-A7B8-9012-3456-789012CDEFGH} = {A1B2C3D4-E5F6-7890-1234-567890ABCDEF} - {D4E5F6A7-B8C9-0123-4567-890123DEFGHI} = {A1B2C3D4-E5F6-7890-1234-567890ABCDEF} {C99A0F7C-9477-4985-90F6-8EED38ECAC10} = {9424FB14-B6DE-44CE-B675-AC2B57EC1E69} {6C167F25-F97F-4854-8CD8-A2D446B6799B} = {9424FB14-B6DE-44CE-B675-AC2B57EC1E69} {D95519CA-BD27-45AE-B83B-3FB02E7AE445} = {6C167F25-F97F-4854-8CD8-A2D446B6799B} From 38e50d8bd83ec3e69fdd97625d84ad71a66db51e Mon Sep 17 00:00:00 2001 From: Rachel Hagerman <110480692+rlhagerm@users.noreply.github.com> Date: Thu, 20 Nov 2025 10:33:45 -0600 Subject: [PATCH 21/23] Update validation.yaml --- .doc_gen/validation.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/.doc_gen/validation.yaml b/.doc_gen/validation.yaml index 3e77aa5e973..c165ea0b62c 100644 --- a/.doc_gen/validation.yaml +++ b/.doc_gen/validation.yaml @@ -215,6 +215,7 @@ allow_list: - "EnablePropagateAdditionalUserContextData" - "StopQueryWorkloadInsightsTopContributors" - "com/location/latest/APIReference/Welcome" + - "LargeQuery/Actions/CloudWatchLogsWrapper" sample_files: - "README.md" - "chat_sfn_state_machine.json" From 80e71ed34685986e00c9f63554d00480de969b17 Mon Sep 17 00:00:00 2001 From: Rachel Hagerman <110480692+rlhagerm@users.noreply.github.com> Date: Fri, 21 Nov 2025 09:34:33 -0600 Subject: [PATCH 22/23] Update spec and steering. 
--- .../metadata/cloudwatch-logs_metadata.yaml | 12 - .../SPECIFICATION.md | 256 ++++-------------- steering_docs/dotnet-tech/scenario.md | 3 +- 3 files changed, 60 insertions(+), 211 deletions(-) diff --git a/.doc_gen/metadata/cloudwatch-logs_metadata.yaml b/.doc_gen/metadata/cloudwatch-logs_metadata.yaml index 7f208a71409..6aaa4a733e1 100644 --- a/.doc_gen/metadata/cloudwatch-logs_metadata.yaml +++ b/.doc_gen/metadata/cloudwatch-logs_metadata.yaml @@ -314,18 +314,6 @@ cloudwatch-logs_GetQueryResults: - python.example_code.cloudwatch_logs.get_query_results services: cloudwatch-logs: {GetQueryResults} -cloudwatch-logs_PutLogEvents: - languages: - .NET: - versions: - - sdk_version: 4 - github: dotnetv4/CloudWatchLogs/LargeQuery - excerpts: - - description: - snippet_tags: - - CloudWatchLogs.dotnetv4.PutLogEvents - services: - cloudwatch-logs: {PutLogEvents} cloudwatch-logs_StartQuery: languages: .NET: diff --git a/scenarios/features/cloudwatch_logs_large_query/SPECIFICATION.md b/scenarios/features/cloudwatch_logs_large_query/SPECIFICATION.md index b3e23572c2d..055ac351f9c 100644 --- a/scenarios/features/cloudwatch_logs_large_query/SPECIFICATION.md +++ b/scenarios/features/cloudwatch_logs_large_query/SPECIFICATION.md @@ -11,10 +11,6 @@ This feature scenario demonstrates how to perform large-scale queries on Amazon 3. Performing recursive queries to retrieve all logs using binary search 4. Cleaning up all resources -**The scenario must be runnable in both interactive and non-interactive modes** to support: -- Interactive mode: User runs the scenario manually with prompts -- Non-interactive mode: Automated integration tests run the scenario without user input - For an introduction, see the [README.md](README.md). --- @@ -23,8 +19,9 @@ For an introduction, see the [README.md](README.md). 
- [API Actions Used](#api-actions-used) - [Resources](#resources) -- [Proposed Example Structure](#proposed-example-structure) -- [Implementation Details](#implementation-details) +- [Variables](#variables) +- [Building the queries](#building-the-queries) +- [Example Structure](#example-structure) - [Output Format](#output-format) - [Errors](#errors) - [Metadata](#metadata) @@ -38,12 +35,6 @@ This scenario uses the following CloudWatch Logs API actions: - `StartQuery` - Initiates a CloudWatch Logs Insights query - `GetQueryResults` - Retrieves results from a query, polling until complete -This scenario uses the following CloudFormation API actions: - -- `CreateStack` - Deploys the CloudFormation template -- `DescribeStacks` - Checks stack status and retrieves outputs -- `DeleteStack` - Removes the CloudFormation stack - --- ## Resources @@ -56,37 +47,69 @@ This scenario uses the following CloudFormation API actions: - CloudWatch Logs Log Group: `/workflows/cloudwatch-logs/large-query` - CloudWatch Logs Log Stream: `stream1` -**Stack Outputs**: None (resources use fixed names) +### Helper files +These files are for reference only. New versions of this example should create and upload logs as part of the scenario. + +- [put-log-events](resources/put-log-events.sh) is a bash script that ingests log data and uploads it to CloudWatch. +- [make-log-files.sh](resources/make-log-files.sh) is a bash script that creates log data. **Five minutes of logs, starting at the time of execution, will be created. 
Wait at least five minutes after running this script before attempting to query.** +--- -### Sample Data Generation Scripts +## Variables -**Script 1**: `scenarios/features/cloudwatch_logs_large_query/resources/make-log-files.sh` -- Creates 50,000 log entries divided into 5 JSON files (10,000 entries each) -- Generates timestamps spanning 5 minutes from execution time -- Outputs `QUERY_START_DATE` and `QUERY_END_DATE` environment variables -- Creates files: `file1.json`, `file2.json`, `file3.json`, `file4.json`, `file5.json` +| Variable Name | Description | Type | Default | +|--------------|-------------|------|---------| +| `stackName` | CloudFormation stack name | String | "CloudWatchLargeQueryStack" | +| `queryStartDate` | Query start timestamp | Long/Integer | From script output | +| `queryEndDate` | Query end timestamp | Long/Integer | From script output | +| `queryLimit` | Maximum results per query | Integer | 10000 | +| `logGroupName` | Log group name (if not using stack) | String | "/workflows/cloudwatch-logs/large-query" | +| `logStreamName` | Log stream name (if not using stack) | String | "stream1" | -**Script 2**: `scenarios/features/cloudwatch_logs_large_query/resources/put-log-events.sh` -- Uploads the generated JSON files to CloudWatch Logs -- Uses AWS CLI `put-log-events` command -- Targets log group: `/workflows/cloudwatch-logs/large-query` -- Targets log stream: `stream1` +--- -**Python Alternative**: `scenarios/features/cloudwatch_logs_large_query/resources/create_logs.py` -- Python script that combines both generation and upload -- Creates 50,000 log entries and uploads them directly -- Returns start and end timestamps for query configuration -- Preferred for cross-platform compatibility +## Building the queries + +### Building and waiting for single query + +The query itself is a "CloudWatch Logs Insights query syntax" string. The query must return the `@timestamp` field so follow-up queries can use that information. 
Here's a sample query string: `fields @timestamp, @message | sort @timestamp asc`. Notice it sorts in ascending order. You can sort in either `asc` or `desc`, but the recursive strategy described later will need to match accordingly. + +Queries are jobs. You can start a query with `StartQuery`, but it immediately returns the `queryId`. You must poll a query using `GetQueryResults` until the query has finished. For the purpose of this example, a query has "finished" when `GetQueryResults` has returned a status of one of "Complete", "Failed", "Cancelled", "Timeout", or "Unknown". + +`StartQuery` responds with an error if the query's start or end date falls outside the bounds set by the log group's creation date (for example, a range that ends before the log group was created). The error message starts with "Query's end date and time". + +Start the query and wait for it to "finish". Store the `results`. If the count of the results is less than the configured LIMIT, return the results. If the count of the results is greater than or equal to the limit, go to [Recursive queries](#recursive-queries). --- -## Proposed Example Structure + +### Recursive queries + +If the result count from the previous step is 10000 (or the configured LIMIT), it is very likely that there are more results. **The example must do a binary search of the remaining logs**. To do this, get the date of the last log (earliest or latest, depending on sort order). Use that date as the start date of a new date range. The end date can remain the same. + +Split that date range in half, resulting in two new date ranges. Call your query function twice, once for each new date range. + +Concatenate the results of the first query with the results of the two new queries. + +The following pseudocode illustrates this.
+ +```pseudocode +func large_query(date_range): + query_results = get_query_results(date_range) + + if query_results.length < LIMIT + return query_results + else + date_range = [query_results.end, date_range.end] + d1, d2 = split(date_range) + return concat(query_results, large_query(d1), large_query(d2)) +``` + + +## Example Structure ### Phase 1: Setup **Purpose**: Deploy resources and generate sample data as part of the scenario -**Interactive Mode Steps**: 1. Welcome message explaining the scenario 2. Prompt user: "Would you like to deploy the CloudFormation stack and generate sample logs? (y/n)" 3. If yes: @@ -101,23 +124,17 @@ This scenario uses the following CloudFormation API actions: - Display message: "Sample logs created. Waiting 5 minutes for logs to be fully ingested..." - Wait 5 minutes (300 seconds) for log ingestion with countdown display 4. If no: - - Prompt user for existing log group name - - Prompt user for log stream name + - Prompt user for existing log group name, or enter to use the default name + - Prompt user for log stream name, or enter to use the default name - Prompt user for query start date (ISO 8601 format with milliseconds) - Prompt user for query end date (ISO 8601 format with milliseconds) -**Non-Interactive Mode Behavior**: +**Fully Self-Contained Behavior**: - Automatically deploys stack with default name - Automatically generates 50,000 sample logs - Waits 5 minutes for log ingestion - Uses default values for all configuration -**Variables Set**: -- `stackName` - CloudFormation stack name -- `logGroupName` - Log group name (default: `/workflows/cloudwatch-logs/large-query`) -- `logStreamName` - Log stream name (default: `stream1`) -- `queryStartDate` - Start timestamp for query (seconds since epoch) -- `queryEndDate` - End timestamp for query (seconds since epoch) ### Phase 2: Query Execution @@ -133,7 +150,7 @@ This scenario uses the following CloudFormation API actions: - Start date - End date - Limit -5. 
Display progress for each query executed (see [Output Format](#output-format)) +5. Display progress for each query executed 6. Display total execution time 7. Display total logs found 8. Prompt user: "Would you like to see a sample of the logs? (y/n)" @@ -153,120 +170,6 @@ This scenario uses the following CloudFormation API actions: - Display message: "Resources will remain. You can delete them later through the AWS Console." - Display stack name and log group name for reference -**Non-Interactive Mode Behavior**: -- Automatically deletes the CloudFormation stack -- Waits for deletion to complete -- Ensures cleanup happens even if errors occur during the scenario - ---- - -## Implementation Details - -### CloudFormation Stack Deployment - -**Deployment**: -``` -Stack Name: User-provided or default "CloudWatchLargeQueryStack" -Template: scenarios/features/cloudwatch_logs_large_query/resources/stack.yaml -Capabilities: None required (no IAM resources) -``` - -**Polling for Completion**: -- Poll `DescribeStacks` every 5-10 seconds -- Success: `StackStatus` = `CREATE_COMPLETE` -- Failure: `StackStatus` = `CREATE_FAILED`, `ROLLBACK_COMPLETE`, or `ROLLBACK_FAILED` -- Timeout: 5 minutes maximum wait time - -### Log Generation Execution - -**Cross-Platform Considerations**: -- Bash scripts work on Linux, macOS, and Git Bash on Windows -- Python script is preferred for true cross-platform support -- Check for script availability before execution -- Handle script execution errors gracefully - -**Capturing Output**: -- Parse stdout for `QUERY_START_DATE` and `QUERY_END_DATE` -- Convert timestamps to appropriate format for SDK -- Store timestamps for query configuration - -**Wait Time**: -- CloudWatch Logs requires time to ingest and index logs -- Minimum wait: 5 minutes (300 seconds) -- Display countdown or progress indicator during wait - -### Building and Executing Queries - -**Query String**: -``` -fields @timestamp, @message | sort @timestamp asc -``` - -**Important**: 
The query MUST return `@timestamp` field for recursive queries to work. - -**StartQuery Parameters**: -- `logGroupName` - The log group to query -- `startTime` - Start of date range (seconds since epoch) -- `endTime` - End of date range (seconds since epoch) -- `queryString` - CloudWatch Logs Insights query syntax -- `limit` - Maximum results (default: 10000, max: 10000) - -**GetQueryResults Polling**: -- Poll every 1-2 seconds -- Continue until status is one of: `Complete`, `Failed`, `Cancelled`, `Timeout`, `Unknown` -- Timeout after 60 seconds of polling - -**Error Handling**: -- If `StartQuery` returns error starting with "Query's end date and time", the date range is out of bounds -- Handle this by adjusting the date range or informing the user - -### Recursive Query Algorithm - -**Purpose**: Retrieve more than 10,000 results by splitting date ranges - -**Algorithm**: -``` -function LargeQuery(startDate, endDate, limit): - results = ExecuteQuery(startDate, endDate, limit) - - if results.count < limit: - return results - else: - // Get timestamp of last result - lastTimestamp = results[results.count - 1].timestamp - - // Calculate midpoint between last result and end date - midpoint = (lastTimestamp + endDate) / 2 - - // Query first half - results1 = LargeQuery(lastTimestamp, midpoint, limit) - - // Query second half - results2 = LargeQuery(midpoint, endDate, limit) - - // Combine results - return Concatenate(results, results1, results2) -``` - -**Key Points**: -- Use binary search to split remaining date range -- Recursively query each half -- Concatenate all results -- Log each query's date range and result count (see [Output Format](#output-format)) - -### Stack Deletion - -**Deletion**: -``` -Stack Name: Same as used during creation -``` - -**Polling for Completion**: -- Poll `DescribeStacks` every 5-10 seconds -- Success: Stack not found (ValidationError) or `StackStatus` = `DELETE_COMPLETE` -- Failure: `StackStatus` = `DELETE_FAILED` -- If `DELETE_FAILED`, 
optionally retry with force delete -- Timeout: 5 minutes maximum wait time --- @@ -316,51 +219,10 @@ Sample logs (first 10 of 50000): ## Errors -### CloudFormation Errors - -| Error Code | Error Message Pattern | Handling Strategy | -|------------|----------------------|-------------------| -| `AlreadyExistsException` | Stack already exists | Prompt user for different stack name and retry | -| `ValidationError` | Template validation failed | Display error message and exit setup | -| `InsufficientCapabilitiesException` | Requires capabilities | Should not occur (template has no IAM resources) | - -### CloudWatch Logs Errors - | Error Code | Error Message Pattern | Handling Strategy | |------------|----------------------|-------------------| | `InvalidParameterException` | "Query's end date and time" | Date range is out of bounds; inform user and adjust dates | | `ResourceNotFoundException` | Log group not found | Verify log group exists; prompt user to run setup | -| `LimitExceededException` | Too many concurrent queries | Wait and retry after 5 seconds | -| `ServiceUnavailableException` | Service temporarily unavailable | Retry with exponential backoff (max 3 retries) | - -### Script Execution Errors - -| Error Type | Handling Strategy | -|------------|-------------------| -| Script not found | Display error message; provide manual instructions | -| Script execution failed | Display error output; allow user to retry or skip | -| Permission denied | Suggest making script executable (`chmod +x`) | -| AWS CLI not available | Inform user AWS CLI is required for bash scripts; suggest Python alternative | - ---- - -## User Input Variables - -### Required Variables - -| Variable Name | Description | Type | Default | Validation | -|--------------|-------------|------|---------|------------| -| `stackName` | CloudFormation stack name | String | "CloudWatchLargeQueryStack" | Must match pattern: `[a-zA-Z][-a-zA-Z0-9]*` | -| `queryStartDate` | Query start timestamp | 
Long/Integer | From script output | Milliseconds since epoch | -| `queryEndDate` | Query end timestamp | Long/Integer | From script output | Milliseconds since epoch | -| `queryLimit` | Maximum results per query | Integer | 10000 | Min: 1, Max: 10000 | - -### Optional Variables - -| Variable Name | Description | Type | Default | -|--------------|-------------|------|---------| -| `logGroupName` | Log group name (if not using stack) | String | "/workflows/cloudwatch-logs/large-query" | -| `logStreamName` | Log stream name (if not using stack) | String | "stream1" | --- @@ -370,4 +232,4 @@ Sample logs (first 10 of 50000): | ----------------- | ----------------------------- | --------------------------------- | | `GetQueryResults` | cloudwatch-logs_metadata.yaml | cloudwatch-logs_GetQueryResults | | `StartQuery` | cloudwatch-logs_metadata.yaml | cloudwatch-logs_StartQuery | -| `Large Query` | cloudwatch-logs_metadata.yaml | cloudwatch-logs_Scenario_LargeQuery | +| `Large Query` | cloudwatch-logs_metadata.yaml | cloudwatch-logs_Scenario_LargeQuery | diff --git a/steering_docs/dotnet-tech/scenario.md b/steering_docs/dotnet-tech/scenario.md index 2bbb42c41ef..53797c8c682 100644 --- a/steering_docs/dotnet-tech/scenario.md +++ b/steering_docs/dotnet-tech/scenario.md @@ -7,7 +7,6 @@ Generate feature scenarios that demonstrate complete workflows using multiple se **IMPORTANT**: All new feature scenarios MUST be created in the `dotnetv4` directory, NOT `dotnetv3`. 
- **New scenarios**: `dotnetv4/{Service}/` -- **Legacy examples**: `dotnetv3/{Service}/` (Must NOT add new examples here) ## Requirements - **Specification-Driven**: MUST read the `scenarios/features/{service_feature}/SPECIFICATION.md` @@ -168,7 +167,7 @@ public class {Service}Workflow await Cleanup(); } - Console.WriteLine("{AWS Service} scenario completed."); + Console.WriteLine("{AWS Service} feature scenario completed."); } /// From b234c87264959d4bf0e07272fed4195a80bdf114 Mon Sep 17 00:00:00 2001 From: Rachel Hagerman <110480692+rlhagerm@users.noreply.github.com> Date: Fri, 21 Nov 2025 10:13:05 -0600 Subject: [PATCH 23/23] Update README.md --- dotnetv4/CloudWatchLogs/README.md | 1 - 1 file changed, 1 deletion(-) diff --git a/dotnetv4/CloudWatchLogs/README.md b/dotnetv4/CloudWatchLogs/README.md index bb3400d77e7..c0e1a4cf50b 100644 --- a/dotnetv4/CloudWatchLogs/README.md +++ b/dotnetv4/CloudWatchLogs/README.md @@ -34,7 +34,6 @@ For prerequisites, see the [README](../README.md#Prerequisites) in the `dotnetv4 Code excerpts that show you how to call individual service functions. - [GetQueryResults](LargeQuery/Actions/CloudWatchLogsWrapper.cs#L79) -- [PutLogEvents](LargeQuery/Actions/CloudWatchLogsWrapper.cs#L110) - [StartQuery](LargeQuery/Actions/CloudWatchLogsWrapper.cs#L30) ### Scenarios