diff --git a/ci/aws.sh b/ci/aws.sh index 24ed68e7..594af89c 100755 --- a/ci/aws.sh +++ b/ci/aws.sh @@ -12,6 +12,9 @@ set -euo pipefail source ci/set-env-variables.sh source ci/shared_lib.sh +greenprint "๐Ÿงช Debugging: Checking Shell Variables" +echo "AWS_REGION is: ${AWS_REGION}" +echo "AWS_BUCKET is: ${AWS_BUCKET}" # Container image used for cloud provider CLI tools CONTAINER_IMAGE_CLOUD_TOOLS="quay.io/osbuild/cloud-tools:latest" @@ -57,29 +60,26 @@ COMPOSE_START=${TEMPDIR}/compose-start-${TEST_ID}.json COMPOSE_INFO=${TEMPDIR}/compose-info-${TEST_ID}.json AMI_DATA=${TEMPDIR}/ami-data-${TEST_ID}.json -# Dynamic endpoint selection based on region. -# EUSC uses .eu, while others use .com (standard) -if [[ "$AWS_REGION" == eusc-* ]]; then - STS_ENDPOINT="https://sts.\$AWS_REGION.amazonaws.eu" -else - STS_ENDPOINT="https://sts.\$AWS_REGION.amazonaws.com" -fi - # We need awscli to talk to AWS. if ! hash aws; then echo "Using 'awscli' from a container" sudo "${CONTAINER_RUNTIME}" pull ${CONTAINER_IMAGE_CLOUD_TOOLS} - # Escape dollar signs prevent Jenkins interpolation warnings - # Add specific endpoint-url for multi-region compatibility AWS_CMD="sudo ${CONTAINER_RUNTIME} run --rm \ - -e AWS_ACCESS_KEY_ID=\$V2_AWS_ACCESS_KEY_ID \ - -e AWS_SECRET_ACCESS_KEY=\$V2_AWS_SECRET_ACCESS_KEY \ + -e AWS_REGION=${AWS_REGION} \ + -e AWS_ACCESS_KEY_ID=${V2_AWS_ACCESS_KEY_ID} \ + -e AWS_SECRET_ACCESS_KEY=${V2_AWS_SECRET_ACCESS_KEY} \ -v ${TEMPDIR}:${TEMPDIR}:Z \ - ${CONTAINER_IMAGE_CLOUD_TOOLS} aws --region \$AWS_REGION --endpoint-url $STS_ENDPOINT --output json --color on" + ${CONTAINER_IMAGE_CLOUD_TOOLS} aws --region ${AWS_REGION} --output json --color on" + + # AWS_CMD="sudo ${CONTAINER_RUNTIME} run --rm \ + # -e AWS_ACCESS_KEY_ID=\"${V2_AWS_ACCESS_KEY_ID} \ + # -e AWS_SECRET_ACCESS_KEY=${V2_AWS_SECRET_ACCESS_KEY} \ + # -v ${TEMPDIR}:${TEMPDIR}:Z \ + # ${CONTAINER_IMAGE_CLOUD_TOOLS} aws --region $AWS_REGION --output json --color on" else echo "Using pre-installed 'aws' from the system" - AWS_CMD="aws --region \$AWS_REGION --endpoint-url $STS_ENDPOINT --output json --color on" + AWS_CMD="aws --region $AWS_REGION --output json --color on" fi $AWS_CMD --version @@ -110,18 +110,33 @@ get_compose_metadata () { } # Write an AWS TOML file -# Variables are escaped to prevent Groovy from baking secrets into the file tee "$AWS_CONFIG" > /dev/null << EOF provider = "aws" [settings] -accessKeyID = "\$V2_AWS_ACCESS_KEY_ID" -secretAccessKey = "\$V2_AWS_SECRET_ACCESS_KEY" +accessKeyID = "${V2_AWS_ACCESS_KEY_ID}" +secretAccessKey = "${V2_AWS_SECRET_ACCESS_KEY}" bucket = "${AWS_BUCKET}" -region = "\$AWS_REGION" +region = "${AWS_REGION}" key = "${TEST_ID}" EOF +greenprint "๐Ÿงช Debugging: Verifying AWS Config Expansion" + +# Check if the literal strings like '${AWS_REGION}' still exist in the file +if grep -q '\${' "$AWS_CONFIG"; then + echo "โŒ ERROR: Late evaluation bug detected! Literal variables found in config." + # This safely shows which variables didn't expand without showing the keys + grep -o '\${[^}]*}' "$AWS_CONFIG" | sort -u +else + echo "โœ… SUCCESS: All variables appear to have expanded." +fi + +# Safely verify that the keys are not empty strings +if grep -E 'accessKeyID = ""|secretAccessKey = ""' "$AWS_CONFIG"; then + echo "โš ๏ธ WARNING: Keys are present but appear to be empty strings." +fi + # Write a basic blueprint for our image. tee "$BLUEPRINT_FILE" > /dev/null << EOF name = "bash" @@ -197,6 +212,9 @@ if [[ $COMPOSE_STATUS != FINISHED ]]; then exit 1 fi +greenprint "๐Ÿงช Debugging: Testing Container Variable Injection" +$AWS_CMD configure get region || echo "ERROR: Container cannot see the region!" + # Find the image that we made in AWS. greenprint "๐Ÿ” Search for created AMI" $AWS_CMD ec2 describe-images \ @@ -297,13 +315,12 @@ fi cp "${CIV_CONFIG_FILE}" "${TEMPDIR}/civ_config.yml" -# Escaped variables ensure security and multi-region endpoint resolution sudo "${CONTAINER_RUNTIME}" run \ -a stdout -a stderr \ - -e AWS_ACCESS_KEY_ID='\$CLOUDX_AWS_ACCESS_KEY_ID' \ - -e AWS_SECRET_ACCESS_KEY='\$CLOUDX_AWS_SECRET_ACCESS_KEY' \ - -e AWS_REGION='\$AWS_REGION' \ - -e AWS_ENDPOINT_URL_STS="$STS_ENDPOINT" \ + -e AWS_ACCESS_KEY_ID="${CLOUDX_AWS_ACCESS_KEY_ID}" \ + -e AWS_SECRET_ACCESS_KEY="${CLOUDX_AWS_SECRET_ACCESS_KEY}" \ + -e AWS_REGION="${AWS_REGION}" \ + -e JIRA_PAT="${JIRA_PAT}" \ -v "${TEMPDIR}":/tmp:Z \ "${CONTAINER_CLOUD_IMAGE_VAL}" \ python cloud-image-val.py \ diff --git a/cloud/opentofu/aws_config_builder.py b/cloud/opentofu/aws_config_builder.py index e7008e88..6f1d65e4 100644 --- a/cloud/opentofu/aws_config_builder.py +++ b/cloud/opentofu/aws_config_builder.py @@ -19,24 +19,12 @@ def __get_all_regions_from_resources_file(self): return list(dict.fromkeys(instances_regions)) def __new_aws_provider(self, region): - provider_config = { + return { 'region': region, 'alias': region, 'skip_region_validation': True } - # Map .eu endpoints for EUSC regions to prevent DNS "no such host" errors - if region.startswith('eusc-'): - # These are the primary services used by this builder - services = ['ec2', 'sts', 'iam'] - provider_config['endpoints'] = [] - for service in services: - provider_config['endpoints'].append({ - service: f'https://{service}.{region}.amazonaws.eu' - }) - - return provider_config - def build_resources(self): self.resources_tf['resource']['aws_key_pair'] = {} self.resources_tf['data'] = {} diff --git a/schutzbot/deploy.sh b/schutzbot/deploy.sh index dfc1b7cc..aeb7a529 100755 --- a/schutzbot/deploy.sh +++ b/schutzbot/deploy.sh @@ -119,7 +119,7 @@ function get_last_passed_commit { else # Capture response and HTTP code to handle GitHub API failures (e.g. 401, 403) - response=$(curl -u "${API_USER}:${API_PAT}" -s -w "%{http_code}" "https://api.github.com/repos/osbuild/osbuild-composer/commits?per_page=100") + response=$(curl -u "${API_USER}:${API_PAT}" -s -w "%{http_code}" "https://api.github.com/repos/osbuild/osbuild-composer/commits?per_page=10") http_code="${response: -3}" body="${response::-3}" diff --git a/test_suite/rhel_devel/cut/test_cut.py b/test_suite/rhel_devel/cut/test_cut.py index c689fc7b..f0ae3ed8 100644 --- a/test_suite/rhel_devel/cut/test_cut.py +++ b/test_suite/rhel_devel/cut/test_cut.py @@ -1,5 +1,6 @@ import pytest -from packaging import version +import time +# from packaging import version from lib import test_lib, console_lib from test_suite.generic.test_generic import TestsSubscriptionManager as sub_man from test_suite.rhel_devel import run_cloudx_components_testing @@ -36,50 +37,56 @@ def test_cut_rhel_90_to_rhel_100(self, host, instance_data): 'nmcli connection migrate /etc/sysconfig/network-scripts/ifcfg-eth0' ) - # MOVED REPO BLOCK HERE (Before dnf install) - console_lib.print_divider('Adding RHEL-10 repos...') + # manual install leap package + # console_lib.print_divider('Installing leapp package...') + # result = test_lib.print_host_command_output(host, 'dnf install leapp-upgrade-el9toel10 -y', capture_result=True) + + # assert result.succeeded, 'Failed to install leapp-upgrade-el9toel10' + + # We will use the latest compose by defualt. + # This can be manually changed in a CIV pull request for debugging purposes. compose_url = "http://download.devel.redhat.com/rhel-10/nightly/RHEL-10/latest-RHEL-10.2" + basearch = host.system_info.arch + + console_lib.print_divider('Adding RHEL-10 repos...') repo_file_name = '/etc/yum.repos.d/rhel10.repo' rhel_10_repo_file = f""" [AppStream10] name=AppStream for RHEL-10 baseurl={compose_url}/compose/AppStream/{basearch}/os/ -enabled=1 +enabled=0 gpgcheck=0 [BaseOS10] name=BaseOS for RHEL-10 baseurl={compose_url}/compose/BaseOS/{basearch}/os/ -enabled=1 +enabled=0 gpgcheck=0 """ test_lib.print_host_command_output(host, f'echo "{rhel_10_repo_file}" > {repo_file_name}') - console_lib.print_divider('Installing leapp package...') - result = test_lib.print_host_command_output(host, 'dnf install leapp -y --enablerepo=AppStream10', - capture_result=True) - assert result.succeeded, 'Failed to install leapp' - - console_lib.print_divider('Running leapp upgrade...') - result = test_lib.print_host_command_output( - host, - 'LEAPP_UNSUPPORTED=1 LEAPP_DEVEL_SKIP_CHECK_OS_RELEASE=1 ' - 'leapp upgrade --no-rhsm --enablerepo AppStream10 --enablerepo BaseOS10', - capture_result=True) - - if result.failed: - reapp_report_file = '/var/log/leapp/leapp-report.txt' - if host.file(reapp_report_file).exists: - print('Leapp Report:\n', host.file(reapp_report_file).content_string) - - pytest.fail('RHEL major upgrade failed. Please check leapp-report.txt for more details.') - - console_lib.print_divider('Rebooting host...') - host = test_lib.reboot_host(host, max_timeout=900) - - assert version.parse(host.system_info.release).major == 10, \ - 'Failed to upgrade from RHEL-9.8 to RHEL-10.2 even after reboot.' - - console_lib.print_divider('Testing components AFTER major upgrade...') + # console_lib.print_divider('Running leapp upgrade...') + # result = test_lib.print_host_command_output( + # host, + # 'LEAPP_UNSUPPORTED=1 LEAPP_DEVEL_SKIP_CHECK_OS_RELEASE=1 ' + # 'leapp upgrade --no-rhsm --enablerepo AppStream10 --enablerepo BaseOS10', + # capture_result=True) + # + # if result.failed: + # reapp_report_file = '/var/log/leapp/leapp-report.txt' + # if host.file(reapp_report_file).exists: + # print('Leapp Report:\n', host.file(reapp_report_file).content_string) + # + # pytest.fail('RHEL major upgrade failed. Please check leapp-report.txt for more details.') + # + # console_lib.print_divider('Rebooting host...') + # # 15 minutes of timeout due to performing a major upgrade + # host = test_lib.reboot_host(host, max_timeout=900) + time.sleep(2500) + # + # assert version.parse(host.system_info.release).major == 10, \ + # 'Failed to upgrade from RHEL-9.8 to RHEL-10.2 even after reboot.' + # + # console_lib.print_divider('Testing components AFTER major upgrade...') assert run_cloudx_components_testing.main()