import * as cdk from "aws-cdk-lib";
import { Construct } from "constructs";
import * as events from "aws-cdk-lib/aws-events";
import * as targets from "aws-cdk-lib/aws-events-targets";
import * as lambda from "aws-cdk-lib/aws-lambda";
import * as iam from "aws-cdk-lib/aws-iam";
import * as efs from "aws-cdk-lib/aws-efs";
import * as s3 from "aws-cdk-lib/aws-s3";

import {
  BuildSpec,
  ComputeType,
  FileSystemLocation,
  LinuxBuildImage,
  Project,
} from "aws-cdk-lib/aws-codebuild";
import { IRepository } from "aws-cdk-lib/aws-ecr";

import {
  ISecurityGroup,
  IVpc,
  Peer,
  Port,
  SecurityGroup,
} from "aws-cdk-lib/aws-ec2";
import { ProjectKind } from "./constructs/source-repo";
import { VMImportBucket } from "./vm-import-bucket";
import { LogGroup, RetentionDays } from "aws-cdk-lib/aws-logs";

/**
 * Properties to allow customizing the build.
 */
export interface EmbeddedLinuxCodebuildProjectProps
  extends cdk.StackProps {
  /** ECR Repository where the Build Host Image resides. */
  readonly imageRepo: IRepository;
  /** Tag for the Build Host Image */
  readonly imageTag?: string;
  /** VPC where the networking setup resides. */
  readonly vpc: IVpc;
  /** The type of project being built. */
  readonly projectKind?: ProjectKind;
  /** A name for the layer-repo that is created. Default is 'layer-repo' */
  readonly layerRepoName?: string;
  /** Additional policy statements to add to the build project. */
  readonly buildPolicyAdditions?: iam.PolicyStatement[];
  /** Access logging bucket to use */
  readonly accessLoggingBucket?: s3.Bucket;
  /** Access logging prefix to use */
  readonly serverAccessLogsPrefix?: string;
  /** Artifact bucket to use */
  readonly artifactBucket?: s3.Bucket;
  /** Output bucket to use */
  readonly outputBucket?: s3.Bucket | VMImportBucket;
  /** Prefix for S3 objects within the bucket */
  readonly subDirectoryName?: string;
}

/**
 * The stack for creating an embedded Linux CodeBuild project.
 *
 * See {@link EmbeddedLinuxCodebuildProjectProps} for configuration options.
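 *
 * A minimal usage sketch: `app` is a `cdk.App`, and `networkStack` /
 * `imageRepoStack` are hypothetical stacks (not part of this file) that expose
 * the required `IVpc` and `IRepository`.
 *
 * @example
 * new EmbeddedLinuxCodebuildProjectStack(app, "EmbeddedLinuxCodebuildProject", {
 *   vpc: networkStack.vpc,
 *   imageRepo: imageRepoStack.repository,
 *   imageTag: "latest",
 *   env: { account: "123456789012", region: "eu-central-1" },
 * });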
 */
export class EmbeddedLinuxCodebuildProjectStack extends cdk.Stack {
  constructor(
    scope: Construct,
    id: string,
    props: EmbeddedLinuxCodebuildProjectProps
  ) {
    super(scope, id, props);

    /** Set up networking access and EFS FileSystems. */

    const projectSg = new SecurityGroup(this, "BuildProjectSecurityGroup", {
      vpc: props.vpc,
      description: "Security Group to allow attaching EFS",
    });
    projectSg.addIngressRule(
      Peer.ipv4(props.vpc.vpcCidrBlock),
      Port.tcp(2049),
      "NFS Mount Port"
    );

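    // Shared EFS file systems that persist the sstate cache, downloads, and
    // temporary build output across builds; they are mounted into the CodeBuild
    // container via the file system locations configured below.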
    const sstateFS = this.addFileSystem("SState", props.vpc, projectSg);
    const dlFS = this.addFileSystem("Downloads", props.vpc, projectSg);
    const tmpFS = this.addFileSystem("Temp", props.vpc, projectSg);

    let accessLoggingBucket: s3.IBucket;

    if (props.accessLoggingBucket) {
      accessLoggingBucket = props.accessLoggingBucket;
    } else {
      accessLoggingBucket = new s3.Bucket(this, "ArtifactAccessLogging", {
        versioned: true,
        enforceSSL: true,
      });
    }

    /** Create our CodeBuild Project. */
    const project = new Project(
      this,
      "EmbeddedLinuxCodebuildProject",
      {
        buildSpec: BuildSpec.fromObject({
          version: "0.2",
          phases: {
            build: {
              commands: ['echo "DUMMY BUILDSPEC - can not be empty"'],
            },
          },
          artifacts: {
            files: ["**/*"],
            "base-directory": ".",
          },
        }),
        environment: {
          computeType: ComputeType.X2_LARGE,
          buildImage: LinuxBuildImage.fromEcrRepository(
            props.imageRepo,
            props.imageTag
          ),
          privileged: true,
        },
        timeout: cdk.Duration.hours(4),
        vpc: props.vpc,
        securityGroups: [projectSg],
        fileSystemLocations: [
          FileSystemLocation.efs({
            identifier: "tmp_dir",
            location: tmpFS,
            mountPoint: "/build-output",
          }),
          FileSystemLocation.efs({
            identifier: "sstate_cache",
            location: sstateFS,
            mountPoint: "/sstate-cache",
          }),
          FileSystemLocation.efs({
            identifier: "dl_dir",
            location: dlFS,
            mountPoint: "/downloads",
          }),
        ],
        logging: {
          cloudWatch: {
            logGroup: new LogGroup(this, "PipelineBuildLogs", {
              retention: RetentionDays.TEN_YEARS,
            }),
          },
        },
      }
    );

    if (props.buildPolicyAdditions) {
      props.buildPolicyAdditions.forEach((p) => project.addToRolePolicy(p));
    }

    project.addToRolePolicy(this.addProjectPolicies());

    project.role?.addManagedPolicy(
      iam.ManagedPolicy.fromAwsManagedPolicyName("AWSCodeBuildAdminAccess")
    );

    /** On the automatic CodePipeline execution triggered when the pipeline is created,
     * this Lambda checks that the OS image is present in the ECR repository and stops
     * the execution if it is not. */
    const fnOnPipelineCreate = new lambda.Function(
      this,
      "OSImageCheckOnStart",
      {
        runtime: lambda.Runtime.PYTHON_3_10,
        handler: "index.handler",
        code: lambda.Code.fromInline(`
import boto3
import json

ecr_client = boto3.client('ecr')
codepipeline_client = boto3.client('codepipeline')

def handler(event, context):
    print("Received event: " + json.dumps(event, indent=2))
    response = ecr_client.describe_images(repositoryName='${props.imageRepo.repositoryName}', filter={'tagStatus': 'TAGGED'})
    # The for/else runs the else branch only if no tagged image matches.
    for i in response['imageDetails']:
        if '${props.imageTag}' in i['imageTags']:
            break
    else:
        print('OS image not found. Stopping execution.')
        response = codepipeline_client.stop_pipeline_execution(
            pipelineName=event['detail']['pipeline'],
            pipelineExecutionId=event['detail']['execution-id'],
            abandon=True,
            reason='OS image not found in ECR repository. Stopping pipeline until image is present.')
        `),
        logRetention: RetentionDays.TEN_YEARS,
      }
    );

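    // Run the image check whenever a pipeline execution is started by the
    // pipeline's own creation (i.e. the automatic first execution).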
    const pipelineCreateRule = new events.Rule(this, "OnPipelineStartRule", {
      eventPattern: {
        detailType: ["CodePipeline Pipeline Execution State Change"],
        source: ["aws.codepipeline"],
        detail: {
          state: ["STARTED"],
          "execution-trigger": {
            "trigger-type": ["CreatePipeline"],
          },
        },
      },
    });
    pipelineCreateRule.addTarget(
      new targets.LambdaFunction(fnOnPipelineCreate)
    );
  }

  /**
   * Adds an EFS FileSystem to the VPC and SecurityGroup.
   *
   * @param name - A name to differentiate the filesystem.
   * @param vpc - The VPC the Filesystem resides in.
   * @param securityGroup - A SecurityGroup to allow access to the filesystem from.
   * @returns The filesystem location URL.
   *
   */
  private addFileSystem(
    name: string,
    vpc: IVpc,
    securityGroup: ISecurityGroup
  ): string {
    const fs = new efs.FileSystem(
      this,
      `EmbeddedLinuxPipeline${name}Filesystem`,
      {
        vpc,
        removalPolicy: cdk.RemovalPolicy.DESTROY,
      }
    );

    fs.connections.allowFrom(securityGroup, Port.tcp(2049));

    const fsId = fs.fileSystemId;
    const region = cdk.Stack.of(this).region;

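    // CodeBuild expects an EFS file system location of the form
    // "<file-system-DNS-name>:/<path>", so return the regional EFS DNS name here.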
    return `${fsId}.efs.${region}.amazonaws.com:/`;
  }

  private addProjectPolicies(): iam.PolicyStatement {
    return new iam.PolicyStatement({
      actions: [
        "ec2:DescribeSecurityGroups",
        "codestar-connections:GetConnection",
        "codestar-connections:GetConnectionToken",
        "codeconnections:GetConnectionToken",
        "codeconnections:GetConnection",
        "codeconnections:UseConnection",
        "codebuild:ListConnectedOAuthAccounts",
        "codebuild:ListRepositories",
        "codebuild:PersistOAuthToken",
        "codebuild:ImportSourceCredentials",
      ],
      resources: ["*"],
    });
  }
}