diff --git a/License2Deploy/rolling_deploy.py b/License2Deploy/rolling_deploy.py index 99f6bd1..d440101 100644 --- a/License2Deploy/rolling_deploy.py +++ b/License2Deploy/rolling_deploy.py @@ -39,7 +39,6 @@ def __init__(self, self.force_redeploy = force_redeploy self.stack_resources = False self.autoscaling_group = False - self.cloudwatch_alarms = False self.environments = AWSConn.load_config(self.regions_conf).get(self.env) self.region = AWSConn.determine_region(self.environments) self.conn_ec2 = AWSConn.aws_conn_ec2(self.region, self.profile_name) @@ -62,6 +61,7 @@ def __init__(self, self.target_group_arn = None self.original_instance_ids = [] self.new_desired_capacity = None + self.sc_proc = ['ReplaceUnhealthy', 'AZRebalance', 'AlarmNotification', 'ScheduledActions'] def get_ami_id_state(self, ami_id): try: @@ -340,6 +340,12 @@ def launch_new_instances(self, group_name): # pragma: no cover logging.error('Load balancer healthcheck has exceeded the timeout threshold. Rolling back.') self.revert_deployment() + def suspend_processes(self, group_name): + self.conn_auto.suspend_processes( + AutoScalingGroupName=group_name, + ScalingProcesses=self.sc_proc + ) + def terminate_original_instances(self, group_name): #pragma: no cover """ Will remove original instances in autoscale group """ logging.info("Removing old instances from autoscale group") @@ -351,47 +357,11 @@ def terminate_original_instances(self, group_name): #pragma: no cover logging.warning('Failed to remove instance: {0}. 
Please Investigate: {1}'.format(instance_id, e)) logging.info("TERMINATION OF OLD INSTANCES COMPLETE!") - def get_cloudwatch_alarms_from_stack(self): - if not self.cloudwatch_alarms: - self.cloudwatch_alarms = self.get_resources_from_stack_of_type('AWS::CloudWatch::Alarm') - return self.get_resources_physical_ids_by_project(self.cloudwatch_alarms) - - def retrieve_project_cloudwatch_alarms(self): - """ Retrieve all the Cloud-Watch alarms for the given project and environment """ - try: - if self.stack_name: - return self.get_cloudwatch_alarms_from_stack() - all_cloud_watch_alarms = self.conn_cloudwatch.describe_alarms() - except Exception as e: - logging.error("Error while retrieving the list of cloud-watch alarms. Error: {0}".format(e)) - exit(self.exit_error_code) - project_cloud_watch_alarms = [alarm.name for alarm in all_cloud_watch_alarms if self.project in alarm.name and self.env in alarm.name] - if len(project_cloud_watch_alarms) == 0: - logging.info("No cloud-watch alarm found") - return project_cloud_watch_alarms - - def disable_project_cloudwatch_alarms(self): - """ Disable all the cloud watch alarms """ - project_cloud_watch_alarms = self.retrieve_project_cloudwatch_alarms() - for alarm in project_cloud_watch_alarms: - try: - self.conn_cloudwatch.disable_alarm_actions(alarm) - logging.info("Disabled cloud-watch alarm. {0}".format(alarm)) - except Exception as e: - logging.error("Unable to disable the cloud-watch alarm, please investigate: {0}".format(e)) - exit(self.exit_error_code) - - def enable_project_cloudwatch_alarms(self): - """ Enable all the cloud watch alarms """ - project_cloud_watch_alarms = self.retrieve_project_cloudwatch_alarms() - for alarm in project_cloud_watch_alarms: - logging.info("Found an alarm. {0}".format(alarm)) - try: - self.conn_cloudwatch.enable_alarm_actions(alarm) - logging.info("Enabled cloud-watch alarm. 
{0}".format(alarm)) - except Exception as e: - logging.error("Unable to enable the cloud-watch alarm, please investigate: {0}".format(e)) - exit(self.exit_error_code) + def resume_processes(self, group_name): + self.conn_auto.resume_processes( + AutoScalingGroupName=group_name, + ScalingProcesses=self.sc_proc + ) def get_target_group(self, asg_group): target_groups = self.asg.describe_auto_scaling_groups(AutoScalingGroupNames=[asg_group])['AutoScalingGroups'][0]['TargetGroupARNs'] @@ -421,7 +391,7 @@ def deploy(self): # pragma: no cover self.log_instances_ips(self.original_instance_ids, self.asg_name) if not self.force_redeploy and self.is_redeploy(): self.stop_deploy('You are attempting to redeploy the same build. Please pass the force_redeploy flag if a redeploy is desired') - self.disable_project_cloudwatch_alarms() + self.suspend_processes(group_name) self.new_desired_capacity = self.calculate_autoscale_desired_instance_count(self.asg_name, 'increase') self.set_autoscale_instance_desired_count(self.new_desired_capacity, self.asg_name) self.launch_new_instances(self.asg_name) @@ -429,7 +399,7 @@ def deploy(self): # pragma: no cover self.set_autoscale_instance_desired_count(len(self.original_instance_ids), self.asg_name) self.confirm_lb_has_only_new_instances() self.tag_ami(self.ami_id, self.env) - self.enable_project_cloudwatch_alarms() + self.resume_processes(group_name) logging.info("Deployment Complete!") def revert_deployment(self): #pragma: no cover diff --git a/tests/cloudformation_client_test.py b/tests/cloudformation_client_test.py index ee0115c..b9ec8a5 100644 --- a/tests/cloudformation_client_test.py +++ b/tests/cloudformation_client_test.py @@ -20,11 +20,3 @@ def test_get_autoscaling_group_name_via_cloudformation(self): asg_name = self.rolling_deploy.get_autoscale_group_name() self.assertTrue(self.rolling_deploy.autoscaling_group) self.assertEqual(asg_name, 'dnbi-backend-qa-dnbigmsextenderASGqa-1NP5ZBSVZRD0N') - - def 
test_retrieve_project_cloudwatch_alarms(self): - self.assertEqual(self.rolling_deploy.stack_resources, False) - self.assertEqual(self.rolling_deploy.cloudwatch_alarms, False) - cloudwatch_alarms = self.rolling_deploy.retrieve_project_cloudwatch_alarms() - self.assertTrue(self.rolling_deploy.stack_resources) - self.assertEqual(cloudwatch_alarms, ['dnbi-servergmsextender-SCALEDOWNALARMqa-123123', 'dnbi-servergmsextender-SCALEUPALARMqa-4asdhjks']) - diff --git a/tests/rolling_deploy_test.py b/tests/rolling_deploy_test.py index e699fa2..8c89c7e 100644 --- a/tests/rolling_deploy_test.py +++ b/tests/rolling_deploy_test.py @@ -131,40 +131,6 @@ def setUpCloudWatchWithWrongConfig(self, instance_ids, env="stg"): watch_conn = boto.connect_cloudwatch() watch_conn.put_metric_alarm(alarm) - @mock_cloudwatch_deprecated - def test_retrieve_project_cloudwatch_alarms(self): - instance_ids = self.setUpEC2() - self.setUpCloudWatch(instance_ids) - cloud_watch_alarms = self.rolling_deploy.retrieve_project_cloudwatch_alarms() - print(cloud_watch_alarms) - self.assertEqual(1, len(cloud_watch_alarms)) - - @mock_cloudwatch_deprecated - def test_retrieve_project_cloudwatch_alarms_with_no_valid_alarms(self): - instance_ids = self.setUpEC2() - self.setUpCloudWatch(instance_ids) - self.rolling_deploy.env = "wrong_env_prd" # set a wrong environment - cloud_watch_alarms = self.rolling_deploy.retrieve_project_cloudwatch_alarms() - self.assertEqual(0, len(cloud_watch_alarms)) - - @mock_cloudwatch_deprecated - def test_retrieve_project_cloudwatch_alarms_with_wrong_config(self): - instance_ids = self.setUpEC2() - self.setUpCloudWatchWithWrongConfig(instance_ids) - self.assertRaises(SystemExit, lambda: self.rolling_deploy.retrieve_project_cloudwatch_alarms()) - - @mock_cloudwatch_deprecated - def test_enable_project_cloudwatch_alarms_Error(self): - instance_ids = self.setUpEC2() - self.setUpCloudWatch(instance_ids) - self.assertRaises(SystemExit, lambda: 
self.rolling_deploy.enable_project_cloudwatch_alarms()) - - @mock_cloudwatch_deprecated - def test_disable_project_cloudwatch_alarms_Error(self): - instance_ids = self.setUpEC2() - self.setUpCloudWatch(instance_ids) - self.assertRaises(SystemExit, lambda: self.rolling_deploy.disable_project_cloudwatch_alarms()) - @mock_ec2_deprecated def test_tag_ami(self): conn = self.setUpEC2()[0]