From 6987feaf955933fbaca1e567a2a41d4334a6c153 Mon Sep 17 00:00:00 2001 From: ajpaws Date: Sun, 22 Jan 2023 07:03:03 +0000 Subject: [PATCH] adding reports --- hardeneks/__init__.py | 139 ++++-- .../cluster-autoscaling/__init__.py | 0 .../cluster-autoscaling/cluster-autoscaler.py | 356 ++++++++++++++ hardeneks/cluster_wide/networking/__init__.py | 0 .../cluster_wide/networking/load-balancing.py | 28 ++ .../cluster_wide/networking/prefix_mode.py | 57 +++ hardeneks/cluster_wide/networking/vpc-cni.py | 122 +++++ .../cluster_wide/networking/vpc_subnets.py | 37 ++ .../cluster_wide/reliability/applications.py | 47 +- .../security/detective_controls.py | 32 +- .../security/encryption_secrets.py | 86 ++-- hardeneks/cluster_wide/security/iam.py | 189 ++++--- .../cluster_wide/security/image_security.py | 31 +- .../security/infrastructure_security.py | 69 ++- .../cluster_wide/security/multi_tenancy.py | 38 +- .../cluster_wide/security/network_security.py | 83 ++-- .../cluster_wide/security/pod_security.py | 43 +- hardeneks/harden.py | 166 ++++++- hardeneks/helper_functions.py | 23 + .../namespace_based/networking/__init__.py | 0 .../networking/load-balancing.py | 461 ++++++++++++++++++ .../reliability/applications.py | 186 ++++--- .../security/encryption_secrets.py | 31 +- hardeneks/namespace_based/security/iam.py | 264 +++++----- .../security/network_security.py | 42 +- .../namespace_based/security/pod_security.py | 175 ++++--- .../security/runtime_security.py | 46 +- hardeneks/report.py | 435 ++++++++++------- hardeneks/resources.py | 22 +- 29 files changed, 2415 insertions(+), 793 deletions(-) create mode 100644 hardeneks/cluster_wide/cluster-autoscaling/__init__.py create mode 100644 hardeneks/cluster_wide/cluster-autoscaling/cluster-autoscaler.py create mode 100644 hardeneks/cluster_wide/networking/__init__.py create mode 100644 hardeneks/cluster_wide/networking/load-balancing.py create mode 100644 hardeneks/cluster_wide/networking/prefix_mode.py create mode 100644 
hardeneks/cluster_wide/networking/vpc-cni.py create mode 100644 hardeneks/cluster_wide/networking/vpc_subnets.py create mode 100644 hardeneks/helper_functions.py create mode 100644 hardeneks/namespace_based/networking/__init__.py create mode 100644 hardeneks/namespace_based/networking/load-balancing.py diff --git a/hardeneks/__init__.py b/hardeneks/__init__.py index 0913747..30f2884 100644 --- a/hardeneks/__init__.py +++ b/hardeneks/__init__.py @@ -15,7 +15,7 @@ NamespacedResources, Resources, ) -from .harden import harden +from .harden import harden, cluster_data app = typer.Typer() @@ -40,11 +40,58 @@ def _config_callback(value: str): return value -def _get_current_context(context): - if context: - return context - _, active_context = kubernetes.config.list_kube_config_contexts() - return active_context["name"] +def _get_cluster_name_from_context(clusterNameStr): + + if clusterNameStr.endswith('eksctl.io'): + clusterName = clusterNameStr.split('.')[0] + elif clusterNameStr.startswith('arn:'): + clusterName = clusterNameStr.split('/')[-1] + else: + clusterName = clusterNameStr + + return clusterName + + + +def _get_current_context(contextFromUser, clusterFromUser): + + contextName = None + clusterName = None + + #print("contextFromUser={} clusterFromUser={}".format(contextFromUser, clusterFromUser)) + + contextList, active_context = kubernetes.config.list_kube_config_contexts() + + if contextFromUser: + contextName = contextFromUser + if clusterFromUser: + clusterName = clusterFromUser + else: + for contextData in contextList: + #print("contextData={}".format(contextData)) + if contextData['name'] == contextFromUser: + clusterName = _get_cluster_name_from_context(contextData['context']['cluster']) + else: + if clusterFromUser: + clusterName = clusterFromUser + for contextData in contextList: + clusterNameFromContext = _get_cluster_name_from_context(contextData['context']['cluster']) + #print("clusterNameFromContext={} 
clusterFromUser={}".format(clusterNameFromContext, clusterFromUser)) + if clusterNameFromContext == clusterFromUser: + contextName = contextData['name'] + print("contextName={}".format(contextName)) + + else: + contextName = active_context['name'] + clusterName = _get_cluster_name_from_context(active_context['context']['cluster']) + + + if contextName and clusterName: + #print("contextName={} clusterName={}".format(contextName, clusterName)) + return (contextName, clusterName) + else: + #print("contextName={} and clusterName={} are not valid. Exiting the program".format(contextName, clusterName)) + sys.exit() def _get_namespaces(ignored_ns: list) -> list: @@ -52,16 +99,10 @@ def _get_namespaces(ignored_ns: list) -> list: namespaces = [i.metadata.name for i in v1.list_namespace().items] return list(set(namespaces) - set(ignored_ns)) - -def _get_cluster_name(context, region): - try: - client = boto3.client("eks", region_name=region) - for name in client.list_clusters()["clusters"]: - if name in context: - return name - except EndpointConnectionError: - raise ValueError(f"{region} seems like a bad region name") - +def _get_pillars() -> list: + pillarsList = ["security", "reliability", "cluster-autoscaling", "networking"] + return pillarsList + def _get_region(): return boto3.session.Session().region_name @@ -114,6 +155,22 @@ def run_hardeneks( False, "--insecure-skip-tls-verify", ), + pillars: str = typer.Option( + default=None, + help="Specific pillars to harden. Default is all pillars.", + ), + run_only_cluster_level_checks: bool = typer.Option( + False, + "--run_only_cluster_level_checks", + ), + run_only_namespace_level_checks: bool = typer.Option( + False, + "--run_only_namespace_level_checks", + ), + debug: bool = typer.Option( + False, + "--debug", + ), ): """ Main entry point to hardeneks. 
@@ -132,16 +189,14 @@ def run_hardeneks( None """ + + (context, cluster) = _get_current_context(context, cluster) + if insecure_skip_tls_verify: _load_kube_config() else: kubernetes.config.load_kube_config(context=context) - context = _get_current_context(context) - - if not cluster: - cluster = _get_cluster_name(context, region) - if not region: region = _get_region() @@ -158,26 +213,40 @@ def run_hardeneks( if not namespace: namespaces = _get_namespaces(config["ignore-namespaces"]) else: - namespaces = [namespace] + #namespaces = [namespace] + namespaces = namespace.split(',') + if not pillars: + pillarsList = _get_pillars() + else: + #namespaces = [namespace] + pillarsList = pillars.split(',') + + rules = config["rules"] + resources = Resources(region, context, cluster, namespaces, debug) + resources.set_resources() + cluster_data(resources, rules, "cluster_wide") + + console.rule("[b]Checking cluster wide rules", characters="- ") console.print() - resources = Resources(region, context, cluster, namespaces) - resources.set_resources() - harden(resources, rules, "cluster_wide") - - for ns in namespaces: - console.rule( - f"[b]Checking rules against namespace: {ns}", characters=" -" - ) - console.print() - resources = NamespacedResources(region, context, cluster, ns) - resources.set_resources() - harden(resources, rules, "namespace_based") - console.print() + if not run_only_namespace_level_checks: + #resources = Resources(region, context, cluster, namespaces, debug) + #resources.set_resources() + harden(resources, rules, "cluster_wide", pillarsList) + + if not run_only_cluster_level_checks: + for ns in namespaces: + #console.rule(f"[b]Checking rules against namespace: {ns}", characters=" -") + #console.print() + resources = NamespacedResources(region, context, cluster, ns, debug) + resources.set_resources() + harden(resources, rules, "namespace_based", pillarsList) + console.print() + if export_txt: console.save_text(export_txt) diff --git 
a/hardeneks/cluster_wide/cluster-autoscaling/__init__.py b/hardeneks/cluster_wide/cluster-autoscaling/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/hardeneks/cluster_wide/cluster-autoscaling/cluster-autoscaler.py b/hardeneks/cluster_wide/cluster-autoscaling/cluster-autoscaler.py new file mode 100644 index 0000000..618bd05 --- /dev/null +++ b/hardeneks/cluster_wide/cluster-autoscaling/cluster-autoscaler.py @@ -0,0 +1,356 @@ +import boto3 +from kubernetes import client +from rich import print + +import sys + +from ...resources import Resources +from ...report import print_console_message +from ...helper_functions import is_deployment_exists_in_namespace + +def check_any_cluster_autoscaler_exists(resources: Resources): + + status = True + objectsList = None + objectType = None + message = "" + + docs_link = "https://aws.github.io/aws-eks-best-practices/cluster-autoscaling/" + deployments = [ + i.metadata.name + for i in client.AppsV1Api().list_deployment_for_all_namespaces().items + ] + + #pprint("deployments={}".format(deployments)) + + if "cluster-autoscaler" in deployments: + message = "Kubernetes Cluster Autoscaler is deployed" + elif "karpenter" in deployments: + message = "Karpeneter is deployed" + else: + message = "Kubernetes Cluster Autoscaler or Karpeneter is not deployed" + status = False + + return (status, message, objectsList, objectType) + +def ensure_cluster_autoscaler_and_cluster_versions_match(resources: Resources): + + status = True + objectsList = None + objectType = None + message = "" + + eksclient = boto3.client("eks", region_name=resources.region) + cluster_metadata = eksclient.describe_cluster(name=resources.cluster) + + cluster_version = cluster_metadata["cluster"]["version"] + + #print("cluster_version={}".format(cluster_version)) + + deployments = (client.AppsV1Api().list_namespaced_deployment("kube-system").items) + + #print("deployments={}".format(deployments)) + + ca_version = None + ca_containers = None + + for 
deployment in deployments: + if deployment.metadata.name == "cluster-autoscaler": + ca_containers = deployment.spec.template.spec.containers + ca_image = ca_containers[0].image + ca_image_version = ca_image.split(':')[-1] + #print("ca_image={} ca_image_version={}".format(ca_image, ca_image_version)) + + versions = "Kubernetes Cluster Autoscaler Version (" + ca_image_version + ") and Cluster Version (" + cluster_version + ")" + + if cluster_version in ca_image_version: + message = versions + " match" + else: + message = versions + " do not match" + status = False + + if message == "": + message = "Kubernetes Cluster Autoscaler is not deployed in the cluster" + status = False + + return (status, message, objectsList, objectType) + + +def ensure_cluster_autoscaler_has_autodiscovery_mode(resources: Resources): + + status = True + objectsList = None + objectType = None + message = "" + + deployments = (client.AppsV1Api().list_namespaced_deployment("kube-system").items) + + for deployment in deployments: + if deployment.metadata.name == "cluster-autoscaler": + ca_containers = deployment.spec.template.spec.containers + ca_command = ca_containers[0].command + for item in ca_command: + if 'node-group-auto-discovery' in item: + message = "Kubernetes Cluster Autoscaler is configured with Auto Discovery Mode" + break + #print("item={}".format(item)) + + if message == "": + message = "Kubernetes Cluster Autoscaler is not deployed in the cluster" + status = False + + return (status, message, objectsList, objectType) + + + +def ensure_cluster_autoscaler_has_three_replicas(resources: Resources): + + status = True + objectsList = None + objectType = None + message = "" + + deployments = (client.AppsV1Api().list_namespaced_deployment("kube-system").items) + + for deployment in deployments: + if deployment.metadata.name == "cluster-autoscaler": + ca_replicas = deployment.spec.replicas + if ca_replicas >= 3: + message = "Kubernetes Cluster Autoscaler has {} 
replicas".format(ca_replicas) + else: + message = "Kubernetes Cluster Autoscaler has only {} replicas".format(ca_replicas) + status = False + break + + if message == "": + message = "Kubernetes Cluster Autoscaler is not deployed in the cluster" + status = False + + return (status, message, objectsList, objectType) + +def use_separate_iam_role_for_cluster_autoscaler(resources: Resources): + + status = True + objectsList = None + objectType = None + message = "" + + (ret, deploymentData) = is_deployment_exists_in_namespace("cluster-autoscaler", "kube-system") + if ret: + sa = deploymentData.spec.template.spec.service_account_name + sa_data = client.CoreV1Api().read_namespaced_service_account(sa, 'kube-system', pretty="true") + #print(sa_data.metadata.annotations.keys()) + + if 'eks.amazonaws.com/role-arn' in sa_data.metadata.annotations.keys(): + message = "cluster-autoscaler deployment uses a dedicated IAM Role (IRSA)" + else: + message = "cluster-autoscaler deployment does not use a dedicated IAM Role (IRSA)" + status = False + else: + message = "Kubernetes Cluster Autoscaler is not deployed in the cluster" + status = False + + return (status, message, objectsList, objectType) + + +def employ_least_privileged_access_to_the_IAM_role(resources: Resources): + + status = True + objectsList = None + objectType = None + message = "" + + iam_client = boto3.client('iam') + + + (ret, deploymentData) = is_deployment_exists_in_namespace("cluster-autoscaler", "kube-system") + if ret: + + sa = deploymentData.spec.template.spec.service_account_name + sa_data = client.CoreV1Api().read_namespaced_service_account(sa, 'kube-system', pretty="true") + #print(sa_data.metadata.annotations.keys()) + + if 'eks.amazonaws.com/role-arn' in sa_data.metadata.annotations.keys(): + sa_iam_role_arn = sa_data.metadata.annotations['eks.amazonaws.com/role-arn'] + #print("sa_iam_role_arn={}".format(sa_iam_role_arn)) + #response = iam_client.list_role_policies(RoleName=sa_iam_role) + sa_iam_role = 
sa_iam_role_arn.split('/')[-1] + #print("sa_iam_role={}".format(sa_iam_role)) + policyList = iam_client.list_attached_role_policies(RoleName=sa_iam_role) + #print("policyList={}".format(policyList)) + + administratorAccess = None + leastPrivelegedAccess = None + listofActions = [] + for policy in policyList['AttachedPolicies']: + + if policy['PolicyArn'] == 'arn:aws:iam::aws:policy/AdministratorAccess': + administratorAccess = True + + #print("PolicyName={} PolicyArn={}".format(policy['PolicyName'], policy['PolicyArn'])) + policyData = iam_client.get_policy(PolicyArn=policy['PolicyArn']) + #print("policyData={}".format(policyData)) + policyDefaultVersion = policyData['Policy']['DefaultVersionId'] + #print("policyDefaultVersion={}".format(policyDefaultVersion)) + + policyDocument = iam_client.get_policy_version(PolicyArn=policy['PolicyArn'], VersionId=policyDefaultVersion) + + policyStatements = policyDocument['PolicyVersion']['Document']['Statement'] + #print("policyDocument={}".format(policyStatements)) + + for statement in policyStatements: + listofActions.extend(statement['Action']) + + #print("listofActions={}".format(listofActions)) + + if 'autoscaling:SetDesiredCapacity' in listofActions and 'autoscaling:TerminateInstanceInAutoScalingGroup' in listofActions: + leastPrivelegedAccess = True + + if administratorAccess is None and leastPrivelegedAccess: + message = "cluster-autoscaler has least privileged access to the IAM role" + else: + message = "cluster-autoscaler does not have least privileged access to the IAM role" + status = False + else: + message = "cluster-autoscaler deployment does not use a dedicated IAM Role (IRSA)" + status = False + else: + message = "Kubernetes Cluster Autoscaler is not deployed in the cluster" + status = False + + return (status, message, objectsList, objectType) + +def use_managed_nodegroups(resources: Resources): + + status = True + objectsList = None + objectType = None + message = "" + + eksclient = boto3.client("eks", 
region_name=resources.region) + cluster_metadata = eksclient.describe_cluster(name=resources.cluster) + + cluster_version = cluster_metadata["cluster"]["version"] + + #print("cluster_version={}".format(cluster_version)) + + eksmnglist = set() + selfmnglist=set() + + nodeList = (client.CoreV1Api().list_node().items) + for node in nodeList: + labels = node.metadata.labels + #print("nodeName={} nodegroup={}".format(node.metadata.name, labels )) + if 'eks.amazonaws.com/nodegroup' in labels.keys(): + #print("nodeName={} managed nodegroup={}".format(node.metadata.name, labels['eks.amazonaws.com/nodegroup'] )) + eksmnglist.add(labels['eks.amazonaws.com/nodegroup']) + elif 'alpha.eksctl.io/nodegroup-name' in labels.keys(): + #print("nodeName={} self managed nodegroup={}".format(node.metadata.name, labels['alpha.eksctl.io/nodegroup-name'] )) + selfmnglist.add(labels['alpha.eksctl.io/nodegroup-name']) + elif 'karpenter.sh/provisioner-name' in labels.keys(): + #print("nodeName={} Karpeneter managed provisioner={}".format(node.metadata.name, labels['karpenter.sh/provisioner-name'] )) + pass + else: + selfmnglist.add(node.metadata.name) + #print("nodeName={} self managed with node labels={}".format(node.metadata.name, labels )) + + #print("eksmnglist={} selfmnglist={}".format(eksmnglist, selfmnglist)) + + if len(selfmnglist) == 0: + message = "cluster has only managed node groups :{}".format(eksmnglist) + else: + message = "cluster has self managed node groups :{}".format(selfmnglist) + status = False + #print("keys={}".format(labels.keys())) + #print("nodes={}".format(node.metadata.labels)) + #print("nodes={}".format(node['metadata']['labels'])) + + + return (status, message, objectsList, objectType) + + +def ensure_uniform_instance_types_in_nodegroups(resources: Resources): + + status = True + objectsList = None + objectType = None + message = "" + + + func_name = sys._getframe().f_code.co_name + docs_link = "https://aws.github.io/aws-eks-best-practices/cluster-autoscaling/" 
+ + eksclient = boto3.client("eks", region_name=resources.region) + cluster_metadata = eksclient.describe_cluster(name=resources.cluster) + + cluster_version = cluster_metadata["cluster"]["version"] + + #print("cluster_version={}".format(cluster_version)) + + nodegroupList = {} + nodegroupInstanceSizesList={} + + nodeList = (client.CoreV1Api().list_node().items) + for node in nodeList: + labels = node.metadata.labels + #print("nodeName={} nodegroup={}".format(node.metadata.name, labels )) + if 'eks.amazonaws.com/nodegroup' in labels.keys(): + #print("nodeName={} managed nodegroup={}".format(node.metadata.name, labels['eks.amazonaws.com/nodegroup'] )) + nodegroupName = labels['eks.amazonaws.com/nodegroup'] + if nodegroupName not in nodegroupList.keys(): + nodegroupList[nodegroupName] = [] + nodegroupList[nodegroupName].append(labels['beta.kubernetes.io/instance-type']) + #eksmnglist.add(labels['eks.amazonaws.com/nodegroup']) + elif 'alpha.eksctl.io/nodegroup-name' in labels.keys(): + nodegroupName = labels['alpha.eksctl.io/nodegroup-name'] + if nodegroupName not in nodegroupList.keys(): + nodegroupList[nodegroupName] = [] + nodegroupList[nodegroupName].append(labels['beta.kubernetes.io/instance-type']) + #print("nodeName={} self managed nodegroup={}".format(node.metadata.name, labels['alpha.eksctl.io/nodegroup-name'] )) + #selfmnglist.add(labels['alpha.eksctl.io/nodegroup-name']) + elif 'karpenter.sh/provisioner-name' in labels.keys(): + #print("nodeName={} Karpeneter managed provisioner={}".format(node.metadata.name, labels['karpenter.sh/provisioner-name'] )) + pass + else: + pass + + #print("nodeName={} self managed with node labels={}".format(node.metadata.name, labels )) + + + #nodegroupList['ng-3f4edeea'].append('m5.xlarge') + #nodegroupList['mng2'].append('m5.2xlarge') + #print("nodegroupList={}".format(nodegroupList)) + + descriptionMessage = "These nodegroups contain non uniform instance types :" + + ec2client = boto3.client('ec2') + 
isNonUniformNodegroupsExists = None + for nodegroupName, instanceTypesList in nodegroupList.items(): + instanceTypesData = ec2client.describe_instance_types(InstanceTypes=instanceTypesList) + #print("instanceTypesData={}".format(instanceTypesData)) + nodegroupInstanceSizesList[nodegroupName] = set() + for instanceData in instanceTypesData['InstanceTypes']: + #print("InstanceType={} DefaultVCpus={} SizeInMiB={}".format(instanceData['InstanceType'], instanceData['VCpuInfo']['DefaultVCpus'], instanceData['MemoryInfo']['SizeInMiB'])) + DefaultVCpus=instanceData['VCpuInfo']['DefaultVCpus'] + SizeInMiB=instanceData['MemoryInfo']['SizeInMiB'] + nodegroupInstanceSizesList[nodegroupName].add((DefaultVCpus, int(SizeInMiB/1024))) + + if len(nodegroupInstanceSizesList[nodegroupName]) > 1: + descriptionMessage += " " + nodegroupName + isNonUniformNodegroupsExists = True + + #print("nodegroupInstanceSizesList={}".format(nodegroupInstanceSizesList)) + #print("descriptionMessage={}".format(descriptionMessage)) + + + if not isNonUniformNodegroupsExists: + message = "cluster has only unfirm instance types in the node groups" + else: + message = descriptionMessage + status = False + #print("keys={}".format(labels.keys())) + #print("nodes={}".format(node.metadata.labels)) + #print("nodes={}".format(node['metadata']['labels'])) + + return (status, message, objectsList, objectType) + \ No newline at end of file diff --git a/hardeneks/cluster_wide/networking/__init__.py b/hardeneks/cluster_wide/networking/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/hardeneks/cluster_wide/networking/load-balancing.py b/hardeneks/cluster_wide/networking/load-balancing.py new file mode 100644 index 0000000..a5f6d5f --- /dev/null +++ b/hardeneks/cluster_wide/networking/load-balancing.py @@ -0,0 +1,28 @@ +import boto3 +from kubernetes import client +from rich import print +import sys + + +from ...resources import Resources +from ...report import print_console_message +from 
...helper_functions import is_deployment_exists_in_namespace + +def use_aws_lb_controller(resources: Resources): + + + status = True + objectsList = None + objectType = None + message = "" + + (ret, deploymentData) = is_deployment_exists_in_namespace("aws-load-balancer-controller", "kube-system") + if ret: + message = "AWS LB Controller is deployed in the cluster" + else: + message = "AWS LB Controller is not deployed in the cluster" + status = False + + return (status, message, objectsList, objectType) + + diff --git a/hardeneks/cluster_wide/networking/prefix_mode.py b/hardeneks/cluster_wide/networking/prefix_mode.py new file mode 100644 index 0000000..208d558 --- /dev/null +++ b/hardeneks/cluster_wide/networking/prefix_mode.py @@ -0,0 +1,57 @@ +import boto3 +from kubernetes import client +from rich import print +import sys + + +from ...resources import Resources +from ...report import print_console_message + + +def use_prefix_mode(resources: Resources): + + status = True + objectsList = None + objectType = None + message = None + isvpccni_version_correct = None + isPrefixModeEabled = None + isMaxPodsPerNodeIncorrect = None + + daemonset = client.AppsV1Api().read_namespaced_daemon_set(name="aws-node", namespace="kube-system") + vpccni_containers = daemonset.spec.template.spec.containers + vpccni_image = vpccni_containers[0].image + vpccni_image_version = vpccni_image.split('/')[-1].split(':')[-1].split('-')[0] + vpccni_image_version_digits = vpccni_image_version.split('.') + + if int(vpccni_image_version_digits[1]) >= 9 and int(vpccni_image_version_digits[2]) >= 0: + isvpccni_version_correct=True + + #print("vpccni_image={} vpccni_image_version={} 2nd={} 3rd={}".format(vpccni_image, vpccni_image_version, vpccni_image_version[1], vpccni_image_version[2])) + #print("vpccni_containers={}".format(vpccni_containers)) + #envList = vpccni_containers[0].env + #print("envList={} type={}".format(envList, type(envList))) + #for env in envList: + for env in 
vpccni_containers[0].env: + #print("name={} value={}".format(env.name, env.value)) + if env.name == 'ENABLE_PREFIX_DELEGATION': + isPrefixModeEabled = env.value + + nodeList = (client.CoreV1Api().list_node().items) + for node in nodeList: + name = node.metadata.name + pods = node.status.capacity['pods'] + #print("name={} pods={}".format(name, pods)) + if int(pods) < 110: + isMaxPodsPerNodeIncorrect = True + + + if not isMaxPodsPerNodeIncorrect and isvpccni_version_correct and isPrefixModeEabled == 'true': + message = "vpc cni prefix mode enabled and version is {} and max pods is set 110".format(vpccni_image_version) + else: + message = "vpc cni prefix mode disabled and version is {} and ensure max pods is set to 110".format(vpccni_image_version) + status = False + + return (status, message, objectsList, objectType) + + \ No newline at end of file diff --git a/hardeneks/cluster_wide/networking/vpc-cni.py b/hardeneks/cluster_wide/networking/vpc-cni.py new file mode 100644 index 0000000..e66b84e --- /dev/null +++ b/hardeneks/cluster_wide/networking/vpc-cni.py @@ -0,0 +1,122 @@ +import boto3 +from kubernetes import client +from rich import print +import sys + + +from ...resources import Resources +from ...report import print_console_message + +def deploy_vpc_cni_managed_add_on(resources: Resources): + status = True + objectsList = None + objectType = None + message = None + + func_name = sys._getframe().f_code.co_name + docs_link = "https://aws.github.io/aws-eks-best-practices/networking/vpc-cni/#deploy-vpc-cni-managed-add-on" + client = boto3.client("eks", region_name=resources.region) + try: + vpccni_add_on = client.describe_addon(clusterName=resources.cluster, addonName='vpc-cni') + message = "VPC CNI is a Managed Add-On" + #pprint("vpccni_add_on={}".format(vpccni_add_on)) + except Exception as exc: + #print(f"[bold][red]{exc}") + message = "VPC CNI is Not a Managed Add-On" + status = False + + return (status, message, objectsList, objectType) + + +def 
use_separate_iam_role_for_cni(resources: Resources): + + status = True + objectsList = None + objectType = None + message = None + + daemonset = client.AppsV1Api().read_namespaced_daemon_set(name="aws-node", namespace="kube-system") + sa = daemonset.spec.template.spec.service_account_name + sa_data = client.CoreV1Api().read_namespaced_service_account(sa, 'kube-system', pretty="true") + #print(sa_data.metadata.annotations.keys()) + + if 'eks.amazonaws.com/role-arn' in sa_data.metadata.annotations.keys(): + message = "aws-node daemonset uses a dedicated IAM Role (IRSA)" + else: + message = "aws-node daemonset does not use a dedicated IAM Role (IRSA)" + status = False + return (status, message, objectsList, objectType) + + + +def monitor_IP_adress_inventory(resources: Resources): + status = True + objectsList = None + objectType = None + message = None + + eksclient = boto3.client("eks", region_name=resources.region) + cluster_metadata = eksclient.describe_cluster(name=resources.cluster) + vpcId = cluster_metadata["cluster"]["resourcesVpcConfig"]["vpcId"] + subnetIds = cluster_metadata["cluster"]["resourcesVpcConfig"]["subnetIds"] + + #print("vpcId={} subnetIds={}".format(vpcId, subnetIds)) + + subnets = boto3.resource("ec2").subnets.filter( + Filters=[{"Name": "vpc-id", "Values": [vpcId]}] + ) + subnet_ids = [sn.id for sn in subnets] + + #print("subnets={} subnet_ids={}".format(subnets, subnet_ids)) + ec2client = boto3.client('ec2') + subnetsList = ec2client.describe_subnets(SubnetIds=subnet_ids) + #print("response={} ".format(response)) + totalAvailableIpAddressCount = 0 + for subnet in subnetsList['Subnets']: + #print("SubnetId={} CidrBlock={} AvailableIpAddressCount={}".format(subnet['SubnetId'], subnet['CidrBlock'], subnet['AvailableIpAddressCount'])) + totalAvailableIpAddressCount += subnet['AvailableIpAddressCount'] + + #print("totalAvailableIpAddressCount={}".format(totalAvailableIpAddressCount)) + + descriptionMessage = "Total number of Available IPs across 
all subnets in the VPC is {}".format(totalAvailableIpAddressCount) + if totalAvailableIpAddressCount > 5000: + message = descriptionMessage + else: + message = descriptionMessage + status = False + + return (status, message, objectsList, objectType) + +def use_dedicated_and_small_subnets_for_cluster_creation(resources: Resources): + status = True + objectsList = None + objectType = None + message = None + + eksclient = boto3.client("eks", region_name=resources.region) + cluster_metadata = eksclient.describe_cluster(name=resources.cluster) + vpcId = cluster_metadata["cluster"]["resourcesVpcConfig"]["vpcId"] + subnetIds = cluster_metadata["cluster"]["resourcesVpcConfig"]["subnetIds"] + + #print("vpcId={} subnetIds={}".format(vpcId, subnetIds)) + + #print("subnets={} subnet_ids={}".format(subnets, subnet_ids)) + ec2client = boto3.client('ec2') + subnetsList = ec2client.describe_subnets(SubnetIds=subnetIds) + #print("response={} ".format(response)) + + is_cluster_subnet_cidr_size_big = None + for subnet in subnetsList['Subnets']: + #print("SubnetId={} CidrBlock={} AvailableIpAddressCount={}".format(subnet['SubnetId'], subnet['CidrBlock'], subnet['AvailableIpAddressCount'])) + cidr_size = subnet['CidrBlock'].split('/')[-1] + #print(cidr_size) + if int(cidr_size) < 28: + is_cluster_subnet_cidr_size_big = True + + if not is_cluster_subnet_cidr_size_big: + message = "Cluster Subnet CIDE Size is <= /28" + else: + message = "Cluster Subnet CIDE Size is > /28" + status = False + + return (status, message, objectsList, objectType) \ No newline at end of file diff --git a/hardeneks/cluster_wide/networking/vpc_subnets.py b/hardeneks/cluster_wide/networking/vpc_subnets.py new file mode 100644 index 0000000..845c176 --- /dev/null +++ b/hardeneks/cluster_wide/networking/vpc_subnets.py @@ -0,0 +1,37 @@ +import boto3 +from kubernetes import client +import sys + +from ...resources import Resources +from ...report import print_console_message + +def 
consider_public_and_private_mode(resources: Resources): + status = True + objectsList = None + objectType = None + message = None + + + client = boto3.client("eks", region_name=resources.region) + cluster_metadata = client.describe_cluster(name=resources.cluster) + #pprint("cluster_metadata={}".format(cluster_metadata)) + endpoint_public_access = cluster_metadata["cluster"]["resourcesVpcConfig"]["endpointPublicAccess"] + endpoint_private_access = cluster_metadata["cluster"]["resourcesVpcConfig"]["endpointPrivateAccess"] + + if endpoint_public_access == True and endpoint_private_access == True: + message = "EKS Cluster Endpoint is in Public and Private Mode" + else: + message = "EKS Cluster Endpoint is not in Public and Private Mode" + status = False + + if endpoint_public_access: + public_access_cidr_list = cluster_metadata["cluster"]["resourcesVpcConfig"]["publicAccessCidrs"] + if '0.0.0.0/0' in public_access_cidr_list : + message = "EKS Cluster Endpoint is Public and Open to Internet Access ['0.0.0.0/0']" + status = False + else: + message = "EKS Cluster Endpoint is Public and is not Open to Internet Access ['0.0.0.0/0']" + + return (status, message, objectsList, objectType) + + diff --git a/hardeneks/cluster_wide/reliability/applications.py b/hardeneks/cluster_wide/reliability/applications.py index 243c595..4d3308f 100644 --- a/hardeneks/cluster_wide/reliability/applications.py +++ b/hardeneks/cluster_wide/reliability/applications.py @@ -1,43 +1,50 @@ from kubernetes import client +from rich import print from rich.panel import Panel +from rich.console import Console -from hardeneks import console from ...resources import Resources def check_metrics_server_is_running(resources: Resources): + status = None + message = "" + objectType = None + objectsList = [] + services = [ i.metadata.name for i in client.CoreV1Api().list_service_for_all_namespaces().items ] if "metrics-server" in services: - return True + status = True + message = "Metrics server deployed" else: 
- console.print( - Panel( - "[red]Deploy metrics server.", - subtitle="[link=https://aws.github.io/aws-eks-best-practices/reliability/docs/application/#run-kubernetes-metrics-server]Click to see the guide[/link]", - ) - ) - console.print() - return False - + status = False + message = "Metrics server is not deployed" + + return (status, message, objectsList, objectType) def check_vertical_pod_autoscaler_exists(resources: Resources): + + status = None + message = "" + objectType = None + objectsList = [] + deployments = [ i.metadata.name for i in client.AppsV1Api().list_deployment_for_all_namespaces().items ] if "vpa-recommender" in deployments: - return True + status = True + message = "Vertical pod autoscaler is deployed" else: - console.print( - Panel( - "[red]Deploy vertical pod autoscaler if needed.", - subtitle="[link=https://aws.github.io/aws-eks-best-practices/reliability/docs/application/#vertical-pod-autoscaler-vpa]Click to see the guide[/link]", - ) - ) - console.print() - return False + status = False + message = "Deploy vertical pod autoscaler if needed" + + return (status, message, objectsList, objectType) + + diff --git a/hardeneks/cluster_wide/security/detective_controls.py b/hardeneks/cluster_wide/security/detective_controls.py index 528a128..0d79c11 100644 --- a/hardeneks/cluster_wide/security/detective_controls.py +++ b/hardeneks/cluster_wide/security/detective_controls.py @@ -1,23 +1,31 @@ import boto3 +from rich import print from rich.panel import Panel +from rich.console import Console + -from hardeneks import console from ...resources import Resources +console = Console() + def check_logs_are_enabled(resources: Resources): - client = boto3.client("eks", region_name=resources.region) - cluster_metadata = client.describe_cluster(name=resources.cluster) + + status = None + message = "" + objectType = None + objectsList = [] + + eksclient = boto3.client("eks", region_name=resources.region) + cluster_metadata = 
def use_encryption_with_ebs(resources: Resources):
    """Flag EBS CSI StorageClasses whose ``encrypted`` parameter is missing or "false".

    Returns (status, message, objectsList, objectType) where objectsList
    holds the offending StorageClass objects.
    """
    objectType = "StorageClass"
    flagged = []

    for sc in resources.storage_classes:
        if sc.provisioner != "ebs.csi.aws.com":
            continue
        encrypted = sc.parameters.get("encrypted")
        # Missing parameter or an explicit "false" both mean no encryption.
        if not encrypted or encrypted == "false":
            flagged.append(sc)

    if flagged:
        return (False, "EBS Storage Classes should have encryption parameter", flagged, objectType)
    return (True, "EBS Storage Classes have encrypted parameters", flagged, objectType)


def use_encryption_with_efs(resources: Resources):
    """Flag EFS CSI PersistentVolumes that do not mount with the ``tls`` option.

    Returns (status, message, objectsList, objectType) where objectsList
    holds the offending PersistentVolume objects.
    """
    objectType = "PersistentVolume"
    flagged = []

    for pv in resources.persistent_volumes:
        csi = pv.spec.csi
        if not (csi and csi.driver == "efs.csi.aws.com"):
            continue
        mount_options = pv.spec.mount_options
        # No mount options at all, or options without "tls", means traffic
        # to EFS is not encrypted in transit.
        if not mount_options or "tls" not in mount_options:
            flagged.append(pv)

    if flagged:
        return (False, "EFS Persistent volumes should have tls mount option", flagged, objectType)
    return (True, "EFS Persistent volumes have tls mount option", flagged, objectType)


def use_efs_access_points(resources: Resources):
    """Flag EFS CSI PersistentVolumes whose volume handle has no access point.

    An access-point-backed handle contains "::" (fsId::apId). Returns
    (status, message, objectsList, objectType).
    """
    objectType = "PersistentVolume"
    flagged = [
        pv
        for pv in resources.persistent_volumes
        if pv.spec.csi
        and pv.spec.csi.driver == "efs.csi.aws.com"
        and "::" not in pv.spec.csi.volume_handle
    ]

    if flagged:
        return (False, "EFS Persistent volumes should leverage access points", flagged, objectType)
    return (True, "EFS Persistent volumes are leveraging access points", flagged, objectType)
def disable_anonymous_access_for_cluster_roles(resources: Resources):
    """Report ClusterRoleBindings bound to anonymous/unauthenticated subjects.

    Returns (status, message, objectsList, objectType); objectsList holds
    the offending ClusterRoleBinding names.
    """
    objectType = "ClusterRoleBinding"
    objectsList = []

    for binding in resources.cluster_role_bindings:
        if not binding.subjects:
            continue
        for subject in binding.subjects:
            if subject.name in ("system:unauthenticated", "system:anonymous"):
                objectsList.append(binding.metadata.name)
                break  # fix: one entry per binding, even with multiple matching subjects

    if objectsList:
        # fix: message previously read "bound to to" and concatenated
        # binding names with no separator.
        message = (
            "Clusterroles bound to anonymous/unauthenticated groups: "
            + " ".join(objectsList)
        )
        return (False, message, objectsList, objectType)
    return (
        True,
        "There are no Clusterroles bound to anonymous/unauthenticated groups",
        objectsList,
        objectType,
    )


def check_endpoint_public_access(resources: Resources):
    """Report whether the EKS API server endpoint allows public access.

    Returns (status, message, objectsList, objectType).
    """
    eksclient = boto3.client("eks", region_name=resources.region)
    cluster_metadata = eksclient.describe_cluster(name=resources.cluster)
    endpoint_access = cluster_metadata["cluster"]["resourcesVpcConfig"][
        "endpointPublicAccess"
    ]

    if endpoint_access:
        return (False, "EKS Cluster Endpoint is Public", [], None)
    # fix: the success message previously read "EKS Cluster Endpoint is not
    # Private", which is the opposite of what this branch means.
    return (True, "EKS Cluster Endpoint is Private", [], None)


def check_aws_node_daemonset_service_account(resources: Resources):
    """Check that the aws-node daemonset does not run with its default SA.

    Using the default "aws-node" service account means the CNI is not using
    IRSA. Returns (status, message, objectsList, objectType).
    """
    exists, daemonset = is_daemonset_exists_in_cluster("aws-node")

    if not exists:
        return (False, "aws-node daemonset doesn't exist in the cluster", [], None)
    if daemonset.spec.template.spec.service_account_name == "aws-node":
        return (False, "Update the aws-node daemonset to use IRSA", [], None)
    return (True, "aws-node daemonset uses IRSA", [], None)


def check_access_to_instance_profile(resources: Resources):
    """Flag worker instances whose IMDS hop limit exposes the node role to pods.

    Returns (status, message, objectsList, objectType); objectsList holds
    the offending EC2 reservation entries.

    NOTE(review): the middle of this function was elided by a patch hunk
    boundary; the filter values and hop-limit condition below are
    reconstructed from the visible fragments — confirm against the original.
    """
    objectType = "instanceMetadata"
    objectsList = []

    ec2client = boto3.client("ec2", region_name=resources.region)
    instance_metadata = ec2client.describe_instances(
        Filters=[
            {
                "Name": "tag:aws:eks:cluster-name",
                "Values": [resources.cluster],
            }
        ]
    )

    for instance in instance_metadata["Reservations"]:
        if (
            instance["Instances"][0]["MetadataOptions"]["HttpPutResponseHopLimit"]
            == 2
        ):
            objectsList.append(instance)

    # fix: status/message were previously set inside the loop, so the last
    # instance examined decided the overall result.
    if objectsList:
        return (
            False,
            "access to the instance profile assigned to nodes is not restricted",
            objectsList,
            objectType,
        )
    return (
        True,
        "access to the instance profile assigned to nodes is restricted",
        objectsList,
        objectType,
    )


def restrict_wildcard_for_cluster_roles(resources: Resources):
    """Report ClusterRoles that use '*' in rule verbs or resources.

    Returns (status, message, objectsList, objectType); objectsList holds
    the offending ClusterRole names.
    """
    objectType = "ClusterRole"
    objectsList = []

    for role in resources.cluster_roles:
        if not role.rules:
            continue
        for rule in role.rules:
            if "*" in rule.verbs or (rule.resources and "*" in rule.resources):
                # fix: a role with '*' in both verbs and resources was
                # previously appended twice.
                objectsList.append(role.metadata.name)
                break

    if objectsList:
        # fix: the name string was previously built only in the resources
        # branch and without separators.
        message = (
            "ClusterRoles with '*' in Verbs or Resources are: "
            + " ".join(objectsList)
        )
        return (False, message, objectsList, objectType)
    return (
        True,
        "There are no ClusterRoles with '*' in Verbs or Resources",
        objectsList,
        objectType,
    )


def use_immutable_tags_with_ecr(resources: Resources):
    """Flag ECR repositories whose image tags are mutable.

    Returns (status, message, objectsList, objectType); objectsList holds
    the offending repository description dicts.
    """
    objectType = "Repository"
    objectsList = []

    ecrclient = boto3.client("ecr", region_name=resources.region)
    # fix: describe_repositories is paginated; a single call only returns
    # the first page of repositories.
    paginator = ecrclient.get_paginator("describe_repositories")
    for page in paginator.paginate():
        for repository in page["repositories"]:
            if repository["imageTagMutability"] != "IMMUTABLE":
                objectsList.append(repository)

    if objectsList:
        return (False, "Make image tags immutable", objectsList, objectType)
    return (True, "Image tags are immutable", objectsList, objectType)
image tags immutable" + else: + status = True + message = "Image tags are immutable" + + return (status, message, objectsList, objectType) + diff --git a/hardeneks/cluster_wide/security/infrastructure_security.py b/hardeneks/cluster_wide/security/infrastructure_security.py index f9508cb..dd8be7a 100644 --- a/hardeneks/cluster_wide/security/infrastructure_security.py +++ b/hardeneks/cluster_wide/security/infrastructure_security.py @@ -1,17 +1,24 @@ import boto3 +from rich.console import Console from rich.panel import Panel +from rich import print -from hardeneks import console from ...resources import Resources -from ...report import print_instance_public_table -def deploy_workers_onto_private_subnets(resources: Resources): - client = boto3.client("ec2", region_name=resources.region) +console = Console() + - offenders = [] +def deploy_workers_onto_private_subnets(resources: Resources): + + status = None + message = "" + objectType = "PublicInstances" + objectsList = [] + - instance_metadata = client.describe_instances( + ec2client = boto3.client("ec2", region_name=resources.region) + instance_metadata = ec2client.describe_instances( Filters=[ { "Name": "tag:aws:eks:cluster-name", @@ -24,24 +31,31 @@ def deploy_workers_onto_private_subnets(resources: Resources): for instance in instance_metadata["Reservations"]: if instance["Instances"][0]["PublicDnsName"]: - offenders.append(instance) - - if offenders: - print_instance_public_table( - offenders, - "[red]Place worker nodes on private subnets.", - "[link=https://aws.github.io/aws-eks-best-practices/security/docs/hosts/#deploy-workers-onto-private-subnets]Click to see the guide[/link]", - ) - return offenders - + objectsList.append(instance) + if objectsList: + status = False + message = "Place worker nodes on private subnets" + else: + status = True + message = "worker nodes are on private subnets." 
+ + return (status, message, objectsList, objectType) + + def make_sure_inspector_is_enabled(resources: Resources): - client = boto3.client("inspector2", region_name=resources.region) + + status = None + message = "" + objectType = None + objectsList = [] + + inspector2client = boto3.client("inspector2", region_name=resources.region) account_id = boto3.client( "sts", region_name=resources.region ).get_caller_identity()["Account"] - response = client.batch_get_account_status( + response = inspector2client.batch_get_account_status( accountIds=[ account_id, ] @@ -52,13 +66,14 @@ def make_sure_inspector_is_enabled(resources: Resources): ecr_status = resource_state["ecr"]["status"] if ec2_status != "ENABLED" and ecr_status != "ENABLED": - console.print( - Panel( - "[red]Enable Amazon Inspector for ec2 and ecr", - subtitle="[link=https://aws.github.io/aws-eks-best-practices/security/docs/hosts/#run-amazon-inspector-to-assess-hosts-for-exposure-vulnerabilities-and-deviations-from-best-practices]Click to see the guide[/link]", - ) - ) - console.print() - return False + status = False + message = "Enable Amazon Inspector for ec2 and ecr" + else: + status = True + message = "Amazon Inspector is enabled for ec2 and ecr" - return True + return (status, message, objectsList, objectType) + + + + \ No newline at end of file diff --git a/hardeneks/cluster_wide/security/multi_tenancy.py b/hardeneks/cluster_wide/security/multi_tenancy.py index d04c1eb..6f10e1e 100644 --- a/hardeneks/cluster_wide/security/multi_tenancy.py +++ b/hardeneks/cluster_wide/security/multi_tenancy.py @@ -1,22 +1,30 @@ +from rich.console import Console + +import copy + from ...resources import Resources -from ...report import ( - print_namespace_table, -) +console = Console() def ensure_namespace_quotas_exist(resources: Resources): - offenders = resources.namespaces - + status = None + message = "" + objectType = "Namespace" + objectsList = [] + + objectsList = copy.deepcopy(resources.namespaces) + for quota 
def ensure_namespace_quotas_exist(resources: Resources):
    """Report namespaces that have no ResourceQuota assigned.

    Returns (status, message, objectsList, objectType); objectsList holds
    the names of namespaces without a quota.
    """
    objectType = "Namespace"
    # A shallow copy is enough: resources.namespaces holds name strings.
    objectsList = list(resources.namespaces)

    for quota in resources.resource_quotas:
        if quota.metadata.namespace in objectsList:
            objectsList.remove(quota.metadata.namespace)

    if objectsList:
        return (False, "Namespaces does not have quotas assigned", objectsList, objectType)
    return (True, "Namespaces have quotas assigned", objectsList, objectType)


def check_awspca_exists(resources: Resources):
    """Check whether the AWS Private CA issuer is installed in the cluster.

    Returns (status, message, objectsList, objectType).
    """
    status = False
    message = "Install aws privateca issuer for your certificates."

    for service in client.CoreV1Api().list_service_for_all_namespaces().items:
        if service.metadata.name.startswith("aws-privateca-issuer"):
            status = True
            message = "aws privateca issuer for certificates exists"
            break

    return (status, message, [], None)


def check_default_deny_policy_exists(resources: Resources):
    """Report namespaces that have no default network deny policy.

    Returns (status, message, objectsList, objectType); objectsList holds
    the names of namespaces without any NetworkPolicy.
    """
    objectType = "Namespace"
    # A shallow copy is enough: resources.namespaces holds name strings.
    objectsList = list(resources.namespaces)

    for policy in resources.network_policies:
        if policy.metadata.namespace in objectsList:
            objectsList.remove(policy.metadata.namespace)

    if objectsList:
        return (False, "Namespaces does not have default network deny policies", objectsList, objectType)
    return (True, "Namespaces have default network deny policies", objectsList, objectType)


def ensure_namespace_psa_exist(resources: Resources):
    """Report namespaces missing Pod Security Admission (PSA) labels.

    Namespaces already listed in resources.namespaces are skipped;
    presumably those are handled by the namespaced checks — TODO confirm.
    Returns (status, message, objectsList, objectType).
    """
    objectType = "Namespace"
    objectsList = []

    for namespace in resources.namespaceObjList:
        if namespace.metadata.name in resources.namespaces:
            continue
        labels = namespace.metadata.labels
        if not labels:
            # fix: a namespace with no labels at all cannot carry PSA
            # labels and was previously skipped silently.
            objectsList.append(namespace.metadata.name)
        elif "pod-security.kubernetes.io/enforce" not in labels:
            objectsList.append(namespace.metadata.name)
        elif "pod-security.kubernetes.io/warn" not in labels:
            objectsList.append(namespace.metadata.name)

    if objectsList:
        return (False, "Namespaces should have psa modes", objectsList, objectType)
    return (True, "Namespaces have psa modes", objectsList, objectType)
def cluster_data(resources, config, _type):
    """Collect and print a summary of the cluster (version, VPC, workloads).

    ``config`` and ``_type`` are accepted for signature parity with
    :func:`harden` but are not used here.
    """
    current_eks_version = "1.24"
    eks_cluster_data = []

    eksclient = boto3.client("eks", region_name=resources.region)
    cluster_metadata = eksclient.describe_cluster(name=resources.cluster)
    cluster_version = cluster_metadata["cluster"]["version"]

    if cluster_version == current_eks_version:
        eks_cluster_data.append(
            ["green", resources.cluster, cluster_version, "Cluster version is latest version"]
        )
    else:
        eks_cluster_data.append(
            ["yellow", resources.cluster, cluster_version,
             "Upgrade to latest version: {}".format(current_eks_version)]
        )

    cluster_endpoint = cluster_metadata["cluster"]["endpoint"]
    eks_cluster_data.append(["green", "Cluster Endpoint URL", cluster_endpoint, ""])

    vpc_id = cluster_metadata["cluster"]["resourcesVpcConfig"]["vpcId"]
    cluster_subnet_ids = cluster_metadata["cluster"]["resourcesVpcConfig"]["subnetIds"]

    eks_cluster_data.append(["green", "Cluster VPC Id", vpc_id, ""])
    eks_cluster_data.append(["green", "Cluster Subnets Id", " ".join(cluster_subnet_ids), ""])

    endpoint_public_access = cluster_metadata["cluster"]["resourcesVpcConfig"]["endpointPublicAccess"]
    endpoint_private_access = cluster_metadata["cluster"]["resourcesVpcConfig"]["endpointPrivateAccess"]
    endpoint_access_str = (
        "public: " + str(endpoint_public_access) + ", "
        + "private: " + str(endpoint_private_access)
    )
    # fix: label previously read "Cluster Emdpopint Access"
    eks_cluster_data.append(["green", "Cluster Endpoint Access", endpoint_access_str, ""])

    # fix: the ec2 resource was previously created without a region, so it
    # used the default-session region instead of the cluster's region.
    subnets = boto3.resource("ec2", region_name=resources.region).subnets.filter(
        Filters=[{"Name": "vpc-id", "Values": [vpc_id]}]
    )
    subnet_ids = [sn.id for sn in subnets]

    ec2client = boto3.client("ec2", region_name=resources.region)
    described = ec2client.describe_subnets(SubnetIds=subnet_ids)
    total_available_ips = sum(sn["AvailableIpAddressCount"] for sn in described["Subnets"])
    eks_cluster_data.append(
        ["green", "Total Available IPs in the VPC", str(total_available_ips), ""]
    )

    deployments = client.AppsV1Api().list_deployment_for_all_namespaces().items
    eks_cluster_data.append(
        ["green", "Total No. of Deployments in Cluster", str(len(deployments)), ""]
    )

    services = client.CoreV1Api().list_service_for_all_namespaces().items
    eks_cluster_data.append(
        ["green", "Total No. of Services in Cluster", str(len(services)), ""]
    )

    pods = client.CoreV1Api().list_pod_for_all_namespaces().items
    eks_cluster_data.append(
        ["green", "Total No. of Pods in Cluster", str(len(pods)), ""]
    )

    node_list = client.CoreV1Api().list_node().items
    eks_cluster_data.append(
        ["green", "Total No. of Nodes in Cluster", str(len(node_list)), ""]
    )

    # Classify nodes by node-group ownership from well-known node labels.
    eks_managed_groups = set()
    self_managed_groups = set()
    for node in node_list:
        labels = node.metadata.labels
        if "eks.amazonaws.com/nodegroup" in labels:
            eks_managed_groups.add(labels["eks.amazonaws.com/nodegroup"])
        elif "alpha.eksctl.io/nodegroup-name" in labels:
            self_managed_groups.add(labels["alpha.eksctl.io/nodegroup-name"])
        elif "karpenter.sh/provisioner-name" in labels:
            pass  # Karpenter-provisioned nodes belong to no node group
        else:
            self_managed_groups.add(node.metadata.name)

    # fix: labels previously read "node groups ib Cluster"
    if eks_managed_groups:
        eks_cluster_data.append(
            ["green", "List of EKS Managed node groups in Cluster", " ".join(eks_managed_groups), ""]
        )
    if self_managed_groups:
        eks_cluster_data.append(
            ["green", "List of Self Managed node groups in Cluster", " ".join(self_managed_groups), ""]
        )

    print_console_message(True, "aws-eks-best-practices", None, eks_cluster_data, "ClusterData")


def harden(resources, config, _type, pillarsList):
    """Run every configured rule for the pillars in ``pillarsList``.

    For each pillar/section, dynamically imports the rule module, calls each
    rule, optionally prints per-rule output in debug mode, and prints one
    aggregated report per pillar.
    """
    console = Console()
    console.print()

    config = config[_type]

    for pillar in config.keys():
        if pillar not in pillarsList:
            continue

        eks_waf_report = {pillar: []}

        for section in config[pillar]:
            if resources.debug:
                if _type == "cluster_wide":
                    console.rule(
                        f"[b] Checking Rules for Scope: {_type} for Pillar: {pillar} Section: {section}",
                        characters=" -",
                    )
                else:
                    console.rule(
                        f"[b] Checking Rules for Scope: {_type} for namespace: {resources.namespace} for Pillar: {pillar} Section: {section}",
                        characters=" -",
                    )
                console.print()

            for rule in config[pillar][section]:
                module = import_module(f"hardeneks.{_type}.{pillar}.{section}")
                try:
                    func = getattr(module, rule)
                except AttributeError as exc:
                    print(f"Exception for rule={rule} : [bold][red]{exc}")
                    # fix: previously fell through and invoked a stale or
                    # undefined ``func`` after the lookup failed.
                    continue
                try:
                    (ret, message, objectsList, objectType) = func(resources)

                    if resources.debug:
                        print_console_message(ret, rule, message, objectsList, objectType)

                    eks_waf_report[pillar].append(
                        {"rule": rule, "message": message, "ret": ret}
                    )
                except Exception as exc:
                    if _type == "cluster_wide":
                        print(f"Exception for rule {rule} in Section {section} for Pillar {pillar} for scope {_type}: [bold][red]{exc}")
                    else:
                        print(f"Exception for rule {rule} in Section {section} for Pillar {pillar} for scope {_type} for namespace: {resources.namespace}: [bold][red]{exc}")

        if _type == "cluster_wide":
            print_console_message(True, pillar, None, eks_waf_report, "Report")
        else:
            print_console_message(True, pillar, resources.namespace, eks_waf_report, "Report")
def is_deployment_exists_in_namespace(deploymentName, namespace):
    """Return (True, deployment) if ``deploymentName`` exists in ``namespace``.

    Returns (False, None) when no such Deployment is found.
    """
    deployments = client.AppsV1Api().list_namespaced_deployment(namespace).items
    for deployment in deployments:
        if deployment.metadata.name == deploymentName:
            return (True, deployment)
    return (False, None)


def is_daemonset_exists_in_cluster(dsName):
    """Return (True, daemonset) if a DaemonSet named ``dsName`` exists anywhere.

    Returns (False, None) when no such DaemonSet is found.
    """
    for ds in client.AppsV1Api().list_daemon_set_for_all_namespaces().items:
        if ds.metadata.name == dsName:
            return (True, ds)
    return (False, None)


def use_IP_target_type_service_load_balancers(namespaced_resources: NamespacedResources,):
    """Check LoadBalancer Services for the NLB ``ip`` target-type annotation.

    Returns (status, message, objectsList, "Service"); status is None when
    the namespace has no LoadBalancer Services, otherwise False if any of
    them lacks the annotation.
    """
    annotation_key = "service.beta.kubernetes.io/aws-load-balancer-nlb-target-type"
    compliant_names = []
    objectsList = []

    for service in namespaced_resources.services:
        if service.spec.type != "LoadBalancer":
            continue
        annotations = service.metadata.annotations
        if annotations and annotations.get(annotation_key) == "ip":
            compliant_names.append(service.metadata.name)
        else:
            objectsList.append(service)

    # fix: status/message were previously set per service, so the last
    # service examined decided the overall result even when earlier
    # services were non-compliant.
    if objectsList:
        status = False
        message = (
            "Target Group Mode IP is NOT configured for services :"
            + " ".join(s.metadata.name for s in objectsList)
        )
    elif compliant_names:
        status = True
        message = (
            "Target Group Mode IP configured for services :"
            + " ".join(compliant_names)
        )
    else:
        status = None
        # fix: message previously read "Loadbalamcer"
        message = "Note this Rule is NOT applicable since there are no Loadbalancer types"

    return (status, message, objectsList, "Service")


def use_IP_target_type_ingress_load_balancers(namespaced_resources: NamespacedResources,):
    """Check Ingresses for the ALB ``ip`` target-type annotation.

    Returns (status, message, objectsList, "Ingress"); status is None when
    the namespace has no Ingresses, otherwise False if any of them lacks
    the annotation.
    """
    annotation_key = "alb.ingress.kubernetes.io/target-type"
    compliant_names = []
    objectsList = []

    ingressList = (
        client.NetworkingV1Api()
        .list_namespaced_ingress(namespaced_resources.namespace)
        .items
    )

    for ingress in ingressList:
        annotations = ingress.metadata.annotations
        if annotations and annotations.get(annotation_key) == "ip":
            compliant_names.append(ingress.metadata.name)
        else:
            objectsList.append(ingress)

    # fix: status was previously overwritten per ingress (last one wins).
    if objectsList:
        status = False
        message = (
            "Target Group Mode IP is NOT configured for ingresses :"
            + " ".join(i.metadata.name for i in objectsList)
        )
    elif compliant_names:
        status = True
        message = (
            "Target Group Mode IP configured for ingresses :"
            + " ".join(compliant_names)
        )
    else:
        status = None
        message = "Note this Rule is NOT applicable since there are no Loadbalancer Services"

    return (status, message, objectsList, "Ingress")
def utilize_pod_readiness_gates(namespaced_resources: NamespacedResources,):
    """Check that the namespace enables ELB pod readiness gate injection.

    Collects LoadBalancer Services and Ingresses that use target-type "ip",
    and — only if at least one compliant object exists — verifies that the
    namespace carries the 'elbv2.k8s.aws/pod-readiness-gate-inject: enabled'
    label. Returns (status, message, objectsList, "Service").
    """

    status = None
    objectsList = None
    objectType = None
    succes_message = "Target Group Mode IP configured for services :"
    error_message = "Target Group Mode IP is NOT configured for services :"
    serviceNames = ''
    ingressNames = ''
    objectsList = []
    serviceNameList = []
    serviceObjOffendersList = []
    ingressObjOffendersList = []
    ingressNameList = []
    ingressObjNameList = []

    # Partition Services: LoadBalancer Services annotated with
    # nlb-target-type "ip" are compliant; everything else (including
    # non-LoadBalancer Services) is treated as an offender.
    for service in namespaced_resources.services:

        serviceName = service.metadata.name
        serviceType = service.spec.type

        isServiceCompliant = False
        if serviceType == "LoadBalancer":
            annotations = service.metadata.annotations
            if annotations:
                target_group_mode_exists = (
                    "service.beta.kubernetes.io/aws-load-balancer-nlb-target-type"
                    in annotations
                )
                if target_group_mode_exists:
                    target_group_mode_type = annotations.get(
                        "service.beta.kubernetes.io/aws-load-balancer-nlb-target-type"
                    )
                    if target_group_mode_type == "ip":
                        isServiceCompliant = True

        if isServiceCompliant:
            serviceNameList.append(serviceName)
        else:
            serviceObjOffendersList.append(service)

    # Same partition for Ingresses, keyed on the ALB target-type annotation.
    ingressList = client.NetworkingV1Api().list_namespaced_ingress(namespaced_resources.namespace).items

    for ingress in ingressList:

        isIngressCompliant = False

        ingressName = ingress.metadata.name
        annotations = ingress.metadata.annotations

        if annotations:
            target_group_mode_exists = (
                "alb.ingress.kubernetes.io/target-type"
                in annotations
            )
            if target_group_mode_exists:

                target_group_mode_type = annotations.get(
                    "alb.ingress.kubernetes.io/target-type"
                )
                if target_group_mode_type == "ip":
                    isIngressCompliant = True

        if isIngressCompliant:
            ingressNameList.append(ingressName)
        else:
            ingressObjOffendersList.append(ingress)

    # Only check the readiness-gate label when at least one compliant
    # Service or Ingress exists; otherwise status stays None.
    # NOTE(review): if the namespace has no labels at all, status is never
    # set here and remains None — confirm that is intended.
    if len(serviceNameList) >=1 or len(ingressNameList) >=1:
        ns = client.CoreV1Api().read_namespace(name=namespaced_resources.namespace)
        labels = ns.metadata.labels
        if labels:
            if 'elbv2.k8s.aws/pod-readiness-gate-inject' in labels.keys():
                readinessgatesstatus = labels['elbv2.k8s.aws/pod-readiness-gate-inject']

                if readinessgatesstatus == 'enabled':
                    status = True
                else:
                    # Label present but not "enabled": report the offenders
                    # gathered above.
                    status = False
                    objectsList.extend(serviceObjOffendersList)
                    objectsList.extend(ingressObjOffendersList)
            else:
                status = False

        if len(serviceNameList) >=1:
            serviceNames = "Service List: " + ' '.join(serviceNameList)

        if len(ingressNameList) >=1:
            ingressNames = "Ingress List: " + ' '.join(ingressNameList)

    message = serviceNames + " " + ingressNames

    if status is None:
        message = "Note this Rule is NOT applicable since there are no Loadbalaccer Services or Ingress"

    return (status, message, objectsList, "Service")
service.metadata.name + serviceType = service.spec.type + + if serviceType == "LoadBalancer": + annotations = service.metadata.annotations + #print("service name={} type={} annotations={}".format(serviceName, serviceType, annotations)) + if annotations: + target_group_mode_exists = ( + "service.beta.kubernetes.io/aws-load-balancer-nlb-target-type" + in annotations + ) + #print("target_group_mode_exists={} target_group_mode_type={}".format(target_group_mode_exists, target_group_mode_type)) + if target_group_mode_exists: + target_group_mode_type = annotations.get( + "service.beta.kubernetes.io/aws-load-balancer-nlb-target-type" + ) + if target_group_mode_type == "ip": + serviceObjList.append(service) + + + + + ingressList = client.NetworkingV1Api().list_namespaced_ingress(namespaced_resources.namespace).items + + #print("ingressList={}".format(ingressList)) + + for ingress in ingressList: + + ingressName = ingress.metadata.name + annotations = ingress.metadata.annotations + + if annotations: + target_group_mode_exists = ( + "alb.ingress.kubernetes.io/target-type" + in annotations + ) + if target_group_mode_exists: + + target_group_mode_type = annotations.get( + "alb.ingress.kubernetes.io/target-type" + ) + if target_group_mode_type == "ip": + #ingressObjList.append(ingress) + rules = ingress.spec.rules + #print("ingressName={} rules={}".format(ingressName, rules)) + for rule in rules: + paths = rule.http.paths + #print("paths={}".format(paths)) + for path in paths: + serviceName = path.backend.service.name + #print("serviceName={}".format(serviceName)) + serviceObj = client.CoreV1Api().read_namespaced_service(name=serviceName, namespace=namespaced_resources.namespace) + serviceObjList.append(serviceObj) + #print("serviceObj={}".format(serviceObj)) + + + + + #print("serviceObjList={}".format(serviceObjList)) + + for service in serviceObjList: + + is_sleep_command_exists = False + serviceName = service.metadata.name + serviceSelector = service.spec.selector + 
#serviceSelector = {'app1': 'nginx1', 'k1':'v1', 'k2' : 'v2'} + numberOfLabels = len (serviceSelector) + i=0 + serviceSelectorStr='' + for k,v in serviceSelector.items(): + #print("k={} v={}".format(k,v)) + serviceSelectorStr += k +'=' + v + i += 1 + if i < numberOfLabels: + serviceSelectorStr += ',' + + #print("serviceName={} serviceSelector={} serviceSelectorStr={}".format(serviceName, serviceSelector, serviceSelectorStr)) + pods = client.CoreV1Api().list_namespaced_pod(namespace=namespaced_resources.namespace, label_selector=serviceSelectorStr).items + #print("pods={}".format(pods)) + #for pod in pods: + if len(pods) >= 1: + podName = pods[0].metadata.name + containers = pods[0].spec.containers + image = containers[0].image + lifecycle = containers[0].lifecycle + #print("podName={} image={} lifecycle={}".format(podName, image, lifecycle)) + if lifecycle: + commandList = lifecycle.pre_stop._exec.command + #print("commandList={}".format(commandList)) + + for command in commandList: + if 'sleep' in command: + is_sleep_command_exists = True + #print("sleep command={}".format(command)) + + if is_sleep_command_exists: + complianceServiceNamesList.append(serviceName) + else: + objectsList.append(service) + nonComplianceServiceNamesList.append(serviceName) + + + #print("complianceServiceNamesList={} nonComplianceServiceNamesList={}".format(complianceServiceNamesList, nonComplianceServiceNamesList)) + + if len(complianceServiceNamesList) >=1: + complianceServiceNames = "Compliance Service List: " + ' '.join(complianceServiceNamesList) + #objectsList.extend(complianceServiceNamesList) + + if len(nonComplianceServiceNamesList) >=1: + nonComplianceServiceNames = "Non Compliance Service List: " + ' '.join(nonComplianceServiceNamesList) + #objectsList.extend(nonComplianceServiceNamesList) + + + #for ingressObj in + #print("ingressObjList={}".format(ingressObjList)) + + if len(complianceServiceNamesList) >=1 or len(nonComplianceServiceNamesList) >=1: + + if 
len(nonComplianceServiceNamesList) == 0: + status = True + message = complianceServiceNames + else: + status = False + message = complianceServiceNames + ' ' + nonComplianceServiceNames + + + if status is None: + message = "Note this Rule is NOT applicable since there are no Loadbalaccer Services or Ingress" + + return (status, message, objectsList, "Service") + + +def configure_pod_disruption_budget(namespaced_resources: NamespacedResources,): + + status = None + objectsList = None + objectType = None + message = '' + succes_message = "Target Group Mode IP configured for services :" + error_message = "Target Group Mode IP is NOT configured for services :" + objectsList = [] + deployNamesWithPDB ='' + deploymentsWithPDB =[] + deployNamesWithoutPDB = '' + deploymentsWithoutPDB =[] + + + print("namespace={}".format(namespaced_resources.namespace)) + print("deployments={}".format(namespaced_resources.deployments)) + + pdsList = client.PolicyV1Api().list_namespaced_pod_disruption_budget(namespace=namespaced_resources.namespace).items + + #print("pdsList={}".format(pdsList)) + #print("deployments={}".format(namespaced_resources.deployments)) + + for deployment in namespaced_resources.deployments: + deploymentName = deployment.metadata.name + deployLabels = deployment.spec.selector.match_labels + #print("deploymentName={} deployLabels={}".format(deploymentName, deployLabels)) + + isPDBExists = False + for pdb in pdsList: + + pdbName = pdb.metadata.name + pdbLabels = pdb.spec.selector.match_labels + #print("pdbName={} pdbLabels={}".format(pdbName, pdbLabels)) + isPDBExists = all((deployLabels.get(k) == v for k, v in pdbLabels.items())) + #print("deploymentName={} pdbName={} res={}".format(deploymentName, pdbName, isPDBExists)) + + if isPDBExists: + deploymentsWithPDB.append(deploymentName) + else: + deploymentsWithoutPDB.append(deploymentName) + objectsList.append(deployment) + + + if len(deploymentsWithPDB) >=1: + deployNamesWithPDB = "Deployments with PDB: " + ' 
'.join(deploymentsWithPDB) + #objectsList.extend(deploymentsWithPDB) + + if len(deploymentsWithoutPDB) >=1: + deployNamesWithoutPDB = "Deployments without PDB: " + ' '.join(deploymentsWithoutPDB) + #objectsList.extend(deploymentsWithoutPDB) + + + if len(deploymentsWithPDB) >=1 or len(deploymentsWithoutPDB) >=1: + + if len(deploymentsWithoutPDB) == 0: + status = True + message = deployNamesWithPDB + else: + status = False + message = deployNamesWithPDB + ' ' + deployNamesWithoutPDB + + if status is None: + message = "Note this Rule is NOT applicable since there are no deployments in this namespace" + + return (status, message, objectsList, "Deployment") + diff --git a/hardeneks/namespace_based/reliability/applications.py b/hardeneks/namespace_based/reliability/applications.py index a6aeeb5..40ef742 100644 --- a/hardeneks/namespace_based/reliability/applications.py +++ b/hardeneks/namespace_based/reliability/applications.py @@ -1,117 +1,141 @@ from ...resources import NamespacedResources -from ...report import ( - print_pod_table, - print_service_table, - print_deployment_table, -) - - -def avoid_running_singleton_pods(namespaced_resources: NamespacedResources): - offenders = [] - for pod in namespaced_resources.pods: - owner = pod.metadata.owner_references - if not owner: - offenders.append(pod) - - if offenders: - print_pod_table( - offenders, - "[red]Avoid running pods without deployments.", - "[link=https://aws.github.io/aws-eks-best-practices/reliability/docs/application/#avoid-running-singleton-pods]Click to see the guide[/link]", - ) - return offenders - - -def run_multiple_replicas(namespaced_resources: NamespacedResources): - offenders = [] +def check_horizontal_pod_autoscaling_exists(namespaced_resources: NamespacedResources,): + + status = None + message = "" + objectType = "Service" + objectsList = [] + + hpas = [i.spec.scale_target_ref.name for i in namespaced_resources.hpas] for deployment in namespaced_resources.deployments: - if 
deployment.spec.replicas < 2: - offenders.append(deployment) - - if offenders: - print_deployment_table( - offenders, - "[red]Avoid running single replica deployments", - "[link=https://aws.github.io/aws-eks-best-practices/reliability/docs/application/#run-multiple-replicas]Click to see the guide[/link]", - ) - return offenders + if deployment.metadata.name not in hpas: + objectsList.append(deployment) + if objectsList: + status = False + message = "Deploy horizontal pod autoscaler for deployments" + else: + status = True + message = "horizontal pod autoscaler for deployments is deployed" + + return (status, message, objectsList, objectType) def schedule_replicas_across_nodes(namespaced_resources: NamespacedResources): - offenders = [] + + status = None + message = "" + objectType = "Service" + objectsList = [] for deployment in namespaced_resources.deployments: spread = deployment.spec.template.spec.topology_spread_constraints if not spread: - offenders.append(deployment) + objectsList.append(deployment) else: topology_keys = set([i.topology_key for i in spread]) if not set(["topology.kubernetes.io/zone"]).issubset( topology_keys ): - offenders.append(deployment) + objectsList.append(deployment) - if offenders: - print_service_table( - offenders, - "[red]Spread replicas across AZs and Nodes", - "[link=https://aws.github.io/aws-eks-best-practices/reliability/docs/application/#schedule-replicas-across-nodes]Click to see the guide[/link]", - ) - return offenders + if objectsList: + status = False + message = "Spread replicas across AZs and Nodes" + else: + status = True + message = " replicas Spread across AZs and Nodes" + + return (status, message, objectsList, objectType) -def check_horizontal_pod_autoscaling_exists( - namespaced_resources: NamespacedResources, -): - offenders = [] +def run_multiple_replicas(namespaced_resources: NamespacedResources): + + status = None + message = "" + objectsList = [] + objectType = "Deployment" + + for deployment in 
namespaced_resources.deployments: + if deployment.spec.replicas < 2: + objectsList.append(deployment) - hpas = [i.spec.scale_target_ref.name for i in namespaced_resources.hpas] + if objectsList: + status = False + message = "Avoid running single replica deployments" + else: + status = True + message = "There are no single replica deployments" + + return (status, message, objectsList, objectType) - for deployment in namespaced_resources.deployments: - if deployment.metadata.name not in hpas: - offenders.append(deployment) - if offenders: - print_service_table( - offenders, - "[red]Deploy horizontal pod autoscaler for deployments", - "[link=https://aws.github.io/aws-eks-best-practices/reliability/docs/application/#horizontal-pod-autoscaler-hpa]Click to see the guide[/link]", - ) - return offenders +def avoid_running_singleton_pods(namespaced_resources: NamespacedResources): + + status = None + message = "" + objectType = "Pod" + objectsList = [] + + for pod in namespaced_resources.pods: + owner = pod.metadata.owner_references + if not owner: + objectsList.append(pod) + + if objectsList: + status = False + message = "Avoid running pods without deployments." + else: + status = True + message = "There no singleton pods" + + return (status, message, objectsList, objectType) + def check_readiness_probes(namespaced_resources: NamespacedResources): - offenders = [] + + status = None + message = "" + objectType = "Pod" + objectsList = [] for pod in namespaced_resources.pods: for container in pod.spec.containers: if not container.readiness_probe: - offenders.append(pod) - - if offenders: - print_pod_table( - offenders, - "[red]Define readiness probes for pods.", - "[link=https://aws.github.io/aws-eks-best-practices/reliability/docs/application/#use-readiness-probe-to-detect-partial-unavailability]Click to see the guide[/link]", - ) - return offenders - + objectsList.append(pod) + + if objectsList: + status = False + message = "Define readiness probes for pods." 
+ else: + status = True + message = "readiness probes exists for pods." + + return (status, message, objectsList, objectType) + + def check_liveness_probes(namespaced_resources: NamespacedResources): - offenders = [] + + status = None + message = "" + objectType = "Pod" + objectsList = [] + for pod in namespaced_resources.pods: for container in pod.spec.containers: if not container.liveness_probe: - offenders.append(pod) - - if offenders: - print_pod_table( - offenders, - "[red]Define liveness probes for pods.", - "[link=https://aws.github.io/aws-eks-best-practices/reliability/docs/application/#use-liveness-probe-to-remove-unhealthy-pods]Click to see the guide[/link]", - ) - return offenders + objectsList.append(pod) + + if objectsList: + status = False + message = "Define liveness probes for pods." + else: + status = True + message = "liveness probes for exists for pods." + + return (status, message, objectsList, objectType) \ No newline at end of file diff --git a/hardeneks/namespace_based/security/encryption_secrets.py b/hardeneks/namespace_based/security/encryption_secrets.py index 9ca1743..44b4d04 100644 --- a/hardeneks/namespace_based/security/encryption_secrets.py +++ b/hardeneks/namespace_based/security/encryption_secrets.py @@ -1,28 +1,29 @@ from ...resources import NamespacedResources -from ...report import ( - print_pod_table, -) - def disallow_secrets_from_env_vars(resources: NamespacedResources): - offenders = [] - + + status = None + message = "" + objectType = "Pod" + objectsList = [] + for pod in resources.pods: for container in pod.spec.containers: if container.env: for env in container.env: if env.value_from and env.value_from.secret_key_ref: - offenders.append(pod) + objectsList.append(pod) if container.env_from: for env_from in container.env_from: if env_from.secret_ref: - offenders.append(pod) + objectsList.append(pod) - if offenders: - print_pod_table( - offenders, - "[red]Disallow secrets from env vars", - 
"[link=https://aws.github.io/aws-eks-best-practices/security/docs/data/#use-volume-mounts-instead-of-environment-variables]Click to see the guide[/link]", - ) + if objectsList: + status = False + message = "Disallow secrets from env vars" + else: + status = True + message = "secrets are not allowed env vars" + + return (status, message, objectsList, objectType) - return offenders diff --git a/hardeneks/namespace_based/security/iam.py b/hardeneks/namespace_based/security/iam.py index 93ef75b..3c476d9 100644 --- a/hardeneks/namespace_based/security/iam.py +++ b/hardeneks/namespace_based/security/iam.py @@ -1,51 +1,98 @@ from collections import Counter -from ...resources import NamespacedResources -from ...report import ( - print_role_table, - print_pod_table, - print_workload_table, -) +from rich.console import Console +from ...resources import NamespacedResources -def restrict_wildcard_for_roles(resources: NamespacedResources): - offenders = [] - for role in resources.roles: - for rule in role.rules: - if "*" in rule.verbs: - offenders.append(role) - if "*" in rule.resources: - offenders.append(role) +console = Console() - if offenders: - print_role_table( - offenders, - "[red]Roles should not have '*' in Verbs or Resources", - "[link=https://aws.github.io/aws-eks-best-practices/security/docs/iam/#employ-least-privileged-access-when-creating-rolebindings-and-clusterrolebindings]Click to see the guide[/link]", - "Role", - ) - return offenders +def disable_anonymous_access_for_roles(resources: NamespacedResources): + + status = None + message = "" + objectType = "RoleBinding" + objectsList = [] + rolenameslist = "" + + for role_binding in resources.role_bindings: + if role_binding.subjects: + for subject in role_binding.subjects: + if ( + subject.name == "system:unauthenticated" + or subject.name == "system:anonymous" + ): + objectsList.append(role_binding) + rolenameslist += role_binding.metadata.name + + if objectsList: + status = False + message = "Roles bound to 
to anonymous/unauthenticated groups: " + rolenameslist + else: + status = True + message = "There are no Roles bound to to anonymous/unauthenticated groups" + + return (status, message, objectsList, objectType) + + + +def restrict_wildcard_for_roles(resources: NamespacedResources): + + status = None + message = "" + objectType = "Role" + objectsList = [] + rolenameslist = "" + + if resources.roles: + for role in resources.roles: + if role.rules: + for rule in role.rules: + if "*" in rule.verbs: + objectsList.append(role) + if "*" in rule.resources: + objectsList.append(role) + + if objectsList: + status = False + message = "Roles with '*' in Verbs or Resources are: " + rolenameslist + else: + status = True + message = "There are no Roles with '*' in Verbs or Resources" + + return (status, message, objectsList, objectType) + + def disable_service_account_token_mounts(resources: NamespacedResources): - offenders = [] + + status = None + message = "" + objectType = "Pod" + objectsList = [] + rolenameslist = "" for pod in resources.pods: if pod.spec.automount_service_account_token: - offenders.append(pod) - - if offenders: - print_pod_table( - offenders, - "[red]Auto-mounting of Service Account tokens is not allowed", - "[link=https://aws.github.io/aws-eks-best-practices/security/docs/iam/#disable-auto-mounting-of-service-account-tokens]Click to see the guide[/link]", - ) - return offenders - - + objectsList.append(pod) + + if objectsList: + status = False + message = "Auto-mounting of Service Account tokens is not allowed" + else: + status = True + message = "There is no Auto-mounting of Service Account tokens" + + return (status, message, objectsList, objectType) + + def disable_run_as_root_user(resources: NamespacedResources): - offenders = [] + + status = None + message = "" + objectType = "Pod" + objectsList = [] for pod in resources.pods: security_context = pod.spec.security_context @@ -53,45 +100,25 @@ def disable_run_as_root_user(resources: 
NamespacedResources): not security_context.run_as_group and not security_context.run_as_user ): - offenders.append(pod) - - if offenders: - print_pod_table( - offenders, - "[red]Running as root is not allowed", - "[link=https://aws.github.io/aws-eks-best-practices/security/docs/iam/#run-the-application-as-a-non-root-user]Click to see the guide[/link]", - ) - - return offenders - - -def disable_anonymous_access_for_roles(resources: NamespacedResources): - offenders = [] - - for role_binding in resources.role_bindings: - if role_binding.subjects: - for subject in role_binding.subjects: - if ( - subject.name == "system:unauthenticated" - or subject.name == "system:anonymous" - ): - offenders.append(role_binding) - - if offenders: - print_role_table( - offenders, - "[red]Don't bind roles to anonymous or unauthenticated groups", - "[link=https://aws.github.io/aws-eks-best-practices/security/docs/iam/#review-and-revoke-unnecessary-anonymous-access]Click to see the guide[/link]", - "RoleBinding", - ) - return offenders - - -def use_dedicated_service_accounts_for_each_deployment( - resources: NamespacedResources, -): - offenders = [] - + objectsList.append(pod) + + if objectsList: + status = False + message = "Running as root is not allowed" + else: + status = True + message = "There are no pod running as root" + + return (status, message, objectsList, objectType) + + +def use_dedicated_service_accounts_for_each_deployment(resources: NamespacedResources): + + status = None + message = "" + objectType = "Deployment" + objectsList = [] + count = Counter( [ i.spec.template.spec.service_account_name @@ -105,24 +132,25 @@ def use_dedicated_service_accounts_for_each_deployment( for k, v in repeated_service_accounts.items(): for deployment in resources.deployments: if k == deployment.spec.template.spec.service_account_name: - offenders.append(deployment) - - if offenders: - print_workload_table( - offenders, - "[red]Don't share service accounts between Deployments", - 
"[link=https://aws.github.io/aws-eks-best-practices/security/docs/iam/#use-dedicated-service-accounts-for-each-application]Click to see the guide[/link]", - "Deployment", - ) + objectsList.append(deployment) - return offenders + if objectsList: + status = False + message = "Don't share service accounts between Deployments" + else: + status = True + message = "There are no shared service accounts between Deployments" + + return (status, message, objectsList, objectType) +def use_dedicated_service_accounts_for_each_stateful_set(resources: NamespacedResources): -def use_dedicated_service_accounts_for_each_stateful_set( - resources: NamespacedResources, -): - offenders = [] + status = None + message = "" + objectType = "StatefulSet" + objectsList = [] + count = Counter( [ i.spec.template.spec.service_account_name @@ -136,24 +164,26 @@ def use_dedicated_service_accounts_for_each_stateful_set( for k, v in repeated_service_accounts.items(): for deployment in resources.stateful_sets: if k == deployment.spec.template.spec.service_account_name: - offenders.append(deployment) - - if offenders: - print_workload_table( - offenders, - "[red]Don't share service accounts between StatefulSets", - "[link=https://aws.github.io/aws-eks-best-practices/security/docs/iam/#use-dedicated-service-accounts-for-each-application]Click to see the guide[/link]", - "StatefulSet", - ) - - return offenders - - -def use_dedicated_service_accounts_for_each_daemon_set( - resources: NamespacedResources, -): - offenders = [] - + objectsList.append(deployment) + + if objectsList: + status = False + message = "Don't share service accounts between StatefulSets" + else: + status = True + message = "There are no shared service accounts between StatefulSets" + + return (status, message, objectsList, objectType) + + + +def use_dedicated_service_accounts_for_each_daemon_set(resources: NamespacedResources): + + status = None + message = "" + objectType = "DaemonSet" + objectsList = [] + count = Counter( [ 
i.spec.template.spec.service_account_name @@ -167,14 +197,14 @@ def use_dedicated_service_accounts_for_each_daemon_set( for k, v in repeated_service_accounts.items(): for deployment in resources.daemon_sets: if k == deployment.spec.template.spec.service_account_name: - offenders.append(deployment) - - if offenders: - print_workload_table( - offenders, - "[red]Don't share service accounts between DaemonSets", - "[link=https://aws.github.io/aws-eks-best-practices/security/docs/iam/#use-dedicated-service-accounts-for-each-application]Click to see the guide[/link]", - "DaemonSet", - ) - - return offenders + objectsList.append(deployment) + + if objectsList: + status = False + message = "Don't share service accounts between DaemonSet" + else: + status = True + message = "There are no shared service accounts between DaemonSet" + + return (status, message, objectsList, objectType) + diff --git a/hardeneks/namespace_based/security/network_security.py b/hardeneks/namespace_based/security/network_security.py index c22ff1b..188b867 100644 --- a/hardeneks/namespace_based/security/network_security.py +++ b/hardeneks/namespace_based/security/network_security.py @@ -1,13 +1,18 @@ -from ...report import ( - print_service_table, -) +from rich.console import Console + from hardeneks.resources import NamespacedResources -def use_encryption_with_aws_load_balancers( - namespaced_resources: NamespacedResources, -): - offenders = [] +console = Console() + + +def use_encryption_with_aws_load_balancers(namespaced_resources: NamespacedResources): + + status = None + message = "" + objectType = "Service" + objectsList = [] + for service in namespaced_resources.services: annotations = service.metadata.annotations if annotations: @@ -19,12 +24,17 @@ def use_encryption_with_aws_load_balancers( "service.beta.kubernetes.io/aws-load-balancer-ssl-ports" ) if not (ssl_cert and ssl_cert_port == "443"): - offenders.append(service) - - if offenders: - print_service_table( - offenders, - "[red]Make sure 
you specify an ssl cert", - "[link=https://aws.github.io/aws-eks-best-practices/security/docs/network/#use-encryption-with-aws-load-balancers]Click to see the guide[/link]", - ) - return offenders + objectsList.append(service) + + if objectsList: + status = False + message = "Make sure you specify an ssl cert" + else: + status = True + message = "ssl cert are configured for the services" + + return (status, message, objectsList, objectType) + + + + diff --git a/hardeneks/namespace_based/security/pod_security.py b/hardeneks/namespace_based/security/pod_security.py index 207da02..7b772bb 100644 --- a/hardeneks/namespace_based/security/pod_security.py +++ b/hardeneks/namespace_based/security/pod_security.py @@ -1,12 +1,18 @@ -from ...report import ( - print_pod_table, -) +from rich.console import Console + from ...resources import NamespacedResources -def disallow_container_socket_mount(namespaced_resources: NamespacedResources): - offenders = [] +console = Console() + +def disallow_container_socket_mount(namespaced_resources: NamespacedResources): + + status = None + message = "" + objectType = "Pod" + objectsList = [] + sockets = [ "/var/run/docker.sock", "/var/run/containerd.sock", @@ -16,97 +22,114 @@ def disallow_container_socket_mount(namespaced_resources: NamespacedResources): for pod in namespaced_resources.pods: for volume in pod.spec.volumes: if volume.host_path and volume.host_path.path in sockets: - offenders.append(pod) - - if offenders: - print_pod_table( - offenders, - "[red]Container socket mounts are not allowed", - "[link=https://aws.github.io/aws-eks-best-practices/security/docs/pods/#never-run-docker-in-docker-or-mount-the-socket-in-the-container]Click to see the guide[/link]", - ) - - return offenders - - -def disallow_host_path_or_make_it_read_only( - namespaced_resources: NamespacedResources, -): - offenders = [] - + objectsList.append(pod) + + if objectsList: + status = False + message = "Container socket mounts are not allowed" + else: + 
status = True + message = "There are no Container socket mounted" + + return (status, message, objectsList, objectType) + + + +def disallow_host_path_or_make_it_read_only(namespaced_resources: NamespacedResources): + + status = None + message = "" + objectType = "Pod" + objectsList = [] + + for pod in namespaced_resources.pods: for volume in pod.spec.volumes: if volume.host_path: - offenders.append(pod) - - if offenders: - print_pod_table( - offenders, - "[red]Restrict the use of hostpath.", - "[link=https://aws.github.io/aws-eks-best-practices/security/docs/pods/#restrict-the-use-of-hostpath-or-if-hostpath-is-necessary-restrict-which-prefixes-can-be-used-and-configure-the-volume-as-read-only]Click to see the guide[/link]", - ) - - return offenders - - -def set_requests_limits_for_containers( - namespaced_resources: NamespacedResources, -): - offenders = [] + objectsList.append(pod) + + if objectsList: + status = False + message = "Restrict the use of hostpath" + else: + status = True + message = "hostpath are not mounted" + + return (status, message, objectsList, objectType) + + +def set_requests_limits_for_containers(namespaced_resources: NamespacedResources): + + status = None + message = "" + objectType = "Pod" + objectsList = [] for pod in namespaced_resources.pods: for container in pod.spec.containers: if not ( container.resources.limits and container.resources.requests ): - offenders.append(pod) - - if offenders: - print_pod_table( - offenders, - "[red]Set requests and limits for each container.", - "[link=https://aws.github.io/aws-eks-best-practices/security/docs/pods/#set-requests-and-limits-for-each-container-to-avoid-resource-contention-and-dos-attacks]Click to see the guide[/link]", - ) - - return offenders - + objectsList.append(pod) + + if objectsList: + status = False + message = "Set requests and limits for each container." 
+ else: + status = True + message = "requests and limits are set for each containers" + + return (status, message, objectsList, objectType) + + def disallow_privilege_escalation(namespaced_resources: NamespacedResources): - offenders = [] - + + status = None + message = "" + objectType = "Pod" + objectsList = [] + for pod in namespaced_resources.pods: for container in pod.spec.containers: if ( container.security_context and container.security_context.allow_privilege_escalation ): - offenders.append(pod) - - if offenders: - print_pod_table( - offenders, - "[red]Set allowPrivilegeEscalation in the pod spec to false", - "[link=https://aws.github.io/aws-eks-best-practices/security/docs/pods/#do-not-allow-privileged-escalation]Click to see the guide[/link]", - ) - - return offenders - - -def check_read_only_root_file_system( - namespaced_resources: NamespacedResources, -): - offenders = [] + objectsList.append(pod) + + if objectsList: + status = False + message = "Set allowPrivilegeEscalation in the pod spec to false" + else: + status = True + message = "allowPrivilegeEscalation in the pod spec is to true" + + return (status, message, objectsList, objectType) + + + +def check_read_only_root_file_system(namespaced_resources: NamespacedResources): + + status = None + message = "" + objectType = "Pod" + objectsList = [] + for pod in namespaced_resources.pods: for container in pod.spec.containers: if ( container.security_context and not container.security_context.read_only_root_filesystem ): - offenders.append(pod) - if offenders: - print_pod_table( - offenders, - "[red]Configure your images with a read-only root file system", - "[link=https://aws.github.io/aws-eks-best-practices/security/docs/pods/#configure-your-images-with-read-only-root-file-system]Click to see the guide[/link]", - ) - - return offenders + objectsList.append(pod) + + if objectsList: + status = False + message = "Configure your images with a read-only root file system" + else: + status = True + message = 
"Images are configured with a read-only root file system" + + return (status, message, objectsList, objectType) + \ No newline at end of file diff --git a/hardeneks/namespace_based/security/runtime_security.py b/hardeneks/namespace_based/security/runtime_security.py index be9fd71..8214f1c 100644 --- a/hardeneks/namespace_based/security/runtime_security.py +++ b/hardeneks/namespace_based/security/runtime_security.py @@ -1,11 +1,15 @@ -from hardeneks import console +from rich import print + from ...resources import NamespacedResources -from ...report import print_pod_table def disallow_linux_capabilities(namespaced_resources: NamespacedResources): - offenders = [] - + + status = None + message = "" + objectType = "Pod" + objectsList = [] + allowed_list = [ "AUDIT_WRITE", "CHOWN", @@ -21,25 +25,27 @@ def disallow_linux_capabilities(namespaced_resources: NamespacedResources): "SETUID", "SYS_CHROOT", ] + for pod in namespaced_resources.pods: + for container in pod.spec.containers: if ( container.security_context and container.security_context.capabilities ): - capabilities = set(container.security_context.capabilities.add) - if not capabilities.issubset(set(allowed_list)): - offenders.append(pod) - - if offenders: - console.print() - console.print(allowed_list) - print_pod_table( - offenders, - """ - [red]Capabilities beyond the allowed list are disallowed. 
- """, - "[link=https://aws.github.io/aws-eks-best-practices/security/docs/runtime/#consider-adddropping-linux-capabilities-before-writing-seccomp-policies]Click to see the guide[/link]", - ) - - return offenders + if container.security_context.capabilities.add: + capabilities = set(container.security_context.capabilities.add) + if not capabilities.issubset(set(allowed_list)): + objectsList.append(pod) + + if objectsList: + status = False + message = "Capabilities beyond the allowed list are allowed" + else: + status = True + message = "Capabilities beyond the allowed list are disallowed" + + return (status, message, objectsList, objectType) + + + diff --git a/hardeneks/report.py b/hardeneks/report.py index b175a9d..1622a2c 100644 --- a/hardeneks/report.py +++ b/hardeneks/report.py @@ -1,170 +1,273 @@ from rich.table import Table from rich.panel import Panel - -from hardeneks import console - - -def print_role_table(roles, message, docs, type): - table = Table() - - table.add_column("Kind", style="cyan") - table.add_column("Namespace", style="magenta") - table.add_column("Name", style="green") - - for role in roles: - table.add_row(type, role.metadata.namespace, role.metadata.name) - - console.print(Panel(table, title=message, subtitle=docs)) - console.print() - - -def print_instance_metadata_table(instances, message, docs): - table = Table() - - table.add_column("InstanceId", style="cyan") - table.add_column("HttpPutResponseHopLimit", style="magenta") - - for instance in instances: - table.add_row( - instance["Instances"][0]["InstanceId"], - str( - instance["Instances"][0]["MetadataOptions"][ - "HttpPutResponseHopLimit" - ] - ), - ) - - console.print(Panel(table, title=message, subtitle=docs)) - console.print() - - -def print_instance_public_table(instances, message, docs): - table = Table() - - table.add_column("InstanceId", style="cyan") - table.add_column("PublicDnsName", style="magenta") - - for instance in instances: - table.add_row( - 
# Mapping of check status -> rich color markup used for report rendering.
colorMap = {
    True: "green",
    False: "red",
    None: "yellow"
}

# Mapping of check status -> human-readable verdict shown in report tables.
statusMap = {
    True: "PASS",
    False: "FAIL",
    None: "NA"
}


# Rule-function name -> documentation URL used as the panel subtitle link.
# Keys must match the rule function names exactly (print_console_message does
# ruledocsLinkMap[rule] with no fallback, so a missing key raises KeyError).
ruledocsLinkMap = {

    # top-level pillar links
    "aws-eks-best-practices": "https://aws.github.io/aws-eks-best-practices/",
    # NOTE(review): the "cluster-autoscaling" pillar link points at the
    # karpenter guide, not the cluster-autoscaling one — confirm intended.
    "cluster-autoscaling": "https://aws.github.io/aws-eks-best-practices/karpenter/",
    "networking": "https://aws.github.io/aws-eks-best-practices/networking/index/",
    "security": "https://aws.github.io/aws-eks-best-practices/security/docs/",
    "reliability": "https://aws.github.io/aws-eks-best-practices/reliability/docs/",

    # Cluster Autoscaling
    "check_any_cluster_autoscaler_exists": "https://aws.github.io/aws-eks-best-practices/cluster-autoscaling/",
    "ensure_cluster_autoscaler_and_cluster_versions_match": "https://aws.github.io/aws-eks-best-practices/cluster-autoscaling/",
    "ensure_cluster_autoscaler_has_autodiscovery_mode": "https://aws.github.io/aws-eks-best-practices/cluster-autoscaling/",
    "ensure_cluster_autoscaler_has_three_replicas": "https://aws.github.io/aws-eks-best-practices/cluster-autoscaling/",
    "use_separate_iam_role_for_cluster_autoscaler": "https://aws.github.io/aws-eks-best-practices/cluster-autoscaling/#employ-least-privileged-access-to-the-iam-role",
    "employ_least_privileged_access_to_the_IAM_role": "https://aws.github.io/aws-eks-best-practices/cluster-autoscaling/#employ-least-privileged-access-to-the-iam-role",
    "use_managed_nodegroups": "https://aws.github.io/aws-eks-best-practices/cluster-autoscaling/",
    "ensure_uniform_instance_types_in_nodegroups": "https://aws.github.io/aws-eks-best-practices/cluster-autoscaling/",

    # Networking
    "consider_public_and_private_mode": "https://aws.github.io/aws-eks-best-practices/networking/subnets/#consider-public-and-private-mode-for-cluster-endpoint",
    "deploy_vpc_cni_managed_add_on": "https://aws.github.io/aws-eks-best-practices/networking/vpc-cni/#deploy-vpc-cni-managed-add-on",
    "use_separate_iam_role_for_cni": "https://aws.github.io/aws-eks-best-practices/networking/vpc-cni/#use-separate-iam-role-for-cni",
    # NOTE(review): "adress" typo is in the rule-function name itself; the key
    # must keep matching that name, so do not "fix" it here alone.
    "monitor_IP_adress_inventory": "https://aws.github.io/aws-eks-best-practices/networking/vpc-cni/#monitor-ip-address-inventory",
    "use_dedicated_and_small_subnets_for_cluster_creation": "https://docs.aws.amazon.com/eks/latest/userguide/network_reqs.html",
    "use_prefix_mode": "https://aws.github.io/aws-eks-best-practices/networking/prefix-mode/#use-prefix-mode-when",
    "use_aws_lb_controller": "https://aws.github.io/aws-eks-best-practices/networking/loadbalancing/loadbalancing/",
    "use_IP_target_type_service_load_balancers": "https://aws.github.io/aws-eks-best-practices/networking/loadbalancing/loadbalancing/#use-ip-target-type-load-balancers",
    "use_IP_target_type_ingress_load_balancers": "https://aws.github.io/aws-eks-best-practices/networking/loadbalancing/loadbalancing/#use-ip-target-type-load-balancers",
    "utilize_pod_readiness_gates": "https://aws.github.io/aws-eks-best-practices/networking/loadbalancing/loadbalancing/#utilize-pod-readiness-gates",
    "ensure_pods_deregister_from_LB_before_termination": "https://aws.github.io/aws-eks-best-practices/networking/loadbalancing/loadbalancing/#ensure-pods-are-deregistered-from-load-balancers-before-termination",
    "configure_pod_disruption_budget": "https://aws.github.io/aws-eks-best-practices/networking/loadbalancing/loadbalancing/#configure-a-pod-disruption-budget",

    # reliability
    # reliability - cluster level
    "check_metrics_server_is_running": "https://aws.github.io/aws-eks-best-practices/reliability/docs/application/#run-kubernetes-metrics-server",
    "check_vertical_pod_autoscaler_exists": "https://aws.github.io/aws-eks-best-practices/reliability/docs/application/#vertical-pod-autoscaler-vpa",

    # reliability - namespace level
    "check_horizontal_pod_autoscaling_exists": "https://aws.github.io/aws-eks-best-practices/reliability/docs/application/#horizontal-pod-autoscaler-hpa",
    "schedule_replicas_across_nodes": "https://aws.github.io/aws-eks-best-practices/reliability/docs/application/#schedule-replicas-across-nodes",
    "run_multiple_replicas": "https://aws.github.io/aws-eks-best-practices/reliability/docs/application/#run-multiple-replicas",
    "avoid_running_singleton_pods": "https://aws.github.io/aws-eks-best-practices/reliability/docs/application/#avoid-running-singleton-pods",
    "check_readiness_probes": "https://aws.github.io/aws-eks-best-practices/reliability/docs/application/#use-readiness-probe-to-detect-partial-unavailability",
    "check_liveness_probes": "https://aws.github.io/aws-eks-best-practices/reliability/docs/application/#use-liveness-probe-to-remove-unhealthy-pods",

    # security

    # iam - cluster level
    "disable_anonymous_access_for_cluster_roles": "https://aws.github.io/aws-eks-best-practices/security/docs/iam/#review-and-revoke-unnecessary-anonymous-access",
    "check_endpoint_public_access": "https://aws.github.io/aws-eks-best-practices/security/docs/iam/#make-the-eks-cluster-endpoint-private",
    "check_aws_node_daemonset_service_account": "https://aws.github.io/aws-eks-best-practices/security/docs/iam/#update-the-aws-node-daemonset-to-use-irsa",
    "check_access_to_instance_profile": "https://aws.github.io/aws-eks-best-practices/security/docs/iam/#when-your-application-needs-access-to-imds-use-imdsv2-and-increase-the-hop-limit-on-ec2-instances-to-2",
    "restrict_wildcard_for_cluster_roles": "https://aws.github.io/aws-eks-best-practices/security/docs/iam/#employ-least-privileged-access-when-creating-rolebindings-and-clusterrolebindings",

    # iam - namespace level
    "disable_anonymous_access_for_roles": "https://aws.github.io/aws-eks-best-practices/security/docs/iam/#review-and-revoke-unnecessary-anonymous-access",
    "restrict_wildcard_for_roles": "https://aws.github.io/aws-eks-best-practices/security/docs/iam/#employ-least-privileged-access-when-creating-rolebindings-and-clusterrolebindings",
    "disable_service_account_token_mounts": "https://aws.github.io/aws-eks-best-practices/security/docs/iam/#disable-auto-mounting-of-service-account-tokens",
    "disable_run_as_root_user": "https://aws.github.io/aws-eks-best-practices/security/docs/iam/#run-the-application-as-a-non-root-user",
    "use_dedicated_service_accounts_for_each_deployment": "https://aws.github.io/aws-eks-best-practices/security/docs/iam/#use-dedicated-service-accounts-for-each-application",
    "use_dedicated_service_accounts_for_each_stateful_set": "https://aws.github.io/aws-eks-best-practices/security/docs/iam/#use-dedicated-service-accounts-for-each-application",
    "use_dedicated_service_accounts_for_each_daemon_set": "https://aws.github.io/aws-eks-best-practices/security/docs/iam/#use-dedicated-service-accounts-for-each-application",

    # multi_tenancy
    "ensure_namespace_quotas_exist": "https://aws.github.io/aws-eks-best-practices/security/docs/multitenancy/#namespaces",

    # detective_controls
    "check_logs_are_enabled": "https://aws.github.io/aws-eks-best-practices/security/docs/detective/#enable-audit-logs",

    # network_security - cluster level
    "check_vpc_flow_logs": "https://aws.github.io/aws-eks-best-practices/security/docs/network/#log-network-traffic-metadata",
    "check_awspca_exists": "https://aws.github.io/aws-eks-best-practices/security/docs/network/#acm-private-ca-with-cert-manager",
    "check_default_deny_policy_exists": "https://aws.github.io/aws-eks-best-practices/security/docs/network/#create-a-default-deny-policy",

    # network_security - namespace level
    "use_encryption_with_aws_load_balancers": "https://aws.github.io/aws-eks-best-practices/security/docs/network/#use-encryption-with-aws-load-balancers",

    # encryption_secrets - cluster level
    "use_encryption_with_ebs": "https://aws.github.io/aws-eks-best-practices/security/docs/data/#encryption-at-rest",
    "use_encryption_with_efs": "https://aws.github.io/aws-eks-best-practices/security/docs/data/#encryption-at-rest",
    "use_efs_access_points": "https://aws.github.io/aws-eks-best-practices/security/docs/data/#use-efs-access-points-to-simplify-access-to-shared-datasets",

    # encryption_secrets - namespace level
    # (was mislabeled "cluster level" — disallow_secrets_from_env_vars is a
    # namespace-scoped rule)
    "disallow_secrets_from_env_vars": "https://aws.github.io/aws-eks-best-practices/security/docs/data/#use-volume-mounts-instead-of-environment-variables",

    # infrastructure_security
    "deploy_workers_onto_private_subnets": "https://aws.github.io/aws-eks-best-practices/security/docs/hosts/#deploy-workers-onto-private-subnets",
    "make_sure_inspector_is_enabled": "https://aws.github.io/aws-eks-best-practices/security/docs/hosts/#run-amazon-inspector-to-assess-hosts-for-exposure-vulnerabilities-and-deviations-from-best-practices",

    # pod_security - cluster level
    "ensure_namespace_psa_exist": "https://aws.github.io/aws-eks-best-practices/security/docs/pods/#pod-security-standards-pss-and-pod-security-admission-psa",

    # pod_security - namespace level
    "disallow_container_socket_mount": "https://aws.github.io/aws-eks-best-practices/security/docs/pods/#never-run-docker-in-docker-or-mount-the-socket-in-the-container",
    "disallow_host_path_or_make_it_read_only": "https://aws.github.io/aws-eks-best-practices/security/docs/pods/#restrict-the-use-of-hostpath-or-if-hostpath-is-necessary-restrict-which-prefixes-can-be-used-and-configure-the-volume-as-read-only",
    "set_requests_limits_for_containers": "https://aws.github.io/aws-eks-best-practices/security/docs/pods/#set-requests-and-limits-for-each-container-to-avoid-resource-contention-and-dos-attacks",
    "disallow_privilege_escalation": "https://aws.github.io/aws-eks-best-practices/security/docs/pods/#do-not-allow-privileged-escalation",
    "check_read_only_root_file_system": "https://aws.github.io/aws-eks-best-practices/security/docs/pods/#configure-your-images-with-read-only-root-file-system",

    # image_security
    "use_immutable_tags_with_ecr": "https://aws.github.io/aws-eks-best-practices/security/docs/image/#use-immutable-tags-with-ecr",

    # runtime_security - namespace level
    "disallow_linux_capabilities": "https://aws.github.io/aws-eks-best-practices/security/docs/runtime/#consider-adddropping-linux-capabilities-before-writing-seccomp-policies"

}
# Shared rich console used for the trailing blank line after each panel.
console = Console()

def print_console_message(ret, rule, message, objectsList, kind):
    """Render one rule result (or a summary report) as a rich Panel.

    Args:
        ret: rule status (True/False/None) — indexes colorMap for markup.
        rule: rule-function name; must be a key of ruledocsLinkMap (a missing
            key raises KeyError before anything is printed).
        message: rule message; for kind == "Report" it is repurposed as the
            namespace name (None means cluster-level report).
        objectsList: offending objects / report rows; shape depends on kind.
        kind: selects the table layout ("IP", "instanceMetadata",
            "PublicInstances", "CIDR", "Repository", "PersistentVolume",
            "StorageClass", name-list kinds, "Report", "ClusterData", or any
            other value for the generic Kind/Namespace/Name table).

    When objectsList is empty (or kind is falsy), only a colored message
    panel is printed.
    """
    color = colorMap[ret]
    colorStr = "[" + color + "]"
    ruleStr = "Rule: " + rule
    titleMessage = colorStr + ruleStr

    docs_link = ruledocsLinkMap[rule]
    docs_url = "[link=" + docs_link + "]Click to see the guide[/link]"

    if objectsList and kind:
        table = Table()
        if kind == "IP":
            # Subnet IP inventory, with a synthetic total row at the end.
            table.add_column("SubnetId", style="cyan")
            table.add_column("CidrBlock", style="magenta")
            table.add_column("AvailableIpAddressCount", style="green")
            totalAvailableIpAddressCount = 0
            for objectData in objectsList:
                table.add_row(objectData['SubnetId'], objectData['CidrBlock'], str(objectData['AvailableIpAddressCount']))
                totalAvailableIpAddressCount += objectData['AvailableIpAddressCount']
            table.add_row("", "[green]totalAvailableIpAddressCount", str(totalAvailableIpAddressCount))
            # NOTE(review): unlike every other branch, "IP" never calls
            # print(Panel(...)), so this table is built but not displayed —
            # looks like a missing print; confirm and fix upstream.
        elif kind == "instanceMetadata":
            table.add_column("InstanceId", style="cyan")
            table.add_column("HttpPutResponseHopLimit", style="magenta")

            # objectsList entries are EC2 reservation dicts (boto3 shape).
            for instance in objectsList:
                table.add_row(
                    instance["Instances"][0]["InstanceId"],
                    str(
                        instance["Instances"][0]["MetadataOptions"][
                            "HttpPutResponseHopLimit"
                        ]
                    ),
                )
            print(Panel(table, title=titleMessage, subtitle=docs_url))
        elif kind == "PublicInstances":
            table.add_column("InstanceId", style="cyan")
            table.add_column("PublicDnsName", style="magenta")

            for instance in objectsList:
                table.add_row(
                    instance["Instances"][0]["InstanceId"],
                    str(instance["Instances"][0]["PublicDnsName"]),
                )
            print(Panel(table, title=titleMessage, subtitle=docs_url))

        elif kind == "CIDR":
            table.add_column("SubnetId", style="cyan")
            table.add_column("CidrBlock", style="magenta")
            for objectData in objectsList:
                table.add_row(objectData['SubnetId'], objectData['CidrBlock'])

            print(Panel(table, title=titleMessage, subtitle=docs_url))
        elif kind == "Repository":
            table.add_column("Repository", style="cyan")
            table.add_column("imageTagMutability", style="magenta")

            for objectData in objectsList:
                table.add_row(objectData['repositoryName'], objectData['imageTagMutability'])

            print(Panel(table, title=titleMessage, subtitle=docs_url))
        elif kind == "PersistentVolume" or kind == "StorageClass":
            # Both kinds share one layout; "false" is hard-coded because only
            # unencrypted objects reach this table.
            table.add_column("PersistentVolume", style="cyan")
            table.add_column("Encrypted", style="magenta")

            for objectData in objectsList:
                table.add_row(objectData.metadata.name, "false")

            print(Panel(table, title=titleMessage, subtitle=docs_url))

        elif kind in ["Namespace", "ClusterRole", "ClusterRoleBinding", "RoleBinding"]:
            # Plain name list; objectsList holds strings, not API objects.
            table.add_column(kind, style="cyan")
            for objectData in objectsList:
                table.add_row(objectData)

            print(Panel(table, title=titleMessage, subtitle=docs_url))
        elif kind == "Report":
            # Summary table: objectsList maps pillar name -> list of
            # {'rule', 'ret', 'message'} dicts; 'rule' here is the pillar.
            totalNumOfRules = len(objectsList[rule])
            table.add_column("S.No", style="cyan")
            table.add_column("Rule", style="magenta")
            table.add_column("Status", style="green")
            table.add_column("Message", style="yellow")

            for i, objectData in enumerate(objectsList[rule]):
                color = colorMap[objectData['ret']]
                colorStr = "[" + color + "]"
                table.add_row(colorStr+str(i+1)+"/"+str(totalNumOfRules), colorStr+objectData['rule'], colorStr+statusMap[objectData['ret']], colorStr+objectData['message'])

            # NOTE(review): colorStr leaks from the last loop iteration into
            # the title color below — presumably intentional "overall" color,
            # but confirm.
            if message is None:
                titleMessage = colorStr + "Hardeneks Report for Cluster for Pillar: {}".format(rule)
            else:
                titleMessage = colorStr + "Hardeneks Report for Namespace : {} for Pillar: {}".format(message, rule)

            print(Panel(renderable=table, title=titleMessage, subtitle=docs_url))
        elif kind == "ClusterData":
            # objectsList rows are (color, description, details, comments).
            totalNumOfRules = len(objectsList)
            table.add_column("S.No", style="cyan")
            table.add_column("Description", style="magenta")
            table.add_column("Details", style="green")
            table.add_column("Comments", style="yellow")

            for i, objectData in enumerate(objectsList):
                color = objectData[0]
                colorStr = "[" + color + "]"
                table.add_row(colorStr+str(i+1)+"/"+str(totalNumOfRules), colorStr+objectData[1], colorStr+objectData[2], colorStr+objectData[3])

            titleMessage = colorStr + "EKS Cluster Details"
            print(Panel(renderable=table, title=titleMessage, subtitle=docs_url))

        else:
            # Generic fallback: Kubernetes API objects with .metadata.
            table.add_column("Kind", style="cyan")
            table.add_column("Namespace", style="magenta")
            table.add_column("Name", style="green")
            for objectData in objectsList:
                table.add_row(kind, objectData.metadata.namespace, objectData.metadata.name)

            print(Panel(renderable=table, title=titleMessage, subtitle=docs_url))
    else:
        descriptionMessage = colorStr + message
        print(Panel(renderable=descriptionMessage, title=titleMessage, subtitle=docs_url))

    console.print()
Table() - - table.add_column("PersistentVolume", style="cyan") - table.add_column("Encrypted", style="magenta") - - for persistent_volume in persistent_volumes: - table.add_row(persistent_volume.metadata.name, "false") - - console.print(Panel(table, title=message, subtitle=docs)) - console.print() diff --git a/hardeneks/resources.py b/hardeneks/resources.py index 3097aab..3a7937d 100644 --- a/hardeneks/resources.py +++ b/hardeneks/resources.py @@ -1,14 +1,20 @@ -from kubernetes import client +from kubernetes import client, config +import sys class Resources: - def __init__(self, region, context, cluster, namespaces): + def __init__(self, region, context, cluster, namespaces, debug): self.region = region self.context = context self.cluster = cluster self.namespaces = namespaces + self.debug = debug + #self.api_client_obj = config.new_client_from_config(context=self.context) def set_resources(self): + + #api_client_obj=config.new_client_from_config(context=self.context) + self.cluster_roles = ( client.RbacAuthorizationV1Api().list_cluster_role().items ) @@ -24,19 +30,27 @@ def set_resources(self): .items ) self.storage_classes = client.StorageV1Api().list_storage_class().items + self.persistent_volumes = ( client.CoreV1Api().list_persistent_volume().items ) - + + self.namespaceObjList = ( + client.CoreV1Api().list_namespace().items + ) class NamespacedResources: - def __init__(self, region, context, cluster, namespace): + def __init__(self, region, context, cluster, namespace, debug): self.namespace = namespace self.region = region self.cluster = cluster self.context = context + self.debug = debug def set_resources(self): + + #api_client_obj=config.new_client_from_config(context=self.context) + self.roles = ( client.RbacAuthorizationV1Api() .list_namespaced_role(self.namespace)