diff --git a/pkg/containerprofilemanager/v1/lifecycle.go b/pkg/containerprofilemanager/v1/lifecycle.go index 8e40fd870..c16d41656 100644 --- a/pkg/containerprofilemanager/v1/lifecycle.go +++ b/pkg/containerprofilemanager/v1/lifecycle.go @@ -18,8 +18,7 @@ func (cpm *ContainerProfileManager) ContainerCallback(notif containercollection. switch notif.Type { case containercollection.EventTypeAddContainer: if utils.IsHostContainer(notif.Container) { - logger.L().Debug("adding host container to the container profile manager", - helpers.String("containerID", notif.Container.Runtime.ContainerID)) + return } if cpm.cfg.IgnoreContainer(notif.Container.K8s.Namespace, notif.Container.K8s.PodName, notif.Container.K8s.PodLabels) { return @@ -93,14 +92,17 @@ func (cpm *ContainerProfileManager) addContainer(container *containercollection. return fmt.Errorf("failed to get shared data for container %s: %w", containerID, err) } - // Check if the container should use a user-defined profile + // Check if the container should use a user-defined profile. + // When both an ApplicationProfile and a NetworkNeighborhood are + // user-provided, skip ALL recording — there is nothing to learn. 
if sharedData.UserDefinedProfile != "" { logger.L().Debug("ignoring container with a user-defined profile", helpers.String("containerID", containerID), helpers.String("containerName", container.Runtime.ContainerName), helpers.String("podName", container.K8s.PodName), helpers.String("namespace", container.K8s.Namespace), - helpers.String("userDefinedProfile", sharedData.UserDefinedProfile)) + helpers.String("userDefinedProfile", sharedData.UserDefinedProfile), + helpers.String("userDefinedNetwork", sharedData.UserDefinedNetwork)) // Close ready channel before removing entry if entry, exists := cpm.getContainerEntry(containerID); exists { entry.readyOnce.Do(func() { diff --git a/pkg/objectcache/networkneighborhoodcache/networkneighborhoodcache.go b/pkg/objectcache/networkneighborhoodcache/networkneighborhoodcache.go index 254f6ae64..425f44c1d 100644 --- a/pkg/objectcache/networkneighborhoodcache/networkneighborhoodcache.go +++ b/pkg/objectcache/networkneighborhoodcache/networkneighborhoodcache.go @@ -30,6 +30,7 @@ type ContainerInfo struct { InstanceTemplateHash string Namespace string SeenContainerFromTheStart bool // True if container was seen from the start + UserDefinedNetwork string // Non-empty when pod has a user-defined NN label } // NetworkNeighborhoodCacheImpl implements the NetworkNeighborhoodCache interface @@ -204,6 +205,13 @@ func (nnc *NetworkNeighborhoodCacheImpl) updateAllNetworkNeighborhoods(ctx conte continue } + // Never overwrite a user-defined network neighborhood with an + // auto-learned one. Check if any container for this workload + // has a user-defined-network label. 
+ if nnc.workloadHasUserDefinedNetwork(workloadID) { + continue + } + // If we have a "new" container (seen from start) and the network neighborhood is partial, // skip it - we don't want to use partial profiles for containers we're tracking from the start if hasNewContainer && nn.Annotations[helpersv1.CompletionMetadataKey] == helpersv1.Partial { @@ -419,11 +427,48 @@ func (nnc *NetworkNeighborhoodCacheImpl) addContainer(container *containercollec InstanceTemplateHash: sharedData.InstanceID.GetTemplateHash(), Namespace: container.K8s.Namespace, SeenContainerFromTheStart: !sharedData.PreRunningContainer, + UserDefinedNetwork: sharedData.UserDefinedNetwork, } // Add to container info map nnc.containerIDToInfo.Set(containerID, containerInfo) + // If the container has a user-defined network neighborhood, load it + // directly into the cache — skip learning entirely for this workload. + if sharedData.UserDefinedNetwork != "" { + fullNN, err := nnc.storageClient.GetNetworkNeighborhood( + container.K8s.Namespace, sharedData.UserDefinedNetwork) + if err != nil { + logger.L().Error("failed to get user-defined network neighborhood", + helpers.String("containerID", containerID), + helpers.String("workloadID", workloadID), + helpers.String("namespace", container.K8s.Namespace), + helpers.String("nnName", sharedData.UserDefinedNetwork), + helpers.Error(err)) + profileState := &objectcache.ProfileState{ + Error: err, + } + nnc.workloadIDToProfileState.Set(workloadID, profileState) + return nil + } + + nnc.workloadIDToNetworkNeighborhood.Set(workloadID, fullNN) + profileState := &objectcache.ProfileState{ + Completion: helpersv1.Full, + Status: helpersv1.Completed, + Name: fullNN.Name, + Error: nil, + } + nnc.workloadIDToProfileState.Set(workloadID, profileState) + + logger.L().Debug("added user-defined network neighborhood to cache", + helpers.String("containerID", containerID), + helpers.String("workloadID", workloadID), + helpers.String("namespace", container.K8s.Namespace), 
+ helpers.String("nnName", sharedData.UserDefinedNetwork)) + return nil + } + // Create workload ID to state mapping if _, exists := nnc.workloadIDToProfileState.Load(workloadID); !exists { nnc.workloadIDToProfileState.Set(workloadID, nil) @@ -718,6 +763,20 @@ func (nnc *NetworkNeighborhoodCacheImpl) mergeNetworkPorts(normalPorts, userPort return normalPorts } +// workloadHasUserDefinedNetwork returns true if any container tracked for +// the given workloadID has a user-defined-network label set. +func (nnc *NetworkNeighborhoodCacheImpl) workloadHasUserDefinedNetwork(workloadID string) bool { + found := false + nnc.containerIDToInfo.Range(func(_ string, info *ContainerInfo) bool { + if info.WorkloadID == workloadID && info.UserDefinedNetwork != "" { + found = true + return false // stop iteration + } + return true + }) + return found +} + func isUserManagedNN(nn *v1beta1.NetworkNeighborhood) bool { return nn.Annotations != nil && nn.Annotations[helpersv1.ManagedByMetadataKey] == helpersv1.ManagedByUserValue && diff --git a/pkg/objectcache/shared_container_data.go b/pkg/objectcache/shared_container_data.go index cf0e7d2b4..87952dbe4 100644 --- a/pkg/objectcache/shared_container_data.go +++ b/pkg/objectcache/shared_container_data.go @@ -20,6 +20,11 @@ import ( "k8s.io/apimachinery/pkg/util/validation" ) +// UserDefinedNetworkMetadataKey is the pod label that references a +// user-provided NetworkNeighborhood resource by name (analogous to +// helpersv1.UserDefinedProfileMetadataKey for ApplicationProfiles). 
+const UserDefinedNetworkMetadataKey = "kubescape.io/user-defined-network" + type ContainerType int const ( @@ -82,6 +87,7 @@ type WatchedContainerData struct { PreviousReportTimestamp time.Time CurrentReportTimestamp time.Time UserDefinedProfile string + UserDefinedNetwork string } type ContainerInfo struct { @@ -167,6 +173,16 @@ func (watchedContainer *WatchedContainerData) SetContainerInfo(wl workloadinterf watchedContainer.UserDefinedProfile = userDefinedProfile } } + // check for user defined network neighborhood + if userDefinedNetwork, ok := labels[UserDefinedNetworkMetadataKey]; ok { + if userDefinedNetwork != "" { + logger.L().Info("container has a user defined network neighborhood", + helpers.String("network", userDefinedNetwork), + helpers.String("container", containerName), + helpers.String("workload", wl.GetName())) + watchedContainer.UserDefinedNetwork = userDefinedNetwork + } + } podSpec, err := wl.GetPodSpec() if err != nil { return fmt.Errorf("failed to get pod spec: %w", err) diff --git a/tests/chart/templates/node-agent/default-rules.yaml b/tests/chart/templates/node-agent/default-rules.yaml index 55fd1b527..1a545524c 100644 --- a/tests/chart/templates/node-agent/default-rules.yaml +++ b/tests/chart/templates/node-agent/default-rules.yaml @@ -122,7 +122,7 @@ spec: profileDependency: 0 severity: 1 supportPolicy: false - isTriggerAlert: false + isTriggerAlert: true mitreTactic: "TA0011" mitreTechnique: "T1071.004" tags: @@ -245,7 +245,7 @@ spec: - "anomaly" - "applicationprofile" - name: "Unexpected Egress Network Traffic" - enabled: false + enabled: true id: "R0011" description: "Detecting unexpected egress network traffic that is not whitelisted by application profile." 
expressions: @@ -257,7 +257,7 @@ spec: profileDependency: 0 severity: 5 # Medium supportPolicy: false - isTriggerAlert: false + isTriggerAlert: true mitreTactic: "TA0010" mitreTechnique: "T1041" tags: diff --git a/tests/component_test.go b/tests/component_test.go index 380f32576..9f9ed272e 100644 --- a/tests/component_test.go +++ b/tests/component_test.go @@ -11,6 +11,7 @@ import ( "slices" "sort" "strconv" + "strings" "testing" "time" @@ -1565,3 +1566,532 @@ func Test_24_ProcessTreeDepthTest(t *testing.T) { t.Logf("Found alerts for the process tree depth: %v", alerts) } + +// Test_27_ApplicationProfileOpens tests that the dynamic path matching in +// application profiles works correctly for both recorded (auto-learned) +// profiles and user-defined profiles. +// +// Path matching symbols: +// +// ⋯ (U+22EF DynamicIdentifier) — matches exactly ONE path segment +// * (WildcardIdentifier) — matches ZERO or more path segments +// 0 (in endpoints) — wildcard port (any port) +// +// R0002 "Files Access Anomalies in container" fires when a file is opened +// under a monitored prefix (/etc/, /var/log/, …) and the path was NOT +// recorded in the application profile. 
+func Test_27_ApplicationProfileOpens(t *testing.T) { + start := time.Now() + defer tearDownTest(t, start) + + const ruleName = "Files Access Anomalies in container" + const profileName = "nginx-regex-profile" + + // --- result tracking for end-of-test summary --- + type subtestResult struct { + name string + profilePath string + filePath string + expectAlert bool + passed bool + detail string + } + var results []subtestResult + addResult := func(name, profilePath, filePath string, expectAlert, passed bool, detail string) { + results = append(results, subtestResult{name, profilePath, filePath, expectAlert, passed, detail}) + } + defer func() { + t.Log("\n========== Test_27 Summary ==========") + anyFailed := false + for _, r := range results { + status := "PASS" + if !r.passed { + status = "FAIL" + anyFailed = true + } + expect := "expect alert" + if !r.expectAlert { + expect = "expect NO alert" + } + t.Logf(" [%s] %-35s profile=%-25s file=%-25s %s", status, r.name, r.profilePath, r.filePath, expect) + if !r.passed { + t.Logf(" -> %s", r.detail) + } + } + if !anyFailed { + t.Log(" All subtests passed.") + } + t.Log("======================================") + }() + + // deployWithProfile creates a user-defined ApplicationProfile with the + // given Opens list, deploys nginx with the kubescape.io/user-defined-profile + // label pointing at it, and waits for the pod + cache to be ready. 
+ deployWithProfile := func(t *testing.T, opens []v1beta1.OpenCalls) *testutils.TestWorkload { + t.Helper() + ns := testutils.NewRandomNamespace() + + profile := &v1beta1.ApplicationProfile{ + ObjectMeta: metav1.ObjectMeta{ + Name: profileName, + Namespace: ns.Name, + }, + Spec: v1beta1.ApplicationProfileSpec{ + Architectures: []string{"amd64"}, + Containers: []v1beta1.ApplicationProfileContainer{ + { + Name: "nginx", + Execs: []v1beta1.ExecCalls{ + {Path: "/bin/cat", Args: []string{"/bin/cat"}}, + }, + Opens: opens, + }, + }, + }, + } + + k8sClient := k8sinterface.NewKubernetesApi() + storageClient := spdxv1beta1client.NewForConfigOrDie(k8sClient.K8SConfig) + _, err := storageClient.ApplicationProfiles(ns.Name).Create( + context.Background(), profile, metav1.CreateOptions{}) + require.NoError(t, err, "create user-defined profile %q in ns %s", profileName, ns.Name) + + wl, err := testutils.NewTestWorkload(ns.Name, + path.Join(utils.CurrentDir(), "resources/nginx-user-profile-deployment.yaml")) + require.NoError(t, err, "create workload in ns %s", ns.Name) + require.NoError(t, wl.WaitForReady(80), "workload not ready in ns %s", ns.Name) + + time.Sleep(20 * time.Second) // let node-agent pick up the profile + return wl + } + + // triggerAndGetAlerts execs cat on the given path and returns the alerts. + triggerAndGetAlerts := func(t *testing.T, wl *testutils.TestWorkload, filePath string) []testutils.Alert { + t.Helper() + stdout, stderr, err := wl.ExecIntoPod([]string{"cat", filePath}, "nginx") + if err != nil { + t.Errorf("exec 'cat %s' in container nginx failed: %v (stdout=%q stderr=%q)", filePath, err, stdout, stderr) + } + time.Sleep(30 * time.Second) + alerts, err := testutils.GetAlerts(wl.Namespace) + require.NoError(t, err, "get alerts from ns %s", wl.Namespace) + return alerts + } + + // hasAlert checks whether an R0002 alert exists for comm=cat, container=nginx. 
+ hasAlert := func(alerts []testutils.Alert) bool { + for _, a := range alerts { + if a.Labels["rule_name"] == ruleName && + a.Labels["comm"] == "cat" && + a.Labels["container_name"] == "nginx" { + return true + } + } + return false + } + + // --------------------------------------------------------------- + // 1a. Recorded (auto-learned) profile must use absolute paths. + // There must be no "." in the Opens paths. + // --------------------------------------------------------------- + t.Run("recorded_profile_absolute_paths", func(t *testing.T) { + ns := testutils.NewRandomNamespace() + wl, err := testutils.NewTestWorkload(ns.Name, + path.Join(utils.CurrentDir(), "resources/nginx-deployment.yaml")) + require.NoError(t, err) + require.NoError(t, wl.WaitForReady(80)) + require.NoError(t, wl.WaitForApplicationProfileCompletion(80)) + + profile, err := wl.GetApplicationProfile() + require.NoError(t, err, "get application profile") + + passed := true + for _, container := range profile.Spec.Containers { + for _, open := range container.Opens { + if !strings.HasPrefix(open.Path, "/") { + t.Errorf("recorded path must be absolute: got %q (container %s)", open.Path, container.Name) + passed = false + } + if open.Path == "." { + t.Errorf("recorded path must not be relative dot: got %q (container %s)", open.Path, container.Name) + passed = false + } + } + } + detail := "" + if !passed { + detail = "found non-absolute or '.' paths in recorded profile" + } + addResult("recorded_profile_absolute_paths", "(auto-learned)", "(nginx startup)", false, passed, detail) + }) + + // --------------------------------------------------------------- + // 1b. User-defined profile wildcard tests. + // Each sub-test deploys nginx in its own namespace with a + // different Opens pattern and verifies R0002 behaviour. + // --------------------------------------------------------------- + + // 1b-1: Exact path — profile has the exact file => no alert. 
+ t.Run("exact_path_match", func(t *testing.T) { + profilePath := "/etc/nginx/nginx.conf" + filePath := "/etc/nginx/nginx.conf" + wl := deployWithProfile(t, []v1beta1.OpenCalls{ + {Path: profilePath, Flags: []string{"O_RDONLY"}}, + {Path: "/etc/ld.so.cache", Flags: []string{"O_RDONLY", "O_CLOEXEC"}}, // dynamic linker opens this on every exec + }) + alerts := triggerAndGetAlerts(t, wl, filePath) + got := hasAlert(alerts) + if got { + t.Errorf("expected NO R0002 alert: profile allows %q, opened %q, but alert fired", profilePath, filePath) + } + addResult("exact_path_match", profilePath, filePath, false, !got, + fmt.Sprintf("got %d alerts, expected none for cat", len(alerts))) + }) + + // 1b-2: Exact path — profile has a DIFFERENT file => alert. + t.Run("exact_path_mismatch", func(t *testing.T) { + profilePath := "/etc/nginx/nginx.conf" + filePath := "/etc/hostname" + wl := deployWithProfile(t, []v1beta1.OpenCalls{ + {Path: profilePath, Flags: []string{"O_RDONLY"}}, + }) + alerts := triggerAndGetAlerts(t, wl, filePath) + got := hasAlert(alerts) + if !got { + t.Errorf("expected R0002 alert: profile only allows %q, opened %q, but no alert", profilePath, filePath) + } + addResult("exact_path_mismatch", profilePath, filePath, true, got, + fmt.Sprintf("got %d alerts, expected at least one for cat", len(alerts))) + }) + + // 1b-3: Ellipsis ⋯ matches single segment — /etc/⋯ covers /etc/hostname. 
+ t.Run("ellipsis_single_segment_match", func(t *testing.T) { + profilePath := "/etc/" + dynamicpathdetector.DynamicIdentifier + filePath := "/etc/hostname" + wl := deployWithProfile(t, []v1beta1.OpenCalls{ + {Path: profilePath, Flags: []string{"O_RDONLY"}}, + }) + alerts := triggerAndGetAlerts(t, wl, filePath) + got := hasAlert(alerts) + if got { + t.Errorf("expected NO R0002 alert: profile %q should match %q (single segment), but alert fired", profilePath, filePath) + } + addResult("ellipsis_single_segment_match", profilePath, filePath, false, !got, + fmt.Sprintf("got %d alerts, expected none for cat", len(alerts))) + }) + + // 1b-4: Ellipsis ⋯ rejects multi-segment — /etc/⋯ does NOT cover + // /etc/nginx/nginx.conf (two segments past /etc/). + t.Run("ellipsis_rejects_multi_segment", func(t *testing.T) { + profilePath := "/etc/" + dynamicpathdetector.DynamicIdentifier + filePath := "/etc/nginx/nginx.conf" + wl := deployWithProfile(t, []v1beta1.OpenCalls{ + {Path: profilePath, Flags: []string{"O_RDONLY"}}, + }) + alerts := triggerAndGetAlerts(t, wl, filePath) + got := hasAlert(alerts) + if !got { + t.Errorf("expected R0002 alert: profile %q should NOT match %q (two segments), but no alert", profilePath, filePath) + } + addResult("ellipsis_rejects_multi_segment", profilePath, filePath, true, got, + fmt.Sprintf("got %d alerts, expected at least one for cat", len(alerts))) + }) + + // 1b-5: Wildcard * matches any depth — /etc/* covers /etc/nginx/nginx.conf. 
+ t.Run("wildcard_matches_deep_path", func(t *testing.T) { + profilePath := "/etc/*" + filePath := "/etc/nginx/nginx.conf" + wl := deployWithProfile(t, []v1beta1.OpenCalls{ + {Path: profilePath, Flags: []string{"O_RDONLY"}}, + }) + alerts := triggerAndGetAlerts(t, wl, filePath) + got := hasAlert(alerts) + if got { + t.Errorf("expected NO R0002 alert: profile %q should match %q (wildcard), but alert fired", profilePath, filePath) + } + addResult("wildcard_matches_deep_path", profilePath, filePath, false, !got, + fmt.Sprintf("got %d alerts, expected none for cat", len(alerts))) + }) + + // --------------------------------------------------------------- + // 1c. Deploy known-application-profile-wildcards.yaml (curl image) + // and verify that files under wildcard-covered opens paths + // produce no R0002 alert. + // --------------------------------------------------------------- + t.Run("wildcard_yaml_profile_allowed_opens", func(t *testing.T) { + ns := testutils.NewRandomNamespace() + wildcardProfileName := "fusioncore-profile-wildcards" + + // Create the profile matching known-application-profile-wildcards.yaml. 
+ profile := &v1beta1.ApplicationProfile{ + ObjectMeta: metav1.ObjectMeta{ + Name: wildcardProfileName, + Namespace: ns.Name, + }, + Spec: v1beta1.ApplicationProfileSpec{ + Architectures: []string{"amd64"}, + Containers: []v1beta1.ApplicationProfileContainer{ + { + Name: "curl", + ImageID: "docker.io/curlimages/curl@sha256:08e466006f0860e54fc299378de998935333e0e130a15f6f98482e9f8dab3058", + ImageTag: "docker.io/curlimages/curl:8.5.0", + Capabilities: []string{ + "CAP_CHOWN", "CAP_DAC_OVERRIDE", "CAP_DAC_READ_SEARCH", + "CAP_SETGID", "CAP_SETPCAP", "CAP_SETUID", "CAP_SYS_ADMIN", + }, + Execs: []v1beta1.ExecCalls{ + {Path: "/bin/sleep", Args: []string{"/bin/sleep", "infinity"}}, + {Path: "/bin/cat", Args: []string{"/bin/cat"}}, + {Path: "/usr/bin/curl", Args: []string{"/usr/bin/curl", "-sm2", "fusioncore.ai"}}, + }, + Opens: []v1beta1.OpenCalls{ + {Path: "/etc/*", Flags: []string{"O_RDONLY", "O_LARGEFILE", "O_CLOEXEC"}}, + {Path: "/etc/ssl/openssl.cnf", Flags: []string{"O_RDONLY", "O_LARGEFILE"}}, + {Path: "/home/*", Flags: []string{"O_RDONLY", "O_LARGEFILE"}}, + {Path: "/lib/*", Flags: []string{"O_RDONLY", "O_LARGEFILE", "O_CLOEXEC"}}, + {Path: "/usr/lib/*", Flags: []string{"O_RDONLY", "O_LARGEFILE", "O_CLOEXEC"}}, + {Path: "/usr/local/lib/*", Flags: []string{"O_RDONLY", "O_LARGEFILE", "O_CLOEXEC"}}, + {Path: "/proc/*/cgroup", Flags: []string{"O_RDONLY", "O_CLOEXEC"}}, + {Path: "/proc/*/kernel/cap_last_cap", Flags: []string{"O_RDONLY", "O_CLOEXEC"}}, + {Path: "/proc/*/mountinfo", Flags: []string{"O_RDONLY", "O_CLOEXEC"}}, + {Path: "/proc/*/task/*/fd", Flags: []string{"O_RDONLY", "O_DIRECTORY", "O_CLOEXEC"}}, + {Path: "/sys/fs/cgroup/cpu.max", Flags: []string{"O_RDONLY", "O_CLOEXEC"}}, + {Path: "/sys/kernel/mm/transparent_hugepage/hpage_pmd_size", Flags: []string{"O_RDONLY"}}, + {Path: "/7/setgroups", Flags: []string{"O_RDONLY", "O_CLOEXEC"}}, + {Path: "/runc", Flags: []string{"O_RDONLY", "O_CLOEXEC"}}, + }, + Syscalls: []string{ + "arch_prctl", "bind", "brk", 
"capget", "capset", "chdir", + "clone", "close", "close_range", "connect", "epoll_ctl", + "epoll_pwait", "execve", "exit", "exit_group", "faccessat2", + "fchown", "fcntl", "fstat", "fstatfs", "futex", "getcwd", + "getdents64", "getegid", "geteuid", "getgid", "getpeername", + "getppid", "getsockname", "getsockopt", "gettid", "getuid", + "ioctl", "membarrier", "mmap", "mprotect", "munmap", + "nanosleep", "newfstatat", "open", "openat", "openat2", + "pipe", "poll", "prctl", "read", "recvfrom", "recvmsg", + "rt_sigaction", "rt_sigprocmask", "rt_sigreturn", "sendto", + "set_tid_address", "setgid", "setgroups", "setsockopt", + "setuid", "sigaltstack", "socket", "statx", "tkill", + "unknown", "write", "writev", + }, + }, + }, + }, + } + + k8sClient := k8sinterface.NewKubernetesApi() + storageClient := spdxv1beta1client.NewForConfigOrDie(k8sClient.K8SConfig) + _, err := storageClient.ApplicationProfiles(ns.Name).Create( + context.Background(), profile, metav1.CreateOptions{}) + require.NoError(t, err, "create wildcard profile %q in ns %s", wildcardProfileName, ns.Name) + + wl, err := testutils.NewTestWorkload(ns.Name, + path.Join(utils.CurrentDir(), "resources/curl-user-profile-wildcards-deployment.yaml")) + require.NoError(t, err, "create curl workload in ns %s", ns.Name) + require.NoError(t, wl.WaitForReady(80), "curl workload not ready in ns %s", ns.Name) + + time.Sleep(20 * time.Second) // let node-agent pick up the profile + + // Cat files that are covered by the wildcard opens. 
+ allowedFiles := []string{ + "/etc/hosts", // covered by /etc/* + "/etc/resolv.conf", // covered by /etc/* + "/etc/ssl/openssl.cnf", // exact match + } + for _, f := range allowedFiles { + stdout, stderr, err := wl.ExecIntoPod([]string{"cat", f}, "curl") + if err != nil { + t.Logf("exec 'cat %s' failed: %v (stdout=%q stderr=%q)", f, err, stdout, stderr) + } + } + + time.Sleep(30 * time.Second) // let alerts propagate + + alerts, err := testutils.GetAlerts(wl.Namespace) + require.NoError(t, err, "get alerts from ns %s", wl.Namespace) + + var r0002Fired bool + for _, a := range alerts { + if a.Labels["rule_name"] == ruleName && + a.Labels["comm"] == "cat" && + a.Labels["container_name"] == "curl" { + r0002Fired = true + break + } + } + if r0002Fired { + t.Errorf("expected NO R0002 for files covered by wildcard opens, but alert fired") + } + addResult("wildcard_yaml_profile_allowed_opens", + "/etc/*, /etc/ssl/openssl.cnf", "/etc/hosts, /etc/resolv.conf, /etc/ssl/openssl.cnf", + false, !r0002Fired, + fmt.Sprintf("got R0002=%v, expected none for wildcard-covered files", r0002Fired)) + }) +} + +// Test_28_UserDefinedNetworkNeighborhood creates user-defined AP and NN, +// deploys a pod with both user-defined-profile and user-defined-network +// labels (skipping all learning), then triggers: +// - TCP egress to IPs NOT in the NN → R0011 "Unexpected Egress Network Traffic" +// - DNS lookups for domains NOT in the NN → R0005 "DNS Anomalies in container" +// +// Note: R0005 requires real resolvable domains (not NXDOMAIN), because the +// trace_dns eBPF callback drops DNS responses with 0 answers. +func Test_28_UserDefinedNetworkNeighborhood(t *testing.T) { + start := time.Now() + defer tearDownTest(t, start) + + ns := testutils.NewRandomNamespace() + k8sClient := k8sinterface.NewKubernetesApi() + storageClient := spdxv1beta1client.NewForConfigOrDie(k8sClient.K8SConfig) + + // 1. Create user-defined ApplicationProfile (skip learning). 
+ ap := &v1beta1.ApplicationProfile{ + ObjectMeta: metav1.ObjectMeta{ + Name: "curl-ap", + Namespace: ns.Name, + Annotations: map[string]string{ + helpersv1.ManagedByMetadataKey: helpersv1.ManagedByUserValue, + helpersv1.StatusMetadataKey: helpersv1.Completed, + helpersv1.CompletionMetadataKey: helpersv1.Full, + }, + Labels: map[string]string{ + helpersv1.ApiGroupMetadataKey: "apps", + helpersv1.ApiVersionMetadataKey: "v1", + helpersv1.KindMetadataKey: "Deployment", + helpersv1.NameMetadataKey: "curl-28", + helpersv1.NamespaceMetadataKey: ns.Name, + }, + }, + Spec: v1beta1.ApplicationProfileSpec{ + Containers: []v1beta1.ApplicationProfileContainer{ + { + Name: "curl", + Capabilities: []string{}, + Execs: []v1beta1.ExecCalls{ + {Path: "/bin/sleep"}, + {Path: "/usr/bin/curl"}, + }, + Opens: []v1beta1.OpenCalls{}, + Syscalls: []string{"socket", "connect", "sendto", "recvfrom", "read", "write", "close", "openat", "mmap", "mprotect", "munmap", "fcntl", "ioctl", "poll", "epoll_create1", "epoll_ctl", "epoll_wait", "bind", "listen", "accept4", "getsockopt", "setsockopt", "getsockname", "getpid", "fstat", "rt_sigaction", "rt_sigprocmask", "writev"}, + }, + }, + }, + } + _, err := storageClient.ApplicationProfiles(ns.Name).Create( + context.Background(), ap, metav1.CreateOptions{}) + require.NoError(t, err, "create AP curl-ap") + + // 2. Create user-defined NN allowing only fusioncore.ai on TCP/80. 
+ nn := &v1beta1.NetworkNeighborhood{ + ObjectMeta: metav1.ObjectMeta{ + Name: "curl-nn", + Namespace: ns.Name, + Annotations: map[string]string{ + helpersv1.ManagedByMetadataKey: helpersv1.ManagedByUserValue, + helpersv1.StatusMetadataKey: helpersv1.Completed, + helpersv1.CompletionMetadataKey: helpersv1.Full, + }, + Labels: map[string]string{ + helpersv1.ApiGroupMetadataKey: "apps", + helpersv1.ApiVersionMetadataKey: "v1", + helpersv1.KindMetadataKey: "Deployment", + helpersv1.NameMetadataKey: "curl-28", + helpersv1.NamespaceMetadataKey: ns.Name, + }, + }, + Spec: v1beta1.NetworkNeighborhoodSpec{ + LabelSelector: metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "curl-28"}, + }, + Containers: []v1beta1.NetworkNeighborhoodContainer{ + { + Name: "curl", + Egress: []v1beta1.NetworkNeighbor{ + { + Identifier: "fusioncore-egress", + Type: "external", + DNS: "fusioncore.ai.", + DNSNames: []string{"fusioncore.ai."}, + IPAddress: "162.0.217.171", + Ports: []v1beta1.NetworkPort{ + {Name: "TCP-80", Protocol: "TCP", Port: ptr.To(int32(80))}, + }, + }, + }, + }, + }, + }, + } + _, err = storageClient.NetworkNeighborhoods(ns.Name).Create( + context.Background(), nn, metav1.CreateOptions{}) + require.NoError(t, err, "create NN curl-nn") + t.Logf("created AP + NN in ns %s", ns.Name) + + // 2b. Poll storage until both AP and NN are retrievable. + // Node-agent does a single fetch on container start with no retry, + // so the profile MUST exist before the pod is created. + require.Eventually(t, func() bool { + _, apErr := storageClient.ApplicationProfiles(ns.Name).Get(context.Background(), "curl-ap", metav1.GetOptions{}) + _, nnErr := storageClient.NetworkNeighborhoods(ns.Name).Get(context.Background(), "curl-nn", metav1.GetOptions{}) + return apErr == nil && nnErr == nil + }, 30*time.Second, 1*time.Second, "AP and NN must be retrievable from storage before deploying the pod") + t.Logf("verified AP + NN are retrievable from storage") + + // 3. 
Deploy curl with both user-defined labels (no learning). + wl, err := testutils.NewTestWorkload(ns.Name, path.Join(utils.CurrentDir(), "resources/nginx-user-defined-deployment.yaml")) + require.NoError(t, err) + require.NoError(t, wl.WaitForReady(80)) + t.Logf("pod ready in ns %s", ns.Name) + + // Give node-agent time to load the user-defined profiles into cache. + time.Sleep(30 * time.Second) + + // 4. Trigger anomalous traffic NOT in the NN. + exec := func(cmd []string) { + stdout, stderr, err := wl.ExecIntoPod(cmd, "curl") + t.Logf("exec %v → err=%v stdout=%q stderr=%q", cmd, err, stdout, stderr) + } + + // 4a. TCP egress to IPs not in NN (triggers R0011). + exec([]string{"curl", "-sm5", "http://8.8.8.8"}) + exec([]string{"curl", "-sm5", "http://1.1.1.1"}) + + // 4b. DNS lookups for real resolvable domains not in NN (triggers R0005). + // Must use domains that actually resolve (non-NXDOMAIN) because trace_dns + // drops responses with 0 answers. + exec([]string{"curl", "-sm5", "http://google.com"}) + exec([]string{"curl", "-sm5", "http://ebpf.io"}) + exec([]string{"curl", "-sm5", "http://cloudflare.com"}) + + // 5. Wait for alerts and assert both R0011 and R0005 fire. 
+ time.Sleep(30 * time.Second) + alerts, err := testutils.GetAlerts(ns.Name) + require.NoError(t, err) + + t.Logf("=== %d alerts in namespace %s ===", len(alerts), ns.Name) + for i, a := range alerts { + t.Logf(" [%d] rule=%s(%s) container=%s", i, + a.Labels["rule_name"], a.Labels["rule_id"], a.Labels["container_name"]) + } + + r0011Count := 0 + r0005Count := 0 + for _, a := range alerts { + switch a.Labels["rule_id"] { + case "R0011": + r0011Count++ + case "R0005": + r0005Count++ + } + } + + require.Greater(t, r0011Count, 0, + "expected R0011 'Unexpected Egress Network Traffic' alerts for 8.8.8.8/1.1.1.1, got none") + t.Logf("R0011 alerts: %d — user-defined NN correctly detects anomalous TCP egress", r0011Count) + + require.Greater(t, r0005Count, 0, + "expected R0005 'DNS Anomalies' alerts for google.com/ebpf.io/cloudflare.com, got none") + t.Logf("R0005 alerts: %d — user-defined NN correctly detects anomalous DNS lookups", r0005Count) +} diff --git a/tests/resources/curl-plain-deployment.yaml b/tests/resources/curl-plain-deployment.yaml new file mode 100644 index 000000000..003810550 --- /dev/null +++ b/tests/resources/curl-plain-deployment.yaml @@ -0,0 +1,20 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: curl-fusioncore-28-0 + name: curl-fusioncore-deployment +spec: + selector: + matchLabels: + app: curl-fusioncore-28-0 + replicas: 1 + template: + metadata: + labels: + app: curl-fusioncore-28-0 + spec: + containers: + - name: curl + image: docker.io/curlimages/curl@sha256:08e466006f0860e54fc299378de998935333e0e130a15f6f98482e9f8dab3058 + command: ["sleep", "infinity"] diff --git a/tests/resources/curl-user-network-deployment.yaml b/tests/resources/curl-user-network-deployment.yaml new file mode 100644 index 000000000..122de0f1c --- /dev/null +++ b/tests/resources/curl-user-network-deployment.yaml @@ -0,0 +1,21 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: curl-fusioncore-28-1 + name: curl-fusioncore-deployment 
+spec: + selector: + matchLabels: + app: curl-fusioncore-28-1 + replicas: 1 + template: + metadata: + labels: + app: curl-fusioncore-28-1 + kubescape.io/user-defined-network: fusioncore-network + spec: + containers: + - name: curl + image: docker.io/curlimages/curl@sha256:08e466006f0860e54fc299378de998935333e0e130a15f6f98482e9f8dab3058 + command: ["sleep", "infinity"] diff --git a/tests/resources/curl-user-profile-wildcards-deployment.yaml b/tests/resources/curl-user-profile-wildcards-deployment.yaml new file mode 100644 index 000000000..7b2e4ab7d --- /dev/null +++ b/tests/resources/curl-user-profile-wildcards-deployment.yaml @@ -0,0 +1,21 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: curl-fusioncore + name: curl-fusioncore-deployment +spec: + selector: + matchLabels: + app: curl-fusioncore + replicas: 1 + template: + metadata: + labels: + app: curl-fusioncore + kubescape.io/user-defined-profile: fusioncore-profile-wildcards + spec: + containers: + - name: curl + image: docker.io/curlimages/curl@sha256:08e466006f0860e54fc299378de998935333e0e130a15f6f98482e9f8dab3058 + command: ["sleep", "infinity"] diff --git a/tests/resources/debug-learn-nn.sh b/tests/resources/debug-learn-nn.sh new file mode 100755 index 000000000..aa05d74fe --- /dev/null +++ b/tests/resources/debug-learn-nn.sh @@ -0,0 +1,82 @@ +#!/usr/bin/env bash +# +# debug-learn-nn.sh — Deploy curl container without user-defined labels, +# trigger DNS+HTTP traffic, wait for NN to learn, dump the result. 
+#
+# Usage:
+#   ./debug-learn-nn.sh
+#
+set -euo pipefail
+
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+NS="debug-nn-$(head -c4 /dev/urandom | xxd -p)"
+
+echo "=== Creating namespace $NS ==="
+kubectl create namespace "$NS"
+
+echo "=== Deploying curl (no user-defined labels) ==="
+kubectl apply -n "$NS" -f "$SCRIPT_DIR/curl-plain-deployment.yaml"
+kubectl rollout status deployment/curl-fusioncore-deployment -n "$NS" --timeout=120s
+POD=$(kubectl get pods -n "$NS" -l app=curl-fusioncore-28-0 -o jsonpath='{.items[0].metadata.name}')
+echo "Pod: $POD"
+
+echo ""
+echo "=== Checking available DNS tools ==="
+echo "--- which nslookup ---"
+kubectl exec -n "$NS" "$POD" -c curl -- which nslookup 2>&1 || echo "(not found)"
+echo "--- which dig ---"
+kubectl exec -n "$NS" "$POD" -c curl -- which dig 2>&1 || echo "(not found)"
+echo "--- which host ---"
+kubectl exec -n "$NS" "$POD" -c curl -- which host 2>&1 || echo "(not found)"
+echo "--- busybox --list (dns-related) ---"
+kubectl exec -n "$NS" "$POD" -c curl -- busybox --list 2>&1 | grep -iE 'nslookup|dig|host|wget|ping' || echo "(none found)"
+
+echo ""
+echo "=== Triggering DNS + network traffic ==="
+
+echo "--- nslookup fusioncore.ai ---"
+kubectl exec -n "$NS" "$POD" -c curl -- nslookup fusioncore.ai 2>&1 || true
+
+echo "--- curl -sm5 http://fusioncore.ai ---"
+kubectl exec -n "$NS" "$POD" -c curl -- curl -sm5 http://fusioncore.ai >/dev/null 2>&1 || true
+
+echo "--- nslookup google.com ---"
+kubectl exec -n "$NS" "$POD" -c curl -- nslookup google.com 2>&1 || true
+
+echo "--- curl -sm5 http://google.com ---"
+kubectl exec -n "$NS" "$POD" -c curl -- curl -sm5 http://google.com >/dev/null 2>&1 || true
+
+sleep 5
+echo "--- repeat: nslookup + curl fusioncore.ai ---"
+kubectl exec -n "$NS" "$POD" -c curl -- nslookup fusioncore.ai 2>&1 || true
+kubectl exec -n "$NS" "$POD" -c curl -- curl -sm5 http://fusioncore.ai >/dev/null 2>&1 || true
+
+echo ""
+echo "=== Waiting for NN to complete ==="
+for i in
$(seq 1 60); do + NN_STATUS=$(kubectl get networkneighborhoods -n "$NS" \ + -o jsonpath='{.items[0].metadata.annotations.kubescape\.io/status}' 2>/dev/null || true) + AP_STATUS=$(kubectl get applicationprofiles -n "$NS" \ + -o jsonpath='{.items[0].metadata.annotations.kubescape\.io/status}' 2>/dev/null || true) + echo " [$i] AP=$AP_STATUS NN=$NN_STATUS" + [ "$NN_STATUS" = "completed" ] && break + sleep 10 +done + +echo "" +echo "========== Learned NetworkNeighborhood ==========" +kubectl get networkneighborhoods -n "$NS" -o yaml 2>&1 +echo "=================================================" + +echo "" +echo "========== Learned ApplicationProfile (execs) ==========" +kubectl get applicationprofiles -n "$NS" \ + -o jsonpath='{.items[0].spec.containers[0].execs}' 2>&1 | python3 -m json.tool 2>/dev/null || \ + kubectl get applicationprofiles -n "$NS" \ + -o jsonpath='{.items[0].spec.containers[0].execs}' 2>&1 +echo "" +echo "=======================================================" + +echo "" +echo "Namespace: $NS (left intact for inspection)" +echo "Cleanup: kubectl delete namespace $NS" diff --git a/tests/resources/known-application-profile-wildcards.yaml b/tests/resources/known-application-profile-wildcards.yaml new file mode 100644 index 000000000..ec60a6716 --- /dev/null +++ b/tests/resources/known-application-profile-wildcards.yaml @@ -0,0 +1,155 @@ +## +## User-defined ApplicationProfile with wildcard opens for Test_27. 
+## +## Derived from the learned AP of curlimages/curl:8.5.0, +## with opens collapsed using the wildcard identifier: +## * — matches zero or more path segments +## +## Usage: +## sed "s/{{NAMESPACE}}/$NS/g" known-application-profile-wildcards.yaml \ +## | kubectl apply -f - +## +apiVersion: spdx.softwarecomposition.kubescape.io/v1beta1 +kind: ApplicationProfile +metadata: + name: fusioncore-profile-wildcards + namespace: "{{NAMESPACE}}" +spec: + architectures: ["amd64"] + containers: + - name: curl + imageID: "docker.io/curlimages/curl@sha256:08e466006f0860e54fc299378de998935333e0e130a15f6f98482e9f8dab3058" + imageTag: "docker.io/curlimages/curl:8.5.0" + capabilities: + - CAP_CHOWN + - CAP_DAC_OVERRIDE + - CAP_DAC_READ_SEARCH + - CAP_SETGID + - CAP_SETPCAP + - CAP_SETUID + - CAP_SYS_ADMIN + execs: + - path: /bin/sleep + args: ["/bin/sleep", "infinity"] + - path: /bin/cat + args: ["/bin/cat"] + - path: /usr/bin/curl + args: ["/usr/bin/curl", "-sm2", "fusioncore.ai"] + - path: /usr/bin/nslookup + args: ["/usr/bin/nslookup"] + opens: + # --- /etc: wildcard covers any config file --- + - path: "/etc/*" + flags: ["O_RDONLY", "O_LARGEFILE", "O_CLOEXEC"] + # --- /etc/ssl: exact --- + - path: /etc/ssl/openssl.cnf + flags: ["O_RDONLY", "O_LARGEFILE"] + # --- /home: wildcard covers curlrc in any depth --- + - path: "/home/*" + flags: ["O_RDONLY", "O_LARGEFILE"] + # --- /lib: wildcard covers all shared libs --- + - path: "/lib/*" + flags: ["O_RDONLY", "O_LARGEFILE", "O_CLOEXEC"] + # --- /usr/lib: wildcard covers all versioned shared libs --- + - path: "/usr/lib/*" + flags: ["O_RDONLY", "O_LARGEFILE", "O_CLOEXEC"] + # --- /usr/local/lib: wildcard covers all local shared libs --- + - path: "/usr/local/lib/*" + flags: ["O_RDONLY", "O_LARGEFILE", "O_CLOEXEC"] + # --- /proc: wildcard for dynamic PID segments --- + - path: "/proc/*/cgroup" + flags: ["O_RDONLY", "O_CLOEXEC"] + - path: "/proc/*/kernel/cap_last_cap" + flags: ["O_RDONLY", "O_CLOEXEC"] + - path: "/proc/*/mountinfo" + 
flags: ["O_RDONLY", "O_CLOEXEC"] + - path: "/proc/*/task/*/fd" + flags: ["O_RDONLY", "O_DIRECTORY", "O_CLOEXEC"] + # --- /sys --- + - path: "/sys/fs/cgroup/cpu.max" + flags: ["O_RDONLY", "O_CLOEXEC"] + - path: "/sys/kernel/mm/transparent_hugepage/hpage_pmd_size" + flags: ["O_RDONLY"] + # --- runtime internals --- + - path: /7/setgroups + flags: ["O_RDONLY", "O_CLOEXEC"] + - path: /runc + flags: ["O_RDONLY", "O_CLOEXEC"] + syscalls: + - arch_prctl + - bind + - brk + - capget + - capset + - chdir + - clone + - close + - close_range + - connect + - epoll_ctl + - epoll_pwait + - execve + - exit + - exit_group + - faccessat2 + - fchown + - fcntl + - fstat + - fstatfs + - futex + - getcwd + - getdents64 + - getegid + - geteuid + - getgid + - getpeername + - getppid + - getsockname + - getsockopt + - gettid + - getuid + - ioctl + - membarrier + - mmap + - mprotect + - munmap + - nanosleep + - newfstatat + - open + - openat + - openat2 + - pipe + - poll + - prctl + - read + - recvfrom + - recvmsg + - rt_sigaction + - rt_sigprocmask + - rt_sigreturn + - sendto + - set_tid_address + - setgid + - setgroups + - setsockopt + - setuid + - sigaltstack + - socket + - statx + - tkill + - unknown + - write + - writev + endpoints: + - endpoint: ":80/" + direction: outbound + methods: ["GET"] + internal: false + headers: '{"Host":["fusioncore.ai"]}' + seccompProfile: + spec: + defaultAction: "" + rulePolicies: {} + initContainers: [] + ephemeralContainers: [] +status: {} diff --git a/tests/resources/known-application-profile.yaml b/tests/resources/known-application-profile.yaml new file mode 100644 index 000000000..b80294157 --- /dev/null +++ b/tests/resources/known-application-profile.yaml @@ -0,0 +1,245 @@ +## +## User-defined ApplicationProfile for Test_28. +## +## Referenced directly from a pod via the label: +## kubescape.io/user-defined-profile: fusioncore-profile +## +## Modeled after a real auto-learned AP from curlimages/curl:8.5.0. 
+## +## Usage: +## sed "s/{{NAMESPACE}}/$NS/g" known-application-profile.yaml \ +## | kubectl apply -f - +## +apiVersion: spdx.softwarecomposition.kubescape.io/v1beta1 +kind: ApplicationProfile +metadata: + name: fusioncore-profile + namespace: "{{NAMESPACE}}" +spec: + architectures: ["amd64"] + containers: + - name: curl + imageID: "docker.io/curlimages/curl@sha256:08e466006f0860e54fc299378de998935333e0e130a15f6f98482e9f8dab3058" + imageTag: "docker.io/curlimages/curl:8.5.0" + capabilities: + - CAP_CHOWN + - CAP_DAC_OVERRIDE + - CAP_DAC_READ_SEARCH + - CAP_SETGID + - CAP_SETPCAP + - CAP_SETUID + - CAP_SYS_ADMIN + execs: + - path: /bin/sleep + args: ["/bin/sleep", "infinity"] + - path: /bin/cat + args: ["/bin/cat"] + - path: /usr/bin/curl + args: ["/usr/bin/curl", "-sm2", "fusioncore.ai"] + - path: /usr/bin/nslookup + args: ["/usr/bin/nslookup"] + opens: + - path: /7/setgroups + flags: ["O_RDONLY", "O_CLOEXEC"] + - path: /etc/hosts + flags: ["O_CLOEXEC", "O_RDONLY", "O_LARGEFILE"] + - path: /etc/ld-musl-x86_64.path + flags: ["O_RDONLY", "O_LARGEFILE", "O_CLOEXEC"] + - path: /etc/passwd + flags: ["O_RDONLY", "O_CLOEXEC", "O_LARGEFILE"] + - path: /etc/resolv.conf + flags: ["O_RDONLY", "O_LARGEFILE", "O_CLOEXEC"] + - path: /etc/ssl/openssl.cnf + flags: ["O_RDONLY", "O_LARGEFILE"] + - path: /home/curl_user/.config/curlrc + flags: ["O_RDONLY", "O_LARGEFILE"] + - path: /home/curl_user/.curlrc + flags: ["O_RDONLY", "O_LARGEFILE"] + - path: /lib/libbrotlicommon.so.1 + flags: ["O_RDONLY", "O_LARGEFILE", "O_CLOEXEC"] + - path: /lib/libbrotlidec.so.1 + flags: ["O_RDONLY", "O_LARGEFILE", "O_CLOEXEC"] + - path: /lib/libcom_err.so.2.1 + flags: ["O_CLOEXEC", "O_RDONLY", "O_LARGEFILE"] + - path: /lib/libcrypto.so.3 + flags: ["O_CLOEXEC", "O_RDONLY", "O_LARGEFILE"] + - path: /lib/libcurl.so.4 + flags: ["O_RDONLY", "O_LARGEFILE", "O_CLOEXEC"] + - path: /lib/libgssapi_krb5.so.2 + flags: ["O_RDONLY", "O_LARGEFILE", "O_CLOEXEC"] + - path: /lib/libidn2.so.0 + flags: ["O_RDONLY", 
"O_LARGEFILE", "O_CLOEXEC"] + - path: /lib/libk5crypto.so.3 + flags: ["O_CLOEXEC", "O_RDONLY", "O_LARGEFILE"] + - path: /lib/libkeyutils.so.1 + flags: ["O_RDONLY", "O_LARGEFILE", "O_CLOEXEC"] + - path: /lib/libkrb5.so.3 + flags: ["O_RDONLY", "O_LARGEFILE", "O_CLOEXEC"] + - path: /lib/libkrb5support.so.0 + flags: ["O_RDONLY", "O_LARGEFILE", "O_CLOEXEC"] + - path: /lib/libnghttp2.so.14 + flags: ["O_CLOEXEC", "O_RDONLY", "O_LARGEFILE"] + - path: /lib/libpsl.so.5 + flags: ["O_RDONLY", "O_LARGEFILE", "O_CLOEXEC"] + - path: /lib/libssh2.so.1 + flags: ["O_RDONLY", "O_LARGEFILE", "O_CLOEXEC"] + - path: /lib/libssl.so.3 + flags: ["O_RDONLY", "O_LARGEFILE", "O_CLOEXEC"] + - path: /lib/libunistring.so.5 + flags: ["O_CLOEXEC", "O_RDONLY", "O_LARGEFILE"] + - path: /lib/libz.so.1.3 + flags: ["O_LARGEFILE", "O_CLOEXEC", "O_RDONLY"] + - path: /proc/⋯/cgroup + flags: ["O_RDONLY", "O_CLOEXEC"] + - path: /proc/⋯/kernel/cap_last_cap + flags: ["O_RDONLY", "O_CLOEXEC"] + - path: /proc/⋯/mountinfo + flags: ["O_RDONLY", "O_CLOEXEC"] + - path: /proc/⋯/task/1/fd + flags: ["O_RDONLY", "O_DIRECTORY", "O_CLOEXEC"] + - path: /proc/⋯/task/7/fd + flags: ["O_RDONLY", "O_DIRECTORY", "O_CLOEXEC"] + - path: /runc + flags: ["O_RDONLY", "O_CLOEXEC"] + - path: /sys/fs/cgroup/cpu.max + flags: ["O_RDONLY", "O_CLOEXEC"] + - path: /sys/kernel/mm/transparent_hugepage/hpage_pmd_size + flags: ["O_RDONLY"] + - path: /usr/lib/libbrotlicommon.so.1.1.0 + flags: ["O_RDONLY", "O_LARGEFILE", "O_CLOEXEC"] + - path: /usr/lib/libbrotlidec.so.1.1.0 + flags: ["O_RDONLY", "O_LARGEFILE", "O_CLOEXEC"] + - path: /usr/lib/libcurl.so.4.8.0 + flags: ["O_RDONLY", "O_LARGEFILE", "O_CLOEXEC"] + - path: /usr/lib/libgssapi_krb5.so.2.2 + flags: ["O_RDONLY", "O_LARGEFILE", "O_CLOEXEC"] + - path: /usr/lib/libidn2.so.0.3.8 + flags: ["O_CLOEXEC", "O_RDONLY", "O_LARGEFILE"] + - path: /usr/lib/libk5crypto.so.3.1 + flags: ["O_CLOEXEC", "O_RDONLY", "O_LARGEFILE"] + - path: /usr/lib/libkeyutils.so.1.10 + flags: ["O_RDONLY", "O_LARGEFILE", 
"O_CLOEXEC"] + - path: /usr/lib/libkrb5.so.3.3 + flags: ["O_CLOEXEC", "O_RDONLY", "O_LARGEFILE"] + - path: /usr/lib/libkrb5support.so.0.1 + flags: ["O_RDONLY", "O_LARGEFILE", "O_CLOEXEC"] + - path: /usr/lib/libnghttp2.so.14.25.1 + flags: ["O_RDONLY", "O_LARGEFILE", "O_CLOEXEC"] + - path: /usr/lib/libpsl.so.5.3.4 + flags: ["O_CLOEXEC", "O_RDONLY", "O_LARGEFILE"] + - path: /usr/lib/libssh2.so.1.0.1 + flags: ["O_RDONLY", "O_LARGEFILE", "O_CLOEXEC"] + - path: /usr/lib/libunistring.so.5.0.0 + flags: ["O_RDONLY", "O_LARGEFILE", "O_CLOEXEC"] + - path: /usr/local/lib/libbrotlicommon.so.1 + flags: ["O_LARGEFILE", "O_CLOEXEC", "O_RDONLY"] + - path: /usr/local/lib/libbrotlidec.so.1 + flags: ["O_RDONLY", "O_LARGEFILE", "O_CLOEXEC"] + - path: /usr/local/lib/libcom_err.so.2 + flags: ["O_CLOEXEC", "O_RDONLY", "O_LARGEFILE"] + - path: /usr/local/lib/libcrypto.so.3 + flags: ["O_RDONLY", "O_LARGEFILE", "O_CLOEXEC"] + - path: /usr/local/lib/libcurl.so.4 + flags: ["O_RDONLY", "O_LARGEFILE", "O_CLOEXEC"] + - path: /usr/local/lib/libgssapi_krb5.so.2 + flags: ["O_RDONLY", "O_LARGEFILE", "O_CLOEXEC"] + - path: /usr/local/lib/libidn2.so.0 + flags: ["O_RDONLY", "O_LARGEFILE", "O_CLOEXEC"] + - path: /usr/local/lib/libk5crypto.so.3 + flags: ["O_RDONLY", "O_LARGEFILE", "O_CLOEXEC"] + - path: /usr/local/lib/libkeyutils.so.1 + flags: ["O_RDONLY", "O_LARGEFILE", "O_CLOEXEC"] + - path: /usr/local/lib/libkrb5.so.3 + flags: ["O_RDONLY", "O_LARGEFILE", "O_CLOEXEC"] + - path: /usr/local/lib/libkrb5support.so.0 + flags: ["O_CLOEXEC", "O_RDONLY", "O_LARGEFILE"] + - path: /usr/local/lib/libnghttp2.so.14 + flags: ["O_RDONLY", "O_LARGEFILE", "O_CLOEXEC"] + - path: /usr/local/lib/libpsl.so.5 + flags: ["O_RDONLY", "O_LARGEFILE", "O_CLOEXEC"] + - path: /usr/local/lib/libssh2.so.1 + flags: ["O_RDONLY", "O_LARGEFILE", "O_CLOEXEC"] + - path: /usr/local/lib/libssl.so.3 + flags: ["O_RDONLY", "O_LARGEFILE", "O_CLOEXEC"] + - path: /usr/local/lib/libunistring.so.5 + flags: ["O_RDONLY", "O_LARGEFILE", "O_CLOEXEC"] + - 
path: /usr/local/lib/libz.so.1 + flags: ["O_RDONLY", "O_LARGEFILE", "O_CLOEXEC"] + syscalls: + - arch_prctl + - bind + - brk + - capget + - capset + - chdir + - clone + - close + - close_range + - connect + - epoll_ctl + - epoll_pwait + - execve + - exit + - exit_group + - faccessat2 + - fchown + - fcntl + - fstat + - fstatfs + - futex + - getcwd + - getdents64 + - getegid + - geteuid + - getgid + - getpeername + - getppid + - getsockname + - getsockopt + - gettid + - getuid + - ioctl + - membarrier + - mmap + - mprotect + - munmap + - nanosleep + - newfstatat + - open + - openat + - openat2 + - pipe + - poll + - prctl + - read + - recvfrom + - recvmsg + - rt_sigaction + - rt_sigprocmask + - rt_sigreturn + - sendto + - set_tid_address + - setgid + - setgroups + - setsockopt + - setuid + - sigaltstack + - socket + - statx + - tkill + - unknown + - write + - writev + endpoints: + - endpoint: ":80/" + direction: outbound + methods: ["GET"] + internal: false + headers: '{"Host":["fusioncore.ai"]}' + seccompProfile: + spec: + defaultAction: "" + rulePolicies: {} + initContainers: [] + ephemeralContainers: [] +status: {} diff --git a/tests/resources/known-network-neighborhood.yaml b/tests/resources/known-network-neighborhood.yaml new file mode 100644 index 000000000..0d4caa0c4 --- /dev/null +++ b/tests/resources/known-network-neighborhood.yaml @@ -0,0 +1,49 @@ +## +## User-defined NetworkNeighborhood for Test_28. +## +## Referenced directly from a pod via the label: +## kubescape.io/user-defined-network: fusioncore-network +## +## Carries "kubescape.io/managed-by: User" annotation and workload +## labels to match the schema the node-agent cache expects. +## +## Modeled after a real auto-learned NN from curlimages/curl:8.5.0. 
+## +## Usage: +## sed "s/{{NAMESPACE}}/$NS/g" known-network-neighborhood.yaml \ +## | kubectl apply -f - +## +apiVersion: spdx.softwarecomposition.kubescape.io/v1beta1 +kind: NetworkNeighborhood +metadata: + name: fusioncore-network + namespace: "{{NAMESPACE}}" + annotations: + kubescape.io/managed-by: User + kubescape.io/status: completed + kubescape.io/completion: complete + labels: + kubescape.io/workload-api-group: apps + kubescape.io/workload-api-version: v1 + kubescape.io/workload-kind: Deployment + kubescape.io/workload-name: curl-fusioncore-deployment + kubescape.io/workload-namespace: "{{NAMESPACE}}" +spec: + matchLabels: + app: curl-fusioncore-28-1 + containers: + - name: curl + ingress: [] + egress: + - dns: fusioncore.ai. + dnsNames: + - fusioncore.ai. + identifier: a5e64ff1db824089b1706ac872303e55075f92cf6a652b5272f06c3a2b9e8d10 + ipAddress: 162.0.217.171 + namespaceSelector: null + podSelector: null + ports: + - name: TCP-80 + port: 80 + protocol: TCP + type: external diff --git a/tests/resources/nginx-both-user-defined-deployment.yaml b/tests/resources/nginx-both-user-defined-deployment.yaml new file mode 100644 index 000000000..76d8959de --- /dev/null +++ b/tests/resources/nginx-both-user-defined-deployment.yaml @@ -0,0 +1,23 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: nginx-fusioncore + name: nginx-fusioncore-deployment +spec: + selector: + matchLabels: + app: nginx-fusioncore + replicas: 1 + template: + metadata: + labels: + app: nginx-fusioncore + kubescape.io/user-defined-profile: fusioncore-profile + kubescape.io/user-defined-network: fusioncore-network + spec: + containers: + - name: nginx + image: nginx:1.14.2 + ports: + - containerPort: 80 diff --git a/tests/resources/nginx-known-network-deployment.yaml b/tests/resources/nginx-known-network-deployment.yaml new file mode 100644 index 000000000..0a947f5d4 --- /dev/null +++ b/tests/resources/nginx-known-network-deployment.yaml @@ -0,0 +1,21 @@ +apiVersion: apps/v1 
+kind: Deployment +metadata: + labels: + app: nginx-fusioncore + name: nginx-fusioncore-deployment +spec: + selector: + matchLabels: + app: nginx-fusioncore + replicas: 1 + template: + metadata: + labels: + app: nginx-fusioncore + spec: + containers: + - name: nginx + image: nginx:1.14.2 + ports: + - containerPort: 80 diff --git a/tests/resources/nginx-user-defined-deployment.yaml b/tests/resources/nginx-user-defined-deployment.yaml new file mode 100644 index 000000000..8e68df16b --- /dev/null +++ b/tests/resources/nginx-user-defined-deployment.yaml @@ -0,0 +1,22 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: curl-28 + name: curl-28 +spec: + selector: + matchLabels: + app: curl-28 + replicas: 1 + template: + metadata: + labels: + app: curl-28 + kubescape.io/user-defined-network: curl-nn + kubescape.io/user-defined-profile: curl-ap + spec: + containers: + - name: curl + image: docker.io/curlimages/curl@sha256:08e466006f0860e54fc299378de998935333e0e130a15f6f98482e9f8dab3058 + command: ["sleep", "infinity"] diff --git a/tests/resources/nginx-user-network-deployment.yaml b/tests/resources/nginx-user-network-deployment.yaml new file mode 100644 index 000000000..618f814de --- /dev/null +++ b/tests/resources/nginx-user-network-deployment.yaml @@ -0,0 +1,22 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: nginx-fusioncore + name: nginx-fusioncore-deployment +spec: + selector: + matchLabels: + app: nginx-fusioncore + replicas: 1 + template: + metadata: + labels: + app: nginx-fusioncore + kubescape.io/user-defined-network: fusioncore-network + spec: + containers: + - name: nginx + image: nginx:1.14.2 + ports: + - containerPort: 80