diff --git a/cmd/cluster-authentication-operator-tests-ext/main.go b/cmd/cluster-authentication-operator-tests-ext/main.go index 608c95cc1..725ec4495 100644 --- a/cmd/cluster-authentication-operator-tests-ext/main.go +++ b/cmd/cluster-authentication-operator-tests-ext/main.go @@ -69,6 +69,17 @@ func prepareOperatorTestsRegistry() (*oteextension.Registry, error) { }, }) + // The following suite runs tests that verify the operator's behaviour. + // This suite is executed only on pull requests targeting this repository. + // Tests tagged with [Parallel] and any of [Operator], [OIDC], [Templates], [Tokens] are included in this suite. + extension.AddSuite(oteextension.Suite{ + Name: "openshift/cluster-authentication-operator/operator/parallel", + Parallelism: 1, + Qualifiers: []string{ + `!name.contains("[Serial]") && (name.contains("[Operator]") || name.contains("[OIDC]") || name.contains("[Templates]") || name.contains("[Tokens]"))`, + }, + }) + specs, err := oteginkgo.BuildExtensionTestSpecsFromOpenShiftGinkgoSuite() if err != nil { return nil, fmt.Errorf("couldn't build extension test specs from ginkgo: %w", err) diff --git a/go.mod b/go.mod index 367f091dd..15bddbd2c 100644 --- a/go.mod +++ b/go.mod @@ -8,6 +8,7 @@ require ( github.com/golang-jwt/jwt/v5 v5.2.2 github.com/google/go-cmp v0.7.0 github.com/onsi/ginkgo/v2 v2.21.0 + github.com/onsi/gomega v1.35.1 github.com/openshift-eng/openshift-tests-extension v0.0.0-20251205182537-ff5553e56f33 github.com/openshift/api v0.0.0-20260126183958-606bd613f9f7 github.com/openshift/build-machinery-go v0.0.0-20250530140348-dc5b2804eeee @@ -76,7 +77,6 @@ require ( github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect - github.com/onsi/gomega v1.35.1 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pkg/profile v1.7.0 // indirect 
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect diff --git a/test/e2e/network_policy.go b/test/e2e/network_policy.go new file mode 100644 index 000000000..dd41bceb9 --- /dev/null +++ b/test/e2e/network_policy.go @@ -0,0 +1,574 @@ +package e2e + +import ( + "context" + "fmt" + "time" + + g "github.com/onsi/ginkgo/v2" + o "github.com/onsi/gomega" + + corev1 "k8s.io/api/core/v1" + networkingv1 "k8s.io/api/networking/v1" + "k8s.io/apimachinery/pkg/api/equality" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" + + configclient "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1" + test "github.com/openshift/cluster-authentication-operator/test/library" +) + +const ( + authNamespace = "openshift-authentication" + oauthAPINamespace = "openshift-oauth-apiserver" + authOperatorNamespace = "openshift-authentication-operator" + defaultDenyAllPolicyName = "default-deny-all" + oauthServerPolicyName = "oauth-server-networkpolicy" + oauthAPIServerPolicyName = "oauth-apiserver-networkpolicy" + authOperatorPolicyName = "authentication-operator-networkpolicy" +) + +var _ = g.Describe("[sig-auth] authentication operator", func() { + g.It("[Operator][NetworkPolicy] should ensure auth NetworkPolicies are defined", func() { + testAuthNetworkPolicies() + }) + g.It("[Operator][NetworkPolicy] should restore auth NetworkPolicies after delete or mutation[Timeout:30m]", func() { + testAuthNetworkPolicyReconcile() + }) +}) + +func testAuthNetworkPolicies() { + ctx := context.Background() + g.By("Creating Kubernetes clients") + kubeConfig := test.NewClientConfigForTest(g.GinkgoTB()) + kubeClient, err := kubernetes.NewForConfig(kubeConfig) + o.Expect(err).NotTo(o.HaveOccurred()) + configClient, err := configclient.NewForConfig(kubeConfig) + o.Expect(err).NotTo(o.HaveOccurred()) + + g.By("Waiting for 
authentication ClusterOperator to be stable") + err = test.WaitForClusterOperatorAvailableNotProgressingNotDegraded(g.GinkgoTB(), configClient, "authentication") + o.Expect(err).NotTo(o.HaveOccurred()) + + g.By("Validating NetworkPolicies in openshift-authentication") + authDefaultDeny := getNetworkPolicy(ctx, kubeClient, authNamespace, defaultDenyAllPolicyName) + logNetworkPolicySummary("auth/default-deny-all", authDefaultDeny) + logNetworkPolicyDetails("auth/default-deny-all", authDefaultDeny) + requireDefaultDenyAll(authDefaultDeny) + + authPolicy := getNetworkPolicy(ctx, kubeClient, authNamespace, oauthServerPolicyName) + logNetworkPolicySummary("auth/oauth-server-networkpolicy", authPolicy) + logNetworkPolicyDetails("auth/oauth-server-networkpolicy", authPolicy) + requirePodSelectorLabel(authPolicy, "app", "oauth-openshift") + requireIngressPort(authPolicy, corev1.ProtocolTCP, 6443) + logIngressFromNamespaceOptional(authPolicy, 6443, "openshift-monitoring") + requireIngressFromNamespaceOrPolicyGroup(authPolicy, 6443, "openshift-ingress", "policy-group.network.openshift.io/ingress") + requireIngressFromNamespace(authPolicy, 6443, authOperatorNamespace) + requireEgressPort(authPolicy, corev1.ProtocolTCP, 5353) + requireEgressPort(authPolicy, corev1.ProtocolUDP, 5353) + requireEgressPort(authPolicy, corev1.ProtocolTCP, 8443) + logIngressHostNetworkOrAllowAll(authPolicy, 6443) + logEgressAllowAllTCP(authPolicy) + + g.By("Validating NetworkPolicies in openshift-oauth-apiserver") + oauthDefaultDeny := getNetworkPolicy(ctx, kubeClient, oauthAPINamespace, defaultDenyAllPolicyName) + logNetworkPolicySummary("oauth-apiserver/default-deny-all", oauthDefaultDeny) + logNetworkPolicyDetails("oauth-apiserver/default-deny-all", oauthDefaultDeny) + requireDefaultDenyAll(oauthDefaultDeny) + + oauthPolicy := getNetworkPolicy(ctx, kubeClient, oauthAPINamespace, oauthAPIServerPolicyName) + logNetworkPolicySummary("oauth-apiserver/oauth-apiserver-networkpolicy", oauthPolicy) + 
logNetworkPolicyDetails("oauth-apiserver/oauth-apiserver-networkpolicy", oauthPolicy) + requirePodSelectorLabel(oauthPolicy, "app", "openshift-oauth-apiserver") + requireIngressPort(oauthPolicy, corev1.ProtocolTCP, 8443) + logIngressFromNamespaceOptional(oauthPolicy, 8443, "openshift-monitoring") + requireIngressFromNamespace(oauthPolicy, 8443, "openshift-authentication") + requireIngressFromNamespace(oauthPolicy, 8443, authOperatorNamespace) + requireEgressPort(oauthPolicy, corev1.ProtocolTCP, 5353) + requireEgressPort(oauthPolicy, corev1.ProtocolUDP, 5353) + requireEgressPort(oauthPolicy, corev1.ProtocolTCP, 2379) + logIngressHostNetworkOrAllowAll(oauthPolicy, 8443) + logEgressAllowAllTCP(oauthPolicy) + + g.By("Validating NetworkPolicies in openshift-authentication-operator") + operatorDefaultDeny := getNetworkPolicy(ctx, kubeClient, authOperatorNamespace, defaultDenyAllPolicyName) + logNetworkPolicySummary("auth-operator/default-deny-all", operatorDefaultDeny) + logNetworkPolicyDetails("auth-operator/default-deny-all", operatorDefaultDeny) + requireDefaultDenyAll(operatorDefaultDeny) + + operatorPolicy := getNetworkPolicy(ctx, kubeClient, authOperatorNamespace, authOperatorPolicyName) + logNetworkPolicySummary("auth-operator/"+authOperatorPolicyName, operatorPolicy) + logNetworkPolicyDetails("auth-operator/"+authOperatorPolicyName, operatorPolicy) + requirePodSelectorLabel(operatorPolicy, "app", "authentication-operator") + requireIngressPort(operatorPolicy, corev1.ProtocolTCP, 8443) + logIngressFromNamespaceOptional(operatorPolicy, 8443, "openshift-monitoring") + requireEgressPort(operatorPolicy, corev1.ProtocolTCP, 5353) + requireEgressPort(operatorPolicy, corev1.ProtocolUDP, 5353) + requireEgressPort(operatorPolicy, corev1.ProtocolTCP, 6443) + requireEgressPort(operatorPolicy, corev1.ProtocolTCP, 8443) + logEgressAllowAllTCP(operatorPolicy) + + g.By("Verifying pods are ready in auth namespaces") + waitForPodsReadyByLabel(ctx, kubeClient, authNamespace, 
"app=oauth-openshift") + waitForPodsReadyByLabel(ctx, kubeClient, oauthAPINamespace, "app=openshift-oauth-apiserver") + waitForPodsReadyByLabel(ctx, kubeClient, authOperatorNamespace, "app=authentication-operator") +} + +func testAuthNetworkPolicyReconcile() { + ctx := context.Background() + g.By("Creating Kubernetes clients") + kubeConfig := test.NewClientConfigForTest(g.GinkgoTB()) + kubeClient, err := kubernetes.NewForConfig(kubeConfig) + o.Expect(err).NotTo(o.HaveOccurred()) + configClient, err := configclient.NewForConfig(kubeConfig) + o.Expect(err).NotTo(o.HaveOccurred()) + + g.By("Waiting for authentication ClusterOperator to be stable") + err = test.WaitForClusterOperatorAvailableNotProgressingNotDegraded(g.GinkgoTB(), configClient, "authentication") + o.Expect(err).NotTo(o.HaveOccurred()) + + g.By("Capturing expected NetworkPolicy specs") + expectedAuthPolicy := getNetworkPolicy(ctx, kubeClient, authNamespace, oauthServerPolicyName) + expectedOAuthAPIPolicy := getNetworkPolicy(ctx, kubeClient, oauthAPINamespace, oauthAPIServerPolicyName) + expectedAuthOperatorPolicy := getNetworkPolicy(ctx, kubeClient, authOperatorNamespace, authOperatorPolicyName) + expectedAuthDefaultDeny := getNetworkPolicy(ctx, kubeClient, authNamespace, defaultDenyAllPolicyName) + expectedOAuthAPIDefaultDeny := getNetworkPolicy(ctx, kubeClient, oauthAPINamespace, defaultDenyAllPolicyName) + expectedAuthOperatorDefaultDeny := getNetworkPolicy(ctx, kubeClient, authOperatorNamespace, defaultDenyAllPolicyName) + + g.By("Deleting main policies and waiting for restoration") + g.GinkgoWriter.Printf("deleting NetworkPolicy %s/%s\n", authNamespace, oauthServerPolicyName) + restoreNetworkPolicy(ctx, kubeClient, expectedAuthPolicy) + g.GinkgoWriter.Printf("deleting NetworkPolicy %s/%s\n", oauthAPINamespace, oauthAPIServerPolicyName) + restoreNetworkPolicy(ctx, kubeClient, expectedOAuthAPIPolicy) + g.GinkgoWriter.Printf("deleting NetworkPolicy %s/%s\n", authOperatorNamespace, 
authOperatorPolicyName) + restoreNetworkPolicy(ctx, kubeClient, expectedAuthOperatorPolicy) + + g.By("Deleting default-deny-all policies and waiting for restoration") + g.GinkgoWriter.Printf("deleting NetworkPolicy %s/%s\n", authNamespace, defaultDenyAllPolicyName) + restoreNetworkPolicy(ctx, kubeClient, expectedAuthDefaultDeny) + g.GinkgoWriter.Printf("deleting NetworkPolicy %s/%s\n", oauthAPINamespace, defaultDenyAllPolicyName) + restoreNetworkPolicy(ctx, kubeClient, expectedOAuthAPIDefaultDeny) + g.GinkgoWriter.Printf("deleting NetworkPolicy %s/%s\n", authOperatorNamespace, defaultDenyAllPolicyName) + restoreNetworkPolicy(ctx, kubeClient, expectedAuthOperatorDefaultDeny) + + g.By("Mutating main policies and waiting for reconciliation") + g.GinkgoWriter.Printf("mutating NetworkPolicy %s/%s\n", authNamespace, oauthServerPolicyName) + mutateAndRestoreNetworkPolicy(ctx, kubeClient, authNamespace, oauthServerPolicyName) + g.GinkgoWriter.Printf("mutating NetworkPolicy %s/%s\n", oauthAPINamespace, oauthAPIServerPolicyName) + mutateAndRestoreNetworkPolicy(ctx, kubeClient, oauthAPINamespace, oauthAPIServerPolicyName) + g.GinkgoWriter.Printf("mutating NetworkPolicy %s/%s\n", authOperatorNamespace, authOperatorPolicyName) + mutateAndRestoreNetworkPolicy(ctx, kubeClient, authOperatorNamespace, authOperatorPolicyName) + + g.By("Mutating default-deny-all policies and waiting for reconciliation") + g.GinkgoWriter.Printf("mutating NetworkPolicy %s/%s\n", authNamespace, defaultDenyAllPolicyName) + mutateAndRestoreNetworkPolicy(ctx, kubeClient, authNamespace, defaultDenyAllPolicyName) + g.GinkgoWriter.Printf("mutating NetworkPolicy %s/%s\n", oauthAPINamespace, defaultDenyAllPolicyName) + mutateAndRestoreNetworkPolicy(ctx, kubeClient, oauthAPINamespace, defaultDenyAllPolicyName) + g.GinkgoWriter.Printf("mutating NetworkPolicy %s/%s\n", authOperatorNamespace, defaultDenyAllPolicyName) + mutateAndRestoreNetworkPolicy(ctx, kubeClient, authOperatorNamespace, defaultDenyAllPolicyName) 
+ + g.By("Checking NetworkPolicy-related events (best-effort)") + logNetworkPolicyEvents(ctx, kubeClient, []string{"openshift-authentication-operator", authNamespace, oauthAPINamespace}, oauthServerPolicyName) +} + +func getNetworkPolicy(ctx context.Context, client kubernetes.Interface, namespace, name string) *networkingv1.NetworkPolicy { + g.GinkgoHelper() + policy, err := client.NetworkingV1().NetworkPolicies(namespace).Get(ctx, name, metav1.GetOptions{}) + o.Expect(err).NotTo(o.HaveOccurred(), "failed to get NetworkPolicy %s/%s", namespace, name) + return policy +} + +func requireDefaultDenyAll(policy *networkingv1.NetworkPolicy) { + g.GinkgoHelper() + if len(policy.Spec.PodSelector.MatchLabels) != 0 || len(policy.Spec.PodSelector.MatchExpressions) != 0 { + g.Fail(fmt.Sprintf("%s/%s: expected empty podSelector", policy.Namespace, policy.Name)) + } + + policyTypes := sets.NewString() + for _, policyType := range policy.Spec.PolicyTypes { + policyTypes.Insert(string(policyType)) + } + if !policyTypes.Has(string(networkingv1.PolicyTypeIngress)) || !policyTypes.Has(string(networkingv1.PolicyTypeEgress)) { + g.Fail(fmt.Sprintf("%s/%s: expected both Ingress and Egress policyTypes, got %v", policy.Namespace, policy.Name, policy.Spec.PolicyTypes)) + } +} + +func requirePodSelectorLabel(policy *networkingv1.NetworkPolicy, key, value string) { + g.GinkgoHelper() + actual, ok := policy.Spec.PodSelector.MatchLabels[key] + if !ok || actual != value { + g.Fail(fmt.Sprintf("%s/%s: expected podSelector %s=%s, got %v", policy.Namespace, policy.Name, key, value, policy.Spec.PodSelector.MatchLabels)) + } +} + +func requireIngressPort(policy *networkingv1.NetworkPolicy, protocol corev1.Protocol, port int32) { + g.GinkgoHelper() + if !hasPortInIngress(policy.Spec.Ingress, protocol, port) { + g.Fail(fmt.Sprintf("%s/%s: expected ingress port %s/%d", policy.Namespace, policy.Name, protocol, port)) + } +} + +func requireIngressFromNamespace(policy *networkingv1.NetworkPolicy, port 
int32, namespace string) { + g.GinkgoHelper() + if !hasIngressFromNamespace(policy.Spec.Ingress, port, namespace) { + g.Fail(fmt.Sprintf("%s/%s: expected ingress from namespace %s on port %d", policy.Namespace, policy.Name, namespace, port)) + } +} + +func logIngressFromNamespaceOptional(policy *networkingv1.NetworkPolicy, port int32, namespace string) { + g.GinkgoHelper() + if hasIngressFromNamespace(policy.Spec.Ingress, port, namespace) { + g.GinkgoWriter.Printf("networkpolicy %s/%s: ingress from namespace %s present on port %d\n", policy.Namespace, policy.Name, namespace, port) + return + } + g.GinkgoWriter.Printf("networkpolicy %s/%s: no ingress from namespace %s on port %d\n", policy.Namespace, policy.Name, namespace, port) +} + +func requireIngressFromNamespaceOrPolicyGroup(policy *networkingv1.NetworkPolicy, port int32, namespace, policyGroupLabelKey string) { + g.GinkgoHelper() + if hasIngressFromNamespace(policy.Spec.Ingress, port, namespace) { + return + } + if hasIngressFromPolicyGroup(policy.Spec.Ingress, port, policyGroupLabelKey) { + return + } + g.Fail(fmt.Sprintf("%s/%s: expected ingress from namespace %s or policy-group %s on port %d", policy.Namespace, policy.Name, namespace, policyGroupLabelKey, port)) +} + +func requireIngressAllowAll(policy *networkingv1.NetworkPolicy, port int32) { + g.GinkgoHelper() + if !hasIngressAllowAll(policy.Spec.Ingress, port) { + g.Fail(fmt.Sprintf("%s/%s: expected ingress allow-all on port %d", policy.Namespace, policy.Name, port)) + } +} + +func logIngressHostNetworkOrAllowAll(policy *networkingv1.NetworkPolicy, port int32) { + g.GinkgoHelper() + if hasIngressAllowAll(policy.Spec.Ingress, port) { + g.GinkgoWriter.Printf("networkpolicy %s/%s: ingress allow-all present on port %d\n", policy.Namespace, policy.Name, port) + return + } + if hasIngressFromPolicyGroup(policy.Spec.Ingress, port, "policy-group.network.openshift.io/host-network") { + g.GinkgoWriter.Printf("networkpolicy %s/%s: ingress host-network 
policy-group present on port %d\n", policy.Namespace, policy.Name, port) + return + } + g.GinkgoWriter.Printf("networkpolicy %s/%s: no ingress allow-all/host-network rule on port %d\n", policy.Namespace, policy.Name, port) +} + +func requireEgressPort(policy *networkingv1.NetworkPolicy, protocol corev1.Protocol, port int32) { + g.GinkgoHelper() + if !hasPortInEgress(policy.Spec.Egress, protocol, port) { + g.Fail(fmt.Sprintf("%s/%s: expected egress port %s/%d", policy.Namespace, policy.Name, protocol, port)) + } +} + +func hasPortInIngress(rules []networkingv1.NetworkPolicyIngressRule, protocol corev1.Protocol, port int32) bool { + for _, rule := range rules { + if hasPort(rule.Ports, protocol, port) { + return true + } + } + return false +} + +func hasPortInEgress(rules []networkingv1.NetworkPolicyEgressRule, protocol corev1.Protocol, port int32) bool { + for _, rule := range rules { + if hasPort(rule.Ports, protocol, port) { + return true + } + } + return false +} + +func hasPort(ports []networkingv1.NetworkPolicyPort, protocol corev1.Protocol, port int32) bool { + for _, p := range ports { + if p.Port == nil || p.Port.IntValue() != int(port) { + continue + } + if p.Protocol == nil || *p.Protocol == protocol { + return true + } + } + return false +} + +func hasIngressFromNamespace(rules []networkingv1.NetworkPolicyIngressRule, port int32, namespace string) bool { + for _, rule := range rules { + if !hasPort(rule.Ports, corev1.ProtocolTCP, port) { + continue + } + for _, peer := range rule.From { + if namespaceSelectorMatches(peer.NamespaceSelector, namespace) { + return true + } + } + } + return false +} + +func hasIngressAllowAll(rules []networkingv1.NetworkPolicyIngressRule, port int32) bool { + for _, rule := range rules { + if !hasPort(rule.Ports, corev1.ProtocolTCP, port) { + continue + } + if len(rule.From) == 0 { + return true + } + } + return false +} + +func namespaceSelectorMatches(selector *metav1.LabelSelector, namespace string) bool { + if selector == 
nil { + return false + } + if selector.MatchLabels != nil { + if selector.MatchLabels["kubernetes.io/metadata.name"] == namespace { + return true + } + } + for _, expr := range selector.MatchExpressions { + if expr.Key != "kubernetes.io/metadata.name" { + continue + } + if expr.Operator != metav1.LabelSelectorOpIn { + continue + } + for _, value := range expr.Values { + if value == namespace { + return true + } + } + } + return false +} + +func hasIngressFromPolicyGroup(rules []networkingv1.NetworkPolicyIngressRule, port int32, policyGroupLabelKey string) bool { + for _, rule := range rules { + if !hasPort(rule.Ports, corev1.ProtocolTCP, port) { + continue + } + for _, peer := range rule.From { + if peer.NamespaceSelector == nil || peer.NamespaceSelector.MatchLabels == nil { + continue + } + if _, ok := peer.NamespaceSelector.MatchLabels[policyGroupLabelKey]; ok { + return true + } + } + } + return false +} + +func logEgressAllowAllTCP(policy *networkingv1.NetworkPolicy) { + g.GinkgoHelper() + if hasEgressAllowAllTCP(policy.Spec.Egress) { + g.GinkgoWriter.Printf("networkpolicy %s/%s: egress allow-all TCP rule present\n", policy.Namespace, policy.Name) + return + } + g.GinkgoWriter.Printf("networkpolicy %s/%s: no egress allow-all TCP rule\n", policy.Namespace, policy.Name) +} + +func hasEgressAllowAllTCP(rules []networkingv1.NetworkPolicyEgressRule) bool { + for _, rule := range rules { + if len(rule.To) != 0 { + continue + } + if hasAnyTCPPort(rule.Ports) { + return true + } + } + return false +} + +func hasAnyTCPPort(ports []networkingv1.NetworkPolicyPort) bool { + if len(ports) == 0 { + return true + } + for _, p := range ports { + if p.Protocol != nil && *p.Protocol != corev1.ProtocolTCP { + continue + } + return true + } + return false +} + +func restoreNetworkPolicy(ctx context.Context, client kubernetes.Interface, expected *networkingv1.NetworkPolicy) { + g.GinkgoHelper() + namespace := expected.Namespace + name := expected.Name + 
g.GinkgoWriter.Printf("deleting NetworkPolicy %s/%s\n", namespace, name) + o.Expect(client.NetworkingV1().NetworkPolicies(namespace).Delete(ctx, name, metav1.DeleteOptions{})).NotTo(o.HaveOccurred()) + err := wait.PollImmediate(5*time.Second, 10*time.Minute, func() (bool, error) { + current, err := client.NetworkingV1().NetworkPolicies(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return false, nil + } + return equality.Semantic.DeepEqual(expected.Spec, current.Spec), nil + }) + o.Expect(err).NotTo(o.HaveOccurred(), "timed out waiting for NetworkPolicy %s/%s spec to be restored", namespace, name) + g.GinkgoWriter.Printf("NetworkPolicy %s/%s spec restored after delete\n", namespace, name) +} + +func mutateAndRestoreNetworkPolicy(ctx context.Context, client kubernetes.Interface, namespace, name string) { + g.GinkgoHelper() + original := getNetworkPolicy(ctx, client, namespace, name) + g.GinkgoWriter.Printf("mutating NetworkPolicy %s/%s (podSelector override)\n", namespace, name) + patch := []byte(`{"spec":{"podSelector":{"matchLabels":{"np-reconcile":"mutated"}}}}`) + _, err := client.NetworkingV1().NetworkPolicies(namespace).Patch(ctx, name, types.MergePatchType, patch, metav1.PatchOptions{}) + o.Expect(err).NotTo(o.HaveOccurred()) + + err = wait.PollImmediate(5*time.Second, 10*time.Minute, func() (bool, error) { + current := getNetworkPolicy(ctx, client, namespace, name) + return equality.Semantic.DeepEqual(original.Spec, current.Spec), nil + }) + o.Expect(err).NotTo(o.HaveOccurred(), "timed out waiting for NetworkPolicy %s/%s spec to be restored", namespace, name) + g.GinkgoWriter.Printf("NetworkPolicy %s/%s spec restored\n", namespace, name) +} + +func waitForPodsReadyByLabel(ctx context.Context, client kubernetes.Interface, namespace, labelSelector string) { + g.GinkgoHelper() + g.GinkgoWriter.Printf("waiting for pods ready in %s with selector %s\n", namespace, labelSelector) + err := wait.PollImmediate(5*time.Second, 5*time.Minute, func() 
(bool, error) { + pods, err := client.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{LabelSelector: labelSelector}) + if err != nil { + return false, err + } + if len(pods.Items) == 0 { + return false, nil + } + for _, pod := range pods.Items { + if !isPodReady(&pod) { + return false, nil + } + } + return true, nil + }) + o.Expect(err).NotTo(o.HaveOccurred(), "timed out waiting for pods in %s with selector %s to be ready", namespace, labelSelector) +} + +func isPodReady(pod *corev1.Pod) bool { + for _, condition := range pod.Status.Conditions { + if condition.Type == corev1.PodReady && condition.Status == corev1.ConditionTrue { + return true + } + } + return false +} + +func logNetworkPolicyEvents(ctx context.Context, client kubernetes.Interface, namespaces []string, policyName string) { + g.GinkgoHelper() + found := false + _ = wait.PollImmediate(5*time.Second, 2*time.Minute, func() (bool, error) { + for _, namespace := range namespaces { + events, err := client.CoreV1().Events(namespace).List(ctx, metav1.ListOptions{}) + if err != nil { + g.GinkgoWriter.Printf("unable to list events in %s: %v\n", namespace, err) + continue + } + for _, event := range events.Items { + if event.InvolvedObject.Kind == "NetworkPolicy" && event.InvolvedObject.Name == policyName { + g.GinkgoWriter.Printf("event in %s: %s %s %s\n", namespace, event.Type, event.Reason, event.Message) + found = true + } + if event.Message != "" && (event.InvolvedObject.Name == policyName || event.InvolvedObject.Kind == "NetworkPolicy") { + g.GinkgoWriter.Printf("event in %s: %s %s %s\n", namespace, event.Type, event.Reason, event.Message) + found = true + } + } + } + if found { + return true, nil + } + g.GinkgoWriter.Printf("no NetworkPolicy events yet for %s (namespaces: %v)\n", policyName, namespaces) + return false, nil + }) + if !found { + g.GinkgoWriter.Printf("no NetworkPolicy events observed for %s (best-effort)\n", policyName) + } +} + +func logNetworkPolicySummary(label string, policy 
*networkingv1.NetworkPolicy) { + g.GinkgoWriter.Printf("networkpolicy %s namespace=%s name=%s podSelector=%v policyTypes=%v ingress=%d egress=%d\n", + label, + policy.Namespace, + policy.Name, + policy.Spec.PodSelector.MatchLabels, + policy.Spec.PolicyTypes, + len(policy.Spec.Ingress), + len(policy.Spec.Egress), + ) +} + +func logNetworkPolicyDetails(label string, policy *networkingv1.NetworkPolicy) { + g.GinkgoHelper() + g.GinkgoWriter.Printf("networkpolicy %s details:\n", label) + g.GinkgoWriter.Printf(" podSelector=%v policyTypes=%v\n", policy.Spec.PodSelector.MatchLabels, policy.Spec.PolicyTypes) + for i, rule := range policy.Spec.Ingress { + g.GinkgoWriter.Printf(" ingress[%d]: ports=%s from=%s\n", i, formatPorts(rule.Ports), formatPeers(rule.From)) + } + for i, rule := range policy.Spec.Egress { + g.GinkgoWriter.Printf(" egress[%d]: ports=%s to=%s\n", i, formatPorts(rule.Ports), formatPeers(rule.To)) + } +} + +func formatPorts(ports []networkingv1.NetworkPolicyPort) string { + if len(ports) == 0 { + return "[]" + } + out := make([]string, 0, len(ports)) + for _, p := range ports { + proto := "TCP" + if p.Protocol != nil { + proto = string(*p.Protocol) + } + if p.Port == nil { + out = append(out, fmt.Sprintf("%s:any", proto)) + continue + } + out = append(out, fmt.Sprintf("%s:%s", proto, p.Port.String())) + } + return fmt.Sprintf("[%s]", joinStrings(out)) +} + +func formatPeers(peers []networkingv1.NetworkPolicyPeer) string { + if len(peers) == 0 { + return "[]" + } + out := make([]string, 0, len(peers)) + for _, peer := range peers { + ns := formatSelector(peer.NamespaceSelector) + pod := formatSelector(peer.PodSelector) + if ns == "" && pod == "" { + out = append(out, "{}") + continue + } + out = append(out, fmt.Sprintf("ns=%s pod=%s", ns, pod)) + } + return fmt.Sprintf("[%s]", joinStrings(out)) +} + +func formatSelector(sel *metav1.LabelSelector) string { + if sel == nil { + return "" + } + if len(sel.MatchLabels) == 0 && len(sel.MatchExpressions) == 0 { + 
return "{}" + } + return fmt.Sprintf("labels=%v exprs=%v", sel.MatchLabels, sel.MatchExpressions) +} + +func joinStrings(items []string) string { + if len(items) == 0 { + return "" + } + out := items[0] + for i := 1; i < len(items); i++ { + out += ", " + items[i] + } + return out +} diff --git a/test/e2e/network_policy_enforcement.go b/test/e2e/network_policy_enforcement.go new file mode 100644 index 000000000..dc722b0ed --- /dev/null +++ b/test/e2e/network_policy_enforcement.go @@ -0,0 +1,634 @@ +package e2e + +import ( + "context" + "fmt" + "time" + + g "github.com/onsi/ginkgo/v2" + o "github.com/onsi/gomega" + + corev1 "k8s.io/api/core/v1" + networkingv1 "k8s.io/api/networking/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apimachinery/pkg/util/rand" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" + + e2e "github.com/openshift/cluster-authentication-operator/test/library" +) + +const ( + agnhostImage = "registry.k8s.io/e2e-test-images/agnhost:2.45" +) + +var _ = g.Describe("[sig-auth] authentication operator", func() { + g.It("[Operator][NetworkPolicy] should enforce NetworkPolicy allow/deny basics in a test namespace", func() { + testGenericNetworkPolicyEnforcement() + }) + g.It("[Operator][NetworkPolicy] should enforce auth NetworkPolicies", func() { + testAuthNetworkPolicyEnforcement() + }) + g.It("[Operator][NetworkPolicy] should enforce oauth-apiserver NetworkPolicies", func() { + testOAuthAPIServerNetworkPolicyEnforcement() + }) + g.It("[Operator][NetworkPolicy] should enforce authentication-operator NetworkPolicies", func() { + testAuthenticationOperatorNetworkPolicyEnforcement() + }) + g.It("[Operator][NetworkPolicy] should enforce cross-namespace ingress traffic", func() { + testCrossNamespaceIngressEnforcement() + }) + g.It("[Operator][NetworkPolicy] should block unauthorized namespace traffic", func() { + testUnauthorizedNamespaceBlocking() + }) +}) + +func 
testGenericNetworkPolicyEnforcement() { + kubeConfig := e2e.NewClientConfigForTest(g.GinkgoTB()) + kubeClient, err := kubernetes.NewForConfig(kubeConfig) + o.Expect(err).NotTo(o.HaveOccurred()) + + g.By("Creating a temporary namespace for policy enforcement checks") + nsName := e2e.NewTestNamespaceBuilder("np-enforcement-").Create(g.GinkgoTB(), kubeClient.CoreV1().Namespaces()) + defer func() { + g.GinkgoWriter.Printf("deleting test namespace %s\n", nsName) + _ = kubeClient.CoreV1().Namespaces().Delete(context.TODO(), nsName, metav1.DeleteOptions{}) + }() + + serverName := "np-server" + clientLabels := map[string]string{"app": "np-client"} + serverLabels := map[string]string{"app": "np-server"} + + g.GinkgoWriter.Printf("creating netexec server pod %s/%s\n", nsName, serverName) + serverPod := netexecPod(serverName, nsName, serverLabels, 8080) + _, err = kubeClient.CoreV1().Pods(nsName).Create(context.TODO(), serverPod, metav1.CreateOptions{}) + o.Expect(err).NotTo(o.HaveOccurred()) + o.Expect(waitForPodReady(kubeClient, nsName, serverName)).NotTo(o.HaveOccurred()) + + server, err := kubeClient.CoreV1().Pods(nsName).Get(context.TODO(), serverName, metav1.GetOptions{}) + o.Expect(err).NotTo(o.HaveOccurred()) + o.Expect(server.Status.PodIP).NotTo(o.BeEmpty()) + g.GinkgoWriter.Printf("server pod %s/%s ip=%s\n", nsName, serverName, server.Status.PodIP) + + g.By("Verifying allow-all when no policies select the pod") + g.GinkgoWriter.Printf("expecting allow from %s to %s:%d\n", nsName, server.Status.PodIP, 8080) + expectConnectivity(kubeClient, nsName, clientLabels, server.Status.PodIP, 8080, true) + + g.By("Applying default deny and verifying traffic is blocked") + g.GinkgoWriter.Printf("creating default-deny policy in %s\n", nsName) + _, err = kubeClient.NetworkingV1().NetworkPolicies(nsName).Create(context.TODO(), defaultDenyPolicy("default-deny", nsName), metav1.CreateOptions{}) + o.Expect(err).NotTo(o.HaveOccurred()) + g.GinkgoWriter.Printf("expecting deny from %s to 
%s to %s:%d\n", nsName, server.Status.PodIP, 8080) + expectConnectivity(kubeClient, nsName, clientLabels, server.Status.PodIP, 8080, false) + g.By("Adding ingress allow only and verifying traffic is still blocked") + g.GinkgoWriter.Printf("creating allow-ingress policy in %s\n", nsName) + _, err = kubeClient.NetworkingV1().NetworkPolicies(nsName).Create(context.TODO(), allowIngressPolicy("allow-ingress", nsName, serverLabels, clientLabels, 8080), metav1.CreateOptions{}) + o.Expect(err).NotTo(o.HaveOccurred()) + g.GinkgoWriter.Printf("expecting deny from %s to %s:%d (egress still blocked)\n", nsName, server.Status.PodIP, 8080) + expectConnectivity(kubeClient, nsName, clientLabels, server.Status.PodIP, 8080, false) + + g.By("Adding egress allow and verifying traffic is permitted") + g.GinkgoWriter.Printf("creating allow-egress policy in %s\n", nsName) + _, err = kubeClient.NetworkingV1().NetworkPolicies(nsName).Create(context.TODO(), allowEgressPolicy("allow-egress", nsName, clientLabels, serverLabels, 8080), metav1.CreateOptions{}) + o.Expect(err).NotTo(o.HaveOccurred()) + g.GinkgoWriter.Printf("expecting allow from %s to %s:%d\n", nsName, server.Status.PodIP, 8080) + expectConnectivity(kubeClient, nsName, clientLabels, server.Status.PodIP, 8080, true) +} + +func testAuthNetworkPolicyEnforcement() { + kubeConfig := e2e.NewClientConfigForTest(g.GinkgoTB()) + kubeClient, err := kubernetes.NewForConfig(kubeConfig) + o.Expect(err).NotTo(o.HaveOccurred()) + + namespace := "openshift-authentication" + clientLabels := map[string]string{"app": "oauth-openshift"} + serverLabels := map[string]string{"app": "oauth-openshift"} + + g.By("Creating oauth server test pods for allow/deny checks") + g.GinkgoWriter.Printf("creating auth server pods in %s\n", namespace) + allowedServerIP, cleanupAllowed := createServerPod(kubeClient, namespace, "np-auth-allowed", serverLabels, 6443) + defer cleanupAllowed() + deniedServerIP, cleanupDenied := createServerPod(kubeClient, namespace, "np-auth-denied", serverLabels, 12345) + defer cleanupDenied() + + g.By("Verifying allowed port 
6443") + g.GinkgoWriter.Printf("expecting allow from %s to %s:%d\n", namespace, allowedServerIP, 6443) + expectConnectivity(kubeClient, namespace, clientLabels, allowedServerIP, 6443, true) + g.By("Verifying denied port 12345") + g.GinkgoWriter.Printf("expecting deny from %s to %s:%d\n", namespace, deniedServerIP, 12345) + expectConnectivity(kubeClient, namespace, clientLabels, deniedServerIP, 12345, false) +} + +func testOAuthAPIServerNetworkPolicyEnforcement() { + kubeConfig := e2e.NewClientConfigForTest(g.GinkgoTB()) + kubeClient, err := kubernetes.NewForConfig(kubeConfig) + o.Expect(err).NotTo(o.HaveOccurred()) + + serverNamespace := "openshift-oauth-apiserver" + clientNamespace := "openshift-authentication" + clientLabels := map[string]string{"app": "oauth-openshift"} + oauthClientLabels := map[string]string{"app": "openshift-oauth-apiserver"} + oauthPolicy, err := kubeClient.NetworkingV1().NetworkPolicies(serverNamespace).Get(context.TODO(), "oauth-apiserver-networkpolicy", metav1.GetOptions{}) + o.Expect(err).NotTo(o.HaveOccurred()) + + g.By("Creating oauth-apiserver test pods for allow/deny checks") + g.GinkgoWriter.Printf("creating oauth-apiserver server pods in %s\n", serverNamespace) + allowedServerIP, cleanupAllowed := createServerPod(kubeClient, serverNamespace, "np-oauth-api-allowed", map[string]string{"app": "openshift-oauth-apiserver"}, 8443) + defer cleanupAllowed() + deniedServerIP, cleanupDenied := createServerPod(kubeClient, serverNamespace, "np-oauth-api-denied", map[string]string{"app": "openshift-oauth-apiserver"}, 12345) + defer cleanupDenied() + + g.By("Verifying allowed port 8443") + g.GinkgoWriter.Printf("expecting allow from %s to %s:%d\n", clientNamespace, allowedServerIP, 8443) + expectConnectivity(kubeClient, clientNamespace, clientLabels, allowedServerIP, 8443, true) + + g.By("Verifying denied port 12345") + g.GinkgoWriter.Printf("expecting deny from %s to %s:%d\n", clientNamespace, deniedServerIP, 12345) + 
expectConnectivity(kubeClient, clientNamespace, clientLabels, deniedServerIP, 12345, false) + + g.By("Verifying denied ports even from allowed namespace") + for _, port := range []int32{80, 443, 6443, 9090} { + g.GinkgoWriter.Printf("expecting deny from %s to %s:%d\n", clientNamespace, allowedServerIP, port) + expectConnectivity(kubeClient, clientNamespace, clientLabels, allowedServerIP, port, false) + } + + g.By("Verifying oauth-apiserver egress to DNS") + dnsSvc, err := kubeClient.CoreV1().Services("openshift-dns").Get(context.TODO(), "dns-default", metav1.GetOptions{}) + o.Expect(err).NotTo(o.HaveOccurred()) + dnsIP := dnsSvc.Spec.ClusterIP + g.GinkgoWriter.Printf("expecting allow from %s to DNS %s:53\n", serverNamespace, dnsIP) + expectConnectivity(kubeClient, serverNamespace, oauthClientLabels, dnsIP, 53, true) + + g.By("Verifying oauth-apiserver egress to etcd") + etcdSvc, err := kubeClient.CoreV1().Services("openshift-etcd").Get(context.TODO(), "etcd", metav1.GetOptions{}) + o.Expect(err).NotTo(o.HaveOccurred()) + etcdIP := etcdSvc.Spec.ClusterIP + etcdAllowed := egressAllowsNamespace(oauthPolicy, "openshift-etcd", 2379) + g.GinkgoWriter.Printf("expecting %s from %s to etcd %s:2379\n", boolToAllowDeny(etcdAllowed), serverNamespace, etcdIP) + logConnectivityBestEffort(kubeClient, serverNamespace, oauthClientLabels, etcdIP, 2379) +} + +func testAuthenticationOperatorNetworkPolicyEnforcement() { + kubeConfig := e2e.NewClientConfigForTest(g.GinkgoTB()) + kubeClient, err := kubernetes.NewForConfig(kubeConfig) + o.Expect(err).NotTo(o.HaveOccurred()) + + namespace := "openshift-authentication-operator" + serverLabels := map[string]string{"app": "authentication-operator"} + clientLabels := map[string]string{"app": "authentication-operator"} + policy, err := kubeClient.NetworkingV1().NetworkPolicies(namespace).Get(context.TODO(), "authentication-operator-networkpolicy", metav1.GetOptions{}) + o.Expect(err).NotTo(o.HaveOccurred()) + + g.By("Creating 
authentication-operator test pods for policy checks") + g.GinkgoWriter.Printf("creating auth-operator server pod in %s\n", namespace) + serverIP, cleanupServer := createServerPod(kubeClient, namespace, "np-auth-op-server", serverLabels, 8443) + defer cleanupServer() + + allowedFromSameNamespace := ingressAllowsFromNamespace(policy, namespace, clientLabels, 8443) + g.By("Verifying within-namespace traffic matches policy") + g.GinkgoWriter.Printf("expecting %s from same namespace to %s:%d\n", boolToAllowDeny(allowedFromSameNamespace), serverIP, 8443) + expectConnectivity(kubeClient, namespace, clientLabels, serverIP, 8443, allowedFromSameNamespace) + + g.By("Verifying cross-namespace traffic from monitoring is allowed") + g.GinkgoWriter.Printf("expecting allow from openshift-monitoring to %s:%d\n", serverIP, 8443) + expectConnectivity(kubeClient, "openshift-monitoring", map[string]string{"app.kubernetes.io/name": "prometheus"}, serverIP, 8443, true) + + g.By("Verifying unauthorized ports are denied") + g.GinkgoWriter.Printf("expecting deny from openshift-monitoring to %s:%d (unauthorized port)\n", serverIP, 12345) + expectConnectivity(kubeClient, "openshift-monitoring", map[string]string{"app.kubernetes.io/name": "prometheus"}, serverIP, 12345, false) +} + +func testCrossNamespaceIngressEnforcement() { + kubeConfig := e2e.NewClientConfigForTest(g.GinkgoTB()) + kubeClient, err := kubernetes.NewForConfig(kubeConfig) + o.Expect(err).NotTo(o.HaveOccurred()) + + g.By("Creating test server pods in auth namespaces") + authServerIP, cleanupAuthServer := createServerPod(kubeClient, "openshift-authentication", "np-auth-xns", map[string]string{"app": "oauth-openshift"}, 6443) + defer cleanupAuthServer() + oauthAPIServerIP, cleanupOAuthAPIServer := createServerPod(kubeClient, "openshift-oauth-apiserver", "np-oauth-api-xns", map[string]string{"app": "openshift-oauth-apiserver"}, 8443) + defer cleanupOAuthAPIServer() + authOperatorIP, cleanupAuthOperator := 
createServerPod(kubeClient, "openshift-authentication-operator", "np-auth-op-xns", map[string]string{"app": "authentication-operator"}, 8443) + defer cleanupAuthOperator() + + g.By("Testing cross-namespace ingress: auth-operator -> oauth-server:6443") + g.GinkgoWriter.Printf("expecting allow from openshift-authentication-operator to %s:6443\n", authServerIP) + expectConnectivity(kubeClient, "openshift-authentication-operator", map[string]string{"app": "authentication-operator"}, authServerIP, 6443, true) + + g.By("Testing cross-namespace ingress: auth-operator -> oauth-apiserver:8443") + g.GinkgoWriter.Printf("expecting allow from openshift-authentication-operator to %s:8443\n", oauthAPIServerIP) + expectConnectivity(kubeClient, "openshift-authentication-operator", map[string]string{"app": "authentication-operator"}, oauthAPIServerIP, 8443, true) + + g.By("Testing cross-namespace ingress: oauth-server -> oauth-apiserver:8443") + g.GinkgoWriter.Printf("expecting allow from openshift-authentication to %s:8443\n", oauthAPIServerIP) + expectConnectivity(kubeClient, "openshift-authentication", map[string]string{"app": "oauth-openshift"}, oauthAPIServerIP, 8443, true) + + g.By("Testing cross-namespace ingress: monitoring -> oauth-server:6443") + g.GinkgoWriter.Printf("expecting allow from openshift-monitoring to %s:6443\n", authServerIP) + expectConnectivity(kubeClient, "openshift-monitoring", map[string]string{"app.kubernetes.io/name": "prometheus"}, authServerIP, 6443, true) + + g.By("Testing cross-namespace ingress: monitoring -> oauth-apiserver:8443") + g.GinkgoWriter.Printf("expecting allow from openshift-monitoring to %s:8443\n", oauthAPIServerIP) + expectConnectivity(kubeClient, "openshift-monitoring", map[string]string{"app.kubernetes.io/name": "prometheus"}, oauthAPIServerIP, 8443, true) + + g.By("Testing cross-namespace ingress: monitoring -> auth-operator:8443") + g.GinkgoWriter.Printf("expecting allow from openshift-monitoring to %s:8443\n", authOperatorIP) + 
expectConnectivity(kubeClient, "openshift-monitoring", map[string]string{"app.kubernetes.io/name": "prometheus"}, authOperatorIP, 8443, true) + + g.By("Testing allow-all ingress: arbitrary namespace -> oauth-server:6443") + g.GinkgoWriter.Printf("expecting allow from any namespace to %s:6443 (oauth-proxy sidecars)\n", authServerIP) + expectConnectivity(kubeClient, "openshift-ingress", map[string]string{"test": "arbitrary-client"}, authServerIP, 6443, true) + + g.By("Testing denied cross-namespace: unauthorized namespace -> oauth-server on unauthorized port") + g.GinkgoWriter.Printf("expecting deny from openshift-ingress to %s:8080\n", authServerIP) + expectConnectivity(kubeClient, "openshift-ingress", map[string]string{"test": "arbitrary-client"}, authServerIP, 8080, false) + + g.By("Testing allow-all includes other auth components: oauth-apiserver -> oauth-server:6443") + g.GinkgoWriter.Printf("expecting allow from openshift-oauth-apiserver to %s:6443 (via allow-all rule)\n", authServerIP) + expectConnectivity(kubeClient, "openshift-oauth-apiserver", map[string]string{"app": "openshift-oauth-apiserver"}, authServerIP, 6443, true) + + g.By("Testing denied cross-namespace: wrong labels from allowed namespace") + g.GinkgoWriter.Printf("expecting deny from openshift-authentication (wrong labels) to %s:8443\n", oauthAPIServerIP) + expectConnectivity(kubeClient, "openshift-authentication", map[string]string{"app": "wrong-app"}, oauthAPIServerIP, 8443, false) +} + +func testUnauthorizedNamespaceBlocking() { + kubeConfig := e2e.NewClientConfigForTest(g.GinkgoTB()) + kubeClient, err := kubernetes.NewForConfig(kubeConfig) + o.Expect(err).NotTo(o.HaveOccurred()) + + g.By("Creating test server pods in auth namespaces") + authServerIP, cleanupAuthServer := createServerPod(kubeClient, "openshift-authentication", "np-auth-unauth", map[string]string{"app": "oauth-openshift"}, 6443) + defer cleanupAuthServer() + oauthAPIServerIP, cleanupOAuthAPIServer := 
createServerPod(kubeClient, "openshift-oauth-apiserver", "np-oauth-api-unauth", map[string]string{"app": "openshift-oauth-apiserver"}, 8443) + defer cleanupOAuthAPIServer() + authOperatorIP, cleanupAuthOperator := createServerPod(kubeClient, "openshift-authentication-operator", "np-auth-op-unauth", map[string]string{"app": "authentication-operator"}, 8443) + defer cleanupAuthOperator() + authOperatorPolicy, err := kubeClient.NetworkingV1().NetworkPolicies("openshift-authentication-operator").Get(context.TODO(), "authentication-operator-networkpolicy", metav1.GetOptions{}) + o.Expect(err).NotTo(o.HaveOccurred()) + + g.By("Testing allow-all rules: oauth-server:6443 (oauth-proxy sidecars)") + g.GinkgoWriter.Printf("expecting allow from default namespace to %s:6443 (oauth-proxy sidecar access)\n", authServerIP) + expectConnectivity(kubeClient, "default", map[string]string{"test": "any-pod"}, authServerIP, 6443, true) + + g.By("Testing allow-all rules: oauth-apiserver:8443 (kube-apiserver webhook/aggregated APIs)") + g.GinkgoWriter.Printf("expecting allow from default namespace to %s:8443 (kube-apiserver access)\n", oauthAPIServerIP) + expectConnectivity(kubeClient, "default", map[string]string{"test": "any-pod"}, oauthAPIServerIP, 8443, true) + + g.By("Testing strict blocking: unauthorized namespace -> auth-operator:8443") + defaultAllowed := ingressAllowsFromNamespace(authOperatorPolicy, "default", map[string]string{"test": "unauthorized"}, 8443) + g.GinkgoWriter.Printf("expecting %s from default to %s:8443\n", boolToAllowDeny(defaultAllowed), authOperatorIP) + expectConnectivity(kubeClient, "default", map[string]string{"test": "unauthorized"}, authOperatorIP, 8443, defaultAllowed) + + g.By("Testing strict blocking: openshift-etcd namespace -> auth-operator:8443") + etcdAllowed := ingressAllowsFromNamespace(authOperatorPolicy, "openshift-etcd", map[string]string{"test": "unauthorized"}, 8443) + g.GinkgoWriter.Printf("expecting %s from openshift-etcd to %s:8443\n", 
boolToAllowDeny(etcdAllowed), authOperatorIP) + expectConnectivity(kubeClient, "openshift-etcd", map[string]string{"test": "unauthorized"}, authOperatorIP, 8443, etcdAllowed) + + g.By("Testing port-based blocking: unauthorized port even from any namespace") + g.GinkgoWriter.Printf("expecting deny from default to %s:9999 (unauthorized port)\n", oauthAPIServerIP) + expectConnectivity(kubeClient, "default", map[string]string{"test": "any-pod"}, oauthAPIServerIP, 9999, false) + + g.By("Testing port-based blocking: unauthorized port on oauth-server") + g.GinkgoWriter.Printf("expecting deny from default to %s:9999 (unauthorized port)\n", authServerIP) + expectConnectivity(kubeClient, "default", map[string]string{"test": "any-pod"}, authServerIP, 9999, false) + + g.By("Testing label-based traffic from monitoring (best-effort)") + monitoringLabels := map[string]string{"app": "wrong-label"} + g.GinkgoWriter.Printf("checking connectivity from openshift-monitoring with wrong labels to %s:8443\n", authOperatorIP) + logConnectivityBestEffort(kubeClient, "openshift-monitoring", monitoringLabels, authOperatorIP, 8443) + + g.By("Testing label-based traffic from openshift-authentication (best-effort)") + authWrongLabels := map[string]string{"app": "wrong-label"} + g.GinkgoWriter.Printf("checking connectivity from openshift-authentication with wrong labels to %s:8443\n", oauthAPIServerIP) + logConnectivityBestEffort(kubeClient, "openshift-authentication", authWrongLabels, oauthAPIServerIP, 8443) + + g.By("Testing multiple unauthorized ports on oauth-server") + for _, port := range []int32{80, 443, 8080, 8443, 22, 3306} { + g.GinkgoWriter.Printf("expecting deny from default to %s:%d (unauthorized port)\n", authServerIP, port) + expectConnectivity(kubeClient, "default", map[string]string{"test": "any-pod"}, authServerIP, port, false) + } + + g.By("Testing cross-namespace traffic: oauth-server -> auth-operator (best-effort)") + g.GinkgoWriter.Printf("checking connectivity from 
openshift-authentication to %s:8443\n", authOperatorIP)
	logConnectivityBestEffort(kubeClient, "openshift-authentication", map[string]string{"app": "oauth-openshift"}, authOperatorIP, 8443)
}

// netexecPod builds a pod manifest that runs the agnhost "netexec" HTTP server
// on the given port. The security context is locked down (non-root, no
// privilege escalation, all capabilities dropped, RuntimeDefault seccomp) so
// the pod is admitted in namespaces enforcing restricted pod security.
func netexecPod(name, namespace string, labels map[string]string, port int32) *corev1.Pod {
	return &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: namespace,
			Labels:    labels,
		},
		Spec: corev1.PodSpec{
			SecurityContext: &corev1.PodSecurityContext{
				RunAsNonRoot:   boolptr(true),
				RunAsUser:      int64ptr(1001),
				SeccompProfile: &corev1.SeccompProfile{Type: corev1.SeccompProfileTypeRuntimeDefault},
			},
			Containers: []corev1.Container{
				{
					Name:  "netexec",
					Image: agnhostImage,
					SecurityContext: &corev1.SecurityContext{
						AllowPrivilegeEscalation: boolptr(false),
						Capabilities:             &corev1.Capabilities{Drop: []corev1.Capability{"ALL"}},
						RunAsNonRoot:             boolptr(true),
						RunAsUser:                int64ptr(1001),
					},
					Command: []string{"/agnhost"},
					Args:    []string{"netexec", fmt.Sprintf("--http-port=%d", port)},
					Ports: []corev1.ContainerPort{
						{ContainerPort: port},
					},
				},
			},
		},
	}
}

// createServerPod creates a netexec server pod, waits for it to become Ready,
// and returns its pod IP together with a cleanup function that deletes it.
func createServerPod(kubeClient kubernetes.Interface, namespace, name string, labels map[string]string, port int32) (string, func()) {
	g.GinkgoHelper()

	g.GinkgoWriter.Printf("creating server pod %s/%s port=%d labels=%v\n", namespace, name, port, labels)
	pod := netexecPod(name, namespace, labels, port)
	_, err := kubeClient.CoreV1().Pods(namespace).Create(context.TODO(), pod, metav1.CreateOptions{})
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(waitForPodReady(kubeClient, namespace, name)).NotTo(o.HaveOccurred())

	// Re-read the pod: the IP is only populated once the pod is running.
	created, err := kubeClient.CoreV1().Pods(namespace).Get(context.TODO(), name, metav1.GetOptions{})
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(created.Status.PodIP).NotTo(o.BeEmpty())
	g.GinkgoWriter.Printf("server pod %s/%s ip=%s\n", namespace, name, created.Status.PodIP)

	return created.Status.PodIP, func() {
		g.GinkgoWriter.Printf("deleting server pod %s/%s\n", namespace, name)
		// Best-effort cleanup; the namespace is long-lived, so a failed delete
		// only leaves a stray pod behind and must not fail the spec.
		_ = kubeClient.CoreV1().Pods(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{})
	}
}

// defaultDenyPolicy returns a NetworkPolicy selecting every pod in the
// namespace with both Ingress and Egress policy types and no allow rules,
// i.e. deny-all in both directions.
func defaultDenyPolicy(name, namespace string) *networkingv1.NetworkPolicy {
	return &networkingv1.NetworkPolicy{
		ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace},
		Spec: networkingv1.NetworkPolicySpec{
			PodSelector: metav1.LabelSelector{},
			PolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeIngress, networkingv1.PolicyTypeEgress},
		},
	}
}

// allowIngressPolicy returns a NetworkPolicy that allows TCP ingress on the
// given port to pods matching podLabels, but only from same-namespace pods
// matching fromLabels.
func allowIngressPolicy(name, namespace string, podLabels, fromLabels map[string]string, port int32) *networkingv1.NetworkPolicy {
	return &networkingv1.NetworkPolicy{
		ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace},
		Spec: networkingv1.NetworkPolicySpec{
			PodSelector: metav1.LabelSelector{MatchLabels: podLabels},
			Ingress: []networkingv1.NetworkPolicyIngressRule{
				{
					From: []networkingv1.NetworkPolicyPeer{
						{PodSelector: &metav1.LabelSelector{MatchLabels: fromLabels}},
					},
					Ports: []networkingv1.NetworkPolicyPort{
						{Port: &intstr.IntOrString{Type: intstr.Int, IntVal: port}, Protocol: protocolPtr(corev1.ProtocolTCP)},
					},
				},
			},
			PolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeIngress},
		},
	}
}

// allowEgressPolicy returns a NetworkPolicy that allows TCP egress on the
// given port from pods matching podLabels to same-namespace pods matching
// toLabels.
func allowEgressPolicy(name, namespace string, podLabels, toLabels map[string]string, port int32) *networkingv1.NetworkPolicy {
	return &networkingv1.NetworkPolicy{
		ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace},
		Spec: networkingv1.NetworkPolicySpec{
			PodSelector: metav1.LabelSelector{MatchLabels: podLabels},
			Egress: []networkingv1.NetworkPolicyEgressRule{
				{
					To: []networkingv1.NetworkPolicyPeer{
						{PodSelector: &metav1.LabelSelector{MatchLabels: toLabels}},
					},
					Ports: []networkingv1.NetworkPolicyPort{
						{Port: &intstr.IntOrString{Type: intstr.Int, IntVal: port}, Protocol: protocolPtr(corev1.ProtocolTCP)},
					},
				},
			},
			PolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeEgress},
		},
	}
}

// expectConnectivity polls until a freshly created client pod with the given
// labels can (shouldSucceed=true) or cannot (shouldSucceed=false) open a TCP
// connection to serverIP:port, failing the spec if the desired state is not
// observed within two minutes. Polling absorbs the propagation delay between
// a NetworkPolicy write and the CNI actually enforcing it.
func expectConnectivity(kubeClient kubernetes.Interface, namespace string, clientLabels map[string]string, serverIP string, port int32, shouldSucceed bool) {
	g.GinkgoHelper()

	// NOTE(review): wait.PollImmediate is deprecated upstream in favor of
	// PollUntilContextTimeout; kept here for consistency with the rest of
	// this file's polling helpers.
	err := wait.PollImmediate(5*time.Second, 2*time.Minute, func() (bool, error) {
		succeeded, err := runConnectivityCheck(kubeClient, namespace, clientLabels, serverIP, port)
		if err != nil {
			return false, err
		}
		return succeeded == shouldSucceed, nil
	})
	o.Expect(err).NotTo(o.HaveOccurred())
	g.GinkgoWriter.Printf("connectivity %s/%s:%d expected=%t\n", namespace, serverIP, port, shouldSucceed)
}

// logConnectivityBestEffort performs a single connectivity probe and only logs
// the outcome; it never fails the spec. Used where the expected result depends
// on cluster-specific policy content the test does not want to assert on.
func logConnectivityBestEffort(kubeClient kubernetes.Interface, namespace string, clientLabels map[string]string, serverIP string, port int32) {
	g.GinkgoHelper()

	succeeded, err := runConnectivityCheck(kubeClient, namespace, clientLabels, serverIP, port)
	if err != nil {
		g.GinkgoWriter.Printf("connectivity %s/%s:%d error: %v\n", namespace, serverIP, port, err)
		return
	}
	g.GinkgoWriter.Printf("connectivity %s/%s:%d succeeded=%t (best-effort)\n", namespace, serverIP, port, succeeded)
}

// runConnectivityCheck launches a one-shot agnhost "connect" pod with the
// given labels in namespace, waits for it to finish, and reports whether the
// TCP connection to serverIP:port succeeded (container exit code 0). The
// client pod is always deleted afterwards.
func runConnectivityCheck(kubeClient kubernetes.Interface, namespace string, labels map[string]string, serverIP string, port int32) (bool, error) {
	g.GinkgoHelper()

	// Random suffix so concurrent/retried probes in the same namespace do not
	// collide on the pod name.
	name := fmt.Sprintf("np-client-%s", rand.String(5))
	g.GinkgoWriter.Printf("creating client pod %s/%s to connect %s:%d\n", namespace, name, serverIP, port)
	pod := &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: namespace,
			Labels:    labels,
		},
		Spec: corev1.PodSpec{
			RestartPolicy: corev1.RestartPolicyNever,
			SecurityContext: &corev1.PodSecurityContext{
				RunAsNonRoot:   boolptr(true),
				RunAsUser:      int64ptr(1001),
				SeccompProfile: &corev1.SeccompProfile{Type: corev1.SeccompProfileTypeRuntimeDefault},
			},
			Containers: []corev1.Container{
				{
					Name:  "connect",
					Image: agnhostImage,
					SecurityContext: &corev1.SecurityContext{
						AllowPrivilegeEscalation: boolptr(false),
						Capabilities:             &corev1.Capabilities{Drop: []corev1.Capability{"ALL"}},
						RunAsNonRoot:             boolptr(true),
						RunAsUser:                int64ptr(1001),
					},
					Command: []string{"/agnhost"},
					Args: []string{
						"connect",
						"--protocol=tcp",
						"--timeout=5s",
						fmt.Sprintf("%s:%d", serverIP, port),
					},
				},
			},
		},
	}

	_, err := kubeClient.CoreV1().Pods(namespace).Create(context.TODO(), pod, metav1.CreateOptions{})
	if err != nil {
		return false, err
	}
	defer func() {
		_ = kubeClient.CoreV1().Pods(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{})
	}()

	if err := waitForPodCompletion(kubeClient, namespace, name); err != nil {
		return false, err
	}
	completed, err := kubeClient.CoreV1().Pods(namespace).Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		return false, err
	}
	if len(completed.Status.ContainerStatuses) == 0 {
		return false, fmt.Errorf("no container status recorded for pod %s", name)
	}
	// A pod can reach a terminal phase while its container never terminated
	// (e.g. image pull failure leaves the container Waiting); guard against a
	// nil Terminated state instead of panicking with a nil dereference.
	term := completed.Status.ContainerStatuses[0].State.Terminated
	if term == nil {
		return false, fmt.Errorf("container of pod %s has no terminated state", name)
	}
	g.GinkgoWriter.Printf("client pod %s/%s exitCode=%d\n", namespace, name, term.ExitCode)
	return term.ExitCode == 0, nil
}

// ingressAllowsFromNamespace predicts, from the policy spec alone, whether an
// ingress rule admits traffic on port from a pod with the given labels in the
// given namespace. It is a conservative approximation: matchExpressions and
// named ports are treated as non-matching (see nsMatch/podMatch/ruleAllowsPort).
func ingressAllowsFromNamespace(policy *networkingv1.NetworkPolicy, namespace string, labels map[string]string, port int32) bool {
	for _, rule := range policy.Spec.Ingress {
		if !ruleAllowsPort(rule.Ports, port) {
			continue
		}
		// A rule with no "from" peers allows traffic from everywhere.
		if len(rule.From) == 0 {
			return true
		}
		for _, peer := range rule.From {
			if peer.NamespaceSelector != nil {
				// Cross-namespace peer: both the namespace and pod selectors
				// must match.
				if nsMatch(peer.NamespaceSelector, namespace) && podMatch(peer.PodSelector, labels) {
					return true
				}
				continue
			}
			// No namespace selector: the peer only matches pods in the
			// policy's own namespace, so only the pod selector is evaluated.
			if podMatch(peer.PodSelector, labels) {
				return true
			}
		}
	}
	return false
}

// nsMatch reports whether a NetworkPolicy peer's namespaceSelector matches the
// given namespace. A nil selector is treated as matching (callers only invoke
// it for peers they already consider relevant). Per the NetworkPolicy API, an
// empty ({}) namespaceSelector selects ALL namespaces, so it matches too.
// Otherwise only the well-known kubernetes.io/metadata.name label is
// evaluated; other labels and matchExpressions are conservatively treated as
// non-matching.
func nsMatch(selector *metav1.LabelSelector, namespace string) bool {
	if selector == nil {
		return true
	}
	// Empty selector selects every namespace (allow-all-namespaces peer).
	if len(selector.MatchLabels) == 0 && len(selector.MatchExpressions) == 0 {
		return true
	}
	return selector.MatchLabels["kubernetes.io/metadata.name"] == namespace
}

// podMatch reports whether the pod selector matches the given labels. A nil or
// empty selector matches every pod. matchExpressions are ignored (conservative
// approximation — a selector using only expressions will appear to match).
func podMatch(selector *metav1.LabelSelector, labels map[string]string) bool {
	if selector == nil {
		return true
	}
	for key, value := range selector.MatchLabels {
		if labels[key] != value {
			return false
		}
	}
	return true
}

// ruleAllowsPort reports whether a rule's port list admits the given numeric
// port. An empty list matches all ports. A NetworkPolicyPort with a nil Port
// also matches all ports (protocol-only entry) per the NetworkPolicy API.
// Named (string) ports are conservatively treated as non-matching.
func ruleAllowsPort(ports []networkingv1.NetworkPolicyPort, port int32) bool {
	if len(ports) == 0 {
		return true
	}
	for _, p := range ports {
		// nil Port means "every port" for this entry's protocol.
		if p.Port == nil {
			return true
		}
		if p.Port.Type == intstr.Int && p.Port.IntVal == port {
			return true
		}
	}
	return false
}

// egressAllowsNamespace predicts whether any egress rule in the policy admits
// traffic on port towards the given namespace. Peer pod selectors are ignored
// (conservative approximation); see nsMatch/ruleAllowsPort for the selector
// semantics applied.
func egressAllowsNamespace(policy *networkingv1.NetworkPolicy, namespace string, port int32) bool {
	for _, rule := range policy.Spec.Egress {
		if !ruleAllowsPort(rule.Ports, port) {
			continue
		}
		// A rule with no "to" peers allows traffic to everywhere.
		if len(rule.To) == 0 {
			return true
		}
		for _, peer := range rule.To {
			if peer.NamespaceSelector != nil && nsMatch(peer.NamespaceSelector, namespace) {
				return true
			}
		}
	}
	return false
}

// boolToAllowDeny renders an expected-connectivity bool as "allow"/"deny" for
// log messages.
func boolToAllowDeny(allow bool) string {
	if allow {
		return "allow"
	}
	return "deny"
}

// waitForPodReady blocks until the pod is Running with a True Ready condition,
// or returns the poll timeout error after two minutes.
func waitForPodReady(kubeClient kubernetes.Interface, namespace, name string) error {
	return wait.PollImmediate(2*time.Second, 2*time.Minute, func() (bool, error) {
		pod, err := kubeClient.CoreV1().Pods(namespace).Get(context.TODO(), name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		if pod.Status.Phase != corev1.PodRunning {
			return false, nil
		}
		for _, cond := range pod.Status.Conditions {
			if cond.Type == corev1.PodReady && cond.Status == corev1.ConditionTrue {
				return true, nil
			}
		}
		return false, nil
	})
}

// waitForPodCompletion blocks until the pod reaches a terminal phase
// (Succeeded or Failed), or returns the poll timeout error after two minutes.
func waitForPodCompletion(kubeClient kubernetes.Interface, namespace, name string) error {
	return wait.PollImmediate(2*time.Second, 2*time.Minute, func() (bool, error) {
		pod, err := kubeClient.CoreV1().Pods(namespace).Get(context.TODO(), name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		return pod.Status.Phase == corev1.PodSucceeded || pod.Status.Phase == corev1.PodFailed, nil
	})
}

// protocolPtr returns a pointer to the given protocol value.
func protocolPtr(protocol corev1.Protocol) *corev1.Protocol {
	return &protocol
}

// boolptr returns a pointer to the given bool.
func boolptr(value bool) *bool {
	return &value
}

// int64ptr returns a pointer to the given int64.
func int64ptr(value int64) *int64 {
	return &value
}