diff --git a/Makefile b/Makefile index c829a32d..5a79dc9c 100644 --- a/Makefile +++ b/Makefile @@ -190,6 +190,11 @@ test-scale-dnspolicy: kube-burner ## Run DNSPolicy scale tests. cd scale_test/dnspolicy && $(KUBE_BURNER) init -c ${KUBEBURNER_WORKLOAD} --log-level debug +# Include local environment setup modules +# Load variables first, then all other modules +include ./make/vars.mk +include $(filter-out ./make/vars.mk,$(wildcard ./make/*.mk)) + ##@ Build Dependencies ## Location to install dependencies to diff --git a/README.md b/README.md index 75606569..18a7ec24 100644 --- a/README.md +++ b/README.md @@ -32,6 +32,59 @@ make poetry > - [Kuadrant Helm Charts](https://github.com/Kuadrant/helm-charts) for any Kubernetes cluster > - [Deploying Kuadrant via OLM](https://github.com/Kuadrant/helm-charts-olm/blob/main/README.md) for OpenShift (recommended as it also deploys testing tools) +## Local Kind Cluster Setup + +For local development and testing, you can set up a complete Kuadrant environment using Kind (Kubernetes in Docker). + +### Prerequisites +* [Kind](https://kind.sigs.k8s.io/docs/user/quick-start/#installation) +* [Helm](https://helm.sh/docs/intro/install/) +* [jq](https://jqlang.github.io/jq/download/) (JSON processor) +* **Red Hat Registry credentials** (optional but recommended for testing tools) + - Username: Your Red Hat account username + - Password: Your Red Hat registry token (from [console.redhat.com/openshift/downloads](https://console.redhat.com/openshift/downloads)) + - **Note:** Without these credentials, testing tools (Keycloak, Mockserver, etc.) 
won't be deployed, but core Kuadrant functionality will still work + +### Quick Start + +Set up a complete local environment with one command: + +```bash +# Optional: Red Hat registry credentials (for testing tools like Keycloak) +# (if not provided, tools won't be deployed but core functionality will work) +export RH_REGISTRY_USERNAME= +export RH_REGISTRY_PASSWORD= + +# Optional: AWS credentials for DNS testing +# (if not provided, the secret won't be created and DNS tests will be skipped) +export AWS_ACCESS_KEY_ID= +export AWS_SECRET_ACCESS_KEY= +export AWS_REGION=us-east-1 +export AWS_BASE_DOMAIN=test.example.com + +# Run the setup (defaults to Istio gateway) +make local-setup + +# Or specify EnvoyGateway +GATEWAYAPI_PROVIDER=envoygateway make local-setup +``` + +This will: +1. Create a Kind cluster named `kuadrant-local` +2. Install metrics-server and MetalLB (LoadBalancer support) +3. Install Gateway API CRDs +4. Install cert-manager and create a self-signed ClusterIssuer +5. Install Istio or EnvoyGateway (based on `GATEWAYAPI_PROVIDER`) +6. Create test namespaces (`kuadrant`, `kuadrant2`) +7. Create AWS credentials secret (only if AWS credentials are provided) +8. Deploy Kuadrant Operator and Kuadrant CR +9. Deploy testing tools (only if RH_REGISTRY credentials are provided) - Keycloak, Mockserver, etc. + +**Cleanup:** +```bash +make local-cleanup # Delete the Kind cluster +``` + ## Configuration The Kuadrant testsuite uses [Dynaconf](https://www.dynaconf.com/) for configuration. diff --git a/make/dependencies.mk b/make/dependencies.mk new file mode 100644 index 00000000..c735b50d --- /dev/null +++ b/make/dependencies.mk @@ -0,0 +1,86 @@ + +##@ Core Dependencies + +.PHONY: install-metrics-server +install-metrics-server: ## Install metrics-server + @echo "Installing metrics-server..." 
+ kubectl apply -f https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml + kubectl patch deployment metrics-server -n kube-system --type=json -p '[{"op":"add","path":"/spec/template/spec/containers/0/args/-","value":"--kubelet-insecure-tls"}]' + @echo "metrics-server installed" + +.PHONY: install-metallb +install-metallb: ## Install MetalLB for LoadBalancer services + @echo "Installing MetalLB $(METALLB_VERSION)..." + kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/$(METALLB_VERSION)/config/manifests/metallb-native.yaml + kubectl wait --namespace metallb-system --for=condition=Available deployment/controller --timeout=$(METALLB_TIMEOUT) + kubectl wait --namespace metallb-system --for=condition=ready pod --selector=component=controller --timeout=$(METALLB_TIMEOUT) + @echo "Configuring MetalLB IP pool..." + @printf '%s\n' \ + 'apiVersion: metallb.io/v1beta1' \ + 'kind: IPAddressPool' \ + 'metadata:' \ + ' name: default' \ + ' namespace: metallb-system' \ + 'spec:' \ + ' addresses:' \ + ' - 172.18.255.200-172.18.255.250' \ + | kubectl apply -f - + @printf '%s\n' \ + 'apiVersion: metallb.io/v1beta1' \ + 'kind: L2Advertisement' \ + 'metadata:' \ + ' name: default' \ + ' namespace: metallb-system' \ + | kubectl apply -f - + @echo "MetalLB installed with IP pool 172.18.255.200-172.18.255.250" + +.PHONY: gateway-api-install +gateway-api-install: ## Install Gateway API CRDs + @echo "Installing Gateway API $(GATEWAY_API_VERSION)..." + kubectl apply -f https://github.com/kubernetes-sigs/gateway-api/releases/download/$(GATEWAY_API_VERSION)/standard-install.yaml + @echo "Gateway API CRDs installed" + +.PHONY: install-cert-manager +install-cert-manager: ## Install cert-manager + @echo "Installing cert-manager $(CERT_MANAGER_VERSION)..." 
+ kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/$(CERT_MANAGER_VERSION)/cert-manager.yaml + kubectl wait --namespace cert-manager --for=condition=Available deployment/cert-manager --timeout=$(CERT_MANAGER_TIMEOUT) + kubectl wait --namespace cert-manager --for=condition=Available deployment/cert-manager-webhook --timeout=$(CERT_MANAGER_TIMEOUT) + kubectl wait --namespace cert-manager --for=condition=ready pod --selector=app.kubernetes.io/name=webhook --timeout=$(CERT_MANAGER_TIMEOUT) + @echo "cert-manager installed" + +.PHONY: create-cluster-issuer +create-cluster-issuer: ## Create self-signed ClusterIssuer for TLS testing + @echo "Creating self-signed ClusterIssuer..." + @printf '%s\n' \ + 'apiVersion: cert-manager.io/v1' \ + 'kind: ClusterIssuer' \ + 'metadata:' \ + ' name: kuadrant-qe-issuer' \ + 'spec:' \ + ' selfSigned: {}' \ + | kubectl apply -f - + @echo "ClusterIssuer 'kuadrant-qe-issuer' created" + +.PHONY: create-aws-credentials +create-aws-credentials: ## Create AWS credentials secret for DNS testing (only if credentials provided) + @if [ -n "$(AWS_ACCESS_KEY_ID)" ] && [ -n "$(AWS_SECRET_ACCESS_KEY)" ] && [ -n "$(AWS_REGION)" ] && [ -n "$(AWS_BASE_DOMAIN)" ]; then \ + echo "Creating AWS credentials secret..."; \ + printf '%s\n' \ + 'apiVersion: v1' \ + 'kind: Secret' \ + 'metadata:' \ + ' name: aws-credentials' \ + ' namespace: kuadrant' \ + ' annotations:' \ + ' base_domain: $(AWS_BASE_DOMAIN)' \ + 'stringData:' \ + ' AWS_ACCESS_KEY_ID: $(AWS_ACCESS_KEY_ID)' \ + ' AWS_REGION: $(AWS_REGION)' \ + ' AWS_SECRET_ACCESS_KEY: $(AWS_SECRET_ACCESS_KEY)' \ + 'type: kuadrant.io/aws' \ + | kubectl apply -f -; \ + echo "AWS credentials secret created in kuadrant namespace"; \ + else \ + echo "⏭️ Skipping AWS credentials secret (requires AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, AWS_REGION, and AWS_BASE_DOMAIN)"; \ + fi diff --git a/make/envoygateway.mk b/make/envoygateway.mk new file mode 100644 index 00000000..efaab26f --- /dev/null 
+++ b/make/envoygateway.mk @@ -0,0 +1,11 @@ + +##@ EnvoyGateway + +.PHONY: envoygateway-install +envoygateway-install: ## Install EnvoyGateway + @echo "Installing EnvoyGateway..." + helm install eg oci://docker.io/envoyproxy/gateway-helm --version $(ENVOYGATEWAY_VERSION) \ + --create-namespace \ + --namespace envoy-gateway-system \ + --wait + @echo "EnvoyGateway installed" diff --git a/make/istio.mk b/make/istio.mk new file mode 100644 index 00000000..81a6d14d --- /dev/null +++ b/make/istio.mk @@ -0,0 +1,30 @@ + +##@ Istio + +.PHONY: istio-install +istio-install: ## Install Istio via SAIL operator + @echo "Installing Sail Operator $(SAIL_OPERATOR_VERSION)..." + helm repo add sail-operator https://istio-ecosystem.github.io/sail-operator --force-update + helm install sail-operator \ + --create-namespace \ + --namespace istio-system \ + --wait \ + --timeout=$(HELM_TIMEOUT) \ + sail-operator/sail-operator \ + --version $(SAIL_OPERATOR_VERSION) + @echo "Creating Istio CR..." + @printf '%s\n' \ + 'apiVersion: sailoperator.io/v1' \ + 'kind: Istio' \ + 'metadata:' \ + ' name: default' \ + 'spec:' \ + ' namespace: istio-system' \ + ' updateStrategy:' \ + ' type: InPlace' \ + ' values:' \ + ' pilot:' \ + ' autoscaleMin: 2' \ + ' version: $(ISTIO_VERSION)' \ + | kubectl apply -f - + @echo "Istio $(ISTIO_VERSION) installed via SAIL" diff --git a/make/kind.mk b/make/kind.mk new file mode 100644 index 00000000..f034bf66 --- /dev/null +++ b/make/kind.mk @@ -0,0 +1,12 @@ + +##@ Kind Cluster + +.PHONY: kind-create-cluster +kind-create-cluster: ## Create kind cluster + @echo "Creating kind cluster '$(KIND_CLUSTER_NAME)'..." + @kind create cluster --name $(KIND_CLUSTER_NAME) || echo "Cluster already exists" + +.PHONY: kind-delete-cluster +kind-delete-cluster: ## Delete kind cluster + @echo "Deleting kind cluster '$(KIND_CLUSTER_NAME)'..." 
+ @kind delete cluster --name $(KIND_CLUSTER_NAME) || true diff --git a/make/kuadrant.mk b/make/kuadrant.mk new file mode 100644 index 00000000..0f4c746b --- /dev/null +++ b/make/kuadrant.mk @@ -0,0 +1,87 @@ + +##@ Kuadrant + +.PHONY: create-test-namespaces +create-test-namespaces: ## Create namespaces for testing + @echo "Creating test namespaces..." + kubectl create namespace kuadrant || true + kubectl create namespace kuadrant2 || true + @echo "Test namespaces created" + +.PHONY: deploy-kuadrant-operator +deploy-kuadrant-operator: ## Deploy Kuadrant Operator (via Helm by default, or custom image) +ifneq ($(KUADRANT_OPERATOR_IMAGE),) + @echo "Installing Kuadrant Operator from custom image: $(KUADRANT_OPERATOR_IMAGE)" + $(MAKE) deploy-kuadrant-operator-local +else + @echo "Installing Kuadrant Operator $(KUADRANT_OPERATOR_VERSION) from Helm..." + helm repo add kuadrant https://kuadrant.io/helm-charts/ --force-update + $(if $(filter latest,$(KUADRANT_OPERATOR_VERSION)), \ + helm install kuadrant-operator kuadrant/kuadrant-operator --create-namespace --namespace $(KUADRANT_NAMESPACE), \ + helm install kuadrant-operator kuadrant/kuadrant-operator --version $(KUADRANT_OPERATOR_VERSION) --create-namespace --namespace $(KUADRANT_NAMESPACE)) + kubectl -n $(KUADRANT_NAMESPACE) wait --timeout=$(KUBECTL_TIMEOUT) --for=condition=Available deployments --all + $(MAKE) patch-kuadrant-operator-env + @echo "Kuadrant Operator $(KUADRANT_OPERATOR_VERSION) installed" +endif + +.PHONY: deploy-kuadrant-operator-local +deploy-kuadrant-operator-local: ## Deploy Kuadrant Operator from local build/image + @if [ -z "$(KUADRANT_OPERATOR_IMAGE)" ]; then \ + echo "ERROR: KUADRANT_OPERATOR_IMAGE not set"; \ + echo "Set KUADRANT_OPERATOR_IMAGE=your-image:tag"; \ + exit 1; \ + fi + @echo "Loading image into kind cluster..." + kind load docker-image $(KUADRANT_OPERATOR_IMAGE) --name $(KIND_CLUSTER_NAME) + @echo "Deploying operator with image $(KUADRANT_OPERATOR_IMAGE)..." 
+ kubectl create namespace $(KUADRANT_NAMESPACE) || true + kubectl apply -k https://github.com/kuadrant/kuadrant-operator/config/crd + @if [ ! -d "/tmp/kuadrant-operator-deploy" ]; then \ + cd /tmp && git clone --depth=1 https://github.com/kuadrant/kuadrant-operator.git kuadrant-operator-deploy; \ + else \ + cd /tmp/kuadrant-operator-deploy && git pull; \ + fi + cd /tmp/kuadrant-operator-deploy/config/manager && \ + kustomize edit set image controller=$(KUADRANT_OPERATOR_IMAGE) && \ + kustomize build ../deploy | kubectl apply --server-side -f - + kubectl -n $(KUADRANT_NAMESPACE) wait --timeout=$(KUBECTL_TIMEOUT) --for=condition=Available deployments --all + $(MAKE) patch-kuadrant-operator-env + @echo "Kuadrant Operator deployed from image $(KUADRANT_OPERATOR_IMAGE)" + +.PHONY: patch-kuadrant-operator-env +patch-kuadrant-operator-env: ## Patch Kuadrant Operator deployment with custom env vars +ifneq ($(KUADRANT_OPERATOR_ENV_VARS),) + @echo "Patching Kuadrant Operator with environment variables..." 
+ @EXISTING_ENV=$$(kubectl get deployment kuadrant-operator-controller-manager -n $(KUADRANT_NAMESPACE) -o jsonpath='{.spec.template.spec.containers[0].env}'); \ + NEW_ENV='['; \ + IFS=',' read -ra PAIRS <<< "$(KUADRANT_OPERATOR_ENV_VARS)"; \ + for i in "$${!PAIRS[@]}"; do \ + PAIR="$${PAIRS[$$i]}"; \ + NAME=$$(echo "$$PAIR" | cut -d'=' -f1); \ + VALUE=$$(echo "$$PAIR" | cut -d'=' -f2-); \ + [ $$i -gt 0 ] && NEW_ENV="$$NEW_ENV,"; \ + NEW_ENV="$$NEW_ENV{\"name\":\"$$NAME\",\"value\":\"$$VALUE\"}"; \ + done; \ + NEW_ENV="$$NEW_ENV]"; \ + MERGED_ENV=$$(echo "$$EXISTING_ENV$$NEW_ENV" | jq -s '.[0] + .[1] | unique_by(.name)'); \ + kubectl patch deployment kuadrant-operator-controller-manager -n $(KUADRANT_NAMESPACE) \ + --type=json -p="[{\"op\":\"replace\",\"path\":\"/spec/template/spec/containers/0/env\",\"value\":$$MERGED_ENV}]"; \ + kubectl -n $(KUADRANT_NAMESPACE) rollout status deployment/kuadrant-operator-controller-manager --timeout=$(KUBECTL_TIMEOUT) + @echo "Kuadrant Operator patched with env vars" +else + @echo "No custom env vars specified (KUADRANT_OPERATOR_ENV_VARS not set)" +endif + +.PHONY: deploy-kuadrant-cr +deploy-kuadrant-cr: ## Deploy Kuadrant CR + @echo "Creating Kuadrant CR..." 
+ @printf '%s\n' \ + 'apiVersion: kuadrant.io/v1beta1' \ + 'kind: Kuadrant' \ + 'metadata:' \ + ' name: kuadrant-sample' \ + ' namespace: $(KUADRANT_NAMESPACE)' \ + 'spec: {}' \ + | kubectl apply -f - + kubectl wait kuadrant/kuadrant-sample --for=condition=Ready=True -n $(KUADRANT_NAMESPACE) --timeout=$(KUADRANT_CR_TIMEOUT) + @echo "Kuadrant CR ready" diff --git a/make/local-setup.mk b/make/local-setup.mk new file mode 100644 index 00000000..91e05eac --- /dev/null +++ b/make/local-setup.mk @@ -0,0 +1,34 @@ + +##@ Local Environment Setup + +.PHONY: local-setup +local-setup: ## Complete local environment setup (kind cluster + all dependencies) + @# Validate GATEWAYAPI_PROVIDER + @if [ "$(GATEWAYAPI_PROVIDER)" != "istio" ] && [ "$(GATEWAYAPI_PROVIDER)" != "envoygateway" ]; then \ + echo "ERROR: Invalid GATEWAYAPI_PROVIDER='$(GATEWAYAPI_PROVIDER)'"; \ + echo "Valid values: istio, envoygateway"; \ + exit 1; \ + fi + $(MAKE) kind-delete-cluster + $(MAKE) kind-create-cluster + $(MAKE) install-metrics-server + $(MAKE) install-metallb + $(MAKE) gateway-api-install + $(MAKE) install-cert-manager + $(MAKE) create-cluster-issuer + $(MAKE) $(GATEWAYAPI_PROVIDER)-install + $(MAKE) create-test-namespaces + $(MAKE) create-aws-credentials + $(MAKE) deploy-kuadrant-operator + $(MAKE) deploy-kuadrant-cr + $(MAKE) deploy-testsuite-tools + @echo "" + @echo "Local environment setup complete!" + @echo " Cluster: $(KIND_CLUSTER_NAME)" + @echo " Gateway Provider: $(GATEWAYAPI_PROVIDER)" + @echo "" + @echo "Run tests with: make kuadrant" + +.PHONY: local-cleanup +local-cleanup: ## Delete local kind cluster + $(MAKE) kind-delete-cluster diff --git a/make/tools.mk b/make/tools.mk new file mode 100644 index 00000000..8d2fd322 --- /dev/null +++ b/make/tools.mk @@ -0,0 +1,29 @@ + +##@ Testsuite Tools + +.PHONY: deploy-testsuite-tools +deploy-testsuite-tools: ## Deploy testsuite tools (Keycloak, etc.) 
- requires RH_REGISTRY credentials + @if [ -n "$(RH_REGISTRY_USERNAME)" ] && [ -n "$(RH_REGISTRY_PASSWORD)" ]; then \ + echo "Deploying testsuite tools..."; \ + kubectl create namespace tools || true; \ + echo "Creating Red Hat registry secret..."; \ + kubectl -n tools create secret docker-registry redhat-registry-secret \ + --docker-server=registry.redhat.io \ + --docker-username="$(RH_REGISTRY_USERNAME)" \ + --docker-password="$(RH_REGISTRY_PASSWORD)" \ + --dry-run=client -o yaml | kubectl apply -f -; \ + kubectl -n tools patch serviceaccount default \ + -p '{"imagePullSecrets": [{"name": "redhat-registry-secret"}]}'; \ + helm repo add kuadrant-olm https://kuadrant.io/helm-charts-olm --force-update; \ + helm repo update; \ + helm install --namespace tools \ + --set=tools.keycloak.keycloakProvider=deployment \ + --set=tools.coredns.enable=false \ + --debug \ + --wait \ + --timeout=$(TOOLS_TIMEOUT) \ + tools kuadrant-olm/tools-instances; \ + echo "Testsuite tools deployed"; \ + else \ + echo "⏭️ Skipping testsuite tools deployment (requires RH_REGISTRY_USERNAME and RH_REGISTRY_PASSWORD)"; \ + fi diff --git a/make/vars.mk b/make/vars.mk new file mode 100644 index 00000000..de0052a7 --- /dev/null +++ b/make/vars.mk @@ -0,0 +1,44 @@ + +##@ Configuration Variables + +# Kind cluster configuration +KIND_CLUSTER_NAME ?= kuadrant-local + +# Gateway provider (istio or envoygateway) +GATEWAYAPI_PROVIDER ?= istio + +# Version pinning +ISTIO_VERSION ?= v1.26-latest +SAIL_OPERATOR_VERSION ?= v1.26-latest +ENVOYGATEWAY_VERSION ?= v1.2.4 +CERT_MANAGER_VERSION ?= v1.18.2 +METALLB_VERSION ?= v0.15.2 +GATEWAY_API_VERSION ?= v1.3.0 + +# Kuadrant configuration +KUADRANT_NAMESPACE ?= kuadrant-system +KUADRANT_OPERATOR_VERSION ?= latest +KUADRANT_OPERATOR_IMAGE ?= + +# Kuadrant Operator environment variables +# Default: Service timeouts for faster test execution +# Override with your own: KUADRANT_OPERATOR_ENV_VARS="LOG_LEVEL=debug,..." 
+KUADRANT_OPERATOR_ENV_VARS ?= AUTH_SERVICE_TIMEOUT=1000ms,RATELIMIT_SERVICE_TIMEOUT=1000ms,RATELIMIT_CHECK_SERVICE_TIMEOUT=1000ms,RATELIMIT_REPORT_SERVICE_TIMEOUT=1000ms + +# Red Hat registry credentials +RH_REGISTRY_USERNAME ?= +RH_REGISTRY_PASSWORD ?= + +# AWS credentials for DNS testing (optional - secret only created if provided) +AWS_ACCESS_KEY_ID ?= +AWS_SECRET_ACCESS_KEY ?= +AWS_REGION ?= +AWS_BASE_DOMAIN ?= + +# Timeout configurations (duration strings, e.g. 300s, 10m0s) +KUBECTL_TIMEOUT ?= 300s +CERT_MANAGER_TIMEOUT ?= 120s +KUADRANT_CR_TIMEOUT ?= 120s +METALLB_TIMEOUT ?= 90s +HELM_TIMEOUT ?= 300s +TOOLS_TIMEOUT ?= 10m0s diff --git a/testsuite/core/__init__.py b/testsuite/core/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/testsuite/core/metrics_factory.py b/testsuite/core/metrics_factory.py new file mode 100644 index 00000000..ec615d6f --- /dev/null +++ b/testsuite/core/metrics_factory.py @@ -0,0 +1,74 @@ +"""Factory for creating GatewayMetrics instances based on exposer type.""" + +from typing import TYPE_CHECKING, Literal + +from testsuite.config import settings +from testsuite.gateway import Exposer +from testsuite.gateway.exposers import LoadBalancerServiceExposer, OpenShiftExposer +from testsuite.gateway.metrics import GatewayMetrics, LoadBalancerGatewayMetrics, OpenShiftGatewayMetrics +from testsuite.kubernetes.openshift.route import OpenshiftRoute +from testsuite.kubernetes.service import Service, ServicePort + +if TYPE_CHECKING: + from testsuite.gateway.gateway_api.gateway import KuadrantGateway + + +def _create_metrics_service(gateway: "KuadrantGateway", service_type: Literal["ClusterIP", "LoadBalancer"]) -> Service: + """ + Helper function to create a metrics service. 
+ + Args: + gateway: The gateway to create metrics service for + service_type: Type of service ("ClusterIP" or "LoadBalancer") + + Returns: + Service: The created metrics service + """ + metrics_service = Service.create_instance( + gateway.cluster, + gateway.metrics_service_name, + selector={"gateway.networking.k8s.io/gateway-name": gateway.name()}, + ports=[ServicePort(name="metrics", port=15020, targetPort=15020)], + labels=gateway.model.metadata.get("labels", {}), + service_type=service_type, + ) + metrics_service.commit() + return metrics_service + + +def create_gateway_metrics(exposer: "Exposer", gateway: "KuadrantGateway") -> "GatewayMetrics": + """ + Factory function to create the appropriate GatewayMetrics implementation + based on the exposer type. + + Args: + exposer: The exposer instance used for the gateway + gateway: The gateway to expose metrics for + + Returns: + GatewayMetrics: The appropriate metrics implementation + """ + if isinstance(exposer, OpenShiftExposer): + # OpenShift: Create ClusterIP Service + Route + metrics_service = _create_metrics_service(gateway, "ClusterIP") + + metrics_route = OpenshiftRoute.create_instance( + gateway.cluster, + gateway.metrics_service_name, + gateway.metrics_service_name, + target_port="metrics", + tls=False, + ) + metrics_route.commit() + + return OpenShiftGatewayMetrics(metrics_route, metrics_service) + + if isinstance(exposer, LoadBalancerServiceExposer): + # LoadBalancer: Create LoadBalancer Service + metrics_service = _create_metrics_service(gateway, "LoadBalancer") + metrics_service.wait_for_ready(slow_loadbalancers=settings["control_plane"]["slow_loadbalancers"]) + + return LoadBalancerGatewayMetrics(gateway, metrics_service) + + # For other exposers (DNSPolicyExposer, etc.), metrics are not supported + raise NotImplementedError(f"Metrics not supported for exposer type: {type(exposer).__name__}") diff --git a/testsuite/core/topology.py b/testsuite/core/topology.py new file mode 100644 index 
00000000..a5c965e2 --- /dev/null +++ b/testsuite/core/topology.py @@ -0,0 +1,493 @@ +"""Gateway API Topology Registry for tracking resources and their relationships""" + +import inspect +from functools import wraps +from typing import Any, Callable, Dict, List, Optional, Set, Union + +from testsuite.gateway.gateway_api import GatewayAPIKind, PolicyKind + + +class GlobalTopologySingleton: + """Holder class for global topology singleton to avoid global statement warnings""" + + registry: Optional["TopologyRegistry"] = None + + +class TopologyNode: + """Represents a node in the Gateway API topology graph""" + + class Relationships: + """Encapsulates all relationship data for a topology node""" + + def __init__(self) -> None: + self.targets: Set[str] = set() # Resources this node targets + self.targeted_by: Set[str] = set() # Resources targeting this node + self.children: Set[str] = set() # Child resources + self.parent: Optional[str] = None # Parent resource + + def __init__(self, kind: Optional[Union[GatewayAPIKind, PolicyKind]], resource: Any, name: str) -> None: + self.kind = kind + self.resource = resource # The actual Gateway/Route/Policy object + self.name = name + self._relationships = self.Relationships() + self.metadata: Dict = {} # Arbitrary metadata storage + + @property + def targets(self) -> Set[str]: + """Resources this node targets""" + return self._relationships.targets + + @property + def targeted_by(self) -> Set[str]: + """Resources targeting this node""" + return self._relationships.targeted_by + + @property + def children(self) -> Set[str]: + """Child resources""" + return self._relationships.children + + @property + def parent(self) -> Optional[str]: + """Parent resource""" + return self._relationships.parent + + @parent.setter + def parent(self, value: Optional[str]): + """Set parent resource""" + self._relationships.parent = value + + def __repr__(self): + return f"TopologyNode({self.kind.value if self.kind else 'Unknown'}, {self.name})" + + 
+def get_topology() -> Optional["TopologyRegistry"]: + """ + Get the global topology registry instance. + + Returns: + The global TopologyRegistry instance, or None if not initialized + + Usage: + from testsuite.core.topology import get_topology + + topology = get_topology() + if topology: + gateway = topology.get_gateway_for_policy(policy) + """ + return GlobalTopologySingleton.registry + + +def set_topology(registry: "TopologyRegistry") -> None: + """ + Set the global topology registry instance. + + This is typically called once by the session-scoped topology fixture. + + Args: + registry: The TopologyRegistry instance to use globally + """ + GlobalTopologySingleton.registry = registry + + +def clear_topology() -> None: + """Clear the global topology registry""" + if GlobalTopologySingleton.registry: + GlobalTopologySingleton.registry.clear() + GlobalTopologySingleton.registry = None + + +class TopologyRegistry: + """ + Central registry for Gateway API topology and policies. + + Tracks resources and their relationships: + - Gateway -> HTTPRoutes (children) + - HTTPRoute -> Gateway (parent) + - Policy -> Gateway/HTTPRoute (targets) + - Gateway/HTTPRoute -> Policies (targeted_by) + + Usage: + topology = TopologyRegistry() + + # Register resources + topology.register_gateway(gateway) + topology.register_route(route, gateway_name="my-gateway") + topology.register_policy(auth_policy) + + # Query relationships + gateway = topology.get_gateway_for_policy(policy) + routes = topology.get_routes_for_gateway("my-gateway") + policies = topology.get_policies_for_gateway("my-gateway") + + # Traverse + topology.print_topology() + """ + + def __init__(self) -> None: + self.nodes: Dict[str, TopologyNode] = {} # name -> TopologyNode + + def _get_or_create_node( + self, kind: Optional[Union[GatewayAPIKind, PolicyKind]], resource: Any, name: str + ) -> TopologyNode: + """Get existing node or create new one""" + if name not in self.nodes: + self.nodes[name] = TopologyNode(kind, 
resource, name) + return self.nodes[name] + + def register_gateway(self, gateway: Any) -> TopologyNode: + """Register a Gateway""" + name = gateway.name() + node = self._get_or_create_node(GatewayAPIKind.GATEWAY, gateway, name) + return node + + def register_route(self, route: Any, gateway_name: Optional[str] = None) -> TopologyNode: + """ + Register an HTTPRoute + + Args: + route: The HTTPRoute object + gateway_name: Optional explicit gateway name. If not provided, will try route.gateway.name() + """ + name = route.name() + node = self._get_or_create_node(GatewayAPIKind.HTTPROUTE, route, name) + + # Determine gateway + if not gateway_name and hasattr(route, "gateway"): + gateway_name = route.gateway.name() + + if gateway_name: + # Create relationship + node.parent = gateway_name + gateway_node = self._get_or_create_node(GatewayAPIKind.GATEWAY, None, gateway_name) + gateway_node.children.add(name) + + return node + + def register_policy(self, policy: Any) -> TopologyNode: + """ + Register a policy (AuthPolicy, RateLimitPolicy, etc.) 
+ + Automatically determines the kind and target relationship + """ + # Access model directly (works even if not committed) + name = policy.model.metadata.name + kind = PolicyKind(policy.model.kind) + + node = self._get_or_create_node(kind, policy, name) + + # Determine target (use model directly to avoid property access) + if hasattr(policy.model.spec, "targetRef"): + target_ref = policy.model.spec.targetRef + target_name = target_ref.name + target_kind = target_ref.kind + + # Create relationship + node.targets.add(target_name) + + # Create or update target node + if target_kind == GatewayAPIKind.GATEWAY: + target_node = self._get_or_create_node(GatewayAPIKind.GATEWAY, None, target_name) + elif target_kind == GatewayAPIKind.HTTPROUTE: + target_node = self._get_or_create_node(GatewayAPIKind.HTTPROUTE, None, target_name) + else: + # Unknown target kind, create generic node + target_node = self.nodes.setdefault(target_name, TopologyNode(None, None, target_name)) + + target_node.targeted_by.add(name) + + return node + + def get_node(self, name: str) -> Optional[TopologyNode]: + """Get a node by name""" + return self.nodes.get(name) + + def get_gateway(self, name: str) -> Any: + """Get a Gateway resource by name""" + node = self.get_node(name) + return node.resource if node and node.kind == GatewayAPIKind.GATEWAY else None + + def get_route(self, name: str) -> Any: + """Get an HTTPRoute resource by name""" + node = self.get_node(name) + return node.resource if node and node.kind == GatewayAPIKind.HTTPROUTE else None + + def get_gateway_for_target_ref(self, target_ref: Any) -> Any: + """ + Get the Gateway for a given targetRef (before policy is registered). 
+ + Args: + target_ref: The targetRef object with .name and .kind attributes + + Returns: + Gateway resource, or None if not found + """ + target_name = target_ref.name + target_kind = target_ref.kind + + if target_kind == GatewayAPIKind.GATEWAY: + # Direct gateway target + return self.get_gateway(target_name) + if target_kind == GatewayAPIKind.HTTPROUTE: + # Route target - get parent gateway + route_node = self.get_node(target_name) + if route_node and route_node.parent: + return self.get_gateway(route_node.parent) + + return None + + def get_policies_for_gateway(self, gateway_name: str, policy_kind: Optional[GatewayAPIKind] = None) -> List: + """ + Get all policies targeting a gateway (directly or via routes). + + Args: + gateway_name: Gateway name + policy_kind: Optional filter by policy kind (e.g., GatewayAPIKind.AUTH_POLICY) + """ + gateway_node = self.get_node(gateway_name) + if not gateway_node: + return [] + + policies = [] + + # Direct policies on gateway + for policy_name in gateway_node.targeted_by: + policy_node = self.get_node(policy_name) + if policy_node and policy_node.resource: + if not policy_kind or policy_node.kind == policy_kind: + policies.append(policy_node.resource) + + # Policies on child routes + for child_name in gateway_node.children: + child_node = self.get_node(child_name) + if child_node and child_node.kind == GatewayAPIKind.HTTPROUTE: + for policy_name in child_node.targeted_by: + policy_node = self.get_node(policy_name) + if policy_node and policy_node.resource: + if not policy_kind or policy_node.kind == policy_kind: + policies.append(policy_node.resource) + + return policies + + def get_policies_for_route(self, route_name: str, policy_kind: Optional[GatewayAPIKind] = None) -> List: + """Get all policies targeting a specific route""" + route_node = self.get_node(route_name) + if not route_node: + return [] + + policies = [] + for policy_name in route_node.targeted_by: + policy_node = self.get_node(policy_name) + if policy_node and 
policy_node.resource: + if not policy_kind or policy_node.kind == policy_kind: + policies.append(policy_node.resource) + return policies + + def set_policy_metadata(self, policy: Any, key: str, value: Any) -> None: + """ + Store metadata for a policy. + + Args: + policy: The policy object + key: Metadata key + value: Metadata value + """ + policy_name = policy.name() + policy_node = self.get_node(policy_name) + if policy_node: + policy_node.metadata[key] = value + + def get_policy_metadata(self, policy: Any, key: str, default: Any = None) -> Any: + """ + Retrieve metadata for a policy. + + Args: + policy: The policy object + key: Metadata key + default: Default value if key not found + + Returns: + The metadata value, or default if not found + """ + policy_name = policy.name() + policy_node = self.get_node(policy_name) + if policy_node: + return policy_node.metadata.get(key, default) + return default + + def has_existing_policies_for_target(self, target_ref: Any, exclude_policy_name: Optional[str] = None) -> bool: + """ + Check if there are existing committed policies targeting the given targetRef. 
+ + Args: + target_ref: The targetRef object with .name and .kind attributes + exclude_policy_name: Optional policy name to exclude from the check + + Returns: + bool: True if other committed policies exist for this target + """ + target_kind = target_ref.kind + target_name = target_ref.name + + # Get policies for this target + if target_kind == GatewayAPIKind.HTTPROUTE: + existing_policies = self.get_policies_for_route(target_name) + elif target_kind == GatewayAPIKind.GATEWAY: + existing_policies = self.get_policies_for_gateway(target_name) + else: + return False + + # Filter out excluded policy and uncommitted policies + existing_policies = [ + p for p in existing_policies if (not exclude_policy_name or p.name() != exclude_policy_name) and p.committed + ] + + return len(existing_policies) > 0 + + def should_expect_wasm_metric_increase( + self, target_ref: Any, gateway_name: str, exclude_policy_name: Optional[str] = None + ) -> bool: + """ + Determine if committing a policy should cause the kuadrant_configs metric to increase. + + The metric increases when WasmPlugin PluginConfig is regenerated by the operator. + This happens when: + 1. First policy on a gateway (WasmPlugin created) + 2. Topology changes (route creation/deletion) trigger config regeneration + + In controlled test environments (gateway + routes created before policies): + - First policy creates WasmPlugin → flag set, metric increases + - Subsequent policies don't change topology → config unchanged, metric constant + + The per-gateway flag tracks whether WasmPlugin exists, which is the right granularity + since topology changes cause full config regeneration anyway. 
+ + Args: + target_ref: The targetRef object with .name and .kind attributes + gateway_name: Gateway name + exclude_policy_name: Optional policy name to exclude from checks + + Returns: + bool: True if metric should increase, False otherwise + """ + # Check if other policies exist for this target + has_existing_policies = self.has_existing_policies_for_target(target_ref, exclude_policy_name) + + # Check if WasmPlugin was ever created for this gateway + gateway_node = self.get_node(gateway_name) + wasm_config_created = gateway_node and gateway_node.metadata.get("wasm_config_created", False) + + # Expect increase only if both checks say "no existing config" + return not has_existing_policies and not wasm_config_created + + def mark_wasm_config_created(self, gateway_name: str) -> None: + """ + Mark that a WasmPlugin config has been created for this gateway. + + Args: + gateway_name: Gateway name + """ + gateway_node = self.get_node(gateway_name) + if gateway_node: + gateway_node.metadata["wasm_config_created"] = True + + def clear(self) -> None: + """Clear all registered resources""" + self.nodes.clear() + + +# ============================================================================ +# Decorator-based automatic registration +# ============================================================================ + + +def topology(func: Callable) -> Callable: + """ + Decorator for pytest fixtures that automatically registers the returned object + in the global topology registry. + + Auto-detects the object type and registers it appropriately: + - Gateway objects → register_gateway() + - HTTPRoute objects → register_route() + - Policy objects (AuthPolicy, RateLimitPolicy, etc.) → register_policy() + + Usage: + @pytest.fixture(scope="module") + @topology + def gateway(cluster, blame): + gw = KuadrantGateway.create_instance(...) + return gw # or yield gw + + Uses the global topology registry - no need to inject topology fixture! 
+ """ + # Check if the function is a generator function (uses yield) + if inspect.isgeneratorfunction(func): + # It's a generator function (uses yield) + @wraps(func) + def generator_wrapper(*args, **kwargs): + generator = func(*args, **kwargs) + topology_registry = get_topology() + + # Get the yielded object + try: + obj = next(generator) + except StopIteration: + return + + # Register it + if topology_registry: + _register_object(topology_registry, obj) + + # Yield it to the test + yield obj + + # Continue with cleanup - consume the rest of the generator + for _ in generator: + pass + + return generator_wrapper + + # It's a regular function (uses return) + @wraps(func) + def regular_wrapper(*args, **kwargs): + result = func(*args, **kwargs) + topology_registry = get_topology() + + # Register the returned object + if topology_registry: + _register_object(topology_registry, result) + + return result + + return regular_wrapper + + +def _register_object(topology_registry: TopologyRegistry, obj: Any) -> None: + """ + Helper to detect object type and register it using duck typing. + + Uses attribute checking instead of isinstance to avoid circular imports. 
+ """ + if obj is None: + return + + # Check if it's a Gateway (has service_name and external_ip) + # Must check before Policy since Gateways also have model.kind + if hasattr(obj, "service_name") and hasattr(obj, "external_ip"): + topology_registry.register_gateway(obj) + return + + # Check if it's a Route (has gateway attribute) + # Must check before Policy since Routes also have model.kind + if hasattr(obj, "gateway"): + gateway_name = None + if obj.gateway: + gateway_name = obj.gateway.name() + topology_registry.register_route(obj, gateway_name=gateway_name) + return + + # Check if it's a Policy (has model.kind and model.spec.targetRef) + if hasattr(obj, "model") and hasattr(obj.model, "kind") and hasattr(obj.model, "spec"): + if hasattr(obj.model.spec, "targetRef"): + topology_registry.register_policy(obj) + return diff --git a/testsuite/gateway/__init__.py b/testsuite/gateway/__init__.py index 4d068100..fce4e209 100644 --- a/testsuite/gateway/__init__.py +++ b/testsuite/gateway/__init__.py @@ -8,6 +8,7 @@ from httpx import Client from testsuite.certificates import Certificate +from testsuite.gateway.metrics import GatewayMetrics from testsuite.httpx import KuadrantClient from testsuite.lifecycle import LifecycleObject from testsuite.utils import asdict @@ -138,6 +139,21 @@ def wait_for_ready(self, timeout: int = 90): def get_tls_cert(self, hostname: str) -> Optional[Certificate]: """Returns TLS cert bound to this Gateway, if the Gateway does not use TLS, returns None""" + @property + @abstractmethod + def class_name(self): + """Returns the classname""" + + @property + @abstractmethod + def metrics(self): + """Returns GatewayMetrics instance for querying metrics""" + + @property + @abstractmethod + def metrics_service_name(self): + """Returns the name of the service that exposes the metrics""" + class GatewayRoute(LifecycleObject, Referencable): """ diff --git a/testsuite/gateway/envoy/__init__.py b/testsuite/gateway/envoy/__init__.py index 4ba29eb8..91cb24c0 
100644 --- a/testsuite/gateway/envoy/__init__.py +++ b/testsuite/gateway/envoy/__init__.py @@ -102,6 +102,21 @@ def commit(self): def get_tls_cert(self, _) -> Optional[Certificate]: return None + @property + def class_name(self): + """Returns the class name (Envoy doesn't use GatewayClass)""" + return "envoy" + + @property + def metrics_service_name(self): + """Returns the metrics service name (Envoy doesn't expose metrics separately)""" + return f"{self.name}-metrics" + + @property + def metrics(self): + """Returns GatewayMetrics instance (not supported for Envoy-only deployment)""" + raise NotImplementedError("Metrics not supported for Envoy-only deployment") + def delete(self): """Destroy all objects this instance created""" self.config.delete() diff --git a/testsuite/gateway/gateway_api/__init__.py b/testsuite/gateway/gateway_api/__init__.py index e69de29b..33cc2780 100644 --- a/testsuite/gateway/gateway_api/__init__.py +++ b/testsuite/gateway/gateway_api/__init__.py @@ -0,0 +1,26 @@ +"""Gateway API implementation""" + +from enum import Enum + + +class GatewayAPIKind(str, Enum): + """Gateway API resource kinds.""" + + GATEWAY = "Gateway" + HTTPROUTE = "HTTPRoute" + GRPCROUTE = "GRPCRoute" + + +class PolicyKind(str, Enum): + """Kuadrant policy kinds (core + extensions).""" + + # Core policies + AUTH_POLICY = "AuthPolicy" + RATE_LIMIT_POLICY = "RateLimitPolicy" + DNS_POLICY = "DNSPolicy" + TLS_POLICY = "TLSPolicy" + + # Extension policies + OIDC_POLICY = "OIDCPolicy" + PLAN_POLICY = "PlanPolicy" + TELEMETRY_POLICY = "TelemetryPolicy" diff --git a/testsuite/gateway/gateway_api/gateway.py b/testsuite/gateway/gateway_api/gateway.py index 198995c1..ffd46f6e 100644 --- a/testsuite/gateway/gateway_api/gateway.py +++ b/testsuite/gateway/gateway_api/gateway.py @@ -8,11 +8,13 @@ from testsuite.config import settings from testsuite.certificates import Certificate from testsuite.gateway import Gateway, GatewayListener +from testsuite.gateway.gateway_api import 
GatewayAPIKind from testsuite.kubernetes.client import KubernetesClient from testsuite.kubernetes import KubernetesObject, modify from testsuite.kuadrant.policy import Policy from testsuite.kubernetes.deployment import Deployment from testsuite.utils import check_condition, asdict, domain_match +from testsuite.core.metrics_factory import create_gateway_metrics class KuadrantGateway(KubernetesObject, Gateway): @@ -21,6 +23,10 @@ class KuadrantGateway(KubernetesObject, Gateway): # Name of the GatewayClass that is to be used for all the instances cached_gw_class_name = None + def __init__(self, dict_to_model=None, string_to_model=None, context=None): + super().__init__(dict_to_model, string_to_model, context) + self._metrics = None + @classmethod def create_instance(cls, cluster: KubernetesClient, name, labels): """Creates new instance of Gateway""" @@ -30,7 +36,7 @@ def create_instance(cls, cluster: KubernetesClient, name, labels): model: dict[Any, Any] = { "apiVersion": "gateway.networking.k8s.io/v1beta1", - "kind": "Gateway", + "kind": GatewayAPIKind.GATEWAY, "metadata": {"name": name, "labels": labels}, "spec": {"gatewayClassName": cls.cached_gw_class_name, "listeners": []}, } @@ -58,6 +64,11 @@ def get_listener_dns_ttl(self, listener_name: str) -> int: def service_name(self) -> str: return f"{self.name()}-{self.cached_gw_class_name}" + @property + def metrics_service_name(self): + """Returns the metrics service""" + return f"{self.name()}-metrics" + def external_ip(self) -> str: with self.context: return f"{self.refresh().model.status.addresses[0].value}:80" @@ -110,7 +121,7 @@ def get_tls_cert(self, hostname): def get_tls_secret(self, hostname): """Returns the TLS secret for the matching listener hostname, or None if not found""" tls_cert_secret_name = None - for listener in self.all_tls_listeners(): + for listener in self._all_tls_listeners(): if domain_match(hostname, listener.hostname): tls_cert_secret_name = listener.tls.certificateRefs[0].name @@ -124,13 
+135,28 @@ def get_tls_secret(self, hostname): raise oc.OpenShiftPythonException("TLS secret was not created") from None raise e - def all_tls_listeners(self): + def _all_tls_listeners(self): """Yields all listeners in gateway that support 'tls'""" for listener in self.model.spec.listeners: if "tls" in listener: yield listener + def _expose_metrics(self): + """Expose metrics endpoint using the configured exposer""" + exposer = settings["default_exposer"](self.cluster) + self._metrics = create_gateway_metrics(exposer, self) + + def commit(self): + """Commits gateway and exposes metrics endpoint""" + result = super().commit() + self._expose_metrics() + return result + def delete(self, ignore_not_found=True, cmd_args=None): + # Delete metrics resources if they exist + if hasattr(self, "_metrics") and self._metrics: + self._metrics.delete() + res = super().delete(ignore_not_found, cmd_args) with self.cluster.context: # TLSPolicy does not delete certificates it creates @@ -168,3 +194,15 @@ def get_gateway_class_name(cluster: KubernetesClient): def deployment(self) -> Deployment: """Retrieve the managed deployment resource""" return self.cluster.get_deployment(self.service_name) + + @property + def class_name(self): + return self.cached_gw_class_name + + @property + def metrics(self): + """Returns GatewayMetrics instance for querying metrics""" + if not hasattr(self, "_metrics"): + raise RuntimeError("Gateway metrics not available. 
Call commit() first to expose metrics endpoint.") + + return self._metrics diff --git a/testsuite/gateway/gateway_api/route.py b/testsuite/gateway/gateway_api/route.py index b7fea01f..946afcb1 100644 --- a/testsuite/gateway/gateway_api/route.py +++ b/testsuite/gateway/gateway_api/route.py @@ -6,6 +6,7 @@ from testsuite.httpx import KuadrantClient from testsuite.gateway import Gateway, GatewayRoute, PathMatch, MatchType, RouteMatch +from testsuite.gateway.gateway_api import GatewayAPIKind from testsuite.kubernetes.client import KubernetesClient from testsuite.kubernetes import KubernetesObject, modify from testsuite.kuadrant.policy import Policy @@ -18,6 +19,10 @@ class HTTPRoute(KubernetesObject, GatewayRoute): """HTTPRoute object, serves as replacement for Routes and Ingresses""" + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self._gateway = None + def client(self, **kwargs) -> Client: """Returns HTTPX client""" return KuadrantClient(base_url=f"http://{self.hostnames[0]}", **kwargs) @@ -33,7 +38,7 @@ def create_instance( """Creates new instance of HTTPRoute""" model = { "apiVersion": "gateway.networking.k8s.io/v1beta1", - "kind": "HTTPRoute", + "kind": GatewayAPIKind.HTTPROUTE, "metadata": {"name": name, "namespace": cluster.project, "labels": labels}, "spec": { "parentRefs": [gateway.reference], @@ -42,7 +47,14 @@ def create_instance( }, } - return cls(model, context=cluster.context) + route = cls(model, context=cluster.context) + route._gateway = gateway # Store the gateway instance + return route + + @property + def gateway(self): + """Returns the gateway this route is attached to""" + return self._gateway def is_affected_by(self, policy: Policy): """Returns True, if affected by status is found within the object for the specific policy""" @@ -64,7 +76,7 @@ def is_affected_by(self, policy: Policy): def reference(self): return { "group": "gateway.networking.k8s.io", - "kind": "HTTPRoute", + "kind": GatewayAPIKind.HTTPROUTE, "name": 
# NOTE(review): the tail of route.py's `reference` property precedes this
# chunk and is covered by the previous hunk summary.

# --- testsuite/gateway/metrics.py (new module, reconstructed) ---
"""Gateway metrics querying functionality"""

import re
from abc import ABC, abstractmethod

import backoff
import httpx


class GatewayMetrics(ABC):
    """
    Base class for querying metrics from a Gateway.
    """

    @property
    @abstractmethod
    def metrics_url(self):
        """Get the metrics endpoint URL"""

    @abstractmethod
    def delete(self):
        """Clean up metrics resources"""

    @backoff.on_exception(
        backoff.constant,
        (httpx.HTTPError, httpx.TimeoutException),
        max_tries=3,
        interval=2,
        jitter=None,
    )
    def get_kuadrant_configs(self):
        """
        Queries and returns the kuadrant_configs metric value from the gateway.
        This metric represents the total number of configs loaded in the wasm shim.

        Returns:
            int: The metric value, or 0 if metric not found
        """
        # Query the metrics endpoint with cache-busting headers.
        response = httpx.get(
            self.metrics_url, timeout=5.0, headers={"Cache-Control": "no-cache, no-store, must-revalidate"}
        )
        response.raise_for_status()

        # Prometheus exposition format, e.g.: kuadrant_configs{} 4
        # BUG FIX: anchored to an optional label block so other series such as
        # `kuadrant_configs_total` are no longer matched by accident (the old
        # `^kuadrant_configs.*?` prefix match accepted any suffix).
        pattern = r"^kuadrant_configs(?:\{[^}]*\})?\s+(\d+)"
        for line in response.text.split("\n"):
            match = re.match(pattern, line)
            if match:
                return int(match.group(1))

        return 0

    def wait_for_kuadrant_config_increase(self, initial_value):
        """
        Polls the kuadrant_configs metric until it increases from the initial value.

        Args:
            initial_value: The initial metric value to compare against

        Returns:
            int: The final metric value after the increase

        Raises:
            AssertionError: if the metric never rises above `initial_value`
            within the polling budget (5 tries, 2s apart).
        """

        @backoff.on_predicate(
            backoff.constant,
            lambda x: x is None or x <= initial_value,
            max_tries=5,
            interval=2,
            jitter=None,
        )
        def poll_metric():
            return self.get_kuadrant_configs()

        final_value = poll_metric()
        if final_value is None or final_value <= initial_value:
            # BUG FIX: the metric may simply have failed to increase -- the old
            # message claimed it "decreased", which is misleading when the
            # value merely stayed flat.
            raise AssertionError(
                f"kuadrant_configs metric did not increase. Initial: {initial_value}, Final: {final_value}"
            )

        return final_value

    def wait_for_kuadrant_config_value(self, expected_value):
        """
        Polls the kuadrant_configs metric until it reaches or exceeds the expected value.

        Args:
            expected_value: The expected metric value to wait for

        Returns:
            int: The final metric value

        Raises:
            AssertionError: if the metric never reaches `expected_value`
            within the polling budget (10 tries, 3s apart).
        """

        @backoff.on_predicate(
            backoff.constant,
            lambda x: x is None or x < expected_value,
            max_tries=10,
            interval=3,
            jitter=None,
        )
        def poll_metric():
            return self.get_kuadrant_configs()

        final_value = poll_metric()
        if final_value is None or final_value < expected_value:
            raise AssertionError(
                f"kuadrant_configs metric did not reach expected value. "
                f"Expected: >={expected_value}, Actual: {final_value}"
            )

        return final_value


class OpenShiftGatewayMetrics(GatewayMetrics):
    """Gateway metrics implementation for OpenShift (ClusterIP + Route)"""

    def __init__(self, metrics_route, metrics_service):
        """
        Args:
            metrics_route: OpenshiftRoute object exposing the metrics
            metrics_service: Service object for metrics endpoint
        """
        self._metrics_route = metrics_route
        self._metrics_service = metrics_service

    @property
    def metrics_url(self):
        """Get the metrics URL from the route"""
        return f"http://{self._metrics_route.hostname}/stats/prometheus"

    def delete(self):
        """Delete the metrics route and service"""
        if self._metrics_route:
            self._metrics_route.delete(ignore_not_found=True)
        if self._metrics_service:
            self._metrics_service.delete(ignore_not_found=True)


class LoadBalancerGatewayMetrics(GatewayMetrics):
    """Gateway metrics implementation for LoadBalancer (LoadBalancer Service)"""

    def __init__(self, gateway, metrics_service):
        """
        Args:
            gateway: Gateway object to get external IP from
            metrics_service: Service object for metrics endpoint
        """
        self._gateway = gateway
        self._metrics_service = metrics_service

    @property
    def metrics_url(self):
        """Get the metrics URL from metrics service external IP"""
        # 15020 is the Istio sidecar/merged-metrics port used by the service.
        return f"http://{self._metrics_service.external_ip}:15020/stats/prometheus"

    def delete(self):
        """Delete the metrics service"""
        if self._metrics_service:
            self._metrics_service.delete(ignore_not_found=True)


# NOTE(review): the policy/__init__.py import-diff head (dataclass/is_dataclass,
# modify, WasmMetricValidator, asdict, get_topology) starts here in the paste
# and is split across the chunk boundary; covered by the next block.
# --- testsuite/kuadrant/policy/__init__.py (Section helper + Policy hooks) ---
# NOTE(review): the hunk's reworked import block (dataclass + is_dataclass,
# KubernetesObject + modify, WasmMetricValidator, check_condition + asdict,
# get_topology) is split across the chunk boundary and summarised here.


class Section:
    """
    Generic section handler for policy specs.

    Used for both:
    - Defaults/overrides sections in policies (RateLimitPolicy, AuthPolicy)
    - Nested sections in auth configs (identity, authorization, metadata)

    Provides generic section access, modify delegation to the parent object,
    a dataclass-aware add_to_spec() helper, and add_item()/clear_all()/strategy().
    """

    def __init__(self, obj, section_name: "str | None" = None):
        """
        Args:
            obj: Parent object (Policy or AuthConfig)
            section_name: Name of the section (e.g., "defaults", "overrides", "identity")

        Note: the annotation is quoted (lazy) and fixed from the original
        `str = None`, which mislabelled an optional parameter as plain str.
        """
        self.obj = obj
        self.section_name = section_name

    def get_section(self, subsection: "str | None" = None):
        """
        Get the target section from the parent object's spec.

        defaults/overrides always live under model.spec; auth nested sections
        live under the parent's auth_section; everything else under model.spec.
        If `subsection` is provided, navigates (creating) one level deeper.
        """
        if self.section_name in ("defaults", "overrides"):
            if hasattr(self.obj, "model"):
                target = self.obj.model.spec.setdefault(self.section_name, {})
            else:
                # NOTE(review): writes into this throwaway dict are silently
                # discarded when the parent has no model -- confirm intended.
                target = {}
        elif hasattr(self.obj, "auth_section"):
            # Auth nested sections (identity, authorization, metadata)
            if self.section_name:
                target = self.obj.auth_section.setdefault(self.section_name, {})
            else:
                target = self.obj.auth_section
        else:
            # Regular policies: operate directly on model.spec
            if self.section_name:
                target = self.obj.model.spec.setdefault(self.section_name, {})
            else:
                target = self.obj.model.spec

        return target.setdefault(subsection, {}) if subsection else target

    @property
    def committed(self):
        """Delegate to parent object's committed status"""
        return self.obj.committed

    def modify_and_apply(self, modifier_func, retries=2, cmd_args=None):
        """Delegate modify_and_apply to the parent object, recreating this
        section against the refreshed object for the callback."""

        def _new_modifier(obj):
            modifier_func(self.__class__(obj, self.section_name))

        return self.obj.modify_and_apply(_new_modifier, retries, cmd_args)

    @modify
    def strategy(self, strategy: Strategy):
        """Add strategy type to this section"""
        self.get_section()["strategy"] = strategy.value
        return self

    def add_to_spec(self, spec: dict, **kwargs):
        """Add items to a spec dict, converting dataclasses (and lists of
        dataclasses) to plain dicts; None values are skipped."""
        for key, value in kwargs.items():
            if value is None:
                continue
            if isinstance(value, list):
                spec[key] = [asdict(item) if is_dataclass(item) else item for item in value]
            elif is_dataclass(value):
                spec[key] = asdict(value)
            else:
                spec[key] = value

    def add_item(self, name: str, value: dict, **features):
        """Add an item to this section.

        NOTE(review): mutates the caller-supplied `value` dict in place
        (features are merged into it before insertion) -- confirm intended.
        """
        self.add_to_spec(value, **features)
        self.get_section().update({name: value})

    @modify
    def clear_all(self):
        """Clear content of this section"""
        self.get_section().clear()


class Policy(KubernetesObject):
    """Base class with common functionality for all policies"""

    @property
    def _topology(self):
        """Get the global topology registry"""
        return get_topology()

    def commit(self):
        """Commits the policy to the cluster, first recording the wasm-metric
        baseline so wait_for_ready() can validate the expected change."""
        WasmMetricValidator.prepare_validation(self, self._topology)
        return super().commit()

    def wait_for_ready(self):
        """
        Wait for a Policy to be ready.
        Verifies observedGeneration, Enforced status, and kuadrant_configs metric.
        """
        self.refresh()
        success = self.wait_until(has_observed_generation(self.generation))
        assert success, f"{self.kind()} did not reach observed generation in time"
        self.wait_for_full_enforced()
        WasmMetricValidator.validate_metrics(self, self._topology)

    def wait_for_accepted(self):
        """Wait for a Policy to be Accepted"""
        success = self.wait_until(has_condition("Accepted", "True"))
        assert success, f"{self.kind()} did not get accepted in time"
        WasmMetricValidator.validate_metrics(self, self._topology)

    # NOTE(review): wait_for_partial_enforced and the auth_config.py
    # import-reorder hunk continue past this chunk and are unchanged here.
import Rule, Pattern diff --git a/testsuite/kuadrant/policy/authorization/auth_policy.py b/testsuite/kuadrant/policy/authorization/auth_policy.py index c394b7dd..88b739aa 100644 --- a/testsuite/kuadrant/policy/authorization/auth_policy.py +++ b/testsuite/kuadrant/policy/authorization/auth_policy.py @@ -55,15 +55,28 @@ def strategy(self, strategy: Strategy) -> None: if self.spec_section is None: raise TypeError("Strategy can only be set on defaults or overrides") - self.spec_section["strategy"] = strategy.value + if isinstance(self.spec_section, str): + # String marker - create the section now + section = self.model.spec.setdefault(self.spec_section, {}) + else: + section = self.spec_section + + section["strategy"] = strategy.value self.spec_section = None @property def auth_section(self): + """Returns the rules section for adding auth configuration""" if self.spec_section is None: - self.spec_section = self.model.spec + # Implicit mode - use model.spec directly + spec_section = self.model.spec + elif isinstance(self.spec_section, str): + # String marker ("defaults" or "overrides") - create the section now + spec_section = self.model.spec.setdefault(self.spec_section, {}) + else: + # Already a dict (shouldn't happen with new code but keep for compatibility) + spec_section = self.spec_section - spec_section = self.spec_section self.spec_section = None return spec_section.setdefault("rules", {}) @@ -75,13 +88,17 @@ def responses(self) -> ResponseSection: @property def defaults(self): """Add new rule into the `defaults` AuthPolicy section""" - self.spec_section = self.model.spec.setdefault("defaults", {}) + # Don't create the dict yet - only mark which section to use + # The dict will be created when auth_section is called + self.spec_section = "defaults" return self @property def overrides(self): """Add new rule into the `overrides` AuthPolicy section""" - self.spec_section = self.model.spec.setdefault("overrides", {}) + # Don't create the dict yet - only mark which 
# --- testsuite/kuadrant/policy/authorization/auth_policy_spec.py (new module) ---
"""
Spec classes for AuthPolicy that mirror the Kuadrant operator Go API structure.

Three layers:
- AuthPolicySpec: top-level spec with defaults/overrides properties
- MergeableAuthPolicySpec: wrapper for defaults/overrides sections
- AuthPolicySpecProper: the actual policy rules (authentication, authorization, ...)

Mirrors github.com/kuadrant/kuadrant-operator/api/v1/authpolicy_types.go
"""

from functools import cached_property
from typing import Optional

from testsuite.kuadrant.policy import Strategy
from testsuite.kuadrant.policy.authorization.sections import (
    IdentitySection,
    AuthorizationSection,
    MetadataSection,
    ResponseSection,
)


class AuthPolicySpecProper:
    """
    Actual policy rules matching Go AuthPolicySpecProper: the auth scheme
    (rules dict) with authentication, authorization, metadata and response
    sections.
    """

    def __init__(self, parent_policy) -> None:
        """
        Args:
            parent_policy: owner providing committed state and modify_and_apply.
        """
        self.parent_policy = parent_policy
        self._rules: dict = {}

    @property
    def rules(self) -> dict:
        """The rules dict backing all sections."""
        return self._rules

    @property
    def auth_section(self):
        """Alias of rules: where sections store their data."""
        return self._rules

    @cached_property
    def identity(self) -> IdentitySection:
        """Identity/authentication section."""
        return IdentitySection(self, "authentication")

    @cached_property
    def authorization(self) -> AuthorizationSection:
        """Authorization rules section."""
        return AuthorizationSection(self, "authorization")

    @cached_property
    def metadata(self) -> MetadataSection:
        """Metadata enrichment section."""
        return MetadataSection(self, "metadata")

    @cached_property
    def responses(self) -> ResponseSection:
        """Response manipulation section (Kuadrant uses 'filters')."""
        return ResponseSection(self, "response", "filters")

    @property
    def committed(self):
        """Proxy to the parent policy's committed status."""
        return self.parent_policy.committed

    def modify_and_apply(self, modifier_func, retries=2, cmd_args=None):
        """Proxy to the parent policy's modify_and_apply."""
        return self.parent_policy.modify_and_apply(modifier_func, retries, cmd_args)

    def to_dict(self) -> dict:
        """Serialize to a model.spec fragment; empty dict when no rules set."""
        return {"rules": self._rules} if self._rules else {}


class MergeableAuthPolicySpec:
    """
    Wrapper for defaults/overrides sections matching Go MergeableAuthPolicySpec:
    a merge strategy plus an embedded AuthPolicySpecProper.
    """

    def __init__(self, parent_policy, strategy: Strategy = Strategy.ATOMIC) -> None:
        """
        Args:
            parent_policy: the AuthPolicy owning this spec.
            strategy: merge strategy (ATOMIC or MERGE), defaults to ATOMIC.
        """
        self.parent_policy = parent_policy
        self._strategy = strategy
        self.proper = AuthPolicySpecProper(self)

    def strategy(self, value: Strategy):
        """Set the merge strategy; returns self for chaining."""
        self._strategy = value
        return self

    @property
    def committed(self):
        """Proxy to the parent policy's committed status."""
        return self.parent_policy.committed

    def modify_and_apply(self, modifier_func, retries=2, cmd_args=None):
        """Proxy to the parent policy's modify_and_apply."""
        return self.parent_policy.modify_and_apply(modifier_func, retries, cmd_args)

    # Convenience pass-throughs to the embedded proper spec.
    @property
    def identity(self):
        """Direct access to identity section."""
        return self.proper.identity

    @property
    def authorization(self):
        """Direct access to authorization section."""
        return self.proper.authorization

    @property
    def metadata(self):
        """Direct access to metadata section."""
        return self.proper.metadata

    @property
    def responses(self):
        """Direct access to response section."""
        return self.proper.responses

    def to_dict(self) -> dict:
        """Serialize: strategy value merged with the proper spec's rules."""
        result = {"strategy": self._strategy.value}
        result.update(self.proper.to_dict())
        return result


class AuthPolicySpec:
    """
    Top-level spec matching Go AuthPolicySpec: a targetRef plus mutually
    exclusive defaults/overrides sections, falling back to an implicit bare
    configuration. proper() abstracts which section is active.
    """

    def __init__(self, parent_policy, target_ref: dict) -> None:
        """
        Args:
            parent_policy: the AuthPolicy owning this spec.
            target_ref: the targetRef dict for the policy.
        """
        self.parent_policy = parent_policy
        self.target_ref = target_ref
        self._defaults: Optional[MergeableAuthPolicySpec] = None
        self._overrides: Optional[MergeableAuthPolicySpec] = None
        self._implicit = AuthPolicySpecProper(parent_policy)

    @property
    def defaults(self) -> Optional[MergeableAuthPolicySpec]:
        """The defaults section, or None when unset."""
        return self._defaults

    @defaults.setter
    def defaults(self, value: Optional[MergeableAuthPolicySpec]):
        # defaults and overrides are mutually exclusive.
        if value is not None:
            self._overrides = None
        self._defaults = value

    @property
    def overrides(self) -> Optional[MergeableAuthPolicySpec]:
        """The overrides section, or None when unset."""
        return self._overrides

    @overrides.setter
    def overrides(self, value: Optional[MergeableAuthPolicySpec]):
        if value is not None:
            self._defaults = None
        self._overrides = value

    def proper(self) -> AuthPolicySpecProper:
        """
        Return the active SpecProper (matches Go Proper()): defaults first,
        then overrides, then the implicit bare configuration.
        """
        if self._defaults is not None:
            return self._defaults.proper
        if self._overrides is not None:
            return self._overrides.proper
        return self._implicit

    def to_dict(self) -> dict:
        """
        Serialize to model.spec: targetRef plus exactly one of defaults,
        overrides, or the implicit rules merged in directly.
        """
        result = {"targetRef": self.target_ref}
        if self._defaults is not None:
            result["defaults"] = self._defaults.to_dict()
        elif self._overrides is not None:
            result["overrides"] = self._overrides.to_dict()
        else:
            # Implicit mode - merge rules directly into spec
            result.update(self._implicit.to_dict())
        return result


# NOTE(review): the sections.py hunk (Section.__init__ taking a spec_proper)
# begins here in the paste, split across the chunk boundary; covered by the
# next block.
+ """ super().__init__() - self.obj = obj + self.obj = spec_proper self.section_name = section_name def modify_and_apply(self, modifier_func, retries=2, cmd_args=None): """Reimplementation of modify_and_apply from OpenshiftObject""" def _new_modifier(obj): - modifier_func(self.__class__(obj, self.section_name)) + # During modify_and_apply, we need to recreate the section on the new object + # The obj here is the policy, so we need to get its spec.proper() + if hasattr(obj, "spec"): + # It's a policy object, get the proper spec + spec_proper = obj.spec.proper() + else: + # It's already a spec proper + spec_proper = obj + modifier_func(self.__class__(spec_proper, self.section_name)) return self.obj.modify_and_apply(_new_modifier, retries, cmd_args) @@ -225,10 +241,34 @@ class ResponseSection(Section): SUCCESS_RESPONSE = Union[JsonResponse, PlainResponse, WristbandResponse] - def __init__(self, obj: "AuthConfig", section_name, data_key: Literal["filters", "dynamicMetadata"]): - super().__init__(obj, section_name) + def __init__(self, spec_proper, section_name, data_key: Literal["filters", "dynamicMetadata"]): + """ + Initialize a response section. + + Args: + spec_proper: The AuthPolicySpecProper that owns this section. + section_name: The name of the section (typically "response"). + data_key: Either "filters" or "dynamicMetadata" for success responses. 
+ """ + super().__init__(spec_proper, section_name) self.data_key = data_key + def modify_and_apply(self, modifier_func, retries=2, cmd_args=None): + """Override to pass data_key when recreating section""" + + def _new_modifier(obj): + # During modify_and_apply, we need to recreate the section on the new object + # The obj here is the policy, so we need to get its spec.proper() + if hasattr(obj, "spec"): + # It's a policy object, get the proper spec + spec_proper = obj.spec.proper() + else: + # It's already a spec proper + spec_proper = obj + modifier_func(self.__class__(spec_proper, self.section_name, self.data_key)) + + return self.obj.modify_and_apply(_new_modifier, retries, cmd_args) + def add_simple(self, auth_json: str, name="simple", key="data", **common_features): """ Add simple response to AuthConfig, used for configuring response for debugging purposes, diff --git a/testsuite/kuadrant/policy/metric_validator.py b/testsuite/kuadrant/policy/metric_validator.py new file mode 100644 index 00000000..d6b927b0 --- /dev/null +++ b/testsuite/kuadrant/policy/metric_validator.py @@ -0,0 +1,102 @@ +"""WasmPlugin metric validation for policy commits.""" + + +class WasmMetricValidator: + """ + Validates kuadrant_configs metric changes after policy commits. + + The kuadrant_configs metric tracks WasmPlugin configurations loaded in the WASM shim. + It increases (by 4 per gateway) when WasmPlugin PluginConfig is regenerated, which happens: + 1. First policy on a gateway (WasmPlugin created) + 2. Topology changes (route creation/deletion) trigger config regeneration + + In controlled test environments where gateway + routes exist before policies: + - First policy creates WasmPlugin → metric increases + - Subsequent policies (parametrized tests) don't change topology → metric stays constant + + This validator uses per-gateway flags and topology analysis to predict metric behavior. 
+ """ + + @staticmethod + def prepare_validation(policy, topology): + """ + Prepare metric validation by capturing initial state and determining expectations. + + This should be called before commit() to set up validation that runs in wait_for_ready(). + + Args: + policy: The policy being committed + topology: The TopologyRegistry instance + """ + if not topology or not hasattr(policy.model.spec, "targetRef"): + return + + target_ref = policy.model.spec.targetRef + gateway = topology.get_gateway_for_target_ref(target_ref) + + if not gateway or not hasattr(gateway, "metrics"): + return + + # Capture current metric value + try: + initial_metric = gateway.metrics.get_kuadrant_configs() + except (AttributeError, OSError): + # AttributeError: metrics service/route not ready or model structure unexpected + # OSError: httpx base exception (includes ConnectError, TimeoutException, etc.) + initial_metric = 0 + + # Determine if this policy will cause WasmPlugin creation (metric increase) + expect_metric_increase = topology.should_expect_wasm_metric_increase( + target_ref, gateway.name(), exclude_policy_name=policy.name() + ) + + # Mark gateway if WasmPlugin will be created + if expect_metric_increase: + topology.mark_wasm_config_created(gateway.name()) + + # Store metadata for validation in wait_for_ready() + topology.set_policy_metadata(policy, "initial_kuadrant_configs", initial_metric) + topology.set_policy_metadata(policy, "expect_metric_increase", expect_metric_increase) + topology.set_policy_metadata(policy, "gateway_name", gateway.name()) + + @staticmethod + def validate_metrics(policy, topology): + """ + Validate that the kuadrant_configs metric changed as expected after policy commit. + + This should be called in wait_for_ready() after the policy is enforced. 
+ + Args: + policy: The policy that was committed + topology: The TopologyRegistry instance + + Raises: + AssertionError: If metric validation fails + """ + if not topology: + return + + initial_metric = topology.get_policy_metadata(policy, "initial_kuadrant_configs") + gateway_name = topology.get_policy_metadata(policy, "gateway_name") + expect_metric_increase = topology.get_policy_metadata(policy, "expect_metric_increase") + + if initial_metric is None or gateway_name is None: + return + + gateway = topology.get_gateway(gateway_name) + if not gateway or not hasattr(gateway, "metrics"): + return + + # Wait for metric to reach expected state + if expect_metric_increase: + # First policy for this target - metric should increase + gateway.metrics.wait_for_kuadrant_config_increase(initial_metric) + else: + # Policy updates existing WasmPlugin - metric should stay same + # Just verify it didn't decrease + current_metric = gateway.metrics.get_kuadrant_configs() + if current_metric < initial_metric: + raise AssertionError( + f"kuadrant_configs metric decreased unexpectedly for policy {policy.name()}. 
" + f"Initial: {initial_metric}, Current: {current_metric}" + ) diff --git a/testsuite/kuadrant/policy/rate_limit.py b/testsuite/kuadrant/policy/rate_limit.py index 11d14f1f..b24ee2cc 100644 --- a/testsuite/kuadrant/policy/rate_limit.py +++ b/testsuite/kuadrant/policy/rate_limit.py @@ -67,9 +67,16 @@ def add_limit( limit["counters"] = [asdict(rule) for rule in counters] if self.spec_section is None: - self.spec_section = self.model.spec - - self.spec_section.setdefault("limits", {})[name] = limit + # Implicit mode - use model.spec directly + spec_section = self.model.spec + elif isinstance(self.spec_section, str): + # String marker ("defaults" or "overrides") - create the section now + spec_section = self.model.spec.setdefault(self.spec_section, {}) + else: + # Already a dict (shouldn't happen with new code but keep for compatibility) + spec_section = self.spec_section + + spec_section.setdefault("limits", {})[name] = limit self.spec_section = None @modify @@ -78,21 +85,42 @@ def strategy(self, strategy: Strategy) -> None: if self.spec_section is None: raise TypeError("Strategy can only be set on defaults or overrides") - self.spec_section["strategy"] = strategy.value + if isinstance(self.spec_section, str): + # String marker - create the section now + section = self.model.spec.setdefault(self.spec_section, {}) + else: + section = self.spec_section + + section["strategy"] = strategy.value self.spec_section = None @property def defaults(self): """Add new rule into the `defaults` RateLimitPolicy section""" - self.spec_section = self.model.spec.setdefault("defaults", {}) + # Don't create the dict yet - only mark which section to use + # The dict will be created when add_limit or strategy is called + self.spec_section = "defaults" return self @property def overrides(self): """Add new rule into the `overrides` RateLimitPolicy section""" - self.spec_section = self.model.spec.setdefault("overrides", {}) + # Don't create the dict yet - only mark which section to use + # 
The dict will be created when add_limit or strategy is called + self.spec_section = "overrides" return self + def modify_and_apply(self, modifier_func, retries=2, cmd_args=None, **kwargs): + """Override to sync spec to model after modifications.""" + + def _wrapper(obj): + # Call the original modifier + modifier_func(obj) + # Sync spec to model after modification + obj._sync_spec_to_model() # pylint: disable=protected-access + + return super().modify_and_apply(_wrapper, retries, cmd_args, **kwargs) + def wait_for_ready(self): """Wait for RLP to be enforced""" super().wait_for_ready() diff --git a/testsuite/kuadrant/policy/rate_limit_spec.py b/testsuite/kuadrant/policy/rate_limit_spec.py new file mode 100644 index 00000000..b6014427 --- /dev/null +++ b/testsuite/kuadrant/policy/rate_limit_spec.py @@ -0,0 +1,185 @@ +""" +Spec classes for RateLimitPolicy that mirror the Kuadrant operator Go API structure. + +This module provides a three-layer structure: +- RateLimitPolicySpec: Top-level spec with defaults/overrides properties +- MergeableRateLimitPolicySpec: Wrapper for defaults/overrides sections +- RateLimitPolicySpecProper: Actual policy rules (limits) + +The structure matches the Go implementation in: +github.com/kuadrant/kuadrant-operator/api/v1/ratelimitpolicy_types.go +""" + +from typing import Iterable, Optional, TYPE_CHECKING + +from testsuite.kuadrant.policy import CelPredicate, CelExpression, Strategy +from testsuite.utils import asdict + +if TYPE_CHECKING: + from testsuite.kuadrant.policy.rate_limit import Limit + + +class RateLimitPolicySpecProper: + """ + Actual policy rules matching Go RateLimitPolicySpecProper. + + Contains the limits dict and methods to manipulate it. + This is the "proper" spec that contains the actual policy configuration. 
+ """ + + def __init__(self) -> None: + self.limits: dict[str, dict] = {} + + def add_limit( + self, + name: str, + limits: Iterable["Limit"], + when: Optional[list[CelPredicate]] = None, + counters: Optional[list[CelExpression]] = None, + ): + """ + Add a limit to this spec. + + Args: + name: Name of the limit + limits: List of Limit objects + when: Optional CEL predicates for conditional limits + counters: Optional CEL expressions for custom counters + """ + limit: dict = { + "rates": [asdict(limit) for limit in limits], + } + if when: + limit["when"] = [asdict(rule) for rule in when] + if counters: + limit["counters"] = [asdict(rule) for rule in counters] + + self.limits[name] = limit + + def to_dict(self) -> dict: + """Convert to dict for serialization to model.spec""" + result = {} + if self.limits: + result["limits"] = self.limits + return result + + +class MergeableRateLimitPolicySpec: + """ + Wrapper for defaults/overrides sections matching Go MergeableRateLimitPolicySpec. + + Contains a strategy field and embeds a RateLimitPolicySpecProper. + """ + + def __init__(self, strategy: Strategy = Strategy.ATOMIC) -> None: + self._strategy = strategy + self.proper = RateLimitPolicySpecProper() + + def strategy(self, value: Strategy): + """ + Set the merge strategy. + + Args: + value: The merge strategy (Strategy.ATOMIC or Strategy.MERGE) + + Returns: + self for chaining + """ + self._strategy = value + return self + + def add_limit( + self, + name: str, + limits: Iterable["Limit"], + when: Optional[list[CelPredicate]] = None, + counters: Optional[list[CelExpression]] = None, + ): + """ + Convenience method to add a limit directly (delegates to proper). 
+ + Args: + name: Name of the limit + limits: List of Limit objects + when: Optional CEL predicates for conditional limits + counters: Optional CEL expressions for custom counters + """ + self.proper.add_limit(name, limits, when, counters) + + def to_dict(self) -> dict: + """Convert to dict for serialization to model.spec""" + result = {"strategy": self._strategy.value} + result.update(self.proper.to_dict()) + return result + + +class RateLimitPolicySpec: + """ + Top-level spec matching Go RateLimitPolicySpec. + + Provides defaults, overrides, and implicit (bare) configuration. + The proper() method abstracts which section is active. + """ + + def __init__(self, target_ref: dict) -> None: + self.target_ref = target_ref + self._defaults: Optional[MergeableRateLimitPolicySpec] = None + self._overrides: Optional[MergeableRateLimitPolicySpec] = None + self._implicit = RateLimitPolicySpecProper() + + @property + def defaults(self) -> Optional[MergeableRateLimitPolicySpec]: + """Returns the defaults section if set, None otherwise.""" + return self._defaults + + @defaults.setter + def defaults(self, value: Optional[MergeableRateLimitPolicySpec]): + """Set defaults and clear overrides (mutual exclusivity).""" + if value is not None: + self._overrides = None + self._defaults = value + + @property + def overrides(self) -> Optional[MergeableRateLimitPolicySpec]: + """Returns the overrides section if set, None otherwise.""" + return self._overrides + + @overrides.setter + def overrides(self, value: Optional[MergeableRateLimitPolicySpec]): + """Set overrides and clear defaults (mutual exclusivity).""" + if value is not None: + self._defaults = None + self._overrides = value + + def proper(self) -> RateLimitPolicySpecProper: + """ + Returns the active SpecProper (matches Go Proper() method). + + Priority: + 1. defaults (if set) + 2. overrides (if set) + 3. 
implicit (bare configuration) + """ + if self._defaults is not None: + return self._defaults.proper + if self._overrides is not None: + return self._overrides.proper + return self._implicit + + def to_dict(self) -> dict: + """ + Convert to dict for serialization to model.spec. + + Returns a dict with targetRef and either defaults, overrides, or implicit limits. + """ + result = {"targetRef": self.target_ref} + + if self._defaults is not None: + result["defaults"] = self._defaults.to_dict() + elif self._overrides is not None: + result["overrides"] = self._overrides.to_dict() + else: + # Implicit mode - merge limits directly into spec + result.update(self._implicit.to_dict()) + + return result diff --git a/testsuite/kubernetes/service.py b/testsuite/kubernetes/service.py index dff19d0e..e6caca61 100644 --- a/testsuite/kubernetes/service.py +++ b/testsuite/kubernetes/service.py @@ -71,6 +71,18 @@ def external_ip(self): return ip + def get_node_port(self, port_name: str) -> int: + """Returns the NodePort for a specific port""" + if self.model.spec.type != "NodePort": + raise AttributeError("NodePort can only be used with NodePort services") + + port = self.get_port(port_name) + node_port = port.get("nodePort") + if not node_port: + raise AttributeError(f"NodePort not assigned for port {port_name}") + + return node_port + def delete(self, ignore_not_found=True, cmd_args=None): """Deletes Service, introduces bigger waiting times due to LoadBalancer type""" with timeout(10 * 60): diff --git a/testsuite/tests/conftest.py b/testsuite/tests/conftest.py index 15c658b8..64d0e0f1 100644 --- a/testsuite/tests/conftest.py +++ b/testsuite/tests/conftest.py @@ -14,15 +14,16 @@ from testsuite.certificates import CFSSLClient from testsuite.component_metadata import ComponentMetadataCollector from testsuite.config import settings +from testsuite.core.topology import TopologyRegistry, clear_topology, set_topology from testsuite.gateway import Exposer, CustomReference from 
testsuite.httpx import KuadrantClient +from testsuite.kubernetes.config_map import ConfigMap from testsuite.mockserver import Mockserver from testsuite.oidc import OIDCProvider from testsuite.oidc.auth0 import Auth0Provider -from testsuite.prometheus import Prometheus from testsuite.oidc.keycloak import Keycloak +from testsuite.prometheus import Prometheus from testsuite.tracing.jaeger import JaegerClient -from testsuite.kubernetes.config_map import ConfigMap from testsuite.tracing.tempo import RemoteTempoClient from testsuite.utils import randomize, _whoami @@ -101,6 +102,18 @@ def skip_or_fail(request): return pytest.fail if request.config.getoption("--enforce") else pytest.skip +@pytest.fixture(scope="session", autouse=True) +def topology(): + """Global topology registry for Gateway API resources and policies""" + registry = TopologyRegistry() + set_topology(registry) # Set as global singleton + + yield registry + + # Cleanup on session end + clear_topology() + + @pytest.fixture(scope="session", autouse=True) def term_handler(): """ diff --git a/testsuite/tests/singlecluster/conftest.py b/testsuite/tests/singlecluster/conftest.py index 42b8aed3..dc0744ef 100644 --- a/testsuite/tests/singlecluster/conftest.py +++ b/testsuite/tests/singlecluster/conftest.py @@ -10,6 +10,7 @@ from testsuite.gateway.envoy.route import EnvoyVirtualRoute from testsuite.gateway.gateway_api.gateway import KuadrantGateway from testsuite.gateway.gateway_api.route import HTTPRoute +from testsuite.core.topology import topology from testsuite.kuadrant import KuadrantCR from testsuite.kuadrant.policy.authorization.auth_policy import AuthPolicy from testsuite.kuadrant.policy.rate_limit import RateLimitPolicy @@ -36,6 +37,7 @@ def authorization_name(blame): @pytest.fixture(scope="module") +@topology def authorization(request, kuadrant, route, gateway, blame, cluster, label): # pylint: disable=unused-argument """Authorization object (In case of Kuadrant AuthPolicy)""" target_ref = 
request.getfixturevalue(getattr(request, "param", "route")) @@ -46,6 +48,7 @@ def authorization(request, kuadrant, route, gateway, blame, cluster, label): # @pytest.fixture(scope="module") +@topology def rate_limit(kuadrant, cluster, blame, request, module_label, route, gateway): # pylint: disable=unused-argument """ Rate limit object. @@ -97,6 +100,7 @@ def backend(request, cluster, blame, label, testconfig): @pytest.fixture(scope="session") +@topology def gateway(request, kuadrant, cluster, blame, label, testconfig, wildcard_domain) -> Gateway: """Deploys Gateway that wires up the Backend behind the reverse-proxy and Authorino instance""" if kuadrant: @@ -124,13 +128,7 @@ def domain_name(blame) -> str: @pytest.fixture(scope="module") -def hostname(gateway, exposer, domain_name) -> Hostname: - """Exposed Hostname object""" - hostname = exposer.expose_hostname(domain_name, gateway) - return hostname - - -@pytest.fixture(scope="module") +@topology def route(request, kuadrant, gateway, blame, hostname, backend, module_label) -> GatewayRoute: """Route object""" if kuadrant: @@ -144,6 +142,13 @@ def route(request, kuadrant, gateway, blame, hostname, backend, module_label) -> return route +@pytest.fixture(scope="module") +def hostname(gateway, exposer, domain_name) -> Hostname: + """Exposed Hostname object""" + hostname = exposer.expose_hostname(domain_name, gateway) + return hostname + + @pytest.fixture(scope="module") def client(route, hostname): # pylint: disable=unused-argument """Returns httpx client to be used for requests""" diff --git a/testsuite/tests/singlecluster/defaults/merge/auth_policy/conftest.py b/testsuite/tests/singlecluster/defaults/merge/auth_policy/conftest.py index 0a12c251..651ea8d4 100644 --- a/testsuite/tests/singlecluster/defaults/merge/auth_policy/conftest.py +++ b/testsuite/tests/singlecluster/defaults/merge/auth_policy/conftest.py @@ -1,7 +1,10 @@ """Conftest for defaults merge strategy tests for AuthPolicies""" +import time + import pytest 
+from testsuite.core.topology import topology from testsuite.httpx.auth import HeaderApiKeyAuth, HttpxOidcClientAuth from testsuite.kuadrant.policy import Strategy from testsuite.kuadrant.policy.authorization.auth_policy import AuthPolicy @@ -54,6 +57,7 @@ def auth(oidc_provider): @pytest.fixture(scope="module") +@topology def global_authorization(request, cluster, blame, admin_label, admin_api_key): """ Create an AuthPolicy with authentication for an admin with same target as one default. @@ -80,3 +84,4 @@ def commit(request, route, global_authorization, authorization): # pylint: disa request.addfinalizer(policy.delete) policy.commit() policy.wait_for_accepted() + time.sleep(60) # we sleep because if both get reconciled at the same time the overridden status will not work. diff --git a/testsuite/tests/singlecluster/defaults/merge/auth_policy/same_target/test_ab_strategy.py b/testsuite/tests/singlecluster/defaults/merge/auth_policy/same_target/test_ab_strategy.py index 26370dcc..3bb28343 100644 --- a/testsuite/tests/singlecluster/defaults/merge/auth_policy/same_target/test_ab_strategy.py +++ b/testsuite/tests/singlecluster/defaults/merge/auth_policy/same_target/test_ab_strategy.py @@ -1,5 +1,7 @@ """Test defaults policy aimed at the same resource uses the oldest policy.""" +import time + import pytest from testsuite.kuadrant.policy import has_condition @@ -14,6 +16,7 @@ def commit(request, route, authorization, global_authorization): # pylint: disa request.addfinalizer(policy.delete) policy.commit() policy.wait_for_accepted() + time.sleep(60) # we sleep because if both get reconciled at the same time the overridden status will not work. 
@pytest.mark.parametrize( diff --git a/testsuite/tests/singlecluster/defaults/merge/auth_policy/same_target/test_ba_stategy.py b/testsuite/tests/singlecluster/defaults/merge/auth_policy/same_target/test_ba_stategy.py index e5ca26d9..bcadc045 100644 --- a/testsuite/tests/singlecluster/defaults/merge/auth_policy/same_target/test_ba_stategy.py +++ b/testsuite/tests/singlecluster/defaults/merge/auth_policy/same_target/test_ba_stategy.py @@ -1,5 +1,7 @@ """Test defaults policy aimed at the same resource uses the oldest policy.""" +import time + import pytest from testsuite.kuadrant.policy import has_condition @@ -14,6 +16,7 @@ def commit(request, route, authorization, global_authorization): # pylint: disa request.addfinalizer(policy.delete) policy.commit() policy.wait_for_accepted() + time.sleep(60) # we sleep because if both get reconciled at the same time the overridden status will not work. @pytest.mark.parametrize( diff --git a/testsuite/tests/singlecluster/defaults/merge/rate_limit/conftest.py b/testsuite/tests/singlecluster/defaults/merge/rate_limit/conftest.py index a4cc339a..411e743c 100644 --- a/testsuite/tests/singlecluster/defaults/merge/rate_limit/conftest.py +++ b/testsuite/tests/singlecluster/defaults/merge/rate_limit/conftest.py @@ -2,6 +2,7 @@ import pytest +from testsuite.core.topology import topology from testsuite.kuadrant.policy import CelPredicate, Strategy from testsuite.kuadrant.policy.rate_limit import Limit, RateLimitPolicy @@ -18,6 +19,7 @@ def route(backend, route): @pytest.fixture(scope="module") +@topology def global_rate_limit(request, cluster, blame, module_label): """Create a RateLimitPolicy with default policies and a merge strategy.""" target_ref = request.getfixturevalue(getattr(request, "param", "gateway")) diff --git a/testsuite/tests/singlecluster/defaults/merge/rate_limit/same_target/test_ab_strategy.py b/testsuite/tests/singlecluster/defaults/merge/rate_limit/same_target/test_ab_strategy.py index 79e91651..a12bb660 100644 --- 
a/testsuite/tests/singlecluster/defaults/merge/rate_limit/same_target/test_ab_strategy.py +++ b/testsuite/tests/singlecluster/defaults/merge/rate_limit/same_target/test_ab_strategy.py @@ -1,5 +1,7 @@ """Test defaults policy aimed at the same resource uses the oldest policy.""" +import time + import pytest from testsuite.kuadrant.policy import has_condition @@ -15,6 +17,7 @@ def commit(request, route, rate_limit, global_rate_limit): # pylint: disable=un request.addfinalizer(policy.delete) policy.commit() policy.wait_for_accepted() + time.sleep(60) # we sleep because if both get reconciled at the same time the overridden status will not work. @pytest.mark.parametrize( diff --git a/testsuite/tests/singlecluster/defaults/merge/rate_limit/same_target/test_ba_startegy.py b/testsuite/tests/singlecluster/defaults/merge/rate_limit/same_target/test_ba_startegy.py index 6d7d663b..b97b4ed6 100644 --- a/testsuite/tests/singlecluster/defaults/merge/rate_limit/same_target/test_ba_startegy.py +++ b/testsuite/tests/singlecluster/defaults/merge/rate_limit/same_target/test_ba_startegy.py @@ -1,5 +1,7 @@ """Test defaults policy aimed at the same resource uses the oldest policy.""" +import time + import pytest from testsuite.kuadrant.policy import has_condition @@ -15,6 +17,7 @@ def commit(request, route, rate_limit, global_rate_limit): # pylint: disable=un request.addfinalizer(policy.delete) policy.commit() policy.wait_for_accepted() + time.sleep(60) # we sleep because if both get reconciled at the same time the overridden status will not work. 
@pytest.mark.parametrize( diff --git a/testsuite/tests/singlecluster/defaults/test_section_targeting.py b/testsuite/tests/singlecluster/defaults/test_section_targeting.py index f7cabb50..d079be5d 100644 --- a/testsuite/tests/singlecluster/defaults/test_section_targeting.py +++ b/testsuite/tests/singlecluster/defaults/test_section_targeting.py @@ -2,6 +2,7 @@ import pytest +from testsuite.core.topology import topology from testsuite.httpx.auth import HttpxOidcClientAuth from testsuite.kuadrant.policy.authorization.auth_policy import AuthPolicy from testsuite.kuadrant.policy.rate_limit import RateLimitPolicy, Limit @@ -34,6 +35,7 @@ def authorization(cluster, target, route, oidc_provider, module_label, blame): @pytest.fixture(scope="module") +@topology def rate_limit(cluster, target, route, module_label, blame): # pylint: disable=unused-argument """Add a RateLimitPolicy targeting specific section""" rate_limit = RateLimitPolicy.create_instance( diff --git a/testsuite/tests/singlecluster/extensions/oidc_policy/conftest.py b/testsuite/tests/singlecluster/extensions/oidc_policy/conftest.py index 0a72db00..b6edfb00 100644 --- a/testsuite/tests/singlecluster/extensions/oidc_policy/conftest.py +++ b/testsuite/tests/singlecluster/extensions/oidc_policy/conftest.py @@ -6,23 +6,11 @@ """ from contextlib import contextmanager + import pytest -from testsuite.gateway import Gateway, GatewayListener -from testsuite.gateway.gateway_api.gateway import KuadrantGateway from testsuite.kuadrant.extensions.oidc_policy import OIDCPolicy - - -@pytest.fixture(scope="module") -def gateway(request, domain_name, base_domain, cluster, blame, label) -> Gateway: - """Create and configure the test Gateway.""" - fqdn = f"{domain_name}-kuadrant.{base_domain}" - gw = KuadrantGateway.create_instance(cluster, blame("gw"), {"app": label}) - gw.add_listener(GatewayListener(hostname=fqdn)) - request.addfinalizer(gw.delete) - gw.commit() - gw.wait_for_ready() - return gw +from testsuite.core.topology 
import topology # JWT Cookie Helper fixture @@ -44,15 +32,14 @@ def oidc_policy_provider_config(oidc_provider, test_client): @pytest.fixture(scope="module") -def oidc_policy(cluster, blame, oidc_policy_provider_config, gateway): +@topology +def oidc_policy(cluster, blame, oidc_policy_provider_config, route): """Create OIDC policy instance for testing. Note: This fixture depends on 'provider' which should be defined in each test file with the appropriate client-specific configuration. """ - oidc_policy = OIDCPolicy.create_instance( - cluster, blame("oidc-policy"), gateway, provider=oidc_policy_provider_config - ) + oidc_policy = OIDCPolicy.create_instance(cluster, blame("oidc-policy"), route, provider=oidc_policy_provider_config) return oidc_policy diff --git a/testsuite/tests/singlecluster/extensions/plan_policy/test_plan_policy.py b/testsuite/tests/singlecluster/extensions/plan_policy/test_plan_policy.py index b9fb035a..e4911a06 100644 --- a/testsuite/tests/singlecluster/extensions/plan_policy/test_plan_policy.py +++ b/testsuite/tests/singlecluster/extensions/plan_policy/test_plan_policy.py @@ -4,6 +4,7 @@ from testsuite.httpx.auth import HttpxOidcClientAuth from testsuite.kuadrant.extensions.plan_policy import PlanPolicy, Plan +from testsuite.core.topology import topology pytestmark = [pytest.mark.authorino, pytest.mark.kuadrant_only, pytest.mark.extensions] @@ -60,6 +61,7 @@ def authorization(authorization, keycloak): @pytest.fixture(scope="module") +@topology def plan_policy(cluster, blame, target): """Create PlanPolicy targeting the route/gateway""" plan_policy = PlanPolicy.create_instance(cluster, blame("my-plan"), target) diff --git a/testsuite/tests/singlecluster/extensions/telemetry_policy/conftest.py b/testsuite/tests/singlecluster/extensions/telemetry_policy/conftest.py index ed87d2fa..f294da13 100644 --- a/testsuite/tests/singlecluster/extensions/telemetry_policy/conftest.py +++ b/testsuite/tests/singlecluster/extensions/telemetry_policy/conftest.py @@ 
-8,6 +8,7 @@ from testsuite.kuadrant.policy import CelPredicate from testsuite.kubernetes.monitoring import MetricsEndpoint from testsuite.kubernetes.monitoring.service_monitor import ServiceMonitor +from testsuite.core.topology import topology @pytest.fixture(scope="package") @@ -29,6 +30,7 @@ def wait_for_active_targets(prometheus, service_monitor): @pytest.fixture(scope="module") +@topology def telemetry_policy(cluster, blame, gateway): """Creates TelemetryPolicy with user and group labels""" telemetry_policy = TelemetryPolicy.create_instance(cluster, blame("tp"), gateway) diff --git a/testsuite/tests/singlecluster/gateway/authpolicy/test_authpolicy_section_targeting_gateway.py b/testsuite/tests/singlecluster/gateway/authpolicy/test_authpolicy_section_targeting_gateway.py index 8ce8f95e..5c5c02c2 100644 --- a/testsuite/tests/singlecluster/gateway/authpolicy/test_authpolicy_section_targeting_gateway.py +++ b/testsuite/tests/singlecluster/gateway/authpolicy/test_authpolicy_section_targeting_gateway.py @@ -10,6 +10,7 @@ from testsuite.gateway.gateway_api.gateway import KuadrantGateway from testsuite.gateway.gateway_api.hostname import StaticHostname from testsuite.gateway.gateway_api.route import HTTPRoute +from testsuite.core.topology import topology from testsuite.kuadrant.policy.authorization.auth_policy import AuthPolicy pytestmark = [pytest.mark.authorino, pytest.mark.kuadrant_only] @@ -73,6 +74,7 @@ def route(route: HTTPRoute, managed_domain, unmanaged_domain): @pytest.fixture(scope="module") +@topology def authorization(cluster, blame, module_label, oidc_provider, gateway, route): # pylint: disable=unused-argument """Creates an AuthPolicy that targets ONLY the 'secure-listener' section.""" policy = AuthPolicy.create_instance( diff --git a/testsuite/tests/singlecluster/gateway/authpolicy/test_authpolicy_section_targeting_http_route.py b/testsuite/tests/singlecluster/gateway/authpolicy/test_authpolicy_section_targeting_http_route.py index 0a2d2c97..ba788161 
100644 --- a/testsuite/tests/singlecluster/gateway/authpolicy/test_authpolicy_section_targeting_http_route.py +++ b/testsuite/tests/singlecluster/gateway/authpolicy/test_authpolicy_section_targeting_http_route.py @@ -4,6 +4,8 @@ """ import pytest + +from testsuite.core.topology import topology from testsuite.kuadrant.policy.authorization.auth_policy import AuthPolicy pytestmark = [pytest.mark.authorino, pytest.mark.kuadrant_only] @@ -22,6 +24,7 @@ def route(route, backend): @pytest.fixture(scope="module") +@topology def authorization(cluster, blame, module_label, oidc_provider, route): """ Creates an AuthPolicy that targets a specific rule ('rule-1') within the diff --git a/testsuite/tests/singlecluster/gateway/conftest.py b/testsuite/tests/singlecluster/gateway/conftest.py index 1275494e..1bd9c607 100644 --- a/testsuite/tests/singlecluster/gateway/conftest.py +++ b/testsuite/tests/singlecluster/gateway/conftest.py @@ -9,9 +9,11 @@ from testsuite.kuadrant.policy.authorization.auth_policy import AuthPolicy from testsuite.kuadrant.policy.dns import DNSPolicy from testsuite.kuadrant.policy.tls import TLSPolicy +from testsuite.core.topology import topology @pytest.fixture(scope="module") +@topology def gateway(request, cluster, blame, wildcard_domain, module_label): """Returns ready gateway""" gateway_name = blame("gw") @@ -28,6 +30,7 @@ def gateway(request, cluster, blame, wildcard_domain, module_label): @pytest.fixture(scope="module") +@topology def authorization(blame, gateway, module_label, cluster, oidc_provider, route): # pylint: disable=unused-argument """Create AuthPolicy attached to gateway""" @@ -43,6 +46,7 @@ def auth(oidc_provider): @pytest.fixture(scope="module") +@topology def rate_limit(): """ For these tests don't create any RateLimitPolicy diff --git a/testsuite/tests/singlecluster/gateway/dnspolicy/health_check/test_additional_headers.py b/testsuite/tests/singlecluster/gateway/dnspolicy/health_check/test_additional_headers.py index 922d6034..6af38439 
100644 --- a/testsuite/tests/singlecluster/gateway/dnspolicy/health_check/test_additional_headers.py +++ b/testsuite/tests/singlecluster/gateway/dnspolicy/health_check/test_additional_headers.py @@ -62,7 +62,7 @@ def headers_secret(request, cluster, blame): @pytest.fixture(scope="module") def mockserver_client(backend): """Returns Mockserver client from load-balanced service IP""" - return Mockserver(KuadrantClient(base_url=f"http://{backend.service.refresh().external_ip}: 8080")) + return Mockserver(KuadrantClient(base_url=f"http://{backend.service.refresh().external_ip}:8080")) @pytest.fixture(scope="module")