From edf5d59329665da0b2055bc0cc3c24a1960d7e9f Mon Sep 17 00:00:00 2001 From: Ondra Kupka Date: Wed, 4 Feb 2026 14:48:19 +0100 Subject: [PATCH 1/2] controllers: Prevent progressing on scaling only Assisted-by: Claude Code --- go.mod | 5 +- go.sum | 8 +- pkg/controllers/common/scaling/scaling.go | 108 ++++ .../common/scaling/scaling_test.go | 538 ++++++++++++++++++ .../deployment/deployment_controller.go | 36 +- pkg/operator/starter.go | 8 +- .../sync_openshift_oauth_apiserver.go | 55 +- .../sync_openshift_oauth_apiserver_test.go | 7 +- 8 files changed, 728 insertions(+), 37 deletions(-) create mode 100644 pkg/controllers/common/scaling/scaling.go create mode 100644 pkg/controllers/common/scaling/scaling_test.go diff --git a/go.mod b/go.mod index 6c9e02d35..d906b6daf 100644 --- a/go.mod +++ b/go.mod @@ -9,7 +9,7 @@ require ( github.com/google/go-cmp v0.7.0 github.com/onsi/ginkgo/v2 v2.21.0 github.com/openshift-eng/openshift-tests-extension v0.0.0-20251205182537-ff5553e56f33 - github.com/openshift/api v0.0.0-20260126183958-606bd613f9f7 + github.com/openshift/api v0.0.0-20260212193555-c06ab675261f github.com/openshift/build-machinery-go v0.0.0-20250530140348-dc5b2804eeee github.com/openshift/client-go v0.0.0-20260108185524-48f4ccfc4e13 github.com/openshift/library-go v0.0.0-20260210145149-d0e860e8d752 @@ -132,3 +132,6 @@ require ( ) replace github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20251001123353-fd5b1fb35db1 + +// workload-condition-overwrites +replace github.com/openshift/library-go => github.com/tchap/library-go v0.0.0-20260216103045-5a90edab46c3 diff --git a/go.sum b/go.sum index e1352862b..c2ddefdae 100644 --- a/go.sum +++ b/go.sum @@ -146,14 +146,12 @@ github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= github.com/openshift-eng/openshift-tests-extension v0.0.0-20251205182537-ff5553e56f33 h1:LJf6kWZQ36iako7WXRzdEa5XKrnyrAX8GBhlAcKRaZQ= github.com/openshift-eng/openshift-tests-extension v0.0.0-20251205182537-ff5553e56f33/go.mod h1:6gkP5f2HL0meusT0Aim8icAspcD1cG055xxBZ9yC68M= -github.com/openshift/api v0.0.0-20260126183958-606bd613f9f7 h1:96rhgJpWlWzKEslMd6aYFMixV9vQVY32M71JcO4Gzn0= -github.com/openshift/api v0.0.0-20260126183958-606bd613f9f7/go.mod h1:d5uzF0YN2nQQFA0jIEWzzOZ+edmo6wzlGLvx5Fhz4uY= +github.com/openshift/api v0.0.0-20260212193555-c06ab675261f h1:l1IgsK48Ym/nED30yfaCTF4MtswO1eOoyfXgh2rEmdw= +github.com/openshift/api v0.0.0-20260212193555-c06ab675261f/go.mod h1:d5uzF0YN2nQQFA0jIEWzzOZ+edmo6wzlGLvx5Fhz4uY= github.com/openshift/build-machinery-go v0.0.0-20250530140348-dc5b2804eeee h1:+Sp5GGnjHDhT/a/nQ1xdp43UscBMr7G5wxsYotyhzJ4= github.com/openshift/build-machinery-go v0.0.0-20250530140348-dc5b2804eeee/go.mod h1:8jcm8UPtg2mCAsxfqKil1xrmRMI3a+XU2TZ9fF8A7TE= github.com/openshift/client-go v0.0.0-20260108185524-48f4ccfc4e13 h1:6rd4zSo2UaWQcAPZfHK9yzKVqH0BnMv1hqMzqXZyTds= github.com/openshift/client-go v0.0.0-20260108185524-48f4ccfc4e13/go.mod h1:YvOmPmV7wcJxpfhTDuFqqs2Xpb3M3ovsM6Qs/i2ptq4= -github.com/openshift/library-go v0.0.0-20260210145149-d0e860e8d752 h1:KQj7j9VpMzv+gYerCgA9CbPehwGO3ARUg+B2Pt1YcWs= -github.com/openshift/library-go v0.0.0-20260210145149-d0e860e8d752/go.mod h1:DCRz1EgdayEmr9b6KXKDL+DWBN0rGHu/VYADeHzPoOk= github.com/openshift/multi-operator-manager v0.0.0-20241205181422-20aa3906b99d h1:Rzx23P63JFNNz5D23ubhC0FCN5rK8CeJhKcq5QKcdyU= github.com/openshift/multi-operator-manager v0.0.0-20241205181422-20aa3906b99d/go.mod 
h1:iVi9Bopa5cLhjG5ie9DoZVVqkH8BGb1FQVTtecOLn4I= github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20251001123353-fd5b1fb35db1 h1:PMTgifBcBRLJJiM+LgSzPDTk9/Rx4qS09OUrfpY6GBQ= @@ -205,6 +203,8 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/tchap/library-go v0.0.0-20260216103045-5a90edab46c3 h1:ZAAnudXm3Q3NadreXfLC/rbfN9AUYMs5yaz25SSzkCQ= +github.com/tchap/library-go v0.0.0-20260216103045-5a90edab46c3/go.mod h1:K3FoNLgNBFYbFuG+Kr8usAnQxj1w84XogyUp2M8rK8k= github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE= github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= diff --git a/pkg/controllers/common/scaling/scaling.go b/pkg/controllers/common/scaling/scaling.go new file mode 100644 index 000000000..f6765adf4 --- /dev/null +++ b/pkg/controllers/common/scaling/scaling.go @@ -0,0 +1,108 @@ +package scaling + +import ( + "fmt" + "time" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/utils/clock" + "k8s.io/utils/ptr" + + operatorv1 "github.com/openshift/api/operator/v1" + applyoperatorv1 "github.com/openshift/client-go/operator/applyconfigurations/operator/v1" +) + +const ( + replicasChangedAnnotation = "authentication.operator.openshift.io/replicas-changed" + deploymentProgressedAnnotation = "authentication.operator.openshift.io/deployment-progressed" + scalingBeginTimeout = 1 * time.Minute +) + +// ProcessDeployment ensures the operator does not end up progressing on scaling. +// We define that scaling happens any time .spec.replicas is the only field that changes. +// The idea is then as follows: +// +// 1. When the replicas field is updated, store the change timestamp in a deployment annotation. +// 2. When the deployment eventually starts progressing, add another annotation so that we know it happened. +// 3. When the deployment hasn't progressing for too long, or it has finished progressing, remove all annotations. +// +// When the timestamp annotation is present, we should overwrite Progressing to be false. +// +// So, ProcessDeployment amends the expected deployment in place, also returning any conditions to set on the operator. 
+func ProcessDeployment(existing, expected *appsv1.Deployment, clock clock.Clock, conditionPrefix string) ([]*applyoperatorv1.OperatorConditionApplyConfiguration, error) { + if !specsEqualIgnoringReplicas(existing, expected) { + return nil, nil + } + + if expected.Annotations == nil { + expected.Annotations = make(map[string]string) + } + + if !ptr.Equal(existing.Spec.Replicas, expected.Spec.Replicas) { + expected.Annotations[replicasChangedAnnotation] = clock.Now().UTC().Format(time.RFC3339) + return cancelProgressing(conditionPrefix), nil + } + + var replicasChangedAt time.Time + if v, ok := existing.Annotations[replicasChangedAnnotation]; ok { + var err error + replicasChangedAt, err = time.Parse(time.RFC3339, v) + if err != nil { + return nil, fmt.Errorf("unable to parse annotation %q = %q: %w", replicasChangedAnnotation, v, err) + } + } + if replicasChangedAt.IsZero() { + return nil, nil + } + + // Cancel scaling if we are done, or the whole process has reached the specified timeout. + startedProgressing := existing.Annotations[deploymentProgressedAnnotation] == "true" + if !isDeploymentProgressing(existing.Status) && (startedProgressing || clock.Since(replicasChangedAt) > scalingBeginTimeout) { + return nil, nil + } + + expected.Annotations[replicasChangedAnnotation] = existing.Annotations[replicasChangedAnnotation] + if startedProgressing || isDeploymentProgressing(existing.Status) { + expected.Annotations[deploymentProgressedAnnotation] = "true" + } + return cancelProgressing(conditionPrefix), nil +} + +// specsEqualIgnoringReplicas returns true when the deployment specs are the same or diff only in the replicas field. +// The function returns false automatically when one of the deployments is nil. +func specsEqualIgnoringReplicas(existing, expected *appsv1.Deployment) bool { + if existing == nil || expected == nil { + return false + } + + s1 := &existing.Spec + s2 := &expected.Spec + + if !ptr.Equal(s1.Replicas, s2.Replicas) { + s2 = s2.DeepCopy() + s2.Replicas = s1.Replicas + } + return equality.Semantic.DeepEqual(s1, s2) +} + +// isDeploymentProgressing returns whether the given deployment is progressing. +func isDeploymentProgressing(status appsv1.DeploymentStatus) bool { + for _, cond := range status.Conditions { + if cond.Type == appsv1.DeploymentProgressing { + return !(cond.Status == corev1.ConditionTrue && cond.Reason == "NewReplicaSetAvailable") + } + } + return false +} + +func cancelProgressing(conditionPrefix string) []*applyoperatorv1.OperatorConditionApplyConfiguration { + return []*applyoperatorv1.OperatorConditionApplyConfiguration{ + applyoperatorv1.OperatorCondition(). + WithType(fmt.Sprintf("%sDeploymentProgressing", conditionPrefix)). + WithStatus(operatorv1.ConditionFalse). + WithReason("AsExpected"). 
+ WithMessage("Scaling replicas only"), + } +} diff --git a/pkg/controllers/common/scaling/scaling_test.go b/pkg/controllers/common/scaling/scaling_test.go new file mode 100644 index 000000000..674422844 --- /dev/null +++ b/pkg/controllers/common/scaling/scaling_test.go @@ -0,0 +1,538 @@ +package scaling + +import ( + "testing" + "time" + + "github.com/google/go-cmp/cmp" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + clocktesting "k8s.io/utils/clock/testing" + "k8s.io/utils/ptr" + + operatorv1 "github.com/openshift/api/operator/v1" +) + +func TestProcessDeployment(t *testing.T) { + baseTime := time.Date(2024, 1, 15, 12, 0, 0, 0, time.UTC) + fakeClock := clocktesting.NewFakeClock(baseTime) + + // Helper to create a basic deployment + makeDeployment := func(replicas int32, annotations map[string]string) *appsv1.Deployment { + return &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-deployment", + Namespace: "test-ns", + Annotations: annotations, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: ptr.To(replicas), + }, + } + } + + // Helper to create deployment status with Progressing condition + makeStatus := func(progressing bool, reason string) appsv1.DeploymentStatus { + status := corev1.ConditionFalse + if progressing { + status = corev1.ConditionTrue + } + return appsv1.DeploymentStatus{ + Conditions: []appsv1.DeploymentCondition{ + { + Type: appsv1.DeploymentProgressing, + Status: status, + Reason: reason, + }, + }, + } + } + + // Timestamps used in tests + thirtySecondsAgo := baseTime.Add(-30 * time.Second).UTC().Format(time.RFC3339) + justBeforeTimeout := baseTime.Add(-59 * time.Second).UTC().Format(time.RFC3339) + justAfterTimeout := baseTime.Add(-61 * time.Second).UTC().Format(time.RFC3339) + nowTimestamp := baseTime.UTC().Format(time.RFC3339) + + tests := []struct { + name string + existing *appsv1.Deployment + expected *appsv1.Deployment + wantAnnotations map[string]string + wantConditionOverwrite bool + wantErr bool + }{ + // Edge cases: nil deployments and non-scaling changes + { + name: "noop when existing is nil", + existing: nil, + expected: makeDeployment(3, nil), + }, + { + name: "noop when expected is nil", + existing: makeDeployment(3, nil), + expected: nil, + }, + { + name: "noop when spec changed beyond replicas and discard tracking annotations", + existing: func() *appsv1.Deployment { + d := makeDeployment(3, map[string]string{ + replicasChangedAnnotation: thirtySecondsAgo, + deploymentProgressedAnnotation: "true", + }) + d.Spec.Paused = true + return d + }(), + expected: makeDeployment(3, nil), + }, + { + name: "noop when replicas unchanged and no tracking annotation", + existing: makeDeployment(3, nil), + expected: makeDeployment(3, nil), + }, + { + name: "noop when existing has nil annotations map", + existing: &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-deployment", + Namespace: "test-ns", + Annotations: nil, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: ptr.To[int32](3), + }, + }, + expected: makeDeployment(3, nil), + }, + + // Scaling start: replicas change detected + { + name: "scaling start: cancel progressing when scaling up", + existing: makeDeployment(3, nil), + expected: makeDeployment(5, nil), + wantAnnotations: map[string]string{ + replicasChangedAnnotation: nowTimestamp, + }, + wantConditionOverwrite: true, + }, + { + name: "scaling start: cancel progressing when scaling down", + existing: makeDeployment(5, nil), + expected: makeDeployment(3, nil), + 
wantAnnotations: map[string]string{ + replicasChangedAnnotation: nowTimestamp, + }, + wantConditionOverwrite: true, + }, + { + name: "scaling start: new timestamp when replicas change during active scaling", + existing: func() *appsv1.Deployment { + d := makeDeployment(5, map[string]string{ + replicasChangedAnnotation: thirtySecondsAgo, + }) + d.Status = makeStatus(true, "ReplicaSetUpdated") + return d + }(), + expected: makeDeployment(6, nil), + wantAnnotations: map[string]string{ + replicasChangedAnnotation: nowTimestamp, // new timestamp, not the old one + }, + wantConditionOverwrite: true, + }, + + // Scaling in progress: deployment is actively rolling out + { + name: "scaling in progress: keep canceling and mark as progressed", + existing: func() *appsv1.Deployment { + d := makeDeployment(3, map[string]string{ + replicasChangedAnnotation: thirtySecondsAgo, + }) + d.Status = makeStatus(true, "ReplicaSetUpdated") + return d + }(), + expected: makeDeployment(3, nil), + wantAnnotations: map[string]string{ + replicasChangedAnnotation: thirtySecondsAgo, + deploymentProgressedAnnotation: "true", + }, + wantConditionOverwrite: true, + }, + { + name: "scaling in progress: keep canceling when no deployment Progressing condition exists", + existing: makeDeployment(3, map[string]string{ + replicasChangedAnnotation: thirtySecondsAgo, + }), + expected: makeDeployment(3, nil), + wantAnnotations: map[string]string{ + replicasChangedAnnotation: thirtySecondsAgo, + }, + wantConditionOverwrite: true, + }, + + // Scaling complete: deployment finished, clear tracking + { + name: "scaling complete: clear annotations after observing progression", + existing: func() *appsv1.Deployment { + d := makeDeployment(3, map[string]string{ + replicasChangedAnnotation: thirtySecondsAgo, + deploymentProgressedAnnotation: "true", + }) + d.Status = makeStatus(true, "NewReplicaSetAvailable") + return d + }(), + expected: makeDeployment(3, nil), + }, + { + name: "scaling complete: clear annotations after timeout even without observing progression", + existing: func() *appsv1.Deployment { + d := makeDeployment(3, map[string]string{ + replicasChangedAnnotation: justAfterTimeout, + }) + d.Status = makeStatus(true, "NewReplicaSetAvailable") + return d + }(), + expected: makeDeployment(3, nil), + }, + + // Scaling complete but waiting: finished quickly, never saw progression + { + name: "scaling finished early: keep canceling until timeout", + existing: func() *appsv1.Deployment { + d := makeDeployment(3, map[string]string{ + replicasChangedAnnotation: thirtySecondsAgo, + }) + d.Status = makeStatus(true, "NewReplicaSetAvailable") + return d + }(), + expected: makeDeployment(3, nil), + wantAnnotations: map[string]string{ + replicasChangedAnnotation: thirtySecondsAgo, + }, + wantConditionOverwrite: true, + }, + + // Timeout boundary tests + { + name: "timeout boundary: keep canceling at 59 seconds", + existing: func() *appsv1.Deployment { + d := makeDeployment(3, map[string]string{ + replicasChangedAnnotation: justBeforeTimeout, + }) + d.Status = makeStatus(true, "NewReplicaSetAvailable") + return d + }(), + expected: makeDeployment(3, nil), + wantAnnotations: map[string]string{ + replicasChangedAnnotation: justBeforeTimeout, + }, + wantConditionOverwrite: true, + }, + { + name: "timeout boundary: clear annotations at 61 seconds", + existing: func() *appsv1.Deployment { + d := makeDeployment(3, map[string]string{ + replicasChangedAnnotation: justAfterTimeout, + }) + d.Status = makeStatus(true, "NewReplicaSetAvailable") + return d 
+ }(), + expected: makeDeployment(3, nil), + }, + + // Error cases + { + name: "error on malformed timestamp annotation", + existing: makeDeployment(3, map[string]string{ + replicasChangedAnnotation: "not-a-timestamp", + }), + expected: makeDeployment(3, nil), + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + conditions, err := ProcessDeployment(tt.existing, tt.expected, fakeClock, "Test") + + if tt.wantErr != (err != nil) { + t.Errorf("unexpected error returned: %v", err) + } + + // Check conditions + if tt.wantConditionOverwrite != (len(conditions) == 1) { + var expectedCount int + if tt.wantConditionOverwrite { + expectedCount = 1 + } + t.Errorf("expected %d condition overwrites, but got %d", expectedCount, len(conditions)) + } else if tt.wantConditionOverwrite { + cond := conditions[0] + if *cond.Type != "TestDeploymentProgressing" { + t.Errorf("expected condition type %q, but got %q", "TestDeploymentProgressing", *cond.Type) + } + if *cond.Status != operatorv1.ConditionFalse { + t.Errorf("expected condition status False, but got %v", *cond.Status) + } + } + + // Check scaling-related annotations on expected deployment + if tt.expected != nil { + gotAnnotations := tt.expected.Annotations + if gotAnnotations == nil { + gotAnnotations = make(map[string]string) + } + + wantAnnotations := tt.wantAnnotations + if wantAnnotations == nil { + wantAnnotations = make(map[string]string) + } + + if !cmp.Equal(wantAnnotations, gotAnnotations) { + t.Errorf("annotations mismatch:\n%s", cmp.Diff(wantAnnotations, gotAnnotations)) + } + } + }) + } +} + +func TestSpecsEqualIgnoringReplicas(t *testing.T) { + tests := []struct { + name string + existing *appsv1.Deployment + expected *appsv1.Deployment + want bool + }{ + { + name: "existing nil returns false", + existing: nil, + expected: &appsv1.Deployment{}, + want: false, + }, + { + name: "expected nil returns false", + existing: &appsv1.Deployment{}, + expected: nil, + want: false, + }, + { + name: "both nil returns false", + existing: nil, + expected: nil, + want: false, + }, + { + name: "identical specs returns true", + existing: &appsv1.Deployment{ + Spec: appsv1.DeploymentSpec{ + Replicas: ptr.To[int32](3), + MinReadySeconds: 5, + }, + }, + expected: &appsv1.Deployment{ + Spec: appsv1.DeploymentSpec{ + Replicas: ptr.To[int32](3), + MinReadySeconds: 5, + }, + }, + want: true, + }, + { + name: "same specs except replicas returns true", + existing: &appsv1.Deployment{ + Spec: appsv1.DeploymentSpec{ + Replicas: ptr.To[int32](3), + MinReadySeconds: 5, + }, + }, + expected: &appsv1.Deployment{ + Spec: appsv1.DeploymentSpec{ + Replicas: ptr.To[int32](5), + MinReadySeconds: 5, + }, + }, + want: true, + }, + { + name: "different specs returns false", + existing: &appsv1.Deployment{ + Spec: appsv1.DeploymentSpec{ + Replicas: ptr.To[int32](3), + MinReadySeconds: 5}, + }, + expected: &appsv1.Deployment{ + Spec: appsv1.DeploymentSpec{ + Replicas: ptr.To[int32](3), + MinReadySeconds: 3, + }, + }, + want: false, + }, + { + name: "nil replicas in existing handled correctly", + existing: &appsv1.Deployment{ + Spec: appsv1.DeploymentSpec{ + Replicas: nil, + }, + }, + expected: &appsv1.Deployment{ + Spec: appsv1.DeploymentSpec{ + Replicas: ptr.To[int32](3), + }, + }, + want: true, + }, + { + name: "nil replicas in expected handled correctly", + existing: &appsv1.Deployment{ + Spec: appsv1.DeploymentSpec{ + Replicas: ptr.To[int32](3), + }, + }, + expected: &appsv1.Deployment{ + Spec: appsv1.DeploymentSpec{ + Replicas: nil, 
+ }, + }, + want: true, + }, + { + name: "both replicas nil handled correctly", + existing: &appsv1.Deployment{ + Spec: appsv1.DeploymentSpec{ + Replicas: nil, + }, + }, + expected: &appsv1.Deployment{ + Spec: appsv1.DeploymentSpec{ + Replicas: nil, + }, + }, + want: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := specsEqualIgnoringReplicas(tt.existing, tt.expected) + if got != tt.want { + t.Errorf("specsEqualIgnoringReplicas() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestIsDeploymentProgressing(t *testing.T) { + tests := []struct { + name string + status appsv1.DeploymentStatus + want bool + }{ + { + name: "return false on empty conditions", + status: appsv1.DeploymentStatus{}, + want: false, + }, + { + name: "Progressing condition with NewReplicaSetAvailable returns false", + status: appsv1.DeploymentStatus{ + Conditions: []appsv1.DeploymentCondition{ + { + Type: appsv1.DeploymentProgressing, + Status: corev1.ConditionTrue, + Reason: "NewReplicaSetAvailable", + }, + }, + }, + want: false, + }, + { + name: "Progressing condition with ReplicaSetUpdated returns true", + status: appsv1.DeploymentStatus{ + Conditions: []appsv1.DeploymentCondition{ + { + Type: appsv1.DeploymentProgressing, + Status: corev1.ConditionTrue, + Reason: "ReplicaSetUpdated", + }, + }, + }, + want: true, + }, + { + name: "Progressing condition with status False returns true", + status: appsv1.DeploymentStatus{ + Conditions: []appsv1.DeploymentCondition{ + { + Type: appsv1.DeploymentProgressing, + Status: corev1.ConditionFalse, + Reason: "ProgressDeadlineExceeded", + }, + }, + }, + want: true, + }, + { + name: "other condition type is ignored", + status: appsv1.DeploymentStatus{ + Conditions: []appsv1.DeploymentCondition{ + { + Type: appsv1.DeploymentAvailable, + Status: corev1.ConditionTrue, + }, + }, + }, + want: false, + }, + { + name: "Progressing is found among multiple conditions", + status: appsv1.DeploymentStatus{ + Conditions: []appsv1.DeploymentCondition{ + { + Type: appsv1.DeploymentAvailable, + Status: corev1.ConditionTrue, + }, + { + Type: appsv1.DeploymentProgressing, + Status: corev1.ConditionTrue, + Reason: "ReplicaSetUpdated", + }, + }, + }, + want: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := isDeploymentProgressing(tt.status) + if got != tt.want { + t.Errorf("isDeploymentProgressing() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestCancelProgressing(t *testing.T) { + conditions := cancelProgressing("OAuth") + + if len(conditions) != 1 { + t.Fatalf("expected 1 condition, got %d", len(conditions)) + } + + cond := conditions[0] + + if *cond.Type != "OAuthDeploymentProgressing" { + t.Errorf("expected type %q, got %q", "OAuthDeploymentProgressing", *cond.Type) + } + if *cond.Status != operatorv1.ConditionFalse { + t.Errorf("expected status False, got %v", *cond.Status) + } + if *cond.Reason != "AsExpected" { + t.Errorf("expected reason %q, got %q", "AsExpected", *cond.Reason) + } + if *cond.Message != "Scaling replicas only" { + t.Errorf("expected message %q, got %q", "Scaling replicas only", *cond.Message) + } +} diff --git a/pkg/controllers/deployment/deployment_controller.go b/pkg/controllers/deployment/deployment_controller.go index b6b8732ec..f2f344a72 100644 --- a/pkg/controllers/deployment/deployment_controller.go +++ b/pkg/controllers/deployment/deployment_controller.go @@ -19,14 +19,17 @@ import ( appsv1listers "k8s.io/client-go/listers/apps/v1" corev1listers 
"k8s.io/client-go/listers/core/v1" "k8s.io/klog/v2" + "k8s.io/utils/clock" configv1 "github.com/openshift/api/config/v1" configinformer "github.com/openshift/client-go/config/informers/externalversions" configv1listers "github.com/openshift/client-go/config/listers/config/v1" + applyoperatorv1 "github.com/openshift/client-go/operator/applyconfigurations/operator/v1" routeinformers "github.com/openshift/client-go/route/informers/externalversions" routev1listers "github.com/openshift/client-go/route/listers/route/v1" "github.com/openshift/cluster-authentication-operator/bindata" "github.com/openshift/cluster-authentication-operator/pkg/controllers/common" + "github.com/openshift/cluster-authentication-operator/pkg/controllers/common/scaling" bootstrap "github.com/openshift/library-go/pkg/authentication/bootstrapauthenticator" "github.com/openshift/library-go/pkg/controller/factory" "github.com/openshift/library-go/pkg/operator/apiserver/controller/workload" @@ -188,17 +191,17 @@ func (c *oauthServerDeploymentSyncer) PreconditionFulfilled(_ context.Context) ( return true, nil } -func (c *oauthServerDeploymentSyncer) Sync(ctx context.Context, syncContext factory.SyncContext) (*appsv1.Deployment, bool, []error) { +func (c *oauthServerDeploymentSyncer) Sync(ctx context.Context, syncContext factory.SyncContext) (*appsv1.Deployment, bool, []*applyoperatorv1.OperatorConditionApplyConfiguration, []error) { errs := []error{} operatorSpec, operatorStatus, _, err := c.operatorClient.GetOperatorState() if err != nil { - return nil, false, append(errs, err) + return nil, false, nil, append(errs, err) } proxyConfig, err := c.getProxyConfig() if err != nil { - return nil, false, append(errs, err) + return nil, false, nil, append(errs, err) } // resourceVersions serves to store versions of config resources so that we @@ -215,7 +218,7 @@ func (c *oauthServerDeploymentSyncer) Sync(ctx context.Context, syncContext fact configResourceVersions, err := c.getConfigResourceVersions() if err != nil { - return nil, false, append(errs, err) + return nil, false, nil, append(errs, err) } resourceVersions = append(resourceVersions, configResourceVersions...) @@ -230,10 +233,16 @@ func (c *oauthServerDeploymentSyncer) Sync(ctx context.Context, syncContext fact } } + deploymentTemplate := resourceread.ReadDeploymentV1OrDie(bindata.MustAsset("oauth-openshift/deployment.yaml")) + existingDeployment, err := c.deploymentsLister.Deployments(deploymentTemplate.Namespace).Get(deploymentTemplate.Name) + if err != nil && !errors.IsNotFound(err) { + return nil, false, nil, append(errs, err) + } + // deployment, have RV of all resources expectedDeployment, err := getOAuthServerDeployment(operatorSpec, proxyConfig, c.bootstrapUserChangeRollOut, resourceVersions...) if err != nil { - return nil, false, append(errs, err) + return nil, false, nil, append(errs, err) } if _, err := c.secretLister.Secrets("openshift-authentication").Get("v4-0-config-system-custom-router-certs"); err == nil { @@ -254,19 +263,26 @@ func (c *oauthServerDeploymentSyncer) Sync(ctx context.Context, syncContext fact err = c.ensureAtMostOnePodPerNode(&expectedDeployment.Spec, "oauth-openshift") if err != nil { - return nil, false, append(errs, fmt.Errorf("unable to ensure at most one pod per node: %v", err)) + return nil, false, nil, append(errs, fmt.Errorf("unable to ensure at most one pod per node: %v", err)) } // Set the replica count to the number of control plane nodes. 
controlPlaneCount, err := c.countNodes(expectedDeployment.Spec.Template.Spec.NodeSelector) if err != nil { - return nil, false, append(errs, fmt.Errorf("failed to determine number of control plane nodes: %v", err)) + return nil, false, nil, append(errs, fmt.Errorf("failed to determine number of control plane nodes: %v", err)) } if controlPlaneCount == nil { - return nil, false, append(errs, fmt.Errorf("found nil control plane nodes count")) + return nil, false, nil, append(errs, fmt.Errorf("found nil control plane nodes count")) } expectedDeployment.Spec.Replicas = controlPlaneCount + + // This must be called before we set the rolling update parameters. + conditionOverwrites, err := scaling.ProcessDeployment(existingDeployment, expectedDeployment, clock.RealClock{}, "OAuthServer") + if err != nil { + return nil, false, nil, append(errs, err) + } + setRollingUpdateParameters(*controlPlaneCount, expectedDeployment) deployment, _, err := resourceapply.ApplyDeployment(ctx, c.deployments, @@ -275,10 +291,10 @@ func (c *oauthServerDeploymentSyncer) Sync(ctx context.Context, syncContext fact resourcemerge.ExpectedDeploymentGeneration(expectedDeployment, operatorStatus.Generations), ) if err != nil { - return nil, false, append(errs, fmt.Errorf("applying deployment of the integrated OAuth server failed: %w", err)) + return nil, false, nil, append(errs, fmt.Errorf("applying deployment of the integrated OAuth server failed: %w", err)) } - return deployment, true, errs + return deployment, true, conditionOverwrites, errs } func (c *oauthServerDeploymentSyncer) getProxyConfig() (*configv1.Proxy, error) { diff --git a/pkg/operator/starter.go b/pkg/operator/starter.go index 331b00abe..03e36c020 100644 --- a/pkg/operator/starter.go +++ b/pkg/operator/starter.go @@ -456,6 +456,8 @@ func prepareOauthAPIServerOperator( return nil, nil, err } + const apiServerConditionsPrefix = "APIServer" + authAPIServerWorkload := workload.NewOAuthAPIServerWorkload( authOperatorInput.authenticationOperatorClient, workloadcontroller.CountNodesFuncWrapper(informerFactories.kubeInformersForNamespaces.InformersFor("").Core().V1().Nodes().Lister()), @@ -467,7 +469,9 @@ func prepareOauthAPIServerOperator( informerFactories.kubeInformersForNamespaces.InformersFor("openshift-oauth-apiserver").Apps().V1().Deployments().Lister(), authConfigChecker, featureGateAccessor, - versionRecorder) + versionRecorder, + apiServerConditionsPrefix, + ) infra, err := authOperatorInput.configClient.ConfigV1().Infrastructures().Get(ctx, "cluster", metav1.GetOptions{}) if err != nil && errors.IsNotFound(err) { @@ -503,8 +507,6 @@ func prepareOauthAPIServerOperator( return s }) - const apiServerConditionsPrefix = "APIServer" - apiServerControllers, err := apiservercontrollerset.NewAPIServerControllerSet( "oauth-apiserver", authOperatorInput.authenticationOperatorClient, diff --git a/pkg/operator/workload/sync_openshift_oauth_apiserver.go b/pkg/operator/workload/sync_openshift_oauth_apiserver.go index f3f432416..f166a34e4 100644 --- a/pkg/operator/workload/sync_openshift_oauth_apiserver.go +++ b/pkg/operator/workload/sync_openshift_oauth_apiserver.go @@ -8,8 +8,6 @@ import ( "strconv" "strings" - "github.com/openshift/library-go/pkg/operator/v1helpers" - appsv1 "k8s.io/api/apps/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -17,8 +15,10 @@ import ( "k8s.io/client-go/kubernetes" appsv1listers "k8s.io/client-go/listers/apps/v1" "k8s.io/klog/v2" + "k8s.io/utils/clock" operatorv1 
"github.com/openshift/api/operator/v1" + applyoperatorv1 "github.com/openshift/client-go/operator/applyconfigurations/operator/v1" "github.com/openshift/library-go/pkg/controller/factory" libgoetcd "github.com/openshift/library-go/pkg/operator/configobserver/etcd" "github.com/openshift/library-go/pkg/operator/configobserver/featuregates" @@ -29,10 +29,12 @@ import ( "github.com/openshift/library-go/pkg/operator/resource/resourcemerge" "github.com/openshift/library-go/pkg/operator/resource/resourceread" "github.com/openshift/library-go/pkg/operator/status" + "github.com/openshift/library-go/pkg/operator/v1helpers" "github.com/openshift/cluster-authentication-operator/bindata" "github.com/openshift/cluster-authentication-operator/pkg/controllers/common" "github.com/openshift/cluster-authentication-operator/pkg/controllers/common/arguments" + "github.com/openshift/cluster-authentication-operator/pkg/controllers/common/scaling" oauthapiconfigobservercontroller "github.com/openshift/cluster-authentication-operator/pkg/operator/configobservation/configobservercontroller" ) @@ -59,6 +61,7 @@ type OAuthAPIServerWorkload struct { deploymentsLister appsv1listers.DeploymentLister authConfigChecker common.AuthConfigChecker featureGateAccessor featuregates.FeatureGateAccess + conditionPrefix string } // NewOAuthAPIServerWorkload creates new OAuthAPIServerWorkload struct @@ -74,6 +77,7 @@ func NewOAuthAPIServerWorkload( authConfigChecker common.AuthConfigChecker, featureGateAccessor featuregates.FeatureGateAccess, versionRecorder status.VersionGetter, + conditionPrefix string, ) *OAuthAPIServerWorkload { return &OAuthAPIServerWorkload{ operatorClient: operatorClient, @@ -87,6 +91,7 @@ func NewOAuthAPIServerWorkload( deploymentsLister: deploymentsLister, authConfigChecker: authConfigChecker, featureGateAccessor: featureGateAccessor, + conditionPrefix: conditionPrefix, } } @@ -154,41 +159,46 @@ func (c *OAuthAPIServerWorkload) preconditionFulfilledInternal(operatorSpec *ope } // Sync essentially manages OAuthAPI server. 
-func (c *OAuthAPIServerWorkload) Sync(ctx context.Context, syncCtx factory.SyncContext) (*appsv1.Deployment, bool, []error) { +func (c *OAuthAPIServerWorkload) Sync(ctx context.Context, syncCtx factory.SyncContext) (*appsv1.Deployment, bool, []*applyoperatorv1.OperatorConditionApplyConfiguration, []error) { errs := []error{} operatorSpec, operatorStatus, _, err := c.operatorClient.GetOperatorState() if err != nil { errs = append(errs, err) - return nil, false, errs + return nil, false, nil, errs } - actualDeployment, err := c.syncDeployment(ctx, operatorSpec, operatorStatus, syncCtx.Recorder()) + actualDeployment, conditionOverwrites, err := c.syncDeployment(ctx, operatorSpec, operatorStatus, syncCtx.Recorder()) if err != nil { errs = append(errs, fmt.Errorf("%q: %v", "deployments", err)) } - return actualDeployment, true, errs + return actualDeployment, true, conditionOverwrites, errs } -func (c *OAuthAPIServerWorkload) syncDeployment(ctx context.Context, operatorSpec *operatorv1.OperatorSpec, operatorStatus *operatorv1.OperatorStatus, eventRecorder events.Recorder) (*appsv1.Deployment, error) { +func (c *OAuthAPIServerWorkload) syncDeployment( + ctx context.Context, + operatorSpec *operatorv1.OperatorSpec, + operatorStatus *operatorv1.OperatorStatus, + eventRecorder events.Recorder, +) (*appsv1.Deployment, []*applyoperatorv1.OperatorConditionApplyConfiguration, error) { if operatorStatus.LatestAvailableRevision == 0 { // this a backstop during the migration from 4.17 whe this information is in .status.oauthAPIServer.latestAvailableRevision - return nil, fmt.Errorf(".status.latestAvailableRevision is not yet available") + return nil, nil, fmt.Errorf(".status.latestAvailableRevision is not yet available") } tmpl, err := bindata.Asset("oauth-apiserver/deploy.yaml") if err != nil { - return nil, err + return nil, nil, err } argsRaw, err := GetAPIServerArgumentsRaw(*operatorSpec) if err != nil { - return nil, err + return nil, nil, err } args, err := arguments.Parse(argsRaw) if err != nil { - return nil, err + return nil, nil, err } // log level verbosity is taken from the spec always @@ -204,11 +214,16 @@ func (c *OAuthAPIServerWorkload) syncDeployment(ctx context.Context, operatorSpe tmpl = []byte(r.Replace(string(tmpl))) re := regexp.MustCompile(`\$\{[^}]*}`) if match := re.Find(tmpl); len(match) > 0 && !excludedReferences.Has(string(match)) { - return nil, fmt.Errorf("invalid template reference %q", string(match)) + return nil, nil, fmt.Errorf("invalid template reference %q", string(match)) } required := resourceread.ReadDeploymentV1OrDie(tmpl) + existing, err := c.deploymentsLister.Deployments(required.Namespace).Get(required.Name) + if err != nil && !errors.IsNotFound(err) { + return nil, nil, err + } + // use the following routine for things that would require special formatting/padding (yaml) encodedArgs := arguments.EncodeWithDelimiter(args, " \\\n ") r = strings.NewReplacer( @@ -245,7 +260,7 @@ func (c *OAuthAPIServerWorkload) syncDeployment(ctx context.Context, operatorSpe resourcehash.NewObjectRef().ForConfigMap().InNamespace(c.targetNamespace).Named("trusted-ca-bundle"), ) if err != nil { - return nil, fmt.Errorf("invalid dependency reference: %q", err) + return nil, nil, fmt.Errorf("invalid dependency reference: %q", err) } for k, v := range inputHashes { @@ -259,22 +274,28 @@ func (c *OAuthAPIServerWorkload) syncDeployment(ctx context.Context, operatorSpe err = c.ensureAtMostOnePodPerNode(&required.Spec, "oauth-apiserver") if err != nil { - return nil, fmt.Errorf("unable to 
ensure at most one pod per node: %v", err) + return nil, nil, fmt.Errorf("unable to ensure at most one pod per node: %v", err) } // Set the replica count to the number of master nodes. masterNodeCount, err := c.countNodes(required.Spec.Template.Spec.NodeSelector) if err != nil { - return nil, fmt.Errorf("failed to determine number of master nodes: %v", err) + return nil, nil, fmt.Errorf("failed to determine number of master nodes: %v", err) } required.Spec.Replicas = masterNodeCount if err := encryptionkms.AddKMSPluginVolumeAndMountToPodSpec(&required.Spec.Template.Spec, "oauth-apiserver", c.featureGateAccessor); err != nil { - return nil, fmt.Errorf("failed to add KMS encryption volumes: %w", err) + return nil, nil, fmt.Errorf("failed to add KMS encryption volumes: %w", err) + } + + // This must be called when the required deployment is complete. + conditionOverwrites, err := scaling.ProcessDeployment(existing, required, clock.RealClock{}, c.conditionPrefix) + if err != nil { + return nil, nil, err } deployment, _, err := resourceapply.ApplyDeployment(ctx, c.kubeClient.AppsV1(), eventRecorder, required, resourcemerge.ExpectedDeploymentGeneration(required, operatorStatus.Generations)) - return deployment, err + return deployment, conditionOverwrites, err } func loglevelToKlog(logLevel operatorv1.LogLevel) string { diff --git a/pkg/operator/workload/sync_openshift_oauth_apiserver_test.go b/pkg/operator/workload/sync_openshift_oauth_apiserver_test.go index 5857104ab..645154736 100644 --- a/pkg/operator/workload/sync_openshift_oauth_apiserver_test.go +++ b/pkg/operator/workload/sync_openshift_oauth_apiserver_test.go @@ -18,6 +18,7 @@ import ( "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/kubernetes/scheme" clientgotesting "k8s.io/client-go/testing" @@ -141,15 +142,17 @@ func TestSyncOAuthAPIServerDeployment(t *testing.T) { t.Run(scenario.name, func(t *testing.T) { eventRecorder := events.NewInMemoryRecorder("", clocktesting.NewFakePassiveClock(time.Now())) fakeKubeClient := fake.NewSimpleClientset() + fakeInformers := informers.NewSharedInformerFactory(fakeKubeClient, 0) target := &OAuthAPIServerWorkload{ countNodes: func(nodeSelector map[string]string) (*int32, error) { var i int32; i = 1; return &i, nil }, ensureAtMostOnePodPerNode: func(spec *appsv1.DeploymentSpec, componentName string) error { return nil }, kubeClient: fakeKubeClient, + deploymentsLister: fakeInformers.Apps().V1().Deployments().Lister(), featureGateAccessor: featuregates.NewHardcodedFeatureGateAccessForTesting(nil, nil, make(chan struct{}), nil), } - actualDeployment, err := target.syncDeployment(context.TODO(), &scenario.operator.Spec.OperatorSpec, &scenario.operator.Status.OperatorStatus, eventRecorder) + actualDeployment, _, err := target.syncDeployment(context.TODO(), &scenario.operator.Spec.OperatorSpec, &scenario.operator.Status.OperatorStatus, eventRecorder) if err != nil { t.Fatal(err) } @@ -165,7 +168,7 @@ func TestSyncOAuthAPIServerDeployment(t *testing.T) { } if !equality.Semantic.DeepEqual(actualDeployment, goldenDeployment) { - t.Errorf("created Deployment is different from the expected one (file) : %s", cmp.Diff(actualDeployment, goldenDeployment)) + t.Errorf("created Deployment is different from the expected one (file) : %s", cmp.Diff(goldenDeployment, actualDeployment)) } } }) From a09bdc000076f7aa884b3c09460db7b001e1879d Mon Sep 17 00:00:00 2001 From: Ondra 
Kupka Date: Thu, 5 Feb 2026 17:25:39 +0100 Subject: [PATCH 2/2] go mod vendor --- .../github.com/openshift/api/.coderabbit.yaml | 1 + .../types_compatibilityrequirement.go | 1 + ..._generated.featuregated-crd-manifests.yaml | 3 +- .../openshift/api/config/v1/types_insights.go | 1 + .../openshift/api/config/v1/types_network.go | 4 +- .../api/config/v1/types_tlssecurityprofile.go | 49 ++--- ..._generated.featuregated-crd-manifests.yaml | 5 +- .../v1/zz_generated.swagger_doc_generated.go | 8 +- .../openshift/api/config/v1alpha1/register.go | 2 + .../v1alpha1/types_cluster_monitoring.go | 103 +++++++++- .../types_crio_credential_provider_config.go | 186 ++++++++++++++++++ .../api/config/v1alpha1/types_insights.go | 1 + .../config/v1alpha1/zz_generated.deepcopy.go | 154 +++++++++++++++ ..._generated.featuregated-crd-manifests.yaml | 25 ++- .../zz_generated.swagger_doc_generated.go | 72 ++++++- .../api/config/v1alpha2/types_insights.go | 1 + ..._generated.featuregated-crd-manifests.yaml | 2 +- vendor/github.com/openshift/api/features.md | 21 +- .../openshift/api/features/features.go | 124 ++++-------- .../api/features/legacyfeaturegates.go | 12 -- .../api/machine/v1beta1/types_awsprovider.go | 91 ++++++++- .../machine/v1beta1/zz_generated.deepcopy.go | 58 +++++- .../zz_generated.swagger_doc_generated.go | 29 ++- .../api/operator/v1/types_network.go | 6 +- ..._50_ingress_00_ingresscontrollers.crd.yaml | 41 ++-- .../0000_70_network_01_networks.crd.yaml | 14 +- ..._01_machineconfigurations-Default.crd.yaml | 12 ++ ...nfig_01_machineconfigurations-OKD.crd.yaml | 12 ++ ..._generated.featuregated-crd-manifests.yaml | 5 +- .../openshift/library-go/pkg/crypto/crypto.go | 28 ++- .../apiserver/controller/workload/workload.go | 36 +++- vendor/modules.txt | 5 +- 32 files changed, 871 insertions(+), 241 deletions(-) create mode 100644 vendor/github.com/openshift/api/config/v1alpha1/types_crio_credential_provider_config.go diff --git a/vendor/github.com/openshift/api/.coderabbit.yaml b/vendor/github.com/openshift/api/.coderabbit.yaml index 1cb17f1e1..a3ee2d122 100644 --- a/vendor/github.com/openshift/api/.coderabbit.yaml +++ b/vendor/github.com/openshift/api/.coderabbit.yaml @@ -14,6 +14,7 @@ reviews: - "!payload-manifests" - "!**/zz_generated.crd-manifests/*" # Contains files - "!**/zz_generated.featuregated-crd-manifests/**" # Contains folders + - "!openapi/**" - "!**/vendor/**" - "!vendor/**" tools: diff --git a/vendor/github.com/openshift/api/apiextensions/v1alpha1/types_compatibilityrequirement.go b/vendor/github.com/openshift/api/apiextensions/v1alpha1/types_compatibilityrequirement.go index 46e211cd5..5abbfec7c 100644 --- a/vendor/github.com/openshift/api/apiextensions/v1alpha1/types_compatibilityrequirement.go +++ b/vendor/github.com/openshift/api/apiextensions/v1alpha1/types_compatibilityrequirement.go @@ -21,6 +21,7 @@ import ( // +kubebuilder:subresource:status // +kubebuilder:resource:path=compatibilityrequirements,scope=Cluster // +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/2479 +// +kubebuilder:metadata:annotations="release.openshift.io/feature-gate=CRDCompatibilityRequirementOperator" type CompatibilityRequirement struct { metav1.TypeMeta `json:",inline"` diff --git a/vendor/github.com/openshift/api/apiextensions/v1alpha1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/apiextensions/v1alpha1/zz_generated.featuregated-crd-manifests.yaml index 319f2b335..433546401 100644 --- 
a/vendor/github.com/openshift/api/apiextensions/v1alpha1/zz_generated.featuregated-crd-manifests.yaml +++ b/vendor/github.com/openshift/api/apiextensions/v1alpha1/zz_generated.featuregated-crd-manifests.yaml @@ -1,5 +1,6 @@ compatibilityrequirements.apiextensions.openshift.io: - Annotations: {} + Annotations: + release.openshift.io/feature-gate: CRDCompatibilityRequirementOperator ApprovedPRNumber: https://github.com/openshift/api/pull/2479 CRDName: compatibilityrequirements.apiextensions.openshift.io Capability: "" diff --git a/vendor/github.com/openshift/api/config/v1/types_insights.go b/vendor/github.com/openshift/api/config/v1/types_insights.go index b0959881f..710d4303d 100644 --- a/vendor/github.com/openshift/api/config/v1/types_insights.go +++ b/vendor/github.com/openshift/api/config/v1/types_insights.go @@ -13,6 +13,7 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/2448 // +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 // +openshift:enable:FeatureGate=InsightsConfig +// +openshift:capability=Insights // // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=1 diff --git a/vendor/github.com/openshift/api/config/v1/types_network.go b/vendor/github.com/openshift/api/config/v1/types_network.go index c0d1602b3..fb8ed2fff 100644 --- a/vendor/github.com/openshift/api/config/v1/types_network.go +++ b/vendor/github.com/openshift/api/config/v1/types_network.go @@ -41,7 +41,7 @@ type Network struct { // As a general rule, this SHOULD NOT be read directly. Instead, you should // consume the NetworkStatus, as it indicates the currently deployed configuration. // Currently, most spec fields are immutable after installation. Please view the individual ones for further details on each. -// +openshift:validation:FeatureGateAwareXValidation:featureGate=NetworkDiagnosticsConfig,rule="!has(self.networkDiagnostics) || !has(self.networkDiagnostics.mode) || self.networkDiagnostics.mode!='Disabled' || !has(self.networkDiagnostics.sourcePlacement) && !has(self.networkDiagnostics.targetPlacement)",message="cannot set networkDiagnostics.sourcePlacement and networkDiagnostics.targetPlacement when networkDiagnostics.mode is Disabled" +// +kubebuilder:validation:XValidation:rule="!has(self.networkDiagnostics) || !has(self.networkDiagnostics.mode) || self.networkDiagnostics.mode!='Disabled' || !has(self.networkDiagnostics.sourcePlacement) && !has(self.networkDiagnostics.targetPlacement)",message="cannot set networkDiagnostics.sourcePlacement and networkDiagnostics.targetPlacement when networkDiagnostics.mode is Disabled" type NetworkSpec struct { // IP address pool to use for pod IPs. // This field is immutable after installation. @@ -85,7 +85,6 @@ type NetworkSpec struct { // the network diagnostics feature will be disabled. 
// // +optional - // +openshift:enable:FeatureGate=NetworkDiagnosticsConfig NetworkDiagnostics NetworkDiagnostics `json:"networkDiagnostics"` } @@ -119,7 +118,6 @@ type NetworkStatus struct { // +optional // +listType=map // +listMapKey=type - // +openshift:enable:FeatureGate=NetworkDiagnosticsConfig Conditions []metav1.Condition `json:"conditions,omitempty"` } diff --git a/vendor/github.com/openshift/api/config/v1/types_tlssecurityprofile.go b/vendor/github.com/openshift/api/config/v1/types_tlssecurityprofile.go index 1e5189796..48657b089 100644 --- a/vendor/github.com/openshift/api/config/v1/types_tlssecurityprofile.go +++ b/vendor/github.com/openshift/api/config/v1/types_tlssecurityprofile.go @@ -7,9 +7,10 @@ type TLSSecurityProfile struct { // type is one of Old, Intermediate, Modern or Custom. Custom provides the // ability to specify individual TLS security profile parameters. // - // The profiles are currently based on version 5.0 of the Mozilla Server Side TLS - // configuration guidelines (released 2019-06-28) with TLS 1.3 ciphers added for - // forward compatibility. See: https://ssl-config.mozilla.org/guidelines/5.0.json + // The profiles are based on version 5.7 of the Mozilla Server Side TLS + // configuration guidelines. The cipher lists consist of the configuration's + // "ciphersuites" followed by the Go-specific "ciphers" from the guidelines. + // See: https://ssl-config.mozilla.org/guidelines/5.7.json // // The profiles are intent based, so they may change over time as new ciphers are // developed and existing ciphers are found to be insecure. Depending on @@ -22,9 +23,6 @@ type TLSSecurityProfile struct { // old is a TLS profile for use when services need to be accessed by very old // clients or libraries and should be used only as a last resort. // - // The cipher list includes TLS 1.3 ciphers for forward compatibility, followed - // by the "old" profile ciphers. - // // This profile is equivalent to a Custom profile specified as: // minTLSVersion: VersionTLS10 // ciphers: @@ -37,23 +35,15 @@ type TLSSecurityProfile struct { // - ECDHE-RSA-AES256-GCM-SHA384 // - ECDHE-ECDSA-CHACHA20-POLY1305 // - ECDHE-RSA-CHACHA20-POLY1305 - // - DHE-RSA-AES128-GCM-SHA256 - // - DHE-RSA-AES256-GCM-SHA384 - // - DHE-RSA-CHACHA20-POLY1305 // - ECDHE-ECDSA-AES128-SHA256 // - ECDHE-RSA-AES128-SHA256 // - ECDHE-ECDSA-AES128-SHA // - ECDHE-RSA-AES128-SHA - // - ECDHE-ECDSA-AES256-SHA384 - // - ECDHE-RSA-AES256-SHA384 // - ECDHE-ECDSA-AES256-SHA // - ECDHE-RSA-AES256-SHA - // - DHE-RSA-AES128-SHA256 - // - DHE-RSA-AES256-SHA256 // - AES128-GCM-SHA256 // - AES256-GCM-SHA384 // - AES128-SHA256 - // - AES256-SHA256 // - AES128-SHA // - AES256-SHA // - DES-CBC3-SHA @@ -66,9 +56,6 @@ type TLSSecurityProfile struct { // legacy clients and want to remain highly secure while being compatible with // most clients currently in use. // - // The cipher list includes TLS 1.3 ciphers for forward compatibility, followed - // by the "intermediate" profile ciphers. - // // This profile is equivalent to a Custom profile specified as: // minTLSVersion: VersionTLS12 // ciphers: @@ -81,8 +68,6 @@ type TLSSecurityProfile struct { // - ECDHE-RSA-AES256-GCM-SHA384 // - ECDHE-ECDSA-CHACHA20-POLY1305 // - ECDHE-RSA-CHACHA20-POLY1305 - // - DHE-RSA-AES128-GCM-SHA256 - // - DHE-RSA-AES256-GCM-SHA384 // // +optional // +nullable @@ -160,12 +145,14 @@ const ( // TLSProfileSpec is the desired behavior of a TLSSecurityProfile. 
type TLSProfileSpec struct { // ciphers is used to specify the cipher algorithms that are negotiated - // during the TLS handshake. Operators may remove entries their operands - // do not support. For example, to use DES-CBC3-SHA (yaml): + // during the TLS handshake. Operators may remove entries that their operands + // do not support. For example, to use only ECDHE-RSA-AES128-GCM-SHA256 (yaml): // // ciphers: - // - DES-CBC3-SHA + // - ECDHE-RSA-AES128-GCM-SHA256 // + // TLS 1.3 cipher suites (e.g. TLS_AES_128_GCM_SHA256) are not configurable + // and are always enabled when TLS 1.3 is negotiated. // +listType=atomic Ciphers []string `json:"ciphers"` // minTLSVersion is used to specify the minimal version of the TLS protocol @@ -200,9 +187,11 @@ const ( // TLSProfiles contains a map of TLSProfileType names to TLSProfileSpec. // -// These profiles are based on version 5.0 of the Mozilla Server Side TLS -// configuration guidelines (2019-06-28) with TLS 1.3 cipher suites prepended for -// forward compatibility. See: https://ssl-config.mozilla.org/guidelines/5.0.json +// These profiles are based on version 5.7 of the Mozilla Server Side TLS +// configuration guidelines. See: https://ssl-config.mozilla.org/guidelines/5.7.json +// +// Each Ciphers slice is the configuration's "ciphersuites" followed by the +// Go-specific "ciphers" from the guidelines JSON. // // NOTE: The caller needs to make sure to check that these constants are valid // for their binary. Not all entries map to values for all binaries. In the case @@ -220,23 +209,15 @@ var TLSProfiles = map[TLSProfileType]*TLSProfileSpec{ "ECDHE-RSA-AES256-GCM-SHA384", "ECDHE-ECDSA-CHACHA20-POLY1305", "ECDHE-RSA-CHACHA20-POLY1305", - "DHE-RSA-AES128-GCM-SHA256", - "DHE-RSA-AES256-GCM-SHA384", - "DHE-RSA-CHACHA20-POLY1305", "ECDHE-ECDSA-AES128-SHA256", "ECDHE-RSA-AES128-SHA256", "ECDHE-ECDSA-AES128-SHA", "ECDHE-RSA-AES128-SHA", - "ECDHE-ECDSA-AES256-SHA384", - "ECDHE-RSA-AES256-SHA384", "ECDHE-ECDSA-AES256-SHA", "ECDHE-RSA-AES256-SHA", - "DHE-RSA-AES128-SHA256", - "DHE-RSA-AES256-SHA256", "AES128-GCM-SHA256", "AES256-GCM-SHA384", "AES128-SHA256", - "AES256-SHA256", "AES128-SHA", "AES256-SHA", "DES-CBC3-SHA", @@ -254,8 +235,6 @@ var TLSProfiles = map[TLSProfileType]*TLSProfileSpec{ "ECDHE-RSA-AES256-GCM-SHA384", "ECDHE-ECDSA-CHACHA20-POLY1305", "ECDHE-RSA-CHACHA20-POLY1305", - "DHE-RSA-AES128-GCM-SHA256", - "DHE-RSA-AES256-GCM-SHA384", }, MinTLSVersion: VersionTLS12, }, diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml index 576fd510c..eb7c485e0 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml @@ -417,7 +417,7 @@ insightsdatagathers.config.openshift.io: Annotations: {} ApprovedPRNumber: https://github.com/openshift/api/pull/2448 CRDName: insightsdatagathers.config.openshift.io - Capability: "" + Capability: Insights Category: "" FeatureGates: - InsightsConfig @@ -443,8 +443,7 @@ networks.config.openshift.io: CRDName: networks.config.openshift.io Capability: "" Category: "" - FeatureGates: - - NetworkDiagnosticsConfig + FeatureGates: [] FilenameOperatorName: config-operator FilenameOperatorOrdering: "01" FilenameRunLevel: "0000_10" diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go 
b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go index 7f0018950..69fb37c52 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go @@ -3004,7 +3004,7 @@ func (OldTLSProfile) SwaggerDoc() map[string]string { var map_TLSProfileSpec = map[string]string{ "": "TLSProfileSpec is the desired behavior of a TLSSecurityProfile.", - "ciphers": "ciphers is used to specify the cipher algorithms that are negotiated during the TLS handshake. Operators may remove entries their operands do not support. For example, to use DES-CBC3-SHA (yaml):\n\n ciphers:\n - DES-CBC3-SHA", + "ciphers": "ciphers is used to specify the cipher algorithms that are negotiated during the TLS handshake. Operators may remove entries that their operands do not support. For example, to use only ECDHE-RSA-AES128-GCM-SHA256 (yaml):\n\n ciphers:\n - ECDHE-RSA-AES128-GCM-SHA256\n\nTLS 1.3 cipher suites (e.g. TLS_AES_128_GCM_SHA256) are not configurable and are always enabled when TLS 1.3 is negotiated.", "minTLSVersion": "minTLSVersion is used to specify the minimal version of the TLS protocol that is negotiated during the TLS handshake. For example, to use TLS versions 1.1, 1.2 and 1.3 (yaml):\n\n minTLSVersion: VersionTLS11", } @@ -3014,9 +3014,9 @@ func (TLSProfileSpec) SwaggerDoc() map[string]string { var map_TLSSecurityProfile = map[string]string{ "": "TLSSecurityProfile defines the schema for a TLS security profile. This object is used by operators to apply TLS security settings to operands.", - "type": "type is one of Old, Intermediate, Modern or Custom. Custom provides the ability to specify individual TLS security profile parameters.\n\nThe profiles are currently based on version 5.0 of the Mozilla Server Side TLS configuration guidelines (released 2019-06-28) with TLS 1.3 ciphers added for forward compatibility. See: https://ssl-config.mozilla.org/guidelines/5.0.json\n\nThe profiles are intent based, so they may change over time as new ciphers are developed and existing ciphers are found to be insecure. 
Depending on precisely which ciphers are available to a process, the list may be reduced.", - "old": "old is a TLS profile for use when services need to be accessed by very old clients or libraries and should be used only as a last resort.\n\nThe cipher list includes TLS 1.3 ciphers for forward compatibility, followed by the \"old\" profile ciphers.\n\nThis profile is equivalent to a Custom profile specified as:\n minTLSVersion: VersionTLS10\n ciphers:\n - TLS_AES_128_GCM_SHA256\n - TLS_AES_256_GCM_SHA384\n - TLS_CHACHA20_POLY1305_SHA256\n - ECDHE-ECDSA-AES128-GCM-SHA256\n - ECDHE-RSA-AES128-GCM-SHA256\n - ECDHE-ECDSA-AES256-GCM-SHA384\n - ECDHE-RSA-AES256-GCM-SHA384\n - ECDHE-ECDSA-CHACHA20-POLY1305\n - ECDHE-RSA-CHACHA20-POLY1305\n - DHE-RSA-AES128-GCM-SHA256\n - DHE-RSA-AES256-GCM-SHA384\n - DHE-RSA-CHACHA20-POLY1305\n - ECDHE-ECDSA-AES128-SHA256\n - ECDHE-RSA-AES128-SHA256\n - ECDHE-ECDSA-AES128-SHA\n - ECDHE-RSA-AES128-SHA\n - ECDHE-ECDSA-AES256-SHA384\n - ECDHE-RSA-AES256-SHA384\n - ECDHE-ECDSA-AES256-SHA\n - ECDHE-RSA-AES256-SHA\n - DHE-RSA-AES128-SHA256\n - DHE-RSA-AES256-SHA256\n - AES128-GCM-SHA256\n - AES256-GCM-SHA384\n - AES128-SHA256\n - AES256-SHA256\n - AES128-SHA\n - AES256-SHA\n - DES-CBC3-SHA", - "intermediate": "intermediate is a TLS profile for use when you do not need compatibility with legacy clients and want to remain highly secure while being compatible with most clients currently in use.\n\nThe cipher list includes TLS 1.3 ciphers for forward compatibility, followed by the \"intermediate\" profile ciphers.\n\nThis profile is equivalent to a Custom profile specified as:\n minTLSVersion: VersionTLS12\n ciphers:\n - TLS_AES_128_GCM_SHA256\n - TLS_AES_256_GCM_SHA384\n - TLS_CHACHA20_POLY1305_SHA256\n - ECDHE-ECDSA-AES128-GCM-SHA256\n - ECDHE-RSA-AES128-GCM-SHA256\n - ECDHE-ECDSA-AES256-GCM-SHA384\n - ECDHE-RSA-AES256-GCM-SHA384\n - ECDHE-ECDSA-CHACHA20-POLY1305\n - ECDHE-RSA-CHACHA20-POLY1305\n - DHE-RSA-AES128-GCM-SHA256\n - DHE-RSA-AES256-GCM-SHA384", + "type": "type is one of Old, Intermediate, Modern or Custom. Custom provides the ability to specify individual TLS security profile parameters.\n\nThe profiles are based on version 5.7 of the Mozilla Server Side TLS configuration guidelines. The cipher lists consist of the configuration's \"ciphersuites\" followed by the Go-specific \"ciphers\" from the guidelines. See: https://ssl-config.mozilla.org/guidelines/5.7.json\n\nThe profiles are intent based, so they may change over time as new ciphers are developed and existing ciphers are found to be insecure. 
Depending on precisely which ciphers are available to a process, the list may be reduced.", + "old": "old is a TLS profile for use when services need to be accessed by very old clients or libraries and should be used only as a last resort.\n\nThis profile is equivalent to a Custom profile specified as:\n minTLSVersion: VersionTLS10\n ciphers:\n - TLS_AES_128_GCM_SHA256\n - TLS_AES_256_GCM_SHA384\n - TLS_CHACHA20_POLY1305_SHA256\n - ECDHE-ECDSA-AES128-GCM-SHA256\n - ECDHE-RSA-AES128-GCM-SHA256\n - ECDHE-ECDSA-AES256-GCM-SHA384\n - ECDHE-RSA-AES256-GCM-SHA384\n - ECDHE-ECDSA-CHACHA20-POLY1305\n - ECDHE-RSA-CHACHA20-POLY1305\n - ECDHE-ECDSA-AES128-SHA256\n - ECDHE-RSA-AES128-SHA256\n - ECDHE-ECDSA-AES128-SHA\n - ECDHE-RSA-AES128-SHA\n - ECDHE-ECDSA-AES256-SHA\n - ECDHE-RSA-AES256-SHA\n - AES128-GCM-SHA256\n - AES256-GCM-SHA384\n - AES128-SHA256\n - AES128-SHA\n - AES256-SHA\n - DES-CBC3-SHA", + "intermediate": "intermediate is a TLS profile for use when you do not need compatibility with legacy clients and want to remain highly secure while being compatible with most clients currently in use.\n\nThis profile is equivalent to a Custom profile specified as:\n minTLSVersion: VersionTLS12\n ciphers:\n - TLS_AES_128_GCM_SHA256\n - TLS_AES_256_GCM_SHA384\n - TLS_CHACHA20_POLY1305_SHA256\n - ECDHE-ECDSA-AES128-GCM-SHA256\n - ECDHE-RSA-AES128-GCM-SHA256\n - ECDHE-ECDSA-AES256-GCM-SHA384\n - ECDHE-RSA-AES256-GCM-SHA384\n - ECDHE-ECDSA-CHACHA20-POLY1305\n - ECDHE-RSA-CHACHA20-POLY1305", "modern": "modern is a TLS security profile for use with clients that support TLS 1.3 and do not need backward compatibility for older clients.\n\nThis profile is equivalent to a Custom profile specified as:\n minTLSVersion: VersionTLS13\n ciphers:\n - TLS_AES_128_GCM_SHA256\n - TLS_AES_256_GCM_SHA384\n - TLS_CHACHA20_POLY1305_SHA256", "custom": "custom is a user-defined TLS security profile. Be extremely careful using a custom profile as invalid configurations can be catastrophic. An example custom profile looks like this:\n\n minTLSVersion: VersionTLS11\n ciphers:\n - ECDHE-ECDSA-CHACHA20-POLY1305\n - ECDHE-RSA-CHACHA20-POLY1305\n - ECDHE-RSA-AES128-GCM-SHA256\n - ECDHE-ECDSA-AES128-GCM-SHA256", } diff --git a/vendor/github.com/openshift/api/config/v1alpha1/register.go b/vendor/github.com/openshift/api/config/v1alpha1/register.go index 4b30ea380..c90962495 100644 --- a/vendor/github.com/openshift/api/config/v1alpha1/register.go +++ b/vendor/github.com/openshift/api/config/v1alpha1/register.go @@ -40,6 +40,8 @@ func addKnownTypes(scheme *runtime.Scheme) error { &ImagePolicyList{}, &ClusterImagePolicy{}, &ClusterImagePolicyList{}, + &CRIOCredentialProviderConfig{}, + &CRIOCredentialProviderConfigList{}, ) metav1.AddToGroupVersion(scheme, GroupVersion) return nil diff --git a/vendor/github.com/openshift/api/config/v1alpha1/types_cluster_monitoring.go b/vendor/github.com/openshift/api/config/v1alpha1/types_cluster_monitoring.go index 0653eeb5a..29bf8ba48 100644 --- a/vendor/github.com/openshift/api/config/v1alpha1/types_cluster_monitoring.go +++ b/vendor/github.com/openshift/api/config/v1alpha1/types_cluster_monitoring.go @@ -94,6 +94,11 @@ type ClusterMonitoringSpec struct { // When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. // +optional MetricsServerConfig MetricsServerConfig `json:"metricsServerConfig,omitempty,omitzero"` + // prometheusOperatorConfig is an optional field that can be used to configure the Prometheus Operator component. 
+ // Specifically, it can configure how the Prometheus Operator instance is deployed, pod scheduling, and resource allocation. + // When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. + // +optional + PrometheusOperatorConfig PrometheusOperatorConfig `json:"prometheusOperatorConfig,omitempty,omitzero"` } // UserDefinedMonitoring config for user-defined projects. @@ -185,6 +190,7 @@ type AlertmanagerCustomConfig struct { // limit: null // Maximum length for this list is 10. // Minimum length for this list is 1. + // Each resource name must be unique within this list. // +optional // +listType=map // +listMapKey=name @@ -218,8 +224,8 @@ type AlertmanagerCustomConfig struct { // When omitted, this means the user has no opinion and the platform is left // to choose reasonable defaults. These defaults are subject to change over time. // Defaults are empty/unset. - // Maximum length for this list is 10 - // Minimum length for this list is 1 + // Maximum length for this list is 10. + // Minimum length for this list is 1. // +kubebuilder:validation:MaxItems=10 // +kubebuilder:validation:MinItems=1 // +listType=atomic @@ -235,7 +241,7 @@ type AlertmanagerCustomConfig struct { // This field maps directly to the `topologySpreadConstraints` field in the Pod spec. // Default is empty list. // Maximum length for this list is 10. - // Minimum length for this list is 1 + // Minimum length for this list is 1. // Entries must have unique topologyKey and whenUnsatisfiable pairs. // +kubebuilder:validation:MaxItems=10 // +kubebuilder:validation:MinItems=1 @@ -356,8 +362,8 @@ type MetricsServerConfig struct { // When omitted, this means the user has no opinion and the platform is left // to choose reasonable defaults. These defaults are subject to change over time. // Defaults are empty/unset. - // Maximum length for this list is 10 - // Minimum length for this list is 1 + // Maximum length for this list is 10. + // Minimum length for this list is 1. // +kubebuilder:validation:MaxItems=10 // +kubebuilder:validation:MinItems=1 // +listType=atomic @@ -389,6 +395,7 @@ type MetricsServerConfig struct { // limit: null // Maximum length for this list is 10. // Minimum length for this list is 1. + // Each resource name must be unique within this list. // +optional // +listType=map // +listMapKey=name @@ -405,7 +412,91 @@ type MetricsServerConfig struct { // This field maps directly to the `topologySpreadConstraints` field in the Pod spec. // Default is empty list. // Maximum length for this list is 10. - // Minimum length for this list is 1 + // Minimum length for this list is 1. + // Entries must have unique topologyKey and whenUnsatisfiable pairs. + // +kubebuilder:validation:MaxItems=10 + // +kubebuilder:validation:MinItems=1 + // +listType=map + // +listMapKey=topologyKey + // +listMapKey=whenUnsatisfiable + // +optional + TopologySpreadConstraints []v1.TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty"` +} + +// PrometheusOperatorConfig provides configuration options for the Prometheus Operator instance +// Use this configuration to control how the Prometheus Operator instance is deployed, how it logs, and how its pods are scheduled. +// +kubebuilder:validation:MinProperties=1 +type PrometheusOperatorConfig struct { + // logLevel defines the verbosity of logs emitted by Prometheus Operator. 
+ // This field allows users to control the amount and severity of logs generated, which can be useful + // for debugging issues or reducing noise in production environments. + // Allowed values are Error, Warn, Info, and Debug. + // When set to Error, only errors will be logged. + // When set to Warn, both warnings and errors will be logged. + // When set to Info, general information, warnings, and errors will all be logged. + // When set to Debug, detailed debugging information will be logged. + // When omitted, this means no opinion and the platform is left to choose a reasonable default, that is subject to change over time. + // The current default value is `Info`. + // +optional + LogLevel LogLevel `json:"logLevel,omitempty"` + // nodeSelector defines the nodes on which the Pods are scheduled + // nodeSelector is optional. + // + // When omitted, this means the user has no opinion and the platform is left + // to choose reasonable defaults. These defaults are subject to change over time. + // The current default value is `kubernetes.io/os: linux`. + // When specified, nodeSelector must contain at least 1 entry and must not contain more than 10 entries. + // +optional + // +kubebuilder:validation:MinProperties=1 + // +kubebuilder:validation:MaxProperties=10 + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + // resources defines the compute resource requests and limits for the Prometheus Operator container. + // This includes CPU, memory and HugePages constraints to help control scheduling and resource usage. + // When not specified, defaults are used by the platform. Requests cannot exceed limits. + // This field is optional. + // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + // This is a simplified API that maps to Kubernetes ResourceRequirements. + // The current default values are: + // resources: + // - name: cpu + // request: 4m + // limit: null + // - name: memory + // request: 40Mi + // limit: null + // Maximum length for this list is 10. + // Minimum length for this list is 1. + // Each resource name must be unique within this list. + // +optional + // +listType=map + // +listMapKey=name + // +kubebuilder:validation:MaxItems=10 + // +kubebuilder:validation:MinItems=1 + Resources []ContainerResource `json:"resources,omitempty"` + // tolerations defines tolerations for the pods. + // tolerations is optional. + // + // When omitted, this means the user has no opinion and the platform is left + // to choose reasonable defaults. These defaults are subject to change over time. + // Defaults are empty/unset. + // Maximum length for this list is 10. + // Minimum length for this list is 1. + // +kubebuilder:validation:MaxItems=10 + // +kubebuilder:validation:MinItems=1 + // +listType=atomic + // +optional + Tolerations []v1.Toleration `json:"tolerations,omitempty"` + // topologySpreadConstraints defines rules for how Prometheus Operator Pods should be distributed + // across topology domains such as zones, nodes, or other user-defined labels. + // topologySpreadConstraints is optional. + // This helps improve high availability and resource efficiency by avoiding placing + // too many replicas in the same failure domain. + // + // When omitted, this means no opinion and the platform is left to choose a default, which is subject to change over time. + // This field maps directly to the `topologySpreadConstraints` field in the Pod spec. + // Default is empty list. + // Maximum length for this list is 10. 
+ // Minimum length for this list is 1. // Entries must have unique topologyKey and whenUnsatisfiable pairs. // +kubebuilder:validation:MaxItems=10 // +kubebuilder:validation:MinItems=1 diff --git a/vendor/github.com/openshift/api/config/v1alpha1/types_crio_credential_provider_config.go b/vendor/github.com/openshift/api/config/v1alpha1/types_crio_credential_provider_config.go new file mode 100644 index 000000000..9e2e0d39d --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1alpha1/types_crio_credential_provider_config.go @@ -0,0 +1,186 @@ +package v1alpha1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CRIOCredentialProviderConfig holds cluster-wide singleton resource configurations for CRI-O credential provider, the name of this instance is "cluster". CRI-O credential provider is a binary shipped with CRI-O that provides a way to obtain container image pull credentials from external sources. +// For example, it can be used to fetch mirror registry credentials from secrets resources in the cluster within the same namespace the pod will be running in. +// CRIOCredentialProviderConfig configuration specifies the pod image sources registries that should trigger the CRI-O credential provider execution, which will resolve the CRI-O mirror configurations and obtain the necessary credentials for pod creation. +// Note: Configuration changes will only take effect after the kubelet restarts, which is automatically managed by the cluster during rollout. +// +// The resource is a singleton named "cluster". +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=criocredentialproviderconfigs,scope=Cluster +// +kubebuilder:subresource:status +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/2557 +// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 +// +openshift:enable:FeatureGate=CRIOCredentialProviderConfig +// +openshift:compatibility-gen:level=4 +// +kubebuilder:validation:XValidation:rule="self.metadata.name == 'cluster'",message="criocredentialproviderconfig is a singleton, .metadata.name must be 'cluster'" +type CRIOCredentialProviderConfig struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitzero"` + + // spec defines the desired configuration of the CRI-O Credential Provider. + // This field is required and must be provided when creating the resource. + // +required + Spec *CRIOCredentialProviderConfigSpec `json:"spec,omitempty,omitzero"` + + // status represents the current state of the CRIOCredentialProviderConfig. + // When omitted or nil, it indicates that the status has not yet been set by the controller. + // The controller will populate this field with validation conditions and operational state. + // +optional + Status CRIOCredentialProviderConfigStatus `json:"status,omitzero,omitempty"` +} + +// CRIOCredentialProviderConfigSpec defines the desired configuration of the CRI-O Credential Provider. 
+// +kubebuilder:validation:MinProperties=0 +type CRIOCredentialProviderConfigSpec struct { + // matchImages is a list of string patterns used to determine whether + // the CRI-O credential provider should be invoked for a given image. This list is + // passed to the kubelet CredentialProviderConfig, and if any pattern matches + // the requested image, CRI-O credential provider will be invoked to obtain credentials for pulling + // that image or its mirrors. + // Depending on the platform, the CRI-O credential provider may be installed alongside an existing platform specific provider. + // Conflicts between the existing platform specific provider image match configuration and this list will be handled by + // the following precedence rule: credentials from built-in kubelet providers (e.g., ECR, GCR, ACR) take precedence over those + // from the CRIOCredentialProviderConfig when both match the same image. + // To avoid uncertainty, it is recommended to avoid configuring your private image patterns to overlap with + // existing platform specific provider config(e.g., the entries from https://github.com/openshift/machine-config-operator/blob/main/templates/common/aws/files/etc-kubernetes-credential-providers-ecr-credential-provider.yaml). + // You can check the resource's Status conditions + // to see if any entries were ignored due to exact matches with known built-in provider patterns. + // + // This field is optional, the items of the list must contain between 1 and 50 entries. + // The list is treated as a set, so duplicate entries are not allowed. + // + // For more details, see: + // https://kubernetes.io/docs/tasks/administer-cluster/kubelet-credential-provider/ + // https://github.com/cri-o/crio-credential-provider#architecture + // + // Each entry in matchImages is a pattern which can optionally contain a port and a path. Each entry must be no longer than 512 characters. + // Wildcards ('*') are supported for full subdomain labels, such as '*.k8s.io' or 'k8s.*.io', + // and for top-level domains, such as 'k8s.*' (which matches 'k8s.io' or 'k8s.net'). + // A global wildcard '*' (matching any domain) is not allowed. + // Wildcards may replace an entire hostname label (e.g., *.example.com), but they cannot appear within a label (e.g., f*oo.example.com) and are not allowed in the port or path. + // For example, 'example.*.com' is valid, but 'exa*mple.*.com' is not. + // Each wildcard matches only a single domain label, + // so '*.io' does **not** match '*.k8s.io'. + // + // A match exists between an image and a matchImage when all of the below are true: + // Both contain the same number of domain parts and each part matches. + // The URL path of an matchImages must be a prefix of the target image URL path. + // If the matchImages contains a port, then the port must match in the image as well. + // + // Example values of matchImages: + // - 123456789.dkr.ecr.us-east-1.amazonaws.com + // - *.azurecr.io + // - gcr.io + // - *.*.registry.io + // - registry.io:8080/path + // + // +kubebuilder:validation:MaxItems=50 + // +kubebuilder:validation:MinItems=1 + // +listType=set + // +optional + MatchImages []MatchImage `json:"matchImages,omitempty"` +} + +// MatchImage is a string pattern used to match container image registry addresses. +// It must be a valid fully qualified domain name with optional wildcard, port, and path. +// The maximum length is 512 characters. +// +// Wildcards ('*') are supported for full subdomain labels and top-level domains. 
+// Each entry can optionally contain a port (e.g., :8080) and a path (e.g., /path). +// Wildcards are not allowed in the port or path portions. +// +// Examples: +// - "registry.io" - matches exactly registry.io +// - "*.azurecr.io" - matches any single subdomain of azurecr.io +// - "registry.io:8080/path" - matches with specific port and path prefix +// +// +kubebuilder:validation:MaxLength=512 +// +kubebuilder:validation:MinLength=1 +// +kubebuilder:validation:XValidation:rule="self != '*'",message="global wildcard '*' is not allowed" +// +kubebuilder:validation:XValidation:rule=`self.matches('^((\\*|[a-z0-9]([a-z0-9-]*[a-z0-9])?)(\\.(\\*|[a-z0-9]([a-z0-9-]*[a-z0-9])?))*)(:[0-9]+)?(/[-a-z0-9._/]*)?$')`,message="invalid matchImages value, must be a valid fully qualified domain name in lowercase with optional wildcard, port, and path" +type MatchImage string + +// +k8s:deepcopy-gen=true +// CRIOCredentialProviderConfigStatus defines the observed state of CRIOCredentialProviderConfig +// +kubebuilder:validation:MinProperties=1 +type CRIOCredentialProviderConfigStatus struct { + // conditions represent the latest available observations of the configuration state. + // When omitted, it indicates that no conditions have been reported yet. + // The maximum number of conditions is 16. + // Conditions are stored as a map keyed by condition type, ensuring uniqueness. + // + // Expected condition types include: + // "Validated": indicates whether the matchImages configuration is valid + // +optional + // +kubebuilder:validation:MaxItems=16 + // +kubebuilder:validation:MinItems=1 + // +listType=map + // +listMapKey=type + Conditions []metav1.Condition `json:"conditions,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CRIOCredentialProviderConfigList contains a list of CRIOCredentialProviderConfig resources +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +type CRIOCredentialProviderConfigList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata"` + + Items []CRIOCredentialProviderConfig `json:"items"` +} + +const ( + // ConditionTypeValidated is a condition type that indicates whether the CRIOCredentialProviderConfig + // matchImages configuration has been validated successfully. + // When True, all matchImage patterns are valid and have been applied. + // When False, the configuration contains errors (see Reason for details). + // Possible reasons for False status: + // - ValidationFailed: matchImages contains invalid patterns + // - ConfigurationPartiallyApplied: some matchImage entries were ignored due to conflicts + ConditionTypeValidated = "Validated" + + // ReasonValidationFailed is a condition reason used with ConditionTypeValidated=False + // to indicate that the matchImages configuration contains one or more invalid registry patterns + // that do not conform to the required format (valid FQDN with optional wildcard, port, and path). + ReasonValidationFailed = "ValidationFailed" + + // ReasonConfigurationPartiallyApplied is a condition reason used with ConditionTypeValidated=False + // to indicate that some matchImage entries were ignored due to conflicts or overlapping patterns. 
+ // The condition message will contain details about which entries were ignored and why. + ReasonConfigurationPartiallyApplied = "ConfigurationPartiallyApplied" + + // ConditionTypeMachineConfigRendered is a condition type that indicates whether + // the CRIOCredentialProviderConfig has been successfully rendered into a + // MachineConfig object. + // When True, the corresponding MachineConfig is present in the cluster. + // When False, rendering failed. + ConditionTypeMachineConfigRendered = "MachineConfigRendered" + + // ReasonMachineConfigRenderingSucceeded is a condition reason used with ConditionTypeMachineConfigRendered=True + // to indicate that the MachineConfig was successfully created/updated in the API server. + ReasonMachineConfigRenderingSucceeded = "MachineConfigRenderingSucceeded" + + // ReasonMachineConfigRenderingFailed is a condition reason used with ConditionTypeMachineConfigRendered=False + // to indicate that the MachineConfig creation/update failed. + // The condition message will contain details about the failure. + ReasonMachineConfigRenderingFailed = "MachineConfigRenderingFailed" +) diff --git a/vendor/github.com/openshift/api/config/v1alpha1/types_insights.go b/vendor/github.com/openshift/api/config/v1alpha1/types_insights.go index 46666ae3b..bef31b905 100644 --- a/vendor/github.com/openshift/api/config/v1alpha1/types_insights.go +++ b/vendor/github.com/openshift/api/config/v1alpha1/types_insights.go @@ -16,6 +16,7 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 // +openshift:enable:FeatureGate=InsightsConfig // +openshift:compatibility-gen:level=4 +// +openshift:capability=Insights type InsightsDataGather struct { metav1.TypeMeta `json:",inline"` diff --git a/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.deepcopy.go index 9ead6aba2..dc51326b9 100644 --- a/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.deepcopy.go @@ -192,6 +192,115 @@ func (in *BackupStatus) DeepCopy() *BackupStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CRIOCredentialProviderConfig) DeepCopyInto(out *CRIOCredentialProviderConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Spec != nil { + in, out := &in.Spec, &out.Spec + *out = new(CRIOCredentialProviderConfigSpec) + (*in).DeepCopyInto(*out) + } + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CRIOCredentialProviderConfig. +func (in *CRIOCredentialProviderConfig) DeepCopy() *CRIOCredentialProviderConfig { + if in == nil { + return nil + } + out := new(CRIOCredentialProviderConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CRIOCredentialProviderConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CRIOCredentialProviderConfigList) DeepCopyInto(out *CRIOCredentialProviderConfigList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CRIOCredentialProviderConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CRIOCredentialProviderConfigList. +func (in *CRIOCredentialProviderConfigList) DeepCopy() *CRIOCredentialProviderConfigList { + if in == nil { + return nil + } + out := new(CRIOCredentialProviderConfigList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CRIOCredentialProviderConfigList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CRIOCredentialProviderConfigSpec) DeepCopyInto(out *CRIOCredentialProviderConfigSpec) { + *out = *in + if in.MatchImages != nil { + in, out := &in.MatchImages, &out.MatchImages + *out = make([]MatchImage, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CRIOCredentialProviderConfigSpec. +func (in *CRIOCredentialProviderConfigSpec) DeepCopy() *CRIOCredentialProviderConfigSpec { + if in == nil { + return nil + } + out := new(CRIOCredentialProviderConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CRIOCredentialProviderConfigStatus) DeepCopyInto(out *CRIOCredentialProviderConfigStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CRIOCredentialProviderConfigStatus. +func (in *CRIOCredentialProviderConfigStatus) DeepCopy() *CRIOCredentialProviderConfigStatus { + if in == nil { + return nil + } + out := new(CRIOCredentialProviderConfigStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClusterImagePolicy) DeepCopyInto(out *ClusterImagePolicy) { *out = *in @@ -365,6 +474,7 @@ func (in *ClusterMonitoringSpec) DeepCopyInto(out *ClusterMonitoringSpec) { out.UserDefined = in.UserDefined in.AlertmanagerConfig.DeepCopyInto(&out.AlertmanagerConfig) in.MetricsServerConfig.DeepCopyInto(&out.MetricsServerConfig) + in.PrometheusOperatorConfig.DeepCopyInto(&out.PrometheusOperatorConfig) return } @@ -952,6 +1062,50 @@ func (in *PolicyRootOfTrust) DeepCopy() *PolicyRootOfTrust { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PrometheusOperatorConfig) DeepCopyInto(out *PrometheusOperatorConfig) { + *out = *in + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]ContainerResource, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]v1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TopologySpreadConstraints != nil { + in, out := &in.TopologySpreadConstraints, &out.TopologySpreadConstraints + *out = make([]v1.TopologySpreadConstraint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrometheusOperatorConfig. +func (in *PrometheusOperatorConfig) DeepCopy() *PrometheusOperatorConfig { + if in == nil { + return nil + } + out := new(PrometheusOperatorConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RetentionNumberConfig) DeepCopyInto(out *RetentionNumberConfig) { *out = *in diff --git a/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.featuregated-crd-manifests.yaml index 2f79f801d..14091b587 100644 --- a/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.featuregated-crd-manifests.yaml +++ b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.featuregated-crd-manifests.yaml @@ -21,6 +21,29 @@ backups.config.openshift.io: - AutomatedEtcdBackup Version: v1alpha1 +criocredentialproviderconfigs.config.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/2557 + CRDName: criocredentialproviderconfigs.config.openshift.io + Capability: "" + Category: "" + FeatureGates: + - CRIOCredentialProviderConfig + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: CRIOCredentialProviderConfig + Labels: {} + PluralName: criocredentialproviderconfigs + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: + - CRIOCredentialProviderConfig + Version: v1alpha1 + clusterimagepolicies.config.openshift.io: Annotations: {} ApprovedPRNumber: https://github.com/openshift/api/pull/1457 @@ -97,7 +120,7 @@ insightsdatagathers.config.openshift.io: Annotations: {} ApprovedPRNumber: https://github.com/openshift/api/pull/1245 CRDName: insightsdatagathers.config.openshift.io - Capability: "" + Capability: Insights Category: "" FeatureGates: - InsightsConfig diff --git a/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.swagger_doc_generated.go index 59a5b3708..c060ce874 100644 --- a/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.swagger_doc_generated.go @@ -132,10 +132,10 @@ var map_AlertmanagerCustomConfig = map[string]string{ "": "AlertmanagerCustomConfig represents the configuration for a custom Alertmanager deployment. 
alertmanagerCustomConfig provides configuration options for the default Alertmanager instance that runs in the `openshift-monitoring` namespace. Use this configuration to control whether the default Alertmanager is deployed, how it logs, and how its pods are scheduled.", "logLevel": "logLevel defines the verbosity of logs emitted by Alertmanager. This field allows users to control the amount and severity of logs generated, which can be useful for debugging issues or reducing noise in production environments. Allowed values are Error, Warn, Info, and Debug. When set to Error, only errors will be logged. When set to Warn, both warnings and errors will be logged. When set to Info, general information, warnings, and errors will all be logged. When set to Debug, detailed debugging information will be logged. When omitted, this means no opinion and the platform is left to choose a reasonable default, that is subject to change over time. The current default value is `Info`.", "nodeSelector": "nodeSelector defines the nodes on which the Pods are scheduled nodeSelector is optional.\n\nWhen omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. The current default value is `kubernetes.io/os: linux`.", - "resources": "resources defines the compute resource requests and limits for the Alertmanager container. This includes CPU, memory and HugePages constraints to help control scheduling and resource usage. When not specified, defaults are used by the platform. Requests cannot exceed limits. This field is optional. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ This is a simplified API that maps to Kubernetes ResourceRequirements. The current default values are:\n resources:\n - name: cpu\n request: 4m\n limit: null\n - name: memory\n request: 40Mi\n limit: null\nMaximum length for this list is 10. Minimum length for this list is 1.", + "resources": "resources defines the compute resource requests and limits for the Alertmanager container. This includes CPU, memory and HugePages constraints to help control scheduling and resource usage. When not specified, defaults are used by the platform. Requests cannot exceed limits. This field is optional. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ This is a simplified API that maps to Kubernetes ResourceRequirements. The current default values are:\n resources:\n - name: cpu\n request: 4m\n limit: null\n - name: memory\n request: 40Mi\n limit: null\nMaximum length for this list is 10. Minimum length for this list is 1. Each resource name must be unique within this list.", "secrets": "secrets defines a list of secrets that need to be mounted into the Alertmanager. The secrets must reside within the same namespace as the Alertmanager object. They will be added as volumes named secret- and mounted at /etc/alertmanager/secrets/ within the 'alertmanager' container of the Alertmanager Pods.\n\nThese secrets can be used to authenticate Alertmanager with endpoint receivers. For example, you can use secrets to: - Provide certificates for TLS authentication with receivers that require private CA certificates - Store credentials for Basic HTTP authentication with receivers that require password-based auth - Store any other authentication credentials needed by your alert receivers\n\nThis field is optional. Maximum length for this list is 10. Minimum length for this list is 1. 
Entries in this list must be unique.", - "tolerations": "tolerations defines tolerations for the pods. tolerations is optional.\n\nWhen omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. Defaults are empty/unset. Maximum length for this list is 10 Minimum length for this list is 1", - "topologySpreadConstraints": "topologySpreadConstraints defines rules for how Alertmanager Pods should be distributed across topology domains such as zones, nodes, or other user-defined labels. topologySpreadConstraints is optional. This helps improve high availability and resource efficiency by avoiding placing too many replicas in the same failure domain.\n\nWhen omitted, this means no opinion and the platform is left to choose a default, which is subject to change over time. This field maps directly to the `topologySpreadConstraints` field in the Pod spec. Default is empty list. Maximum length for this list is 10. Minimum length for this list is 1 Entries must have unique topologyKey and whenUnsatisfiable pairs.", + "tolerations": "tolerations defines tolerations for the pods. tolerations is optional.\n\nWhen omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. Defaults are empty/unset. Maximum length for this list is 10. Minimum length for this list is 1.", + "topologySpreadConstraints": "topologySpreadConstraints defines rules for how Alertmanager Pods should be distributed across topology domains such as zones, nodes, or other user-defined labels. topologySpreadConstraints is optional. This helps improve high availability and resource efficiency by avoiding placing too many replicas in the same failure domain.\n\nWhen omitted, this means no opinion and the platform is left to choose a default, which is subject to change over time. This field maps directly to the `topologySpreadConstraints` field in the Pod spec. Default is empty list. Maximum length for this list is 10. Minimum length for this list is 1. Entries must have unique topologyKey and whenUnsatisfiable pairs.", "volumeClaimTemplate": "volumeClaimTemplate Defines persistent storage for Alertmanager. Use this setting to configure the persistent volume claim, including storage class, volume size, and name. If omitted, the Pod uses ephemeral storage and alert data will not persist across restarts. This field is optional.", } @@ -174,10 +174,11 @@ func (ClusterMonitoringList) SwaggerDoc() map[string]string { } var map_ClusterMonitoringSpec = map[string]string{ - "": "ClusterMonitoringSpec defines the desired state of Cluster Monitoring Operator", - "userDefined": "userDefined set the deployment mode for user-defined monitoring in addition to the default platform monitoring. userDefined is optional. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default value is `Disabled`.", - "alertmanagerConfig": "alertmanagerConfig allows users to configure how the default Alertmanager instance should be deployed in the `openshift-monitoring` namespace. alertmanagerConfig is optional. When omitted, this means no opinion and the platform is left to choose a reasonable default, that is subject to change over time. 
The current default value is `DefaultConfig`.", - "metricsServerConfig": "metricsServerConfig is an optional field that can be used to configure the Kubernetes Metrics Server that runs in the openshift-monitoring namespace. Specifically, it can configure how the Metrics Server instance is deployed, pod scheduling, its audit policy and log verbosity. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time.", + "": "ClusterMonitoringSpec defines the desired state of Cluster Monitoring Operator", + "userDefined": "userDefined set the deployment mode for user-defined monitoring in addition to the default platform monitoring. userDefined is optional. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default value is `Disabled`.", + "alertmanagerConfig": "alertmanagerConfig allows users to configure how the default Alertmanager instance should be deployed in the `openshift-monitoring` namespace. alertmanagerConfig is optional. When omitted, this means no opinion and the platform is left to choose a reasonable default, that is subject to change over time. The current default value is `DefaultConfig`.", + "metricsServerConfig": "metricsServerConfig is an optional field that can be used to configure the Kubernetes Metrics Server that runs in the openshift-monitoring namespace. Specifically, it can configure how the Metrics Server instance is deployed, pod scheduling, its audit policy and log verbosity. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time.", + "prometheusOperatorConfig": "prometheusOperatorConfig is an optional field that can be used to configure the Prometheus Operator component. Specifically, it can configure how the Prometheus Operator instance is deployed, pod scheduling, and resource allocation. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time.", } func (ClusterMonitoringSpec) SwaggerDoc() map[string]string { @@ -207,16 +208,29 @@ var map_MetricsServerConfig = map[string]string{ "": "MetricsServerConfig provides configuration options for the Metrics Server instance that runs in the `openshift-monitoring` namespace. Use this configuration to control how the Metrics Server instance is deployed, how it logs, and how its pods are scheduled.", "audit": "audit defines the audit configuration used by the Metrics Server instance. audit is optional. When omitted, this means no opinion and the platform is left to choose a reasonable default, that is subject to change over time. The current default sets audit.profile to Metadata", "nodeSelector": "nodeSelector defines the nodes on which the Pods are scheduled nodeSelector is optional.\n\nWhen omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. The current default value is `kubernetes.io/os: linux`.", - "tolerations": "tolerations defines tolerations for the pods. tolerations is optional.\n\nWhen omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. Defaults are empty/unset. Maximum length for this list is 10 Minimum length for this list is 1", + "tolerations": "tolerations defines tolerations for the pods. 
tolerations is optional.\n\nWhen omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. Defaults are empty/unset. Maximum length for this list is 10. Minimum length for this list is 1.", "verbosity": "verbosity defines the verbosity of log messages for Metrics Server. Valid values are Errors, Info, Trace, TraceAll and omitted. When set to Errors, only critical messages and errors are logged. When set to Info, only basic information messages are logged. When set to Trace, information useful for general debugging is logged. When set to TraceAll, detailed information about metric scraping is logged. When omitted, this means no opinion and the platform is left to choose a reasonable default, that is subject to change over time. The current default value is `Errors`", - "resources": "resources defines the compute resource requests and limits for the Metrics Server container. This includes CPU, memory and HugePages constraints to help control scheduling and resource usage. When not specified, defaults are used by the platform. Requests cannot exceed limits. This field is optional. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ This is a simplified API that maps to Kubernetes ResourceRequirements. The current default values are:\n resources:\n - name: cpu\n request: 4m\n limit: null\n - name: memory\n request: 40Mi\n limit: null\nMaximum length for this list is 10. Minimum length for this list is 1.", - "topologySpreadConstraints": "topologySpreadConstraints defines rules for how Metrics Server Pods should be distributed across topology domains such as zones, nodes, or other user-defined labels. topologySpreadConstraints is optional. This helps improve high availability and resource efficiency by avoiding placing too many replicas in the same failure domain.\n\nWhen omitted, this means no opinion and the platform is left to choose a default, which is subject to change over time. This field maps directly to the `topologySpreadConstraints` field in the Pod spec. Default is empty list. Maximum length for this list is 10. Minimum length for this list is 1 Entries must have unique topologyKey and whenUnsatisfiable pairs.", + "resources": "resources defines the compute resource requests and limits for the Metrics Server container. This includes CPU, memory and HugePages constraints to help control scheduling and resource usage. When not specified, defaults are used by the platform. Requests cannot exceed limits. This field is optional. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ This is a simplified API that maps to Kubernetes ResourceRequirements. The current default values are:\n resources:\n - name: cpu\n request: 4m\n limit: null\n - name: memory\n request: 40Mi\n limit: null\nMaximum length for this list is 10. Minimum length for this list is 1. Each resource name must be unique within this list.", + "topologySpreadConstraints": "topologySpreadConstraints defines rules for how Metrics Server Pods should be distributed across topology domains such as zones, nodes, or other user-defined labels. topologySpreadConstraints is optional. This helps improve high availability and resource efficiency by avoiding placing too many replicas in the same failure domain.\n\nWhen omitted, this means no opinion and the platform is left to choose a default, which is subject to change over time. 
This field maps directly to the `topologySpreadConstraints` field in the Pod spec. Default is empty list. Maximum length for this list is 10. Minimum length for this list is 1. Entries must have unique topologyKey and whenUnsatisfiable pairs.", } func (MetricsServerConfig) SwaggerDoc() map[string]string { return map_MetricsServerConfig } +var map_PrometheusOperatorConfig = map[string]string{ + "": "PrometheusOperatorConfig provides configuration options for the Prometheus Operator instance Use this configuration to control how the Prometheus Operator instance is deployed, how it logs, and how its pods are scheduled.", + "logLevel": "logLevel defines the verbosity of logs emitted by Prometheus Operator. This field allows users to control the amount and severity of logs generated, which can be useful for debugging issues or reducing noise in production environments. Allowed values are Error, Warn, Info, and Debug. When set to Error, only errors will be logged. When set to Warn, both warnings and errors will be logged. When set to Info, general information, warnings, and errors will all be logged. When set to Debug, detailed debugging information will be logged. When omitted, this means no opinion and the platform is left to choose a reasonable default, that is subject to change over time. The current default value is `Info`.", + "nodeSelector": "nodeSelector defines the nodes on which the Pods are scheduled nodeSelector is optional.\n\nWhen omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. The current default value is `kubernetes.io/os: linux`. When specified, nodeSelector must contain at least 1 entry and must not contain more than 10 entries.", + "resources": "resources defines the compute resource requests and limits for the Prometheus Operator container. This includes CPU, memory and HugePages constraints to help control scheduling and resource usage. When not specified, defaults are used by the platform. Requests cannot exceed limits. This field is optional. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ This is a simplified API that maps to Kubernetes ResourceRequirements. The current default values are:\n resources:\n - name: cpu\n request: 4m\n limit: null\n - name: memory\n request: 40Mi\n limit: null\nMaximum length for this list is 10. Minimum length for this list is 1. Each resource name must be unique within this list.", + "tolerations": "tolerations defines tolerations for the pods. tolerations is optional.\n\nWhen omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. Defaults are empty/unset. Maximum length for this list is 10. Minimum length for this list is 1.", + "topologySpreadConstraints": "topologySpreadConstraints defines rules for how Prometheus Operator Pods should be distributed across topology domains such as zones, nodes, or other user-defined labels. topologySpreadConstraints is optional. This helps improve high availability and resource efficiency by avoiding placing too many replicas in the same failure domain.\n\nWhen omitted, this means no opinion and the platform is left to choose a default, which is subject to change over time. This field maps directly to the `topologySpreadConstraints` field in the Pod spec. Default is empty list. Maximum length for this list is 10. Minimum length for this list is 1. 
Entries must have unique topologyKey and whenUnsatisfiable pairs.", +} + +func (PrometheusOperatorConfig) SwaggerDoc() map[string]string { + return map_PrometheusOperatorConfig +} + var map_UserDefinedMonitoring = map[string]string{ "": "UserDefinedMonitoring config for user-defined projects.", "mode": "mode defines the different configurations of UserDefinedMonitoring Valid values are Disabled and NamespaceIsolated Disabled disables monitoring for user-defined projects. This restricts the default monitoring stack, installed in the openshift-monitoring project, to monitor only platform namespaces, which prevents any custom monitoring configurations or resources from being applied to user-defined namespaces. NamespaceIsolated enables monitoring for user-defined projects with namespace-scoped tenancy. This ensures that metrics, alerts, and monitoring data are isolated at the namespace level. The current default value is `Disabled`.", @@ -226,6 +240,44 @@ func (UserDefinedMonitoring) SwaggerDoc() map[string]string { return map_UserDefinedMonitoring } +var map_CRIOCredentialProviderConfig = map[string]string{ + "": "CRIOCredentialProviderConfig holds cluster-wide singleton resource configurations for CRI-O credential provider, the name of this instance is \"cluster\". CRI-O credential provider is a binary shipped with CRI-O that provides a way to obtain container image pull credentials from external sources. For example, it can be used to fetch mirror registry credentials from secrets resources in the cluster within the same namespace the pod will be running in. CRIOCredentialProviderConfig configuration specifies the pod image sources registries that should trigger the CRI-O credential provider execution, which will resolve the CRI-O mirror configurations and obtain the necessary credentials for pod creation. Note: Configuration changes will only take effect after the kubelet restarts, which is automatically managed by the cluster during rollout.\n\nThe resource is a singleton named \"cluster\".\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec defines the desired configuration of the CRI-O Credential Provider. This field is required and must be provided when creating the resource.", + "status": "status represents the current state of the CRIOCredentialProviderConfig. When omitted or nil, it indicates that the status has not yet been set by the controller. The controller will populate this field with validation conditions and operational state.", +} + +func (CRIOCredentialProviderConfig) SwaggerDoc() map[string]string { + return map_CRIOCredentialProviderConfig +} + +var map_CRIOCredentialProviderConfigList = map[string]string{ + "": "CRIOCredentialProviderConfigList contains a list of CRIOCredentialProviderConfig resources\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "metadata": "metadata is the standard list's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (CRIOCredentialProviderConfigList) SwaggerDoc() map[string]string { + return map_CRIOCredentialProviderConfigList +} + +var map_CRIOCredentialProviderConfigSpec = map[string]string{ + "": "CRIOCredentialProviderConfigSpec defines the desired configuration of the CRI-O Credential Provider.", + "matchImages": "matchImages is a list of string patterns used to determine whether the CRI-O credential provider should be invoked for a given image. This list is passed to the kubelet CredentialProviderConfig, and if any pattern matches the requested image, CRI-O credential provider will be invoked to obtain credentials for pulling that image or its mirrors. Depending on the platform, the CRI-O credential provider may be installed alongside an existing platform specific provider. Conflicts between the existing platform specific provider image match configuration and this list will be handled by the following precedence rule: credentials from built-in kubelet providers (e.g., ECR, GCR, ACR) take precedence over those from the CRIOCredentialProviderConfig when both match the same image. To avoid uncertainty, it is recommended to avoid configuring your private image patterns to overlap with existing platform specific provider config(e.g., the entries from https://github.com/openshift/machine-config-operator/blob/main/templates/common/aws/files/etc-kubernetes-credential-providers-ecr-credential-provider.yaml). You can check the resource's Status conditions to see if any entries were ignored due to exact matches with known built-in provider patterns.\n\nThis field is optional, the items of the list must contain between 1 and 50 entries. The list is treated as a set, so duplicate entries are not allowed.\n\nFor more details, see: https://kubernetes.io/docs/tasks/administer-cluster/kubelet-credential-provider/ https://github.com/cri-o/crio-credential-provider#architecture\n\nEach entry in matchImages is a pattern which can optionally contain a port and a path. Each entry must be no longer than 512 characters. Wildcards ('*') are supported for full subdomain labels, such as '*.k8s.io' or 'k8s.*.io', and for top-level domains, such as 'k8s.*' (which matches 'k8s.io' or 'k8s.net'). A global wildcard '*' (matching any domain) is not allowed. Wildcards may replace an entire hostname label (e.g., *.example.com), but they cannot appear within a label (e.g., f*oo.example.com) and are not allowed in the port or path. For example, 'example.*.com' is valid, but 'exa*mple.*.com' is not. Each wildcard matches only a single domain label, so '*.io' does **not** match '*.k8s.io'.\n\nA match exists between an image and a matchImage when all of the below are true: Both contain the same number of domain parts and each part matches. The URL path of an matchImages must be a prefix of the target image URL path. 
If the matchImages contains a port, then the port must match in the image as well.\n\nExample values of matchImages: - 123456789.dkr.ecr.us-east-1.amazonaws.com - *.azurecr.io - gcr.io - *.*.registry.io - registry.io:8080/path", +} + +func (CRIOCredentialProviderConfigSpec) SwaggerDoc() map[string]string { + return map_CRIOCredentialProviderConfigSpec +} + +var map_CRIOCredentialProviderConfigStatus = map[string]string{ + "": "CRIOCredentialProviderConfigStatus defines the observed state of CRIOCredentialProviderConfig", + "conditions": "conditions represent the latest available observations of the configuration state. When omitted, it indicates that no conditions have been reported yet. The maximum number of conditions is 16. Conditions are stored as a map keyed by condition type, ensuring uniqueness.\n\nExpected condition types include: \"Validated\": indicates whether the matchImages configuration is valid", +} + +func (CRIOCredentialProviderConfigStatus) SwaggerDoc() map[string]string { + return map_CRIOCredentialProviderConfigStatus +} + var map_ImagePolicy = map[string]string{ "": "ImagePolicy holds namespace-wide configuration for image signature verification\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", diff --git a/vendor/github.com/openshift/api/config/v1alpha2/types_insights.go b/vendor/github.com/openshift/api/config/v1alpha2/types_insights.go index d59f5920b..fbe666249 100644 --- a/vendor/github.com/openshift/api/config/v1alpha2/types_insights.go +++ b/vendor/github.com/openshift/api/config/v1alpha2/types_insights.go @@ -16,6 +16,7 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 // +openshift:enable:FeatureGate=InsightsConfig // +openshift:compatibility-gen:level=4 +// +openshift:capability=Insights type InsightsDataGather struct { metav1.TypeMeta `json:",inline"` // metadata is the standard object's metadata. 
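Reviewer note on the matchImages semantics documented in the hunks above: the following is a minimal, self-contained Go sketch of those matching rules as I read them (same number of host labels with a "*" label matching exactly one image label, exact port match when a port is given, and the pattern path acting as a prefix of the image path). The matchImage and splitRef helpers are hypothetical and written only to illustrate the documented behaviour; they are not part of the CRI-O credential provider, the kubelet, or this patch.

package main

import (
	"fmt"
	"strings"
)

// matchImage is an illustrative sketch (not the CRI-O credential provider or
// kubelet implementation) of the matching rules described for matchImages:
// the pattern and the image host must have the same number of domain labels
// and each label must match, where a "*" pattern label matches exactly one
// image label; if the pattern carries a port it must match exactly; and the
// pattern path must be a prefix of the image path.
func matchImage(pattern, image string) bool {
	pHost, pPort, pPath := splitRef(pattern)
	iHost, iPort, iPath := splitRef(image)

	pLabels := strings.Split(pHost, ".")
	iLabels := strings.Split(iHost, ".")
	if len(pLabels) != len(iLabels) {
		return false
	}
	for i := range pLabels {
		if pLabels[i] != "*" && pLabels[i] != iLabels[i] {
			return false
		}
	}
	if pPort != "" && pPort != iPort {
		return false
	}
	return strings.HasPrefix(iPath, pPath)
}

// splitRef splits "host[:port][/path]" into host, port and path components.
func splitRef(ref string) (host, port, path string) {
	host = ref
	if i := strings.Index(host, "/"); i >= 0 {
		host, path = host[:i], host[i:]
	}
	if i := strings.Index(host, ":"); i >= 0 {
		host, port = host[:i], host[i+1:]
	}
	return host, port, path
}

func main() {
	fmt.Println(matchImage("*.azurecr.io", "myregistry.azurecr.io/team/app"))     // true: wildcard matches a single label
	fmt.Println(matchImage("*.io", "sub.k8s.io"))                                 // false: label counts differ
	fmt.Println(matchImage("registry.io:8080/path", "registry.io:8080/path/app")) // true: port matches, path is a prefix
}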
diff --git a/vendor/github.com/openshift/api/config/v1alpha2/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/config/v1alpha2/zz_generated.featuregated-crd-manifests.yaml index 99fe308ef..1f73e723e 100644 --- a/vendor/github.com/openshift/api/config/v1alpha2/zz_generated.featuregated-crd-manifests.yaml +++ b/vendor/github.com/openshift/api/config/v1alpha2/zz_generated.featuregated-crd-manifests.yaml @@ -2,7 +2,7 @@ insightsdatagathers.config.openshift.io: Annotations: {} ApprovedPRNumber: https://github.com/openshift/api/pull/2195 CRDName: insightsdatagathers.config.openshift.io - Capability: "" + Capability: Insights Category: "" FeatureGates: - InsightsConfig diff --git a/vendor/github.com/openshift/api/features.md b/vendor/github.com/openshift/api/features.md index 82940288e..d2a18f5da 100644 --- a/vendor/github.com/openshift/api/features.md +++ b/vendor/github.com/openshift/api/features.md @@ -5,16 +5,15 @@ | EventedPLEG| | | | | | | | | | MachineAPIOperatorDisableMachineHealthCheckController| | | | | | | | | | MultiArchInstallAzure| | | | | | | | | -| NewOLMBoxCutterRuntime| | | | | | | | | | ShortCertRotation| | | | | | | | | | ClusterAPIComputeInstall| | | Enabled | Enabled | | | | | | ClusterAPIControlPlaneInstall| | | Enabled | Enabled | | | | | | ClusterAPIMachineManagementVSphere| | | Enabled | Enabled | | | | | | Example2| | | Enabled | Enabled | | | | | | ExternalSnapshotMetadata| | | Enabled | Enabled | | | | | -| IngressControllerDynamicConfigurationManager| | | Enabled | Enabled | | | | | | KMSEncryptionProvider| | | Enabled | Enabled | | | | | | NetworkConnect| | | Enabled | Enabled | | | | | +| NewOLMBoxCutterRuntime| | | | Enabled | | | | Enabled | | NewOLMCatalogdAPIV1Metas| | | | Enabled | | | | Enabled | | NewOLMPreflightPermissionChecks| | | | Enabled | | | | Enabled | | NoRegistryClusterInstall| | | | Enabled | | | | Enabled | @@ -33,6 +32,7 @@ | BootcNodeManagement| | | Enabled | Enabled | | | Enabled | Enabled | | CBORServingAndStorage| | | Enabled | Enabled | | | Enabled | Enabled | | CRDCompatibilityRequirementOperator| | | Enabled | Enabled | | | Enabled | Enabled | +| CRIOCredentialProviderConfig| | | Enabled | Enabled | | | Enabled | Enabled | | ClientsPreferCBOR| | | Enabled | Enabled | | | Enabled | Enabled | | ClusterAPIInstallIBMCloud| | | Enabled | Enabled | | | Enabled | Enabled | | ClusterAPIMachineManagement| | | Enabled | Enabled | | | Enabled | Enabled | @@ -41,6 +41,7 @@ | ClusterVersionOperatorConfiguration| | | Enabled | Enabled | | | Enabled | Enabled | | ConfigurablePKI| | | Enabled | Enabled | | | Enabled | Enabled | | DNSNameResolver| | | Enabled | Enabled | | | Enabled | Enabled | +| DRAPartitionableDevices| | | Enabled | Enabled | | | Enabled | Enabled | | DualReplica| | | Enabled | Enabled | | | Enabled | Enabled | | DyanmicServiceEndpointIBMCloud| | | Enabled | Enabled | | | Enabled | Enabled | | EVPN| | | Enabled | Enabled | | | Enabled | Enabled | @@ -52,19 +53,19 @@ | GCPCustomAPIEndpoints| | | Enabled | Enabled | | | Enabled | Enabled | | GCPCustomAPIEndpointsInstall| | | Enabled | Enabled | | | Enabled | Enabled | | GCPDualStackInstall| | | Enabled | Enabled | | | Enabled | Enabled | +| GatewayAPIWithoutOLM| | | Enabled | Enabled | | | Enabled | Enabled | | HyperShiftOnlyDynamicResourceAllocation| Enabled | | Enabled | | Enabled | | Enabled | | | ImageModeStatusReporting| | | Enabled | Enabled | | | Enabled | Enabled | +| IngressControllerDynamicConfigurationManager| | | Enabled | Enabled | | | Enabled | 
Enabled | | InsightsConfig| | | Enabled | Enabled | | | Enabled | Enabled | | InsightsOnDemandDataGather| | | Enabled | Enabled | | | Enabled | Enabled | | IrreconcilableMachineConfig| | | Enabled | Enabled | | | Enabled | Enabled | | KMSEncryption| | | Enabled | Enabled | | | Enabled | Enabled | | MachineAPIMigration| | | Enabled | Enabled | | | Enabled | Enabled | -| ManagedBootImagesCPMS| | | Enabled | Enabled | | | Enabled | Enabled | | MaxUnavailableStatefulSet| | | Enabled | Enabled | | | Enabled | Enabled | | MinimumKubeletVersion| | | Enabled | Enabled | | | Enabled | Enabled | | MixedCPUsAllocation| | | Enabled | Enabled | | | Enabled | Enabled | | MultiDiskSetup| | | Enabled | Enabled | | | Enabled | Enabled | -| MutableCSINodeAllocatableCount| | | Enabled | Enabled | | | Enabled | Enabled | | MutatingAdmissionPolicy| | | Enabled | Enabled | | | Enabled | Enabled | | NewOLM| | Enabled | | Enabled | | Enabled | | Enabled | | NewOLMOwnSingleNamespace| | Enabled | | Enabled | | Enabled | | Enabled | @@ -79,9 +80,6 @@ | VSphereHostVMGroupZonal| | | Enabled | Enabled | | | Enabled | Enabled | | VSphereMixedNodeEnv| | | Enabled | Enabled | | | Enabled | Enabled | | VolumeGroupSnapshot| | | Enabled | Enabled | | | Enabled | Enabled | -| AdditionalRoutingCapabilities| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | -| AdminNetworkPolicy| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | -| AlibabaPlatform| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | AzureWorkloadIdentity| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | BuildCSIVolumes| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | CPMSMachineNamePrefix| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | @@ -99,16 +97,12 @@ | ManagedBootImages| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | ManagedBootImagesAWS| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | ManagedBootImagesAzure| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| ManagedBootImagesCPMS| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | ManagedBootImagesvSphere| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | MetricsCollectionProfiles| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | -| NetworkDiagnosticsConfig| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | -| NetworkLiveMigration| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | -| NetworkSegmentation| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| MutableCSINodeAllocatableCount| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | OpenShiftPodSecurityAdmission| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | PinnedImages| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | -| PreconfiguredUDNAddresses| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | -| ProcMountType| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | -| RouteAdvertisements| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | RouteExternalCertificate| Enabled | Enabled | Enabled | 
Enabled | Enabled | Enabled | Enabled | Enabled | | ServiceAccountTokenNodeBinding| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | SigstoreImageVerification| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | @@ -119,4 +113,3 @@ | UserNamespacesSupport| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | VSphereMultiDisk| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | VSphereMultiNetworks| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | -| VolumeAttributesClass| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | diff --git a/vendor/github.com/openshift/api/features/features.go b/vendor/github.com/openshift/api/features/features.go index de21ab393..36a479071 100644 --- a/vendor/github.com/openshift/api/features/features.go +++ b/vendor/github.com/openshift/api/features/features.go @@ -131,13 +131,13 @@ var ( enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade, configv1.Default, configv1.OKD). mustRegister() - FeatureGateAlibabaPlatform = newFeatureGate("AlibabaPlatform"). - reportProblemsToJiraComponent("cloud-provider"). - contactPerson("jspeed"). - productScope(ocpSpecific). - enhancementPR(legacyFeatureGateWithoutEnhancement). - enableIn(configv1.Default, configv1.OKD, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). - mustRegister() + FeatureGateCRIOCredentialProviderConfig = newFeatureGate("CRIOCredentialProviderConfig"). + reportProblemsToJiraComponent("node"). + contactPerson("QiWang"). + productScope(ocpSpecific). + enhancementPR("https://github.com/openshift/enhancements/pull/1861"). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() FeatureGateVSphereHostVMGroupZonal = newFeatureGate("VSphereHostVMGroupZonal"). reportProblemsToJiraComponent("splat"). @@ -171,22 +171,6 @@ var ( enableIn(configv1.Default, configv1.OKD, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() - FeatureGateAdminNetworkPolicy = newFeatureGate("AdminNetworkPolicy"). - reportProblemsToJiraComponent("Networking/ovn-kubernetes"). - contactPerson("tssurya"). - productScope(ocpSpecific). - enhancementPR(legacyFeatureGateWithoutEnhancement). - enableIn(configv1.Default, configv1.OKD, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). - mustRegister() - - FeatureGateNetworkSegmentation = newFeatureGate("NetworkSegmentation"). - reportProblemsToJiraComponent("Networking/ovn-kubernetes"). - contactPerson("tssurya"). - productScope(ocpSpecific). - enhancementPR("https://github.com/openshift/enhancements/pull/1623"). - enableIn(configv1.Default, configv1.OKD, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). - mustRegister() - FeatureGateNetworkConnect = newFeatureGate("NetworkConnect"). reportProblemsToJiraComponent("Networking/ovn-kubernetes"). contactPerson("tssurya"). @@ -195,22 +179,6 @@ var ( enableIn(configv1.DevPreviewNoUpgrade). mustRegister() - FeatureGateAdditionalRoutingCapabilities = newFeatureGate("AdditionalRoutingCapabilities"). - reportProblemsToJiraComponent("Networking/cluster-network-operator"). - contactPerson("jcaamano"). - productScope(ocpSpecific). - enhancementPR(legacyFeatureGateWithoutEnhancement). - enableIn(configv1.Default, configv1.OKD, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). 
- mustRegister() - - FeatureGateRouteAdvertisements = newFeatureGate("RouteAdvertisements"). - reportProblemsToJiraComponent("Networking/ovn-kubernetes"). - contactPerson("jcaamano"). - productScope(ocpSpecific). - enhancementPR(legacyFeatureGateWithoutEnhancement). - enableIn(configv1.Default, configv1.OKD, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). - mustRegister() - FeatureGateEVPN = newFeatureGate("EVPN"). reportProblemsToJiraComponent("Networking/ovn-kubernetes"). contactPerson("jcaamano"). @@ -219,22 +187,6 @@ var ( enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() - FeatureGateNetworkLiveMigration = newFeatureGate("NetworkLiveMigration"). - reportProblemsToJiraComponent("Networking/ovn-kubernetes"). - contactPerson("pliu"). - productScope(ocpSpecific). - enhancementPR(legacyFeatureGateWithoutEnhancement). - enableIn(configv1.Default, configv1.OKD, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). - mustRegister() - - FeatureGateNetworkDiagnosticsConfig = newFeatureGate("NetworkDiagnosticsConfig"). - reportProblemsToJiraComponent("Networking/cluster-network-operator"). - contactPerson("kyrtapz"). - productScope(ocpSpecific). - enhancementPR(legacyFeatureGateWithoutEnhancement). - enableIn(configv1.Default, configv1.OKD, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). - mustRegister() - FeatureGateOVNObservability = newFeatureGate("OVNObservability"). reportProblemsToJiraComponent("Networking"). contactPerson("npinaeva"). @@ -366,7 +318,7 @@ var ( contactPerson("djoshy"). productScope(ocpSpecific). enhancementPR("https://github.com/openshift/enhancements/pull/1818"). - enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + enableIn(configv1.Default, configv1.OKD, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() FeatureGateBootImageSkewEnforcement = newFeatureGate("BootImageSkewEnforcement"). @@ -417,14 +369,6 @@ var ( enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade, configv1.Default, configv1.OKD). mustRegister() - FeatureGateVolumeAttributesClass = newFeatureGate("VolumeAttributesClass"). - reportProblemsToJiraComponent("Storage / Kubernetes External Components"). - contactPerson("dfajmon"). - productScope(kubernetes). - enhancementPR("https://github.com/kubernetes/enhancements/issues/3751"). - enableIn(configv1.Default, configv1.OKD, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). - mustRegister() - FeatureGateVolumeGroupSnapshot = newFeatureGate("VolumeGroupSnapshot"). reportProblemsToJiraComponent("Storage / Kubernetes External Components"). contactPerson("fbertina"). @@ -526,6 +470,7 @@ var ( contactPerson("pegoncal"). productScope(ocpSpecific). enhancementPR("https://github.com/openshift/enhancements/pull/1890"). + enableForClusterProfile(SelfManaged, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() FeatureGateInsightsOnDemandDataGather = newFeatureGate("InsightsOnDemandDataGather"). @@ -626,14 +571,6 @@ var ( enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade, configv1.Default, configv1.OKD). mustRegister() - FeatureGateProcMountType = newFeatureGate("ProcMountType"). - reportProblemsToJiraComponent("Node"). - contactPerson("haircommander"). - productScope(kubernetes). - enhancementPR("https://github.com/kubernetes/enhancements/issues/4265"). - enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade, configv1.Default, configv1.OKD). 
- mustRegister() - FeatureGateVSphereMultiNetworks = newFeatureGate("VSphereMultiNetworks"). reportProblemsToJiraComponent("SPLAT"). contactPerson("rvanderp"). @@ -646,8 +583,8 @@ var ( reportProblemsToJiraComponent("Networking/router"). contactPerson("miciah"). productScope(ocpSpecific). - enhancementPR(legacyFeatureGateWithoutEnhancement). - enableIn(configv1.DevPreviewNoUpgrade). + enhancementPR("https://github.com/openshift/enhancements/pull/1687"). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() FeatureGateMinimumKubeletVersion = newFeatureGate("MinimumKubeletVersion"). @@ -675,12 +612,12 @@ var ( mustRegister() FeatureGateKMSEncryption = newFeatureGate("KMSEncryption"). - reportProblemsToJiraComponent("kube-apiserver"). - contactPerson("ardaguclu"). - productScope(ocpSpecific). - enhancementPR("https://github.com/openshift/enhancements/pull/1900"). - enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). - mustRegister() + reportProblemsToJiraComponent("kube-apiserver"). + contactPerson("ardaguclu"). + productScope(ocpSpecific). + enhancementPR("https://github.com/openshift/enhancements/pull/1900"). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() FeatureGateHighlyAvailableArbiter = newFeatureGate("HighlyAvailableArbiter"). reportProblemsToJiraComponent("Two Node with Arbiter"). @@ -806,14 +743,6 @@ var ( enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() - FeatureGatePreconfiguredUDNAddresses = newFeatureGate("PreconfiguredUDNAddresses"). - reportProblemsToJiraComponent("Networking/ovn-kubernetes"). - contactPerson("kyrtapz"). - productScope(ocpSpecific). - enhancementPR("https://github.com/openshift/enhancements/pull/1793"). - enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade, configv1.Default, configv1.OKD). - mustRegister() - FeatureGateAWSServiceLBNetworkSecurityGroup = newFeatureGate("AWSServiceLBNetworkSecurityGroup"). reportProblemsToJiraComponent("Cloud Compute / Cloud Controller Manager"). contactPerson("mtulio"). @@ -929,7 +858,7 @@ var ( contactPerson("jsafrane"). productScope(kubernetes). enhancementPR("https://github.com/kubernetes/enhancements/issues/4876"). - enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade, configv1.Default, configv1.OKD). mustRegister() FeatureGateOSStreams = newFeatureGate("OSStreams"). reportProblemsToJiraComponent("MachineConfigOperator"). @@ -970,6 +899,14 @@ var ( enableForClusterProfile(Hypershift, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade, configv1.Default, configv1.OKD). mustRegister() + FeatureGateDRAPartitionableDevices = newFeatureGate("DRAPartitionableDevices"). + reportProblemsToJiraComponent("Node"). + contactPerson("harche"). + productScope(kubernetes). + enhancementPR("https://github.com/kubernetes/enhancements/issues/4815"). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + FeatureGateConfigurablePKI = newFeatureGate("ConfigurablePKI"). reportProblemsToJiraComponent("kube-apiserver"). contactPerson("sanchezl"). @@ -993,4 +930,13 @@ var ( enhancementPR("https://github.com/openshift/enhancements/pull/1465"). enableIn(configv1.DevPreviewNoUpgrade). mustRegister() + + FeatureGateGatewayAPIWithoutOLM = newFeatureGate("GatewayAPIWithoutOLM"). + reportProblemsToJiraComponent("Routing"). + contactPerson("miciah"). + productScope(ocpSpecific). 
+ enhancementPR("https://github.com/openshift/enhancements/pull/1933"). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() ) + diff --git a/vendor/github.com/openshift/api/features/legacyfeaturegates.go b/vendor/github.com/openshift/api/features/legacyfeaturegates.go index dd11fdf66..a92c0b9bb 100644 --- a/vendor/github.com/openshift/api/features/legacyfeaturegates.go +++ b/vendor/github.com/openshift/api/features/legacyfeaturegates.go @@ -7,10 +7,6 @@ var legacyFeatureGates = sets.New( // never add to this list, if you think you have an exception ask @deads2k "AWSEFSDriverVolumeMetrics", // never add to this list, if you think you have an exception ask @deads2k - "AdditionalRoutingCapabilities", - // never add to this list, if you think you have an exception ask @deads2k - "AdminNetworkPolicy", - // never add to this list, if you think you have an exception ask @deads2k "AlibabaPlatform", // never add to this list, if you think you have an exception ask @deads2k "AutomatedEtcdBackup", @@ -79,12 +75,6 @@ var legacyFeatureGates = sets.New( // never add to this list, if you think you have an exception ask @deads2k "MultiArchInstallGCP", // never add to this list, if you think you have an exception ask @deads2k - "NetworkDiagnosticsConfig", - // never add to this list, if you think you have an exception ask @deads2k - "NetworkLiveMigration", - // never add to this list, if you think you have an exception ask @deads2k - "NetworkSegmentation", - // never add to this list, if you think you have an exception ask @deads2k "NewOLM", // never add to this list, if you think you have an exception ask @deads2k "OVNObservability", @@ -95,8 +85,6 @@ var legacyFeatureGates = sets.New( // never add to this list, if you think you have an exception ask @deads2k "PrivateHostedZoneAWS", // never add to this list, if you think you have an exception ask @deads2k - "RouteAdvertisements", - // never add to this list, if you think you have an exception ask @deads2k "RouteExternalCertificate", // never add to this list, if you think you have an exception ask @deads2k "SetEIPForNLBIngressController", diff --git a/vendor/github.com/openshift/api/machine/v1beta1/types_awsprovider.go b/vendor/github.com/openshift/api/machine/v1beta1/types_awsprovider.go index d1d5941fa..e3508d667 100644 --- a/vendor/github.com/openshift/api/machine/v1beta1/types_awsprovider.go +++ b/vendor/github.com/openshift/api/machine/v1beta1/types_awsprovider.go @@ -331,9 +331,16 @@ type Filter struct { // TagSpecification is the name/value pair for a tag type TagSpecification struct { - // name of the tag + // name of the tag. + // This field is required and must be a non-empty string. + // Must be between 1 and 128 characters in length. + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=128 + // +required Name string `json:"name"` - // value of the tag + // value of the tag. + // When omitted, this creates a tag with an empty string as the value. + // +optional Value string `json:"value"` } @@ -407,6 +414,26 @@ type AWSMachineProviderStatus struct { // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:"conditions,omitempty"` + // dedicatedHost tracks the dynamically allocated dedicated host. + // This field is populated when allocationStrategy is Dynamic (with or without DynamicHostAllocation). + // When omitted, this indicates that the dedicated host has not yet been allocated, or allocation is in progress. 
+ // +optional + DedicatedHost *DedicatedHostStatus `json:"dedicatedHost,omitempty"` +} + +// DedicatedHostStatus defines the observed state of a dynamically allocated dedicated host +// associated with an AWSMachine. This struct is used to track the ID of the dedicated host. +type DedicatedHostStatus struct { + // id tracks the dynamically allocated dedicated host ID. + // This field is populated when allocationStrategy is Dynamic (with or without DynamicHostAllocation). + // The value must start with "h-" followed by either 8 or 17 lowercase hexadecimal characters (0-9 and a-f). + // The use of 8 lowercase hexadecimal characters is for older legacy hosts that may not have been migrated to newer format. + // Must be either 10 or 19 characters in length. + // +kubebuilder:validation:XValidation:rule="self.matches('^h-([0-9a-f]{8}|[0-9a-f]{17})$')",message="id must start with 'h-' followed by either 8 or 17 lowercase hexadecimal characters (0-9 and a-f)" + // +kubebuilder:validation:MinLength=10 + // +kubebuilder:validation:MaxLength=19 + // +required + ID string `json:"id,omitempty"` } // MarketType describes the market type of an EC2 Instance @@ -454,21 +481,77 @@ type HostAffinity string const ( // HostAffinityAnyAvailable lets the platform select any available dedicated host. + HostAffinityAnyAvailable HostAffinity = "AnyAvailable" // HostAffinityDedicatedHost requires specifying a particular host via dedicatedHost.host.hostID. HostAffinityDedicatedHost HostAffinity = "DedicatedHost" ) +// AllocationStrategy selects how a dedicated host is provided to the system for assigning to the instance. +// +kubebuilder:validation:Enum:=UserProvided;Dynamic +// +enum +type AllocationStrategy string + +const ( + // AllocationStrategyUserProvided specifies that the system should assign instances to a user-provided dedicated host. + AllocationStrategyUserProvided AllocationStrategy = "UserProvided" + + // AllocationStrategyDynamic specifies that the system should dynamically allocate a dedicated host for instances. + AllocationStrategyDynamic AllocationStrategy = "Dynamic" +) + // DedicatedHost represents the configuration for the usage of dedicated host. +// +kubebuilder:validation:XValidation:rule="self.allocationStrategy == 'UserProvided' ? has(self.id) : !has(self.id)",message="id is required when allocationStrategy is UserProvided, and forbidden otherwise" +// +kubebuilder:validation:XValidation:rule="has(self.dynamicHostAllocation) ? self.allocationStrategy == 'Dynamic' : true",message="dynamicHostAllocation is only allowed when allocationStrategy is Dynamic" +// +union type DedicatedHost struct { + // allocationStrategy specifies if the dedicated host will be provided by the admin through the id field or if the host will be dynamically allocated. + // Valid values are UserProvided and Dynamic. + // When omitted, the value defaults to "UserProvided", which requires the id field to be set. + // When allocationStrategy is set to UserProvided, an ID of the dedicated host to assign must be provided. + // When allocationStrategy is set to Dynamic, a dedicated host will be allocated and used to assign instances. + // When allocationStrategy is set to Dynamic, and dynamicHostAllocation is configured, a dedicated host will be allocated and the tags in dynamicHostAllocation will be assigned to that host. 
+ // +optional + // +unionDiscriminator + // +default="UserProvided" + AllocationStrategy *AllocationStrategy `json:"allocationStrategy,omitempty"` + // id identifies the AWS Dedicated Host on which the instance must run. // The value must start with "h-" followed by either 8 or 17 lowercase hexadecimal characters (0-9 and a-f). // The use of 8 lowercase hexadecimal characters is for older legacy hosts that may not have been migrated to newer format. // Must be either 10 or 19 characters in length. - // +kubebuilder:validation:XValidation:rule="self.matches('^h-([0-9a-f]{8}|[0-9a-f]{17})$')",message="hostID must start with 'h-' followed by either 8 or 17 lowercase hexadecimal characters (0-9 and a-f)" + // This field is required when allocationStrategy is UserProvided, and forbidden otherwise. + // When omitted with allocationStrategy set to Dynamic, the platform will dynamically allocate a dedicated host. + // +kubebuilder:validation:XValidation:rule="self.matches('^h-([0-9a-f]{8}|[0-9a-f]{17})$')",message="id must start with 'h-' followed by either 8 or 17 lowercase hexadecimal characters (0-9 and a-f)" // +kubebuilder:validation:MinLength=10 // +kubebuilder:validation:MaxLength=19 - // +required + // +optional + // +unionMember=UserProvided ID string `json:"id,omitempty"` + + // dynamicHostAllocation specifies tags to apply to a dynamically allocated dedicated host. + // This field is only allowed when allocationStrategy is Dynamic, and is mutually exclusive with id. + // When specified, a dedicated host will be allocated with the provided tags applied. + // When omitted (and allocationStrategy is Dynamic), a dedicated host will be allocated without any additional tags. + // +optional + // +unionMember=Dynamic + DynamicHostAllocation *DynamicHostAllocationSpec `json:"dynamicHostAllocation,omitempty"` +} + +// DynamicHostAllocationSpec defines the configuration for dynamic dedicated host allocation. +// This specification always allocates exactly one dedicated host per machine. +// At least one property must be specified when this struct is used. +// Currently only Tags are available for configuring, but in the future more configs may become available. +// +kubebuilder:validation:MinProperties=1 +type DynamicHostAllocationSpec struct { + // tags specifies a set of key-value pairs to apply to the allocated dedicated host. + // When omitted, no additional user-defined tags will be applied to the allocated host. + // A maximum of 50 tags can be specified. + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:MaxItems=50 + // +listType=map + // +listMapKey=name + // +optional + Tags *[]TagSpecification `json:"tags,omitempty"` } diff --git a/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.deepcopy.go index d08906c7d..63b9bb5ff 100644 --- a/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.deepcopy.go @@ -152,6 +152,11 @@ func (in *AWSMachineProviderStatus) DeepCopyInto(out *AWSMachineProviderStatus) (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.DedicatedHost != nil { + in, out := &in.DedicatedHost, &out.DedicatedHost + *out = new(DedicatedHostStatus) + **out = **in + } return } @@ -512,6 +517,16 @@ func (in *DataDiskManagedDiskParameters) DeepCopy() *DataDiskManagedDiskParamete // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
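To make the new DedicatedHost union concrete, here is a small sketch that assumes only the machine/v1beta1 types added in this hunk. It shows the two shapes the added CEL rules permit: UserProvided must carry an id, while Dynamic must not and may optionally request tags through dynamicHostAllocation.

package main

import (
	"fmt"

	machinev1beta1 "github.com/openshift/api/machine/v1beta1"
	"k8s.io/utils/ptr"
)

func main() {
	// UserProvided (also the default when allocationStrategy is omitted):
	// the dedicated host id is required.
	userProvided := machinev1beta1.DedicatedHost{
		AllocationStrategy: ptr.To(machinev1beta1.AllocationStrategyUserProvided),
		ID:                 "h-0123456789abcdef0",
	}

	// Dynamic: id is forbidden; tags for the allocated host are optional and
	// only valid with this strategy.
	dynamic := machinev1beta1.DedicatedHost{
		AllocationStrategy: ptr.To(machinev1beta1.AllocationStrategyDynamic),
		DynamicHostAllocation: &machinev1beta1.DynamicHostAllocationSpec{
			Tags: &[]machinev1beta1.TagSpecification{
				{Name: "team", Value: "compute"},
			},
		},
	}

	fmt.Printf("%+v\n%+v\n", userProvided, dynamic)
}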
func (in *DedicatedHost) DeepCopyInto(out *DedicatedHost) { *out = *in + if in.AllocationStrategy != nil { + in, out := &in.AllocationStrategy, &out.AllocationStrategy + *out = new(AllocationStrategy) + **out = **in + } + if in.DynamicHostAllocation != nil { + in, out := &in.DynamicHostAllocation, &out.DynamicHostAllocation + *out = new(DynamicHostAllocationSpec) + (*in).DeepCopyInto(*out) + } return } @@ -525,6 +540,22 @@ func (in *DedicatedHost) DeepCopy() *DedicatedHost { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DedicatedHostStatus) DeepCopyInto(out *DedicatedHostStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DedicatedHostStatus. +func (in *DedicatedHostStatus) DeepCopy() *DedicatedHostStatus { + if in == nil { + return nil + } + out := new(DedicatedHostStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DiskEncryptionSetParameters) DeepCopyInto(out *DiskEncryptionSetParameters) { *out = *in @@ -557,6 +588,31 @@ func (in *DiskSettings) DeepCopy() *DiskSettings { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DynamicHostAllocationSpec) DeepCopyInto(out *DynamicHostAllocationSpec) { + *out = *in + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = new([]TagSpecification) + if **in != nil { + in, out := *in, *out + *out = make([]TagSpecification, len(*in)) + copy(*out, *in) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DynamicHostAllocationSpec. +func (in *DynamicHostAllocationSpec) DeepCopy() *DynamicHostAllocationSpec { + if in == nil { + return nil + } + out := new(DynamicHostAllocationSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *EBSBlockDeviceSpec) DeepCopyInto(out *EBSBlockDeviceSpec) { *out = *in @@ -935,7 +991,7 @@ func (in *HostPlacement) DeepCopyInto(out *HostPlacement) { if in.DedicatedHost != nil { in, out := &in.DedicatedHost, &out.DedicatedHost *out = new(DedicatedHost) - **out = **in + (*in).DeepCopyInto(*out) } return } diff --git a/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.swagger_doc_generated.go index 0d043ad60..2c4a9030c 100644 --- a/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.swagger_doc_generated.go @@ -54,6 +54,7 @@ var map_AWSMachineProviderStatus = map[string]string{ "instanceId": "instanceId is the instance ID of the machine created in AWS", "instanceState": "instanceState is the state of the AWS instance for this machine", "conditions": "conditions is a set of conditions associated with the Machine to indicate errors or other status", + "dedicatedHost": "dedicatedHost tracks the dynamically allocated dedicated host. This field is populated when allocationStrategy is Dynamic (with or without DynamicHostAllocation). 
When omitted, this indicates that the dedicated host has not yet been allocated, or allocation is in progress.", } func (AWSMachineProviderStatus) SwaggerDoc() map[string]string { @@ -93,14 +94,34 @@ func (CPUOptions) SwaggerDoc() map[string]string { } var map_DedicatedHost = map[string]string{ - "": "DedicatedHost represents the configuration for the usage of dedicated host.", - "id": "id identifies the AWS Dedicated Host on which the instance must run. The value must start with \"h-\" followed by either 8 or 17 lowercase hexadecimal characters (0-9 and a-f). The use of 8 lowercase hexadecimal characters is for older legacy hosts that may not have been migrated to newer format. Must be either 10 or 19 characters in length.", + "": "DedicatedHost represents the configuration for the usage of dedicated host.", + "allocationStrategy": "allocationStrategy specifies if the dedicated host will be provided by the admin through the id field or if the host will be dynamically allocated. Valid values are UserProvided and Dynamic. When omitted, the value defaults to \"UserProvided\", which requires the id field to be set. When allocationStrategy is set to UserProvided, an ID of the dedicated host to assign must be provided. When allocationStrategy is set to Dynamic, a dedicated host will be allocated and used to assign instances. When allocationStrategy is set to Dynamic, and dynamicHostAllocation is configured, a dedicated host will be allocated and the tags in dynamicHostAllocation will be assigned to that host.", + "id": "id identifies the AWS Dedicated Host on which the instance must run. The value must start with \"h-\" followed by either 8 or 17 lowercase hexadecimal characters (0-9 and a-f). The use of 8 lowercase hexadecimal characters is for older legacy hosts that may not have been migrated to newer format. Must be either 10 or 19 characters in length. This field is required when allocationStrategy is UserProvided, and forbidden otherwise. When omitted with allocationStrategy set to Dynamic, the platform will dynamically allocate a dedicated host.", + "dynamicHostAllocation": "dynamicHostAllocation specifies tags to apply to a dynamically allocated dedicated host. This field is only allowed when allocationStrategy is Dynamic, and is mutually exclusive with id. When specified, a dedicated host will be allocated with the provided tags applied. When omitted (and allocationStrategy is Dynamic), a dedicated host will be allocated without any additional tags.", } func (DedicatedHost) SwaggerDoc() map[string]string { return map_DedicatedHost } +var map_DedicatedHostStatus = map[string]string{ + "": "DedicatedHostStatus defines the observed state of a dynamically allocated dedicated host associated with an AWSMachine. This struct is used to track the ID of the dedicated host.", + "id": "id tracks the dynamically allocated dedicated host ID. This field is populated when allocationStrategy is Dynamic (with or without DynamicHostAllocation). The value must start with \"h-\" followed by either 8 or 17 lowercase hexadecimal characters (0-9 and a-f). The use of 8 lowercase hexadecimal characters is for older legacy hosts that may not have been migrated to newer format. Must be either 10 or 19 characters in length.", +} + +func (DedicatedHostStatus) SwaggerDoc() map[string]string { + return map_DedicatedHostStatus +} + +var map_DynamicHostAllocationSpec = map[string]string{ + "": "DynamicHostAllocationSpec defines the configuration for dynamic dedicated host allocation. 
This specification always allocates exactly one dedicated host per machine. At least one property must be specified when this struct is used. Currently only Tags are available for configuring, but in the future more configs may become available.", + "tags": "tags specifies a set of key-value pairs to apply to the allocated dedicated host. When omitted, no additional user-defined tags will be applied to the allocated host. A maximum of 50 tags can be specified.", +} + +func (DynamicHostAllocationSpec) SwaggerDoc() map[string]string { + return map_DynamicHostAllocationSpec +} + var map_EBSBlockDeviceSpec = map[string]string{ "": "EBSBlockDeviceSpec describes a block device for an EBS volume. https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/EbsBlockDevice", "deleteOnTermination": "Indicates whether the EBS volume is deleted on machine termination.\n\nDeprecated: setting this field has no effect.", @@ -176,8 +197,8 @@ func (SpotMarketOptions) SwaggerDoc() map[string]string { var map_TagSpecification = map[string]string{ "": "TagSpecification is the name/value pair for a tag", - "name": "name of the tag", - "value": "value of the tag", + "name": "name of the tag. This field is required and must be a non-empty string. Must be between 1 and 128 characters in length.", + "value": "value of the tag. When omitted, this creates a tag with an empty string as the value.", } func (TagSpecification) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/api/operator/v1/types_network.go b/vendor/github.com/openshift/api/operator/v1/types_network.go index 111240eec..1cf56f549 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_network.go +++ b/vendor/github.com/openshift/api/operator/v1/types_network.go @@ -54,7 +54,7 @@ type NetworkList struct { // NetworkSpec is the top-level network configuration object. 
// +kubebuilder:validation:XValidation:rule="!has(self.defaultNetwork) || !has(self.defaultNetwork.ovnKubernetesConfig) || !has(self.defaultNetwork.ovnKubernetesConfig.gatewayConfig) || !has(self.defaultNetwork.ovnKubernetesConfig.gatewayConfig.ipForwarding) || self.defaultNetwork.ovnKubernetesConfig.gatewayConfig.ipForwarding == oldSelf.defaultNetwork.ovnKubernetesConfig.gatewayConfig.ipForwarding || self.defaultNetwork.ovnKubernetesConfig.gatewayConfig.ipForwarding == 'Restricted' || self.defaultNetwork.ovnKubernetesConfig.gatewayConfig.ipForwarding == 'Global'",message="invalid value for IPForwarding, valid values are 'Restricted' or 'Global'" -// +openshift:validation:FeatureGateAwareXValidation:featureGate=RouteAdvertisements,rule="(has(self.additionalRoutingCapabilities) && ('FRR' in self.additionalRoutingCapabilities.providers)) || !has(self.defaultNetwork) || !has(self.defaultNetwork.ovnKubernetesConfig) || !has(self.defaultNetwork.ovnKubernetesConfig.routeAdvertisements) || self.defaultNetwork.ovnKubernetesConfig.routeAdvertisements != 'Enabled'",message="Route advertisements cannot be Enabled if 'FRR' routing capability provider is not available" +// +kubebuilder:validation:XValidation:rule="(has(self.additionalRoutingCapabilities) && ('FRR' in self.additionalRoutingCapabilities.providers)) || !has(self.defaultNetwork) || !has(self.defaultNetwork.ovnKubernetesConfig) || !has(self.defaultNetwork.ovnKubernetesConfig.routeAdvertisements) || self.defaultNetwork.ovnKubernetesConfig.routeAdvertisements != 'Enabled'",message="Route advertisements cannot be Enabled if 'FRR' routing capability provider is not available" type NetworkSpec struct { OperatorSpec `json:",inline"` @@ -136,7 +136,6 @@ type NetworkSpec struct { // capabilities acquired through the enablement of these components but may // require specific configuration on their side to do so; refer to their // respective documentation and configuration options. - // +openshift:enable:FeatureGate=AdditionalRoutingCapabilities // +optional AdditionalRoutingCapabilities *AdditionalRoutingCapabilities `json:"additionalRoutingCapabilities,omitempty"` } @@ -157,7 +156,7 @@ const ( ) // NetworkMigration represents the cluster network migration configuration. -// +openshift:validation:FeatureGateAwareXValidation:featureGate=NetworkLiveMigration,rule="!has(self.mtu) || !has(self.networkType) || self.networkType == \"\" || has(self.mode) && self.mode == 'Live'",message="networkType migration in mode other than 'Live' may not be configured at the same time as mtu migration" +// +kubebuilder:validation:XValidation:rule="!has(self.mtu) || !has(self.networkType) || self.networkType == \"\" || has(self.mode) && self.mode == 'Live'",message="networkType migration in mode other than 'Live' may not be configured at the same time as mtu migration" type NetworkMigration struct { // mtu contains the MTU migration configuration. Set this to allow changing // the MTU values for the default network. If unset, the operation of @@ -465,7 +464,6 @@ type OVNKubernetesConfig struct { // means the user has no opinion and the platform is left to choose // reasonable defaults. These defaults are subject to change over time. The // current default is "Disabled". 
- // +openshift:enable:FeatureGate=RouteAdvertisements // +optional RouteAdvertisements RouteAdvertisementsEnablement `json:"routeAdvertisements,omitempty"` } diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_50_ingress_00_ingresscontrollers.crd.yaml b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_50_ingress_00_ingresscontrollers.crd.yaml index fa03ef576..fd7ecdeba 100644 --- a/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_50_ingress_00_ingresscontrollers.crd.yaml +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_50_ingress_00_ingresscontrollers.crd.yaml @@ -2005,11 +2005,14 @@ spec: ciphers: description: |- ciphers is used to specify the cipher algorithms that are negotiated - during the TLS handshake. Operators may remove entries their operands - do not support. For example, to use DES-CBC3-SHA (yaml): + during the TLS handshake. Operators may remove entries that their operands + do not support. For example, to use only ECDHE-RSA-AES128-GCM-SHA256 (yaml): ciphers: - - DES-CBC3-SHA + - ECDHE-RSA-AES128-GCM-SHA256 + + TLS 1.3 cipher suites (e.g. TLS_AES_128_GCM_SHA256) are not configurable + and are always enabled when TLS 1.3 is negotiated. items: type: string type: array @@ -2034,9 +2037,6 @@ spec: legacy clients and want to remain highly secure while being compatible with most clients currently in use. - The cipher list includes TLS 1.3 ciphers for forward compatibility, followed - by the "intermediate" profile ciphers. - This profile is equivalent to a Custom profile specified as: minTLSVersion: VersionTLS12 ciphers: @@ -2049,8 +2049,6 @@ spec: - ECDHE-RSA-AES256-GCM-SHA384 - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 - - DHE-RSA-AES128-GCM-SHA256 - - DHE-RSA-AES256-GCM-SHA384 nullable: true type: object modern: @@ -2071,9 +2069,6 @@ spec: old is a TLS profile for use when services need to be accessed by very old clients or libraries and should be used only as a last resort. - The cipher list includes TLS 1.3 ciphers for forward compatibility, followed - by the "old" profile ciphers. - This profile is equivalent to a Custom profile specified as: minTLSVersion: VersionTLS10 ciphers: @@ -2086,23 +2081,15 @@ spec: - ECDHE-RSA-AES256-GCM-SHA384 - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 - - DHE-RSA-AES128-GCM-SHA256 - - DHE-RSA-AES256-GCM-SHA384 - - DHE-RSA-CHACHA20-POLY1305 - ECDHE-ECDSA-AES128-SHA256 - ECDHE-RSA-AES128-SHA256 - ECDHE-ECDSA-AES128-SHA - ECDHE-RSA-AES128-SHA - - ECDHE-ECDSA-AES256-SHA384 - - ECDHE-RSA-AES256-SHA384 - ECDHE-ECDSA-AES256-SHA - ECDHE-RSA-AES256-SHA - - DHE-RSA-AES128-SHA256 - - DHE-RSA-AES256-SHA256 - AES128-GCM-SHA256 - AES256-GCM-SHA384 - AES128-SHA256 - - AES256-SHA256 - AES128-SHA - AES256-SHA - DES-CBC3-SHA @@ -2113,9 +2100,10 @@ spec: type is one of Old, Intermediate, Modern or Custom. Custom provides the ability to specify individual TLS security profile parameters. - The profiles are currently based on version 5.0 of the Mozilla Server Side TLS - configuration guidelines (released 2019-06-28) with TLS 1.3 ciphers added for - forward compatibility. See: https://ssl-config.mozilla.org/guidelines/5.0.json + The profiles are based on version 5.7 of the Mozilla Server Side TLS + configuration guidelines. The cipher lists consist of the configuration's + "ciphersuites" followed by the Go-specific "ciphers" from the guidelines. 
+ See: https://ssl-config.mozilla.org/guidelines/5.7.json The profiles are intent based, so they may change over time as new ciphers are developed and existing ciphers are found to be insecure. Depending on @@ -3253,11 +3241,14 @@ spec: ciphers: description: |- ciphers is used to specify the cipher algorithms that are negotiated - during the TLS handshake. Operators may remove entries their operands - do not support. For example, to use DES-CBC3-SHA (yaml): + during the TLS handshake. Operators may remove entries that their operands + do not support. For example, to use only ECDHE-RSA-AES128-GCM-SHA256 (yaml): ciphers: - - DES-CBC3-SHA + - ECDHE-RSA-AES128-GCM-SHA256 + + TLS 1.3 cipher suites (e.g. TLS_AES_128_GCM_SHA256) are not configurable + and are always enabled when TLS 1.3 is negotiated. items: type: string type: array diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_70_network_01_networks.crd.yaml b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_70_network_01_networks.crd.yaml index 7a41655bd..60459deca 100644 --- a/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_70_network_01_networks.crd.yaml +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_70_network_01_networks.crd.yaml @@ -914,13 +914,6 @@ spec: type: boolean type: object x-kubernetes-validations: - - message: Route advertisements cannot be Enabled if 'FRR' routing capability - provider is not available - rule: (has(self.additionalRoutingCapabilities) && ('FRR' in self.additionalRoutingCapabilities.providers)) - || !has(self.defaultNetwork) || !has(self.defaultNetwork.ovnKubernetesConfig) - || !has(self.defaultNetwork.ovnKubernetesConfig.routeAdvertisements) - || self.defaultNetwork.ovnKubernetesConfig.routeAdvertisements != - 'Enabled' - message: invalid value for IPForwarding, valid values are 'Restricted' or 'Global' rule: '!has(self.defaultNetwork) || !has(self.defaultNetwork.ovnKubernetesConfig) @@ -931,6 +924,13 @@ spec: || self.defaultNetwork.ovnKubernetesConfig.gatewayConfig.ipForwarding == ''Restricted'' || self.defaultNetwork.ovnKubernetesConfig.gatewayConfig.ipForwarding == ''Global''' + - message: Route advertisements cannot be Enabled if 'FRR' routing capability + provider is not available + rule: (has(self.additionalRoutingCapabilities) && ('FRR' in self.additionalRoutingCapabilities.providers)) + || !has(self.defaultNetwork) || !has(self.defaultNetwork.ovnKubernetesConfig) + || !has(self.defaultNetwork.ovnKubernetesConfig.routeAdvertisements) + || self.defaultNetwork.ovnKubernetesConfig.routeAdvertisements != + 'Enabled' status: description: |- NetworkStatus is detailed operator status, which is distilled diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigurations-Default.crd.yaml b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigurations-Default.crd.yaml index 3de28dcdf..2e65e97c8 100644 --- a/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigurations-Default.crd.yaml +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigurations-Default.crd.yaml @@ -108,6 +108,7 @@ spec: controlplanemachinesets means that the machine manager will only register resources of the kind ControlPlaneMachineSet. 
enum: - machinesets + - controlplanemachinesets type: string selection: description: selection allows granular control of the machine @@ -197,6 +198,11 @@ spec: - resource - selection type: object + x-kubernetes-validations: + - message: Only All or None selection mode is permitted for + ControlPlaneMachineSets + rule: self.resource != 'controlplanemachinesets' || self.selection.mode + == 'All' || self.selection.mode == 'None' maxItems: 5 type: array x-kubernetes-list-map-keys: @@ -741,6 +747,7 @@ spec: controlplanemachinesets means that the machine manager will only register resources of the kind ControlPlaneMachineSet. enum: - machinesets + - controlplanemachinesets type: string selection: description: selection allows granular control of the machine @@ -830,6 +837,11 @@ spec: - resource - selection type: object + x-kubernetes-validations: + - message: Only All or None selection mode is permitted for + ControlPlaneMachineSets + rule: self.resource != 'controlplanemachinesets' || self.selection.mode + == 'All' || self.selection.mode == 'None' maxItems: 5 type: array x-kubernetes-list-map-keys: diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigurations-OKD.crd.yaml b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigurations-OKD.crd.yaml index 6c763ea47..1d1600228 100644 --- a/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigurations-OKD.crd.yaml +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigurations-OKD.crd.yaml @@ -108,6 +108,7 @@ spec: controlplanemachinesets means that the machine manager will only register resources of the kind ControlPlaneMachineSet. enum: - machinesets + - controlplanemachinesets type: string selection: description: selection allows granular control of the machine @@ -197,6 +198,11 @@ spec: - resource - selection type: object + x-kubernetes-validations: + - message: Only All or None selection mode is permitted for + ControlPlaneMachineSets + rule: self.resource != 'controlplanemachinesets' || self.selection.mode + == 'All' || self.selection.mode == 'None' maxItems: 5 type: array x-kubernetes-list-map-keys: @@ -741,6 +747,7 @@ spec: controlplanemachinesets means that the machine manager will only register resources of the kind ControlPlaneMachineSet. 
enum: - machinesets + - controlplanemachinesets type: string selection: description: selection allows granular control of the machine @@ -830,6 +837,11 @@ spec: - resource - selection type: object + x-kubernetes-validations: + - message: Only All or None selection mode is permitted for + ControlPlaneMachineSets + rule: self.resource != 'controlplanemachinesets' || self.selection.mode + == 'All' || self.selection.mode == 'None' maxItems: 5 type: array x-kubernetes-list-map-keys: diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/operator/v1/zz_generated.featuregated-crd-manifests.yaml index e7c94e286..51a758804 100644 --- a/vendor/github.com/openshift/api/operator/v1/zz_generated.featuregated-crd-manifests.yaml +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.featuregated-crd-manifests.yaml @@ -327,10 +327,7 @@ networks.operator.openshift.io: CRDName: networks.operator.openshift.io Capability: "" Category: "" - FeatureGates: - - AdditionalRoutingCapabilities - - NetworkLiveMigration - - RouteAdvertisements + FeatureGates: [] FilenameOperatorName: network FilenameOperatorOrdering: "01" FilenameRunLevel: "0000_70" diff --git a/vendor/github.com/openshift/library-go/pkg/crypto/crypto.go b/vendor/github.com/openshift/library-go/pkg/crypto/crypto.go index ca2806ecc..be0337b90 100644 --- a/vendor/github.com/openshift/library-go/pkg/crypto/crypto.go +++ b/vendor/github.com/openshift/library-go/pkg/crypto/crypto.go @@ -143,7 +143,11 @@ var ciphers = map[string]uint16{ } // openSSLToIANACiphersMap maps OpenSSL cipher suite names to IANA names -// ref: https://www.iana.org/assignments/tls-parameters/tls-parameters.xml +// Ref: https://www.iana.org/assignments/tls-parameters/tls-parameters.xml +// This must hold a 1:1 mapping for each OpenSSL cipher defined in openshift/api TLSSecurityProfiles, +// so it can be used to translate OpenSSL ciphers to IANA ciphers, which is what go's crypto/tls understands. +// Ciphers in this map must also be compatible with go's crypto/tls ciphers: +// https://github.com/golang/go/blob/d4febb45179fa99ee1d5783bcb693ed7ba14115c/src/crypto/tls/cipher_suites.go#L682-L724 var openSSLToIANACiphersMap = map[string]string{ // TLS 1.3 ciphers - not configurable in go 1.13, all of them are used in TLSv1.3 flows "TLS_AES_128_GCM_SHA256": "TLS_AES_128_GCM_SHA256", // 0x13,0x01 @@ -163,6 +167,21 @@ var openSSLToIANACiphersMap = map[string]string{ "AES256-GCM-SHA384": "TLS_RSA_WITH_AES_256_GCM_SHA384", // 0x00,0x9D "AES128-SHA256": "TLS_RSA_WITH_AES_128_CBC_SHA256", // 0x00,0x3C + // Go's crypto/tls does not support CBC mode and DHE ciphers, so we don't want to include them here. 
+ // See: + // - https://github.com/golang/go/issues/26652 + // - https://github.com/golang/go/issues/7758 + // - https://redhat-internal.slack.com/archives/C098FU5MRAB/p1770309657097269 + // + // "ECDHE-ECDSA-AES256-SHA384": "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384", // 0xC0,0x24 + // "ECDHE-RSA-AES256-SHA384": "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384", // 0xC0,0x28 + // "AES256-SHA256": "TLS_RSA_WITH_AES_256_CBC_SHA256", // 0x00,0x3D + // "DHE-RSA-AES128-GCM-SHA256": "TLS_DHE_RSA_WITH_AES_128_GCM_SHA256", // 0x00,0x9E + // "DHE-RSA-AES256-GCM-SHA384": "TLS_DHE_RSA_WITH_AES_256_GCM_SHA384", // 0x00,0x9F + // "DHE-RSA-CHACHA20-POLY1305": "TLS_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256", // 0xCC,0xAA + // "DHE-RSA-AES128-SHA256": "TLS_DHE_RSA_WITH_AES_128_CBC_SHA256", // 0x00,0x67 + // "DHE-RSA-AES256-SHA256": "TLS_DHE_RSA_WITH_AES_256_CBC_SHA256", // 0x00,0x6B + // TLS 1 "ECDHE-ECDSA-AES128-SHA": "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", // 0xC0,0x09 "ECDHE-RSA-AES128-SHA": "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", // 0xC0,0x13 @@ -170,9 +189,10 @@ var openSSLToIANACiphersMap = map[string]string{ "ECDHE-RSA-AES256-SHA": "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", // 0xC0,0x14 // SSL 3 - "AES128-SHA": "TLS_RSA_WITH_AES_128_CBC_SHA", // 0x00,0x2F - "AES256-SHA": "TLS_RSA_WITH_AES_256_CBC_SHA", // 0x00,0x35 - "DES-CBC3-SHA": "TLS_RSA_WITH_3DES_EDE_CBC_SHA", // 0x00,0x0A + "AES128-SHA": "TLS_RSA_WITH_AES_128_CBC_SHA", // 0x00,0x2F + "AES256-SHA": "TLS_RSA_WITH_AES_256_CBC_SHA", // 0x00,0x35 + "DES-CBC3-SHA": "TLS_RSA_WITH_3DES_EDE_CBC_SHA", // 0x00,0x0A + "ECDHE-RSA-DES-CBC3-SHA": "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA", // 0xC0,0x12 } // CipherSuitesToNamesOrDie given a list of cipher suites as ints, return their readable names diff --git a/vendor/github.com/openshift/library-go/pkg/operator/apiserver/controller/workload/workload.go b/vendor/github.com/openshift/library-go/pkg/operator/apiserver/controller/workload/workload.go index 7d031f5ed..34621c2a6 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/apiserver/controller/workload/workload.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/apiserver/controller/workload/workload.go @@ -16,6 +16,7 @@ import ( corev1listers "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" + "k8s.io/utils/ptr" operatorv1 "github.com/openshift/api/operator/v1" applyoperatorv1 "github.com/openshift/client-go/operator/applyconfigurations/operator/v1" @@ -33,7 +34,9 @@ const ( // Delegate captures a set of methods that hold a custom logic type Delegate interface { // Sync a method that will be used for delegation. It should bring the desired workload into operation. - Sync(ctx context.Context, controllerContext factory.SyncContext) (*appsv1.Deployment, bool, []error) + // + // Returned conditions will replace the generated conditions of the same type. + Sync(ctx context.Context, controllerContext factory.SyncContext) (*appsv1.Deployment, bool, []*applyoperatorv1.OperatorConditionApplyConfiguration, []error) // PreconditionFulfilled a method that indicates whether all prerequisites are met and we can Sync. 
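Returning to the crypto.go hunk above: the trimmed openSSLToIANACiphersMap keeps only ciphers that Go's crypto/tls can negotiate, so translating an OpenSSL-style cipher list ends up being a name lookup followed by a filter against the suites the runtime implements. The following standalone sketch uses only the standard library; the two-entry map is an excerpt for illustration, not the full vendored table.

package main

import (
	"crypto/tls"
	"fmt"
)

// Excerpt of the OpenSSL-to-IANA mapping for illustration; the vendored
// openSSLToIANACiphersMap in library-go carries the full table. Names absent
// from the map (such as the dropped DHE and CBC suites) translate to "" and
// are filtered out below.
var openSSLToIANA = map[string]string{
	"ECDHE-ECDSA-AES128-GCM-SHA256": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
	"ECDHE-RSA-AES128-GCM-SHA256":   "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
}

// toCipherSuiteIDs translates OpenSSL cipher names into crypto/tls suite IDs,
// silently dropping anything the Go runtime cannot negotiate.
func toCipherSuiteIDs(openSSLNames []string) []uint16 {
	// Index the suites Go actually implements by their IANA names.
	byName := map[string]uint16{}
	for _, s := range tls.CipherSuites() {
		byName[s.Name] = s.ID
	}
	var ids []uint16
	for _, name := range openSSLNames {
		if id, ok := byName[openSSLToIANA[name]]; ok {
			ids = append(ids, id)
		}
	}
	return ids
}

func main() {
	ids := toCipherSuiteIDs([]string{
		"ECDHE-ECDSA-AES128-GCM-SHA256",
		"DHE-RSA-AES128-GCM-SHA256", // not supported by crypto/tls, dropped
	})
	fmt.Printf("%#04x\n", ids)
}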
// @@ -131,13 +134,13 @@ func (c *Controller) sync(ctx context.Context, controllerContext factory.SyncCon } if fulfilled, err := c.delegate.PreconditionFulfilled(ctx); err != nil { - return c.updateOperatorStatus(ctx, operatorStatus, nil, false, false, []error{err}) + return c.updateOperatorStatus(ctx, operatorStatus, nil, false, false, nil, []error{err}) } else if !fulfilled { - return c.updateOperatorStatus(ctx, operatorStatus, nil, false, false, nil) + return c.updateOperatorStatus(ctx, operatorStatus, nil, false, false, nil, nil) } if deleted, operandName, err := c.delegate.WorkloadDeleted(ctx); err != nil { - return c.updateOperatorStatus(ctx, operatorStatus, nil, false, false, []error{err}) + return c.updateOperatorStatus(ctx, operatorStatus, nil, false, false, nil, []error{err}) } else if deleted { // Server-Side-Apply with an empty operator status for the specific field manager will effectively // remove any conditions and generations owned by it, because the respective API fields have 'map' @@ -150,9 +153,9 @@ func (c *Controller) sync(ctx context.Context, controllerContext factory.SyncCon return nil } - workload, operatorConfigAtHighestGeneration, errs := c.delegate.Sync(ctx, controllerContext) + workload, operatorConfigAtHighestGeneration, conditionOverwrites, errs := c.delegate.Sync(ctx, controllerContext) - return c.updateOperatorStatus(ctx, operatorStatus, workload, operatorConfigAtHighestGeneration, true, errs) + return c.updateOperatorStatus(ctx, operatorStatus, workload, operatorConfigAtHighestGeneration, true, conditionOverwrites, errs) } // shouldSync checks ManagementState to determine if we can run this operator, probably set by a cluster administrator. @@ -174,7 +177,15 @@ func (c *Controller) shouldSync(ctx context.Context, operatorSpec *operatorv1.Op } // updateOperatorStatus updates the status based on the actual workload and errors that might have occurred during synchronization. 
-func (c *Controller) updateOperatorStatus(ctx context.Context, previousStatus *operatorv1.OperatorStatus, workload *appsv1.Deployment, operatorConfigAtHighestGeneration bool, preconditionsReady bool, errs []error) (err error) { +func (c *Controller) updateOperatorStatus( + ctx context.Context, + previousStatus *operatorv1.OperatorStatus, + workload *appsv1.Deployment, + operatorConfigAtHighestGeneration bool, + preconditionsReady bool, + conditionOverwrites []*applyoperatorv1.OperatorConditionApplyConfiguration, + errs []error, +) (err error) { if errs == nil { errs = []error{} } @@ -211,6 +222,17 @@ func (c *Controller) updateOperatorStatus(ctx context.Context, previousStatus *o workloadDegradedCondition, ) + OverwritesLoop: + for _, overwrite := range conditionOverwrites { + for i, cond := range status.Conditions { + if ptr.Equal(cond.Type, overwrite.Type) { + status.Conditions[i] = *overwrite + continue OverwritesLoop + } + } + status.Conditions = append(status.Conditions, *overwrite) + } + if applyError := c.operatorClient.ApplyOperatorStatus(ctx, c.controllerInstanceName, status); applyError != nil { err = applyError } diff --git a/vendor/modules.txt b/vendor/modules.txt index 143da3225..1d7d1cffa 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -226,7 +226,7 @@ github.com/openshift-eng/openshift-tests-extension/pkg/ginkgo github.com/openshift-eng/openshift-tests-extension/pkg/junit github.com/openshift-eng/openshift-tests-extension/pkg/util/sets github.com/openshift-eng/openshift-tests-extension/pkg/version -# github.com/openshift/api v0.0.0-20260126183958-606bd613f9f7 +# github.com/openshift/api v0.0.0-20260212193555-c06ab675261f ## explicit; go 1.24.0 github.com/openshift/api github.com/openshift/api/annotations @@ -376,7 +376,7 @@ github.com/openshift/client-go/user/applyconfigurations/internal github.com/openshift/client-go/user/applyconfigurations/user/v1 github.com/openshift/client-go/user/clientset/versioned/scheme github.com/openshift/client-go/user/clientset/versioned/typed/user/v1 -# github.com/openshift/library-go v0.0.0-20260210145149-d0e860e8d752 +# github.com/openshift/library-go v0.0.0-20260210145149-d0e860e8d752 => github.com/tchap/library-go v0.0.0-20260216103045-5a90edab46c3 ## explicit; go 1.24.0 github.com/openshift/library-go/pkg/apiserver/jsonpatch github.com/openshift/library-go/pkg/apps/deployment @@ -1632,3 +1632,4 @@ sigs.k8s.io/structured-merge-diff/v6/value ## explicit; go 1.22 sigs.k8s.io/yaml # github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20251001123353-fd5b1fb35db1 +# github.com/openshift/library-go => github.com/tchap/library-go v0.0.0-20260216103045-5a90edab46c3
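The workload.go change above is the hook the new scaling logic relies on: a Delegate may now return apply-configuration conditions that replace generated conditions of the same type via the OverwritesLoop. A hedged sketch of what such an overwrite could look like for a scale-only rollout follows; the condition type prefix, reason, and message are illustrative placeholders, not values copied from this patch.

package scalingexample

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	operatorv1 "github.com/openshift/api/operator/v1"
	applyoperatorv1 "github.com/openshift/client-go/operator/applyconfigurations/operator/v1"
)

// progressingOverwrite builds an overwrite that a Delegate.Sync implementation
// could return so the controller's OverwritesLoop replaces the generated
// "<prefix>Progressing" condition. The prefix, reason, and message here are
// placeholders chosen for this sketch.
func progressingOverwrite(conditionPrefix string, now metav1.Time) *applyoperatorv1.OperatorConditionApplyConfiguration {
	return applyoperatorv1.OperatorCondition().
		WithType(conditionPrefix + "Progressing").
		WithStatus(operatorv1.ConditionFalse).
		WithReason("ScalingOnly").
		WithMessage("deployment replicas are being scaled; suppressing Progressing").
		WithLastTransitionTime(now)
}

Because overwrites are matched by condition type on every sync, a delegate only needs to emit this while the scaling annotation is present; once it stops returning the overwrite, the generated Progressing condition takes effect again.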