diff --git a/Makefile b/Makefile index d9739044e8..66af13bff8 100644 --- a/Makefile +++ b/Makefile @@ -31,6 +31,8 @@ COLLECT_PROFILES_CMD := $(addprefix bin/, collect-profiles) OPM := $(addprefix bin/, opm) OLM_CMDS := $(shell go list -mod=vendor $(OLM_PKG)/cmd/...) PSM_CMD := $(addprefix bin/, psm) +LIFECYCLE_CONTROLLER_CMD := $(addprefix bin/, lifecycle-controller) +LIFECYCLE_SERVER_CMD := $(addprefix bin/, lifecycle-server) REGISTRY_CMDS := $(addprefix bin/, $(shell ls staging/operator-registry/cmd | grep -v opm)) # Default image tag for build/olm-container and build/registry-container @@ -77,7 +79,7 @@ build/registry: $(MAKE) $(REGISTRY_CMDS) $(OPM) build/olm: - $(MAKE) $(PSM_CMD) $(OLM_CMDS) $(COLLECT_PROFILES_CMD) bin/copy-content + $(MAKE) $(PSM_CMD) $(OLM_CMDS) $(COLLECT_PROFILES_CMD) bin/copy-content $(LIFECYCLE_CONTROLLER_CMD) $(LIFECYCLE_SERVER_CMD) $(OPM): version_flags=-ldflags "-X '$(REGISTRY_PKG)/cmd/opm/version.gitCommit=$(GIT_COMMIT)' -X '$(REGISTRY_PKG)/cmd/opm/version.opmVersion=$(OPM_VERSION)' -X '$(REGISTRY_PKG)/cmd/opm/version.buildDate=$(BUILD_DATE)'" $(OPM): @@ -97,6 +99,12 @@ $(PSM_CMD): FORCE $(COLLECT_PROFILES_CMD): FORCE go build $(GO_BUILD_OPTS) $(GO_BUILD_TAGS) -o $(COLLECT_PROFILES_CMD) $(ROOT_PKG)/cmd/collect-profiles +$(LIFECYCLE_CONTROLLER_CMD): FORCE + go build $(GO_BUILD_OPTS) $(GO_BUILD_TAGS) -o $(LIFECYCLE_CONTROLLER_CMD) $(ROOT_PKG)/cmd/lifecycle-controller + +$(LIFECYCLE_SERVER_CMD): FORCE + go build $(GO_BUILD_OPTS) $(GO_BUILD_TAGS) -o $(LIFECYCLE_SERVER_CMD) $(ROOT_PKG)/cmd/lifecycle-server + .PHONY: cross cross: version_flags=-X '$(REGISTRY_PKG)/cmd/opm/version.gitCommit=$(GIT_COMMIT)' -X '$(REGISTRY_PKG)/cmd/opm/version.opmVersion=$(OPM_VERSION)' -X '$(REGISTRY_PKG)/cmd/opm/version.buildDate=$(BUILD_DATE)' cross: diff --git a/cmd/lifecycle-controller/main.go b/cmd/lifecycle-controller/main.go new file mode 100644 index 0000000000..04c5ce578a --- /dev/null +++ b/cmd/lifecycle-controller/main.go @@ -0,0 +1,23 @@ +package 
main + +import ( + "fmt" + "os" + + "github.com/spf13/cobra" + _ "k8s.io/client-go/plugin/pkg/client/auth" +) + +func main() { + rootCmd := &cobra.Command{ + Use: "lifecycle-controller", + Short: "Lifecycle Metadata Controller for OLM", + } + + rootCmd.AddCommand(newStartCmd()) + + if err := rootCmd.Execute(); err != nil { + fmt.Fprintf(os.Stderr, "error running lifecycle-controller: %v\n", err) + os.Exit(1) + } +} diff --git a/cmd/lifecycle-controller/start.go b/cmd/lifecycle-controller/start.go new file mode 100644 index 0000000000..57cd965184 --- /dev/null +++ b/cmd/lifecycle-controller/start.go @@ -0,0 +1,307 @@ +package main + +import ( + "cmp" + "context" + "crypto/tls" + "errors" + "fmt" + "net/http" + "os" + + "github.com/go-logr/logr" + configv1 "github.com/openshift/api/config/v1" + tlsutil "github.com/openshift/controller-runtime-common/pkg/tls" + "github.com/openshift/library-go/pkg/crypto" + "github.com/openshift/operator-framework-olm/pkg/leaderelection" + controllers "github.com/openshift/operator-framework-olm/pkg/lifecycle-controller" + operatorsv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1" + "github.com/spf13/cobra" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + networkingv1 "k8s.io/api/networking/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/rest" + "k8s.io/klog/v2" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/manager" + metricsfilters "sigs.k8s.io/controller-runtime/pkg/metrics/filters" + metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" +) + +const ( + defaultMetricsAddr = ":8443" + defaultHealthCheckAddr = ":8081" + leaderElectionID = "lifecycle-controller-lock" +) + +var ( + disableLeaderElection bool + healthCheckAddr string + metricsAddr 
string + catalogSourceLabelSelector string + catalogSourceFieldSelector string + tlsCertFile string + tlsKeyFile string +) + +func newStartCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "start", + Short: "Start the Lifecycle Controller", + SilenceUsage: true, + RunE: run, + } + + cmd.Flags().StringVar(&healthCheckAddr, "health", defaultHealthCheckAddr, "health check address") + cmd.Flags().StringVar(&metricsAddr, "metrics", defaultMetricsAddr, "metrics address") + cmd.Flags().BoolVar(&disableLeaderElection, "disable-leader-election", false, "disable leader election") + cmd.Flags().StringVar(&catalogSourceLabelSelector, "catalog-source-label-selector", "", "label selector for catalog sources to manage (empty means all)") + cmd.Flags().StringVar(&catalogSourceFieldSelector, "catalog-source-field-selector", "", "field selector for catalog sources to manage (empty means all)") + cmd.Flags().StringVar(&tlsCertFile, "tls-cert", "", "path to TLS certificate file for metrics server") + cmd.Flags().StringVar(&tlsKeyFile, "tls-key", "", "path to TLS key file for metrics server") + _ = cmd.MarkFlagRequired("tls-cert") + _ = cmd.MarkFlagRequired("tls-key") + return cmd +} + +func run(_ *cobra.Command, _ []string) error { + ctx := ctrl.SetupSignalHandler() + ctrl.SetLogger(klog.NewKlogr()) + setupLog := ctrl.Log.WithName("setup") + + cfg, err := loadStartConfig(ctx) + if err != nil { + return fmt.Errorf("unable to load startup configuration: %v", err) + } + logConfig(cfg, setupLog) + + mgr, err := setupManager(cfg) + if err != nil { + return fmt.Errorf("failed to setup manager instance: %v", err) + } + + tlsProfileChan, err := setupTLSProfileWatcher(mgr, cfg) + if err != nil { + return fmt.Errorf("unable to setup TLS profile watcher: %v", err) + } + defer close(tlsProfileChan) + + if err := setupLifecycleServerController(mgr, cfg, tlsProfileChan); err != nil { + return fmt.Errorf("unable to setup lifecycle server controller: %v", err) + } + + setupLog.Info("starting 
manager") + if err := mgr.Start(ctx); err != nil { + return fmt.Errorf("unable to start manager: %v", err) + } + + return nil +} + +type startConfig struct { + Namespace string + Version string + + ServerImage string + CatalogSourceFieldSelector fields.Selector + CatalogSourceLabelSelector labels.Selector + RESTConfig *rest.Config + Scheme *runtime.Scheme + + LeaderElection configv1.LeaderElection + + InitialTLSProfileSpec configv1.TLSProfileSpec + TLSConfigProvider *controllers.TLSConfigProvider + EnableTLSProfileWatcher bool +} + +func loadStartConfig(ctx context.Context) (*startConfig, error) { + cfg := &startConfig{ + Namespace: os.Getenv("NAMESPACE"), + Version: cmp.Or(os.Getenv("RELEASE_VERSION"), "unknown"), + ServerImage: os.Getenv("LIFECYCLE_SERVER_IMAGE"), + } + if cfg.Namespace == "" && !disableLeaderElection { + return nil, fmt.Errorf("NAMESPACE environment variable is required when leader election is enabled") + } + if cfg.ServerImage == "" { + return nil, fmt.Errorf("LIFECYCLE_SERVER_IMAGE environment variable is required") + } + + // Using a function to load the keypair each time means that we automatically pick up the new certificate when it reloads. 
+ getCertificate := func(_ *tls.ClientHelloInfo) (*tls.Certificate, error) { + cert, err := tls.LoadX509KeyPair(tlsCertFile, tlsKeyFile) + if err != nil { + return nil, err + } + return &cert, nil + } + _, err := getCertificate(nil) + if err != nil { + return nil, fmt.Errorf("failed to load TLS certificate/key: %v", err) + } + cfg.CatalogSourceFieldSelector, err = fields.ParseSelector(catalogSourceFieldSelector) + if err != nil { + return nil, fmt.Errorf("failed to parse catalog source field selector %q: %v", catalogSourceFieldSelector, err) + } + cfg.CatalogSourceLabelSelector, err = labels.Parse(catalogSourceLabelSelector) + if err != nil { + return nil, fmt.Errorf("failed to parse catalog source label selector %q: %v", catalogSourceLabelSelector, err) + } + cfg.RESTConfig, err = ctrl.GetConfig() + if err != nil { + return nil, fmt.Errorf("failed to get rest config: %v", err) + } + cfg.Scheme = setupScheme() + cfg.LeaderElection = leaderelection.GetLeaderElectionConfig(ctrl.Log.WithName("leaderelection"), cfg.RESTConfig, !disableLeaderElection) + + cfg.InitialTLSProfileSpec, cfg.EnableTLSProfileWatcher, err = getInitialTLSProfile(ctx, cfg.RESTConfig, cfg.Scheme) + if err != nil { + return nil, fmt.Errorf("failed to get initial TLS security profile: %v", err) + } + cfg.TLSConfigProvider = controllers.NewTLSConfigProvider(getCertificate, cfg.InitialTLSProfileSpec) + return cfg, nil +} + +func logConfig(cfg *startConfig, log logr.Logger) { + log.Info("starting lifecycle-controller", "version", cfg.Version) + log.Info("config", "lifecycleServerImage", cfg.ServerImage) + if !cfg.CatalogSourceLabelSelector.Empty() { + log.Info("config", "catalogSourceLabelSelector", cfg.CatalogSourceLabelSelector.String()) + } + if !cfg.CatalogSourceFieldSelector.Empty() { + log.Info("config", "catalogSourceFieldSelector", cfg.CatalogSourceFieldSelector.String()) + } + tlsProfile, unsupportedCiphers := cfg.TLSConfigProvider.Get() + log.Info("config", "tlsMinVersion", 
crypto.TLSVersionToNameOrDie(tlsProfile.MinVersion)) + log.Info("config", "tlsCipherSuites", crypto.CipherSuitesToNamesOrDie(tlsProfile.CipherSuites)) + if len(unsupportedCiphers) > 0 { + log.Error(errors.New("ignored config"), "unsupported TLS cipher suites", "tlsCipherSuites", unsupportedCiphers) + } +} + +func getInitialTLSProfile(ctx context.Context, restConfig *rest.Config, sch *runtime.Scheme) (configv1.TLSProfileSpec, bool, error) { + cl, err := client.New(restConfig, client.Options{Scheme: sch}) + if err != nil { + return configv1.TLSProfileSpec{}, false, fmt.Errorf("failed to create client: %v", err) + } + initialTLSProfileSpec, err := tlsutil.FetchAPIServerTLSProfile(ctx, cl) + if err != nil { + return *configv1.TLSProfiles[crypto.DefaultTLSProfileType], false, nil + } + return initialTLSProfileSpec, true, nil +} + +func setupManager(cfg *startConfig) (manager.Manager, error) { + mgr, err := ctrl.NewManager(cfg.RESTConfig, manager.Options{ + Scheme: cfg.Scheme, + Metrics: metricsserver.Options{ + BindAddress: metricsAddr, + SecureServing: true, + FilterProvider: metricsfilters.WithAuthenticationAndAuthorization, + TLSOpts: []func(*tls.Config){func(tlsConfig *tls.Config) { + tlsConfig.GetConfigForClient = func(*tls.ClientHelloInfo) (*tls.Config, error) { + tlsCfg, _ := cfg.TLSConfigProvider.Get() + return tlsCfg, nil + } + }}, + }, + LeaderElection: !cfg.LeaderElection.Disable, + LeaderElectionNamespace: cfg.Namespace, + LeaderElectionID: leaderElectionID, + LeaseDuration: &cfg.LeaderElection.LeaseDuration.Duration, + RenewDeadline: &cfg.LeaderElection.RenewDeadline.Duration, + RetryPeriod: &cfg.LeaderElection.RetryPeriod.Duration, + HealthProbeBindAddress: healthCheckAddr, + LeaderElectionReleaseOnCancel: true, + Cache: cache.Options{ + ByObject: map[client.Object]cache.ByObject{ + &operatorsv1alpha1.CatalogSource{}: {}, + &corev1.Pod{}: { + Label: catalogPodLabelSelector(), + }, + &appsv1.Deployment{}: { + Label: 
controllers.LifecycleServerLabelSelector(), + }, + &corev1.ServiceAccount{}: { + Label: controllers.LifecycleServerLabelSelector(), + }, + &corev1.Service{}: { + Label: controllers.LifecycleServerLabelSelector(), + }, + &networkingv1.NetworkPolicy{}: { + Label: controllers.LifecycleServerLabelSelector(), + }, + &configv1.APIServer{}: { + Field: fields.SelectorFromSet(fields.Set{"metadata.name": "cluster"}), + }, + }, + }, + }) + if err != nil { + return nil, fmt.Errorf("failed to create manager: %v", err) + } + + // Add health check endpoint (used for both liveness and readiness probes) + if err := mgr.AddHealthzCheck("healthz", func(req *http.Request) error { + return nil + }); err != nil { + return nil, fmt.Errorf("failed to configure health check handler: %v", err) + } + return mgr, nil +} + +func setupTLSProfileWatcher(mgr manager.Manager, cfg *startConfig) (chan event.TypedGenericEvent[configv1.TLSProfileSpec], error) { + tlsChangeChan := make(chan event.TypedGenericEvent[configv1.TLSProfileSpec]) + + if !cfg.EnableTLSProfileWatcher { + return tlsChangeChan, nil + } + + log := ctrl.Log.WithName("tls-profile") + tlsProfileReconciler := tlsutil.SecurityProfileWatcher{ + Client: mgr.GetClient(), + InitialTLSProfileSpec: cfg.InitialTLSProfileSpec, + OnProfileChange: func(ctx context.Context, oldTLSProfileSpec, newTLSProfileSpec configv1.TLSProfileSpec) { + cfg.TLSConfigProvider.UpdateProfile(newTLSProfileSpec) + log.Info("applying new TLS profile spec", + "minVersion", newTLSProfileSpec.MinTLSVersion, + "cipherSuites", newTLSProfileSpec.Ciphers, + ) + + _, unsupportedCiphers := cfg.TLSConfigProvider.Get() + if len(unsupportedCiphers) > 0 { + log.Info("ignoring unsupported ciphers found in TLS profile", "unsupportedCiphers", unsupportedCiphers) + } + tlsChangeChan <- event.TypedGenericEvent[configv1.TLSProfileSpec]{Object: newTLSProfileSpec} + }, + } + + if err := tlsProfileReconciler.SetupWithManager(mgr); err != nil { + return nil, err + } + return tlsChangeChan, 
nil +} + +func setupLifecycleServerController(mgr manager.Manager, cfg *startConfig, tlsProfileChan <-chan event.TypedGenericEvent[configv1.TLSProfileSpec]) error { + reconciler := &controllers.LifecycleServerReconciler{ + Client: mgr.GetClient(), + Log: ctrl.Log.WithName("controllers").WithName("lifecycle-server"), + Scheme: mgr.GetScheme(), + ServerImage: cfg.ServerImage, + CatalogSourceLabelSelector: cfg.CatalogSourceLabelSelector, + CatalogSourceFieldSelector: cfg.CatalogSourceFieldSelector, + TLSConfigProvider: cfg.TLSConfigProvider, + } + + if err := reconciler.SetupWithManager(mgr, tlsProfileChan); err != nil { + return fmt.Errorf("unable to setup lifecycle server controller: %v", err) + } + return nil +} diff --git a/cmd/lifecycle-controller/util.go b/cmd/lifecycle-controller/util.go new file mode 100644 index 0000000000..14f0bf76da --- /dev/null +++ b/cmd/lifecycle-controller/util.go @@ -0,0 +1,34 @@ +package main + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/selection" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + + configv1 "github.com/openshift/api/config/v1" + operatorsv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1" +) + +func setupScheme() *runtime.Scheme { + scheme := runtime.NewScheme() + utilruntime.Must(clientgoscheme.AddToScheme(scheme)) + utilruntime.Must(operatorsv1alpha1.AddToScheme(scheme)) + utilruntime.Must(configv1.AddToScheme(scheme)) + + return scheme +} + +// catalogPodLabelSelector returns a label selector matching pods with olm.catalogSource label +func catalogPodLabelSelector() labels.Selector { + // This call cannot fail: the label key is valid and selection.Exists requires no values. 
+ req, err := labels.NewRequirement("olm.catalogSource", selection.Exists, nil) + if err != nil { + // Panic on impossible error to satisfy static analysis and catch programming errors + panic(fmt.Sprintf("BUG: failed to create label requirement: %v", err)) + } + return labels.NewSelector().Add(*req) +} diff --git a/cmd/lifecycle-server/main.go b/cmd/lifecycle-server/main.go new file mode 100644 index 0000000000..56584328e9 --- /dev/null +++ b/cmd/lifecycle-server/main.go @@ -0,0 +1,22 @@ +package main + +import ( + "fmt" + "os" + + "github.com/spf13/cobra" +) + +func main() { + rootCmd := &cobra.Command{ + Use: "lifecycle-server", + Short: "Lifecycle Metadata Server for OLM", + } + + rootCmd.AddCommand(newStartCmd()) + + if err := rootCmd.Execute(); err != nil { + fmt.Fprintf(os.Stderr, "error running lifecycle-server: %v\n", err) + os.Exit(1) + } +} diff --git a/cmd/lifecycle-server/start.go b/cmd/lifecycle-server/start.go new file mode 100644 index 0000000000..15e8200422 --- /dev/null +++ b/cmd/lifecycle-server/start.go @@ -0,0 +1,225 @@ +package main + +import ( + "context" + "crypto/tls" + "errors" + "fmt" + "net/http" + "time" + + "github.com/openshift/library-go/pkg/crypto" + "github.com/spf13/cobra" + "golang.org/x/sync/errgroup" + "k8s.io/client-go/rest" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/metrics/filters" + + "k8s.io/klog/v2" + + server "github.com/openshift/operator-framework-olm/pkg/lifecycle-server" +) + +const ( + defaultFBCPath = "/catalog/configs" + defaultListenAddr = ":8443" + defaultHealthAddr = ":8081" + defaultTLSCertPath = "/var/run/secrets/serving-cert/tls.crt" + defaultTLSKeyPath = "/var/run/secrets/serving-cert/tls.key" + shutdownTimeout = 10 * time.Second +) + +var ( + fbcPath string + listenAddr string + healthAddr string + tlsCertPath string + tlsKeyPath string + tlsMinVersionStr string + tlsCipherSuiteStrs []string +) + +func newStartCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: 
"start", + Short: "Start the Lifecycle Server", + SilenceUsage: true, + RunE: run, + } + + cmd.Flags().StringVar(&fbcPath, "fbc-path", defaultFBCPath, "path to FBC catalog data") + cmd.Flags().StringVar(&listenAddr, "listen", defaultListenAddr, "address to listen on for HTTPS API") + cmd.Flags().StringVar(&healthAddr, "health", defaultHealthAddr, "address to listen on for health checks") + cmd.Flags().StringVar(&tlsCertPath, "tls-cert", defaultTLSCertPath, "path to TLS certificate") + cmd.Flags().StringVar(&tlsKeyPath, "tls-key", defaultTLSKeyPath, "path to TLS private key") + cmd.Flags().StringVar(&tlsMinVersionStr, "tls-min-version", "", "minimum TLS version") + cmd.Flags().StringSliceVar(&tlsCipherSuiteStrs, "tls-cipher-suites", nil, "comma-separated list of cipher suites") + + return cmd +} + +func parseTLSFlags(certPath, keyPath, minVersionStr string, cipherSuiteStrs []string) (*tls.Config, error) { + // Using a function to load the keypair each time means that we automatically pick up the new certificate when it reloads. 
+ getCertificate := func(_ *tls.ClientHelloInfo) (*tls.Certificate, error) { + cert, err := tls.LoadX509KeyPair(certPath, keyPath) + if err != nil { + return nil, err + } + return &cert, nil + } + if _, err := getCertificate(nil); err != nil { + return nil, fmt.Errorf("unable to load TLS certificate: %v", err) + } + + minVersion, err := crypto.TLSVersion(minVersionStr) + if err != nil { + return nil, fmt.Errorf("invalid TLS minimum version: %s", minVersionStr) + } + + var ( + cipherSuites []uint16 + cipherSuiteErrs []error + ) + for _, tlsCipherSuiteStr := range cipherSuiteStrs { + tlsCipherSuite, err := crypto.CipherSuite(tlsCipherSuiteStr) + if err != nil { + cipherSuiteErrs = append(cipherSuiteErrs, err) + } else { + cipherSuites = append(cipherSuites, tlsCipherSuite) + } + } + if len(cipherSuiteErrs) != 0 { + return nil, fmt.Errorf("invalid TLS cipher suites: %v", errors.Join(cipherSuiteErrs...)) + } + + return &tls.Config{ + GetCertificate: getCertificate, + MinVersion: minVersion, + CipherSuites: cipherSuites, + }, nil +} + +func run(_ *cobra.Command, _ []string) error { + log := klog.NewKlogr() + log.Info("starting lifecycle-server") + + tlsConfig, err := parseTLSFlags(tlsCertPath, tlsKeyPath, tlsMinVersionStr, tlsCipherSuiteStrs) + if err != nil { + return fmt.Errorf("failed to parse tls flags: %w", err) + } + + // Create Kubernetes client for authn/authz + restCfg := ctrl.GetConfigOrDie() + httpClient, err := rest.HTTPClientFor(restCfg) + if err != nil { + log.Error(err, "failed to create http client") + return err + } + + authnzFilter, err := filters.WithAuthenticationAndAuthorization(restCfg, httpClient) + if err != nil { + log.Error(err, "failed to create authorization filter") + return err + } + + // Load lifecycle data from FBC + log.Info("loading lifecycle data from FBC", "path", fbcPath) + data, err := server.LoadLifecycleData(fbcPath) + if err != nil { + log.Error(err, "failed to load lifecycle data, starting with empty data") + data = 
make(server.LifecycleIndex) + } + log.Info("loaded lifecycle data", + "packageCount", data.CountPackages(), + "blobCount", data.CountBlobs(), + "versions", data.ListVersions(), + ) + + // Create HTTP apiHandler with authn/authz middleware + baseHandler := server.NewHandler(data, log) + apiHandler, err := authnzFilter(log, baseHandler) + if err != nil { + log.Error(err, "failed to create api handler") + return err + } + + // Create health apiHandler (no auth required) + healthHandler := http.NewServeMux() + healthHandler.HandleFunc("GET /healthz", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte("ok")) + }) + + // Create servers + apiServer := cancelableServer{ + Server: &http.Server{ + Addr: listenAddr, + Handler: apiHandler, + TLSConfig: tlsConfig, + }, + ShutdownTimeout: shutdownTimeout, + } + healthServer := cancelableServer{ + Server: &http.Server{ + Addr: healthAddr, + Handler: healthHandler, + }, + ShutdownTimeout: shutdownTimeout, + } + + eg, ctx := errgroup.WithContext(ctrl.SetupSignalHandler()) + eg.Go(func() error { + if err := apiServer.ListenAndServeTLS(ctx, "", ""); err != nil { + return fmt.Errorf("api server error: %w", err) + } + return nil + }) + eg.Go(func() error { + if err := healthServer.ListenAndServe(ctx); err != nil { + return fmt.Errorf("health server error: %w", err) + } + return nil + }) + return eg.Wait() +} + +type cancelableServer struct { + *http.Server + ShutdownTimeout time.Duration +} + +func (s *cancelableServer) ListenAndServe(ctx context.Context) error { + return s.listenAndServe(ctx, + func() error { + return s.Server.ListenAndServe() + }, + s.Server.Shutdown, + ) +} +func (s *cancelableServer) ListenAndServeTLS(ctx context.Context, certFile, keyFile string) error { + return s.listenAndServe(ctx, + func() error { + return s.Server.ListenAndServeTLS(certFile, keyFile) + }, + s.Server.Shutdown, + ) +} + +func (s *cancelableServer) listenAndServe(ctx context.Context, runFunc 
func() error, cancelFunc func(context.Context) error) error { + errChan := make(chan error, 1) + go func() { + errChan <- runFunc() + }() + + select { + case err := <-errChan: + return err + case <-ctx.Done(): + shutdownCtx, cancel := context.WithTimeout(context.Background(), s.ShutdownTimeout) + defer cancel() + if err := cancelFunc(shutdownCtx); err != nil { + return err + } + return nil + } +} diff --git a/cmd/lifecycle-server/util.go b/cmd/lifecycle-server/util.go new file mode 100644 index 0000000000..cd6410f5f2 --- /dev/null +++ b/cmd/lifecycle-server/util.go @@ -0,0 +1,14 @@ +package main + +import ( + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" +) + +func setupScheme() *runtime.Scheme { + scheme := runtime.NewScheme() + utilruntime.Must(clientgoscheme.AddToScheme(scheme)) + + return scheme +} diff --git a/go.mod b/go.mod index 1ac435dde8..cc40db3788 100644 --- a/go.mod +++ b/go.mod @@ -13,12 +13,15 @@ require ( github.com/mikefarah/yq/v3 v3.0.0-20201202084205-8846255d1c37 github.com/onsi/ginkgo/v2 v2.28.1 github.com/openshift/api v0.0.0-20260204104751-e09e5a4ebcd0 + github.com/openshift/controller-runtime-common v0.0.0-20260204183245-642129afd14f + github.com/openshift/library-go v0.0.0-20260205095356-7bced6e899b6 github.com/operator-framework/api v0.39.0 github.com/operator-framework/operator-lifecycle-manager v0.0.0-00010101000000-000000000000 github.com/operator-framework/operator-registry v1.63.0 github.com/sirupsen/logrus v1.9.4 github.com/spf13/cobra v1.10.2 github.com/stretchr/testify v1.11.1 + golang.org/x/sync v0.19.0 google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.6.0 google.golang.org/protobuf v1.36.11 gopkg.in/yaml.v2 v2.4.0 @@ -26,6 +29,7 @@ require ( k8s.io/apimachinery v0.35.0 k8s.io/client-go v0.35.0 k8s.io/code-generator v0.35.0 + k8s.io/klog/v2 v2.130.1 k8s.io/kube-openapi v0.0.0-20260127142750-a19766b6e2d4 k8s.io/utils 
v0.0.0-20260108192941-914a6e750570 sigs.k8s.io/controller-runtime v0.23.1 @@ -157,7 +161,6 @@ require ( github.com/opencontainers/image-spec v1.1.1 // indirect github.com/opencontainers/runtime-spec v1.3.0 // indirect github.com/openshift/client-go v0.0.0-20260108185524-48f4ccfc4e13 // indirect - github.com/openshift/library-go v0.0.0-20260204111611-b7d4fa0e292a // indirect github.com/otiai10/copy v1.14.1 // indirect github.com/otiai10/mint v1.6.3 // indirect github.com/pkg/errors v0.9.1 // indirect @@ -209,7 +212,6 @@ require ( golang.org/x/mod v0.32.0 // indirect golang.org/x/net v0.49.0 // indirect golang.org/x/oauth2 v0.34.0 // indirect - golang.org/x/sync v0.19.0 // indirect golang.org/x/sys v0.40.0 // indirect golang.org/x/term v0.39.0 // indirect golang.org/x/text v0.33.0 // indirect @@ -232,7 +234,6 @@ require ( k8s.io/component-base v0.35.0 // indirect k8s.io/gengo/v2 v2.0.0-20250922181213-ec3ebc5fd46b // indirect k8s.io/klog v1.0.0 // indirect - k8s.io/klog/v2 v2.130.1 // indirect k8s.io/kms v0.35.0 // indirect k8s.io/kube-aggregator v0.35.0 // indirect k8s.io/kubectl v0.35.0 // indirect @@ -250,3 +251,5 @@ replace ( github.com/operator-framework/operator-lifecycle-manager => ./staging/operator-lifecycle-manager github.com/operator-framework/operator-registry => ./staging/operator-registry ) + +replace github.com/openshift/controller-runtime-common => github.com/joelanford/controller-runtime-common v0.0.0-20260206162334-afe447e6c57e diff --git a/go.sum b/go.sum index 37bff5e05d..1bf7006558 100644 --- a/go.sum +++ b/go.sum @@ -316,6 +316,8 @@ github.com/itchyny/timefmt-go v0.1.7 h1:xyftit9Tbw+Dc/huSSPJaEmX1TVL8lw5vxjJLK4G github.com/itchyny/timefmt-go v0.1.7/go.mod h1:5E46Q+zj7vbTgWY8o5YkMeYb4I6GeWLFnetPy5oBrAI= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= 
+github.com/joelanford/controller-runtime-common v0.0.0-20260206162334-afe447e6c57e h1:QKmwy5dnsX8Gik2J3wliX3IXSpZPIWyCAlL2xi3y/do= +github.com/joelanford/controller-runtime-common v0.0.0-20260206162334-afe447e6c57e/go.mod h1:v50YKO19Utu2nn7jsWwMFZXmO+HSHdkeDdEdNvOSy88= github.com/joelanford/ignore v0.1.1 h1:vKky5RDoPT+WbONrbQBgOn95VV/UPh4ejlyAbbzgnQk= github.com/joelanford/ignore v0.1.1/go.mod h1:8eho/D8fwQ3rIXrLwE23AaeaGDNXqLE9QJ3zJ4LIPCw= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= @@ -430,8 +432,8 @@ github.com/openshift/api v0.0.0-20260204104751-e09e5a4ebcd0 h1:mj1uTiMB24CUakpEc github.com/openshift/api v0.0.0-20260204104751-e09e5a4ebcd0/go.mod h1:d5uzF0YN2nQQFA0jIEWzzOZ+edmo6wzlGLvx5Fhz4uY= github.com/openshift/client-go v0.0.0-20260108185524-48f4ccfc4e13 h1:6rd4zSo2UaWQcAPZfHK9yzKVqH0BnMv1hqMzqXZyTds= github.com/openshift/client-go v0.0.0-20260108185524-48f4ccfc4e13/go.mod h1:YvOmPmV7wcJxpfhTDuFqqs2Xpb3M3ovsM6Qs/i2ptq4= -github.com/openshift/library-go v0.0.0-20260204111611-b7d4fa0e292a h1:YLnZtVfqGUfTbQ+M06QAslEmP4WrnRoPrk4AtoBJdm8= -github.com/openshift/library-go v0.0.0-20260204111611-b7d4fa0e292a/go.mod h1:DCRz1EgdayEmr9b6KXKDL+DWBN0rGHu/VYADeHzPoOk= +github.com/openshift/library-go v0.0.0-20260205095356-7bced6e899b6 h1:YoT3Q+9/I3QMicrayX7ZwGZh8BFVKjaVat2gdMd8Ads= +github.com/openshift/library-go v0.0.0-20260205095356-7bced6e899b6/go.mod h1:DCRz1EgdayEmr9b6KXKDL+DWBN0rGHu/VYADeHzPoOk= github.com/otiai10/copy v1.14.1 h1:5/7E6qsUMBaH5AnQ0sSLzzTg1oTECmcCmT6lvF45Na8= github.com/otiai10/copy v1.14.1/go.mod h1:oQwrEDDOci3IM8dJF0d8+jnbfPDllW6vUjNc3DoZm9I= github.com/otiai10/mint v1.6.3 h1:87qsV/aw1F5as1eH1zS/yqHY85ANKVMgkDrf9rcxbQs= diff --git a/manifests/0000_50_olm_00-catalogsources.crd.yaml b/manifests/0000_50_olm_00-catalogsources.crd.yaml index d0455ae902..6679b6cc80 100644 --- a/manifests/0000_50_olm_00-catalogsources.crd.yaml +++ b/manifests/0000_50_olm_00-catalogsources.crd.yaml @@ -1095,9 +1095,7 @@ 
spec: publisher: type: string runAsRoot: - description: |- - RunAsRoot allows admins to indicate that they wish to run the CatalogSource pod in a privileged - pod as root. This should only be enabled when running older catalog images which could not be run as non-root. + description: RunAsRoot allows admins to indicate that they wish to run the CatalogSource pod in a privileged pod as root. This should only be enabled when running older catalog images which could not be run as non-root. type: boolean secrets: description: |- diff --git a/manifests/0000_50_olm_00-clusterserviceversions.crd.yaml b/manifests/0000_50_olm_00-clusterserviceversions.crd.yaml index b3ffad4856..4fb38974e7 100644 --- a/manifests/0000_50_olm_00-clusterserviceversions.crd.yaml +++ b/manifests/0000_50_olm_00-clusterserviceversions.crd.yaml @@ -4712,6 +4712,7 @@ spec: ip: description: IP address of the host file entry. type: string + default: "" x-kubernetes-list-map-keys: - ip x-kubernetes-list-type: map diff --git a/manifests/0000_50_olm_08-lifecycle-controller.deployment.ibm-cloud-managed.yaml b/manifests/0000_50_olm_08-lifecycle-controller.deployment.ibm-cloud-managed.yaml new file mode 100644 index 0000000000..e49a80fecd --- /dev/null +++ b/manifests/0000_50_olm_08-lifecycle-controller.deployment.ibm-cloud-managed.yaml @@ -0,0 +1,110 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: lifecycle-controller + namespace: openshift-operator-lifecycle-manager + labels: + app: olm-lifecycle-controller + annotations: + release.openshift.io/feature-set: "TechPreviewNoUpgrade" + include.release.openshift.io/ibm-cloud-managed: "true" + capability.openshift.io/name: "OperatorLifecycleManager" + include.release.openshift.io/hypershift: "true" +spec: + strategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 0 + maxSurge: 1 + replicas: 1 + selector: + matchLabels: + app: olm-lifecycle-controller + template: + metadata: + annotations: + target.workload.openshift.io/management: 
'{"effect": "PreferredDuringScheduling"}' + openshift.io/required-scc: restricted-v2 + kubectl.kubernetes.io/default-container: lifecycle-controller + labels: + app: olm-lifecycle-controller + spec: + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + serviceAccountName: lifecycle-controller + priorityClassName: "system-cluster-critical" + containers: + - name: lifecycle-controller + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: + drop: ["ALL"] + command: + - /bin/lifecycle-controller + args: + - start + - --catalog-source-field-selector=metadata.namespace=openshift-marketplace,metadata.name=redhat-operators + - --tls-cert=/var/run/secrets/serving-cert/tls.crt + - --tls-key=/var/run/secrets/serving-cert/tls.key + image: quay.io/operator-framework/olm@sha256:de396b540b82219812061d0d753440d5655250c621c753ed1dc67d6154741607 + imagePullPolicy: IfNotPresent + env: + - name: RELEASE_VERSION + value: "0.0.1-snapshot" + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LIFECYCLE_SERVER_IMAGE + value: quay.io/operator-framework/olm@sha256:de396b540b82219812061d0d753440d5655250c621c753ed1dc67d6154741607 + - name: GOMEMLIMIT + value: "5MiB" + resources: + requests: + cpu: 10m + memory: 10Mi + ports: + - containerPort: 8081 + name: health + - containerPort: 8443 + name: metrics + protocol: TCP + volumeMounts: + - name: serving-cert + mountPath: /var/run/secrets/serving-cert + readOnly: true + livenessProbe: + httpGet: + path: /healthz + port: health + scheme: HTTP + initialDelaySeconds: 30 + readinessProbe: + httpGet: + path: /healthz + port: health + scheme: HTTP + initialDelaySeconds: 30 + terminationMessagePolicy: FallbackToLogsOnError + nodeSelector: + kubernetes.io/os: linux + node-role.kubernetes.io/control-plane: "" + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + - effect: NoExecute + key: 
node.kubernetes.io/unreachable + operator: Exists + tolerationSeconds: 120 + - effect: NoExecute + key: node.kubernetes.io/not-ready + operator: Exists + tolerationSeconds: 120 + volumes: + - name: serving-cert + secret: + secretName: lifecycle-controller-serving-cert diff --git a/manifests/0000_50_olm_08-lifecycle-controller.deployment.yaml b/manifests/0000_50_olm_08-lifecycle-controller.deployment.yaml new file mode 100644 index 0000000000..1c1cd18f2f --- /dev/null +++ b/manifests/0000_50_olm_08-lifecycle-controller.deployment.yaml @@ -0,0 +1,109 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: lifecycle-controller + namespace: openshift-operator-lifecycle-manager + labels: + app: olm-lifecycle-controller + annotations: + release.openshift.io/feature-set: "TechPreviewNoUpgrade" + include.release.openshift.io/self-managed-high-availability: "true" + capability.openshift.io/name: "OperatorLifecycleManager" +spec: + strategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 0 + maxSurge: 1 + replicas: 1 + selector: + matchLabels: + app: olm-lifecycle-controller + template: + metadata: + annotations: + target.workload.openshift.io/management: '{"effect": "PreferredDuringScheduling"}' + openshift.io/required-scc: restricted-v2 + kubectl.kubernetes.io/default-container: lifecycle-controller + labels: + app: olm-lifecycle-controller + spec: + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + serviceAccountName: lifecycle-controller + priorityClassName: "system-cluster-critical" + containers: + - name: lifecycle-controller + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: + drop: ["ALL"] + command: + - /bin/lifecycle-controller + args: + - start + - --catalog-source-field-selector=metadata.namespace=openshift-marketplace,metadata.name=redhat-operators + - --tls-cert=/var/run/secrets/serving-cert/tls.crt + - --tls-key=/var/run/secrets/serving-cert/tls.key + image: 
quay.io/operator-framework/olm@sha256:de396b540b82219812061d0d753440d5655250c621c753ed1dc67d6154741607 + imagePullPolicy: IfNotPresent + env: + - name: RELEASE_VERSION + value: "0.0.1-snapshot" + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LIFECYCLE_SERVER_IMAGE + value: quay.io/operator-framework/olm@sha256:de396b540b82219812061d0d753440d5655250c621c753ed1dc67d6154741607 + - name: GOMEMLIMIT + value: "5MiB" + resources: + requests: + cpu: 10m + memory: 10Mi + ports: + - containerPort: 8081 + name: health + - containerPort: 8443 + name: metrics + protocol: TCP + volumeMounts: + - name: serving-cert + mountPath: /var/run/secrets/serving-cert + readOnly: true + livenessProbe: + httpGet: + path: /healthz + port: health + scheme: HTTP + initialDelaySeconds: 30 + readinessProbe: + httpGet: + path: /healthz + port: health + scheme: HTTP + initialDelaySeconds: 30 + terminationMessagePolicy: FallbackToLogsOnError + nodeSelector: + kubernetes.io/os: linux + node-role.kubernetes.io/control-plane: "" + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + - effect: NoExecute + key: node.kubernetes.io/unreachable + operator: Exists + tolerationSeconds: 120 + - effect: NoExecute + key: node.kubernetes.io/not-ready + operator: Exists + tolerationSeconds: 120 + volumes: + - name: serving-cert + secret: + secretName: lifecycle-controller-serving-cert diff --git a/manifests/0000_50_olm_08-lifecycle-controller.networkpolicy.yaml b/manifests/0000_50_olm_08-lifecycle-controller.networkpolicy.yaml new file mode 100644 index 0000000000..c08803d707 --- /dev/null +++ b/manifests/0000_50_olm_08-lifecycle-controller.networkpolicy.yaml @@ -0,0 +1,35 @@ +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: lifecycle-controller + namespace: openshift-operator-lifecycle-manager + annotations: + release.openshift.io/feature-set: "TechPreviewNoUpgrade" + 
include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + capability.openshift.io/name: "OperatorLifecycleManager" + include.release.openshift.io/hypershift: "true" +spec: + podSelector: + matchLabels: + app: olm-lifecycle-controller + ingress: + - ports: + - port: 8443 + protocol: TCP + egress: + - ports: + - port: 6443 + protocol: TCP + - ports: + - port: 53 + protocol: TCP + - port: 53 + protocol: UDP + - port: 5353 + protocol: TCP + - port: 5353 + protocol: UDP + policyTypes: + - Ingress + - Egress diff --git a/manifests/0000_50_olm_08-lifecycle-controller.rbac.yaml b/manifests/0000_50_olm_08-lifecycle-controller.rbac.yaml new file mode 100644 index 0000000000..24da8ffaf7 --- /dev/null +++ b/manifests/0000_50_olm_08-lifecycle-controller.rbac.yaml @@ -0,0 +1,91 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: lifecycle-controller + namespace: openshift-operator-lifecycle-manager + annotations: + release.openshift.io/feature-set: "TechPreviewNoUpgrade" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + capability.openshift.io/name: "OperatorLifecycleManager" + include.release.openshift.io/hypershift: "true" +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: operator-lifecycle-manager-lifecycle-controller + annotations: + release.openshift.io/feature-set: "TechPreviewNoUpgrade" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + capability.openshift.io/name: "OperatorLifecycleManager" + include.release.openshift.io/hypershift: "true" +rules: + # Read APIServer for TLS security profile configuration + - apiGroups: ["config.openshift.io"] + resources: ["apiservers"] + verbs: ["get", "list", "watch"] + # Watch CatalogSources cluster-wide + - apiGroups: ["operators.coreos.com"] + resources: 
["catalogsources"] + verbs: ["get", "list", "watch"] + # Watch catalog pods cluster-wide + - apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list", "watch"] + # Manage lifecycle-server deployments + - apiGroups: ["apps"] + resources: ["deployments"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + # Manage lifecycle-server services + - apiGroups: [""] + resources: ["services"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + # Manage lifecycle-server serviceaccounts + - apiGroups: [""] + resources: ["serviceaccounts"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + # Manage lifecycle-server networkpolicies + - apiGroups: ["networking.k8s.io"] + resources: ["networkpolicies"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + # Manage lifecycle-server clusterrolebindings + - apiGroups: ["rbac.authorization.k8s.io"] + resources: ["clusterrolebindings"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + # Required to grant these permissions to lifecycle-server via CRB + - apiGroups: ["authentication.k8s.io"] + resources: ["tokenreviews"] + verbs: ["create"] + - apiGroups: ["authorization.k8s.io"] + resources: ["subjectaccessreviews"] + verbs: ["create"] + # Leader election + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "list", "watch", "create", "update", "delete"] + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "list", "watch", "create", "update", "delete"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: operator-lifecycle-manager-lifecycle-controller + annotations: + release.openshift.io/feature-set: "TechPreviewNoUpgrade" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + 
capability.openshift.io/name: "OperatorLifecycleManager" + include.release.openshift.io/hypershift: "true" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: operator-lifecycle-manager-lifecycle-controller +subjects: + - kind: ServiceAccount + name: lifecycle-controller + namespace: openshift-operator-lifecycle-manager diff --git a/manifests/0000_50_olm_08-lifecycle-controller.service.yaml b/manifests/0000_50_olm_08-lifecycle-controller.service.yaml new file mode 100644 index 0000000000..995d6e6677 --- /dev/null +++ b/manifests/0000_50_olm_08-lifecycle-controller.service.yaml @@ -0,0 +1,21 @@ +apiVersion: v1 +kind: Service +metadata: + name: lifecycle-controller + namespace: openshift-operator-lifecycle-manager + annotations: + release.openshift.io/feature-set: "TechPreviewNoUpgrade" + service.beta.openshift.io/serving-cert-secret-name: lifecycle-controller-serving-cert + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + capability.openshift.io/name: "OperatorLifecycleManager" + include.release.openshift.io/hypershift: "true" +spec: + ports: + - name: metrics + port: 8443 + protocol: TCP + targetPort: metrics + selector: + app: olm-lifecycle-controller + type: ClusterIP diff --git a/manifests/0000_50_olm_09-lifecycle-server.rbac.yaml b/manifests/0000_50_olm_09-lifecycle-server.rbac.yaml new file mode 100644 index 0000000000..d848837106 --- /dev/null +++ b/manifests/0000_50_olm_09-lifecycle-server.rbac.yaml @@ -0,0 +1,18 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: operator-lifecycle-manager-lifecycle-server + annotations: + release.openshift.io/feature-set: "TechPreviewNoUpgrade" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + capability.openshift.io/name: "OperatorLifecycleManager" + include.release.openshift.io/hypershift: "true" +rules: + # 
Required by kube-rbac-proxy for authn/authz + - apiGroups: ["authentication.k8s.io"] + resources: ["tokenreviews"] + verbs: ["create"] + - apiGroups: ["authorization.k8s.io"] + resources: ["subjectaccessreviews"] + verbs: ["create"] diff --git a/microshift-manifests/0000_50_olm_00-catalogsources.crd.yaml b/microshift-manifests/0000_50_olm_00-catalogsources.crd.yaml index d0455ae902..6679b6cc80 100644 --- a/microshift-manifests/0000_50_olm_00-catalogsources.crd.yaml +++ b/microshift-manifests/0000_50_olm_00-catalogsources.crd.yaml @@ -1095,9 +1095,7 @@ spec: publisher: type: string runAsRoot: - description: |- - RunAsRoot allows admins to indicate that they wish to run the CatalogSource pod in a privileged - pod as root. This should only be enabled when running older catalog images which could not be run as non-root. + description: RunAsRoot allows admins to indicate that they wish to run the CatalogSource pod in a privileged pod as root. This should only be enabled when running older catalog images which could not be run as non-root. type: boolean secrets: description: |- diff --git a/microshift-manifests/0000_50_olm_00-clusterserviceversions.crd.yaml b/microshift-manifests/0000_50_olm_00-clusterserviceversions.crd.yaml index b3ffad4856..4fb38974e7 100644 --- a/microshift-manifests/0000_50_olm_00-clusterserviceversions.crd.yaml +++ b/microshift-manifests/0000_50_olm_00-clusterserviceversions.crd.yaml @@ -4712,6 +4712,7 @@ spec: ip: description: IP address of the host file entry. 
type: string + default: "" x-kubernetes-list-map-keys: - ip x-kubernetes-list-type: map diff --git a/microshift-manifests/0000_50_olm_08-lifecycle-controller.deployment.ibm-cloud-managed.yaml b/microshift-manifests/0000_50_olm_08-lifecycle-controller.deployment.ibm-cloud-managed.yaml new file mode 100644 index 0000000000..e49a80fecd --- /dev/null +++ b/microshift-manifests/0000_50_olm_08-lifecycle-controller.deployment.ibm-cloud-managed.yaml @@ -0,0 +1,110 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: lifecycle-controller + namespace: openshift-operator-lifecycle-manager + labels: + app: olm-lifecycle-controller + annotations: + release.openshift.io/feature-set: "TechPreviewNoUpgrade" + include.release.openshift.io/ibm-cloud-managed: "true" + capability.openshift.io/name: "OperatorLifecycleManager" + include.release.openshift.io/hypershift: "true" +spec: + strategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 0 + maxSurge: 1 + replicas: 1 + selector: + matchLabels: + app: olm-lifecycle-controller + template: + metadata: + annotations: + target.workload.openshift.io/management: '{"effect": "PreferredDuringScheduling"}' + openshift.io/required-scc: restricted-v2 + kubectl.kubernetes.io/default-container: lifecycle-controller + labels: + app: olm-lifecycle-controller + spec: + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + serviceAccountName: lifecycle-controller + priorityClassName: "system-cluster-critical" + containers: + - name: lifecycle-controller + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: + drop: ["ALL"] + command: + - /bin/lifecycle-controller + args: + - start + - --catalog-source-field-selector=metadata.namespace=openshift-marketplace,metadata.name=redhat-operators + - --tls-cert=/var/run/secrets/serving-cert/tls.crt + - --tls-key=/var/run/secrets/serving-cert/tls.key + image: 
quay.io/operator-framework/olm@sha256:de396b540b82219812061d0d753440d5655250c621c753ed1dc67d6154741607 + imagePullPolicy: IfNotPresent + env: + - name: RELEASE_VERSION + value: "0.0.1-snapshot" + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LIFECYCLE_SERVER_IMAGE + value: quay.io/operator-framework/olm@sha256:de396b540b82219812061d0d753440d5655250c621c753ed1dc67d6154741607 + - name: GOMEMLIMIT + value: "5MiB" + resources: + requests: + cpu: 10m + memory: 10Mi + ports: + - containerPort: 8081 + name: health + - containerPort: 8443 + name: metrics + protocol: TCP + volumeMounts: + - name: serving-cert + mountPath: /var/run/secrets/serving-cert + readOnly: true + livenessProbe: + httpGet: + path: /healthz + port: health + scheme: HTTP + initialDelaySeconds: 30 + readinessProbe: + httpGet: + path: /healthz + port: health + scheme: HTTP + initialDelaySeconds: 30 + terminationMessagePolicy: FallbackToLogsOnError + nodeSelector: + kubernetes.io/os: linux + node-role.kubernetes.io/control-plane: "" + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + - effect: NoExecute + key: node.kubernetes.io/unreachable + operator: Exists + tolerationSeconds: 120 + - effect: NoExecute + key: node.kubernetes.io/not-ready + operator: Exists + tolerationSeconds: 120 + volumes: + - name: serving-cert + secret: + secretName: lifecycle-controller-serving-cert diff --git a/microshift-manifests/0000_50_olm_08-lifecycle-controller.deployment.yaml b/microshift-manifests/0000_50_olm_08-lifecycle-controller.deployment.yaml new file mode 100644 index 0000000000..1c1cd18f2f --- /dev/null +++ b/microshift-manifests/0000_50_olm_08-lifecycle-controller.deployment.yaml @@ -0,0 +1,109 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: lifecycle-controller + namespace: openshift-operator-lifecycle-manager + labels: + app: olm-lifecycle-controller + annotations: + release.openshift.io/feature-set: 
"TechPreviewNoUpgrade" + include.release.openshift.io/self-managed-high-availability: "true" + capability.openshift.io/name: "OperatorLifecycleManager" +spec: + strategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 0 + maxSurge: 1 + replicas: 1 + selector: + matchLabels: + app: olm-lifecycle-controller + template: + metadata: + annotations: + target.workload.openshift.io/management: '{"effect": "PreferredDuringScheduling"}' + openshift.io/required-scc: restricted-v2 + kubectl.kubernetes.io/default-container: lifecycle-controller + labels: + app: olm-lifecycle-controller + spec: + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + serviceAccountName: lifecycle-controller + priorityClassName: "system-cluster-critical" + containers: + - name: lifecycle-controller + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: + drop: ["ALL"] + command: + - /bin/lifecycle-controller + args: + - start + - --catalog-source-field-selector=metadata.namespace=openshift-marketplace,metadata.name=redhat-operators + - --tls-cert=/var/run/secrets/serving-cert/tls.crt + - --tls-key=/var/run/secrets/serving-cert/tls.key + image: quay.io/operator-framework/olm@sha256:de396b540b82219812061d0d753440d5655250c621c753ed1dc67d6154741607 + imagePullPolicy: IfNotPresent + env: + - name: RELEASE_VERSION + value: "0.0.1-snapshot" + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LIFECYCLE_SERVER_IMAGE + value: quay.io/operator-framework/olm@sha256:de396b540b82219812061d0d753440d5655250c621c753ed1dc67d6154741607 + - name: GOMEMLIMIT + value: "5MiB" + resources: + requests: + cpu: 10m + memory: 10Mi + ports: + - containerPort: 8081 + name: health + - containerPort: 8443 + name: metrics + protocol: TCP + volumeMounts: + - name: serving-cert + mountPath: /var/run/secrets/serving-cert + readOnly: true + livenessProbe: + httpGet: + path: /healthz + port: health + scheme: HTTP + 
initialDelaySeconds: 30 + readinessProbe: + httpGet: + path: /healthz + port: health + scheme: HTTP + initialDelaySeconds: 30 + terminationMessagePolicy: FallbackToLogsOnError + nodeSelector: + kubernetes.io/os: linux + node-role.kubernetes.io/control-plane: "" + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + - effect: NoExecute + key: node.kubernetes.io/unreachable + operator: Exists + tolerationSeconds: 120 + - effect: NoExecute + key: node.kubernetes.io/not-ready + operator: Exists + tolerationSeconds: 120 + volumes: + - name: serving-cert + secret: + secretName: lifecycle-controller-serving-cert diff --git a/microshift-manifests/0000_50_olm_08-lifecycle-controller.networkpolicy.yaml b/microshift-manifests/0000_50_olm_08-lifecycle-controller.networkpolicy.yaml new file mode 100644 index 0000000000..c08803d707 --- /dev/null +++ b/microshift-manifests/0000_50_olm_08-lifecycle-controller.networkpolicy.yaml @@ -0,0 +1,35 @@ +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: lifecycle-controller + namespace: openshift-operator-lifecycle-manager + annotations: + release.openshift.io/feature-set: "TechPreviewNoUpgrade" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + capability.openshift.io/name: "OperatorLifecycleManager" + include.release.openshift.io/hypershift: "true" +spec: + podSelector: + matchLabels: + app: olm-lifecycle-controller + ingress: + - ports: + - port: 8443 + protocol: TCP + egress: + - ports: + - port: 6443 + protocol: TCP + - ports: + - port: 53 + protocol: TCP + - port: 53 + protocol: UDP + - port: 5353 + protocol: TCP + - port: 5353 + protocol: UDP + policyTypes: + - Ingress + - Egress diff --git a/microshift-manifests/0000_50_olm_08-lifecycle-controller.rbac.yaml b/microshift-manifests/0000_50_olm_08-lifecycle-controller.rbac.yaml new file mode 100644 index 0000000000..24da8ffaf7 --- /dev/null +++ 
b/microshift-manifests/0000_50_olm_08-lifecycle-controller.rbac.yaml @@ -0,0 +1,91 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: lifecycle-controller + namespace: openshift-operator-lifecycle-manager + annotations: + release.openshift.io/feature-set: "TechPreviewNoUpgrade" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + capability.openshift.io/name: "OperatorLifecycleManager" + include.release.openshift.io/hypershift: "true" +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: operator-lifecycle-manager-lifecycle-controller + annotations: + release.openshift.io/feature-set: "TechPreviewNoUpgrade" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + capability.openshift.io/name: "OperatorLifecycleManager" + include.release.openshift.io/hypershift: "true" +rules: + # Read APIServer for TLS security profile configuration + - apiGroups: ["config.openshift.io"] + resources: ["apiservers"] + verbs: ["get", "list", "watch"] + # Watch CatalogSources cluster-wide + - apiGroups: ["operators.coreos.com"] + resources: ["catalogsources"] + verbs: ["get", "list", "watch"] + # Watch catalog pods cluster-wide + - apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list", "watch"] + # Manage lifecycle-server deployments + - apiGroups: ["apps"] + resources: ["deployments"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + # Manage lifecycle-server services + - apiGroups: [""] + resources: ["services"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + # Manage lifecycle-server serviceaccounts + - apiGroups: [""] + resources: ["serviceaccounts"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + # Manage lifecycle-server networkpolicies + - apiGroups: ["networking.k8s.io"] + resources: ["networkpolicies"] + 
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + # Manage lifecycle-server clusterrolebindings + - apiGroups: ["rbac.authorization.k8s.io"] + resources: ["clusterrolebindings"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + # Required to grant these permissions to lifecycle-server via CRB + - apiGroups: ["authentication.k8s.io"] + resources: ["tokenreviews"] + verbs: ["create"] + - apiGroups: ["authorization.k8s.io"] + resources: ["subjectaccessreviews"] + verbs: ["create"] + # Leader election + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "list", "watch", "create", "update", "delete"] + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "list", "watch", "create", "update", "delete"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: operator-lifecycle-manager-lifecycle-controller + annotations: + release.openshift.io/feature-set: "TechPreviewNoUpgrade" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + capability.openshift.io/name: "OperatorLifecycleManager" + include.release.openshift.io/hypershift: "true" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: operator-lifecycle-manager-lifecycle-controller +subjects: + - kind: ServiceAccount + name: lifecycle-controller + namespace: openshift-operator-lifecycle-manager diff --git a/microshift-manifests/0000_50_olm_08-lifecycle-controller.service.yaml b/microshift-manifests/0000_50_olm_08-lifecycle-controller.service.yaml new file mode 100644 index 0000000000..995d6e6677 --- /dev/null +++ b/microshift-manifests/0000_50_olm_08-lifecycle-controller.service.yaml @@ -0,0 +1,21 @@ +apiVersion: v1 +kind: Service +metadata: + name: lifecycle-controller + namespace: openshift-operator-lifecycle-manager + annotations: 
+ release.openshift.io/feature-set: "TechPreviewNoUpgrade" + service.beta.openshift.io/serving-cert-secret-name: lifecycle-controller-serving-cert + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + capability.openshift.io/name: "OperatorLifecycleManager" + include.release.openshift.io/hypershift: "true" +spec: + ports: + - name: metrics + port: 8443 + protocol: TCP + targetPort: metrics + selector: + app: olm-lifecycle-controller + type: ClusterIP diff --git a/microshift-manifests/0000_50_olm_09-lifecycle-server.rbac.yaml b/microshift-manifests/0000_50_olm_09-lifecycle-server.rbac.yaml new file mode 100644 index 0000000000..d848837106 --- /dev/null +++ b/microshift-manifests/0000_50_olm_09-lifecycle-server.rbac.yaml @@ -0,0 +1,18 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: operator-lifecycle-manager-lifecycle-server + annotations: + release.openshift.io/feature-set: "TechPreviewNoUpgrade" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + capability.openshift.io/name: "OperatorLifecycleManager" + include.release.openshift.io/hypershift: "true" +rules: + # Required by kube-rbac-proxy for authn/authz + - apiGroups: ["authentication.k8s.io"] + resources: ["tokenreviews"] + verbs: ["create"] + - apiGroups: ["authorization.k8s.io"] + resources: ["subjectaccessreviews"] + verbs: ["create"] diff --git a/microshift-manifests/kustomization.yaml b/microshift-manifests/kustomization.yaml index 206174302a..6aad44a640 100644 --- a/microshift-manifests/kustomization.yaml +++ b/microshift-manifests/kustomization.yaml @@ -20,6 +20,11 @@ resources: - 0000_50_olm_03-services.yaml - 0000_50_olm_07-olm-operator.deployment.yaml - 0000_50_olm_08-catalog-operator.deployment.yaml + - 0000_50_olm_08-lifecycle-controller.deployment.yaml + - 0000_50_olm_08-lifecycle-controller.networkpolicy.yaml + - 
0000_50_olm_08-lifecycle-controller.rbac.yaml + - 0000_50_olm_08-lifecycle-controller.service.yaml - 0000_50_olm_09-aggregated.clusterrole.yaml + - 0000_50_olm_09-lifecycle-server.rbac.yaml - 0000_50_olm_13-operatorgroup-default.yaml - 0000_50_olm_15-csv-viewer.rbac.yaml diff --git a/operator-lifecycle-manager.Dockerfile b/operator-lifecycle-manager.Dockerfile index f1fe671ea4..839daf5907 100644 --- a/operator-lifecycle-manager.Dockerfile +++ b/operator-lifecycle-manager.Dockerfile @@ -40,6 +40,8 @@ COPY --from=builder /build/bin/cpb /bin/cpb COPY --from=builder /build/bin/psm /bin/psm COPY --from=builder /build/bin/copy-content /bin/copy-content COPY --from=builder /tmp/build/olmv0-tests-ext.gz /usr/bin/olmv0-tests-ext.gz +COPY --from=builder /build/bin/lifecycle-controller /bin/lifecycle-controller +COPY --from=builder /build/bin/lifecycle-server /bin/lifecycle-server # This image doesn't need to run as root user. USER 1001 diff --git a/pkg/lifecycle-controller/TODO.md b/pkg/lifecycle-controller/TODO.md new file mode 100644 index 0000000000..9e73c11961 --- /dev/null +++ b/pkg/lifecycle-controller/TODO.md @@ -0,0 +1,41 @@ +# Future e2e Test Coverage + +The unit tests in this package cover behavioral contracts of the builder +functions and reconciliation logic. The items below should be validated as +end-to-end tests running against a real cluster. + +## Happy-path test (covers multiple behaviors at once) + +A single test can validate most of the controller's output by exercising it +end-to-end: + +1. Create a CatalogSource with a catalog image containing lifecycle schema blobs. +2. Wait for the lifecycle-server Deployment to become ready. +3. Query the lifecycle-server API through the Service and assert a correct JSON + response. 
+ +A successful response validates: +- Deployment created with the correct catalog image mounted at the correct + digest +- TLS via serving-cert annotation and secret +- Service routing to the lifecycle-server pods +- RBAC on nonResourceURL grants access +- FBC path correctness + +## Separate targeted tests + +- **RBAC denial**: Users without RBAC access to the nonResourceURL are denied. + Query with an unauthorized ServiceAccount and expect 403. +- **NetworkPolicy**: A NetworkPolicy restricts traffic to only the API port + inbound and API server + DNS outbound. +- **Node affinity**: The server prefers scheduling on the same node as the + catalog pod. +- **Pod hardening**: The lifecycle-server pods are hardened: run as non-root, + read-only root filesystem, drop all capabilities, use seccomp + RuntimeDefault. (Note: the lifecycle-controller's own hardening is in static + manifests at `manifests/0000_50_olm_08-lifecycle-controller.deployment.yaml` + and validated by SCC admission.) +- **Cleanup on deletion**: Resources are cleaned up when a CatalogSource is + deleted (end-to-end). +- **Cleanup on selector mismatch**: Resources are cleaned up when a + CatalogSource stops matching selectors (end-to-end). diff --git a/pkg/lifecycle-controller/controller.go b/pkg/lifecycle-controller/controller.go new file mode 100644 index 0000000000..2cb64ca847 --- /dev/null +++ b/pkg/lifecycle-controller/controller.go @@ -0,0 +1,703 @@ +/* +Copyright 2025. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controllers + +import ( + "context" + "crypto/tls" + "fmt" + "sort" + "strings" + + "github.com/go-logr/logr" + configv1 "github.com/openshift/api/config/v1" + "github.com/openshift/library-go/pkg/crypto" + "sigs.k8s.io/controller-runtime/pkg/event" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + networkingv1 "k8s.io/api/networking/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" + appsv1ac "k8s.io/client-go/applyconfigurations/apps/v1" + corev1ac "k8s.io/client-go/applyconfigurations/core/v1" + metav1ac "k8s.io/client-go/applyconfigurations/meta/v1" + networkingv1ac "k8s.io/client-go/applyconfigurations/networking/v1" + rbacv1ac "k8s.io/client-go/applyconfigurations/rbac/v1" + + operatorsv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/source" +) + +const ( + catalogLabelKey = "olm.catalogSource" + catalogNameLabelKey = "olm.lifecycle-server/catalog-name" + fieldManager = "lifecycle-controller" + clusterRoleName = "operator-lifecycle-manager-lifecycle-server" + clusterRoleBindingName = "operator-lifecycle-manager-lifecycle-server" + appLabelKey = "app" + appLabelVal = "olm-lifecycle-server" + resourceBaseName = "lifecycle-server" +) + +// LifecycleServerReconciler reconciles CatalogSources and manages lifecycle-server resources +type LifecycleServerReconciler struct { + client.Client + Log logr.Logger + Scheme *runtime.Scheme + ServerImage string 
+ CatalogSourceLabelSelector labels.Selector + CatalogSourceFieldSelector fields.Selector + TLSConfigProvider *TLSConfigProvider +} + +// matchesCatalogSource checks if a CatalogSource matches both label and field selectors +func (r *LifecycleServerReconciler) matchesCatalogSource(cs *operatorsv1alpha1.CatalogSource) bool { + if !r.CatalogSourceLabelSelector.Matches(labels.Set(cs.Labels)) { + return false + } + fieldSet := fields.Set{ + "metadata.name": cs.Name, + "metadata.namespace": cs.Namespace, + } + return r.CatalogSourceFieldSelector.Matches(fieldSet) +} + +// Reconcile watches CatalogSources and manages lifecycle-server resources per catalog +func (r *LifecycleServerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + log := r.Log.WithValues("catalogSource", req.NamespacedName) + + log.Info("handling reconciliation request") + defer log.Info("finished reconciliation") + + // Get the CatalogSource + var cs operatorsv1alpha1.CatalogSource + if err := r.Get(ctx, req.NamespacedName, &cs); err != nil { + if errors.IsNotFound(err) { + // CatalogSource was deleted, cleanup resources + if err := r.cleanupResources(ctx, log, req.Namespace, req.Name); err != nil { + return ctrl.Result{}, err + } + // Also reconcile the shared CRB to remove this SA + return ctrl.Result{}, r.reconcileClusterRoleBinding(ctx, log) + } + log.Error(err, "failed to get catalog source") + return ctrl.Result{}, err + } + + // Check if CatalogSource matches our selectors + if !r.matchesCatalogSource(&cs) { + // CatalogSource doesn't match, cleanup any existing resources + if err := r.cleanupResources(ctx, log, cs.Namespace, cs.Name); err != nil { + return ctrl.Result{}, err + } + // Also reconcile the shared CRB to remove this SA + return ctrl.Result{}, r.reconcileClusterRoleBinding(ctx, log) + } + + // Get the catalog image ref from running pod + imageRef, nodeName, err := r.getCatalogPodInfo(ctx, &cs) + if err != nil { + log.Error(err, "failed to get catalog 
pod info") + return ctrl.Result{}, err + } + if imageRef == "" { + log.Info("no valid image ref for catalog source, waiting for pod") + return ctrl.Result{}, nil + } + + // Ensure all resources exist for this CatalogSource + if err := r.ensureResources(ctx, log, &cs, imageRef, nodeName); err != nil { + return ctrl.Result{}, err + } + + // Reconcile the shared ClusterRoleBinding + if err := r.reconcileClusterRoleBinding(ctx, log); err != nil { + return ctrl.Result{}, err + } + + return ctrl.Result{}, nil +} + +// getCatalogPodInfo gets the image digest and node name from the catalog's running pod +func (r *LifecycleServerReconciler) getCatalogPodInfo(ctx context.Context, cs *operatorsv1alpha1.CatalogSource) (string, string, error) { + var pods corev1.PodList + if err := r.List(ctx, &pods, + client.InNamespace(cs.Namespace), + client.MatchingLabels{catalogLabelKey: cs.Name}, + ); err != nil { + return "", "", err + } + + // Find a running pod with a valid digest + for i := range pods.Items { + p := &pods.Items[i] + if p.Status.Phase != corev1.PodRunning { + continue + } + digest := imageID(p) + if digest != "" { + return digest, p.Spec.NodeName, nil + } + } + + return "", "", nil +} + +// ensureResources creates or updates namespace-scoped resources for a CatalogSource +func (r *LifecycleServerReconciler) ensureResources(ctx context.Context, log logr.Logger, cs *operatorsv1alpha1.CatalogSource, imageRef, nodeName string) error { + name := resourceName(cs.Name) + applyOpts := []client.ApplyOption{client.FieldOwner(fieldManager), client.ForceOwnership} + + // Apply ServiceAccount (in catalog's namespace) + sa := r.buildServiceAccount(name, cs) + if err := r.Apply(ctx, sa, applyOpts...); err != nil { + log.Error(err, "failed to apply serviceaccount") + return err + } + + // Apply Service (in catalog's namespace) + svc := r.buildService(name, cs) + if err := r.Apply(ctx, svc, applyOpts...); err != nil { + log.Error(err, "failed to apply service") + return err + } + + // 
Apply Deployment (in catalog's namespace)
	deploy := r.buildDeployment(name, cs, imageRef, nodeName)
	if err := r.Apply(ctx, deploy, applyOpts...); err != nil {
		log.Error(err, "failed to apply deployment")
		return err
	}

	// Apply NetworkPolicy (in catalog's namespace)
	np := r.buildNetworkPolicy(name, cs)
	if err := r.Apply(ctx, np, applyOpts...); err != nil {
		log.Error(err, "failed to apply networkpolicy")
		return err
	}

	log.Info("applied resources", "name", name, "namespace", cs.Namespace, "imageRef", imageRef, "nodeName", nodeName)
	return nil
}

// reconcileClusterRoleBinding maintains a single CRB with all lifecycle-server ServiceAccounts.
// Subjects are derived from the set of matching CatalogSources whose ServiceAccount already
// exists, then sorted so repeated applies are deterministic and do not churn the object.
func (r *LifecycleServerReconciler) reconcileClusterRoleBinding(ctx context.Context, log logr.Logger) error {
	// List all matching CatalogSources
	var allCatalogSources operatorsv1alpha1.CatalogSourceList
	if err := r.List(ctx, &allCatalogSources); err != nil {
		log.Error(err, "failed to list catalog sources for CRB reconciliation")
		return err
	}

	// Build subjects list from matching CatalogSources
	var subjects []*rbacv1ac.SubjectApplyConfiguration
	for i := range allCatalogSources.Items {
		cs := &allCatalogSources.Items[i]
		if !r.matchesCatalogSource(cs) {
			continue
		}
		// Check if SA exists (only add if we've created resources for this catalog)
		saName := resourceName(cs.Name)
		var sa corev1.ServiceAccount
		if err := r.Get(ctx, types.NamespacedName{Name: saName, Namespace: cs.Namespace}, &sa); err != nil {
			if errors.IsNotFound(err) {
				continue // SA doesn't exist yet, skip
			}
			// Any other Get error aborts before the apply, leaving the previous CRB intact.
			return err
		}
		subjects = append(subjects, rbacv1ac.Subject().
			WithKind("ServiceAccount").
			WithName(saName).
			WithNamespace(cs.Namespace))
	}

	// Sort subjects for deterministic ordering (namespace first, then name).
	// Name/Namespace pointers are always set by the builder above, so the
	// dereferences here are safe.
	sort.Slice(subjects, func(i, j int) bool {
		if *subjects[i].Namespace != *subjects[j].Namespace {
			return *subjects[i].Namespace < *subjects[j].Namespace
		}
		return *subjects[i].Name < *subjects[j].Name
	})

	// Apply the CRB
	crb := rbacv1ac.ClusterRoleBinding(clusterRoleBindingName).
		WithLabels(map[string]string{
			appLabelKey: appLabelVal,
		}).
		WithRoleRef(rbacv1ac.RoleRef().
			WithAPIGroup("rbac.authorization.k8s.io").
			WithKind("ClusterRole").
			WithName(clusterRoleName)).
		WithSubjects(subjects...)

	if err := r.Apply(ctx, crb, client.FieldOwner(fieldManager), client.ForceOwnership); err != nil {
		log.Error(err, "failed to apply clusterrolebinding")
		return err
	}

	log.Info("reconciled clusterrolebinding", "subjectCount", len(subjects))
	return nil
}

// cleanupResources deletes namespace-scoped resources for a CatalogSource.
// Each delete tolerates NotFound so cleanup is idempotent; the first non-NotFound
// error aborts, so a partial cleanup is retried on the next reconciliation.
func (r *LifecycleServerReconciler) cleanupResources(ctx context.Context, log logr.Logger, csNamespace, csName string) error {
	name := resourceName(csName)
	log = log.WithValues("resourceName", name, "namespace", csNamespace)

	// Tracks whether at least one object was actually removed, to avoid noisy logs.
	var deleted bool

	// Delete Deployment
	deploy := &appsv1.Deployment{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: csNamespace,
		},
	}
	if err := r.Delete(ctx, deploy); err != nil && !errors.IsNotFound(err) {
		log.Error(err, "failed to delete deployment")
		return err
	} else if err == nil {
		deleted = true
	}

	// Delete Service
	svc := &corev1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: csNamespace,
		},
	}
	if err := r.Delete(ctx, svc); err != nil && !errors.IsNotFound(err) {
		log.Error(err, "failed to delete service")
		return err
	} else if err == nil {
		deleted = true
	}

	// Delete NetworkPolicy
	np := &networkingv1.NetworkPolicy{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: csNamespace,
		},
	}
	if err :=
r.Delete(ctx, np); err != nil && !errors.IsNotFound(err) {
		log.Error(err, "failed to delete networkpolicy")
		return err
	} else if err == nil {
		deleted = true
	}

	// Delete ServiceAccount
	sa := &corev1.ServiceAccount{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: csNamespace,
		},
	}
	if err := r.Delete(ctx, sa); err != nil && !errors.IsNotFound(err) {
		log.Error(err, "failed to delete serviceaccount")
		return err
	} else if err == nil {
		deleted = true
	}

	if deleted {
		log.Info("cleaned up resources")
	}
	return nil
}

// resourceName generates a DNS-compatible name for lifecycle-server resources:
// "<catalog>-<resourceBaseName>" with '.'/'_' mapped to '-', truncated to the
// 63-character DNS label limit, trailing hyphens trimmed, and lowercased.
// NOTE(review): an empty csName yields "-lifecycle-server", which has a leading
// hyphen and is therefore not a valid DNS-1123 label; the unit tests currently
// pin this behavior, so it is flagged here rather than changed. Truncation of
// very long names can also collide for names sharing a 63-char prefix — confirm
// acceptable for expected catalog names.
func resourceName(csName string) string {
	name := fmt.Sprintf("%s-%s", csName, resourceBaseName)
	name = strings.ReplaceAll(name, ".", "-")
	name = strings.ReplaceAll(name, "_", "-")
	if len(name) > 63 {
		name = name[:63]
	}
	name = strings.TrimRight(name, "-")
	return strings.ToLower(name)
}

// buildServiceAccount creates a ServiceAccount for a lifecycle-server.
// The catalog-name label is what ties the SA back to its CatalogSource for
// watch-mapping and CRB subject collection.
func (r *LifecycleServerReconciler) buildServiceAccount(name string, cs *operatorsv1alpha1.CatalogSource) *corev1ac.ServiceAccountApplyConfiguration {
	return corev1ac.ServiceAccount(name, cs.Namespace).
		WithLabels(map[string]string{
			appLabelKey:         appLabelVal,
			catalogNameLabelKey: cs.Name,
		})
}

// buildService creates a Service for a lifecycle-server.
// The serving-cert annotation asks the OpenShift service-CA operator to issue
// a TLS secret named "<name>-tls", which the Deployment mounts.
func (r *LifecycleServerReconciler) buildService(name string, cs *operatorsv1alpha1.CatalogSource) *corev1ac.ServiceApplyConfiguration {
	return corev1ac.Service(name, cs.Namespace).
		WithLabels(map[string]string{
			appLabelKey:         appLabelVal,
			catalogNameLabelKey: cs.Name,
		}).
		WithAnnotations(map[string]string{
			"service.beta.openshift.io/serving-cert-secret-name": fmt.Sprintf("%s-tls", name),
		}).
		WithSpec(corev1ac.ServiceSpec().
			// Selector must stay in lockstep with the Deployment pod template labels.
			WithSelector(map[string]string{
				appLabelKey:         appLabelVal,
				catalogNameLabelKey: cs.Name,
			}).
			WithPorts(corev1ac.ServicePort().
				WithName("api").
WithPort(8443).
				WithTargetPort(intstr.FromString("api")).
				WithProtocol(corev1.ProtocolTCP)).
			WithType(corev1.ServiceTypeClusterIP))
}

// buildDeployment creates a Deployment for a lifecycle-server.
// The catalog content is mounted read-only from an OCI image volume pinned to
// the exact imageRef (digest) observed on the catalog pod, and the pod prefers
// scheduling onto the same node as that pod (soft affinity).
// NOTE(review): image-type volumes require the ImageVolume feature on the
// cluster — confirm the minimum supported Kubernetes/OpenShift version.
func (r *LifecycleServerReconciler) buildDeployment(name string, cs *operatorsv1alpha1.CatalogSource, imageRef, nodeName string) *appsv1ac.DeploymentApplyConfiguration {
	podLabels := map[string]string{
		appLabelKey:         appLabelVal,
		catalogNameLabelKey: cs.Name,
	}

	// Determine the catalog directory inside the image
	catalogDir := "/configs" // default for standard catalog images
	if cs.Spec.GrpcPodConfig != nil && cs.Spec.GrpcPodConfig.ExtractContent != nil && cs.Spec.GrpcPodConfig.ExtractContent.CatalogDir != "" {
		catalogDir = cs.Spec.GrpcPodConfig.ExtractContent.CatalogDir
	}

	const catalogMountPath = "/catalog"
	// Path handed to the server: mount point + in-image catalog dir.
	fbcPath := catalogMountPath + catalogDir

	return appsv1ac.Deployment(name, cs.Namespace).
		WithLabels(podLabels).
		WithSpec(appsv1ac.DeploymentSpec().
			WithReplicas(1).
			// MaxUnavailable=0 / MaxSurge=1 keeps the single replica serving during rollouts.
			WithStrategy(appsv1ac.DeploymentStrategy().
				WithType(appsv1.RollingUpdateDeploymentStrategyType).
				WithRollingUpdate(appsv1ac.RollingUpdateDeployment().
					WithMaxUnavailable(intstr.FromInt32(0)).
					WithMaxSurge(intstr.FromInt32(1)))).
			WithSelector(metav1ac.LabelSelector().
				WithMatchLabels(podLabels)).
			WithTemplate(corev1ac.PodTemplateSpec().
				WithLabels(podLabels).
				WithAnnotations(map[string]string{
					"target.workload.openshift.io/management": `{"effect": "PreferredDuringScheduling"}`,
					"openshift.io/required-scc":               "restricted-v2",
					"kubectl.kubernetes.io/default-container": "lifecycle-server",
				}).
				WithSpec(corev1ac.PodSpec().
					WithSecurityContext(corev1ac.PodSecurityContext().
						WithRunAsNonRoot(true).
						WithSeccompProfile(corev1ac.SeccompProfile().
							WithType(corev1.SeccompProfileTypeRuntimeDefault))).
					WithServiceAccountName(name).
					WithPriorityClassName("system-cluster-critical").
					// Soft (preferred) affinity only — nil when nodeName is empty.
					WithAffinity(nodeAffinityForNode(nodeName)).
					WithNodeSelector(map[string]string{
						"kubernetes.io/os": "linux",
					}).
					WithTolerations(
						corev1ac.Toleration().
							WithKey("node-role.kubernetes.io/master").
							WithOperator(corev1.TolerationOpExists).
							WithEffect(corev1.TaintEffectNoSchedule),
						corev1ac.Toleration().
							WithKey("node.kubernetes.io/unreachable").
							WithOperator(corev1.TolerationOpExists).
							WithEffect(corev1.TaintEffectNoExecute).
							WithTolerationSeconds(120),
						corev1ac.Toleration().
							WithKey("node.kubernetes.io/not-ready").
							WithOperator(corev1.TolerationOpExists).
							WithEffect(corev1.TaintEffectNoExecute).
							WithTolerationSeconds(120),
					).
					WithContainers(corev1ac.Container().
						WithName("lifecycle-server").
						WithImage(r.ServerImage).
						WithImagePullPolicy(corev1.PullIfNotPresent).
						WithCommand("/bin/lifecycle-server").
						WithArgs(r.buildLifecycleServerArgs(fbcPath)...).
						// GOMEMLIMIT matches the 50Mi memory request below.
						WithEnv(corev1ac.EnvVar().
							WithName("GOMEMLIMIT").
							WithValue("50MiB")).
						WithPorts(
							corev1ac.ContainerPort().
								WithName("api").
								WithContainerPort(8443),
							corev1ac.ContainerPort().
								WithName("health").
								WithContainerPort(8081),
						).
						WithVolumeMounts(
							corev1ac.VolumeMount().
								WithName("catalog").
								WithMountPath(catalogMountPath).
								WithReadOnly(true),
							corev1ac.VolumeMount().
								WithName("serving-cert").
								WithMountPath("/var/run/secrets/serving-cert").
								WithReadOnly(true),
						).
						// Liveness and readiness both probe plain-HTTP /healthz on the health port.
						WithLivenessProbe(corev1ac.Probe().
							WithHTTPGet(corev1ac.HTTPGetAction().
								WithPath("/healthz").
								WithPort(intstr.FromString("health")).
								WithScheme(corev1.URISchemeHTTP)).
							WithInitialDelaySeconds(30)).
						WithReadinessProbe(corev1ac.Probe().
							WithHTTPGet(corev1ac.HTTPGetAction().
								WithPath("/healthz").
								WithPort(intstr.FromString("health")).
								WithScheme(corev1.URISchemeHTTP)).
							WithInitialDelaySeconds(30)).
						WithResources(corev1ac.ResourceRequirements().
							WithRequests(corev1.ResourceList{
								corev1.ResourceCPU:    resource.MustParse("10m"),
								corev1.ResourceMemory: resource.MustParse("50Mi"),
							})).
						WithSecurityContext(corev1ac.SecurityContext().
							WithAllowPrivilegeEscalation(false).
							WithReadOnlyRootFilesystem(true).
							WithCapabilities(corev1ac.Capabilities().
								WithDrop(corev1.Capability("ALL")))).
						WithTerminationMessagePolicy(corev1.TerminationMessageFallbackToLogsOnError)).
					WithVolumes(
						corev1ac.Volume().
							WithName("catalog").
							WithImage(corev1ac.ImageVolumeSource().
								WithReference(imageRef).
								WithPullPolicy(corev1.PullIfNotPresent)),
						corev1ac.Volume().
							WithName("serving-cert").
							WithSecret(corev1ac.SecretVolumeSource().
								WithSecretName(fmt.Sprintf("%s-tls", name)))))))
}

// buildNetworkPolicy creates a NetworkPolicy for a lifecycle-server:
// ingress only on the API port, egress only to the API server and DNS.
func (r *LifecycleServerReconciler) buildNetworkPolicy(name string, cs *operatorsv1alpha1.CatalogSource) *networkingv1ac.NetworkPolicyApplyConfiguration {
	return networkingv1ac.NetworkPolicy(name, cs.Namespace).
		WithLabels(map[string]string{
			appLabelKey:         appLabelVal,
			catalogNameLabelKey: cs.Name,
		}).
		WithSpec(networkingv1ac.NetworkPolicySpec().
			// Must match the Deployment pod template labels exactly.
			WithPodSelector(metav1ac.LabelSelector().
				WithMatchLabels(map[string]string{
					appLabelKey:         appLabelVal,
					catalogNameLabelKey: cs.Name,
				})).
			WithIngress(networkingv1ac.NetworkPolicyIngressRule().
				WithPorts(networkingv1ac.NetworkPolicyPort().
					WithPort(intstr.FromInt32(8443)).
					WithProtocol(corev1.ProtocolTCP))).
			WithEgress(
				// API server
				networkingv1ac.NetworkPolicyEgressRule().
					WithPorts(networkingv1ac.NetworkPolicyPort().
						WithPort(intstr.FromInt32(6443)).
						WithProtocol(corev1.ProtocolTCP)),
				// DNS
				networkingv1ac.NetworkPolicyEgressRule().
+ WithPorts( + networkingv1ac.NetworkPolicyPort().WithPort(intstr.FromInt32(53)).WithProtocol(corev1.ProtocolTCP), + networkingv1ac.NetworkPolicyPort().WithPort(intstr.FromInt32(53)).WithProtocol(corev1.ProtocolUDP), + networkingv1ac.NetworkPolicyPort().WithPort(intstr.FromInt32(5353)).WithProtocol(corev1.ProtocolTCP), + networkingv1ac.NetworkPolicyPort().WithPort(intstr.FromInt32(5353)).WithProtocol(corev1.ProtocolUDP)), + ). + WithPolicyTypes(networkingv1.PolicyTypeIngress, networkingv1.PolicyTypeEgress)) +} + +// buildLifecycleServerArgs builds the command-line arguments for lifecycle-server. +// TLS settings are passed as CLI args rather than dynamically watched because +// cluster TLS profile changes are expected to be rare. When a change occurs, +// the controller rebuilds the Deployment with updated args, causing a rolling restart. +func (r *LifecycleServerReconciler) buildLifecycleServerArgs(fbcPath string) []string { + args := []string{ + "start", + fmt.Sprintf("--fbc-path=%s", fbcPath), + } + + if r.TLSConfigProvider != nil { + cfg, _ := r.TLSConfigProvider.Get() + args = append(args, fmt.Sprintf("--tls-min-version=%s", crypto.TLSVersionToNameOrDie(cfg.MinVersion))) + if cfg.MinVersion <= tls.VersionTLS12 { + args = append(args, fmt.Sprintf("--tls-cipher-suites=%s", strings.Join(crypto.CipherSuitesToNamesOrDie(cfg.CipherSuites), ","))) + } + } + return args +} + +// imageID extracts digest from pod status (handles extract-content mode) +func imageID(pod *corev1.Pod) string { + // In extract-content mode, look for the "extract-content" init container + for i := range pod.Status.InitContainerStatuses { + if pod.Status.InitContainerStatuses[i].Name == "extract-content" { + return pod.Status.InitContainerStatuses[i].ImageID + } + } + // Fallback to the first container (standard grpc mode) + if len(pod.Status.ContainerStatuses) > 0 { + return pod.Status.ContainerStatuses[0].ImageID + } + return "" +} + +// nodeAffinityForNode returns a node affinity preferring 
the given node, or nil if nodeName is empty +func nodeAffinityForNode(nodeName string) *corev1ac.AffinityApplyConfiguration { + if nodeName == "" { + return nil + } + return corev1ac.Affinity(). + WithNodeAffinity(corev1ac.NodeAffinity(). + WithPreferredDuringSchedulingIgnoredDuringExecution( + corev1ac.PreferredSchedulingTerm(). + WithWeight(100). + WithPreference(corev1ac.NodeSelectorTerm(). + WithMatchExpressions(corev1ac.NodeSelectorRequirement(). + WithKey("kubernetes.io/hostname"). + WithOperator(corev1.NodeSelectorOpIn). + WithValues(nodeName))))) +} + +// LifecycleServerLabelSelector returns a label selector matching lifecycle-server deployments +func LifecycleServerLabelSelector() labels.Selector { + return labels.SelectorFromSet(labels.Set{appLabelKey: appLabelVal}) +} + +// catalogPodPredicate filters pod events to only those where fields relevant +// to the reconciler have changed: Status.Phase, container ImageIDs, or Spec.NodeName. +func catalogPodPredicate() predicate.Predicate { + return predicate.Funcs{ + CreateFunc: func(e event.CreateEvent) bool { return true }, + DeleteFunc: func(e event.DeleteEvent) bool { return true }, + UpdateFunc: func(e event.UpdateEvent) bool { + oldPod, ok := e.ObjectOld.(*corev1.Pod) + if !ok { + return false + } + newPod, ok := e.ObjectNew.(*corev1.Pod) + if !ok { + return false + } + if oldPod.Status.Phase != newPod.Status.Phase { + return true + } + if oldPod.Spec.NodeName != newPod.Spec.NodeName { + return true + } + if imageID(oldPod) != imageID(newPod) { + return true + } + return false + }, + GenericFunc: func(e event.GenericEvent) bool { return true }, + } +} + +// mapPodToCatalogSource maps a Pod event to a reconcile request for its owning CatalogSource. +// Pods without a catalog label or with an empty catalog label value are ignored. 
+func mapPodToCatalogSource(_ context.Context, obj client.Object) []reconcile.Request { + pod, ok := obj.(*corev1.Pod) + if !ok { + return nil + } + // Check if this is a catalog pod + catalogName := pod.Labels[catalogLabelKey] + if catalogName == "" { + return nil + } + // Enqueue the CatalogSource for reconciliation + return []reconcile.Request{ + { + NamespacedName: types.NamespacedName{ + Name: catalogName, + Namespace: pod.Namespace, + }, + }, + } +} + +// mapLifecycleResourceToCatalogSource maps a lifecycle-server resource event to a reconcile request for its CatalogSource. +func mapLifecycleResourceToCatalogSource(_ context.Context, obj client.Object) []reconcile.Request { + // Only watch our resources + if obj.GetLabels()[appLabelKey] != appLabelVal { + return nil + } + csName := obj.GetLabels()[catalogNameLabelKey] + if csName == "" { + return nil + } + return []reconcile.Request{ + { + NamespacedName: types.NamespacedName{ + Name: csName, + Namespace: obj.GetNamespace(), + }, + }, + } +} + +// SetupWithManager sets up the controller with the Manager. +// tlsChangeSource is an optional channel source that triggers reconciliation when TLS profileSpec changes. +func (r *LifecycleServerReconciler) SetupWithManager(mgr ctrl.Manager, tlsProfileChan <-chan event.TypedGenericEvent[configv1.TLSProfileSpec]) error { + bldr := ctrl.NewControllerManagedBy(mgr). + // Watch CatalogSources, but only reconcile on spec or label changes (not status-only updates). + For(&operatorsv1alpha1.CatalogSource{}, builder.WithPredicates( + predicate.Or(predicate.GenerationChangedPredicate{}, predicate.LabelChangedPredicate{}), + )). + // Watch Pods to detect catalog pod changes, but only when phase, imageID, or nodeName change. + Watches(&corev1.Pod{}, handler.EnqueueRequestsFromMapFunc(mapPodToCatalogSource), builder.WithPredicates(catalogPodPredicate())). + // Watch lifecycle-server resources to detect spec drift or deletion. 
+ Watches(&appsv1.Deployment{}, handler.EnqueueRequestsFromMapFunc(mapLifecycleResourceToCatalogSource), builder.WithPredicates(predicate.GenerationChangedPredicate{})). + Watches(&corev1.ServiceAccount{}, handler.EnqueueRequestsFromMapFunc(mapLifecycleResourceToCatalogSource)). + Watches(&corev1.Service{}, handler.EnqueueRequestsFromMapFunc(mapLifecycleResourceToCatalogSource), builder.WithPredicates(predicate.GenerationChangedPredicate{})). + Watches(&networkingv1.NetworkPolicy{}, handler.EnqueueRequestsFromMapFunc(mapLifecycleResourceToCatalogSource), builder.WithPredicates(predicate.GenerationChangedPredicate{})) + + // Add TLS change source if provided + bldr = bldr.WatchesRawSource(source.Channel(tlsProfileChan, handler.TypedEnqueueRequestsFromMapFunc(func(ctx context.Context, _ configv1.TLSProfileSpec) []reconcile.Request { + // Trigger reconciliation of all CatalogSources to update lifecycle-server deployments + var catalogSources operatorsv1alpha1.CatalogSourceList + if err := mgr.GetClient().List(ctx, &catalogSources); err != nil { + r.Log.Error(err, "failed to list CatalogSources to requeue for TLS reconfiguration; CatalogSources will not receive new TLS configuration until their next reconciliation") + return nil + } + + // Send events to trigger reconciliation + var requests []reconcile.Request + for _, obj := range catalogSources.Items { + requests = append(requests, reconcile.Request{NamespacedName: client.ObjectKeyFromObject(&obj)}) + } + return requests + }))) + + return bldr.Complete(r) +} diff --git a/pkg/lifecycle-controller/controller_test.go b/pkg/lifecycle-controller/controller_test.go new file mode 100644 index 0000000000..aa94620e05 --- /dev/null +++ b/pkg/lifecycle-controller/controller_test.go @@ -0,0 +1,1149 @@ +package controllers + +import ( + "context" + "strings" + "testing" + + "github.com/go-logr/logr" + configv1 "github.com/openshift/api/config/v1" + operatorsv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1" + 
"github.com/stretchr/testify/require"
	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	networkingv1 "k8s.io/api/networking/v1"
	rbacv1 "k8s.io/api/rbac/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/intstr"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"
	"sigs.k8s.io/controller-runtime/pkg/event"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

// testScheme builds a scheme with the client-go core types plus the OLM
// CatalogSource types, as required by the fake client in the tests below.
func testScheme() *runtime.Scheme {
	scheme := runtime.NewScheme()
	utilruntime.Must(clientgoscheme.AddToScheme(scheme))
	utilruntime.Must(operatorsv1alpha1.AddToScheme(scheme))
	return scheme
}

// testReconciler returns a reconciler with permissive (match-everything)
// selectors and a discard logger; cl may be nil for pure builder tests.
func testReconciler(cl client.Client) *LifecycleServerReconciler {
	return &LifecycleServerReconciler{
		Client:                     cl,
		Log:                        logr.Discard(),
		Scheme:                     testScheme(),
		ServerImage:                "quay.io/test/lifecycle-server:latest",
		CatalogSourceLabelSelector: labels.Everything(),
		CatalogSourceFieldSelector: fields.Everything(),
	}
}

// newCatalogSource returns a minimal grpc-type CatalogSource fixture.
func newCatalogSource(name, namespace string, labelMap map[string]string) *operatorsv1alpha1.CatalogSource {
	return &operatorsv1alpha1.CatalogSource{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: namespace,
			Labels:    labelMap,
		},
		Spec: operatorsv1alpha1.CatalogSourceSpec{
			SourceType: operatorsv1alpha1.SourceTypeGrpc,
			Image:      "quay.io/test/catalog:latest",
		},
	}
}

// catalogPod returns a catalog pod fixture carrying the catalog label the
// reconciler lists on, with the given phase, node, and reported ImageID.
func catalogPod(csName, namespace, nodeName, imageID string, phase corev1.PodPhase) *corev1.Pod {
	return &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      csName + "-pod",
			Namespace: namespace,
			Labels: map[string]string{
				catalogLabelKey: csName,
			},
		},
		Spec: corev1.PodSpec{
			NodeName: nodeName,
			Containers: []corev1.Container{
				{Name: "registry"},
			},
		},
		Status: corev1.PodStatus{
			Phase: phase,
			ContainerStatuses: []corev1.ContainerStatus{
				{
					Name:    "registry",
					ImageID: imageID,
				},
			},
		},
	}
}

// --- Pure function tests ---

func TestResourceName(t *testing.T) {
	tt := []struct {
		name     string
		input    string
		expected string
	}{
		{
			name:     "simple name",
			input:    "my-catalog",
			expected: "my-catalog-lifecycle-server",
		},
		{
			name:     "dots replaced with hyphens",
			input:    "my.catalog",
			expected: "my-catalog-lifecycle-server",
		},
		{
			name:     "underscores replaced with hyphens",
			input:    "my_catalog",
			expected: "my-catalog-lifecycle-server",
		},
		{
			name:     "mixed case and special characters",
			input:    "My.Catalog_Name",
			expected: "my-catalog-name-lifecycle-server",
		},
		{
			name:     "truncation at 63 chars",
			input:    "this-is-a-very-long-catalog-source-name-that-exceeds-the-dns-limit",
			expected: "this-is-a-very-long-catalog-source-name-that-exceeds-the-dns-li",
		},
		{
			// NOTE(review): this pins a leading-hyphen result, which is not a
			// valid DNS-1123 label — intentional? See resourceName.
			name:     "empty name",
			input:    "",
			expected: "-lifecycle-server",
		},
		{
			name:     "already lowercase with hyphens",
			input:    "redhat-operators",
			expected: "redhat-operators-lifecycle-server",
		},
		{
			name:     "truncation should not produce trailing hyphen",
			input:    "this-is-a-very-long-catalog-source-name-that-exceeds-the-dns--",
			expected: "this-is-a-very-long-catalog-source-name-that-exceeds-the-dns",
		},
	}

	for _, tc := range tt {
		t.Run(tc.name, func(t *testing.T) {
			result := resourceName(tc.input)
			require.Equal(t, tc.expected, result)
			require.LessOrEqual(t, len(result), 63, "resource name must not exceed 63 characters")
		})
	}
}

func TestImageID(t *testing.T) {
	tt := []struct {
		name     string
		pod      *corev1.Pod
		expected string
	}{
		{
			name: "extract-content init container returns its ImageID",
			pod: &corev1.Pod{
Status: corev1.PodStatus{
					InitContainerStatuses: []corev1.ContainerStatus{
						{
							Name:    "extract-content",
							ImageID: "sha256:abc123",
						},
					},
					ContainerStatuses: []corev1.ContainerStatus{
						{
							Name:    "registry",
							ImageID: "sha256:def456",
						},
					},
				},
			},
			expected: "sha256:abc123",
		},
		{
			name: "no extract-content init container falls back to first container",
			pod: &corev1.Pod{
				Status: corev1.PodStatus{
					InitContainerStatuses: []corev1.ContainerStatus{
						{
							Name:    "other-init",
							ImageID: "sha256:other",
						},
					},
					ContainerStatuses: []corev1.ContainerStatus{
						{
							Name:    "registry",
							ImageID: "sha256:def456",
						},
					},
				},
			},
			expected: "sha256:def456",
		},
		{
			name: "no init containers falls back to first container",
			pod: &corev1.Pod{
				Status: corev1.PodStatus{
					ContainerStatuses: []corev1.ContainerStatus{
						{
							Name:    "registry",
							ImageID: "sha256:def456",
						},
					},
				},
			},
			expected: "sha256:def456",
		},
		{
			name: "no container statuses returns empty",
			pod: &corev1.Pod{
				Status: corev1.PodStatus{},
			},
			expected: "",
		},
		{
			// The extract-content init container takes precedence even with an
			// empty ImageID — no fallback to the main container in that case.
			name: "extract-content with empty ImageID returns empty",
			pod: &corev1.Pod{
				Status: corev1.PodStatus{
					InitContainerStatuses: []corev1.ContainerStatus{
						{
							Name:    "extract-content",
							ImageID: "",
						},
					},
				},
			},
			expected: "",
		},
	}

	for _, tc := range tt {
		t.Run(tc.name, func(t *testing.T) {
			result := imageID(tc.pod)
			require.Equal(t, tc.expected, result)
		})
	}
}

func TestNodeAffinityForNode(t *testing.T) {
	tt := []struct {
		name     string
		nodeName string
		isNil    bool
	}{
		{
			name:     "empty node name returns nil",
			nodeName: "",
			isNil:    true,
		},
		{
			name:     "non-empty node name returns affinity",
			nodeName: "worker-node-1",
			isNil:    false,
		},
	}

	for _, tc := range tt {
		t.Run(tc.name, func(t *testing.T) {
			result := nodeAffinityForNode(tc.nodeName)
			if tc.isNil {
				require.Nil(t, result)
				return
			}
			require.NotNil(t, result)
			require.NotNil(t, result.NodeAffinity)
			preferred := result.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution
			require.Len(t, preferred, 1)
			require.Equal(t, int32(100), *preferred[0].Weight)
			require.Len(t, preferred[0].Preference.MatchExpressions, 1)
			expr := preferred[0].Preference.MatchExpressions[0]
			require.Equal(t, "kubernetes.io/hostname", *expr.Key)
			require.Equal(t, corev1.NodeSelectorOpIn, *expr.Operator)
			require.Equal(t, []string{tc.nodeName}, expr.Values)
		})
	}
}

func TestLifecycleServerLabelSelector(t *testing.T) {
	sel := LifecycleServerLabelSelector()
	require.True(t, sel.Matches(labels.Set{appLabelKey: appLabelVal}))
	require.False(t, sel.Matches(labels.Set{"app": "other"}))
	require.False(t, sel.Matches(labels.Set{}))
}

// --- Builder method tests ---

func TestBuildServiceAccount(t *testing.T) {
	r := testReconciler(nil)
	cs := newCatalogSource("test-catalog", "test-ns", nil)
	name := resourceName(cs.Name)

	sa := r.buildServiceAccount(name, cs)

	require.Equal(t, name, *sa.GetName())
	require.Equal(t, "test-ns", *sa.GetNamespace())
	require.Equal(t, appLabelVal, sa.ObjectMetaApplyConfiguration.Labels[appLabelKey])
	require.Equal(t, "test-catalog", sa.ObjectMetaApplyConfiguration.Labels[catalogNameLabelKey])
}

func TestBuildService(t *testing.T) {
	r := testReconciler(nil)
	cs := newCatalogSource("test-catalog", "test-ns", nil)
	name := resourceName(cs.Name)

	svc := r.buildService(name, cs)
	deploy := r.buildDeployment(name, cs, "sha256:abc123", "worker-1")
	deployLabels := deploy.Spec.Template.ObjectMetaApplyConfiguration.Labels

	// Service port is 8443 (other components depend on this)
	require.Equal(t, int32(8443), *svc.Spec.Ports[0].Port)

	// Service selector labels match the deployment template labels exactly (otherwise routing breaks)
	require.Equal(t, deployLabels, svc.Spec.Selector)

	// Serving-cert annotation is present with the correct secret name (otherwise TLS won't work)
require.Equal(t, name+"-tls", svc.ObjectMetaApplyConfiguration.Annotations["service.beta.openshift.io/serving-cert-secret-name"])
}

func TestBuildDeployment(t *testing.T) {
	tt := []struct {
		name               string
		cs                 *operatorsv1alpha1.CatalogSource
		imageRef           string
		nodeName           string
		expectedCatalogDir string
	}{
		{
			name:               "default catalog dir when GrpcPodConfig is nil",
			cs:                 newCatalogSource("test-catalog", "test-ns", nil),
			imageRef:           "sha256:abc123",
			nodeName:           "worker-1",
			expectedCatalogDir: "/catalog/configs",
		},
		{
			name: "custom catalog dir from ExtractContent",
			cs: &operatorsv1alpha1.CatalogSource{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "custom-catalog",
					Namespace: "test-ns",
				},
				Spec: operatorsv1alpha1.CatalogSourceSpec{
					GrpcPodConfig: &operatorsv1alpha1.GrpcPodConfig{
						ExtractContent: &operatorsv1alpha1.ExtractContentConfig{
							CatalogDir: "/custom/path",
						},
					},
				},
			},
			imageRef:           "sha256:def456",
			nodeName:           "",
			expectedCatalogDir: "/catalog/custom/path",
		},
	}

	for _, tc := range tt {
		t.Run(tc.name, func(t *testing.T) {
			r := testReconciler(nil)
			name := resourceName(tc.cs.Name)
			deploy := r.buildDeployment(name, tc.cs, tc.imageRef, tc.nodeName)

			podSpec := deploy.Spec.Template.Spec
			container := podSpec.Containers[0]

			// Deployment uses the provided imageRef as the OCI image volume reference
			catalogVolume := findVolume(podSpec.Volumes, "catalog")
			require.NotNil(t, catalogVolume, "catalog volume must exist")
			require.NotNil(t, catalogVolume.Image)
			require.Equal(t, tc.imageRef, *catalogVolume.Image.Reference)

			// Deployment uses the provided ServerImage for the container image
			require.Equal(t, r.ServerImage, *container.Image)

			// --fbc-path arg reflects the catalog dir
			require.Contains(t, container.Args, "start")
			foundFBCArg := false
			for _, arg := range container.Args {
				if arg == "--fbc-path="+tc.expectedCatalogDir {
					foundFBCArg = true
				}
			}
			require.True(t, foundFBCArg, "expected --fbc-path=%s in args %v", tc.expectedCatalogDir, container.Args)

			// TLS cert volume is mounted from the correctly-named secret
			certVolume := findVolume(podSpec.Volumes, "serving-cert")
			require.NotNil(t, certVolume, "serving-cert volume must exist")
			require.NotNil(t, certVolume.Secret)
			require.Equal(t, name+"-tls", *certVolume.Secret.SecretName)

			// Catalog volume is mounted read-only
			catalogMount := findVolumeMount(container.VolumeMounts, "catalog")
			require.NotNil(t, catalogMount, "catalog volume mount must exist")
			require.True(t, *catalogMount.ReadOnly)

			// Service account name matches the expected resource name
			require.Equal(t, name, *podSpec.ServiceAccountName)

			// Rollout strategy ensures availability (RollingUpdate with MaxUnavailable=0)
			require.Equal(t, appsv1.RollingUpdateDeploymentStrategyType, *deploy.Spec.Strategy.Type)
			require.NotNil(t, deploy.Spec.Strategy.RollingUpdate)
			require.Equal(t, intstr.FromInt32(0), *deploy.Spec.Strategy.RollingUpdate.MaxUnavailable)
		})
	}
}

// findVolume returns a pointer into volumes for the entry with the given name, or nil.
func findVolume(volumes []corev1ac.VolumeApplyConfiguration, name string) *corev1ac.VolumeApplyConfiguration {
	for i := range volumes {
		if volumes[i].Name != nil && *volumes[i].Name == name {
			return &volumes[i]
		}
	}
	return nil
}

// findVolumeMount returns a pointer into mounts for the entry with the given name, or nil.
func findVolumeMount(mounts []corev1ac.VolumeMountApplyConfiguration, name string) *corev1ac.VolumeMountApplyConfiguration {
	for i := range mounts {
		if mounts[i].Name != nil && *mounts[i].Name == name {
			return &mounts[i]
		}
	}
	return nil
}

func TestBuildNetworkPolicy(t *testing.T) {
	r := testReconciler(nil)
	cs := newCatalogSource("test-catalog", "test-ns", nil)
	name := resourceName(cs.Name)

	np := r.buildNetworkPolicy(name, cs)
	deploy := r.buildDeployment(name, cs, "sha256:abc123", "worker-1")
	deployLabels := deploy.Spec.Template.ObjectMetaApplyConfiguration.Labels

	// Pod selector labels match the deployment template labels exactly (otherwise NP targets wrong pods)
require.Equal(t, deployLabels, np.Spec.PodSelector.MatchLabels)

	// Both Ingress and Egress policy types are present
	require.Contains(t, np.Spec.PolicyTypes, networkingv1.PolicyTypeIngress)
	require.Contains(t, np.Spec.PolicyTypes, networkingv1.PolicyTypeEgress)
}

func TestBuildLifecycleServerArgs(t *testing.T) {
	tt := []struct {
		name              string
		tlsConfigProvider *TLSConfigProvider
		fbcPath           string
		expectMinVersion  bool
		expectCipherSuite bool
	}{
		{
			name:              "without TLS provider",
			tlsConfigProvider: nil,
			fbcPath:           "/catalog/configs",
			expectMinVersion:  false,
			expectCipherSuite: false,
		},
		{
			// Cipher suites are only emitted for profiles whose minimum is <= TLS 1.2.
			name: "with TLS 1.2 profile includes min version and cipher suites",
			tlsConfigProvider: NewTLSConfigProvider(dummyGetCertificate, configv1.TLSProfileSpec{
				MinTLSVersion: configv1.VersionTLS12,
				Ciphers: []string{
					"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
					"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
				},
			}),
			fbcPath:           "/catalog/configs",
			expectMinVersion:  true,
			expectCipherSuite: true,
		},
		{
			name: "with TLS 1.3 profile includes min version but NOT cipher suites",
			tlsConfigProvider: NewTLSConfigProvider(dummyGetCertificate, configv1.TLSProfileSpec{
				MinTLSVersion: configv1.VersionTLS13,
			}),
			fbcPath:           "/catalog/custom",
			expectMinVersion:  true,
			expectCipherSuite: false,
		},
	}

	for _, tc := range tt {
		t.Run(tc.name, func(t *testing.T) {
			r := testReconciler(nil)
			r.TLSConfigProvider = tc.tlsConfigProvider
			args := r.buildLifecycleServerArgs(tc.fbcPath)

			require.Contains(t, args, "start")
			require.Contains(t, args, "--fbc-path="+tc.fbcPath)

			hasMinVersion := false
			hasCipherSuites := false
			for _, arg := range args {
				if strings.HasPrefix(arg, "--tls-min-version=") {
					hasMinVersion = true
				}
				if strings.HasPrefix(arg, "--tls-cipher-suites=") {
					hasCipherSuites = true
				}
			}
			require.Equal(t, tc.expectMinVersion, hasMinVersion, "tls-min-version flag presence mismatch")
			require.Equal(t, tc.expectCipherSuite, hasCipherSuites, "tls-cipher-suites flag presence mismatch")
		})
	}
}

// --- matchesCatalogSource tests ---

func TestMatchesCatalogSource(t *testing.T) {
	tt := []struct {
		name          string
		labelSelector string
		fieldSelector string
		cs            *operatorsv1alpha1.CatalogSource
		expected      bool
	}{
		{
			name:          "everything selectors match all",
			labelSelector: "",
			fieldSelector: "",
			cs:            newCatalogSource("test", "test-ns", nil),
			expected:      true,
		},
		{
			name:          "label selector matches",
			labelSelector: "env=prod",
			fieldSelector: "",
			cs:            newCatalogSource("test", "test-ns", map[string]string{"env": "prod"}),
			expected:      true,
		},
		{
			name:          "label selector does not match",
			labelSelector: "env=prod",
			fieldSelector: "",
			cs:            newCatalogSource("test", "test-ns", map[string]string{"env": "dev"}),
			expected:      false,
		},
		{
			name:          "field selector matches name",
			labelSelector: "",
			fieldSelector: "metadata.name=test",
			cs:            newCatalogSource("test", "test-ns", nil),
			expected:      true,
		},
		{
			name:          "field selector does not match name",
			labelSelector: "",
			fieldSelector: "metadata.name=other",
			cs:            newCatalogSource("test", "test-ns", nil),
			expected:      false,
		},
		{
			name:          "both selectors must match",
			labelSelector: "env=prod",
			fieldSelector: "metadata.name=test",
			cs:            newCatalogSource("test", "test-ns", map[string]string{"env": "prod"}),
			expected:      true,
		},
		{
			name:          "label matches but field does not",
			labelSelector: "env=prod",
			fieldSelector: "metadata.name=other",
			cs:            newCatalogSource("test", "test-ns", map[string]string{"env": "prod"}),
			expected:      false,
		},
	}

	for _, tc := range tt {
		t.Run(tc.name, func(t *testing.T) {
			labelSel, err := labels.Parse(tc.labelSelector)
			require.NoError(t, err)
			fieldSel, err := fields.ParseSelector(tc.fieldSelector)
			require.NoError(t, err)

			r := testReconciler(nil)
			r.CatalogSourceLabelSelector = labelSel
			r.CatalogSourceFieldSelector = fieldSel

result := r.matchesCatalogSource(tc.cs) + require.Equal(t, tc.expected, result) + }) + } +} + +// --- Reconcile integration tests with fake client --- + +func TestReconcile_CatalogSourceNotFound(t *testing.T) { + scheme := testScheme() + cl := fake.NewClientBuilder().WithScheme(scheme).Build() + r := testReconciler(cl) + + // Reconcile a CatalogSource that doesn't exist - should not error + result, err := r.Reconcile(context.Background(), ctrl.Request{ + NamespacedName: types.NamespacedName{Name: "nonexistent", Namespace: "test-ns"}, + }) + require.NoError(t, err) + require.Equal(t, ctrl.Result{}, result) +} + +func TestReconcile_CatalogSourceDoesNotMatchSelectors(t *testing.T) { + scheme := testScheme() + cs := newCatalogSource("test-catalog", "test-ns", map[string]string{"env": "dev"}) + cl := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(cs). + Build() + + labelSel, err := labels.Parse("env=prod") + require.NoError(t, err) + + r := testReconciler(cl) + r.CatalogSourceLabelSelector = labelSel + + result, err := r.Reconcile(context.Background(), ctrl.Request{ + NamespacedName: types.NamespacedName{Name: "test-catalog", Namespace: "test-ns"}, + }) + require.NoError(t, err) + require.Equal(t, ctrl.Result{}, result) +} + +func TestReconcile_NoPodRunning(t *testing.T) { + scheme := testScheme() + cs := newCatalogSource("test-catalog", "test-ns", nil) + cl := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(cs). 
+ Build() + + r := testReconciler(cl) + + result, err := r.Reconcile(context.Background(), ctrl.Request{ + NamespacedName: types.NamespacedName{Name: "test-catalog", Namespace: "test-ns"}, + }) + require.NoError(t, err) + require.Equal(t, ctrl.Result{}, result) +} + +func TestReconcile_MatchingCatalogSourceWithRunningPod(t *testing.T) { + scheme := testScheme() + cs := newCatalogSource("test-catalog", "test-ns", nil) + pod := catalogPod("test-catalog", "test-ns", "worker-1", "sha256:abc123", corev1.PodRunning) + + cl := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(cs, pod). + Build() + + r := testReconciler(cl) + + result, err := r.Reconcile(context.Background(), ctrl.Request{ + NamespacedName: types.NamespacedName{Name: "test-catalog", Namespace: "test-ns"}, + }) + require.NoError(t, err) + require.Equal(t, ctrl.Result{}, result) + + ctx := context.Background() + name := resourceName("test-catalog") + + // Verify ServiceAccount was created + var sa corev1.ServiceAccount + err = cl.Get(ctx, types.NamespacedName{Name: name, Namespace: "test-ns"}, &sa) + require.NoError(t, err) + require.Equal(t, appLabelVal, sa.Labels[appLabelKey]) + + // Verify Service was created + var svc corev1.Service + err = cl.Get(ctx, types.NamespacedName{Name: name, Namespace: "test-ns"}, &svc) + require.NoError(t, err) + require.Equal(t, int32(8443), svc.Spec.Ports[0].Port) + + // Verify Deployment was created + var deploy appsv1.Deployment + err = cl.Get(ctx, types.NamespacedName{Name: name, Namespace: "test-ns"}, &deploy) + require.NoError(t, err) + require.Equal(t, r.ServerImage, deploy.Spec.Template.Spec.Containers[0].Image) + + // Verify NetworkPolicy was created + var np networkingv1.NetworkPolicy + err = cl.Get(ctx, types.NamespacedName{Name: name, Namespace: "test-ns"}, &np) + require.NoError(t, err) + + // Verify ClusterRoleBinding was created + var crb rbacv1.ClusterRoleBinding + err = cl.Get(ctx, types.NamespacedName{Name: clusterRoleBindingName}, &crb) + 
require.NoError(t, err) + require.Equal(t, clusterRoleName, crb.RoleRef.Name) + require.Len(t, crb.Subjects, 1) + require.Equal(t, name, crb.Subjects[0].Name) + require.Equal(t, "test-ns", crb.Subjects[0].Namespace) +} + +// --- cleanupResources tests --- + +func TestCleanupResources(t *testing.T) { + scheme := testScheme() + name := resourceName("test-catalog") + ctx := context.Background() + + t.Run("deletes all resources including NetworkPolicy", func(t *testing.T) { + // Pre-create all 4 resource types that cleanupResources should delete + deploy := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: "test-ns"}, + } + svc := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: "test-ns"}, + } + sa := &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: "test-ns"}, + } + np := &networkingv1.NetworkPolicy{ + ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: "test-ns"}, + } + + cl := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(deploy, svc, sa, np). 
+ Build() + + r := testReconciler(cl) + + err := r.cleanupResources(ctx, logr.Discard(), "test-ns", "test-catalog") + require.NoError(t, err) + + // Verify all 4 resource types are deleted + err = cl.Get(ctx, types.NamespacedName{Name: name, Namespace: "test-ns"}, &appsv1.Deployment{}) + require.True(t, errors.IsNotFound(err), "Deployment should be deleted") + + err = cl.Get(ctx, types.NamespacedName{Name: name, Namespace: "test-ns"}, &corev1.Service{}) + require.True(t, errors.IsNotFound(err), "Service should be deleted") + + err = cl.Get(ctx, types.NamespacedName{Name: name, Namespace: "test-ns"}, &corev1.ServiceAccount{}) + require.True(t, errors.IsNotFound(err), "ServiceAccount should be deleted") + + err = cl.Get(ctx, types.NamespacedName{Name: name, Namespace: "test-ns"}, &networkingv1.NetworkPolicy{}) + require.True(t, errors.IsNotFound(err), "NetworkPolicy should be deleted") + }) + + t.Run("handles not-found resources gracefully", func(t *testing.T) { + // No resources exist + cl := fake.NewClientBuilder(). + WithScheme(scheme). 
+ Build() + + r := testReconciler(cl) + + err := r.cleanupResources(ctx, logr.Discard(), "test-ns", "test-catalog") + require.NoError(t, err) + }) +} + +// --- reconcileClusterRoleBinding tests --- + +func TestReconcileClusterRoleBinding(t *testing.T) { + scheme := testScheme() + ctx := context.Background() + + t.Run("no matching CatalogSources produces CRB with no subjects", func(t *testing.T) { + cl := fake.NewClientBuilder().WithScheme(scheme).Build() + r := testReconciler(cl) + + err := r.reconcileClusterRoleBinding(ctx, logr.Discard()) + require.NoError(t, err) + + var crb rbacv1.ClusterRoleBinding + err = cl.Get(ctx, types.NamespacedName{Name: clusterRoleBindingName}, &crb) + require.NoError(t, err) + require.Empty(t, crb.Subjects) + require.Equal(t, clusterRoleName, crb.RoleRef.Name) + }) + + t.Run("multiple matching CatalogSources produce sorted subjects", func(t *testing.T) { + cs1 := newCatalogSource("catalog-z", "ns-a", nil) + cs2 := newCatalogSource("catalog-a", "ns-b", nil) + sa1Name := resourceName("catalog-z") + sa2Name := resourceName("catalog-a") + sa1 := &corev1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: sa1Name, Namespace: "ns-a"}} + sa2 := &corev1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: sa2Name, Namespace: "ns-b"}} + + cl := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(cs1, cs2, sa1, sa2). 
+ Build() + + r := testReconciler(cl) + + err := r.reconcileClusterRoleBinding(ctx, logr.Discard()) + require.NoError(t, err) + + var crb rbacv1.ClusterRoleBinding + err = cl.Get(ctx, types.NamespacedName{Name: clusterRoleBindingName}, &crb) + require.NoError(t, err) + require.Len(t, crb.Subjects, 2) + + // Subjects should be sorted by namespace, then name + require.Equal(t, "ns-a", crb.Subjects[0].Namespace) + require.Equal(t, sa1Name, crb.Subjects[0].Name) + require.Equal(t, "ns-b", crb.Subjects[1].Namespace) + require.Equal(t, sa2Name, crb.Subjects[1].Name) + }) + + t.Run("CatalogSource without SA is skipped", func(t *testing.T) { + cs := newCatalogSource("catalog-no-sa", "test-ns", nil) + + cl := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(cs). + Build() + + r := testReconciler(cl) + + err := r.reconcileClusterRoleBinding(ctx, logr.Discard()) + require.NoError(t, err) + + var crb rbacv1.ClusterRoleBinding + err = cl.Get(ctx, types.NamespacedName{Name: clusterRoleBindingName}, &crb) + require.NoError(t, err) + require.Empty(t, crb.Subjects) + }) +} + +// --- getCatalogPodInfo tests --- + +func TestGetCatalogPodInfo(t *testing.T) { + scheme := testScheme() + ctx := context.Background() + + tt := []struct { + name string + pods []*corev1.Pod + expectedImage string + expectedNode string + expectErr bool + }{ + { + name: "no pods returns empty", + pods: nil, + expectedImage: "", + expectedNode: "", + }, + { + name: "running pod with digest", + pods: []*corev1.Pod{ + catalogPod("test-catalog", "test-ns", "worker-1", "sha256:abc123", corev1.PodRunning), + }, + expectedImage: "sha256:abc123", + expectedNode: "worker-1", + }, + { + name: "pending pod is skipped", + pods: []*corev1.Pod{ + catalogPod("test-catalog", "test-ns", "worker-1", "sha256:abc123", corev1.PodPending), + }, + expectedImage: "", + expectedNode: "", + }, + { + name: "running pod with empty imageID is skipped", + pods: []*corev1.Pod{ + catalogPod("test-catalog", "test-ns", "worker-1", 
"", corev1.PodRunning), + }, + expectedImage: "", + expectedNode: "", + }, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + builder := fake.NewClientBuilder().WithScheme(scheme) + for _, p := range tc.pods { + builder = builder.WithObjects(p) + } + cl := builder.Build() + + r := testReconciler(cl) + cs := newCatalogSource("test-catalog", "test-ns", nil) + + image, node, err := r.getCatalogPodInfo(ctx, cs) + if tc.expectErr { + require.Error(t, err) + return + } + require.NoError(t, err) + require.Equal(t, tc.expectedImage, image) + require.Equal(t, tc.expectedNode, node) + }) + } +} + +// --- catalogPodPredicate tests --- + +func TestCatalogPodPredicate(t *testing.T) { + pred := catalogPodPredicate() + + basePod := func() *corev1.Pod { + return &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + Namespace: "test-ns", + Labels: map[string]string{catalogLabelKey: "test-catalog"}, + }, + Spec: corev1.PodSpec{ + NodeName: "worker-1", + }, + Status: corev1.PodStatus{ + Phase: corev1.PodRunning, + ContainerStatuses: []corev1.ContainerStatus{ + {Name: "registry", ImageID: "sha256:abc123"}, + }, + }, + } + } + + t.Run("create events always pass", func(t *testing.T) { + result := pred.Create(event.CreateEvent{Object: basePod()}) + require.True(t, result) + }) + + t.Run("delete events always pass", func(t *testing.T) { + result := pred.Delete(event.DeleteEvent{Object: basePod()}) + require.True(t, result) + }) + + t.Run("generic events always pass", func(t *testing.T) { + result := pred.Generic(event.GenericEvent{Object: basePod()}) + require.True(t, result) + }) + + t.Run("update: phase change passes", func(t *testing.T) { + oldPod := basePod() + newPod := basePod() + newPod.Status.Phase = corev1.PodSucceeded + + result := pred.Update(event.UpdateEvent{ObjectOld: oldPod, ObjectNew: newPod}) + require.True(t, result) + }) + + t.Run("update: node name change passes", func(t *testing.T) { + oldPod := basePod() + newPod := basePod() + 
newPod.Spec.NodeName = "worker-2" + + result := pred.Update(event.UpdateEvent{ObjectOld: oldPod, ObjectNew: newPod}) + require.True(t, result) + }) + + t.Run("update: imageID change passes", func(t *testing.T) { + oldPod := basePod() + newPod := basePod() + newPod.Status.ContainerStatuses[0].ImageID = "sha256:def456" + + result := pred.Update(event.UpdateEvent{ObjectOld: oldPod, ObjectNew: newPod}) + require.True(t, result) + }) + + t.Run("update: no relevant change is filtered out", func(t *testing.T) { + oldPod := basePod() + newPod := basePod() + // Only an annotation change - should be filtered + newPod.Annotations = map[string]string{"foo": "bar"} + + result := pred.Update(event.UpdateEvent{ObjectOld: oldPod, ObjectNew: newPod}) + require.False(t, result) + }) + + t.Run("update: non-Pod objects return false", func(t *testing.T) { + svc := &corev1.Service{ObjectMeta: metav1.ObjectMeta{Name: "svc"}} + result := pred.Update(event.UpdateEvent{ObjectOld: svc, ObjectNew: svc}) + require.False(t, result) + }) +} + +// --- mapPodToCatalogSource tests --- + +func TestMapPodToCatalogSource(t *testing.T) { + tt := []struct { + name string + obj client.Object + expected []reconcile.Request + }{ + { + name: "pod with valid catalog label enqueues request", + obj: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + Namespace: "olm", + Labels: map[string]string{catalogLabelKey: "my-catalog"}, + }, + }, + expected: []reconcile.Request{ + {NamespacedName: types.NamespacedName{Name: "my-catalog", Namespace: "olm"}}, + }, + }, + { + name: "pod with empty catalog label value is ignored", + obj: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + Namespace: "olm", + Labels: map[string]string{catalogLabelKey: ""}, + }, + }, + expected: nil, + }, + { + name: "pod without catalog label is ignored", + obj: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + Namespace: "olm", + Labels: map[string]string{"other": "label"}, + }, + }, + 
expected: nil, + }, + { + name: "pod with no labels is ignored", + obj: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + Namespace: "olm", + }, + }, + expected: nil, + }, + { + name: "non-pod object is ignored", + obj: &corev1.Service{ObjectMeta: metav1.ObjectMeta{Name: "svc", Namespace: "olm"}}, + expected: nil, + }, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + result := mapPodToCatalogSource(context.Background(), tc.obj) + require.Equal(t, tc.expected, result) + }) + } +} + +// --- Reconcile deletion cleanup test --- + +func TestReconcile_CatalogSourceDeleted_CleansUpResources(t *testing.T) { + scheme := testScheme() + ctx := context.Background() + name := resourceName("test-catalog") + + // Pre-create all lifecycle-server resources as if they had been created by a previous reconcile + deploy := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: "test-ns"}, + } + svc := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: "test-ns"}, + } + sa := &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: "test-ns"}, + } + np := &networkingv1.NetworkPolicy{ + ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: "test-ns"}, + } + + cl := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(deploy, svc, sa, np). 
+ Build() + + r := testReconciler(cl) + + // Reconcile with CatalogSource absent (deleted) + result, err := r.Reconcile(ctx, ctrl.Request{ + NamespacedName: types.NamespacedName{Name: "test-catalog", Namespace: "test-ns"}, + }) + require.NoError(t, err) + require.Equal(t, ctrl.Result{}, result) + + // Verify all 4 resource types are deleted + err = cl.Get(ctx, types.NamespacedName{Name: name, Namespace: "test-ns"}, &appsv1.Deployment{}) + require.True(t, errors.IsNotFound(err), "Deployment should be deleted") + + err = cl.Get(ctx, types.NamespacedName{Name: name, Namespace: "test-ns"}, &corev1.Service{}) + require.True(t, errors.IsNotFound(err), "Service should be deleted") + + err = cl.Get(ctx, types.NamespacedName{Name: name, Namespace: "test-ns"}, &corev1.ServiceAccount{}) + require.True(t, errors.IsNotFound(err), "ServiceAccount should be deleted") + + err = cl.Get(ctx, types.NamespacedName{Name: name, Namespace: "test-ns"}, &networkingv1.NetworkPolicy{}) + require.True(t, errors.IsNotFound(err), "NetworkPolicy should be deleted") + + // Verify CRB was reconciled (should exist with no subjects since no CatalogSources remain) + var crb rbacv1.ClusterRoleBinding + err = cl.Get(ctx, types.NamespacedName{Name: clusterRoleBindingName}, &crb) + require.NoError(t, err) + require.Empty(t, crb.Subjects) +} + +// --- Reconcile idempotency test --- + +func TestReconcile_Idempotent(t *testing.T) { + scheme := testScheme() + ctx := context.Background() + cs := newCatalogSource("test-catalog", "test-ns", nil) + pod := catalogPod("test-catalog", "test-ns", "worker-1", "sha256:abc123", corev1.PodRunning) + + cl := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(cs, pod). 
+ Build() + + r := testReconciler(cl) + req := ctrl.Request{ + NamespacedName: types.NamespacedName{Name: "test-catalog", Namespace: "test-ns"}, + } + + // First reconcile + result, err := r.Reconcile(ctx, req) + require.NoError(t, err) + require.Equal(t, ctrl.Result{}, result) + + // Second reconcile (idempotent) + result, err = r.Reconcile(ctx, req) + require.NoError(t, err) + require.Equal(t, ctrl.Result{}, result) + + // Verify all resources still exist with correct state + name := resourceName("test-catalog") + + var sa corev1.ServiceAccount + err = cl.Get(ctx, types.NamespacedName{Name: name, Namespace: "test-ns"}, &sa) + require.NoError(t, err) + require.Equal(t, appLabelVal, sa.Labels[appLabelKey]) + + var svc corev1.Service + err = cl.Get(ctx, types.NamespacedName{Name: name, Namespace: "test-ns"}, &svc) + require.NoError(t, err) + require.Equal(t, int32(8443), svc.Spec.Ports[0].Port) + + var deploy appsv1.Deployment + err = cl.Get(ctx, types.NamespacedName{Name: name, Namespace: "test-ns"}, &deploy) + require.NoError(t, err) + require.Equal(t, r.ServerImage, deploy.Spec.Template.Spec.Containers[0].Image) + + var np networkingv1.NetworkPolicy + err = cl.Get(ctx, types.NamespacedName{Name: name, Namespace: "test-ns"}, &np) + require.NoError(t, err) + + var crb rbacv1.ClusterRoleBinding + err = cl.Get(ctx, types.NamespacedName{Name: clusterRoleBindingName}, &crb) + require.NoError(t, err) + require.Equal(t, clusterRoleName, crb.RoleRef.Name) +} diff --git a/pkg/lifecycle-controller/tls.go b/pkg/lifecycle-controller/tls.go new file mode 100644 index 0000000000..7c62e6a599 --- /dev/null +++ b/pkg/lifecycle-controller/tls.go @@ -0,0 +1,54 @@ +package controllers + +import ( + "crypto/tls" + "slices" + "sync" + + configv1 "github.com/openshift/api/config/v1" + tlsutil "github.com/openshift/controller-runtime-common/pkg/tls" +) + +// TLSConfigProvider provides thread-safe access to dynamically updated TLS configuration. 
+// It implements controllers.TLSConfigProvider interface. +type TLSConfigProvider struct { + mu sync.RWMutex + getCertificateFunc func(info *tls.ClientHelloInfo) (*tls.Certificate, error) + + profileSpec configv1.TLSProfileSpec + + tlsConfig *tls.Config + unsupportedCiphers []string +} + +// NewTLSConfigProvider creates a new TLSConfigProvider with the given initial profileSpec. +func NewTLSConfigProvider(getCertificateFunc func(*tls.ClientHelloInfo) (*tls.Certificate, error), initial configv1.TLSProfileSpec) *TLSConfigProvider { + p := &TLSConfigProvider{getCertificateFunc: getCertificateFunc} + p.UpdateProfile(initial) + return p +} + +// Get returns the current TLS configuration. +func (p *TLSConfigProvider) Get() (*tls.Config, []string) { + p.mu.RLock() + defer p.mu.RUnlock() + return p.tlsConfig.Clone(), slices.Clone(p.unsupportedCiphers) +} + +// UpdateProfile sets a new TLS profile spec. +func (p *TLSConfigProvider) UpdateProfile(profileSpec configv1.TLSProfileSpec) { + p.mu.Lock() + defer p.mu.Unlock() + p.profileSpec = profileSpec + + p.tlsConfig, p.unsupportedCiphers = p.generateTLSConfig() +} + +func (p *TLSConfigProvider) generateTLSConfig() (*tls.Config, []string) { + tlsConfigFunc, unsupportedCiphers := tlsutil.NewTLSConfigFromProfile(p.profileSpec) + tlsConfig := &tls.Config{ + GetCertificate: p.getCertificateFunc, + } + tlsConfigFunc(tlsConfig) + return tlsConfig, unsupportedCiphers +} diff --git a/pkg/lifecycle-controller/tls_test.go b/pkg/lifecycle-controller/tls_test.go new file mode 100644 index 0000000000..7db43881e0 --- /dev/null +++ b/pkg/lifecycle-controller/tls_test.go @@ -0,0 +1,122 @@ +package controllers + +import ( + "crypto/tls" + "sync" + "testing" + + configv1 "github.com/openshift/api/config/v1" + "github.com/stretchr/testify/require" +) + +func dummyGetCertificate(_ *tls.ClientHelloInfo) (*tls.Certificate, error) { + return &tls.Certificate{}, nil +} + +func tls12Profile() configv1.TLSProfileSpec { + return 
configv1.TLSProfileSpec{ + MinTLSVersion: configv1.VersionTLS12, + Ciphers: []string{ + "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", + }, + } +} + +func tls13Profile() configv1.TLSProfileSpec { + return configv1.TLSProfileSpec{ + MinTLSVersion: configv1.VersionTLS13, + } +} + +func TestNewTLSConfigProvider(t *testing.T) { + profile := tls12Profile() + p := NewTLSConfigProvider(dummyGetCertificate, profile) + + require.NotNil(t, p) + + cfg, unsupported := p.Get() + require.NotNil(t, cfg) + require.Equal(t, uint16(tls.VersionTLS12), cfg.MinVersion) + require.Empty(t, unsupported) +} + +func TestTLSConfigProvider_Get_ReturnsClonedConfig(t *testing.T) { + profile := tls12Profile() + p := NewTLSConfigProvider(dummyGetCertificate, profile) + + cfg1, _ := p.Get() + cfg2, _ := p.Get() + + // Modifying the returned config should not affect the provider + cfg1.MinVersion = tls.VersionTLS11 + cfg2After, _ := p.Get() + require.Equal(t, uint16(tls.VersionTLS12), cfg2After.MinVersion) + + // Two successive Gets should return equivalent but distinct configs + require.NotSame(t, cfg1, cfg2) +} + +func TestTLSConfigProvider_UpdateProfile(t *testing.T) { + initialProfile := tls12Profile() + p := NewTLSConfigProvider(dummyGetCertificate, initialProfile) + + cfg, _ := p.Get() + require.Equal(t, uint16(tls.VersionTLS12), cfg.MinVersion) + + // Update to TLS 1.3 + newProfile := tls13Profile() + p.UpdateProfile(newProfile) + + cfg, _ = p.Get() + require.Equal(t, uint16(tls.VersionTLS13), cfg.MinVersion) +} + +func TestTLSConfigProvider_GetCertificatePreserved(t *testing.T) { + called := false + getCert := func(_ *tls.ClientHelloInfo) (*tls.Certificate, error) { + called = true + return &tls.Certificate{}, nil + } + + p := NewTLSConfigProvider(getCert, tls12Profile()) + cfg, _ := p.Get() + + require.NotNil(t, cfg.GetCertificate) + _, err := cfg.GetCertificate(nil) + require.NoError(t, err) + require.True(t, called, "getCertificate function should be 
preserved in config") +} + +func TestTLSConfigProvider_ConcurrentAccess(t *testing.T) { + p := NewTLSConfigProvider(dummyGetCertificate, tls12Profile()) + + const goroutines = 50 + var wg sync.WaitGroup + wg.Add(goroutines * 2) + + // Half the goroutines read, half update + for i := range goroutines { + go func() { + defer wg.Done() + cfg, _ := p.Get() + require.NotNil(t, cfg) + }() + go func(i int) { + defer wg.Done() + var profile configv1.TLSProfileSpec + if i%2 == 0 { + profile = tls12Profile() + } else { + profile = tls13Profile() + } + p.UpdateProfile(profile) + }(i) + } + + wg.Wait() + + // Provider should still be functional after concurrent access + cfg, _ := p.Get() + require.NotNil(t, cfg) +} diff --git a/pkg/lifecycle-server/fbc.go b/pkg/lifecycle-server/fbc.go new file mode 100644 index 0000000000..2e0bdf0d14 --- /dev/null +++ b/pkg/lifecycle-server/fbc.go @@ -0,0 +1,115 @@ +/* +Copyright 2025. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package server + +import ( + "context" + "encoding/json" + "os" + "regexp" + "sync" + + "github.com/operator-framework/operator-registry/alpha/declcfg" + "k8s.io/apimachinery/pkg/util/sets" +) + +// versionPattern matches API versions like v1, v1alpha1, v2beta3 +// Matches: v1, v1alpha1, v1beta1, v200beta300 +// Does not match: 1, v0, v1beta0 +const versionPattern = `v[1-9][0-9]*(?:(?:alpha|beta)[1-9][0-9]*)?` + +// schemaVersionRegex matches lifecycle schema versions in FBC blobs +var schemaVersionRegex = regexp.MustCompile(`^io\.openshift\.operators\.lifecycles\.(` + versionPattern + `)$`) + +// LifecycleIndex maps schema version -> package name -> raw JSON blob +type LifecycleIndex map[string]map[string]json.RawMessage + +// LoadLifecycleData loads lifecycle blobs from FBC files at the given path +func LoadLifecycleData(fbcPath string) (LifecycleIndex, error) { + result := make(LifecycleIndex) + var mu sync.Mutex + + // Check if path exists + if _, err := os.Stat(fbcPath); os.IsNotExist(err) { + return result, nil + } + + root := os.DirFS(fbcPath) + err := declcfg.WalkMetasFS(context.Background(), root, func(path string, meta *declcfg.Meta, err error) error { + if err != nil { + return nil // Skip errors, continue walking + } + if meta == nil { + return nil + } + + // Check if schema matches our pattern + matches := schemaVersionRegex.FindStringSubmatch(meta.Schema) + if matches == nil { + return nil + } + schemaVersion := matches[1] // e.g., "v1alpha1" + + if meta.Package == "" { + return nil + } + + // Store in index (thread-safe) + mu.Lock() + if result[schemaVersion] == nil { + result[schemaVersion] = make(map[string]json.RawMessage) + } + result[schemaVersion][meta.Package] = meta.Blob + mu.Unlock() + + return nil + }) + + if err != nil { + return nil, err + } + + return result, nil +} + +// CountBlobs returns the total number of blobs in the index +func (index LifecycleIndex) CountBlobs() int { + count := 0 + for _, packages := range index { + count 
+= len(packages) + } + return count +} + +func (index LifecycleIndex) CountPackages() int { + pkgs := sets.New[string]() + for _, packages := range index { + for pkg := range packages { + pkgs.Insert(pkg) + } + } + return pkgs.Len() +} + +// ListVersions returns the list of versions available in the index +func (index LifecycleIndex) ListVersions() []string { + versions := make([]string, 0, len(index)) + for v := range index { + versions = append(versions, v) + } + return versions +} diff --git a/pkg/lifecycle-server/fbc_test.go b/pkg/lifecycle-server/fbc_test.go new file mode 100644 index 0000000000..92c5e3f3a5 --- /dev/null +++ b/pkg/lifecycle-server/fbc_test.go @@ -0,0 +1,420 @@ +package server + +import ( + "encoding/json" + "os" + "path/filepath" + "sort" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestSchemaVersionRegex(t *testing.T) { + tt := []struct { + name string + input string + matches bool + version string + }{ + { + name: "v1", + input: "io.openshift.operators.lifecycles.v1", + matches: true, + version: "v1", + }, + { + name: "v1alpha1", + input: "io.openshift.operators.lifecycles.v1alpha1", + matches: true, + version: "v1alpha1", + }, + { + name: "v1beta1", + input: "io.openshift.operators.lifecycles.v1beta1", + matches: true, + version: "v1beta1", + }, + { + name: "v2beta3", + input: "io.openshift.operators.lifecycles.v2beta3", + matches: true, + version: "v2beta3", + }, + { + name: "v200beta300", + input: "io.openshift.operators.lifecycles.v200beta300", + matches: true, + version: "v200beta300", + }, + { + name: "missing v prefix", + input: "io.openshift.operators.lifecycles.1", + matches: false, + }, + { + name: "v0 not allowed", + input: "io.openshift.operators.lifecycles.v0", + matches: false, + }, + { + name: "v1beta0 not allowed", + input: "io.openshift.operators.lifecycles.v1beta0", + matches: false, + }, + { + name: "v0alpha1 not allowed", + input: "io.openshift.operators.lifecycles.v0alpha1", + matches: false, + }, + { 
+ name: "random schema", + input: "olm.package", + matches: false, + }, + { + name: "empty string", + input: "", + matches: false, + }, + { + name: "partial prefix match", + input: "io.openshift.operators.lifecycles.", + matches: false, + }, + { + name: "wrong prefix", + input: "io.openshift.operators.lifecycle.v1", + matches: false, + }, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + matches := schemaVersionRegex.FindStringSubmatch(tc.input) + if tc.matches { + require.NotNil(t, matches, "expected %q to match", tc.input) + require.Equal(t, tc.version, matches[1]) + } else { + require.Nil(t, matches, "expected %q not to match", tc.input) + } + }) + } +} + +// writeFBCFile writes a JSON file containing FBC meta objects to the given directory. +// Each object must be a map with "schema", "package", and other fields that become the blob. +func writeFBCFile(t *testing.T, dir, filename string, objects ...map[string]any) { + t.Helper() + var data []byte + for _, obj := range objects { + b, err := json.Marshal(obj) + require.NoError(t, err) + data = append(data, b...) 
+ data = append(data, '\n') + } + err := os.WriteFile(filepath.Join(dir, filename), data, 0644) + require.NoError(t, err) +} + +func TestLoadLifecycleData(t *testing.T) { + tt := []struct { + name string + setup func(t *testing.T) string + expectedIndex LifecycleIndex + expectErr bool + }{ + { + name: "non-existent path returns empty index", + setup: func(t *testing.T) string { + return filepath.Join(t.TempDir(), "does-not-exist") + }, + expectedIndex: LifecycleIndex{}, + }, + { + name: "empty directory returns empty index", + setup: func(t *testing.T) string { + return t.TempDir() + }, + expectedIndex: LifecycleIndex{}, + }, + { + name: "lifecycle blob is indexed correctly", + setup: func(t *testing.T) string { + dir := t.TempDir() + writeFBCFile(t, dir, "catalog.json", + map[string]any{ + "schema": "io.openshift.operators.lifecycles.v1alpha1", + "package": "my-operator", + "data": "test-value", + }, + ) + return dir + }, + expectedIndex: LifecycleIndex{ + "v1alpha1": { + "my-operator": json.RawMessage(`{"data":"test-value","package":"my-operator","schema":"io.openshift.operators.lifecycles.v1alpha1"}`), + }, + }, + }, + { + name: "non-lifecycle schemas are skipped", + setup: func(t *testing.T) string { + dir := t.TempDir() + writeFBCFile(t, dir, "catalog.json", + map[string]any{ + "schema": "olm.package", + "package": "my-operator", + "name": "my-operator", + }, + map[string]any{ + "schema": "olm.channel", + "package": "my-operator", + "name": "stable", + }, + ) + return dir + }, + expectedIndex: LifecycleIndex{}, + }, + { + name: "multiple versions and packages", + setup: func(t *testing.T) string { + dir := t.TempDir() + writeFBCFile(t, dir, "catalog.json", + map[string]any{ + "schema": "io.openshift.operators.lifecycles.v1alpha1", + "package": "operator-a", + "status": "active", + }, + map[string]any{ + "schema": "io.openshift.operators.lifecycles.v1alpha1", + "package": "operator-b", + "status": "deprecated", + }, + map[string]any{ + "schema": 
"io.openshift.operators.lifecycles.v1", + "package": "operator-a", + "level": "ga", + }, + ) + return dir + }, + expectedIndex: LifecycleIndex{ + "v1alpha1": { + "operator-a": json.RawMessage(`{"package":"operator-a","schema":"io.openshift.operators.lifecycles.v1alpha1","status":"active"}`), + "operator-b": json.RawMessage(`{"package":"operator-b","schema":"io.openshift.operators.lifecycles.v1alpha1","status":"deprecated"}`), + }, + "v1": { + "operator-a": json.RawMessage(`{"level":"ga","package":"operator-a","schema":"io.openshift.operators.lifecycles.v1"}`), + }, + }, + }, + { + name: "empty package name is skipped", + setup: func(t *testing.T) string { + dir := t.TempDir() + writeFBCFile(t, dir, "catalog.json", + map[string]any{ + "schema": "io.openshift.operators.lifecycles.v1alpha1", + "data": "should-be-skipped", + }, + ) + return dir + }, + expectedIndex: LifecycleIndex{}, + }, + { + name: "mixed lifecycle and non-lifecycle schemas", + setup: func(t *testing.T) string { + dir := t.TempDir() + writeFBCFile(t, dir, "catalog.json", + map[string]any{ + "schema": "olm.package", + "package": "my-operator", + "name": "my-operator", + }, + map[string]any{ + "schema": "io.openshift.operators.lifecycles.v1alpha1", + "package": "my-operator", + "eol": "2025-12-31", + }, + ) + return dir + }, + expectedIndex: LifecycleIndex{ + "v1alpha1": { + "my-operator": json.RawMessage(`{"eol":"2025-12-31","package":"my-operator","schema":"io.openshift.operators.lifecycles.v1alpha1"}`), + }, + }, + }, + { + name: "corrupted entries are silently skipped, valid entries still loaded", + setup: func(t *testing.T) string { + dir := t.TempDir() + // Write a valid lifecycle blob + writeFBCFile(t, dir, "valid.json", + map[string]any{ + "schema": "io.openshift.operators.lifecycles.v1alpha1", + "package": "good-operator", + "status": "active", + }, + ) + // Write a file with invalid JSON (corrupted entry) + err := os.WriteFile(filepath.Join(dir, "corrupted.json"), []byte("not valid json{{{"), 
0644) + require.NoError(t, err) + return dir + }, + // WalkMetasFS passes per-meta errors to the callback, where LoadLifecycleData + // silently skips them (fbc.go:53-54). No error is returned overall, and + // valid entries from other files are still loaded successfully. + expectedIndex: LifecycleIndex{ + "v1alpha1": { + "good-operator": json.RawMessage(`{"package":"good-operator","schema":"io.openshift.operators.lifecycles.v1alpha1","status":"active"}`), + }, + }, + }, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + path := tc.setup(t) + result, err := LoadLifecycleData(path) + if tc.expectErr { + require.Error(t, err) + return + } + require.NoError(t, err) + + // Compare version keys + require.Equal(t, len(tc.expectedIndex), len(result), "version count mismatch") + for version, expectedPkgs := range tc.expectedIndex { + resultPkgs, ok := result[version] + require.True(t, ok, "missing version %q in result", version) + require.Equal(t, len(expectedPkgs), len(resultPkgs), "package count mismatch for version %q", version) + for pkg, expectedBlob := range expectedPkgs { + resultBlob, ok := resultPkgs[pkg] + require.True(t, ok, "missing package %q in version %q", pkg, version) + // Compare as unmarshalled maps since JSON key order is not guaranteed + var expectedMap, resultMap map[string]any + require.NoError(t, json.Unmarshal(expectedBlob, &expectedMap)) + require.NoError(t, json.Unmarshal(resultBlob, &resultMap)) + require.Equal(t, expectedMap, resultMap) + } + } + }) + } +} + +func TestLifecycleIndex_CountBlobs(t *testing.T) { + tt := []struct { + name string + index LifecycleIndex + expected int + }{ + { + name: "empty index", + index: LifecycleIndex{}, + expected: 0, + }, + { + name: "single version single package", + index: LifecycleIndex{ + "v1": {"pkg-a": json.RawMessage(`{}`)}, + }, + expected: 1, + }, + { + name: "multiple versions and packages", + index: LifecycleIndex{ + "v1alpha1": { + "pkg-a": json.RawMessage(`{}`), + "pkg-b": 
json.RawMessage(`{}`), + }, + "v1": { + "pkg-a": json.RawMessage(`{}`), + }, + }, + expected: 3, + }, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + require.Equal(t, tc.expected, tc.index.CountBlobs()) + }) + } +} + +func TestLifecycleIndex_CountPackages(t *testing.T) { + tt := []struct { + name string + index LifecycleIndex + expected int + }{ + { + name: "empty index", + index: LifecycleIndex{}, + expected: 0, + }, + { + name: "same package across versions counted once", + index: LifecycleIndex{ + "v1alpha1": {"pkg-a": json.RawMessage(`{}`)}, + "v1": {"pkg-a": json.RawMessage(`{}`)}, + }, + expected: 1, + }, + { + name: "different packages counted separately", + index: LifecycleIndex{ + "v1alpha1": { + "pkg-a": json.RawMessage(`{}`), + "pkg-b": json.RawMessage(`{}`), + }, + "v1": { + "pkg-c": json.RawMessage(`{}`), + }, + }, + expected: 3, + }, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + require.Equal(t, tc.expected, tc.index.CountPackages()) + }) + } +} + +func TestLifecycleIndex_ListVersions(t *testing.T) { + tt := []struct { + name string + index LifecycleIndex + expected []string + }{ + { + name: "empty index", + index: LifecycleIndex{}, + expected: []string{}, + }, + { + name: "multiple versions", + index: LifecycleIndex{ + "v1alpha1": {"pkg-a": json.RawMessage(`{}`)}, + "v1": {"pkg-a": json.RawMessage(`{}`)}, + "v2beta1": {"pkg-b": json.RawMessage(`{}`)}, + }, + expected: []string{"v1", "v1alpha1", "v2beta1"}, + }, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + result := tc.index.ListVersions() + sort.Strings(result) + sort.Strings(tc.expected) + require.Equal(t, tc.expected, result) + }) + } +} diff --git a/pkg/lifecycle-server/server.go b/pkg/lifecycle-server/server.go new file mode 100644 index 0000000000..7571ccc017 --- /dev/null +++ b/pkg/lifecycle-server/server.go @@ -0,0 +1,66 @@ +/* +Copyright 2025. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package server + +import ( + "net/http" + + "github.com/go-logr/logr" +) + +// NewHandler creates a new HTTP handler for the lifecycle API +func NewHandler(data LifecycleIndex, log logr.Logger) http.Handler { + mux := http.NewServeMux() + + // GET /api/{version}/lifecycles/{package} + mux.HandleFunc("GET /api/{version}/lifecycles/{package}", func(w http.ResponseWriter, r *http.Request) { + version := r.PathValue("version") + pkg := r.PathValue("package") + + // If no lifecycle data is available, return 503 Service Unavailable + if len(data) == 0 { + log.V(1).Info("no lifecycle data available, returning 503") + http.Error(w, "No lifecycle data available", http.StatusServiceUnavailable) + return + } + + // Look up version in index + versionData, ok := data[version] + if !ok { + log.V(1).Info("version not found", "version", version, "package", pkg) + http.NotFound(w, r) + return + } + + // Look up package in version + rawJSON, ok := versionData[pkg] + if !ok { + log.V(1).Info("package not found", "version", version, "package", pkg) + http.NotFound(w, r) + return + } + + log.V(1).Info("returning lifecycle data", "version", version, "package", pkg) + + w.Header().Set("Content-Type", "application/json") + if _, err := w.Write(rawJSON); err != nil { + log.V(1).Error(err, "failed to write response") + } + }) + + return mux +} diff --git a/pkg/lifecycle-server/server_test.go b/pkg/lifecycle-server/server_test.go new file mode 100644 index 
0000000000..68ed3ef15a --- /dev/null +++ b/pkg/lifecycle-server/server_test.go @@ -0,0 +1,145 @@ +package server + +import ( + "encoding/json" + "io" + "net/http" + "net/http/httptest" + "testing" + + "github.com/go-logr/logr" + "github.com/stretchr/testify/require" +) + +func TestNewHandler(t *testing.T) { + testBlob := json.RawMessage(`{"eol":"2025-12-31","status":"active"}`) + + tt := []struct { + name string + data LifecycleIndex + method string + path string + expectedStatus int + expectedBody string + expectedCT string + }{ + { + name: "valid version and package returns 200 with JSON", + data: LifecycleIndex{ + "v1alpha1": { + "my-operator": testBlob, + }, + }, + method: http.MethodGet, + path: "/api/v1alpha1/lifecycles/my-operator", + expectedStatus: http.StatusOK, + expectedBody: `{"eol":"2025-12-31","status":"active"}`, + expectedCT: "application/json", + }, + { + name: "empty data returns 503", + data: LifecycleIndex{}, + method: http.MethodGet, + path: "/api/v1alpha1/lifecycles/my-operator", + expectedStatus: http.StatusServiceUnavailable, + }, + { + name: "unknown version returns 404", + data: LifecycleIndex{ + "v1alpha1": { + "my-operator": testBlob, + }, + }, + method: http.MethodGet, + path: "/api/v2/lifecycles/my-operator", + expectedStatus: http.StatusNotFound, + }, + { + name: "known version unknown package returns 404", + data: LifecycleIndex{ + "v1alpha1": { + "my-operator": testBlob, + }, + }, + method: http.MethodGet, + path: "/api/v1alpha1/lifecycles/other-operator", + expectedStatus: http.StatusNotFound, + }, + { + name: "POST method not allowed", + data: LifecycleIndex{ + "v1alpha1": { + "my-operator": testBlob, + }, + }, + method: http.MethodPost, + path: "/api/v1alpha1/lifecycles/my-operator", + expectedStatus: http.StatusMethodNotAllowed, + }, + { + name: "wrong path returns 404", + data: LifecycleIndex{ + "v1alpha1": { + "my-operator": testBlob, + }, + }, + method: http.MethodGet, + path: "/wrong/path", + expectedStatus: 
http.StatusNotFound, + }, + { + name: "nil data (nil map) returns 503", + data: nil, + method: http.MethodGet, + path: "/api/v1alpha1/lifecycles/my-operator", + expectedStatus: http.StatusServiceUnavailable, + }, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + handler := NewHandler(tc.data, logr.Discard()) + + req := httptest.NewRequest(tc.method, tc.path, nil) + rec := httptest.NewRecorder() + handler.ServeHTTP(rec, req) + + resp := rec.Result() + defer resp.Body.Close() + require.Equal(t, tc.expectedStatus, resp.StatusCode) + + if tc.expectedBody != "" { + body, err := io.ReadAll(resp.Body) + require.NoError(t, err) + require.Equal(t, tc.expectedBody, string(body)) + } + + if tc.expectedCT != "" { + require.Equal(t, tc.expectedCT, resp.Header.Get("Content-Type")) + } + }) + } +} + +func TestNewHandler_RawBlobReturnedByteForByte(t *testing.T) { + // Verify that the raw JSON blob is returned exactly as stored, not re-serialized. + // This matters because the handler writes rawJSON directly with w.Write(rawJSON). 
+ originalBlob := json.RawMessage(`{"keys":"in-specific-order","numbers":42,"nested":{"a":1}}`) + + data := LifecycleIndex{ + "v1alpha1": { + "test-pkg": originalBlob, + }, + } + + handler := NewHandler(data, logr.Discard()) + req := httptest.NewRequest(http.MethodGet, "/api/v1alpha1/lifecycles/test-pkg", nil) + rec := httptest.NewRecorder() + handler.ServeHTTP(rec, req) + + resp := rec.Result() + defer resp.Body.Close() + body, err := io.ReadAll(resp.Body) + require.NoError(t, err) + require.Equal(t, string(originalBlob), string(body), "response body should be byte-for-byte identical to the stored blob") +} diff --git a/scripts/generate_crds_manifests.sh b/scripts/generate_crds_manifests.sh index 0ddc0dddd7..8dfe2929e6 100755 --- a/scripts/generate_crds_manifests.sh +++ b/scripts/generate_crds_manifests.sh @@ -18,13 +18,11 @@ export GOFLAGS="-mod=vendor" source .bingo/variables.env YQ="go run ./vendor/github.com/mikefarah/yq/v3/" -CONTROLLER_GEN="go run ./vendor/sigs.k8s.io/controller-tools/cmd/controller-gen" ver=${OLM_VERSION:-"0.0.0-dev"} tmpdir="$(mktemp -p . -d 2>/dev/null || mktemp -d ./tmpdir.XXXXXXX)" chartdir="${tmpdir}/chart" crddir="${chartdir}/crds" -crdsrcdir="${tmpdir}/operators" SED="sed" if ! command -v ${SED} &> /dev/null; then @@ -44,21 +42,15 @@ fi cp -R "${ROOT_DIR}/staging/operator-lifecycle-manager/deploy/chart/" "${chartdir}" cp "${ROOT_DIR}"/values*.yaml "${tmpdir}" -cp -R "${ROOT_DIR}/staging/api/pkg/operators/" ${crdsrcdir} rm -rf ./manifests/* ${crddir}/* trap "rm -rf ${tmpdir}" EXIT -${CONTROLLER_GEN} crd:crdVersions=v1 output:crd:dir=${crddir} paths=${crdsrcdir}/... -${CONTROLLER_GEN} schemapatch:manifests=${crddir} output:dir=${crddir} paths=${crdsrcdir}/... 
- -${YQ} w --inplace ${crddir}/operators.coreos.com_clusterserviceversions.yaml spec.versions[0].schema.openAPIV3Schema.properties.spec.properties.install.properties.spec.properties.deployments.items.properties.spec.properties.template.properties.spec.properties.containers.items.properties.ports.items.properties.protocol.default TCP -${YQ} w --inplace ${crddir}/operators.coreos.com_clusterserviceversions.yaml spec.versions[0].schema.openAPIV3Schema.properties.spec.properties.install.properties.spec.properties.deployments.items.properties.spec.properties.template.properties.spec.properties.initContainers.items.properties.ports.items.properties.protocol.default TCP -${YQ} w --inplace ${crddir}/operators.coreos.com_clusterserviceversions.yaml spec.versions[0].schema.openAPIV3Schema.properties.spec.properties.install.properties.spec.properties.deployments.items.properties.spec.properties.template.properties.metadata.x-kubernetes-preserve-unknown-fields true -${YQ} d --inplace ${crddir}/operators.coreos.com_operatorconditions.yaml 'spec.versions[*].schema.openAPIV3Schema.properties.spec.properties.overrides.items.required(.==lastTransitionTime)' +# Copy upstream CRDs directly instead of regenerating with controller-gen +cp "${ROOT_DIR}"/staging/api/crds/*.yaml "${crddir}/" +# Rename CRD files to match OpenShift manifest naming convention for f in ${crddir}/*.yaml ; do - ${YQ} d --inplace $f status mv -v "$f" "${crddir}/0000_50_olm_00-$(basename $f | ${SED} 's/^.*_\([^.]\+\)\.yaml/\1.crd.yaml/')" done @@ -556,6 +548,270 @@ subjects: name: system:authenticated EOF +cat << EOF > manifests/0000_50_olm_08-lifecycle-controller.deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: lifecycle-controller + namespace: openshift-operator-lifecycle-manager + labels: + app: olm-lifecycle-controller + annotations: + release.openshift.io/feature-set: "TechPreviewNoUpgrade" +spec: + strategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 0 + maxSurge: 1 + 
replicas: 1 + selector: + matchLabels: + app: olm-lifecycle-controller + template: + metadata: + annotations: + target.workload.openshift.io/management: '{"effect": "PreferredDuringScheduling"}' + openshift.io/required-scc: restricted-v2 + kubectl.kubernetes.io/default-container: lifecycle-controller + labels: + app: olm-lifecycle-controller + spec: + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + serviceAccountName: lifecycle-controller + priorityClassName: "system-cluster-critical" + containers: + - name: lifecycle-controller + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: + drop: ["ALL"] + command: + - /bin/lifecycle-controller + args: + - start + - --catalog-source-field-selector=metadata.namespace=openshift-marketplace,metadata.name=redhat-operators + - --tls-cert=/var/run/secrets/serving-cert/tls.crt + - --tls-key=/var/run/secrets/serving-cert/tls.key + image: quay.io/operator-framework/olm@sha256:de396b540b82219812061d0d753440d5655250c621c753ed1dc67d6154741607 + imagePullPolicy: IfNotPresent + env: + - name: RELEASE_VERSION + value: "0.0.1-snapshot" + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LIFECYCLE_SERVER_IMAGE + value: quay.io/operator-framework/olm@sha256:de396b540b82219812061d0d753440d5655250c621c753ed1dc67d6154741607 + - name: GOMEMLIMIT + value: "5MiB" + resources: + requests: + cpu: 10m + memory: 10Mi + ports: + - containerPort: 8081 + name: health + - containerPort: 8443 + name: metrics + protocol: TCP + volumeMounts: + - name: serving-cert + mountPath: /var/run/secrets/serving-cert + readOnly: true + livenessProbe: + httpGet: + path: /healthz + port: health + scheme: HTTP + initialDelaySeconds: 30 + readinessProbe: + httpGet: + path: /healthz + port: health + scheme: HTTP + initialDelaySeconds: 30 + terminationMessagePolicy: FallbackToLogsOnError + nodeSelector: + kubernetes.io/os: linux + 
node-role.kubernetes.io/control-plane: "" + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + - effect: NoExecute + key: node.kubernetes.io/unreachable + operator: Exists + tolerationSeconds: 120 + - effect: NoExecute + key: node.kubernetes.io/not-ready + operator: Exists + tolerationSeconds: 120 + volumes: + - name: serving-cert + secret: + secretName: lifecycle-controller-serving-cert +EOF + +cat << EOF > manifests/0000_50_olm_08-lifecycle-controller.service.yaml +apiVersion: v1 +kind: Service +metadata: + name: lifecycle-controller + namespace: openshift-operator-lifecycle-manager + annotations: + release.openshift.io/feature-set: "TechPreviewNoUpgrade" + service.beta.openshift.io/serving-cert-secret-name: lifecycle-controller-serving-cert +spec: + ports: + - name: metrics + port: 8443 + protocol: TCP + targetPort: metrics + selector: + app: olm-lifecycle-controller + type: ClusterIP +EOF + +cat << EOF > manifests/0000_50_olm_08-lifecycle-controller.networkpolicy.yaml +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: lifecycle-controller + namespace: openshift-operator-lifecycle-manager + annotations: + release.openshift.io/feature-set: "TechPreviewNoUpgrade" +spec: + podSelector: + matchLabels: + app: olm-lifecycle-controller + ingress: + - ports: + - port: 8443 + protocol: TCP + egress: + - ports: + - port: 6443 + protocol: TCP + - ports: + - port: 53 + protocol: TCP + - port: 53 + protocol: UDP + - port: 5353 + protocol: TCP + - port: 5353 + protocol: UDP + policyTypes: + - Ingress + - Egress +EOF + +cat << EOF > manifests/0000_50_olm_08-lifecycle-controller.rbac.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: lifecycle-controller + namespace: openshift-operator-lifecycle-manager + annotations: + release.openshift.io/feature-set: "TechPreviewNoUpgrade" +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: 
operator-lifecycle-manager-lifecycle-controller + annotations: + release.openshift.io/feature-set: "TechPreviewNoUpgrade" +rules: + # Read APIServer for TLS security profile configuration + - apiGroups: ["config.openshift.io"] + resources: ["apiservers"] + verbs: ["get", "list", "watch"] + # Watch CatalogSources cluster-wide + - apiGroups: ["operators.coreos.com"] + resources: ["catalogsources"] + verbs: ["get", "list", "watch"] + # Watch catalog pods cluster-wide + - apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list", "watch"] + # Manage lifecycle-server deployments + - apiGroups: ["apps"] + resources: ["deployments"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + # Manage lifecycle-server services + - apiGroups: [""] + resources: ["services"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + # Manage lifecycle-server serviceaccounts + - apiGroups: [""] + resources: ["serviceaccounts"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + # Manage lifecycle-server networkpolicies + - apiGroups: ["networking.k8s.io"] + resources: ["networkpolicies"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + # Manage lifecycle-server clusterrolebindings + - apiGroups: ["rbac.authorization.k8s.io"] + resources: ["clusterrolebindings"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + # Required to grant these permissions to lifecycle-server via CRB + - apiGroups: ["authentication.k8s.io"] + resources: ["tokenreviews"] + verbs: ["create"] + - apiGroups: ["authorization.k8s.io"] + resources: ["subjectaccessreviews"] + verbs: ["create"] + # Leader election + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "list", "watch", "create", "update", "delete"] + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "list", "watch", "create", "update", "delete"] + - apiGroups: [""] + resources: ["events"] + verbs: 
["create", "patch"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: operator-lifecycle-manager-lifecycle-controller + annotations: + release.openshift.io/feature-set: "TechPreviewNoUpgrade" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: operator-lifecycle-manager-lifecycle-controller +subjects: + - kind: ServiceAccount + name: lifecycle-controller + namespace: openshift-operator-lifecycle-manager +EOF + +cat << EOF > manifests/0000_50_olm_09-lifecycle-server.rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: operator-lifecycle-manager-lifecycle-server + annotations: + release.openshift.io/feature-set: "TechPreviewNoUpgrade" +rules: + # Required by kube-rbac-proxy for authn/authz + - apiGroups: ["authentication.k8s.io"] + resources: ["tokenreviews"] + verbs: ["create"] + - apiGroups: ["authorization.k8s.io"] + resources: ["subjectaccessreviews"] + verbs: ["create"] +EOF + + add_ibm_managed_cloud_annotations "${ROOT_DIR}/manifests" hypershift_manifests_dir="${ROOT_DIR}/manifests" diff --git a/vendor/github.com/openshift/controller-runtime-common/LICENSE b/vendor/github.com/openshift/controller-runtime-common/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/vendor/github.com/openshift/controller-runtime-common/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/openshift/controller-runtime-common/pkg/tls/controller.go b/vendor/github.com/openshift/controller-runtime-common/pkg/tls/controller.go new file mode 100644 index 0000000000..b7efbd93fc --- /dev/null +++ b/vendor/github.com/openshift/controller-runtime-common/pkg/tls/controller.go @@ -0,0 +1,144 @@ +/* +Copyright 2026 Red Hat, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package tls + +import ( + "context" + "fmt" + "reflect" + + "github.com/go-logr/logr" + configv1 "github.com/openshift/api/config/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +// SecurityProfileWatcher watches the APIServer object for TLS profile changes +// and triggers a graceful shutdown when the profile changes. +type SecurityProfileWatcher struct { + client.Client + + // InitialTLSProfileSpec is the TLS profile spec that was configured when the operator started. + InitialTLSProfileSpec configv1.TLSProfileSpec + + // OnProfileChange is a function that will be called when the TLS profile changes. + // It receives the reconcile context, old and new TLS profile specs. + // This allows the caller to make decisions based on the actual profile changes. + // + // The most common use case for this callback is + // to trigger a graceful shutdown of the operator + // to make it pick up the new configuration. + // + // Example: + // + // // Create a context that can be cancelled when there is a need to shut down the manager. + // ctx, cancel := context.WithCancel(ctrl.SetupSignalHandler()) + // defer cancel() + // + // watcher := &SecurityProfileWatcher{ + // OnProfileChange: func(ctx context.Context, old, new configv1.TLSProfileSpec) { + // logger.Infof("TLS profile has changed, initiating a shutdown to reload it. %q: %+v, %q: %+v", + // "old profile", old, + // "new profile", new, + // ) + // // Cancel the outer context to trigger a graceful shutdown of the manager. 
+ // cancel() + // }, + // } + OnProfileChange func(ctx context.Context, oldTLSProfileSpec, newTLSProfileSpec configv1.TLSProfileSpec) +} + +// SetupWithManager sets up the controller with the Manager. +func (r *SecurityProfileWatcher) SetupWithManager(mgr ctrl.Manager) error { + if err := ctrl.NewControllerManagedBy(mgr). + Named("tlssecurityprofilewatcher"). + For(&configv1.APIServer{}, builder.WithPredicates( + predicate.Funcs{ + // Only watch the "cluster" APIServer object. + CreateFunc: func(e event.CreateEvent) bool { + return e.Object.GetName() == APIServerName + }, + UpdateFunc: func(e event.UpdateEvent) bool { + return e.ObjectNew.GetName() == APIServerName + }, + DeleteFunc: func(e event.DeleteEvent) bool { + return e.Object.GetName() == APIServerName + }, + GenericFunc: func(e event.GenericEvent) bool { + return e.Object.GetName() == APIServerName + }, + }, + )). + // Override the default log constructor as it makes the logs very chatty. + WithLogConstructor(func(_ *reconcile.Request) logr.Logger { + return mgr.GetLogger().WithValues( + "controller", "tlssecurityprofilewatcher", + ) + }). + Complete(r); err != nil { + return fmt.Errorf("could not set up controller for TLS security profile watcher: %w", err) + } + + return nil +} + +// Reconcile watches for changes to the APIServer TLS profile and triggers a shutdown +// when the profile changes from the initial configuration. +func (r *SecurityProfileWatcher) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + logger := log.FromContext(ctx, "name", req.Name) + + logger.V(1).Info("Reconciling APIServer TLS profile") + defer logger.V(1).Info("Finished reconciling APIServer TLS profile") + + // Fetch the APIServer object. + apiServer := &configv1.APIServer{} + if err := r.Get(ctx, req.NamespacedName, apiServer); err != nil { + if apierrors.IsNotFound(err) { + // If the APIServer object is not found, we don't need to do anything. + // This could happen if the object was deleted. 
+ return ctrl.Result{}, nil + } + + return ctrl.Result{}, fmt.Errorf("failed to get APIServer %s: %w", req.NamespacedName.String(), err) + } + + // Get the current TLS profile spec. + currentTLSProfileSpec, err := GetTLSProfileSpec(apiServer.Spec.TLSSecurityProfile) + if err != nil { + return ctrl.Result{}, fmt.Errorf("failed to get TLS profile from APIServer %s: %w", req.NamespacedName.String(), err) + } + + // Compare the current TLS profile spec with the initial one. + if tlsProfileChanged := !reflect.DeepEqual(r.InitialTLSProfileSpec, currentTLSProfileSpec); tlsProfileChanged { + // TLS profile has changed, invoke the callback if it is set. + if r.OnProfileChange != nil { + r.OnProfileChange(ctx, r.InitialTLSProfileSpec, currentTLSProfileSpec) + } + + // Persist the new profile for future change detection. + r.InitialTLSProfileSpec = currentTLSProfileSpec + } + + // No need to requeue, as the callback will handle further actions. + return ctrl.Result{}, nil +} diff --git a/vendor/github.com/openshift/controller-runtime-common/pkg/tls/tls.go b/vendor/github.com/openshift/controller-runtime-common/pkg/tls/tls.go new file mode 100644 index 0000000000..6b33bd147e --- /dev/null +++ b/vendor/github.com/openshift/controller-runtime-common/pkg/tls/tls.go @@ -0,0 +1,155 @@ +/* +Copyright 2026 Red Hat, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package tls provides utilities for working with OpenShift TLS profiles. 
+package tls + +import ( + "context" + "crypto/tls" + "errors" + "fmt" + + configv1 "github.com/openshift/api/config/v1" + libgocrypto "github.com/openshift/library-go/pkg/crypto" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +const ( + // APIServerName is the name of the APIServer resource in the cluster. + APIServerName = "cluster" +) + +var ( + // ErrCustomProfileNil is returned when a custom TLS profile is specified but the Custom field is nil. + ErrCustomProfileNil = errors.New("custom TLS profile specified but Custom field is nil") + + // DefaultTLSCiphers are the default TLS ciphers for API servers. + DefaultTLSCiphers = configv1.TLSProfiles[configv1.TLSProfileIntermediateType].Ciphers //nolint:gochecknoglobals + // DefaultMinTLSVersion is the default minimum TLS version for API servers. + DefaultMinTLSVersion = configv1.TLSProfiles[configv1.TLSProfileIntermediateType].MinTLSVersion //nolint:gochecknoglobals +) + +// FetchAPIServerTLSProfile fetches the TLS profile spec configured in APIServer. +// If no profile is configured, the default profile is returned. +func FetchAPIServerTLSProfile(ctx context.Context, k8sClient client.Client) (configv1.TLSProfileSpec, error) { + apiServer := &configv1.APIServer{} + key := client.ObjectKey{Name: APIServerName} + + if err := k8sClient.Get(ctx, key, apiServer); err != nil { + return configv1.TLSProfileSpec{}, fmt.Errorf("failed to get APIServer %q: %w", key.String(), err) + } + + profile, err := GetTLSProfileSpec(apiServer.Spec.TLSSecurityProfile) + if err != nil { + return configv1.TLSProfileSpec{}, fmt.Errorf("failed to get TLS profile from APIServer %q: %w", key.String(), err) + } + + return profile, nil +} + +// GetTLSProfileSpec returns TLSProfileSpec for the given profile. +// If no profile is configured, the default profile is returned. 
+func GetTLSProfileSpec(profile *configv1.TLSSecurityProfile) (configv1.TLSProfileSpec, error) { + // Define the default profile (at the time of writing, this is the intermediate profile). + defaultProfile := *configv1.TLSProfiles[configv1.TLSProfileIntermediateType] + // If the profile is nil or the type is empty, return the default profile. + if profile == nil || profile.Type == "" { + return defaultProfile, nil + } + + // Get the profile type. + profileType := profile.Type + + // If the profile type is not custom, return the profile from the map. + if profileType != configv1.TLSProfileCustomType { + if tlsConfig, ok := configv1.TLSProfiles[profileType]; ok { + return *tlsConfig, nil + } + + // If the profile type is not found, return the default profile. + return defaultProfile, nil + } + + if profile.Custom == nil { + // If the custom profile is nil, return an error. + return configv1.TLSProfileSpec{}, ErrCustomProfileNil + } + + // Return the custom profile spec. + return profile.Custom.TLSProfileSpec, nil +} + +// NewTLSConfigFromProfile returns a function that configures a tls.Config based on the provided TLSProfileSpec, +// along with any cipher names from the profile that are not supported by the library-go crypto package. +// The returned function is intended to be used with controller-runtime's TLSOpts. +// +// Note: CipherSuites are only set when MinVersion is below TLS 1.3, as Go's TLS 1.3 implementation +// does not allow configuring cipher suites - all TLS 1.3 ciphers are always enabled. 
+// See: https://github.com/golang/go/issues/29349
+func NewTLSConfigFromProfile(profile configv1.TLSProfileSpec) (tlsConfig func(*tls.Config), unsupportedCiphers []string) {
+	minVersion := libgocrypto.TLSVersionOrDie(string(profile.MinTLSVersion))
+	cipherSuites, unsupportedCiphers := cipherCodes(profile.Ciphers)
+
+	return func(tlsConf *tls.Config) {
+		tlsConf.MinVersion = minVersion
+		// TODO: add curve preferences from profile once https://github.com/openshift/api/pull/2583 merges.
+		// tlsConf.CurvePreferences = profile.Curves
+
+		// TLS 1.3 cipher suites are not configurable in Go (https://github.com/golang/go/issues/29349), so only set CipherSuites accordingly.
+		// TODO: revisit this once we get an answer on the best way to handle this here:
+		// https://docs.google.com/document/d/1cMc9E8psHfnoK06ntR8kHSWB8d3rMtmldhnmM4nImjs/edit?disco=AAABu_nPcYg
+		if minVersion != tls.VersionTLS13 {
+			tlsConf.CipherSuites = cipherSuites
+		}
+	}, unsupportedCiphers
+}
+
+// cipherCode returns the TLS cipher code for an OpenSSL or IANA cipher name.
+// Returns 0 if the cipher is not supported.
+func cipherCode(cipher string) uint16 {
+	// First try as IANA name directly.
+	if code, err := libgocrypto.CipherSuite(cipher); err == nil {
+		return code
+	}
+
+	// Try converting from OpenSSL name to IANA name.
+	ianaCiphers := libgocrypto.OpenSSLToIANACipherSuites([]string{cipher})
+	if len(ianaCiphers) == 1 {
+		if code, err := libgocrypto.CipherSuite(ianaCiphers[0]); err == nil {
+			return code
+		}
+	}
+
+	// Return 0 if the cipher is not supported.
+	return 0
+}
+
+// cipherCodes converts a list of cipher names (OpenSSL or IANA format) to their uint16 codes.
+// Returns the converted codes and a list of any unsupported cipher names.
+func cipherCodes(ciphers []string) (codes []uint16, unsupportedCiphers []string) { + for _, cipher := range ciphers { + code := cipherCode(cipher) + if code == 0 { + unsupportedCiphers = append(unsupportedCiphers, cipher) + continue + } + + codes = append(codes, code) + } + + return codes, unsupportedCiphers +} diff --git a/vendor/github.com/openshift/library-go/pkg/crypto/crypto.go b/vendor/github.com/openshift/library-go/pkg/crypto/crypto.go index 696278eaf0..ca2806ecc6 100644 --- a/vendor/github.com/openshift/library-go/pkg/crypto/crypto.go +++ b/vendor/github.com/openshift/library-go/pkg/crypto/crypto.go @@ -159,17 +159,9 @@ var openSSLToIANACiphersMap = map[string]string{ "ECDHE-RSA-CHACHA20-POLY1305": "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256", // 0xCC,0xA8 "ECDHE-ECDSA-AES128-SHA256": "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", // 0xC0,0x23 "ECDHE-RSA-AES128-SHA256": "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", // 0xC0,0x27 - "ECDHE-ECDSA-AES256-SHA384": "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384", // 0xC0,0x24 - "ECDHE-RSA-AES256-SHA384": "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384", // 0xC0,0x28 - "DHE-RSA-AES128-GCM-SHA256": "TLS_DHE_RSA_WITH_AES_128_GCM_SHA256", // 0x00,0x9E - "DHE-RSA-AES256-GCM-SHA384": "TLS_DHE_RSA_WITH_AES_256_GCM_SHA384", // 0x00,0x9F - "DHE-RSA-CHACHA20-POLY1305": "TLS_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256", // 0xCC,0xAA - "DHE-RSA-AES128-SHA256": "TLS_DHE_RSA_WITH_AES_128_CBC_SHA256", // 0x00,0x67 - "DHE-RSA-AES256-SHA256": "TLS_DHE_RSA_WITH_AES_256_CBC_SHA256", // 0x00,0x6B "AES128-GCM-SHA256": "TLS_RSA_WITH_AES_128_GCM_SHA256", // 0x00,0x9C "AES256-GCM-SHA384": "TLS_RSA_WITH_AES_256_GCM_SHA384", // 0x00,0x9D "AES128-SHA256": "TLS_RSA_WITH_AES_128_CBC_SHA256", // 0x00,0x3C - "AES256-SHA256": "TLS_RSA_WITH_AES_256_CBC_SHA256", // 0x00,0x3D // TLS 1 "ECDHE-ECDSA-AES128-SHA": "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", // 0xC0,0x09 @@ -178,10 +170,9 @@ var openSSLToIANACiphersMap = map[string]string{ "ECDHE-RSA-AES256-SHA": 
"TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", // 0xC0,0x14 // SSL 3 - "AES128-SHA": "TLS_RSA_WITH_AES_128_CBC_SHA", // 0x00,0x2F - "AES256-SHA": "TLS_RSA_WITH_AES_256_CBC_SHA", // 0x00,0x35 - "DES-CBC3-SHA": "TLS_RSA_WITH_3DES_EDE_CBC_SHA", // 0x00,0x0A - "ECDHE-RSA-DES-CBC3-SHA": "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA", // 0xC0,0x12 + "AES128-SHA": "TLS_RSA_WITH_AES_128_CBC_SHA", // 0x00,0x2F + "AES256-SHA": "TLS_RSA_WITH_AES_256_CBC_SHA", // 0x00,0x35 + "DES-CBC3-SHA": "TLS_RSA_WITH_3DES_EDE_CBC_SHA", // 0x00,0x0A } // CipherSuitesToNamesOrDie given a list of cipher suites as ints, return their readable names diff --git a/vendor/modules.txt b/vendor/modules.txt index 5ec56655f6..9505a077be 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -635,7 +635,10 @@ github.com/openshift/client-go/config/informers/externalversions/internalinterfa github.com/openshift/client-go/config/listers/config/v1 github.com/openshift/client-go/config/listers/config/v1alpha1 github.com/openshift/client-go/config/listers/config/v1alpha2 -# github.com/openshift/library-go v0.0.0-20260204111611-b7d4fa0e292a +# github.com/openshift/controller-runtime-common v0.0.0-20260204183245-642129afd14f => github.com/joelanford/controller-runtime-common v0.0.0-20260206162334-afe447e6c57e +## explicit; go 1.24.0 +github.com/openshift/controller-runtime-common/pkg/tls +# github.com/openshift/library-go v0.0.0-20260205095356-7bced6e899b6 ## explicit; go 1.24.0 github.com/openshift/library-go/pkg/crypto # github.com/operator-framework/api v0.39.0 => ./staging/api @@ -2372,3 +2375,4 @@ sigs.k8s.io/yaml/kyaml # github.com/operator-framework/api => ./staging/api # github.com/operator-framework/operator-lifecycle-manager => ./staging/operator-lifecycle-manager # github.com/operator-framework/operator-registry => ./staging/operator-registry +# github.com/openshift/controller-runtime-common => github.com/joelanford/controller-runtime-common v0.0.0-20260206162334-afe447e6c57e