diff --git a/staging/operator-lifecycle-manager/pkg/controller/operators/catalog/operator.go b/staging/operator-lifecycle-manager/pkg/controller/operators/catalog/operator.go index 32f5f652d8..6706cecec1 100644 --- a/staging/operator-lifecycle-manager/pkg/controller/operators/catalog/operator.go +++ b/staging/operator-lifecycle-manager/pkg/controller/operators/catalog/operator.go @@ -1,2559 +1,2572 @@ package catalog import ( - "context" - "encoding/json" - "errors" - "fmt" - "reflect" - "sort" - "strings" - "sync" - "time" - - errorwrap "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "google.golang.org/grpc/connectivity" - corev1 "k8s.io/api/core/v1" - rbacv1 "k8s.io/api/rbac/v1" - "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions" - apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" - "k8s.io/apiextensions-apiserver/pkg/apiserver/validation" - extinf "k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/selection" - utilerrors "k8s.io/apimachinery/pkg/util/errors" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/apimachinery/pkg/util/validation/field" - "k8s.io/apimachinery/pkg/util/yaml" - "k8s.io/client-go/dynamic" - "k8s.io/client-go/informers" - "k8s.io/client-go/tools/cache" - "k8s.io/client-go/tools/clientcmd" - "k8s.io/client-go/tools/pager" - "k8s.io/client-go/tools/record" - "k8s.io/client-go/util/retry" - "k8s.io/client-go/util/workqueue" - utilclock "k8s.io/utils/clock" - - "github.com/operator-framework/api/pkg/operators/reference" - operatorsv1 "github.com/operator-framework/api/pkg/operators/v1" - 
"github.com/operator-framework/api/pkg/operators/v1alpha1" - "github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned" - "github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/informers/externalversions" - operatorsv1alpha1listers "github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/listers/operators/v1alpha1" - "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/bundle" - olmerrors "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/errors" - "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/install" - "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators/catalog/subscription" - "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators/internal/pruning" - "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/registry" - "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/registry/grpc" - "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/registry/reconciler" - "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/registry/resolver" - resolvercache "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/registry/resolver/cache" - "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/registry/resolver/solver" - "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/catalogsource" - "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/clients" - controllerclient "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/controller-runtime/client" - "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/event" - index "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/index" - "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/operatorclient" - 
"github.com/operator-framework/operator-lifecycle-manager/pkg/lib/operatorlister" - "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/ownerutil" - "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/queueinformer" - "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/scoped" - sharedtime "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/time" - "github.com/operator-framework/operator-lifecycle-manager/pkg/metrics" + "context" + "encoding/json" + "errors" + "fmt" + "reflect" + "sort" + "strings" + "sync" + "time" + + errorwrap "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "google.golang.org/grpc/connectivity" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + "k8s.io/apiextensions-apiserver/pkg/apiserver/validation" + extinf "k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/selection" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apimachinery/pkg/util/yaml" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/informers" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/client-go/tools/record" + "k8s.io/client-go/util/retry" + "k8s.io/client-go/util/workqueue" + utilclock "k8s.io/utils/clock" + + "github.com/operator-framework/api/pkg/operators/reference" + operatorsv1 
"github.com/operator-framework/api/pkg/operators/v1" + "github.com/operator-framework/api/pkg/operators/v1alpha1" + "github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned" + "github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/informers/externalversions" + operatorsv1alpha1listers "github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/listers/operators/v1alpha1" + "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/bundle" + olmerrors "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/errors" + "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/install" + "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators/catalog/subscription" + "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators/internal/pruning" + "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/registry" + "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/registry/grpc" + "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/registry/reconciler" + "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/registry/resolver" + resolvercache "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/registry/resolver/cache" + "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/registry/resolver/solver" + "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/catalogsource" + "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/clients" + controllerclient "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/controller-runtime/client" + "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/event" + index "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/index" + 
"github.com/operator-framework/operator-lifecycle-manager/pkg/lib/operatorclient" + "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/operatorlister" + "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/ownerutil" + "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/queueinformer" + "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/scoped" + sharedtime "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/time" + "github.com/operator-framework/operator-lifecycle-manager/pkg/metrics" ) const ( - crdKind = "CustomResourceDefinition" - secretKind = "Secret" - clusterRoleKind = "ClusterRole" - clusterRoleBindingKind = "ClusterRoleBinding" - configMapKind = "ConfigMap" - csvKind = "ClusterServiceVersion" - serviceAccountKind = "ServiceAccount" - serviceKind = "Service" - roleKind = "Role" - roleBindingKind = "RoleBinding" - generatedByKey = "olm.generated-by" - maxInstallPlanCount = 5 - maxDeletesPerSweep = 5 - RegistryFieldManager = "olm.registry" + crdKind = "CustomResourceDefinition" + secretKind = "Secret" + clusterRoleKind = "ClusterRole" + clusterRoleBindingKind = "ClusterRoleBinding" + configMapKind = "ConfigMap" + csvKind = "ClusterServiceVersion" + serviceAccountKind = "ServiceAccount" + serviceKind = "Service" + roleKind = "Role" + roleBindingKind = "RoleBinding" + generatedByKey = "olm.generated-by" + maxInstallPlanCount = 5 + maxDeletesPerSweep = 5 + RegistryFieldManager = "olm.registry" ) // Operator represents a Kubernetes operator that executes InstallPlans by // resolving dependencies in a catalog. 
type Operator struct { - queueinformer.Operator - - logger *logrus.Logger - clock utilclock.Clock - opClient operatorclient.ClientInterface - client versioned.Interface - dynamicClient dynamic.Interface - lister operatorlister.OperatorLister - catsrcQueueSet *queueinformer.ResourceQueueSet - subQueueSet *queueinformer.ResourceQueueSet - ipQueueSet *queueinformer.ResourceQueueSet - ogQueueSet *queueinformer.ResourceQueueSet - nsResolveQueue workqueue.RateLimitingInterface - namespace string - recorder record.EventRecorder - sources *grpc.SourceStore - sourcesLastUpdate sharedtime.SharedTime - resolver resolver.StepResolver - reconciler reconciler.RegistryReconcilerFactory - catalogSubscriberIndexer map[string]cache.Indexer - clientAttenuator *scoped.ClientAttenuator - serviceAccountQuerier *scoped.UserDefinedServiceAccountQuerier - bundleUnpacker bundle.Unpacker - installPlanTimeout time.Duration - bundleUnpackTimeout time.Duration - clientFactory clients.Factory - muInstallPlan sync.Mutex - sourceInvalidator *resolver.RegistrySourceProvider + queueinformer.Operator + + logger *logrus.Logger + clock utilclock.Clock + opClient operatorclient.ClientInterface + client versioned.Interface + dynamicClient dynamic.Interface + lister operatorlister.OperatorLister + catsrcQueueSet *queueinformer.ResourceQueueSet + subQueueSet *queueinformer.ResourceQueueSet + ipQueueSet *queueinformer.ResourceQueueSet + ogQueueSet *queueinformer.ResourceQueueSet + nsResolveQueue workqueue.RateLimitingInterface + namespace string + recorder record.EventRecorder + sources *grpc.SourceStore + sourcesLastUpdate sharedtime.SharedTime + resolver resolver.StepResolver + reconciler reconciler.RegistryReconcilerFactory + catalogSubscriberIndexer map[string]cache.Indexer + clientAttenuator *scoped.ClientAttenuator + serviceAccountQuerier *scoped.UserDefinedServiceAccountQuerier + bundleUnpacker bundle.Unpacker + installPlanTimeout time.Duration + bundleUnpackTimeout time.Duration + clientFactory 
clients.Factory + muInstallPlan sync.Mutex + sourceInvalidator *resolver.RegistrySourceProvider } type CatalogSourceSyncFunc func(logger *logrus.Entry, in *v1alpha1.CatalogSource) (out *v1alpha1.CatalogSource, continueSync bool, syncError error) // NewOperator creates a new Catalog Operator. func NewOperator(ctx context.Context, kubeconfigPath string, clock utilclock.Clock, logger *logrus.Logger, resync time.Duration, configmapRegistryImage, opmImage, utilImage string, operatorNamespace string, scheme *runtime.Scheme, installPlanTimeout time.Duration, bundleUnpackTimeout time.Duration, workloadUserID int64) (*Operator, error) { - resyncPeriod := queueinformer.ResyncWithJitter(resync, 0.2) - config, err := clientcmd.BuildConfigFromFlags("", kubeconfigPath) - if err != nil { - return nil, err - } - - // Create a new client for OLM types (CRs) - crClient, err := versioned.NewForConfig(config) - if err != nil { - return nil, err - } - - // Create a new client for dynamic types (CRs) - dynamicClient, err := dynamic.NewForConfig(config) - if err != nil { - return nil, err - } - - // Create a new queueinformer-based operator. - opClient, err := operatorclient.NewClientFromRestConfig(config) - if err != nil { - return nil, err - } - - queueOperator, err := queueinformer.NewOperator(opClient.KubernetesInterface().Discovery(), queueinformer.WithOperatorLogger(logger)) - if err != nil { - return nil, err - } - - // Create an OperatorLister - lister := operatorlister.NewLister() - - // eventRecorder can emit events - eventRecorder, err := event.NewRecorder(opClient.KubernetesInterface().CoreV1().Events(metav1.NamespaceAll)) - if err != nil { - return nil, err - } - - ssaClient, err := controllerclient.NewForConfig(config, scheme, RegistryFieldManager) - if err != nil { - return nil, err - } - - // Allocate the new instance of an Operator. 
- op := &Operator{ - Operator: queueOperator, - logger: logger, - clock: clock, - opClient: opClient, - dynamicClient: dynamicClient, - client: crClient, - lister: lister, - namespace: operatorNamespace, - recorder: eventRecorder, - catsrcQueueSet: queueinformer.NewEmptyResourceQueueSet(), - subQueueSet: queueinformer.NewEmptyResourceQueueSet(), - ipQueueSet: queueinformer.NewEmptyResourceQueueSet(), - ogQueueSet: queueinformer.NewEmptyResourceQueueSet(), - catalogSubscriberIndexer: map[string]cache.Indexer{}, - serviceAccountQuerier: scoped.NewUserDefinedServiceAccountQuerier(logger, crClient), - clientAttenuator: scoped.NewClientAttenuator(logger, config, opClient), - installPlanTimeout: installPlanTimeout, - bundleUnpackTimeout: bundleUnpackTimeout, - clientFactory: clients.NewFactory(config), - } - op.sources = grpc.NewSourceStore(logger, 10*time.Second, 10*time.Minute, op.syncSourceState) - op.sourceInvalidator = resolver.SourceProviderFromRegistryClientProvider(op.sources, lister.OperatorsV1alpha1().CatalogSourceLister(), logger) - resolverSourceProvider := NewOperatorGroupToggleSourceProvider(op.sourceInvalidator, logger, op.lister.OperatorsV1().OperatorGroupLister()) - op.reconciler = reconciler.NewRegistryReconcilerFactory(lister, opClient, configmapRegistryImage, op.now, ssaClient, workloadUserID) - res := resolver.NewOperatorStepResolver(lister, crClient, operatorNamespace, resolverSourceProvider, logger) - op.resolver = resolver.NewInstrumentedResolver(res, metrics.RegisterDependencyResolutionSuccess, metrics.RegisterDependencyResolutionFailure) - - // Wire OLM CR sharedIndexInformers - crInformerFactory := externalversions.NewSharedInformerFactoryWithOptions(op.client, resyncPeriod()) - - // Fields are pruned from local copies of the objects managed - // by this informer in order to reduce cached size. 
- prunedCSVInformer := cache.NewSharedIndexInformer( - pruning.NewListerWatcher(op.client, metav1.NamespaceAll, - func(options *metav1.ListOptions) { - options.LabelSelector = fmt.Sprintf("!%s", v1alpha1.CopiedLabelKey) - }, - pruning.PrunerFunc(func(csv *v1alpha1.ClusterServiceVersion) { - *csv = v1alpha1.ClusterServiceVersion{ - TypeMeta: csv.TypeMeta, - ObjectMeta: metav1.ObjectMeta{ - Name: csv.Name, - Namespace: csv.Namespace, - Labels: csv.Labels, - Annotations: csv.Annotations, - }, - Spec: v1alpha1.ClusterServiceVersionSpec{ - CustomResourceDefinitions: csv.Spec.CustomResourceDefinitions, - APIServiceDefinitions: csv.Spec.APIServiceDefinitions, - Replaces: csv.Spec.Replaces, - Version: csv.Spec.Version, - }, - Status: v1alpha1.ClusterServiceVersionStatus{ - Phase: csv.Status.Phase, - Reason: csv.Status.Reason, - }, - } - })), - &v1alpha1.ClusterServiceVersion{}, - resyncPeriod(), - cache.Indexers{ - cache.NamespaceIndex: cache.MetaNamespaceIndexFunc, - }, - ) - csvLister := operatorsv1alpha1listers.NewClusterServiceVersionLister(prunedCSVInformer.GetIndexer()) - op.lister.OperatorsV1alpha1().RegisterClusterServiceVersionLister(metav1.NamespaceAll, csvLister) - if err := op.RegisterInformer(prunedCSVInformer); err != nil { - return nil, err - } - - // TODO: Add namespace resolve sync - - // Wire InstallPlans - ipInformer := crInformerFactory.Operators().V1alpha1().InstallPlans() - op.lister.OperatorsV1alpha1().RegisterInstallPlanLister(metav1.NamespaceAll, ipInformer.Lister()) - ipQueue := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "ips") - op.ipQueueSet.Set(metav1.NamespaceAll, ipQueue) - ipQueueInformer, err := queueinformer.NewQueueInformer( - ctx, - queueinformer.WithMetricsProvider(metrics.NewMetricsInstallPlan(op.lister.OperatorsV1alpha1().InstallPlanLister())), - queueinformer.WithLogger(op.logger), - queueinformer.WithQueue(ipQueue), - queueinformer.WithInformer(ipInformer.Informer()), - 
queueinformer.WithSyncer(queueinformer.LegacySyncHandler(op.syncInstallPlans).ToSyncer()), - ) - if err != nil { - return nil, err - } - if err := op.RegisterQueueInformer(ipQueueInformer); err != nil { - return nil, err - } - - operatorGroupInformer := crInformerFactory.Operators().V1().OperatorGroups() - op.lister.OperatorsV1().RegisterOperatorGroupLister(metav1.NamespaceAll, operatorGroupInformer.Lister()) - ogQueue := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "ogs") - op.ogQueueSet.Set(metav1.NamespaceAll, ogQueue) - operatorGroupQueueInformer, err := queueinformer.NewQueueInformer( - ctx, - queueinformer.WithLogger(op.logger), - queueinformer.WithQueue(ogQueue), - queueinformer.WithInformer(operatorGroupInformer.Informer()), - queueinformer.WithSyncer(queueinformer.LegacySyncHandler(op.syncOperatorGroups).ToSyncer()), - ) - if err != nil { - return nil, err - } - if err := op.RegisterQueueInformer(operatorGroupQueueInformer); err != nil { - return nil, err - } - - // Wire CatalogSources - catsrcInformer := crInformerFactory.Operators().V1alpha1().CatalogSources() - op.lister.OperatorsV1alpha1().RegisterCatalogSourceLister(metav1.NamespaceAll, catsrcInformer.Lister()) - catsrcQueue := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "catsrcs") - op.catsrcQueueSet.Set(metav1.NamespaceAll, catsrcQueue) - catsrcQueueInformer, err := queueinformer.NewQueueInformer( - ctx, - queueinformer.WithMetricsProvider(metrics.NewMetricsCatalogSource(op.lister.OperatorsV1alpha1().CatalogSourceLister())), - queueinformer.WithLogger(op.logger), - queueinformer.WithQueue(catsrcQueue), - queueinformer.WithInformer(catsrcInformer.Informer()), - queueinformer.WithSyncer(queueinformer.LegacySyncHandler(op.syncCatalogSources).ToSyncerWithDelete(op.handleCatSrcDeletion)), - ) - if err != nil { - return nil, err - } - if err := op.RegisterQueueInformer(catsrcQueueInformer); err != nil { - return nil, err - } - - // Wire 
Subscriptions - subInformer := crInformerFactory.Operators().V1alpha1().Subscriptions() - op.lister.OperatorsV1alpha1().RegisterSubscriptionLister(metav1.NamespaceAll, subInformer.Lister()) - if err := subInformer.Informer().AddIndexers(cache.Indexers{index.PresentCatalogIndexFuncKey: index.PresentCatalogIndexFunc}); err != nil { - return nil, err - } - subIndexer := subInformer.Informer().GetIndexer() - op.catalogSubscriberIndexer[metav1.NamespaceAll] = subIndexer - - subQueue := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "subs") - op.subQueueSet.Set(metav1.NamespaceAll, subQueue) - subSyncer, err := subscription.NewSyncer( - ctx, - subscription.WithLogger(op.logger), - subscription.WithClient(op.client), - subscription.WithOperatorLister(op.lister), - subscription.WithSubscriptionInformer(subInformer.Informer()), - subscription.WithCatalogInformer(catsrcInformer.Informer()), - subscription.WithInstallPlanInformer(ipInformer.Informer()), - subscription.WithSubscriptionQueue(subQueue), - subscription.WithAppendedReconcilers(subscription.ReconcilerFromLegacySyncHandler(op.syncSubscriptions, nil)), - subscription.WithRegistryReconcilerFactory(op.reconciler), - subscription.WithGlobalCatalogNamespace(op.namespace), - ) - if err != nil { - return nil, err - } - subQueueInformer, err := queueinformer.NewQueueInformer( - ctx, - queueinformer.WithMetricsProvider(metrics.NewMetricsSubscription(op.lister.OperatorsV1alpha1().SubscriptionLister())), - queueinformer.WithLogger(op.logger), - queueinformer.WithQueue(subQueue), - queueinformer.WithInformer(subInformer.Informer()), - queueinformer.WithSyncer(subSyncer), - ) - if err != nil { - return nil, err - } - if err := op.RegisterQueueInformer(subQueueInformer); err != nil { - return nil, err - } - - // Wire k8s sharedIndexInformers - k8sInformerFactory := informers.NewSharedInformerFactoryWithOptions(op.opClient.KubernetesInterface(), resyncPeriod()) - sharedIndexInformers := 
[]cache.SharedIndexInformer{} - - // Wire Roles - roleInformer := k8sInformerFactory.Rbac().V1().Roles() - op.lister.RbacV1().RegisterRoleLister(metav1.NamespaceAll, roleInformer.Lister()) - sharedIndexInformers = append(sharedIndexInformers, roleInformer.Informer()) - - // Wire RoleBindings - roleBindingInformer := k8sInformerFactory.Rbac().V1().RoleBindings() - op.lister.RbacV1().RegisterRoleBindingLister(metav1.NamespaceAll, roleBindingInformer.Lister()) - sharedIndexInformers = append(sharedIndexInformers, roleBindingInformer.Informer()) - - // Wire ServiceAccounts - serviceAccountInformer := k8sInformerFactory.Core().V1().ServiceAccounts() - op.lister.CoreV1().RegisterServiceAccountLister(metav1.NamespaceAll, serviceAccountInformer.Lister()) - sharedIndexInformers = append(sharedIndexInformers, serviceAccountInformer.Informer()) - - // Wire Services - serviceInformer := k8sInformerFactory.Core().V1().Services() - op.lister.CoreV1().RegisterServiceLister(metav1.NamespaceAll, serviceInformer.Lister()) - sharedIndexInformers = append(sharedIndexInformers, serviceInformer.Informer()) - - // Wire Pods for CatalogSource - catsrcReq, err := labels.NewRequirement(reconciler.CatalogSourceLabelKey, selection.Exists, nil) - if err != nil { - return nil, err - } - - csPodLabels := labels.NewSelector() - csPodLabels = csPodLabels.Add(*catsrcReq) - csPodInformer := informers.NewSharedInformerFactoryWithOptions(op.opClient.KubernetesInterface(), resyncPeriod(), informers.WithTweakListOptions(func(options *metav1.ListOptions) { - options.LabelSelector = csPodLabels.String() - })).Core().V1().Pods() - op.lister.CoreV1().RegisterPodLister(metav1.NamespaceAll, csPodInformer.Lister()) - sharedIndexInformers = append(sharedIndexInformers, csPodInformer.Informer()) - - // Wire Pods for BundleUnpack job - buReq, err := labels.NewRequirement(bundle.BundleUnpackPodLabel, selection.Exists, nil) - if err != nil { - return nil, err - } - - buPodLabels := labels.NewSelector() - 
buPodLabels = buPodLabels.Add(*buReq) - buPodInformer := informers.NewSharedInformerFactoryWithOptions(op.opClient.KubernetesInterface(), resyncPeriod(), informers.WithTweakListOptions(func(options *metav1.ListOptions) { - options.LabelSelector = buPodLabels.String() - })).Core().V1().Pods() - sharedIndexInformers = append(sharedIndexInformers, buPodInformer.Informer()) - - // Wire ConfigMaps - configMapInformer := informers.NewSharedInformerFactoryWithOptions(op.opClient.KubernetesInterface(), resyncPeriod(), informers.WithTweakListOptions(func(options *metav1.ListOptions) { - options.LabelSelector = install.OLMManagedLabelKey - })).Core().V1().ConfigMaps() - op.lister.CoreV1().RegisterConfigMapLister(metav1.NamespaceAll, configMapInformer.Lister()) - sharedIndexInformers = append(sharedIndexInformers, configMapInformer.Informer()) - - // Wire Jobs - jobInformer := k8sInformerFactory.Batch().V1().Jobs() - sharedIndexInformers = append(sharedIndexInformers, jobInformer.Informer()) - - // Generate and register QueueInformers for k8s resources - k8sSyncer := queueinformer.LegacySyncHandler(op.syncObject).ToSyncerWithDelete(op.handleDeletion) - for _, informer := range sharedIndexInformers { - queueInformer, err := queueinformer.NewQueueInformer( - ctx, - queueinformer.WithLogger(op.logger), - queueinformer.WithInformer(informer), - queueinformer.WithSyncer(k8sSyncer), - ) - if err != nil { - return nil, err - } - - if err := op.RegisterQueueInformer(queueInformer); err != nil { - return nil, err - } - } - - // Setup the BundleUnpacker - op.bundleUnpacker, err = bundle.NewConfigmapUnpacker( - bundle.WithLogger(op.logger), - bundle.WithClient(op.opClient.KubernetesInterface()), - bundle.WithCatalogSourceLister(catsrcInformer.Lister()), - bundle.WithConfigMapLister(configMapInformer.Lister()), - bundle.WithJobLister(jobInformer.Lister()), - bundle.WithPodLister(buPodInformer.Lister()), - bundle.WithRoleLister(roleInformer.Lister()), - 
bundle.WithRoleBindingLister(roleBindingInformer.Lister()), - bundle.WithOPMImage(opmImage), - bundle.WithUtilImage(utilImage), - bundle.WithNow(op.now), - bundle.WithUnpackTimeout(op.bundleUnpackTimeout), - bundle.WithUserID(workloadUserID), - ) - if err != nil { - return nil, err - } - - // Register CustomResourceDefinition QueueInformer - crdInformer := extinf.NewSharedInformerFactory(op.opClient.ApiextensionsInterface(), resyncPeriod()).Apiextensions().V1().CustomResourceDefinitions() - op.lister.APIExtensionsV1().RegisterCustomResourceDefinitionLister(crdInformer.Lister()) - crdQueueInformer, err := queueinformer.NewQueueInformer( - ctx, - queueinformer.WithLogger(op.logger), - queueinformer.WithInformer(crdInformer.Informer()), - queueinformer.WithSyncer(queueinformer.LegacySyncHandler(op.syncObject).ToSyncerWithDelete(op.handleDeletion)), - ) - if err != nil { - return nil, err - } - if err := op.RegisterQueueInformer(crdQueueInformer); err != nil { - return nil, err - } - - // Namespace sync for resolving subscriptions - namespaceInformer := informers.NewSharedInformerFactory(op.opClient.KubernetesInterface(), resyncPeriod()).Core().V1().Namespaces() - op.lister.CoreV1().RegisterNamespaceLister(namespaceInformer.Lister()) - op.nsResolveQueue = workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "resolver") - namespaceQueueInformer, err := queueinformer.NewQueueInformer( - ctx, - queueinformer.WithLogger(op.logger), - queueinformer.WithQueue(op.nsResolveQueue), - queueinformer.WithInformer(namespaceInformer.Informer()), - queueinformer.WithSyncer(queueinformer.LegacySyncHandler(op.syncResolvingNamespace).ToSyncer()), - ) - if err != nil { - return nil, err - } - if err := op.RegisterQueueInformer(namespaceQueueInformer); err != nil { - return nil, err - } - - op.sources.Start(context.Background()) - - return op, nil + resyncPeriod := queueinformer.ResyncWithJitter(resync, 0.2) + config, err := clientcmd.BuildConfigFromFlags("", 
kubeconfigPath) + if err != nil { + return nil, err + } + + // Create a new client for OLM types (CRs) + crClient, err := versioned.NewForConfig(config) + if err != nil { + return nil, err + } + + // Create a new client for dynamic types (CRs) + dynamicClient, err := dynamic.NewForConfig(config) + if err != nil { + return nil, err + } + + // Create a new queueinformer-based operator. + opClient, err := operatorclient.NewClientFromRestConfig(config) + if err != nil { + return nil, err + } + + queueOperator, err := queueinformer.NewOperator(opClient.KubernetesInterface().Discovery(), queueinformer.WithOperatorLogger(logger)) + if err != nil { + return nil, err + } + + // Create an OperatorLister + lister := operatorlister.NewLister() + + // eventRecorder can emit events + eventRecorder, err := event.NewRecorder(opClient.KubernetesInterface().CoreV1().Events(metav1.NamespaceAll)) + if err != nil { + return nil, err + } + + ssaClient, err := controllerclient.NewForConfig(config, scheme, RegistryFieldManager) + if err != nil { + return nil, err + } + + // Allocate the new instance of an Operator. 
+ op := &Operator{ + Operator: queueOperator, + logger: logger, + clock: clock, + opClient: opClient, + dynamicClient: dynamicClient, + client: crClient, + lister: lister, + namespace: operatorNamespace, + recorder: eventRecorder, + catsrcQueueSet: queueinformer.NewEmptyResourceQueueSet(), + subQueueSet: queueinformer.NewEmptyResourceQueueSet(), + ipQueueSet: queueinformer.NewEmptyResourceQueueSet(), + ogQueueSet: queueinformer.NewEmptyResourceQueueSet(), + catalogSubscriberIndexer: map[string]cache.Indexer{}, + serviceAccountQuerier: scoped.NewUserDefinedServiceAccountQuerier(logger, crClient), + clientAttenuator: scoped.NewClientAttenuator(logger, config, opClient), + installPlanTimeout: installPlanTimeout, + bundleUnpackTimeout: bundleUnpackTimeout, + clientFactory: clients.NewFactory(config), + } + op.sources = grpc.NewSourceStore(logger, 10*time.Second, 10*time.Minute, op.syncSourceState) + op.sourceInvalidator = resolver.SourceProviderFromRegistryClientProvider(op.sources, lister.OperatorsV1alpha1().CatalogSourceLister(), logger) + resolverSourceProvider := NewOperatorGroupToggleSourceProvider(op.sourceInvalidator, logger, op.lister.OperatorsV1().OperatorGroupLister()) + op.reconciler = reconciler.NewRegistryReconcilerFactory(lister, opClient, configmapRegistryImage, op.now, ssaClient, workloadUserID) + res := resolver.NewOperatorStepResolver(lister, crClient, operatorNamespace, resolverSourceProvider, logger) + op.resolver = resolver.NewInstrumentedResolver(res, metrics.RegisterDependencyResolutionSuccess, metrics.RegisterDependencyResolutionFailure) + + // Wire OLM CR sharedIndexInformers + crInformerFactory := externalversions.NewSharedInformerFactoryWithOptions(op.client, resyncPeriod()) + + // Fields are pruned from local copies of the objects managed + // by this informer in order to reduce cached size. 
+ prunedCSVInformer := cache.NewSharedIndexInformer( + pruning.NewListerWatcher(op.client, metav1.NamespaceAll, + func(options *metav1.ListOptions) { + options.LabelSelector = fmt.Sprintf("!%s", v1alpha1.CopiedLabelKey) + }, + pruning.PrunerFunc(func(csv *v1alpha1.ClusterServiceVersion) { + *csv = v1alpha1.ClusterServiceVersion{ + TypeMeta: csv.TypeMeta, + ObjectMeta: metav1.ObjectMeta{ + Name: csv.Name, + Namespace: csv.Namespace, + Labels: csv.Labels, + Annotations: csv.Annotations, + }, + Spec: v1alpha1.ClusterServiceVersionSpec{ + CustomResourceDefinitions: csv.Spec.CustomResourceDefinitions, + APIServiceDefinitions: csv.Spec.APIServiceDefinitions, + Replaces: csv.Spec.Replaces, + Version: csv.Spec.Version, + }, + Status: v1alpha1.ClusterServiceVersionStatus{ + Phase: csv.Status.Phase, + Reason: csv.Status.Reason, + }, + } + })), + &v1alpha1.ClusterServiceVersion{}, + resyncPeriod(), + cache.Indexers{ + cache.NamespaceIndex: cache.MetaNamespaceIndexFunc, + }, + ) + csvLister := operatorsv1alpha1listers.NewClusterServiceVersionLister(prunedCSVInformer.GetIndexer()) + op.lister.OperatorsV1alpha1().RegisterClusterServiceVersionLister(metav1.NamespaceAll, csvLister) + if err := op.RegisterInformer(prunedCSVInformer); err != nil { + return nil, err + } + + // TODO: Add namespace resolve sync + + // Wire InstallPlans + ipInformer := crInformerFactory.Operators().V1alpha1().InstallPlans() + op.lister.OperatorsV1alpha1().RegisterInstallPlanLister(metav1.NamespaceAll, ipInformer.Lister()) + ipQueue := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "ips") + op.ipQueueSet.Set(metav1.NamespaceAll, ipQueue) + ipQueueInformer, err := queueinformer.NewQueueInformer( + ctx, + queueinformer.WithMetricsProvider(metrics.NewMetricsInstallPlan(op.lister.OperatorsV1alpha1().InstallPlanLister())), + queueinformer.WithLogger(op.logger), + queueinformer.WithQueue(ipQueue), + queueinformer.WithInformer(ipInformer.Informer()), + 
queueinformer.WithSyncer(queueinformer.LegacySyncHandler(op.syncInstallPlans).ToSyncer()), + ) + if err != nil { + return nil, err + } + if err := op.RegisterQueueInformer(ipQueueInformer); err != nil { + return nil, err + } + + operatorGroupInformer := crInformerFactory.Operators().V1().OperatorGroups() + op.lister.OperatorsV1().RegisterOperatorGroupLister(metav1.NamespaceAll, operatorGroupInformer.Lister()) + ogQueue := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "ogs") + op.ogQueueSet.Set(metav1.NamespaceAll, ogQueue) + operatorGroupQueueInformer, err := queueinformer.NewQueueInformer( + ctx, + queueinformer.WithLogger(op.logger), + queueinformer.WithQueue(ogQueue), + queueinformer.WithInformer(operatorGroupInformer.Informer()), + queueinformer.WithSyncer(queueinformer.LegacySyncHandler(op.syncOperatorGroups).ToSyncer()), + ) + if err != nil { + return nil, err + } + if err := op.RegisterQueueInformer(operatorGroupQueueInformer); err != nil { + return nil, err + } + + // Wire CatalogSources + catsrcInformer := crInformerFactory.Operators().V1alpha1().CatalogSources() + op.lister.OperatorsV1alpha1().RegisterCatalogSourceLister(metav1.NamespaceAll, catsrcInformer.Lister()) + catsrcQueue := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "catsrcs") + op.catsrcQueueSet.Set(metav1.NamespaceAll, catsrcQueue) + catsrcQueueInformer, err := queueinformer.NewQueueInformer( + ctx, + queueinformer.WithMetricsProvider(metrics.NewMetricsCatalogSource(op.lister.OperatorsV1alpha1().CatalogSourceLister())), + queueinformer.WithLogger(op.logger), + queueinformer.WithQueue(catsrcQueue), + queueinformer.WithInformer(catsrcInformer.Informer()), + queueinformer.WithSyncer(queueinformer.LegacySyncHandler(op.syncCatalogSources).ToSyncerWithDelete(op.handleCatSrcDeletion)), + ) + if err != nil { + return nil, err + } + if err := op.RegisterQueueInformer(catsrcQueueInformer); err != nil { + return nil, err + } + + // Wire 
Subscriptions + subInformer := crInformerFactory.Operators().V1alpha1().Subscriptions() + op.lister.OperatorsV1alpha1().RegisterSubscriptionLister(metav1.NamespaceAll, subInformer.Lister()) + if err := subInformer.Informer().AddIndexers(cache.Indexers{index.PresentCatalogIndexFuncKey: index.PresentCatalogIndexFunc}); err != nil { + return nil, err + } + subIndexer := subInformer.Informer().GetIndexer() + op.catalogSubscriberIndexer[metav1.NamespaceAll] = subIndexer + + subQueue := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "subs") + op.subQueueSet.Set(metav1.NamespaceAll, subQueue) + subSyncer, err := subscription.NewSyncer( + ctx, + subscription.WithLogger(op.logger), + subscription.WithClient(op.client), + subscription.WithOperatorLister(op.lister), + subscription.WithSubscriptionInformer(subInformer.Informer()), + subscription.WithCatalogInformer(catsrcInformer.Informer()), + subscription.WithInstallPlanInformer(ipInformer.Informer()), + subscription.WithSubscriptionQueue(subQueue), + subscription.WithAppendedReconcilers(subscription.ReconcilerFromLegacySyncHandler(op.syncSubscriptions, nil)), + subscription.WithRegistryReconcilerFactory(op.reconciler), + subscription.WithGlobalCatalogNamespace(op.namespace), + ) + if err != nil { + return nil, err + } + subQueueInformer, err := queueinformer.NewQueueInformer( + ctx, + queueinformer.WithMetricsProvider(metrics.NewMetricsSubscription(op.lister.OperatorsV1alpha1().SubscriptionLister())), + queueinformer.WithLogger(op.logger), + queueinformer.WithQueue(subQueue), + queueinformer.WithInformer(subInformer.Informer()), + queueinformer.WithSyncer(subSyncer), + ) + if err != nil { + return nil, err + } + if err := op.RegisterQueueInformer(subQueueInformer); err != nil { + return nil, err + } + + // Wire k8s sharedIndexInformers + k8sInformerFactory := informers.NewSharedInformerFactoryWithOptions(op.opClient.KubernetesInterface(), resyncPeriod()) + sharedIndexInformers := 
[]cache.SharedIndexInformer{} + + // Wire Roles + roleInformer := k8sInformerFactory.Rbac().V1().Roles() + op.lister.RbacV1().RegisterRoleLister(metav1.NamespaceAll, roleInformer.Lister()) + sharedIndexInformers = append(sharedIndexInformers, roleInformer.Informer()) + + // Wire RoleBindings + roleBindingInformer := k8sInformerFactory.Rbac().V1().RoleBindings() + op.lister.RbacV1().RegisterRoleBindingLister(metav1.NamespaceAll, roleBindingInformer.Lister()) + sharedIndexInformers = append(sharedIndexInformers, roleBindingInformer.Informer()) + + // Wire ServiceAccounts + serviceAccountInformer := k8sInformerFactory.Core().V1().ServiceAccounts() + op.lister.CoreV1().RegisterServiceAccountLister(metav1.NamespaceAll, serviceAccountInformer.Lister()) + sharedIndexInformers = append(sharedIndexInformers, serviceAccountInformer.Informer()) + + // Wire Services + serviceInformer := k8sInformerFactory.Core().V1().Services() + op.lister.CoreV1().RegisterServiceLister(metav1.NamespaceAll, serviceInformer.Lister()) + sharedIndexInformers = append(sharedIndexInformers, serviceInformer.Informer()) + + // Wire Pods for CatalogSource + catsrcReq, err := labels.NewRequirement(reconciler.CatalogSourceLabelKey, selection.Exists, nil) + if err != nil { + return nil, err + } + + csPodLabels := labels.NewSelector() + csPodLabels = csPodLabels.Add(*catsrcReq) + csPodInformer := informers.NewSharedInformerFactoryWithOptions(op.opClient.KubernetesInterface(), resyncPeriod(), informers.WithTweakListOptions(func(options *metav1.ListOptions) { + options.LabelSelector = csPodLabels.String() + })).Core().V1().Pods() + op.lister.CoreV1().RegisterPodLister(metav1.NamespaceAll, csPodInformer.Lister()) + sharedIndexInformers = append(sharedIndexInformers, csPodInformer.Informer()) + + // Wire Pods for BundleUnpack job + buReq, err := labels.NewRequirement(bundle.BundleUnpackPodLabel, selection.Exists, nil) + if err != nil { + return nil, err + } + + buPodLabels := labels.NewSelector() + 
buPodLabels = buPodLabels.Add(*buReq) + buPodInformer := informers.NewSharedInformerFactoryWithOptions(op.opClient.KubernetesInterface(), resyncPeriod(), informers.WithTweakListOptions(func(options *metav1.ListOptions) { + options.LabelSelector = buPodLabels.String() + })).Core().V1().Pods() + sharedIndexInformers = append(sharedIndexInformers, buPodInformer.Informer()) + + // Wire ConfigMaps + configMapInformer := informers.NewSharedInformerFactoryWithOptions(op.opClient.KubernetesInterface(), resyncPeriod(), informers.WithTweakListOptions(func(options *metav1.ListOptions) { + options.LabelSelector = install.OLMManagedLabelKey + })).Core().V1().ConfigMaps() + op.lister.CoreV1().RegisterConfigMapLister(metav1.NamespaceAll, configMapInformer.Lister()) + sharedIndexInformers = append(sharedIndexInformers, configMapInformer.Informer()) + + // Wire Jobs + jobInformer := k8sInformerFactory.Batch().V1().Jobs() + sharedIndexInformers = append(sharedIndexInformers, jobInformer.Informer()) + + // Generate and register QueueInformers for k8s resources + k8sSyncer := queueinformer.LegacySyncHandler(op.syncObject).ToSyncerWithDelete(op.handleDeletion) + for _, informer := range sharedIndexInformers { + queueInformer, err := queueinformer.NewQueueInformer( + ctx, + queueinformer.WithLogger(op.logger), + queueinformer.WithInformer(informer), + queueinformer.WithSyncer(k8sSyncer), + ) + if err != nil { + return nil, err + } + + if err := op.RegisterQueueInformer(queueInformer); err != nil { + return nil, err + } + } + + // Setup the BundleUnpacker + op.bundleUnpacker, err = bundle.NewConfigmapUnpacker( + bundle.WithLogger(op.logger), + bundle.WithClient(op.opClient.KubernetesInterface()), + bundle.WithCatalogSourceLister(catsrcInformer.Lister()), + bundle.WithConfigMapLister(configMapInformer.Lister()), + bundle.WithJobLister(jobInformer.Lister()), + bundle.WithPodLister(buPodInformer.Lister()), + bundle.WithRoleLister(roleInformer.Lister()), + 
bundle.WithRoleBindingLister(roleBindingInformer.Lister()), + bundle.WithOPMImage(opmImage), + bundle.WithUtilImage(utilImage), + bundle.WithNow(op.now), + bundle.WithUnpackTimeout(op.bundleUnpackTimeout), + bundle.WithUserID(workloadUserID), + ) + if err != nil { + return nil, err + } + + // Register CustomResourceDefinition QueueInformer + crdInformer := extinf.NewSharedInformerFactory(op.opClient.ApiextensionsInterface(), resyncPeriod()).Apiextensions().V1().CustomResourceDefinitions() + op.lister.APIExtensionsV1().RegisterCustomResourceDefinitionLister(crdInformer.Lister()) + crdQueueInformer, err := queueinformer.NewQueueInformer( + ctx, + queueinformer.WithLogger(op.logger), + queueinformer.WithInformer(crdInformer.Informer()), + queueinformer.WithSyncer(queueinformer.LegacySyncHandler(op.syncObject).ToSyncerWithDelete(op.handleDeletion)), + ) + if err != nil { + return nil, err + } + if err := op.RegisterQueueInformer(crdQueueInformer); err != nil { + return nil, err + } + + // Namespace sync for resolving subscriptions + namespaceInformer := informers.NewSharedInformerFactory(op.opClient.KubernetesInterface(), resyncPeriod()).Core().V1().Namespaces() + op.lister.CoreV1().RegisterNamespaceLister(namespaceInformer.Lister()) + op.nsResolveQueue = workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "resolver") + namespaceQueueInformer, err := queueinformer.NewQueueInformer( + ctx, + queueinformer.WithLogger(op.logger), + queueinformer.WithQueue(op.nsResolveQueue), + queueinformer.WithInformer(namespaceInformer.Informer()), + queueinformer.WithSyncer(queueinformer.LegacySyncHandler(op.syncResolvingNamespace).ToSyncer()), + ) + if err != nil { + return nil, err + } + if err := op.RegisterQueueInformer(namespaceQueueInformer); err != nil { + return nil, err + } + + op.sources.Start(context.Background()) + + return op, nil } func (o *Operator) now() metav1.Time { - return metav1.NewTime(o.clock.Now().UTC()) + return 
metav1.NewTime(o.clock.Now().UTC()) } func (o *Operator) syncSourceState(state grpc.SourceState) { - o.sourcesLastUpdate.Set(o.now().Time) - - o.logger.Debugf("state.Key.Namespace=%s state.Key.Name=%s state.State=%s", state.Key.Namespace, state.Key.Name, state.State.String()) - metrics.RegisterCatalogSourceState(state.Key.Name, state.Key.Namespace, state.State) - - switch state.State { - case connectivity.Ready: - o.sourceInvalidator.Invalidate(resolvercache.SourceKey(state.Key)) - if o.namespace == state.Key.Namespace { - namespaces, err := index.CatalogSubscriberNamespaces(o.catalogSubscriberIndexer, - state.Key.Name, state.Key.Namespace) - - if err == nil { - for ns := range namespaces { - o.nsResolveQueue.Add(ns) - } - } - } - - o.nsResolveQueue.Add(state.Key.Namespace) - } - if err := o.catsrcQueueSet.Requeue(state.Key.Namespace, state.Key.Name); err != nil { - o.logger.WithError(err).Info("couldn't requeue catalogsource from catalog status change") - } + o.sourcesLastUpdate.Set(o.now().Time) + + o.logger.Debugf("state.Key.Namespace=%s state.Key.Name=%s state.State=%s", state.Key.Namespace, state.Key.Name, state.State.String()) + metrics.RegisterCatalogSourceState(state.Key.Name, state.Key.Namespace, state.State) + + switch state.State { + case connectivity.Ready: + o.sourceInvalidator.Invalidate(resolvercache.SourceKey(state.Key)) + if o.namespace == state.Key.Namespace { + namespaces, err := index.CatalogSubscriberNamespaces(o.catalogSubscriberIndexer, + state.Key.Name, state.Key.Namespace) + + if err == nil { + for ns := range namespaces { + o.nsResolveQueue.Add(ns) + } + } + } + + o.nsResolveQueue.Add(state.Key.Namespace) + } + if err := o.catsrcQueueSet.Requeue(state.Key.Namespace, state.Key.Name); err != nil { + o.logger.WithError(err).Info("couldn't requeue catalogsource from catalog status change") + } } func (o *Operator) requeueOwners(obj metav1.Object) { - namespace := obj.GetNamespace() - logger := o.logger.WithFields(logrus.Fields{ - "name": 
obj.GetName(), - "namespace": namespace, - }) - - for _, owner := range obj.GetOwnerReferences() { - var queueSet *queueinformer.ResourceQueueSet - switch kind := owner.Kind; kind { - case v1alpha1.CatalogSourceKind: - if err := o.catsrcQueueSet.Requeue(namespace, owner.Name); err != nil { - logger.Warn(err.Error()) - } - queueSet = o.catsrcQueueSet - case v1alpha1.SubscriptionKind: - if err := o.catsrcQueueSet.Requeue(namespace, owner.Name); err != nil { - logger.Warn(err.Error()) - } - queueSet = o.subQueueSet - default: - logger.WithField("kind", kind).Trace("untracked owner kind") - } - - if queueSet != nil { - logger.WithField("ref", owner).Trace("requeuing owner") - if err := queueSet.Requeue(namespace, owner.Name); err != nil { - logger.Warn(err.Error()) - } - } - } + namespace := obj.GetNamespace() + logger := o.logger.WithFields(logrus.Fields{ + "name": obj.GetName(), + "namespace": namespace, + }) + + for _, owner := range obj.GetOwnerReferences() { + var queueSet *queueinformer.ResourceQueueSet + switch kind := owner.Kind; kind { + case v1alpha1.CatalogSourceKind: + if err := o.catsrcQueueSet.Requeue(namespace, owner.Name); err != nil { + logger.Warn(err.Error()) + } + queueSet = o.catsrcQueueSet + case v1alpha1.SubscriptionKind: + if err := o.catsrcQueueSet.Requeue(namespace, owner.Name); err != nil { + logger.Warn(err.Error()) + } + queueSet = o.subQueueSet + default: + logger.WithField("kind", kind).Trace("untracked owner kind") + } + + if queueSet != nil { + logger.WithField("ref", owner).Trace("requeuing owner") + if err := queueSet.Requeue(namespace, owner.Name); err != nil { + logger.Warn(err.Error()) + } + } + } } func (o *Operator) syncObject(obj interface{}) (syncError error) { - // Assert as metav1.Object - metaObj, ok := obj.(metav1.Object) - if !ok { - syncError = errors.New("casting to metav1 object failed") - o.logger.Warn(syncError.Error()) - return - } + // Assert as metav1.Object + metaObj, ok := obj.(metav1.Object) + if !ok { + 
syncError = errors.New("casting to metav1 object failed") + o.logger.Warn(syncError.Error()) + return + } - o.requeueOwners(metaObj) + o.requeueOwners(metaObj) - return o.triggerInstallPlanRetry(obj) + return o.triggerInstallPlanRetry(obj) } func (o *Operator) handleDeletion(obj interface{}) { - metaObj, ok := obj.(metav1.Object) - if !ok { - tombstone, ok := obj.(cache.DeletedFinalStateUnknown) - if !ok { - utilruntime.HandleError(fmt.Errorf("couldn't get object from tombstone %#v", obj)) - return - } - - metaObj, ok = tombstone.Obj.(metav1.Object) - if !ok { - utilruntime.HandleError(fmt.Errorf("tombstone contained object that is not a metav1 object %#v", obj)) - return - } - } - - o.logger.WithFields(logrus.Fields{ - "name": metaObj.GetName(), - "namespace": metaObj.GetNamespace(), - }).Debug("handling object deletion") - - o.requeueOwners(metaObj) + metaObj, ok := obj.(metav1.Object) + if !ok { + tombstone, ok := obj.(cache.DeletedFinalStateUnknown) + if !ok { + utilruntime.HandleError(fmt.Errorf("couldn't get object from tombstone %#v", obj)) + return + } + + metaObj, ok = tombstone.Obj.(metav1.Object) + if !ok { + utilruntime.HandleError(fmt.Errorf("tombstone contained object that is not a metav1 object %#v", obj)) + return + } + } + + o.logger.WithFields(logrus.Fields{ + "name": metaObj.GetName(), + "namespace": metaObj.GetNamespace(), + }).Debug("handling object deletion") + + o.requeueOwners(metaObj) } func (o *Operator) handleCatSrcDeletion(obj interface{}) { - catsrc, ok := obj.(metav1.Object) - if !ok { - if !ok { - tombstone, ok := obj.(cache.DeletedFinalStateUnknown) - if !ok { - utilruntime.HandleError(fmt.Errorf("couldn't get object from tombstone %#v", obj)) - return - } - - catsrc, ok = tombstone.Obj.(metav1.Object) - if !ok { - utilruntime.HandleError(fmt.Errorf("tombstone contained object that is not a Namespace %#v", obj)) - return - } - } - } - sourceKey := registry.CatalogKey{Name: catsrc.GetName(), Namespace: catsrc.GetNamespace()} - if err 
:= o.sources.Remove(sourceKey); err != nil { - o.logger.WithError(err).Warn("error closing client") - } - o.logger.WithField("source", sourceKey).Info("removed client for deleted catalogsource") - - metrics.DeleteCatalogSourceStateMetric(catsrc.GetName(), catsrc.GetNamespace()) + catsrc, ok := obj.(metav1.Object) + if !ok { + if !ok { + tombstone, ok := obj.(cache.DeletedFinalStateUnknown) + if !ok { + utilruntime.HandleError(fmt.Errorf("couldn't get object from tombstone %#v", obj)) + return + } + + catsrc, ok = tombstone.Obj.(metav1.Object) + if !ok { + utilruntime.HandleError(fmt.Errorf("tombstone contained object that is not a Namespace %#v", obj)) + return + } + } + } + sourceKey := registry.CatalogKey{Name: catsrc.GetName(), Namespace: catsrc.GetNamespace()} + if err := o.sources.Remove(sourceKey); err != nil { + o.logger.WithError(err).Warn("error closing client") + } + o.logger.WithField("source", sourceKey).Info("removed client for deleted catalogsource") + + metrics.DeleteCatalogSourceStateMetric(catsrc.GetName(), catsrc.GetNamespace()) } func validateSourceType(logger *logrus.Entry, in *v1alpha1.CatalogSource) (out *v1alpha1.CatalogSource, continueSync bool, _ error) { - out = in - var err error - switch sourceType := out.Spec.SourceType; sourceType { - case v1alpha1.SourceTypeInternal, v1alpha1.SourceTypeConfigmap: - if out.Spec.ConfigMap == "" { - err = fmt.Errorf("configmap name unset: must be set for sourcetype: %s", sourceType) - } - case v1alpha1.SourceTypeGrpc: - if out.Spec.Image == "" && out.Spec.Address == "" { - err = fmt.Errorf("image and address unset: at least one must be set for sourcetype: %s", sourceType) - } - default: - err = fmt.Errorf("unknown sourcetype: %s", sourceType) - } - if err != nil { - out.SetError(v1alpha1.CatalogSourceSpecInvalidError, err) - return - } - - // The sourceType is valid, clear all status (other than status conditions array) if there's existing invalid spec reason - if out.Status.Reason == 
v1alpha1.CatalogSourceSpecInvalidError { - out.Status = v1alpha1.CatalogSourceStatus{ - Conditions: out.Status.Conditions, - } - } - continueSync = true - - return + out = in + var err error + switch sourceType := out.Spec.SourceType; sourceType { + case v1alpha1.SourceTypeInternal, v1alpha1.SourceTypeConfigmap: + if out.Spec.ConfigMap == "" { + err = fmt.Errorf("configmap name unset: must be set for sourcetype: %s", sourceType) + } + case v1alpha1.SourceTypeGrpc: + if out.Spec.Image == "" && out.Spec.Address == "" { + err = fmt.Errorf("image and address unset: at least one must be set for sourcetype: %s", sourceType) + } + default: + err = fmt.Errorf("unknown sourcetype: %s", sourceType) + } + if err != nil { + out.SetError(v1alpha1.CatalogSourceSpecInvalidError, err) + return + } + + // The sourceType is valid, clear all status (other than status conditions array) if there's existing invalid spec reason + if out.Status.Reason == v1alpha1.CatalogSourceSpecInvalidError { + out.Status = v1alpha1.CatalogSourceStatus{ + Conditions: out.Status.Conditions, + } + } + continueSync = true + + return } func (o *Operator) syncConfigMap(logger *logrus.Entry, in *v1alpha1.CatalogSource) (out *v1alpha1.CatalogSource, continueSync bool, syncError error) { - out = in - if !(in.Spec.SourceType == v1alpha1.SourceTypeInternal || in.Spec.SourceType == v1alpha1.SourceTypeConfigmap) { - continueSync = true - return - } - - out = in.DeepCopy() - - logger.Debug("checking catsrc configmap state") - - var updateLabel bool - // Get the catalog source's config map - configMap, err := o.lister.CoreV1().ConfigMapLister().ConfigMaps(in.GetNamespace()).Get(in.Spec.ConfigMap) - // Attempt to look up the CM via api call if there is a cache miss - if apierrors.IsNotFound(err) { - configMap, err = o.opClient.KubernetesInterface().CoreV1().ConfigMaps(in.GetNamespace()).Get(context.TODO(), in.Spec.ConfigMap, metav1.GetOptions{}) - // Found cm in the cluster, add managed label to configmap - if err == 
nil { - labels := configMap.GetLabels() - if labels == nil { - labels = make(map[string]string) - } - - labels[install.OLMManagedLabelKey] = "false" - configMap.SetLabels(labels) - updateLabel = true - } - } - if err != nil { - syncError = fmt.Errorf("failed to get catalog config map %s: %s", in.Spec.ConfigMap, err) - out.SetError(v1alpha1.CatalogSourceConfigMapError, syncError) - return - } - - if wasOwned := ownerutil.EnsureOwner(configMap, in); !wasOwned || updateLabel { - configMap, err = o.opClient.KubernetesInterface().CoreV1().ConfigMaps(configMap.GetNamespace()).Update(context.TODO(), configMap, metav1.UpdateOptions{}) - if err != nil { - syncError = fmt.Errorf("unable to write owner onto catalog source configmap - %v", err) - out.SetError(v1alpha1.CatalogSourceConfigMapError, syncError) - return - } - - logger.Debug("adopted configmap") - } - - if in.Status.ConfigMapResource == nil || !in.Status.ConfigMapResource.IsAMatch(&configMap.ObjectMeta) { - logger.Debug("updating catsrc configmap state") - // configmap ref nonexistent or updated, write out the new configmap ref to status and exit - out.Status.ConfigMapResource = &v1alpha1.ConfigMapResourceReference{ - Name: configMap.GetName(), - Namespace: configMap.GetNamespace(), - UID: configMap.GetUID(), - ResourceVersion: configMap.GetResourceVersion(), - LastUpdateTime: o.now(), - } - - return - } - - continueSync = true - return + out = in + if !(in.Spec.SourceType == v1alpha1.SourceTypeInternal || in.Spec.SourceType == v1alpha1.SourceTypeConfigmap) { + continueSync = true + return + } + + out = in.DeepCopy() + + logger.Debug("checking catsrc configmap state") + + var updateLabel bool + // Get the catalog source's config map + configMap, err := o.lister.CoreV1().ConfigMapLister().ConfigMaps(in.GetNamespace()).Get(in.Spec.ConfigMap) + // Attempt to look up the CM via api call if there is a cache miss + if apierrors.IsNotFound(err) { + configMap, err = 
o.opClient.KubernetesInterface().CoreV1().ConfigMaps(in.GetNamespace()).Get(context.TODO(), in.Spec.ConfigMap, metav1.GetOptions{}) + // Found cm in the cluster, add managed label to configmap + if err == nil { + labels := configMap.GetLabels() + if labels == nil { + labels = make(map[string]string) + } + + labels[install.OLMManagedLabelKey] = "false" + configMap.SetLabels(labels) + updateLabel = true + } + } + if err != nil { + syncError = fmt.Errorf("failed to get catalog config map %s: %s", in.Spec.ConfigMap, err) + out.SetError(v1alpha1.CatalogSourceConfigMapError, syncError) + return + } + + if wasOwned := ownerutil.EnsureOwner(configMap, in); !wasOwned || updateLabel { + configMap, err = o.opClient.KubernetesInterface().CoreV1().ConfigMaps(configMap.GetNamespace()).Update(context.TODO(), configMap, metav1.UpdateOptions{}) + if err != nil { + syncError = fmt.Errorf("unable to write owner onto catalog source configmap - %v", err) + out.SetError(v1alpha1.CatalogSourceConfigMapError, syncError) + return + } + + logger.Debug("adopted configmap") + } + + if in.Status.ConfigMapResource == nil || !in.Status.ConfigMapResource.IsAMatch(&configMap.ObjectMeta) { + logger.Debug("updating catsrc configmap state") + // configmap ref nonexistent or updated, write out the new configmap ref to status and exit + out.Status.ConfigMapResource = &v1alpha1.ConfigMapResourceReference{ + Name: configMap.GetName(), + Namespace: configMap.GetNamespace(), + UID: configMap.GetUID(), + ResourceVersion: configMap.GetResourceVersion(), + LastUpdateTime: o.now(), + } + + return + } + + continueSync = true + return } func (o *Operator) syncRegistryServer(logger *logrus.Entry, in *v1alpha1.CatalogSource) (out *v1alpha1.CatalogSource, continueSync bool, syncError error) { - out = in.DeepCopy() - - sourceKey := registry.CatalogKey{Name: in.GetName(), Namespace: in.GetNamespace()} - srcReconciler := o.reconciler.ReconcilerForSource(in) - if srcReconciler == nil { - // TODO: Add failure status on 
catalogsource and remove from sources - syncError = fmt.Errorf("no reconciler for source type %s", in.Spec.SourceType) - out.SetError(v1alpha1.CatalogSourceRegistryServerError, syncError) - return - } - - healthy, err := srcReconciler.CheckRegistryServer(in) - if err != nil { - syncError = err - out.SetError(v1alpha1.CatalogSourceRegistryServerError, syncError) - return - } - - logger.Debugf("check registry server healthy: %t", healthy) - - if healthy && in.Status.RegistryServiceStatus != nil { - logger.Debug("registry state good") - continueSync = true - // return here if catalog does not have polling enabled - if !out.Poll() { - return - } - } - - // Registry pod hasn't been created or hasn't been updated since the last configmap update, recreate it - logger.Debug("ensuring registry server") - - err = srcReconciler.EnsureRegistryServer(out) - if err != nil { - if _, ok := err.(reconciler.UpdateNotReadyErr); ok { - logger.Debug("requeueing registry server for catalog update check: update pod not yet ready") - o.catsrcQueueSet.RequeueAfter(out.GetNamespace(), out.GetName(), reconciler.CatalogPollingRequeuePeriod) - return - } - syncError = fmt.Errorf("couldn't ensure registry server - %v", err) - out.SetError(v1alpha1.CatalogSourceRegistryServerError, syncError) - return - } - - logger.Debug("ensured registry server") - - // requeue the catalog sync based on the polling interval, for accurate syncs of catalogs with polling enabled - if out.Spec.UpdateStrategy != nil && out.Spec.UpdateStrategy.RegistryPoll != nil { - if out.Spec.UpdateStrategy.Interval == nil { - syncError = fmt.Errorf("empty polling interval; cannot requeue registry server sync without a provided polling interval") - out.SetError(v1alpha1.CatalogSourceIntervalInvalidError, syncError) - return - } - if out.Spec.UpdateStrategy.RegistryPoll.ParsingError != "" && out.Status.Reason != v1alpha1.CatalogSourceIntervalInvalidError { - out.SetError(v1alpha1.CatalogSourceIntervalInvalidError, 
fmt.Errorf(out.Spec.UpdateStrategy.RegistryPoll.ParsingError)) - } - logger.Debugf("requeuing registry server sync based on polling interval %s", out.Spec.UpdateStrategy.Interval.Duration.String()) - resyncPeriod := reconciler.SyncRegistryUpdateInterval(out, time.Now()) - o.catsrcQueueSet.RequeueAfter(out.GetNamespace(), out.GetName(), queueinformer.ResyncWithJitter(resyncPeriod, 0.1)()) - return - } - - if err := o.sources.Remove(sourceKey); err != nil { - o.logger.WithError(err).Debug("error closing client connection") - } - - return + out = in.DeepCopy() + + sourceKey := registry.CatalogKey{Name: in.GetName(), Namespace: in.GetNamespace()} + srcReconciler := o.reconciler.ReconcilerForSource(in) + if srcReconciler == nil { + // TODO: Add failure status on catalogsource and remove from sources + syncError = fmt.Errorf("no reconciler for source type %s", in.Spec.SourceType) + out.SetError(v1alpha1.CatalogSourceRegistryServerError, syncError) + return + } + + healthy, err := srcReconciler.CheckRegistryServer(in) + if err != nil { + syncError = err + out.SetError(v1alpha1.CatalogSourceRegistryServerError, syncError) + return + } + + logger.Debugf("check registry server healthy: %t", healthy) + + if healthy && in.Status.RegistryServiceStatus != nil { + logger.Debug("registry state good") + continueSync = true + // return here if catalog does not have polling enabled + if !out.Poll() { + return + } + } + + // Registry pod hasn't been created or hasn't been updated since the last configmap update, recreate it + logger.Debug("ensuring registry server") + + err = srcReconciler.EnsureRegistryServer(out) + if err != nil { + if _, ok := err.(reconciler.UpdateNotReadyErr); ok { + logger.Debug("requeueing registry server for catalog update check: update pod not yet ready") + o.catsrcQueueSet.RequeueAfter(out.GetNamespace(), out.GetName(), reconciler.CatalogPollingRequeuePeriod) + return + } + syncError = fmt.Errorf("couldn't ensure registry server - %v", err) + 
out.SetError(v1alpha1.CatalogSourceRegistryServerError, syncError) + return + } + + logger.Debug("ensured registry server") + + // requeue the catalog sync based on the polling interval, for accurate syncs of catalogs with polling enabled + if out.Spec.UpdateStrategy != nil && out.Spec.UpdateStrategy.RegistryPoll != nil { + if out.Spec.UpdateStrategy.Interval == nil { + syncError = fmt.Errorf("empty polling interval; cannot requeue registry server sync without a provided polling interval") + out.SetError(v1alpha1.CatalogSourceIntervalInvalidError, syncError) + return + } + if out.Spec.UpdateStrategy.RegistryPoll.ParsingError != "" && out.Status.Reason != v1alpha1.CatalogSourceIntervalInvalidError { + out.SetError(v1alpha1.CatalogSourceIntervalInvalidError, fmt.Errorf(out.Spec.UpdateStrategy.RegistryPoll.ParsingError)) + } + logger.Debugf("requeuing registry server sync based on polling interval %s", out.Spec.UpdateStrategy.Interval.Duration.String()) + resyncPeriod := reconciler.SyncRegistryUpdateInterval(out, time.Now()) + o.catsrcQueueSet.RequeueAfter(out.GetNamespace(), out.GetName(), queueinformer.ResyncWithJitter(resyncPeriod, 0.1)()) + return + } + + if err := o.sources.Remove(sourceKey); err != nil { + o.logger.WithError(err).Debug("error closing client connection") + } + + return } func (o *Operator) syncConnection(logger *logrus.Entry, in *v1alpha1.CatalogSource) (out *v1alpha1.CatalogSource, continueSync bool, syncError error) { - out = in.DeepCopy() - - sourceKey := registry.CatalogKey{Name: in.GetName(), Namespace: in.GetNamespace()} - // update operator's view of sources - now := o.now() - address := in.Address() - - connectFunc := func() (source *grpc.SourceMeta, connErr error) { - newSource, err := o.sources.Add(sourceKey, address) - if err != nil { - connErr = fmt.Errorf("couldn't connect to registry - %v", err) - return - } - - if newSource == nil { - connErr = errors.New("couldn't connect to registry") - return - } - - source = 
&newSource.SourceMeta - return - } - - updateConnectionStateFunc := func(out *v1alpha1.CatalogSource, source *grpc.SourceMeta) { - out.Status.GRPCConnectionState = &v1alpha1.GRPCConnectionState{ - Address: source.Address, - LastObservedState: source.ConnectionState.String(), - LastConnectTime: source.LastConnect, - } - } - - source := o.sources.GetMeta(sourceKey) - if source == nil { - source, syncError = connectFunc() - if syncError != nil { - out.SetError(v1alpha1.CatalogSourceRegistryServerError, syncError) - return - } - - // Set connection status and return. - updateConnectionStateFunc(out, source) - return - } - - if source.Address != address { - source, syncError = connectFunc() - if syncError != nil { - out.SetError(v1alpha1.CatalogSourceRegistryServerError, syncError) - return - } - - // Set connection status and return. - updateConnectionStateFunc(out, source) - } - - // GRPCConnectionState update must fail before - if out.Status.GRPCConnectionState == nil { - updateConnectionStateFunc(out, source) - } - - // connection is already good, but we need to update the sync time - if o.sourcesLastUpdate.After(out.Status.GRPCConnectionState.LastConnectTime.Time) { - // Set connection status and return. 
- out.Status.GRPCConnectionState.LastConnectTime = now - out.Status.GRPCConnectionState.LastObservedState = source.ConnectionState.String() - out.Status.GRPCConnectionState.Address = source.Address - } - - return + out = in.DeepCopy() + + sourceKey := registry.CatalogKey{Name: in.GetName(), Namespace: in.GetNamespace()} + // update operator's view of sources + now := o.now() + address := in.Address() + + connectFunc := func() (source *grpc.SourceMeta, connErr error) { + newSource, err := o.sources.Add(sourceKey, address) + if err != nil { + connErr = fmt.Errorf("couldn't connect to registry - %v", err) + return + } + + if newSource == nil { + connErr = errors.New("couldn't connect to registry") + return + } + + source = &newSource.SourceMeta + return + } + + updateConnectionStateFunc := func(out *v1alpha1.CatalogSource, source *grpc.SourceMeta) { + out.Status.GRPCConnectionState = &v1alpha1.GRPCConnectionState{ + Address: source.Address, + LastObservedState: source.ConnectionState.String(), + LastConnectTime: source.LastConnect, + } + } + + source := o.sources.GetMeta(sourceKey) + if source == nil { + source, syncError = connectFunc() + if syncError != nil { + out.SetError(v1alpha1.CatalogSourceRegistryServerError, syncError) + return + } + + // Set connection status and return. + updateConnectionStateFunc(out, source) + return + } + + if source.Address != address { + source, syncError = connectFunc() + if syncError != nil { + out.SetError(v1alpha1.CatalogSourceRegistryServerError, syncError) + return + } + + // Set connection status and return. + updateConnectionStateFunc(out, source) + } + + // GRPCConnectionState update must fail before + if out.Status.GRPCConnectionState == nil { + updateConnectionStateFunc(out, source) + } + + // connection is already good, but we need to update the sync time + if o.sourcesLastUpdate.After(out.Status.GRPCConnectionState.LastConnectTime.Time) { + // Set connection status and return. 
+ out.Status.GRPCConnectionState.LastConnectTime = now + out.Status.GRPCConnectionState.LastObservedState = source.ConnectionState.String() + out.Status.GRPCConnectionState.Address = source.Address + } + + return } func (o *Operator) syncCatalogSources(obj interface{}) (syncError error) { - catsrc, ok := obj.(*v1alpha1.CatalogSource) - if !ok { - o.logger.Debugf("wrong type: %#v", obj) - syncError = nil - return - } - - logger := o.logger.WithFields(logrus.Fields{ - "source": catsrc.GetName(), - "id": queueinformer.NewLoopID(), - }) - logger.Debug("syncing catsrc") - - syncFunc := func(in *v1alpha1.CatalogSource, chain []CatalogSourceSyncFunc) (out *v1alpha1.CatalogSource, syncErr error) { - out = in - for _, syncFunc := range chain { - cont := false - out, cont, syncErr = syncFunc(logger, in) - if syncErr != nil { - return - } - - if !cont { - return - } - - in = out - } - - return - } - - equalFunc := func(a, b *v1alpha1.CatalogSourceStatus) bool { - return reflect.DeepEqual(a, b) - } - - chain := []CatalogSourceSyncFunc{ - validateSourceType, - o.syncConfigMap, - o.syncRegistryServer, - o.syncConnection, - } - - in := catsrc.DeepCopy() - in.SetError("", nil) - - out, syncError := syncFunc(in, chain) - - if out == nil { - return - } - - if equalFunc(&catsrc.Status, &out.Status) { - return - } - - updateErr := catalogsource.UpdateStatus(logger, o.client, out) - if syncError == nil && updateErr != nil { - syncError = updateErr - } - - return + catsrc, ok := obj.(*v1alpha1.CatalogSource) + if !ok { + o.logger.Debugf("wrong type: %#v", obj) + syncError = nil + return + } + + logger := o.logger.WithFields(logrus.Fields{ + "source": catsrc.GetName(), + "id": queueinformer.NewLoopID(), + }) + logger.Debug("syncing catsrc") + + syncFunc := func(in *v1alpha1.CatalogSource, chain []CatalogSourceSyncFunc) (out *v1alpha1.CatalogSource, syncErr error) { + out = in + for _, syncFunc := range chain { + cont := false + out, cont, syncErr = syncFunc(logger, in) + if syncErr != 
nil { + return + } + + if !cont { + return + } + + in = out + } + + return + } + + equalFunc := func(a, b *v1alpha1.CatalogSourceStatus) bool { + return reflect.DeepEqual(a, b) + } + + chain := []CatalogSourceSyncFunc{ + validateSourceType, + o.syncConfigMap, + o.syncRegistryServer, + o.syncConnection, + } + + in := catsrc.DeepCopy() + in.SetError("", nil) + + out, syncError := syncFunc(in, chain) + + if out == nil { + return + } + + if equalFunc(&catsrc.Status, &out.Status) { + return + } + + updateErr := catalogsource.UpdateStatus(logger, o.client, out) + if syncError == nil && updateErr != nil { + syncError = updateErr + } + + return } func (o *Operator) syncResolvingNamespace(obj interface{}) error { - ns, ok := obj.(*corev1.Namespace) - if !ok { - o.logger.Debugf("wrong type: %#v", obj) - return fmt.Errorf("casting Namespace failed") - } - namespace := ns.GetName() - - logger := o.logger.WithFields(logrus.Fields{ - "namespace": namespace, - "id": queueinformer.NewLoopID(), - }) - - o.gcInstallPlans(logger, namespace) - - // get the set of sources that should be used for resolution and best-effort get their connections working - logger.Debug("resolving sources") - - logger.Debug("checking if subscriptions need update") - - subs, err := o.listSubscriptions(namespace) - if err != nil { - logger.WithError(err).Debug("couldn't list subscriptions") - return err - } - - // If there are no subscriptions, don't attempt to sync the namespace. 
- if len(subs) == 0 { - logger.Debug(fmt.Sprintf("No subscriptions were found in namespace %v", namespace)) - return nil - } - - ogLister := o.lister.OperatorsV1().OperatorGroupLister().OperatorGroups(namespace) - failForwardEnabled, err := resolver.IsFailForwardEnabled(ogLister) - if err != nil { - return err - } - - unpackTimeout, err := bundle.OperatorGroupBundleUnpackTimeout(ogLister) - if err != nil { - return err - } - - minUnpackRetryInterval, err := bundle.OperatorGroupBundleUnpackRetryInterval(ogLister) - if err != nil { - return err - } - - // TODO: parallel - maxGeneration := 0 - subscriptionUpdated := false - for i, sub := range subs { - logger := logger.WithFields(logrus.Fields{ - "sub": sub.GetName(), - "source": sub.Spec.CatalogSource, - "pkg": sub.Spec.Package, - "channel": sub.Spec.Channel, - }) - - if sub.Status.InstallPlanGeneration > maxGeneration { - maxGeneration = sub.Status.InstallPlanGeneration - } - - // ensure the installplan reference is correct - sub, changedIP, err := o.ensureSubscriptionInstallPlanState(logger, sub, failForwardEnabled) - if err != nil { - logger.Debugf("error ensuring installplan state: %v", err) - return err - } - subscriptionUpdated = subscriptionUpdated || changedIP - - // record the current state of the desired corresponding CSV in the status. no-op if we don't know the csv yet. 
- sub, changedCSV, err := o.ensureSubscriptionCSVState(logger, sub, failForwardEnabled) - if err != nil { - logger.Debugf("error recording current state of CSV in status: %v", err) - return err - } - - subscriptionUpdated = subscriptionUpdated || changedCSV - subs[i] = sub - } - if subscriptionUpdated { - logger.Debug("subscriptions were updated, wait for a new resolution") - return nil - } - - shouldUpdate := false - for _, sub := range subs { - shouldUpdate = shouldUpdate || !o.nothingToUpdate(logger, sub) - } - if !shouldUpdate { - logger.Debug("all subscriptions up to date") - return nil - } - - logger.Debug("resolving subscriptions in namespace") - - // resolve a set of steps to apply to a cluster, a set of subscriptions to create/update, and any errors - steps, bundleLookups, updatedSubs, err := o.resolver.ResolveSteps(namespace) - if err != nil { - go o.recorder.Event(ns, corev1.EventTypeWarning, "ResolutionFailed", err.Error()) - // If the error is constraints not satisfiable, then simply project the - // resolution failure event and move on without returning the error. - // Returning the error only triggers the namespace resync which is unnecessary - // given not-satisfiable error is terminal and most likely require intervention - // from users/admins. 
Resyncing the namespace again is unlikely to resolve - // not-satisfiable error - if _, ok := err.(solver.NotSatisfiable); ok { - logger.WithError(err).Debug("resolution failed") - _, updateErr := o.updateSubscriptionStatuses( - o.setSubsCond(subs, v1alpha1.SubscriptionCondition{ - Type: v1alpha1.SubscriptionResolutionFailed, - Reason: "ConstraintsNotSatisfiable", - Message: err.Error(), - Status: corev1.ConditionTrue, - })) - if updateErr != nil { - logger.WithError(updateErr).Debug("failed to update subs conditions") - return updateErr - } - return nil - } - - _, updateErr := o.updateSubscriptionStatuses( - o.setSubsCond(subs, v1alpha1.SubscriptionCondition{ - Type: v1alpha1.SubscriptionResolutionFailed, - Reason: "ErrorPreventedResolution", - Message: err.Error(), - Status: corev1.ConditionTrue, - })) - if updateErr != nil { - logger.WithError(updateErr).Debug("failed to update subs conditions") - return updateErr - } - return err - } - - // Attempt to unpack bundles before installing - // Note: This should probably use the attenuated client to prevent users from resolving resources they otherwise don't have access to. 
- if len(bundleLookups) > 0 { - logger.Debug("unpacking bundles") - - var unpacked bool - unpacked, steps, bundleLookups, err = o.unpackBundles(namespace, steps, bundleLookups, unpackTimeout, minUnpackRetryInterval) - if err != nil { - // If the error was fatal capture and fail - if olmerrors.IsFatal(err) { - _, updateErr := o.updateSubscriptionStatuses( - o.setSubsCond(subs, v1alpha1.SubscriptionCondition{ - Type: v1alpha1.SubscriptionBundleUnpackFailed, - Reason: "ErrorPreventedUnpacking", - Message: err.Error(), - Status: corev1.ConditionTrue, - })) - if updateErr != nil { - logger.WithError(updateErr).Debug("failed to update subs conditions") - return updateErr - } - return nil - } - // Retry sync if non-fatal error - return fmt.Errorf("bundle unpacking failed with an error: %w", err) - } - - // Check BundleLookup status conditions to see if the BundleLookupFailed condtion is true - // which means bundle lookup has failed and subscriptions need to be updated - // with a condition indicating the failure. - isFailed, cond := hasBundleLookupFailureCondition(bundleLookups) - if isFailed { - err := fmt.Errorf("bundle unpacking failed. Reason: %v, and Message: %v", cond.Reason, cond.Message) - logger.Infof("%v", err) - - _, updateErr := o.updateSubscriptionStatuses( - o.setSubsCond(subs, v1alpha1.SubscriptionCondition{ - Type: v1alpha1.SubscriptionBundleUnpackFailed, - Reason: "BundleUnpackFailed", - Message: err.Error(), - Status: corev1.ConditionTrue, - })) - if updateErr != nil { - logger.WithError(updateErr).Debug("failed to update subs conditions") - return updateErr - } - // Since this is likely requires intervention we do not want to - // requeue too often. We return no error here and rely on a - // periodic resync which will help to automatically resolve - // some issues such as unreachable bundle images caused by - // bad catalog updates. 
- return nil - } - - // This means that the unpack job is still running (most likely) or - // there was some issue which we did not handle above. - if !unpacked { - _, updateErr := o.updateSubscriptionStatuses( - o.setSubsCond(subs, v1alpha1.SubscriptionCondition{ - Type: v1alpha1.SubscriptionBundleUnpacking, - Reason: "UnpackingInProgress", - Status: corev1.ConditionTrue, - })) - if updateErr != nil { - logger.WithError(updateErr).Debug("failed to update subs conditions") - return updateErr - } - - logger.Debug("unpacking is not complete yet, requeueing") - o.nsResolveQueue.AddAfter(namespace, 5*time.Second) - return nil - } - } - - // create installplan if anything updated - if len(updatedSubs) > 0 { - logger.Debug("resolution caused subscription changes, creating installplan") - // Finish calculating max generation by checking the existing installplans - installPlans, err := o.listInstallPlans(namespace) - if err != nil { - return err - } - for _, ip := range installPlans { - if gen := ip.Spec.Generation; gen > maxGeneration { - maxGeneration = gen - } - } - - // any subscription in the namespace with manual approval will force generated installplans to be manual - // TODO: this is an odd artifact of the older resolver, and will probably confuse users. approval mode could be on the operatorgroup? 
- installPlanApproval := v1alpha1.ApprovalAutomatic - for _, sub := range subs { - if sub.Spec.InstallPlanApproval == v1alpha1.ApprovalManual { - installPlanApproval = v1alpha1.ApprovalManual - break - } - } - - installPlanReference, err := o.ensureInstallPlan(logger, namespace, maxGeneration+1, subs, installPlanApproval, steps, bundleLookups) - if err != nil { - logger.WithError(err).Debug("error ensuring installplan") - return err - } - updatedSubs = o.setIPReference(updatedSubs, maxGeneration+1, installPlanReference) - } else { - logger.Debugf("no subscriptions were updated") - } - - // Make sure that we no longer indicate unpacking progress - o.removeSubsCond(subs, v1alpha1.SubscriptionBundleUnpacking) - - // Remove BundleUnpackFailed condition from subscriptions - o.removeSubsCond(subs, v1alpha1.SubscriptionBundleUnpackFailed) - - // Remove resolutionfailed condition from subscriptions - o.removeSubsCond(subs, v1alpha1.SubscriptionResolutionFailed) - - newSub := true - for _, updatedSub := range updatedSubs { - updatedSub.Status.RemoveConditions(v1alpha1.SubscriptionResolutionFailed) - for i, sub := range subs { - if sub.Name == updatedSub.Name && sub.Namespace == updatedSub.Namespace { - subs[i] = updatedSub - newSub = false - break - } - } - if newSub { - subs = append(subs, updatedSub) - continue - } - newSub = true - } - - // Update subscriptions with all changes so far - _, updateErr := o.updateSubscriptionStatuses(subs) - if updateErr != nil { - logger.WithError(updateErr).Warn("failed to update subscription conditions") - return updateErr - } - - return nil + ns, ok := obj.(*corev1.Namespace) + if !ok { + o.logger.Debugf("wrong type: %#v", obj) + return fmt.Errorf("casting Namespace failed") + } + namespace := ns.GetName() + + logger := o.logger.WithFields(logrus.Fields{ + "namespace": namespace, + "id": queueinformer.NewLoopID(), + }) + + o.gcInstallPlans(logger, namespace) + + // get the set of sources that should be used for resolution and 
best-effort get their connections working + logger.Debug("resolving sources") + + logger.Debug("checking if subscriptions need update") + + subs, err := o.listSubscriptions(namespace) + if err != nil { + logger.WithError(err).Debug("couldn't list subscriptions") + return err + } + + // If there are no subscriptions, don't attempt to sync the namespace. + if len(subs) == 0 { + logger.Debug(fmt.Sprintf("No subscriptions were found in namespace %v", namespace)) + return nil + } + + ogLister := o.lister.OperatorsV1().OperatorGroupLister().OperatorGroups(namespace) + failForwardEnabled, err := resolver.IsFailForwardEnabled(ogLister) + if err != nil { + return err + } + + unpackTimeout, err := bundle.OperatorGroupBundleUnpackTimeout(ogLister) + if err != nil { + return err + } + + minUnpackRetryInterval, err := bundle.OperatorGroupBundleUnpackRetryInterval(ogLister) + if err != nil { + return err + } + + // TODO: parallel + maxGeneration := 0 + subscriptionUpdated := false + for i, sub := range subs { + logger := logger.WithFields(logrus.Fields{ + "sub": sub.GetName(), + "source": sub.Spec.CatalogSource, + "pkg": sub.Spec.Package, + "channel": sub.Spec.Channel, + }) + + if sub.Status.InstallPlanGeneration > maxGeneration { + maxGeneration = sub.Status.InstallPlanGeneration + } + + // ensure the installplan reference is correct + sub, changedIP, err := o.ensureSubscriptionInstallPlanState(logger, sub, failForwardEnabled) + if err != nil { + logger.Debugf("error ensuring installplan state: %v", err) + return err + } + subscriptionUpdated = subscriptionUpdated || changedIP + + // record the current state of the desired corresponding CSV in the status. no-op if we don't know the csv yet. 
+ sub, changedCSV, err := o.ensureSubscriptionCSVState(logger, sub, failForwardEnabled) + if err != nil { + logger.Debugf("error recording current state of CSV in status: %v", err) + return err + } + + subscriptionUpdated = subscriptionUpdated || changedCSV + subs[i] = sub + } + if subscriptionUpdated { + logger.Debug("subscriptions were updated, wait for a new resolution") + return nil + } + + shouldUpdate := false + for _, sub := range subs { + shouldUpdate = shouldUpdate || !o.nothingToUpdate(logger, sub) + } + if !shouldUpdate { + logger.Debug("all subscriptions up to date") + return nil + } + + logger.Debug("resolving subscriptions in namespace") + + // resolve a set of steps to apply to a cluster, a set of subscriptions to create/update, and any errors + steps, bundleLookups, updatedSubs, err := o.resolver.ResolveSteps(namespace) + if err != nil { + go o.recorder.Event(ns, corev1.EventTypeWarning, "ResolutionFailed", err.Error()) + // If the error is constraints not satisfiable, then simply project the + // resolution failure event and move on without returning the error. + // Returning the error only triggers the namespace resync which is unnecessary + // given not-satisfiable error is terminal and most likely require intervention + // from users/admins. 
Resyncing the namespace again is unlikely to resolve + // not-satisfiable error + if _, ok := err.(solver.NotSatisfiable); ok { + logger.WithError(err).Debug("resolution failed") + _, updateErr := o.updateSubscriptionStatuses( + o.setSubsCond(subs, v1alpha1.SubscriptionCondition{ + Type: v1alpha1.SubscriptionResolutionFailed, + Reason: "ConstraintsNotSatisfiable", + Message: err.Error(), + Status: corev1.ConditionTrue, + })) + if updateErr != nil { + logger.WithError(updateErr).Debug("failed to update subs conditions") + return updateErr + } + return nil + } + + _, updateErr := o.updateSubscriptionStatuses( + o.setSubsCond(subs, v1alpha1.SubscriptionCondition{ + Type: v1alpha1.SubscriptionResolutionFailed, + Reason: "ErrorPreventedResolution", + Message: err.Error(), + Status: corev1.ConditionTrue, + })) + if updateErr != nil { + logger.WithError(updateErr).Debug("failed to update subs conditions") + return updateErr + } + return err + } + + // Attempt to unpack bundles before installing + // Note: This should probably use the attenuated client to prevent users from resolving resources they otherwise don't have access to. 
+ if len(bundleLookups) > 0 { + logger.Debug("unpacking bundles") + + var unpacked bool + unpacked, steps, bundleLookups, err = o.unpackBundles(namespace, steps, bundleLookups, unpackTimeout, minUnpackRetryInterval) + if err != nil { + // If the error was fatal capture and fail + if olmerrors.IsFatal(err) { + _, updateErr := o.updateSubscriptionStatuses( + o.setSubsCond(subs, v1alpha1.SubscriptionCondition{ + Type: v1alpha1.SubscriptionBundleUnpackFailed, + Reason: "ErrorPreventedUnpacking", + Message: err.Error(), + Status: corev1.ConditionTrue, + })) + if updateErr != nil { + logger.WithError(updateErr).Debug("failed to update subs conditions") + return updateErr + } + return nil + } + // Retry sync if non-fatal error + return fmt.Errorf("bundle unpacking failed with an error: %w", err) + } + + // Check BundleLookup status conditions to see if the BundleLookupFailed condtion is true + // which means bundle lookup has failed and subscriptions need to be updated + // with a condition indicating the failure. + isFailed, cond := hasBundleLookupFailureCondition(bundleLookups) + if isFailed { + err := fmt.Errorf("bundle unpacking failed. Reason: %v, and Message: %v", cond.Reason, cond.Message) + logger.Infof("%v", err) + + _, updateErr := o.updateSubscriptionStatuses( + o.setSubsCond(subs, v1alpha1.SubscriptionCondition{ + Type: v1alpha1.SubscriptionBundleUnpackFailed, + Reason: "BundleUnpackFailed", + Message: err.Error(), + Status: corev1.ConditionTrue, + })) + if updateErr != nil { + logger.WithError(updateErr).Debug("failed to update subs conditions") + return updateErr + } + // Since this is likely requires intervention we do not want to + // requeue too often. We return no error here and rely on a + // periodic resync which will help to automatically resolve + // some issues such as unreachable bundle images caused by + // bad catalog updates. 
+ return nil + } + + // This means that the unpack job is still running (most likely) or + // there was some issue which we did not handle above. + if !unpacked { + _, updateErr := o.updateSubscriptionStatuses( + o.setSubsCond(subs, v1alpha1.SubscriptionCondition{ + Type: v1alpha1.SubscriptionBundleUnpacking, + Reason: "UnpackingInProgress", + Status: corev1.ConditionTrue, + })) + if updateErr != nil { + logger.WithError(updateErr).Debug("failed to update subs conditions") + return updateErr + } + + logger.Debug("unpacking is not complete yet, requeueing") + o.nsResolveQueue.AddAfter(namespace, 5*time.Second) + return nil + } + } + + // create installplan if anything updated + if len(updatedSubs) > 0 { + logger.Debug("resolution caused subscription changes, creating installplan") + // Finish calculating max generation by checking the existing installplans + installPlans, err := o.listInstallPlans(namespace) + if err != nil { + return err + } + for _, ip := range installPlans { + if gen := ip.Spec.Generation; gen > maxGeneration { + maxGeneration = gen + } + } + + // any subscription in the namespace with manual approval will force generated installplans to be manual + // TODO: this is an odd artifact of the older resolver, and will probably confuse users. approval mode could be on the operatorgroup? 
+ installPlanApproval := v1alpha1.ApprovalAutomatic + for _, sub := range subs { + if sub.Spec.InstallPlanApproval == v1alpha1.ApprovalManual { + installPlanApproval = v1alpha1.ApprovalManual + break + } + } + + installPlanReference, err := o.ensureInstallPlan(logger, namespace, maxGeneration+1, subs, installPlanApproval, steps, bundleLookups) + if err != nil { + logger.WithError(err).Debug("error ensuring installplan") + return err + } + updatedSubs = o.setIPReference(updatedSubs, maxGeneration+1, installPlanReference) + } else { + logger.Debugf("no subscriptions were updated") + } + + // Make sure that we no longer indicate unpacking progress + o.removeSubsCond(subs, v1alpha1.SubscriptionBundleUnpacking) + + // Remove BundleUnpackFailed condition from subscriptions + o.removeSubsCond(subs, v1alpha1.SubscriptionBundleUnpackFailed) + + // Remove resolutionfailed condition from subscriptions + o.removeSubsCond(subs, v1alpha1.SubscriptionResolutionFailed) + + newSub := true + for _, updatedSub := range updatedSubs { + updatedSub.Status.RemoveConditions(v1alpha1.SubscriptionResolutionFailed) + for i, sub := range subs { + if sub.Name == updatedSub.Name && sub.Namespace == updatedSub.Namespace { + subs[i] = updatedSub + newSub = false + break + } + } + if newSub { + subs = append(subs, updatedSub) + continue + } + newSub = true + } + + // Update subscriptions with all changes so far + _, updateErr := o.updateSubscriptionStatuses(subs) + if updateErr != nil { + logger.WithError(updateErr).Warn("failed to update subscription conditions") + return updateErr + } + + return nil } func (o *Operator) syncSubscriptions(obj interface{}) error { - sub, ok := obj.(*v1alpha1.Subscription) - if !ok { - o.logger.Debugf("wrong type: %#v", obj) - return fmt.Errorf("casting Subscription failed") - } + sub, ok := obj.(*v1alpha1.Subscription) + if !ok { + o.logger.Debugf("wrong type: %#v", obj) + return fmt.Errorf("casting Subscription failed") + } - 
o.nsResolveQueue.Add(sub.GetNamespace()) + o.nsResolveQueue.Add(sub.GetNamespace()) - return nil + return nil } // syncOperatorGroups requeues the namespace resolution queue on changes to an operatorgroup // This is because the operatorgroup is now an input to resolution via the global catalog exclusion annotation func (o *Operator) syncOperatorGroups(obj interface{}) error { - og, ok := obj.(*operatorsv1.OperatorGroup) - if !ok { - o.logger.Debugf("wrong type: %#v", obj) - return fmt.Errorf("casting OperatorGroup failed") - } + og, ok := obj.(*operatorsv1.OperatorGroup) + if !ok { + o.logger.Debugf("wrong type: %#v", obj) + return fmt.Errorf("casting OperatorGroup failed") + } - o.nsResolveQueue.Add(og.GetNamespace()) + o.nsResolveQueue.Add(og.GetNamespace()) - return nil + return nil } func (o *Operator) nothingToUpdate(logger *logrus.Entry, sub *v1alpha1.Subscription) bool { - if sub.Status.InstallPlanRef != nil && sub.Status.State == v1alpha1.SubscriptionStateUpgradePending { - logger.Debugf("skipping update: installplan already created") - return true - } - return false + if sub.Status.InstallPlanRef != nil && sub.Status.State == v1alpha1.SubscriptionStateUpgradePending { + logger.Debugf("skipping update: installplan already created") + return true + } + return false } func (o *Operator) ensureSubscriptionInstallPlanState(logger *logrus.Entry, sub *v1alpha1.Subscription, failForwardEnabled bool) (*v1alpha1.Subscription, bool, error) { - if sub.Status.InstallPlanRef != nil || sub.Status.Install != nil { - return sub, false, nil - } - - logger.Debug("checking for existing installplan") - - // check if there's an installplan that created this subscription (only if it doesn't have a reference yet) - // this indicates it was newly resolved by another operator, and we should reference that installplan in the status - ipName, ok := sub.GetAnnotations()[generatedByKey] - if !ok { - return sub, false, nil - } - - ip, err := 
o.client.OperatorsV1alpha1().InstallPlans(sub.GetNamespace()).Get(context.TODO(), ipName, metav1.GetOptions{}) - if err != nil { - logger.WithField("installplan", ipName).Warn("unable to get installplan from cache") - return nil, false, err - } - logger.WithField("installplan", ipName).Debug("found installplan that generated subscription") - - out := sub.DeepCopy() - ref, err := reference.GetReference(ip) - if err != nil { - logger.WithError(err).Warn("unable to generate installplan reference") - return nil, false, err - } - out.Status.InstallPlanRef = ref - out.Status.Install = v1alpha1.NewInstallPlanReference(ref) - out.Status.State = v1alpha1.SubscriptionStateUpgradePending - if failForwardEnabled && ip.Status.Phase == v1alpha1.InstallPlanPhaseFailed { - out.Status.State = v1alpha1.SubscriptionStateFailed - } - out.Status.CurrentCSV = out.Spec.StartingCSV - out.Status.LastUpdated = o.now() - - return out, true, nil + if sub.Status.InstallPlanRef != nil || sub.Status.Install != nil { + return sub, false, nil + } + + logger.Debug("checking for existing installplan") + + // check if there's an installplan that created this subscription (only if it doesn't have a reference yet) + // this indicates it was newly resolved by another operator, and we should reference that installplan in the status + ipName, ok := sub.GetAnnotations()[generatedByKey] + if !ok { + return sub, false, nil + } + + ip, err := o.client.OperatorsV1alpha1().InstallPlans(sub.GetNamespace()).Get(context.TODO(), ipName, metav1.GetOptions{}) + if err != nil { + logger.WithField("installplan", ipName).Warn("unable to get installplan from cache") + return nil, false, err + } + logger.WithField("installplan", ipName).Debug("found installplan that generated subscription") + + out := sub.DeepCopy() + ref, err := reference.GetReference(ip) + if err != nil { + logger.WithError(err).Warn("unable to generate installplan reference") + return nil, false, err + } + out.Status.InstallPlanRef = ref + 
out.Status.Install = v1alpha1.NewInstallPlanReference(ref) + out.Status.State = v1alpha1.SubscriptionStateUpgradePending + if failForwardEnabled && ip.Status.Phase == v1alpha1.InstallPlanPhaseFailed { + out.Status.State = v1alpha1.SubscriptionStateFailed + } + out.Status.CurrentCSV = out.Spec.StartingCSV + out.Status.LastUpdated = o.now() + + return out, true, nil } func (o *Operator) ensureSubscriptionCSVState(logger *logrus.Entry, sub *v1alpha1.Subscription, failForwardEnabled bool) (*v1alpha1.Subscription, bool, error) { - if sub.Status.CurrentCSV == "" { - return sub, false, nil - } - - _, err := o.client.OperatorsV1alpha1().ClusterServiceVersions(sub.GetNamespace()).Get(context.TODO(), sub.Status.CurrentCSV, metav1.GetOptions{}) - out := sub.DeepCopy() - if err != nil { - logger.WithError(err).WithField("currentCSV", sub.Status.CurrentCSV).Debug("error fetching csv listed in subscription status") - out.Status.State = v1alpha1.SubscriptionStateUpgradePending - if failForwardEnabled && sub.Status.InstallPlanRef != nil { - ip, err := o.client.OperatorsV1alpha1().InstallPlans(sub.GetNamespace()).Get(context.TODO(), sub.Status.InstallPlanRef.Name, metav1.GetOptions{}) - if err != nil { - logger.WithError(err).WithField("currentCSV", sub.Status.CurrentCSV).Debug("error fetching installplan listed in subscription status") - } else if ip.Status.Phase == v1alpha1.InstallPlanPhaseFailed { - out.Status.State = v1alpha1.SubscriptionStateFailed - } - } - } else { - out.Status.State = v1alpha1.SubscriptionStateAtLatest - out.Status.InstalledCSV = sub.Status.CurrentCSV - } - - if sub.Status.State == out.Status.State { - // The subscription status represents the cluster state - return sub, false, nil - } - out.Status.LastUpdated = o.now() - - // Update Subscription with status of transition. Log errors if we can't write them to the status. 
- updatedSub, err := o.client.OperatorsV1alpha1().Subscriptions(out.GetNamespace()).UpdateStatus(context.TODO(), out, metav1.UpdateOptions{}) - if err != nil { - logger.WithError(err).Info("error updating subscription status") - return nil, false, fmt.Errorf("error updating Subscription status: " + err.Error()) - } - - // subscription status represents cluster state - return updatedSub, true, nil + if sub.Status.CurrentCSV == "" { + return sub, false, nil + } + + _, err := o.client.OperatorsV1alpha1().ClusterServiceVersions(sub.GetNamespace()).Get(context.TODO(), sub.Status.CurrentCSV, metav1.GetOptions{}) + out := sub.DeepCopy() + if err != nil { + logger.WithError(err).WithField("currentCSV", sub.Status.CurrentCSV).Debug("error fetching csv listed in subscription status") + out.Status.State = v1alpha1.SubscriptionStateUpgradePending + if failForwardEnabled && sub.Status.InstallPlanRef != nil { + ip, err := o.client.OperatorsV1alpha1().InstallPlans(sub.GetNamespace()).Get(context.TODO(), sub.Status.InstallPlanRef.Name, metav1.GetOptions{}) + if err != nil { + logger.WithError(err).WithField("currentCSV", sub.Status.CurrentCSV).Debug("error fetching installplan listed in subscription status") + } else if ip.Status.Phase == v1alpha1.InstallPlanPhaseFailed { + out.Status.State = v1alpha1.SubscriptionStateFailed + } + } + } else { + out.Status.State = v1alpha1.SubscriptionStateAtLatest + out.Status.InstalledCSV = sub.Status.CurrentCSV + } + + if sub.Status.State == out.Status.State { + // The subscription status represents the cluster state + return sub, false, nil + } + out.Status.LastUpdated = o.now() + + // Update Subscription with status of transition. Log errors if we can't write them to the status. 
+ updatedSub, err := o.client.OperatorsV1alpha1().Subscriptions(out.GetNamespace()).UpdateStatus(context.TODO(), out, metav1.UpdateOptions{}) + if err != nil { + logger.WithError(err).Info("error updating subscription status") + return nil, false, fmt.Errorf("error updating Subscription status: " + err.Error()) + } + + // subscription status represents cluster state + return updatedSub, true, nil } func (o *Operator) setIPReference(subs []*v1alpha1.Subscription, gen int, installPlanRef *corev1.ObjectReference) []*v1alpha1.Subscription { - var ( - lastUpdated = o.now() - ) - for _, sub := range subs { - sub.Status.LastUpdated = lastUpdated - if installPlanRef != nil { - sub.Status.InstallPlanRef = installPlanRef - sub.Status.Install = v1alpha1.NewInstallPlanReference(installPlanRef) - sub.Status.State = v1alpha1.SubscriptionStateUpgradePending - sub.Status.InstallPlanGeneration = gen - } - } - return subs + var ( + lastUpdated = o.now() + ) + for _, sub := range subs { + sub.Status.LastUpdated = lastUpdated + if installPlanRef != nil { + sub.Status.InstallPlanRef = installPlanRef + sub.Status.Install = v1alpha1.NewInstallPlanReference(installPlanRef) + sub.Status.State = v1alpha1.SubscriptionStateUpgradePending + sub.Status.InstallPlanGeneration = gen + } + } + return subs } func (o *Operator) ensureInstallPlan(logger *logrus.Entry, namespace string, gen int, subs []*v1alpha1.Subscription, installPlanApproval v1alpha1.Approval, steps []*v1alpha1.Step, bundleLookups []v1alpha1.BundleLookup) (*corev1.ObjectReference, error) { - if len(steps) == 0 && len(bundleLookups) == 0 { - return nil, nil - } - - // Check if any existing installplans are creating the same resources - installPlans, err := o.listInstallPlans(namespace) - if err != nil { - return nil, err - } - - // There are multiple(2) worker threads process the namespaceQueue. - // Both worker can work at the same time when 2 separate updates are made for the namespace. 
- // The following sequence causes 2 installplans are created for a subscription - // 1. worker 1 doesn't find the installplan - // 2. worker 2 doesn't find the installplan - // 3. both worker 1 and 2 create the installplan - // - // This lock prevents the step 2 in the sequence so that only one installplan is created for a subscription. - // The sequence is like the following with this lock - // 1. worker 1 locks - // 2. worker 1 doesn't find the installplan - // 3. worker 2 wait for unlock <--- difference - // 4. worker 1 creates the installplan - // 5. worker 1 unlocks - // 6. worker 2 locks - // 7. worker 2 finds the installplan <--- difference - // 8. worker 2 unlocks - o.muInstallPlan.Lock() - defer o.muInstallPlan.Unlock() - - for _, installPlan := range installPlans { - if installPlan.Spec.Generation == gen { - return reference.GetReference(installPlan) - } - } - logger.Warn("no installplan found with matching generation, creating new one") - - return o.createInstallPlan(namespace, gen, subs, installPlanApproval, steps, bundleLookups) + if len(steps) == 0 && len(bundleLookups) == 0 { + return nil, nil + } + + // Check if any existing installplans are creating the same resources + installPlans, err := o.listInstallPlans(namespace) + if err != nil { + return nil, err + } + + // There are multiple(2) worker threads process the namespaceQueue. + // Both worker can work at the same time when 2 separate updates are made for the namespace. + // The following sequence causes 2 installplans are created for a subscription + // 1. worker 1 doesn't find the installplan + // 2. worker 2 doesn't find the installplan + // 3. both worker 1 and 2 create the installplan + // + // This lock prevents the step 2 in the sequence so that only one installplan is created for a subscription. + // The sequence is like the following with this lock + // 1. worker 1 locks + // 2. worker 1 doesn't find the installplan + // 3. worker 2 wait for unlock <--- difference + // 4. 
worker 1 creates the installplan + // 5. worker 1 unlocks + // 6. worker 2 locks + // 7. worker 2 finds the installplan <--- difference + // 8. worker 2 unlocks + o.muInstallPlan.Lock() + defer o.muInstallPlan.Unlock() + + for _, installPlan := range installPlans { + if installPlan.Spec.Generation == gen { + return reference.GetReference(installPlan) + } + } + logger.Warn("no installplan found with matching generation, creating new one") + + return o.createInstallPlan(namespace, gen, subs, installPlanApproval, steps, bundleLookups) } func (o *Operator) createInstallPlan(namespace string, gen int, subs []*v1alpha1.Subscription, installPlanApproval v1alpha1.Approval, steps []*v1alpha1.Step, bundleLookups []v1alpha1.BundleLookup) (*corev1.ObjectReference, error) { - if len(steps) == 0 && len(bundleLookups) == 0 { - return nil, nil - } - - csvNames := []string{} - catalogSourceMap := map[string]struct{}{} - for _, s := range steps { - if s.Resource.Kind == "ClusterServiceVersion" { - csvNames = append(csvNames, s.Resource.Name) - } - catalogSourceMap[s.Resource.CatalogSource] = struct{}{} - } - - catalogSources := []string{} - for s := range catalogSourceMap { - catalogSources = append(catalogSources, s) - } - - phase := v1alpha1.InstallPlanPhaseInstalling - if installPlanApproval == v1alpha1.ApprovalManual { - phase = v1alpha1.InstallPlanPhaseRequiresApproval - } - ip := &v1alpha1.InstallPlan{ - ObjectMeta: metav1.ObjectMeta{ - GenerateName: "install-", - Namespace: namespace, - }, - Spec: v1alpha1.InstallPlanSpec{ - ClusterServiceVersionNames: csvNames, - Approval: installPlanApproval, - Approved: installPlanApproval == v1alpha1.ApprovalAutomatic, - Generation: gen, - }, - } - for _, sub := range subs { - ownerutil.AddNonBlockingOwner(ip, sub) - } - - res, err := o.client.OperatorsV1alpha1().InstallPlans(namespace).Create(context.TODO(), ip, metav1.CreateOptions{}) - if err != nil { - return nil, err - } - - res.Status = v1alpha1.InstallPlanStatus{ - Phase: phase, - 
Plan: steps, - CatalogSources: catalogSources, - BundleLookups: bundleLookups, - } - res, err = o.client.OperatorsV1alpha1().InstallPlans(namespace).UpdateStatus(context.TODO(), res, metav1.UpdateOptions{}) - if err != nil { - return nil, err - } - - return reference.GetReference(res) + if len(steps) == 0 && len(bundleLookups) == 0 { + return nil, nil + } + + csvNames := []string{} + catalogSourceMap := map[string]struct{}{} + for _, s := range steps { + if s.Resource.Kind == "ClusterServiceVersion" { + csvNames = append(csvNames, s.Resource.Name) + } + catalogSourceMap[s.Resource.CatalogSource] = struct{}{} + } + + catalogSources := []string{} + for s := range catalogSourceMap { + catalogSources = append(catalogSources, s) + } + + phase := v1alpha1.InstallPlanPhaseInstalling + if installPlanApproval == v1alpha1.ApprovalManual { + phase = v1alpha1.InstallPlanPhaseRequiresApproval + } + ip := &v1alpha1.InstallPlan{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "install-", + Namespace: namespace, + }, + Spec: v1alpha1.InstallPlanSpec{ + ClusterServiceVersionNames: csvNames, + Approval: installPlanApproval, + Approved: installPlanApproval == v1alpha1.ApprovalAutomatic, + Generation: gen, + }, + } + for _, sub := range subs { + ownerutil.AddNonBlockingOwner(ip, sub) + } + + res, err := o.client.OperatorsV1alpha1().InstallPlans(namespace).Create(context.TODO(), ip, metav1.CreateOptions{}) + if err != nil { + return nil, err + } + + res.Status = v1alpha1.InstallPlanStatus{ + Phase: phase, + Plan: steps, + CatalogSources: catalogSources, + BundleLookups: bundleLookups, + } + res, err = o.client.OperatorsV1alpha1().InstallPlans(namespace).UpdateStatus(context.TODO(), res, metav1.UpdateOptions{}) + if err != nil { + return nil, err + } + + return reference.GetReference(res) } // setSubsCond will set the condition to the subscription if it doesn't already // exist or if it is different // Only return the list of updated subscriptions func (o *Operator) setSubsCond(subs 
[]*v1alpha1.Subscription, cond v1alpha1.SubscriptionCondition) []*v1alpha1.Subscription { - var ( - lastUpdated = o.now() - subList []*v1alpha1.Subscription - ) - - for _, sub := range subs { - subCond := sub.Status.GetCondition(cond.Type) - if subCond.Equals(cond) { - continue - } - sub.Status.LastUpdated = lastUpdated - sub.Status.SetCondition(cond) - subList = append(subList, sub) - } - return subList + var ( + lastUpdated = o.now() + subList []*v1alpha1.Subscription + ) + + for _, sub := range subs { + subCond := sub.Status.GetCondition(cond.Type) + if subCond.Equals(cond) { + continue + } + sub.Status.LastUpdated = lastUpdated + sub.Status.SetCondition(cond) + subList = append(subList, sub) + } + return subList } // removeSubsCond removes the given condition from all of the subscriptions in the input func (o *Operator) removeSubsCond(subs []*v1alpha1.Subscription, condType v1alpha1.SubscriptionConditionType) { - lastUpdated := o.now() - for _, sub := range subs { - cond := sub.Status.GetCondition(condType) - // if status is ConditionUnknown, the condition doesn't exist. Just skip - if cond.Status == corev1.ConditionUnknown { - continue - } - sub.Status.LastUpdated = lastUpdated - sub.Status.RemoveConditions(condType) - } + lastUpdated := o.now() + for _, sub := range subs { + cond := sub.Status.GetCondition(condType) + // if status is ConditionUnknown, the condition doesn't exist. 
Just skip + if cond.Status == corev1.ConditionUnknown { + continue + } + sub.Status.LastUpdated = lastUpdated + sub.Status.RemoveConditions(condType) + } } func (o *Operator) updateSubscriptionStatuses(subs []*v1alpha1.Subscription) ([]*v1alpha1.Subscription, error) { - var ( - errs []error - mu sync.Mutex - wg sync.WaitGroup - getOpts = metav1.GetOptions{} - updateOpts = metav1.UpdateOptions{} - ) - - for _, sub := range subs { - wg.Add(1) - go func(sub *v1alpha1.Subscription) { - defer wg.Done() - - update := func() error { - // Update the status of the latest revision - latest, err := o.client.OperatorsV1alpha1().Subscriptions(sub.GetNamespace()).Get(context.TODO(), sub.GetName(), getOpts) - if err != nil { - return err - } - latest.Status = sub.Status - *sub = *latest - _, err = o.client.OperatorsV1alpha1().Subscriptions(sub.Namespace).UpdateStatus(context.TODO(), latest, updateOpts) - return err - } - if err := retry.RetryOnConflict(retry.DefaultRetry, update); err != nil { - mu.Lock() - defer mu.Unlock() - errs = append(errs, err) - } - }(sub) - } - wg.Wait() - return subs, utilerrors.NewAggregate(errs) + var ( + errs []error + mu sync.Mutex + wg sync.WaitGroup + getOpts = metav1.GetOptions{} + updateOpts = metav1.UpdateOptions{} + ) + + for _, sub := range subs { + wg.Add(1) + go func(sub *v1alpha1.Subscription) { + defer wg.Done() + + update := func() error { + // Update the status of the latest revision + latest, err := o.client.OperatorsV1alpha1().Subscriptions(sub.GetNamespace()).Get(context.TODO(), sub.GetName(), getOpts) + if err != nil { + return err + } + latest.Status = sub.Status + *sub = *latest + _, err = o.client.OperatorsV1alpha1().Subscriptions(sub.Namespace).UpdateStatus(context.TODO(), latest, updateOpts) + return err + } + if err := retry.RetryOnConflict(retry.DefaultRetry, update); err != nil { + mu.Lock() + defer mu.Unlock() + errs = append(errs, err) + } + }(sub) + } + wg.Wait() + return subs, utilerrors.NewAggregate(errs) } type 
UnpackedBundleReference struct { - Kind string `json:"kind"` - Name string `json:"name"` - Namespace string `json:"namespace"` - CatalogSourceName string `json:"catalogSourceName"` - CatalogSourceNamespace string `json:"catalogSourceNamespace"` - Replaces string `json:"replaces"` - Properties string `json:"properties"` + Kind string `json:"kind"` + Name string `json:"name"` + Namespace string `json:"namespace"` + CatalogSourceName string `json:"catalogSourceName"` + CatalogSourceNamespace string `json:"catalogSourceNamespace"` + Replaces string `json:"replaces"` + Properties string `json:"properties"` } func (o *Operator) unpackBundles(namespace string, installPlanSteps []*v1alpha1.Step, bundleLookups []v1alpha1.BundleLookup, unpackTimeout, unpackRetryInterval time.Duration) (bool, []*v1alpha1.Step, []v1alpha1.BundleLookup, error) { - unpacked := true - - outBundleLookups := make([]v1alpha1.BundleLookup, len(bundleLookups)) - for i := range bundleLookups { - bundleLookups[i].DeepCopyInto(&outBundleLookups[i]) - } - outInstallPlanSteps := make([]*v1alpha1.Step, len(installPlanSteps)) - for i := range installPlanSteps { - outInstallPlanSteps[i] = installPlanSteps[i].DeepCopy() - } - - var errs []error - for i := 0; i < len(outBundleLookups); i++ { - lookup := outBundleLookups[i] - res, err := o.bundleUnpacker.UnpackBundle(&lookup, unpackTimeout, unpackRetryInterval) - if err != nil { - errs = append(errs, err) - continue - } - outBundleLookups[i] = *res.BundleLookup - - // if the failed condition is present it means the bundle unpacking has failed - failedCondition := res.GetCondition(v1alpha1.BundleLookupFailed) - if failedCondition.Status == corev1.ConditionTrue { - unpacked = false - continue - } - - // if the bundle lookup pending condition is present it means that the bundle has not been unpacked - // status=true means we're still waiting for the job to unpack to configmap - pendingCondition := res.GetCondition(v1alpha1.BundleLookupPending) - if 
pendingCondition.Status == corev1.ConditionTrue { - unpacked = false - continue - } - - // if packed condition is missing, bundle has already been unpacked into steps, continue - if res.GetCondition(resolver.BundleLookupConditionPacked).Status == corev1.ConditionUnknown { - continue - } - - // Ensure that bundle can be applied by the current version of OLM by converting to bundleSteps - bundleSteps, err := resolver.NewStepsFromBundle(res.Bundle(), namespace, res.Replaces, res.CatalogSourceRef.Name, res.CatalogSourceRef.Namespace) - if err != nil { - if fatal := olmerrors.IsFatal(err); fatal { - return false, nil, nil, err - } - - errs = append(errs, fmt.Errorf("failed to turn bundle into steps: %v", err)) - unpacked = false - continue - } - - // step manifests are replaced with references to the configmap containing them - for i, s := range bundleSteps { - ref := UnpackedBundleReference{ - Kind: "ConfigMap", - Namespace: res.CatalogSourceRef.Namespace, - Name: res.Name(), - CatalogSourceName: res.CatalogSourceRef.Name, - CatalogSourceNamespace: res.CatalogSourceRef.Namespace, - Replaces: res.Replaces, - Properties: res.Properties, - } - r, err := json.Marshal(&ref) - if err != nil { - errs = append(errs, fmt.Errorf("failed to generate reference for configmap: %v", err)) - unpacked = false - continue - } - s.Resource.Manifest = string(r) - bundleSteps[i] = s - } - res.RemoveCondition(resolver.BundleLookupConditionPacked) - outBundleLookups[i] = *res.BundleLookup - outInstallPlanSteps = append(outInstallPlanSteps, bundleSteps...) 
- } - - if err := utilerrors.NewAggregate(errs); err != nil { - o.logger.Debugf("failed to unpack bundles: %v", err) - return false, nil, nil, err - } - - return unpacked, outInstallPlanSteps, outBundleLookups, nil + unpacked := true + + outBundleLookups := make([]v1alpha1.BundleLookup, len(bundleLookups)) + for i := range bundleLookups { + bundleLookups[i].DeepCopyInto(&outBundleLookups[i]) + } + outInstallPlanSteps := make([]*v1alpha1.Step, len(installPlanSteps)) + for i := range installPlanSteps { + outInstallPlanSteps[i] = installPlanSteps[i].DeepCopy() + } + + var errs []error + for i := 0; i < len(outBundleLookups); i++ { + lookup := outBundleLookups[i] + res, err := o.bundleUnpacker.UnpackBundle(&lookup, unpackTimeout, unpackRetryInterval) + if err != nil { + errs = append(errs, err) + continue + } + outBundleLookups[i] = *res.BundleLookup + + // if the failed condition is present it means the bundle unpacking has failed + failedCondition := res.GetCondition(v1alpha1.BundleLookupFailed) + if failedCondition.Status == corev1.ConditionTrue { + unpacked = false + continue + } + + // if the bundle lookup pending condition is present it means that the bundle has not been unpacked + // status=true means we're still waiting for the job to unpack to configmap + pendingCondition := res.GetCondition(v1alpha1.BundleLookupPending) + if pendingCondition.Status == corev1.ConditionTrue { + unpacked = false + continue + } + + // if packed condition is missing, bundle has already been unpacked into steps, continue + if res.GetCondition(resolver.BundleLookupConditionPacked).Status == corev1.ConditionUnknown { + continue + } + + // Ensure that bundle can be applied by the current version of OLM by converting to bundleSteps + bundleSteps, err := resolver.NewStepsFromBundle(res.Bundle(), namespace, res.Replaces, res.CatalogSourceRef.Name, res.CatalogSourceRef.Namespace) + if err != nil { + if fatal := olmerrors.IsFatal(err); fatal { + return false, nil, nil, err + } + + errs = 
append(errs, fmt.Errorf("failed to turn bundle into steps: %v", err)) + unpacked = false + continue + } + + // step manifests are replaced with references to the configmap containing them + for i, s := range bundleSteps { + ref := UnpackedBundleReference{ + Kind: "ConfigMap", + Namespace: res.CatalogSourceRef.Namespace, + Name: res.Name(), + CatalogSourceName: res.CatalogSourceRef.Name, + CatalogSourceNamespace: res.CatalogSourceRef.Namespace, + Replaces: res.Replaces, + Properties: res.Properties, + } + r, err := json.Marshal(&ref) + if err != nil { + errs = append(errs, fmt.Errorf("failed to generate reference for configmap: %v", err)) + unpacked = false + continue + } + s.Resource.Manifest = string(r) + bundleSteps[i] = s + } + res.RemoveCondition(resolver.BundleLookupConditionPacked) + outBundleLookups[i] = *res.BundleLookup + outInstallPlanSteps = append(outInstallPlanSteps, bundleSteps...) + } + + if err := utilerrors.NewAggregate(errs); err != nil { + o.logger.Debugf("failed to unpack bundles: %v", err) + return false, nil, nil, err + } + + return unpacked, outInstallPlanSteps, outBundleLookups, nil } // gcInstallPlans garbage collects installplans that are too old // installplans are ownerrefd to all subscription inputs, so they will not otherwise // be GCd unless all inputs have been deleted. 
func (o *Operator) gcInstallPlans(log logrus.FieldLogger, namespace string) { - allIps, err := o.lister.OperatorsV1alpha1().InstallPlanLister().InstallPlans(namespace).List(labels.Everything()) - if err != nil { - log.Warn("unable to list installplans for GC") - } - - if len(allIps) <= maxInstallPlanCount { - return - } - - // we only consider maxDeletesPerSweep more than the allowed number of installplans for delete at one time - ips := allIps - if len(ips) > maxInstallPlanCount+maxDeletesPerSweep { - ips = allIps[:maxInstallPlanCount+maxDeletesPerSweep] - } - - byGen := map[int][]*v1alpha1.InstallPlan{} - for _, ip := range ips { - gen, ok := byGen[ip.Spec.Generation] - if !ok { - gen = make([]*v1alpha1.InstallPlan, 0) - } - byGen[ip.Spec.Generation] = append(gen, ip) - } - - gens := make([]int, 0) - for i := range byGen { - gens = append(gens, i) - } - - sort.Ints(gens) - - toDelete := make([]*v1alpha1.InstallPlan, 0) - - for _, i := range gens { - g := byGen[i] - - if len(ips)-len(toDelete) <= maxInstallPlanCount { - break - } - - // if removing all installplans at this generation doesn't dip below the max, safe to delete all of them - if len(ips)-len(toDelete)-len(g) >= maxInstallPlanCount { - toDelete = append(toDelete, g...) - continue - } - - // CreationTimestamp sorting shouldn't ever be hit unless there is a bug that causes installplans to be - // generated without bumping the generation. It is here as a safeguard only. - - // sort by creation time - sort.Slice(g, func(i, j int) bool { - if !g[i].CreationTimestamp.Equal(&g[j].CreationTimestamp) { - return g[i].CreationTimestamp.Before(&g[j].CreationTimestamp) - } - // final fallback to lexicographic sort, in case many installplans are created with the same timestamp - return g[i].GetName() < g[j].GetName() - }) - toDelete = append(toDelete, g[:len(ips)-len(toDelete)-maxInstallPlanCount]...) 
- } - - for _, i := range toDelete { - if err := o.client.OperatorsV1alpha1().InstallPlans(namespace).Delete(context.TODO(), i.GetName(), metav1.DeleteOptions{}); err != nil { - log.WithField("deleting", i.GetName()).WithError(err).Warn("error GCing old installplan - may have already been deleted") - } - } + allIps, err := o.lister.OperatorsV1alpha1().InstallPlanLister().InstallPlans(namespace).List(labels.Everything()) + if err != nil { + log.Warn("unable to list installplans for GC") + } + + if len(allIps) <= maxInstallPlanCount { + return + } + + // we only consider maxDeletesPerSweep more than the allowed number of installplans for delete at one time + ips := allIps + if len(ips) > maxInstallPlanCount+maxDeletesPerSweep { + ips = allIps[:maxInstallPlanCount+maxDeletesPerSweep] + } + + byGen := map[int][]*v1alpha1.InstallPlan{} + for _, ip := range ips { + gen, ok := byGen[ip.Spec.Generation] + if !ok { + gen = make([]*v1alpha1.InstallPlan, 0) + } + byGen[ip.Spec.Generation] = append(gen, ip) + } + + gens := make([]int, 0) + for i := range byGen { + gens = append(gens, i) + } + + sort.Ints(gens) + + toDelete := make([]*v1alpha1.InstallPlan, 0) + + for _, i := range gens { + g := byGen[i] + + if len(ips)-len(toDelete) <= maxInstallPlanCount { + break + } + + // if removing all installplans at this generation doesn't dip below the max, safe to delete all of them + if len(ips)-len(toDelete)-len(g) >= maxInstallPlanCount { + toDelete = append(toDelete, g...) + continue + } + + // CreationTimestamp sorting shouldn't ever be hit unless there is a bug that causes installplans to be + // generated without bumping the generation. It is here as a safeguard only. 
+ + // sort by creation time + sort.Slice(g, func(i, j int) bool { + if !g[i].CreationTimestamp.Equal(&g[j].CreationTimestamp) { + return g[i].CreationTimestamp.Before(&g[j].CreationTimestamp) + } + // final fallback to lexicographic sort, in case many installplans are created with the same timestamp + return g[i].GetName() < g[j].GetName() + }) + toDelete = append(toDelete, g[:len(ips)-len(toDelete)-maxInstallPlanCount]...) + } + + for _, i := range toDelete { + if err := o.client.OperatorsV1alpha1().InstallPlans(namespace).Delete(context.TODO(), i.GetName(), metav1.DeleteOptions{}); err != nil { + log.WithField("deleting", i.GetName()).WithError(err).Warn("error GCing old installplan - may have already been deleted") + } + } } func (o *Operator) syncInstallPlans(obj interface{}) (syncError error) { - plan, ok := obj.(*v1alpha1.InstallPlan) - if !ok { - o.logger.Debugf("wrong type: %#v", obj) - return fmt.Errorf("casting InstallPlan failed") - } - - logger := o.logger.WithFields(logrus.Fields{ - "id": queueinformer.NewLoopID(), - "ip": plan.GetName(), - "namespace": plan.GetNamespace(), - "phase": plan.Status.Phase, - }) - - logger.Info("syncing") - - if len(plan.Status.Plan) == 0 && len(plan.Status.BundleLookups) == 0 { - logger.Info("skip processing installplan without status - subscription sync responsible for initial status") - return - } - - // Complete and Failed are terminal phases - if plan.Status.Phase == v1alpha1.InstallPlanPhaseFailed || plan.Status.Phase == v1alpha1.InstallPlanPhaseComplete { - return - } - - querier := o.serviceAccountQuerier.NamespaceQuerier(plan.GetNamespace()) - ref, err := querier() - out := plan.DeepCopy() - if err != nil { - // Set status condition/message and retry sync if any error - ipFailError := fmt.Errorf("attenuated service account query failed - %v", err) - logger.Infof(ipFailError.Error()) - _, err := o.setInstallPlanInstalledCond(out, v1alpha1.InstallPlanReasonInstallCheckFailed, err.Error(), logger) - if err != nil { 
- syncError = err - return - } - syncError = ipFailError - return - } - // reset condition/message if it had been set in previous sync. This condition is being reset since any delay in the next steps - // (bundle unpacking/plan step errors being retried for a duration) could lead to this condition sticking around, even after - // the serviceAccountQuerier returns no error since the error has been resolved (by creating the required resources), which would - // be confusing to the user - - // NOTE: this makes the assumption that the InstallPlanInstalledCheckFailed reason is only set in the previous if clause, which is - // true in the current iteration of the catalog operator. Any future implementation change that aims at setting the reason as - // InstallPlanInstalledCheckFailed must make sure that either this assumption is not breached, or the condition being set elsewhere - // is not being unset here unintentionally. - if cond := out.Status.GetCondition(v1alpha1.InstallPlanInstalled); cond.Reason == v1alpha1.InstallPlanReasonInstallCheckFailed { - plan, err = o.setInstallPlanInstalledCond(out, v1alpha1.InstallPlanConditionReason(corev1.ConditionUnknown), "", logger) - if err != nil { - syncError = err - return - } - } - - if ref != nil { - out := plan.DeepCopy() - out.Status.AttenuatedServiceAccountRef = ref - - if !reflect.DeepEqual(plan, out) { - if _, updateErr := o.client.OperatorsV1alpha1().InstallPlans(out.GetNamespace()).UpdateStatus(context.TODO(), out, metav1.UpdateOptions{}); updateErr != nil { - syncError = fmt.Errorf("failed to attach attenuated ServiceAccount to status - %v", updateErr) - return - } - - logger.WithField("attenuated-sa", ref.Name).Info("successfully attached attenuated ServiceAccount to status") - return - } - } - - outInstallPlan, syncError := transitionInstallPlanState(logger.Logger, o, *plan, o.now(), o.installPlanTimeout) - - if syncError != nil { - logger = logger.WithField("syncError", syncError) - } - - if 
outInstallPlan.Status.Phase == v1alpha1.InstallPlanPhaseInstalling { - defer o.ipQueueSet.RequeueAfter(outInstallPlan.GetNamespace(), outInstallPlan.GetName(), time.Second*5) - } - - defer o.requeueSubscriptionForInstallPlan(plan, logger) - - // Update InstallPlan with status of transition. Log errors if we can't write them to the status. - if _, err := o.client.OperatorsV1alpha1().InstallPlans(plan.GetNamespace()).UpdateStatus(context.TODO(), outInstallPlan, metav1.UpdateOptions{}); err != nil { - logger = logger.WithField("updateError", err.Error()) - updateErr := errors.New("error updating InstallPlan status: " + err.Error()) - if syncError == nil { - logger.Info("error updating InstallPlan status") - return updateErr - } - logger.Info("error transitioning InstallPlan") - syncError = fmt.Errorf("error transitioning InstallPlan: %s and error updating InstallPlan status: %s", syncError, updateErr) - } - - return + plan, ok := obj.(*v1alpha1.InstallPlan) + if !ok { + o.logger.Debugf("wrong type: %#v", obj) + return fmt.Errorf("casting InstallPlan failed") + } + + logger := o.logger.WithFields(logrus.Fields{ + "id": queueinformer.NewLoopID(), + "ip": plan.GetName(), + "namespace": plan.GetNamespace(), + "phase": plan.Status.Phase, + }) + + logger.Info("syncing") + + if len(plan.Status.Plan) == 0 && len(plan.Status.BundleLookups) == 0 { + logger.Info("skip processing installplan without status - subscription sync responsible for initial status") + return + } + + // Complete and Failed are terminal phases + if plan.Status.Phase == v1alpha1.InstallPlanPhaseFailed || plan.Status.Phase == v1alpha1.InstallPlanPhaseComplete { + return + } + + querier := o.serviceAccountQuerier.NamespaceQuerier(plan.GetNamespace()) + ref, err := querier() + out := plan.DeepCopy() + if err != nil { + // Set status condition/message and retry sync if any error + ipFailError := fmt.Errorf("attenuated service account query failed - %v", err) + logger.Infof(ipFailError.Error()) + _, err := 
o.setInstallPlanInstalledCond(out, v1alpha1.InstallPlanReasonInstallCheckFailed, err.Error(), logger) + if err != nil { + syncError = err + return + } + syncError = ipFailError + return + } + // reset condition/message if it had been set in previous sync. This condition is being reset since any delay in the next steps + // (bundle unpacking/plan step errors being retried for a duration) could lead to this condition sticking around, even after + // the serviceAccountQuerier returns no error since the error has been resolved (by creating the required resources), which would + // be confusing to the user + + // NOTE: this makes the assumption that the InstallPlanInstalledCheckFailed reason is only set in the previous if clause, which is + // true in the current iteration of the catalog operator. Any future implementation change that aims at setting the reason as + // InstallPlanInstalledCheckFailed must make sure that either this assumption is not breached, or the condition being set elsewhere + // is not being unset here unintentionally. 
+ if cond := out.Status.GetCondition(v1alpha1.InstallPlanInstalled); cond.Reason == v1alpha1.InstallPlanReasonInstallCheckFailed { + plan, err = o.setInstallPlanInstalledCond(out, v1alpha1.InstallPlanConditionReason(corev1.ConditionUnknown), "", logger) + if err != nil { + syncError = err + return + } + } + + if ref != nil { + out := plan.DeepCopy() + out.Status.AttenuatedServiceAccountRef = ref + + if !reflect.DeepEqual(plan, out) { + if _, updateErr := o.client.OperatorsV1alpha1().InstallPlans(out.GetNamespace()).UpdateStatus(context.TODO(), out, metav1.UpdateOptions{}); updateErr != nil { + syncError = fmt.Errorf("failed to attach attenuated ServiceAccount to status - %v", updateErr) + return + } + + logger.WithField("attenuated-sa", ref.Name).Info("successfully attached attenuated ServiceAccount to status") + return + } + } + + outInstallPlan, syncError := transitionInstallPlanState(logger.Logger, o, *plan, o.now(), o.installPlanTimeout) + + if syncError != nil { + logger = logger.WithField("syncError", syncError) + } + + if outInstallPlan.Status.Phase == v1alpha1.InstallPlanPhaseInstalling { + defer o.ipQueueSet.RequeueAfter(outInstallPlan.GetNamespace(), outInstallPlan.GetName(), time.Second*5) + } + + defer o.requeueSubscriptionForInstallPlan(plan, logger) + + // Update InstallPlan with status of transition. Log errors if we can't write them to the status. 
+ if _, err := o.client.OperatorsV1alpha1().InstallPlans(plan.GetNamespace()).UpdateStatus(context.TODO(), outInstallPlan, metav1.UpdateOptions{}); err != nil { + logger = logger.WithField("updateError", err.Error()) + updateErr := errors.New("error updating InstallPlan status: " + err.Error()) + if syncError == nil { + logger.Info("error updating InstallPlan status") + return updateErr + } + logger.Info("error transitioning InstallPlan") + syncError = fmt.Errorf("error transitioning InstallPlan: %s and error updating InstallPlan status: %s", syncError, updateErr) + } + + return } func hasBundleLookupFailureCondition(bundleLookups []v1alpha1.BundleLookup) (bool, *v1alpha1.BundleLookupCondition) { - for _, bundleLookup := range bundleLookups { - for _, cond := range bundleLookup.Conditions { - if cond.Type == v1alpha1.BundleLookupFailed && cond.Status == corev1.ConditionTrue { - return true, &cond - } - } - } - return false, nil + for _, bundleLookup := range bundleLookups { + for _, cond := range bundleLookup.Conditions { + if cond.Type == v1alpha1.BundleLookupFailed && cond.Status == corev1.ConditionTrue { + return true, &cond + } + } + } + return false, nil } func (o *Operator) requeueSubscriptionForInstallPlan(plan *v1alpha1.InstallPlan, logger *logrus.Entry) { - // Notify subscription loop of installplan changes - owners := ownerutil.GetOwnersByKind(plan, v1alpha1.SubscriptionKind) - - if len(owners) == 0 { - logger.Trace("no installplan owner subscriptions found to requeue") - return - } - - for _, owner := range owners { - logger.WithField("owner", owner).Debug("requeueing installplan owner") - if err := o.subQueueSet.Requeue(plan.GetNamespace(), owner.Name); err != nil { - logger.WithError(err).Warn("error requeuing installplan owner") - } - } + // Notify subscription loop of installplan changes + owners := ownerutil.GetOwnersByKind(plan, v1alpha1.SubscriptionKind) + + if len(owners) == 0 { + logger.Trace("no installplan owner subscriptions found to 
requeue") + return + } + + for _, owner := range owners { + logger.WithField("owner", owner).Debug("requeueing installplan owner") + if err := o.subQueueSet.Requeue(plan.GetNamespace(), owner.Name); err != nil { + logger.WithError(err).Warn("error requeuing installplan owner") + } + } } func (o *Operator) setInstallPlanInstalledCond(ip *v1alpha1.InstallPlan, reason v1alpha1.InstallPlanConditionReason, message string, logger *logrus.Entry) (*v1alpha1.InstallPlan, error) { - now := o.now() - ip.Status.SetCondition(v1alpha1.ConditionFailed(v1alpha1.InstallPlanInstalled, reason, message, &now)) - outIP, err := o.client.OperatorsV1alpha1().InstallPlans(ip.GetNamespace()).UpdateStatus(context.TODO(), ip, metav1.UpdateOptions{}) - if err != nil { - logger = logger.WithField("updateError", err.Error()) - logger.Errorf("error updating InstallPlan status") - return nil, nil - } - return outIP, nil + now := o.now() + ip.Status.SetCondition(v1alpha1.ConditionFailed(v1alpha1.InstallPlanInstalled, reason, message, &now)) + outIP, err := o.client.OperatorsV1alpha1().InstallPlans(ip.GetNamespace()).UpdateStatus(context.TODO(), ip, metav1.UpdateOptions{}) + if err != nil { + logger = logger.WithField("updateError", err.Error()) + logger.Errorf("error updating InstallPlan status") + return nil, nil + } + return outIP, nil } type installPlanTransitioner interface { - ExecutePlan(*v1alpha1.InstallPlan) error + ExecutePlan(*v1alpha1.InstallPlan) error } var _ installPlanTransitioner = &Operator{} func transitionInstallPlanState(log logrus.FieldLogger, transitioner installPlanTransitioner, in v1alpha1.InstallPlan, now metav1.Time, timeout time.Duration) (*v1alpha1.InstallPlan, error) { - out := in.DeepCopy() - - switch in.Status.Phase { - case v1alpha1.InstallPlanPhaseRequiresApproval: - if out.Spec.Approved { - out.Status.Phase = v1alpha1.InstallPlanPhaseInstalling - out.Status.Message = "" - log.Debugf("approved, setting to %s", out.Status.Phase) - } else { - log.Debug("not approved, 
skipping sync") - } - return out, nil - - case v1alpha1.InstallPlanPhaseInstalling: - if out.Status.StartTime == nil { - out.Status.StartTime = &now - } - log.Debug("attempting to install") - if err := transitioner.ExecutePlan(out); err != nil { - if now.Sub(out.Status.StartTime.Time) >= timeout { - out.Status.SetCondition(v1alpha1.ConditionFailed(v1alpha1.InstallPlanInstalled, - v1alpha1.InstallPlanReasonComponentFailed, err.Error(), &now)) - out.Status.Phase = v1alpha1.InstallPlanPhaseFailed - out.Status.Message = err.Error() - } else { - out.Status.Message = fmt.Sprintf("retrying execution due to error: %s", err.Error()) - } - return out, err - } else if !out.Status.NeedsRequeue() { - // Loop over one final time to check and see if everything is good. - out.Status.SetCondition(v1alpha1.ConditionMet(v1alpha1.InstallPlanInstalled, &now)) - out.Status.Phase = v1alpha1.InstallPlanPhaseComplete - out.Status.Message = "" - } - return out, nil - default: - return out, nil - } + out := in.DeepCopy() + + switch in.Status.Phase { + case v1alpha1.InstallPlanPhaseRequiresApproval: + if out.Spec.Approved { + out.Status.Phase = v1alpha1.InstallPlanPhaseInstalling + out.Status.Message = "" + log.Debugf("approved, setting to %s", out.Status.Phase) + } else { + log.Debug("not approved, skipping sync") + } + return out, nil + + case v1alpha1.InstallPlanPhaseInstalling: + if out.Status.StartTime == nil { + out.Status.StartTime = &now + } + log.Debug("attempting to install") + if err := transitioner.ExecutePlan(out); err != nil { + if now.Sub(out.Status.StartTime.Time) >= timeout { + out.Status.SetCondition(v1alpha1.ConditionFailed(v1alpha1.InstallPlanInstalled, + v1alpha1.InstallPlanReasonComponentFailed, err.Error(), &now)) + out.Status.Phase = v1alpha1.InstallPlanPhaseFailed + out.Status.Message = err.Error() + } else { + out.Status.Message = fmt.Sprintf("retrying execution due to error: %s", err.Error()) + } + return out, err + } else if !out.Status.NeedsRequeue() { + // Loop 
over one final time to check and see if everything is good. + out.Status.SetCondition(v1alpha1.ConditionMet(v1alpha1.InstallPlanInstalled, &now)) + out.Status.Phase = v1alpha1.InstallPlanPhaseComplete + out.Status.Message = "" + } + return out, nil + default: + return out, nil + } } // Validate all existing served versions against new CRD's validation (if changed) func validateV1CRDCompatibility(dynamicClient dynamic.Interface, oldCRD *apiextensionsv1.CustomResourceDefinition, newCRD *apiextensionsv1.CustomResourceDefinition) error { - logrus.Debugf("Comparing %#v to %#v", oldCRD.Spec.Versions, newCRD.Spec.Versions) - - // If validation schema is unchanged, return right away - newestSchema := newCRD.Spec.Versions[len(newCRD.Spec.Versions)-1].Schema - for i, oldVersion := range oldCRD.Spec.Versions { - if !reflect.DeepEqual(oldVersion.Schema, newestSchema) { - break - } - if i == len(oldCRD.Spec.Versions)-1 { - // we are on the last iteration - // schema has not changed between versions at this point. 
- return nil - } - } - - convertedCRD := &apiextensions.CustomResourceDefinition{} - if err := apiextensionsv1.Convert_v1_CustomResourceDefinition_To_apiextensions_CustomResourceDefinition(newCRD, convertedCRD, nil); err != nil { - return err - } - for _, version := range oldCRD.Spec.Versions { - if version.Served { - gvr := schema.GroupVersionResource{Group: oldCRD.Spec.Group, Version: version.Name, Resource: oldCRD.Spec.Names.Plural} - err := validateExistingCRs(dynamicClient, gvr, convertedCRD) - if err != nil { - return err - } - } - } - - logrus.Debugf("Successfully validated CRD %s\n", newCRD.Name) - return nil + logrus.Debugf("Comparing %#v to %#v", oldCRD.Spec.Versions, newCRD.Spec.Versions) + + oldVersionSet := sets.New[string]() + for _, oldVersion := range oldCRD.Spec.Versions { + if !oldVersionSet.Has(oldVersion.Name) && oldVersion.Served { + oldVersionSet.Insert(oldVersion.Name) + } + } + + validationsMap := make(map[string]*apiextensions.CustomResourceValidation, 0) + for _, newVersion := range newCRD.Spec.Versions { + if oldVersionSet.Has(newVersion.Name) && newVersion.Served { + // If the new CRD's version is present in the cluster and still + // served then fill the map entry with the new validation + convertedValidation := &apiextensions.CustomResourceValidation{} + if err := apiextensionsv1.Convert_v1_CustomResourceValidation_To_apiextensions_CustomResourceValidation(newVersion.Schema, convertedValidation, nil); err != nil { + return err + } + validationsMap[newVersion.Name] = convertedValidation + } + } + return validateExistingCRs(dynamicClient, schema.GroupResource{Group: newCRD.Spec.Group, Resource: newCRD.Spec.Names.Plural}, validationsMap) } // Validate all existing served versions against new CRD's validation (if changed) func validateV1Beta1CRDCompatibility(dynamicClient dynamic.Interface, oldCRD *apiextensionsv1beta1.CustomResourceDefinition, newCRD *apiextensionsv1beta1.CustomResourceDefinition) error { - logrus.Debugf("Comparing %#v to 
%#v", oldCRD.Spec.Validation, newCRD.Spec.Validation) - - // TODO return early of all versions are equal - convertedCRD := &apiextensions.CustomResourceDefinition{} - if err := apiextensionsv1beta1.Convert_v1beta1_CustomResourceDefinition_To_apiextensions_CustomResourceDefinition(newCRD, convertedCRD, nil); err != nil { - return err - } - for _, version := range oldCRD.Spec.Versions { - if version.Served { - gvr := schema.GroupVersionResource{Group: oldCRD.Spec.Group, Version: version.Name, Resource: oldCRD.Spec.Names.Plural} - err := validateExistingCRs(dynamicClient, gvr, convertedCRD) - if err != nil { - return err - } - } - } - - if oldCRD.Spec.Version != "" { - gvr := schema.GroupVersionResource{Group: oldCRD.Spec.Group, Version: oldCRD.Spec.Version, Resource: oldCRD.Spec.Names.Plural} - err := validateExistingCRs(dynamicClient, gvr, convertedCRD) - if err != nil { - return err - } - } - logrus.Debugf("Successfully validated CRD %s\n", newCRD.Name) - return nil + logrus.Debugf("Comparing %#v to %#v", oldCRD.Spec.Validation, newCRD.Spec.Validation) + oldVersionSet := sets.New[string]() + if len(oldCRD.Spec.Versions) == 0 { + // apiextensionsv1beta1 special case: if spec.Versions is empty, use the global version and validation + oldVersionSet.Insert(oldCRD.Spec.Version) + } + for _, oldVersion := range oldCRD.Spec.Versions { + // collect served versions from spec.Versions if the list is present + if !oldVersionSet.Has(oldVersion.Name) && oldVersion.Served { + oldVersionSet.Insert(oldVersion.Name) + } + } + + validationsMap := make(map[string]*apiextensions.CustomResourceValidation, 0) + gr := schema.GroupResource{Group: newCRD.Spec.Group, Resource: newCRD.Spec.Names.Plural} + if len(newCRD.Spec.Versions) == 0 { + // apiextensionsv1beta1 special case: if spec.Versions of newCRD is empty, use the global version and validation + if oldVersionSet.Has(newCRD.Spec.Version) { + convertedValidation := &apiextensions.CustomResourceValidation{} + if err := 
apiextensionsv1beta1.Convert_v1beta1_CustomResourceValidation_To_apiextensions_CustomResourceValidation(newCRD.Spec.Validation, convertedValidation, nil); err != nil { + return err + } + validationsMap[newCRD.Spec.Version] = convertedValidation + } + } + for _, newVersion := range newCRD.Spec.Versions { + if oldVersionSet.Has(newVersion.Name) && newVersion.Served { + // If the new CRD's version is present in the cluster and still + // served then fill the map entry with the new validation + if newCRD.Spec.Validation != nil { + // apiextensionsv1beta1 special case: spec.Validation and spec.Versions[].Schema are mutually exclusive; + // if spec.Versions is non-empty and spec.Validation is set then we can validate once against any + // single existing version. + convertedValidation := &apiextensions.CustomResourceValidation{} + if err := apiextensionsv1beta1.Convert_v1beta1_CustomResourceValidation_To_apiextensions_CustomResourceValidation(newCRD.Spec.Validation, convertedValidation, nil); err != nil { + return err + } + return validateExistingCRs(dynamicClient, gr, map[string]*apiextensions.CustomResourceValidation{newVersion.Name: convertedValidation}) + } + convertedValidation := &apiextensions.CustomResourceValidation{} + if err := apiextensionsv1beta1.Convert_v1beta1_CustomResourceValidation_To_apiextensions_CustomResourceValidation(newVersion.Schema, convertedValidation, nil); err != nil { + return err + } + validationsMap[newVersion.Name] = convertedValidation + } + } + return validateExistingCRs(dynamicClient, gr, validationsMap) } -func validateExistingCRs(dynamicClient dynamic.Interface, gvr schema.GroupVersionResource, newCRD *apiextensions.CustomResourceDefinition) error { - pager := pager.New(pager.SimplePageFunc(func(opts metav1.ListOptions) (runtime.Object, error) { - return dynamicClient.Resource(gvr).List(context.TODO(), opts) - })) - validationFn := func(obj runtime.Object) error { - // lister will only provide unstructured objects as runtime.Object, 
so this should never fail to convert - // if it does, it's a programming error - cr := obj.(*unstructured.Unstructured) - validator, _, err := validation.NewSchemaValidator(newCRD.Spec.Validation) - if err != nil { - return fmt.Errorf("error creating validator for schema %#v: %s", newCRD.Spec.Validation, err) - } - err = validation.ValidateCustomResource(field.NewPath(""), cr.UnstructuredContent(), validator).ToAggregate() - if err != nil { - var namespacedName string - if cr.GetNamespace() == "" { - namespacedName = cr.GetName() - } else { - namespacedName = fmt.Sprintf("%s/%s", cr.GetNamespace(), cr.GetName()) - } - return fmt.Errorf("error validating %s %q: updated validation is too restrictive: %v", cr.GroupVersionKind(), namespacedName, err) - } - return nil - } - err := pager.EachListItem(context.Background(), metav1.ListOptions{}, validationFn) - if err != nil { - return err - } - - return nil +// validateExistingCRs lists all CRs for each version entry in validationsMap, then validates each using the paired validation. 
+func validateExistingCRs(dynamicClient dynamic.Interface, gr schema.GroupResource, validationsMap map[string]*apiextensions.CustomResourceValidation) error { + for version, schemaValidation := range validationsMap { + // create validator from given crdValidation + validator, _, err := validation.NewSchemaValidator(schemaValidation) + if err != nil { + return fmt.Errorf("error creating validator for schema version %s: %s", version, err) + } + + gvr := schema.GroupVersionResource{Group: gr.Group, Version: version, Resource: gr.Resource} + crList, err := dynamicClient.Resource(gvr).List(context.TODO(), metav1.ListOptions{}) + if err != nil { + return fmt.Errorf("error listing resources in GroupVersionResource %#v: %s", gvr, err) + } + + // validate each CR against this version schema + for _, cr := range crList.Items { + err = validation.ValidateCustomResource(field.NewPath(""), cr.UnstructuredContent(), validator).ToAggregate() + if err != nil { + var namespacedName string + if cr.GetNamespace() == "" { + namespacedName = cr.GetName() + } else { + namespacedName = fmt.Sprintf("%s/%s", cr.GetNamespace(), cr.GetName()) + } + return fmt.Errorf("error validating %s %q: updated validation is too restrictive: %v", cr.GroupVersionKind(), namespacedName, err) + } + } + } + return nil } type warningRecorder struct { - m sync.Mutex - warnings []string + m sync.Mutex + warnings []string } func (wr *warningRecorder) HandleWarningHeader(code int, agent string, text string) { - if code != 299 { - return - } - wr.m.Lock() - defer wr.m.Unlock() - wr.warnings = append(wr.warnings, text) + if code != 299 { + return + } + wr.m.Lock() + defer wr.m.Unlock() + wr.warnings = append(wr.warnings, text) } func (wr *warningRecorder) PopWarnings() []string { - wr.m.Lock() - defer wr.m.Unlock() + wr.m.Lock() + defer wr.m.Unlock() - result := wr.warnings - wr.warnings = nil - return result + result := wr.warnings + wr.warnings = nil + return result } // ExecutePlan applies a planned 
InstallPlan to a namespace. func (o *Operator) ExecutePlan(plan *v1alpha1.InstallPlan) error { - if plan.Status.Phase != v1alpha1.InstallPlanPhaseInstalling { - panic("attempted to install a plan that wasn't in the installing phase") - } - - namespace := plan.GetNamespace() - - // Get the set of initial installplan csv names - initialCSVNames := getCSVNameSet(plan) - // Get pre-existing CRD owners to make decisions about applying resolved CSVs - existingCRDOwners, err := o.getExistingAPIOwners(plan.GetNamespace()) - if err != nil { - return err - } - - var wr warningRecorder - factory := o.clientFactory.WithConfigTransformer(clients.SetWarningHandler(&wr)) - - // Does the namespace have an operator group that specifies a user defined - // service account? If so, then we should use a scoped client for plan - // execution. - attenuate, err := o.clientAttenuator.AttenuateToServiceAccount(scoped.StaticQuerier(plan.Status.AttenuatedServiceAccountRef)) - if err != nil { - o.logger.Errorf("failed to get a client for plan execution: %v", err) - return err - } - attenuatedFactory := factory.WithConfigTransformer(attenuate) - kubeclient, err := attenuatedFactory.NewOperatorClient() - if err != nil { - o.logger.Errorf("failed to get a client for plan execution: %v", err) - return err - } - crclient, err := attenuatedFactory.NewKubernetesClient() - if err != nil { - o.logger.Errorf("failed to get a client for plan execution: %v", err) - return err - } - dynamicClient, err := attenuatedFactory.NewDynamicClient() - if err != nil { - o.logger.Errorf("failed to get a client for plan execution: %v", err) - return err - } - - ensurer := newStepEnsurer(kubeclient, crclient, dynamicClient) - r := newManifestResolver(plan.GetNamespace(), o.lister.CoreV1().ConfigMapLister(), o.logger) - - discoveryQuerier := newDiscoveryQuerier(o.opClient.KubernetesInterface().Discovery()) - - // CRDs should be installed via the default OLM (cluster-admin) client and not the scoped client specified by 
the AttenuatedServiceAccount - // the StepBuilder is currently only implemented for CRD types - // TODO give the StepBuilder both OLM and scoped clients when it supports new scoped types - builderKubeClient, err := factory.NewOperatorClient() - if err != nil { - o.logger.Errorf("failed to get a client for plan execution- %v", err) - return err - } - builderDynamicClient, err := factory.NewDynamicClient() - if err != nil { - o.logger.Errorf("failed to get a client for plan execution- %v", err) - return err - } - b := newBuilder(plan, o.lister.OperatorsV1alpha1().ClusterServiceVersionLister(), builderKubeClient, builderDynamicClient, r, o.logger) - - for i, step := range plan.Status.Plan { - if err := func(i int, step *v1alpha1.Step) error { - wr.PopWarnings() - defer func() { - warnings := wr.PopWarnings() - if len(warnings) == 0 { - return - } - var obj runtime.Object - if ref, err := reference.GetReference(plan); err != nil { - o.logger.WithError(err).Warnf("error getting plan reference") - obj = plan - } else { - ref.FieldPath = fmt.Sprintf("status.plan[%d]", i) - obj = ref - } - msg := fmt.Sprintf("%d warning(s) generated during operator installation (%s %q): %s", len(warnings), step.Resource.Kind, step.Resource.Name, strings.Join(warnings, ", ")) - if step.Resolving != "" { - msg = fmt.Sprintf("%d warning(s) generated during installation of operator %q (%s %q): %s", len(warnings), step.Resolving, step.Resource.Kind, step.Resource.Name, strings.Join(warnings, ", ")) - } - o.recorder.Event(obj, corev1.EventTypeWarning, "AppliedWithWarnings", msg) - metrics.EmitInstallPlanWarning() - }() - - doStep := true - s, err := b.create(*step) - if err != nil { - if _, ok := err.(notSupportedStepperErr); ok { - // stepper not implemented for this type yet - // stepper currently only implemented for CRD types - doStep = false - } else { - return err - } - } - if doStep { - status, err := s.Status() - if err != nil { - return err - } - plan.Status.Plan[i].Status = status - 
return nil - } - - switch step.Status { - case v1alpha1.StepStatusPresent, v1alpha1.StepStatusCreated, v1alpha1.StepStatusWaitingForAPI: - return nil - case v1alpha1.StepStatusUnknown, v1alpha1.StepStatusNotPresent: - manifest, err := r.ManifestForStep(step) - if err != nil { - return err - } - o.logger.WithFields(logrus.Fields{"kind": step.Resource.Kind, "name": step.Resource.Name}).Debug("execute resource") - switch step.Resource.Kind { - case v1alpha1.ClusterServiceVersionKind: - // Marshal the manifest into a CSV instance. - var csv v1alpha1.ClusterServiceVersion - err := json.Unmarshal([]byte(manifest), &csv) - if err != nil { - return errorwrap.Wrapf(err, "error parsing step manifest: %s", step.Resource.Name) - } - - // Check if the resolved CSV is in the initial set - if _, ok := initialCSVNames[csv.GetName()]; !ok { - // Check for pre-existing CSVs that own the same CRDs - competingOwners, err := competingCRDOwnersExist(plan.GetNamespace(), &csv, existingCRDOwners) - if err != nil { - return errorwrap.Wrapf(err, "error checking crd owners for: %s", csv.GetName()) - } - - // TODO: decide on fail/continue logic for pre-existing dependent CSVs that own the same CRD(s) - if competingOwners { - // For now, error out - return fmt.Errorf("pre-existing CRD owners found for owned CRD(s) of dependent CSV %s", csv.GetName()) - } - } - - // Attempt to create the CSV. - csv.SetNamespace(namespace) - - status, err := ensurer.EnsureClusterServiceVersion(&csv) - if err != nil { - return err - } - - plan.Status.Plan[i].Status = status - - case v1alpha1.SubscriptionKind: - // Marshal the manifest into a subscription instance. 
- var sub v1alpha1.Subscription - err := json.Unmarshal([]byte(manifest), &sub) - if err != nil { - return errorwrap.Wrapf(err, "error parsing step manifest: %s", step.Resource.Name) - } - - // Add the InstallPlan's name as an annotation - if annotations := sub.GetAnnotations(); annotations != nil { - annotations[generatedByKey] = plan.GetName() - } else { - sub.SetAnnotations(map[string]string{generatedByKey: plan.GetName()}) - } - - // Attempt to create the Subscription - sub.SetNamespace(namespace) - - status, err := ensurer.EnsureSubscription(&sub) - if err != nil { - return err - } - - plan.Status.Plan[i].Status = status - - case resolver.BundleSecretKind: - var s corev1.Secret - err := json.Unmarshal([]byte(manifest), &s) - if err != nil { - return errorwrap.Wrapf(err, "error parsing step manifest: %s", step.Resource.Name) - } - - // add ownerrefs on the secret that point to the CSV in the bundle - if step.Resolving != "" { - owner := &v1alpha1.ClusterServiceVersion{} - owner.SetNamespace(plan.GetNamespace()) - owner.SetName(step.Resolving) - ownerutil.AddNonBlockingOwner(&s, owner) - } - - // Update UIDs on all CSV OwnerReferences - updated, err := o.getUpdatedOwnerReferences(s.OwnerReferences, plan.Namespace) - if err != nil { - return errorwrap.Wrapf(err, "error generating ownerrefs for secret %s", s.GetName()) - } - s.SetOwnerReferences(updated) - s.SetNamespace(namespace) - - status, err := ensurer.EnsureBundleSecret(plan.Namespace, &s) - if err != nil { - return err - } - - plan.Status.Plan[i].Status = status - - case secretKind: - status, err := ensurer.EnsureSecret(o.namespace, plan.GetNamespace(), step.Resource.Name) - if err != nil { - return err - } - - plan.Status.Plan[i].Status = status - - case clusterRoleKind: - // Marshal the manifest into a ClusterRole instance. 
- var cr rbacv1.ClusterRole - err := json.Unmarshal([]byte(manifest), &cr) - if err != nil { - return errorwrap.Wrapf(err, "error parsing step manifest: %s", step.Resource.Name) - } - - status, err := ensurer.EnsureClusterRole(&cr, step) - if err != nil { - return err - } - - plan.Status.Plan[i].Status = status - - case clusterRoleBindingKind: - // Marshal the manifest into a RoleBinding instance. - var rb rbacv1.ClusterRoleBinding - err := json.Unmarshal([]byte(manifest), &rb) - if err != nil { - return errorwrap.Wrapf(err, "error parsing step manifest: %s", step.Resource.Name) - } - - status, err := ensurer.EnsureClusterRoleBinding(&rb, step) - if err != nil { - return err - } - - plan.Status.Plan[i].Status = status - - case roleKind: - // Marshal the manifest into a Role instance. - var r rbacv1.Role - err := json.Unmarshal([]byte(manifest), &r) - if err != nil { - return errorwrap.Wrapf(err, "error parsing step manifest: %s", step.Resource.Name) - } - - // Update UIDs on all CSV OwnerReferences - updated, err := o.getUpdatedOwnerReferences(r.OwnerReferences, plan.Namespace) - if err != nil { - return errorwrap.Wrapf(err, "error generating ownerrefs for role %s", r.GetName()) - } - r.SetOwnerReferences(updated) - r.SetNamespace(namespace) - - status, err := ensurer.EnsureRole(plan.Namespace, &r) - if err != nil { - return err - } - - plan.Status.Plan[i].Status = status - - case roleBindingKind: - // Marshal the manifest into a RoleBinding instance. 
- var rb rbacv1.RoleBinding - err := json.Unmarshal([]byte(manifest), &rb) - if err != nil { - return errorwrap.Wrapf(err, "error parsing step manifest: %s", step.Resource.Name) - } - - // Update UIDs on all CSV OwnerReferences - updated, err := o.getUpdatedOwnerReferences(rb.OwnerReferences, plan.Namespace) - if err != nil { - return errorwrap.Wrapf(err, "error generating ownerrefs for rolebinding %s", rb.GetName()) - } - rb.SetOwnerReferences(updated) - rb.SetNamespace(namespace) - - status, err := ensurer.EnsureRoleBinding(plan.Namespace, &rb) - if err != nil { - return err - } - - plan.Status.Plan[i].Status = status - - case serviceAccountKind: - // Marshal the manifest into a ServiceAccount instance. - var sa corev1.ServiceAccount - err := json.Unmarshal([]byte(manifest), &sa) - if err != nil { - return errorwrap.Wrapf(err, "error parsing step manifest: %s", step.Resource.Name) - } - - // Update UIDs on all CSV OwnerReferences - updated, err := o.getUpdatedOwnerReferences(sa.OwnerReferences, plan.Namespace) - if err != nil { - return errorwrap.Wrapf(err, "error generating ownerrefs for service account: %s", sa.GetName()) - } - sa.SetOwnerReferences(updated) - sa.SetNamespace(namespace) - - status, err := ensurer.EnsureServiceAccount(namespace, &sa) - if err != nil { - return err - } - - plan.Status.Plan[i].Status = status - - case serviceKind: - // Marshal the manifest into a Service instance - var s corev1.Service - err := json.Unmarshal([]byte(manifest), &s) - if err != nil { - return errorwrap.Wrapf(err, "error parsing step manifest: %s", step.Resource.Name) - } - - // add ownerrefs on the service that point to the CSV in the bundle - if step.Resolving != "" { - owner := &v1alpha1.ClusterServiceVersion{} - owner.SetNamespace(plan.GetNamespace()) - owner.SetName(step.Resolving) - ownerutil.AddNonBlockingOwner(&s, owner) - } - - // Update UIDs on all CSV OwnerReferences - updated, err := o.getUpdatedOwnerReferences(s.OwnerReferences, plan.Namespace) - if err 
!= nil { - return errorwrap.Wrapf(err, "error generating ownerrefs for service: %s", s.GetName()) - } - s.SetOwnerReferences(updated) - s.SetNamespace(namespace) - - status, err := ensurer.EnsureService(namespace, &s) - if err != nil { - return err - } - - plan.Status.Plan[i].Status = status - - case configMapKind: - var cfg corev1.ConfigMap - err := json.Unmarshal([]byte(manifest), &cfg) - if err != nil { - return errorwrap.Wrapf(err, "error parsing step manifest: %s", step.Resource.Name) - } - - // add ownerrefs on the configmap that point to the CSV in the bundle - if step.Resolving != "" { - owner := &v1alpha1.ClusterServiceVersion{} - owner.SetNamespace(plan.GetNamespace()) - owner.SetName(step.Resolving) - ownerutil.AddNonBlockingOwner(&cfg, owner) - } - - // Update UIDs on all CSV OwnerReferences - updated, err := o.getUpdatedOwnerReferences(cfg.OwnerReferences, plan.Namespace) - if err != nil { - return errorwrap.Wrapf(err, "error generating ownerrefs for configmap: %s", cfg.GetName()) - } - cfg.SetOwnerReferences(updated) - cfg.SetNamespace(namespace) - - status, err := ensurer.EnsureConfigMap(plan.Namespace, &cfg) - if err != nil { - return err - } - - plan.Status.Plan[i].Status = status - - default: - if !isSupported(step.Resource.Kind) { - // Not a supported resource - plan.Status.Plan[i].Status = v1alpha1.StepStatusUnsupportedResource - return v1alpha1.ErrInvalidInstallPlan - } - - // Marshal the manifest into an unstructured object - dec := yaml.NewYAMLOrJSONDecoder(strings.NewReader(manifest), 10) - unstructuredObject := &unstructured.Unstructured{} - if err := dec.Decode(unstructuredObject); err != nil { - return errorwrap.Wrapf(err, "error decoding %s object to an unstructured object", step.Resource.Name) - } - - // Get the resource from the GVK. 
- gvk := unstructuredObject.GroupVersionKind() - r, err := o.apiresourceFromGVK(gvk) - if err != nil { - return err - } - - // Create the GVR - gvr := schema.GroupVersionResource{ - Group: gvk.Group, - Version: gvk.Version, - Resource: r.Name, - } - - if step.Resolving != "" { - owner := &v1alpha1.ClusterServiceVersion{} - owner.SetNamespace(plan.GetNamespace()) - owner.SetName(step.Resolving) - - if r.Namespaced { - // Set OwnerReferences for namespace-scoped resource - ownerutil.AddNonBlockingOwner(unstructuredObject, owner) - - // Update UIDs on all CSV OwnerReferences - updated, err := o.getUpdatedOwnerReferences(unstructuredObject.GetOwnerReferences(), plan.Namespace) - if err != nil { - return errorwrap.Wrapf(err, "error generating ownerrefs for unstructured object: %s", unstructuredObject.GetName()) - } - - unstructuredObject.SetOwnerReferences(updated) - } else { - // Add owner labels to cluster-scoped resource - if err := ownerutil.AddOwnerLabels(unstructuredObject, owner); err != nil { - return err - } - } - } - - // Set up the dynamic client ResourceInterface and set ownerrefs - var resourceInterface dynamic.ResourceInterface - if r.Namespaced { - unstructuredObject.SetNamespace(namespace) - resourceInterface = dynamicClient.Resource(gvr).Namespace(namespace) - } else { - resourceInterface = dynamicClient.Resource(gvr) - } - - // Ensure Unstructured Object - status, err := ensurer.EnsureUnstructuredObject(resourceInterface, unstructuredObject) - if err != nil { - return err - } - - plan.Status.Plan[i].Status = status - } - default: - return v1alpha1.ErrInvalidInstallPlan - } - return nil - }(i, step); err != nil { - if apierrors.IsNotFound(err) { - // Check for APIVersions present in the installplan steps that are not available on the server. - // The check is made via discovery per step in the plan. Transient communication failures to the api-server are handled by the plan retry logic. 
- notFoundErr := discoveryQuerier.WithStepResource(step.Resource).QueryForGVK() - if notFoundErr != nil { - return notFoundErr - } - } - return err - } - } - - // Loop over one final time to check and see if everything is good. - for _, step := range plan.Status.Plan { - switch step.Status { - case v1alpha1.StepStatusCreated, v1alpha1.StepStatusPresent: - default: - return nil - } - } - - return nil + if plan.Status.Phase != v1alpha1.InstallPlanPhaseInstalling { + panic("attempted to install a plan that wasn't in the installing phase") + } + + namespace := plan.GetNamespace() + + // Get the set of initial installplan csv names + initialCSVNames := getCSVNameSet(plan) + // Get pre-existing CRD owners to make decisions about applying resolved CSVs + existingCRDOwners, err := o.getExistingAPIOwners(plan.GetNamespace()) + if err != nil { + return err + } + + var wr warningRecorder + factory := o.clientFactory.WithConfigTransformer(clients.SetWarningHandler(&wr)) + + // Does the namespace have an operator group that specifies a user defined + // service account? If so, then we should use a scoped client for plan + // execution. 
+ attenuate, err := o.clientAttenuator.AttenuateToServiceAccount(scoped.StaticQuerier(plan.Status.AttenuatedServiceAccountRef)) + if err != nil { + o.logger.Errorf("failed to get a client for plan execution: %v", err) + return err + } + attenuatedFactory := factory.WithConfigTransformer(attenuate) + kubeclient, err := attenuatedFactory.NewOperatorClient() + if err != nil { + o.logger.Errorf("failed to get a client for plan execution: %v", err) + return err + } + crclient, err := attenuatedFactory.NewKubernetesClient() + if err != nil { + o.logger.Errorf("failed to get a client for plan execution: %v", err) + return err + } + dynamicClient, err := attenuatedFactory.NewDynamicClient() + if err != nil { + o.logger.Errorf("failed to get a client for plan execution: %v", err) + return err + } + + ensurer := newStepEnsurer(kubeclient, crclient, dynamicClient) + r := newManifestResolver(plan.GetNamespace(), o.lister.CoreV1().ConfigMapLister(), o.logger) + + discoveryQuerier := newDiscoveryQuerier(o.opClient.KubernetesInterface().Discovery()) + + // CRDs should be installed via the default OLM (cluster-admin) client and not the scoped client specified by the AttenuatedServiceAccount + // the StepBuilder is currently only implemented for CRD types + // TODO give the StepBuilder both OLM and scoped clients when it supports new scoped types + builderKubeClient, err := factory.NewOperatorClient() + if err != nil { + o.logger.Errorf("failed to get a client for plan execution- %v", err) + return err + } + builderDynamicClient, err := factory.NewDynamicClient() + if err != nil { + o.logger.Errorf("failed to get a client for plan execution- %v", err) + return err + } + b := newBuilder(plan, o.lister.OperatorsV1alpha1().ClusterServiceVersionLister(), builderKubeClient, builderDynamicClient, r, o.logger) + + for i, step := range plan.Status.Plan { + if err := func(i int, step *v1alpha1.Step) error { + wr.PopWarnings() + defer func() { + warnings := wr.PopWarnings() + if 
len(warnings) == 0 { + return + } + var obj runtime.Object + if ref, err := reference.GetReference(plan); err != nil { + o.logger.WithError(err).Warnf("error getting plan reference") + obj = plan + } else { + ref.FieldPath = fmt.Sprintf("status.plan[%d]", i) + obj = ref + } + msg := fmt.Sprintf("%d warning(s) generated during operator installation (%s %q): %s", len(warnings), step.Resource.Kind, step.Resource.Name, strings.Join(warnings, ", ")) + if step.Resolving != "" { + msg = fmt.Sprintf("%d warning(s) generated during installation of operator %q (%s %q): %s", len(warnings), step.Resolving, step.Resource.Kind, step.Resource.Name, strings.Join(warnings, ", ")) + } + o.recorder.Event(obj, corev1.EventTypeWarning, "AppliedWithWarnings", msg) + metrics.EmitInstallPlanWarning() + }() + + doStep := true + s, err := b.create(*step) + if err != nil { + if _, ok := err.(notSupportedStepperErr); ok { + // stepper not implemented for this type yet + // stepper currently only implemented for CRD types + doStep = false + } else { + return err + } + } + if doStep { + status, err := s.Status() + if err != nil { + return err + } + plan.Status.Plan[i].Status = status + return nil + } + + switch step.Status { + case v1alpha1.StepStatusPresent, v1alpha1.StepStatusCreated, v1alpha1.StepStatusWaitingForAPI: + return nil + case v1alpha1.StepStatusUnknown, v1alpha1.StepStatusNotPresent: + manifest, err := r.ManifestForStep(step) + if err != nil { + return err + } + o.logger.WithFields(logrus.Fields{"kind": step.Resource.Kind, "name": step.Resource.Name}).Debug("execute resource") + switch step.Resource.Kind { + case v1alpha1.ClusterServiceVersionKind: + // Marshal the manifest into a CSV instance. 
+ var csv v1alpha1.ClusterServiceVersion + err := json.Unmarshal([]byte(manifest), &csv) + if err != nil { + return errorwrap.Wrapf(err, "error parsing step manifest: %s", step.Resource.Name) + } + + // Check if the resolved CSV is in the initial set + if _, ok := initialCSVNames[csv.GetName()]; !ok { + // Check for pre-existing CSVs that own the same CRDs + competingOwners, err := competingCRDOwnersExist(plan.GetNamespace(), &csv, existingCRDOwners) + if err != nil { + return errorwrap.Wrapf(err, "error checking crd owners for: %s", csv.GetName()) + } + + // TODO: decide on fail/continue logic for pre-existing dependent CSVs that own the same CRD(s) + if competingOwners { + // For now, error out + return fmt.Errorf("pre-existing CRD owners found for owned CRD(s) of dependent CSV %s", csv.GetName()) + } + } + + // Attempt to create the CSV. + csv.SetNamespace(namespace) + + status, err := ensurer.EnsureClusterServiceVersion(&csv) + if err != nil { + return err + } + + plan.Status.Plan[i].Status = status + + case v1alpha1.SubscriptionKind: + // Marshal the manifest into a subscription instance. 
+ var sub v1alpha1.Subscription + err := json.Unmarshal([]byte(manifest), &sub) + if err != nil { + return errorwrap.Wrapf(err, "error parsing step manifest: %s", step.Resource.Name) + } + + // Add the InstallPlan's name as an annotation + if annotations := sub.GetAnnotations(); annotations != nil { + annotations[generatedByKey] = plan.GetName() + } else { + sub.SetAnnotations(map[string]string{generatedByKey: plan.GetName()}) + } + + // Attempt to create the Subscription + sub.SetNamespace(namespace) + + status, err := ensurer.EnsureSubscription(&sub) + if err != nil { + return err + } + + plan.Status.Plan[i].Status = status + + case resolver.BundleSecretKind: + var s corev1.Secret + err := json.Unmarshal([]byte(manifest), &s) + if err != nil { + return errorwrap.Wrapf(err, "error parsing step manifest: %s", step.Resource.Name) + } + + // add ownerrefs on the secret that point to the CSV in the bundle + if step.Resolving != "" { + owner := &v1alpha1.ClusterServiceVersion{} + owner.SetNamespace(plan.GetNamespace()) + owner.SetName(step.Resolving) + ownerutil.AddNonBlockingOwner(&s, owner) + } + + // Update UIDs on all CSV OwnerReferences + updated, err := o.getUpdatedOwnerReferences(s.OwnerReferences, plan.Namespace) + if err != nil { + return errorwrap.Wrapf(err, "error generating ownerrefs for secret %s", s.GetName()) + } + s.SetOwnerReferences(updated) + s.SetNamespace(namespace) + + status, err := ensurer.EnsureBundleSecret(plan.Namespace, &s) + if err != nil { + return err + } + + plan.Status.Plan[i].Status = status + + case secretKind: + status, err := ensurer.EnsureSecret(o.namespace, plan.GetNamespace(), step.Resource.Name) + if err != nil { + return err + } + + plan.Status.Plan[i].Status = status + + case clusterRoleKind: + // Marshal the manifest into a ClusterRole instance. 
+ var cr rbacv1.ClusterRole + err := json.Unmarshal([]byte(manifest), &cr) + if err != nil { + return errorwrap.Wrapf(err, "error parsing step manifest: %s", step.Resource.Name) + } + + status, err := ensurer.EnsureClusterRole(&cr, step) + if err != nil { + return err + } + + plan.Status.Plan[i].Status = status + + case clusterRoleBindingKind: + // Marshal the manifest into a RoleBinding instance. + var rb rbacv1.ClusterRoleBinding + err := json.Unmarshal([]byte(manifest), &rb) + if err != nil { + return errorwrap.Wrapf(err, "error parsing step manifest: %s", step.Resource.Name) + } + + status, err := ensurer.EnsureClusterRoleBinding(&rb, step) + if err != nil { + return err + } + + plan.Status.Plan[i].Status = status + + case roleKind: + // Marshal the manifest into a Role instance. + var r rbacv1.Role + err := json.Unmarshal([]byte(manifest), &r) + if err != nil { + return errorwrap.Wrapf(err, "error parsing step manifest: %s", step.Resource.Name) + } + + // Update UIDs on all CSV OwnerReferences + updated, err := o.getUpdatedOwnerReferences(r.OwnerReferences, plan.Namespace) + if err != nil { + return errorwrap.Wrapf(err, "error generating ownerrefs for role %s", r.GetName()) + } + r.SetOwnerReferences(updated) + r.SetNamespace(namespace) + + status, err := ensurer.EnsureRole(plan.Namespace, &r) + if err != nil { + return err + } + + plan.Status.Plan[i].Status = status + + case roleBindingKind: + // Marshal the manifest into a RoleBinding instance. 
+ var rb rbacv1.RoleBinding + err := json.Unmarshal([]byte(manifest), &rb) + if err != nil { + return errorwrap.Wrapf(err, "error parsing step manifest: %s", step.Resource.Name) + } + + // Update UIDs on all CSV OwnerReferences + updated, err := o.getUpdatedOwnerReferences(rb.OwnerReferences, plan.Namespace) + if err != nil { + return errorwrap.Wrapf(err, "error generating ownerrefs for rolebinding %s", rb.GetName()) + } + rb.SetOwnerReferences(updated) + rb.SetNamespace(namespace) + + status, err := ensurer.EnsureRoleBinding(plan.Namespace, &rb) + if err != nil { + return err + } + + plan.Status.Plan[i].Status = status + + case serviceAccountKind: + // Marshal the manifest into a ServiceAccount instance. + var sa corev1.ServiceAccount + err := json.Unmarshal([]byte(manifest), &sa) + if err != nil { + return errorwrap.Wrapf(err, "error parsing step manifest: %s", step.Resource.Name) + } + + // Update UIDs on all CSV OwnerReferences + updated, err := o.getUpdatedOwnerReferences(sa.OwnerReferences, plan.Namespace) + if err != nil { + return errorwrap.Wrapf(err, "error generating ownerrefs for service account: %s", sa.GetName()) + } + sa.SetOwnerReferences(updated) + sa.SetNamespace(namespace) + + status, err := ensurer.EnsureServiceAccount(namespace, &sa) + if err != nil { + return err + } + + plan.Status.Plan[i].Status = status + + case serviceKind: + // Marshal the manifest into a Service instance + var s corev1.Service + err := json.Unmarshal([]byte(manifest), &s) + if err != nil { + return errorwrap.Wrapf(err, "error parsing step manifest: %s", step.Resource.Name) + } + + // add ownerrefs on the service that point to the CSV in the bundle + if step.Resolving != "" { + owner := &v1alpha1.ClusterServiceVersion{} + owner.SetNamespace(plan.GetNamespace()) + owner.SetName(step.Resolving) + ownerutil.AddNonBlockingOwner(&s, owner) + } + + // Update UIDs on all CSV OwnerReferences + updated, err := o.getUpdatedOwnerReferences(s.OwnerReferences, plan.Namespace) + if err 
!= nil { + return errorwrap.Wrapf(err, "error generating ownerrefs for service: %s", s.GetName()) + } + s.SetOwnerReferences(updated) + s.SetNamespace(namespace) + + status, err := ensurer.EnsureService(namespace, &s) + if err != nil { + return err + } + + plan.Status.Plan[i].Status = status + + case configMapKind: + var cfg corev1.ConfigMap + err := json.Unmarshal([]byte(manifest), &cfg) + if err != nil { + return errorwrap.Wrapf(err, "error parsing step manifest: %s", step.Resource.Name) + } + + // add ownerrefs on the configmap that point to the CSV in the bundle + if step.Resolving != "" { + owner := &v1alpha1.ClusterServiceVersion{} + owner.SetNamespace(plan.GetNamespace()) + owner.SetName(step.Resolving) + ownerutil.AddNonBlockingOwner(&cfg, owner) + } + + // Update UIDs on all CSV OwnerReferences + updated, err := o.getUpdatedOwnerReferences(cfg.OwnerReferences, plan.Namespace) + if err != nil { + return errorwrap.Wrapf(err, "error generating ownerrefs for configmap: %s", cfg.GetName()) + } + cfg.SetOwnerReferences(updated) + cfg.SetNamespace(namespace) + + status, err := ensurer.EnsureConfigMap(plan.Namespace, &cfg) + if err != nil { + return err + } + + plan.Status.Plan[i].Status = status + + default: + if !isSupported(step.Resource.Kind) { + // Not a supported resource + plan.Status.Plan[i].Status = v1alpha1.StepStatusUnsupportedResource + return v1alpha1.ErrInvalidInstallPlan + } + + // Marshal the manifest into an unstructured object + dec := yaml.NewYAMLOrJSONDecoder(strings.NewReader(manifest), 10) + unstructuredObject := &unstructured.Unstructured{} + if err := dec.Decode(unstructuredObject); err != nil { + return errorwrap.Wrapf(err, "error decoding %s object to an unstructured object", step.Resource.Name) + } + + // Get the resource from the GVK. 
+ gvk := unstructuredObject.GroupVersionKind() + r, err := o.apiresourceFromGVK(gvk) + if err != nil { + return err + } + + // Create the GVR + gvr := schema.GroupVersionResource{ + Group: gvk.Group, + Version: gvk.Version, + Resource: r.Name, + } + + if step.Resolving != "" { + owner := &v1alpha1.ClusterServiceVersion{} + owner.SetNamespace(plan.GetNamespace()) + owner.SetName(step.Resolving) + + if r.Namespaced { + // Set OwnerReferences for namespace-scoped resource + ownerutil.AddNonBlockingOwner(unstructuredObject, owner) + + // Update UIDs on all CSV OwnerReferences + updated, err := o.getUpdatedOwnerReferences(unstructuredObject.GetOwnerReferences(), plan.Namespace) + if err != nil { + return errorwrap.Wrapf(err, "error generating ownerrefs for unstructured object: %s", unstructuredObject.GetName()) + } + + unstructuredObject.SetOwnerReferences(updated) + } else { + // Add owner labels to cluster-scoped resource + if err := ownerutil.AddOwnerLabels(unstructuredObject, owner); err != nil { + return err + } + } + } + + // Set up the dynamic client ResourceInterface and set ownerrefs + var resourceInterface dynamic.ResourceInterface + if r.Namespaced { + unstructuredObject.SetNamespace(namespace) + resourceInterface = dynamicClient.Resource(gvr).Namespace(namespace) + } else { + resourceInterface = dynamicClient.Resource(gvr) + } + + // Ensure Unstructured Object + status, err := ensurer.EnsureUnstructuredObject(resourceInterface, unstructuredObject) + if err != nil { + return err + } + + plan.Status.Plan[i].Status = status + } + default: + return v1alpha1.ErrInvalidInstallPlan + } + return nil + }(i, step); err != nil { + if apierrors.IsNotFound(err) { + // Check for APIVersions present in the installplan steps that are not available on the server. + // The check is made via discovery per step in the plan. Transient communication failures to the api-server are handled by the plan retry logic. 
+ notFoundErr := discoveryQuerier.WithStepResource(step.Resource).QueryForGVK() + if notFoundErr != nil { + return notFoundErr + } + } + return err + } + } + + // Loop over one final time to check and see if everything is good. + for _, step := range plan.Status.Plan { + switch step.Status { + case v1alpha1.StepStatusCreated, v1alpha1.StepStatusPresent: + default: + return nil + } + } + + return nil } // getExistingAPIOwners creates a map of CRD names to existing owner CSVs in the given namespace func (o *Operator) getExistingAPIOwners(namespace string) (map[string][]string, error) { - // Get a list of CSVs in the namespace - csvList, err := o.client.OperatorsV1alpha1().ClusterServiceVersions(namespace).List(context.TODO(), metav1.ListOptions{}) - - if err != nil { - return nil, err - } - - // Map CRD names to existing owner CSV CRs in the namespace - owners := make(map[string][]string) - for _, csv := range csvList.Items { - for _, crd := range csv.Spec.CustomResourceDefinitions.Owned { - owners[crd.Name] = append(owners[crd.Name], csv.GetName()) - } - for _, api := range csv.Spec.APIServiceDefinitions.Owned { - owners[api.Group] = append(owners[api.Group], csv.GetName()) - } - } - - return owners, nil + // Get a list of CSVs in the namespace + csvList, err := o.client.OperatorsV1alpha1().ClusterServiceVersions(namespace).List(context.TODO(), metav1.ListOptions{}) + + if err != nil { + return nil, err + } + + // Map CRD names to existing owner CSV CRs in the namespace + owners := make(map[string][]string) + for _, csv := range csvList.Items { + for _, crd := range csv.Spec.CustomResourceDefinitions.Owned { + owners[crd.Name] = append(owners[crd.Name], csv.GetName()) + } + for _, api := range csv.Spec.APIServiceDefinitions.Owned { + owners[api.Group] = append(owners[api.Group], csv.GetName()) + } + } + + return owners, nil } func (o *Operator) getUpdatedOwnerReferences(refs []metav1.OwnerReference, namespace string) ([]metav1.OwnerReference, error) { - updated := 
append([]metav1.OwnerReference(nil), refs...) - - for i, owner := range refs { - if owner.Kind == v1alpha1.ClusterServiceVersionKind { - csv, err := o.client.OperatorsV1alpha1().ClusterServiceVersions(namespace).Get(context.TODO(), owner.Name, metav1.GetOptions{}) - if err != nil { - return nil, err - } - owner.UID = csv.GetUID() - updated[i] = owner - } - } - return updated, nil + updated := append([]metav1.OwnerReference(nil), refs...) + + for i, owner := range refs { + if owner.Kind == v1alpha1.ClusterServiceVersionKind { + csv, err := o.client.OperatorsV1alpha1().ClusterServiceVersions(namespace).Get(context.TODO(), owner.Name, metav1.GetOptions{}) + if err != nil { + return nil, err + } + owner.UID = csv.GetUID() + updated[i] = owner + } + } + return updated, nil } func (o *Operator) listSubscriptions(namespace string) (subs []*v1alpha1.Subscription, err error) { - list, err := o.client.OperatorsV1alpha1().Subscriptions(namespace).List(context.TODO(), metav1.ListOptions{}) - if err != nil { - return - } + list, err := o.client.OperatorsV1alpha1().Subscriptions(namespace).List(context.TODO(), metav1.ListOptions{}) + if err != nil { + return + } - subs = make([]*v1alpha1.Subscription, 0) - for i := range list.Items { - subs = append(subs, &list.Items[i]) - } + subs = make([]*v1alpha1.Subscription, 0) + for i := range list.Items { + subs = append(subs, &list.Items[i]) + } - return + return } func (o *Operator) listInstallPlans(namespace string) (ips []*v1alpha1.InstallPlan, err error) { - list, err := o.client.OperatorsV1alpha1().InstallPlans(namespace).List(context.TODO(), metav1.ListOptions{}) - if err != nil { - return - } + list, err := o.client.OperatorsV1alpha1().InstallPlans(namespace).List(context.TODO(), metav1.ListOptions{}) + if err != nil { + return + } - ips = make([]*v1alpha1.InstallPlan, 0) - for i := range list.Items { - ips = append(ips, &list.Items[i]) - } + ips = make([]*v1alpha1.InstallPlan, 0) + for i := range list.Items { + ips = append(ips, 
&list.Items[i]) + } - return + return } // competingCRDOwnersExist returns true if there exists a CSV that owns at least one of the given CSVs owned CRDs (that's not the given CSV) func competingCRDOwnersExist(namespace string, csv *v1alpha1.ClusterServiceVersion, existingOwners map[string][]string) (bool, error) { - // Attempt to find a pre-existing owner in the namespace for any owned crd - for _, crdDesc := range csv.Spec.CustomResourceDefinitions.Owned { - crdOwners := existingOwners[crdDesc.Name] - l := len(crdOwners) - switch { - case l == 1: - // One competing owner found - if crdOwners[0] != csv.GetName() { - return true, nil - } - case l > 1: - return true, olmerrors.NewMultipleExistingCRDOwnersError(crdOwners, crdDesc.Name, namespace) - } - } - - return false, nil + // Attempt to find a pre-existing owner in the namespace for any owned crd + for _, crdDesc := range csv.Spec.CustomResourceDefinitions.Owned { + crdOwners := existingOwners[crdDesc.Name] + l := len(crdOwners) + switch { + case l == 1: + // One competing owner found + if crdOwners[0] != csv.GetName() { + return true, nil + } + case l > 1: + return true, olmerrors.NewMultipleExistingCRDOwnersError(crdOwners, crdDesc.Name, namespace) + } + } + + return false, nil } // getCSVNameSet returns a set of the given installplan's csv names func getCSVNameSet(plan *v1alpha1.InstallPlan) map[string]struct{} { - csvNameSet := make(map[string]struct{}) - for _, name := range plan.Spec.ClusterServiceVersionNames { - csvNameSet[name] = struct{}{} - } + csvNameSet := make(map[string]struct{}) + for _, name := range plan.Spec.ClusterServiceVersionNames { + csvNameSet[name] = struct{}{} + } - return csvNameSet + return csvNameSet } func (o *Operator) apiresourceFromGVK(gvk schema.GroupVersionKind) (metav1.APIResource, error) { - logger := o.logger.WithFields(logrus.Fields{ - "group": gvk.Group, - "version": gvk.Version, - "kind": gvk.Kind, - }) - - resources, err := 
o.opClient.KubernetesInterface().Discovery().ServerResourcesForGroupVersion(gvk.GroupVersion().String()) - if err != nil { - logger.WithField("err", err).Info("could not query for GVK in api discovery") - return metav1.APIResource{}, err - } - for _, r := range resources.APIResources { - if r.Kind == gvk.Kind { - return r, nil - } - } - logger.Info("couldn't find GVK in api discovery") - return metav1.APIResource{}, olmerrors.GroupVersionKindNotFoundError{Group: gvk.Group, Version: gvk.Version, Kind: gvk.Kind} + logger := o.logger.WithFields(logrus.Fields{ + "group": gvk.Group, + "version": gvk.Version, + "kind": gvk.Kind, + }) + + resources, err := o.opClient.KubernetesInterface().Discovery().ServerResourcesForGroupVersion(gvk.GroupVersion().String()) + if err != nil { + logger.WithField("err", err).Info("could not query for GVK in api discovery") + return metav1.APIResource{}, err + } + for _, r := range resources.APIResources { + if r.Kind == gvk.Kind { + return r, nil + } + } + logger.Info("couldn't find GVK in api discovery") + return metav1.APIResource{}, olmerrors.GroupVersionKindNotFoundError{Group: gvk.Group, Version: gvk.Version, Kind: gvk.Kind} } diff --git a/staging/operator-lifecycle-manager/pkg/controller/operators/catalog/operator_test.go b/staging/operator-lifecycle-manager/pkg/controller/operators/catalog/operator_test.go index fb67cefa73..bdb89466ac 100644 --- a/staging/operator-lifecycle-manager/pkg/controller/operators/catalog/operator_test.go +++ b/staging/operator-lifecycle-manager/pkg/controller/operators/catalog/operator_test.go @@ -1,2155 +1,2269 @@ package catalog import ( - "context" - "encoding/json" - "errors" - "fmt" - "math/rand" - "os" - "reflect" - "strings" - "testing" - "testing/quick" - "time" - - "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions" - - "github.com/sirupsen/logrus" - "github.com/stretchr/testify/require" - "golang.org/x/time/rate" - "gopkg.in/yaml.v2" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" 
- rbacv1 "k8s.io/api/rbac/v1" - apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" - apiextensionsfake "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake" - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" - utilyaml "k8s.io/apimachinery/pkg/util/yaml" - "k8s.io/apiserver/pkg/storage/names" - fakedynamic "k8s.io/client-go/dynamic/fake" - "k8s.io/client-go/informers" - k8sfake "k8s.io/client-go/kubernetes/fake" - "k8s.io/client-go/rest" - "k8s.io/client-go/tools/cache" - "k8s.io/client-go/tools/record" - "k8s.io/client-go/util/workqueue" - apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1" - apiregistrationfake "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/fake" - utilclock "k8s.io/utils/clock" - utilclocktesting "k8s.io/utils/clock/testing" - - operatorsv1 "github.com/operator-framework/api/pkg/operators/v1" - "github.com/operator-framework/api/pkg/operators/v1alpha1" - "github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/fake" - "github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/informers/externalversions" - "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/bundle" - "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/bundle/bundlefakes" - olmerrors "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/errors" - "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/registry" - "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/registry/grpc" - "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/registry/reconciler" - 
"github.com/operator-framework/operator-lifecycle-manager/pkg/controller/registry/resolver" - "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/registry/resolver/solver" - "github.com/operator-framework/operator-lifecycle-manager/pkg/fakes" - "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/clientfake" - controllerclient "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/controller-runtime/client" - "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/operatorclient" - "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/operatorlister" - "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/ownerutil" - "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/queueinformer" - "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/scoped" + "context" + "encoding/json" + "errors" + "fmt" + "math/rand" + "os" + "reflect" + "strings" + "testing" + "testing/quick" + "time" + + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/require" + "golang.org/x/time/rate" + "gopkg.in/yaml.v2" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + apiextensionsfake "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + utilyaml "k8s.io/apimachinery/pkg/util/yaml" + "k8s.io/apiserver/pkg/storage/names" + fakedynamic "k8s.io/client-go/dynamic/fake" + "k8s.io/client-go/informers" + k8sfake "k8s.io/client-go/kubernetes/fake" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/cache" + 
"k8s.io/client-go/tools/record" + "k8s.io/client-go/util/workqueue" + apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1" + apiregistrationfake "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/fake" + utilclock "k8s.io/utils/clock" + utilclocktesting "k8s.io/utils/clock/testing" + + operatorsv1 "github.com/operator-framework/api/pkg/operators/v1" + "github.com/operator-framework/api/pkg/operators/v1alpha1" + "github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/fake" + "github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/informers/externalversions" + "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/bundle" + "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/bundle/bundlefakes" + olmerrors "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/errors" + "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/registry" + "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/registry/grpc" + "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/registry/reconciler" + "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/registry/resolver" + "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/registry/resolver/solver" + "github.com/operator-framework/operator-lifecycle-manager/pkg/fakes" + "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/clientfake" + controllerclient "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/controller-runtime/client" + "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/operatorclient" + "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/operatorlister" + "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/ownerutil" + "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/queueinformer" + 
"github.com/operator-framework/operator-lifecycle-manager/pkg/lib/scoped" ) type mockTransitioner struct { - err error + err error } var _ installPlanTransitioner = &mockTransitioner{} func (m *mockTransitioner) ExecutePlan(plan *v1alpha1.InstallPlan) error { - return m.err + return m.err } func TestCreateInstallPlanHasExpectedClusterServiceVersionNames(t *testing.T) { - namespace := "foo" - tests := []struct { - testName string - steps []*v1alpha1.Step - bundleLookups []v1alpha1.BundleLookup - expectedClusterServiceVersionNames []string - }{ - /****************************************************************************** - Historically, when creating an installPlan it's spec.ClusterServiceVersionNames - was derived from two sources: - 1. The names of CSVs found in "steps" of the installPlan's status.plan - 2. The metadata associated with the bundle image - - These sources couldn't result in duplicate entries as the unpacking job would - finish after the installPlan was created and the steps weren't populated until - the unpacking job finished. - - OLM was later updated to complete the unpacking jobs prior to creating - the installPlan, which caused CSVs to be listed twice as the createInstallPlan - function was called with steps and a bundle. 
- *****************************************************************************/ - { - testName: "Check that CSVs are not listed twice if steps and bundles are provided", - steps: []*v1alpha1.Step{{ - Resolving: "csv", - Resource: v1alpha1.StepResource{ - CatalogSource: "catalog", - CatalogSourceNamespace: namespace, - Group: "operators.coreos.com", - Version: "v1alpha1", - Kind: "ClusterServiceVersion", - Name: "csvA", - Manifest: toManifest(t, csv("csvA", namespace, nil, nil)), - }, - Status: v1alpha1.StepStatusUnknown, - }}, - bundleLookups: []v1alpha1.BundleLookup{ - { - Identifier: "csvA", - }, - }, - expectedClusterServiceVersionNames: []string{"csvA"}, - }, - } - for _, tt := range tests { - t.Run(tt.testName, func(t *testing.T) { - ctx, cancel := context.WithCancel(context.TODO()) - defer cancel() - - op, err := NewFakeOperator(ctx, namespace, []string{namespace}) - require.NoError(t, err) - - _, err = op.createInstallPlan(namespace, 0, nil, v1alpha1.ApprovalAutomatic, tt.steps, tt.bundleLookups) - require.NoError(t, err) - - ipList, err := op.client.OperatorsV1alpha1().InstallPlans(namespace).List(ctx, metav1.ListOptions{}) - require.NoError(t, err) - require.Len(t, ipList.Items, 1) - require.Equal(t, tt.expectedClusterServiceVersionNames, ipList.Items[0].Spec.ClusterServiceVersionNames) - }) - } + namespace := "foo" + tests := []struct { + testName string + steps []*v1alpha1.Step + bundleLookups []v1alpha1.BundleLookup + expectedClusterServiceVersionNames []string + }{ + /****************************************************************************** + Historically, when creating an installPlan it's spec.ClusterServiceVersionNames + was derived from two sources: + 1. The names of CSVs found in "steps" of the installPlan's status.plan + 2. 
The metadata associated with the bundle image + + These sources couldn't result in duplicate entries as the unpacking job would + finish after the installPlan was created and the steps weren't populated until + the unpacking job finished. + + OLM was later updated to complete the unpacking jobs prior to creating + the installPlan, which caused CSVs to be listed twice as the createInstallPlan + function was called with steps and a bundle. + *****************************************************************************/ + { + testName: "Check that CSVs are not listed twice if steps and bundles are provided", + steps: []*v1alpha1.Step{{ + Resolving: "csv", + Resource: v1alpha1.StepResource{ + CatalogSource: "catalog", + CatalogSourceNamespace: namespace, + Group: "operators.coreos.com", + Version: "v1alpha1", + Kind: "ClusterServiceVersion", + Name: "csvA", + Manifest: toManifest(t, csv("csvA", namespace, nil, nil)), + }, + Status: v1alpha1.StepStatusUnknown, + }}, + bundleLookups: []v1alpha1.BundleLookup{ + { + Identifier: "csvA", + }, + }, + expectedClusterServiceVersionNames: []string{"csvA"}, + }, + } + for _, tt := range tests { + t.Run(tt.testName, func(t *testing.T) { + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + op, err := NewFakeOperator(ctx, namespace, []string{namespace}) + require.NoError(t, err) + + _, err = op.createInstallPlan(namespace, 0, nil, v1alpha1.ApprovalAutomatic, tt.steps, tt.bundleLookups) + require.NoError(t, err) + + ipList, err := op.client.OperatorsV1alpha1().InstallPlans(namespace).List(ctx, metav1.ListOptions{}) + require.NoError(t, err) + require.Len(t, ipList.Items, 1) + require.Equal(t, tt.expectedClusterServiceVersionNames, ipList.Items[0].Spec.ClusterServiceVersionNames) + }) + } } func TestTransitionInstallPlan(t *testing.T) { - errMsg := "transition test error" - err := errors.New(errMsg) - clockFake := utilclocktesting.NewFakeClock(time.Date(2018, time.January, 26, 20, 40, 0, 0, time.UTC)) - now := 
metav1.NewTime(clockFake.Now()) - - installed := &v1alpha1.InstallPlanCondition{ - Type: v1alpha1.InstallPlanInstalled, - Status: corev1.ConditionTrue, - } - failed := &v1alpha1.InstallPlanCondition{ - Type: v1alpha1.InstallPlanInstalled, - Status: corev1.ConditionFalse, - Reason: v1alpha1.InstallPlanReasonComponentFailed, - Message: errMsg, - } - - tests := []struct { - initial v1alpha1.InstallPlanPhase - transError error - approval v1alpha1.Approval - approved bool - expected v1alpha1.InstallPlanPhase - condition *v1alpha1.InstallPlanCondition - timeout time.Duration - }{ - {v1alpha1.InstallPlanPhaseInstalling, nil, v1alpha1.ApprovalAutomatic, false, v1alpha1.InstallPlanPhaseComplete, installed, 0}, - {v1alpha1.InstallPlanPhaseInstalling, nil, v1alpha1.ApprovalAutomatic, true, v1alpha1.InstallPlanPhaseComplete, installed, 0}, - {v1alpha1.InstallPlanPhaseInstalling, err, v1alpha1.ApprovalAutomatic, false, v1alpha1.InstallPlanPhaseFailed, failed, 0}, - {v1alpha1.InstallPlanPhaseInstalling, err, v1alpha1.ApprovalAutomatic, true, v1alpha1.InstallPlanPhaseFailed, failed, 0}, - {v1alpha1.InstallPlanPhaseInstalling, err, v1alpha1.ApprovalAutomatic, false, v1alpha1.InstallPlanPhaseInstalling, nil, 1}, - {v1alpha1.InstallPlanPhaseInstalling, err, v1alpha1.ApprovalAutomatic, true, v1alpha1.InstallPlanPhaseInstalling, nil, 1}, - - {v1alpha1.InstallPlanPhaseRequiresApproval, nil, v1alpha1.ApprovalManual, false, v1alpha1.InstallPlanPhaseRequiresApproval, nil, 0}, - {v1alpha1.InstallPlanPhaseRequiresApproval, nil, v1alpha1.ApprovalManual, true, v1alpha1.InstallPlanPhaseInstalling, nil, 0}, - } - for _, tt := range tests { - // Create a plan in the provided initial phase. - plan := &v1alpha1.InstallPlan{ - Spec: v1alpha1.InstallPlanSpec{ - Approval: tt.approval, - Approved: tt.approved, - }, - Status: v1alpha1.InstallPlanStatus{ - Phase: tt.initial, - Conditions: []v1alpha1.InstallPlanCondition{}, - }, - } - - // Create a transitioner that returns the provided error. 
- transitioner := &mockTransitioner{tt.transError} - - // Attempt to transition phases. - out, _ := transitionInstallPlanState(logrus.New(), transitioner, *plan, now, tt.timeout) - - // Assert that the final phase is as expected. - require.Equal(t, tt.expected, out.Status.Phase) - - // Assert that the condition set is as expected - if tt.condition == nil { - require.Equal(t, 0, len(out.Status.Conditions)) - } else { - require.Equal(t, 1, len(out.Status.Conditions)) - require.Equal(t, tt.condition.Type, out.Status.Conditions[0].Type) - require.Equal(t, tt.condition.Status, out.Status.Conditions[0].Status) - require.Equal(t, tt.condition.Reason, out.Status.Conditions[0].Reason) - require.Equal(t, tt.condition.Message, out.Status.Conditions[0].Message) - } - } + errMsg := "transition test error" + err := errors.New(errMsg) + clockFake := utilclocktesting.NewFakeClock(time.Date(2018, time.January, 26, 20, 40, 0, 0, time.UTC)) + now := metav1.NewTime(clockFake.Now()) + + installed := &v1alpha1.InstallPlanCondition{ + Type: v1alpha1.InstallPlanInstalled, + Status: corev1.ConditionTrue, + } + failed := &v1alpha1.InstallPlanCondition{ + Type: v1alpha1.InstallPlanInstalled, + Status: corev1.ConditionFalse, + Reason: v1alpha1.InstallPlanReasonComponentFailed, + Message: errMsg, + } + + tests := []struct { + initial v1alpha1.InstallPlanPhase + transError error + approval v1alpha1.Approval + approved bool + expected v1alpha1.InstallPlanPhase + condition *v1alpha1.InstallPlanCondition + timeout time.Duration + }{ + {v1alpha1.InstallPlanPhaseInstalling, nil, v1alpha1.ApprovalAutomatic, false, v1alpha1.InstallPlanPhaseComplete, installed, 0}, + {v1alpha1.InstallPlanPhaseInstalling, nil, v1alpha1.ApprovalAutomatic, true, v1alpha1.InstallPlanPhaseComplete, installed, 0}, + {v1alpha1.InstallPlanPhaseInstalling, err, v1alpha1.ApprovalAutomatic, false, v1alpha1.InstallPlanPhaseFailed, failed, 0}, + {v1alpha1.InstallPlanPhaseInstalling, err, v1alpha1.ApprovalAutomatic, true, 
v1alpha1.InstallPlanPhaseFailed, failed, 0}, + {v1alpha1.InstallPlanPhaseInstalling, err, v1alpha1.ApprovalAutomatic, false, v1alpha1.InstallPlanPhaseInstalling, nil, 1}, + {v1alpha1.InstallPlanPhaseInstalling, err, v1alpha1.ApprovalAutomatic, true, v1alpha1.InstallPlanPhaseInstalling, nil, 1}, + + {v1alpha1.InstallPlanPhaseRequiresApproval, nil, v1alpha1.ApprovalManual, false, v1alpha1.InstallPlanPhaseRequiresApproval, nil, 0}, + {v1alpha1.InstallPlanPhaseRequiresApproval, nil, v1alpha1.ApprovalManual, true, v1alpha1.InstallPlanPhaseInstalling, nil, 0}, + } + for _, tt := range tests { + // Create a plan in the provided initial phase. + plan := &v1alpha1.InstallPlan{ + Spec: v1alpha1.InstallPlanSpec{ + Approval: tt.approval, + Approved: tt.approved, + }, + Status: v1alpha1.InstallPlanStatus{ + Phase: tt.initial, + Conditions: []v1alpha1.InstallPlanCondition{}, + }, + } + + // Create a transitioner that returns the provided error. + transitioner := &mockTransitioner{tt.transError} + + // Attempt to transition phases. + out, _ := transitionInstallPlanState(logrus.New(), transitioner, *plan, now, tt.timeout) + + // Assert that the final phase is as expected. 
+ require.Equal(t, tt.expected, out.Status.Phase) + + // Assert that the condition set is as expected + if tt.condition == nil { + require.Equal(t, 0, len(out.Status.Conditions)) + } else { + require.Equal(t, 1, len(out.Status.Conditions)) + require.Equal(t, tt.condition.Type, out.Status.Conditions[0].Type) + require.Equal(t, tt.condition.Status, out.Status.Conditions[0].Status) + require.Equal(t, tt.condition.Reason, out.Status.Conditions[0].Reason) + require.Equal(t, tt.condition.Message, out.Status.Conditions[0].Message) + } + } } func TestSyncInstallPlanUnhappy(t *testing.T) { - namespace := "ns" - ipWithSteps := withSteps(installPlan("p", namespace, v1alpha1.InstallPlanPhaseInstalling, "csv"), - []*v1alpha1.Step{ - { - Resource: v1alpha1.StepResource{ - CatalogSource: "catalog", - CatalogSourceNamespace: namespace, - Group: "", - Version: "v1", - Kind: "ServiceAccount", - Name: "sa", - Manifest: toManifest(t, serviceAccount("sa", namespace, "", - objectReference("init secret"))), - }, - Status: v1alpha1.StepStatusUnknown, - }, - }, - ) - - tests := []struct { - testName string - err error - in *v1alpha1.InstallPlan - expectedPhase v1alpha1.InstallPlanPhase - expectedCondition *v1alpha1.InstallPlanCondition - clientObjs []runtime.Object - }{ - { - testName: "NoStatus", - err: nil, - expectedPhase: v1alpha1.InstallPlanPhaseNone, - in: installPlan("p", namespace, v1alpha1.InstallPlanPhaseNone), - }, - { - // This checks that an installplan's status.Condition contains a condition with error message when no operatorgroup is present - testName: "HasSteps/NoOperatorGroup", - err: fmt.Errorf("attenuated service account query failed - no operator group found that is managing this namespace"), - expectedPhase: v1alpha1.InstallPlanPhaseInstalling, - expectedCondition: &v1alpha1.InstallPlanCondition{Type: v1alpha1.InstallPlanInstalled, Status: corev1.ConditionFalse, Reason: v1alpha1.InstallPlanReasonInstallCheckFailed, - Message: "no operator group found that is managing 
this namespace"}, - in: ipWithSteps, - }, - { - // This checks that an installplan's status.Condition contains a condition with error message when multiple operator groups are present for the same namespace - testName: "HasSteps/TooManyOperatorGroups", - err: fmt.Errorf("attenuated service account query failed - more than one operator group(s) are managing this namespace count=2"), - expectedPhase: v1alpha1.InstallPlanPhaseInstalling, - in: ipWithSteps, - expectedCondition: &v1alpha1.InstallPlanCondition{Type: v1alpha1.InstallPlanInstalled, Status: corev1.ConditionFalse, Reason: v1alpha1.InstallPlanReasonInstallCheckFailed, - Message: "more than one operator group(s) are managing this namespace count=2"}, - clientObjs: []runtime.Object{ - operatorGroup("og1", "sa", namespace, - &corev1.ObjectReference{ - Kind: "ServiceAccount", - Namespace: namespace, - Name: "sa", - }), - operatorGroup("og2", "sa", namespace, - &corev1.ObjectReference{ - Kind: "ServiceAccount", - Namespace: namespace, - Name: "sa", - }), - }, - }, - { - // This checks that an installplan's status.Condition contains a condition with error message when no service account is synced for the operator group, i.e the service account ref doesn't exist - testName: "HasSteps/NonExistentServiceAccount", - err: fmt.Errorf("attenuated service account query failed - please make sure the service account exists. sa=sa1 operatorgroup=ns/og"), - expectedPhase: v1alpha1.InstallPlanPhaseInstalling, - expectedCondition: &v1alpha1.InstallPlanCondition{Type: v1alpha1.InstallPlanInstalled, Status: corev1.ConditionFalse, Reason: v1alpha1.InstallPlanReasonInstallCheckFailed, - Message: "please make sure the service account exists. 
sa=sa1 operatorgroup=ns/og"}, - in: ipWithSteps, - clientObjs: []runtime.Object{ - operatorGroup("og", "sa1", namespace, nil), - }, - }, - } - - for _, tt := range tests { - t.Run(tt.testName, func(t *testing.T) { - ctx, cancel := context.WithCancel(context.TODO()) - defer cancel() - - tt.clientObjs = append(tt.clientObjs, tt.in) - - op, err := NewFakeOperator(ctx, namespace, []string{namespace}, withClientObjs(tt.clientObjs...)) - require.NoError(t, err) - - err = op.syncInstallPlans(tt.in) - require.Equal(t, tt.err, err) - - ip, err := op.client.OperatorsV1alpha1().InstallPlans(namespace).Get(ctx, tt.in.Name, metav1.GetOptions{}) - require.NoError(t, err) - - require.Equal(t, tt.expectedPhase, ip.Status.Phase) - - if tt.expectedCondition != nil { - require.True(t, hasExpectedCondition(ip, *tt.expectedCondition)) - } - }) - } + namespace := "ns" + ipWithSteps := withSteps(installPlan("p", namespace, v1alpha1.InstallPlanPhaseInstalling, "csv"), + []*v1alpha1.Step{ + { + Resource: v1alpha1.StepResource{ + CatalogSource: "catalog", + CatalogSourceNamespace: namespace, + Group: "", + Version: "v1", + Kind: "ServiceAccount", + Name: "sa", + Manifest: toManifest(t, serviceAccount("sa", namespace, "", + objectReference("init secret"))), + }, + Status: v1alpha1.StepStatusUnknown, + }, + }, + ) + + tests := []struct { + testName string + err error + in *v1alpha1.InstallPlan + expectedPhase v1alpha1.InstallPlanPhase + expectedCondition *v1alpha1.InstallPlanCondition + clientObjs []runtime.Object + }{ + { + testName: "NoStatus", + err: nil, + expectedPhase: v1alpha1.InstallPlanPhaseNone, + in: installPlan("p", namespace, v1alpha1.InstallPlanPhaseNone), + }, + { + // This checks that an installplan's status.Condition contains a condition with error message when no operatorgroup is present + testName: "HasSteps/NoOperatorGroup", + err: fmt.Errorf("attenuated service account query failed - no operator group found that is managing this namespace"), + expectedPhase: 
v1alpha1.InstallPlanPhaseInstalling, + expectedCondition: &v1alpha1.InstallPlanCondition{Type: v1alpha1.InstallPlanInstalled, Status: corev1.ConditionFalse, Reason: v1alpha1.InstallPlanReasonInstallCheckFailed, + Message: "no operator group found that is managing this namespace"}, + in: ipWithSteps, + }, + { + // This checks that an installplan's status.Condition contains a condition with error message when multiple operator groups are present for the same namespace + testName: "HasSteps/TooManyOperatorGroups", + err: fmt.Errorf("attenuated service account query failed - more than one operator group(s) are managing this namespace count=2"), + expectedPhase: v1alpha1.InstallPlanPhaseInstalling, + in: ipWithSteps, + expectedCondition: &v1alpha1.InstallPlanCondition{Type: v1alpha1.InstallPlanInstalled, Status: corev1.ConditionFalse, Reason: v1alpha1.InstallPlanReasonInstallCheckFailed, + Message: "more than one operator group(s) are managing this namespace count=2"}, + clientObjs: []runtime.Object{ + operatorGroup("og1", "sa", namespace, + &corev1.ObjectReference{ + Kind: "ServiceAccount", + Namespace: namespace, + Name: "sa", + }), + operatorGroup("og2", "sa", namespace, + &corev1.ObjectReference{ + Kind: "ServiceAccount", + Namespace: namespace, + Name: "sa", + }), + }, + }, + { + // This checks that an installplan's status.Condition contains a condition with error message when no service account is synced for the operator group, i.e the service account ref doesn't exist + testName: "HasSteps/NonExistentServiceAccount", + err: fmt.Errorf("attenuated service account query failed - please make sure the service account exists. sa=sa1 operatorgroup=ns/og"), + expectedPhase: v1alpha1.InstallPlanPhaseInstalling, + expectedCondition: &v1alpha1.InstallPlanCondition{Type: v1alpha1.InstallPlanInstalled, Status: corev1.ConditionFalse, Reason: v1alpha1.InstallPlanReasonInstallCheckFailed, + Message: "please make sure the service account exists. 
sa=sa1 operatorgroup=ns/og"}, + in: ipWithSteps, + clientObjs: []runtime.Object{ + operatorGroup("og", "sa1", namespace, nil), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.testName, func(t *testing.T) { + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + tt.clientObjs = append(tt.clientObjs, tt.in) + + op, err := NewFakeOperator(ctx, namespace, []string{namespace}, withClientObjs(tt.clientObjs...)) + require.NoError(t, err) + + err = op.syncInstallPlans(tt.in) + require.Equal(t, tt.err, err) + + ip, err := op.client.OperatorsV1alpha1().InstallPlans(namespace).Get(ctx, tt.in.Name, metav1.GetOptions{}) + require.NoError(t, err) + + require.Equal(t, tt.expectedPhase, ip.Status.Phase) + + if tt.expectedCondition != nil { + require.True(t, hasExpectedCondition(ip, *tt.expectedCondition)) + } + }) + } } type ipSet []v1alpha1.InstallPlan func (ipSet) Generate(rand *rand.Rand, size int) reflect.Value { - ips := []v1alpha1.InstallPlan{} - - // each i is the generation value - for i := 0; i < rand.Intn(size)+1; i++ { - // generate a few at each generation to account for bugs that don't increment the generation - for j := 0; j < rand.Intn(3); j++ { - ips = append(ips, v1alpha1.InstallPlan{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "ns", - Name: names.SimpleNameGenerator.GenerateName(fmt.Sprintf("%d", i)), - }, - Spec: v1alpha1.InstallPlanSpec{ - Generation: i, - }, - }) - } - } - return reflect.ValueOf(ipSet(ips)) + ips := []v1alpha1.InstallPlan{} + + // each i is the generation value + for i := 0; i < rand.Intn(size)+1; i++ { + // generate a few at each generation to account for bugs that don't increment the generation + for j := 0; j < rand.Intn(3); j++ { + ips = append(ips, v1alpha1.InstallPlan{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns", + Name: names.SimpleNameGenerator.GenerateName(fmt.Sprintf("%d", i)), + }, + Spec: v1alpha1.InstallPlanSpec{ + Generation: i, + }, + }) + } + } + return reflect.ValueOf(ipSet(ips)) } func 
TestGCInstallPlans(t *testing.T) { - f := func(ips ipSet) bool { - if len(ips) == 0 { - return true - } - ctx, cancel := context.WithCancel(context.TODO()) - defer cancel() - - var maxGen int64 - for _, i := range ips { - if g := i.Generation; g > maxGen { - maxGen = g - } - } - objs := make([]runtime.Object, 0) - for _, i := range ips { - objs = append(objs, i.DeepCopy()) - } - op, err := NewFakeOperator(ctx, "ns", []string{"ns"}, withClientObjs(objs...)) - require.NoError(t, err) - - var out []v1alpha1.InstallPlan - for { - op.gcInstallPlans(logrus.New(), "ns") - require.NoError(t, err) - - outList, err := op.client.OperatorsV1alpha1().InstallPlans("ns").List(ctx, metav1.ListOptions{}) - require.NoError(t, err) - out = outList.Items - - if len(out) <= maxInstallPlanCount { - break - } - } - - keptMax := false - for _, o := range out { - if o.Generation == maxGen { - keptMax = true - break - } - } - require.True(t, keptMax) - - if len(ips) < maxInstallPlanCount { - return len(out) == len(ips) - } - return len(out) == maxInstallPlanCount - } - require.NoError(t, quick.Check(f, nil)) + f := func(ips ipSet) bool { + if len(ips) == 0 { + return true + } + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + var maxGen int64 + for _, i := range ips { + if g := i.Generation; g > maxGen { + maxGen = g + } + } + objs := make([]runtime.Object, 0) + for _, i := range ips { + objs = append(objs, i.DeepCopy()) + } + op, err := NewFakeOperator(ctx, "ns", []string{"ns"}, withClientObjs(objs...)) + require.NoError(t, err) + + var out []v1alpha1.InstallPlan + for { + op.gcInstallPlans(logrus.New(), "ns") + require.NoError(t, err) + + outList, err := op.client.OperatorsV1alpha1().InstallPlans("ns").List(ctx, metav1.ListOptions{}) + require.NoError(t, err) + out = outList.Items + + if len(out) <= maxInstallPlanCount { + break + } + } + + keptMax := false + for _, o := range out { + if o.Generation == maxGen { + keptMax = true + break + } + } + require.True(t, 
keptMax) + + if len(ips) < maxInstallPlanCount { + return len(out) == len(ips) + } + return len(out) == maxInstallPlanCount + } + require.NoError(t, quick.Check(f, nil)) } func TestExecutePlan(t *testing.T) { - namespace := "ns" - - tests := []struct { - testName string - in *v1alpha1.InstallPlan - extObjs []runtime.Object - want []runtime.Object - err error - }{ - { - testName: "NoSteps", - in: installPlan("p", namespace, v1alpha1.InstallPlanPhaseInstalling), - want: []runtime.Object{}, - err: nil, - }, - { - testName: "MultipleSteps", - in: withSteps(installPlan("p", namespace, v1alpha1.InstallPlanPhaseInstalling, "csv"), - []*v1alpha1.Step{ - { - Resource: v1alpha1.StepResource{ - CatalogSource: "catalog", - CatalogSourceNamespace: namespace, - Group: "", - Version: "v1", - Kind: "Service", - Name: "service", - Manifest: toManifest(t, service("service", namespace)), - }, - Status: v1alpha1.StepStatusUnknown, - }, - { - Resource: v1alpha1.StepResource{ - CatalogSource: "catalog", - CatalogSourceNamespace: namespace, - Group: "operators.coreos.com", - Version: "v1alpha1", - Kind: "ClusterServiceVersion", - Name: "csv", - Manifest: toManifest(t, csv("csv", namespace, nil, nil)), - }, - Status: v1alpha1.StepStatusUnknown, - }, - }, - ), - want: []runtime.Object{service("service", namespace), csv("csv", namespace, nil, nil)}, - err: nil, - }, - { - testName: "CreateServiceAccount", - in: withSteps(installPlan("p", namespace, v1alpha1.InstallPlanPhaseInstalling, "csv"), - []*v1alpha1.Step{ - { - Resource: v1alpha1.StepResource{ - CatalogSource: "catalog", - CatalogSourceNamespace: namespace, - Group: "", - Version: "v1", - Kind: "ServiceAccount", - Name: "sa", - Manifest: toManifest(t, serviceAccount("sa", namespace, "", - objectReference("init secret"))), - }, - Status: v1alpha1.StepStatusUnknown, - }, - }, - ), - want: []runtime.Object{serviceAccount("sa", namespace, "", objectReference("init secret"))}, - err: nil, - }, - { - testName: "CreateConfigMap", - in: 
withSteps(installPlan("p", namespace, v1alpha1.InstallPlanPhaseInstalling, "csv"), - []*v1alpha1.Step{ - { - Resource: v1alpha1.StepResource{ - CatalogSource: "catalog", - CatalogSourceNamespace: namespace, - Group: "", - Version: "v1", - Kind: "ConfigMap", - Name: "cfg", - Manifest: toManifest(t, configMap("cfg", namespace)), - }, - Status: v1alpha1.StepStatusUnknown, - }, - }, - ), - want: []runtime.Object{configMap("cfg", namespace)}, - err: nil, - }, - { - testName: "CreateSecretFromBundle", - in: withSteps(installPlan("p", namespace, v1alpha1.InstallPlanPhaseInstalling, "csv"), - []*v1alpha1.Step{ - { - Resource: v1alpha1.StepResource{ - CatalogSource: "catalog", - CatalogSourceNamespace: namespace, - Group: "", - Version: "v1", - Kind: "BundleSecret", - Name: "s", - Manifest: toManifest(t, secret("s", namespace)), - }, - Status: v1alpha1.StepStatusUnknown, - }, - }, - ), - want: []runtime.Object{secret("s", namespace)}, - err: nil, - }, - { - testName: "DoesNotCreateSecretNotFromBundle", - in: withSteps(installPlan("p", namespace, v1alpha1.InstallPlanPhaseInstalling, "csv"), - []*v1alpha1.Step{ - { - Resource: v1alpha1.StepResource{ - CatalogSource: "catalog", - CatalogSourceNamespace: namespace, - Group: "", - Version: "v1", - Kind: "Secret", - Name: "s", - Manifest: toManifest(t, secret("s", namespace)), - }, - Status: v1alpha1.StepStatusUnknown, - }, - }, - ), - want: []runtime.Object{}, - err: fmt.Errorf("secret s does not exist - secrets \"s\" not found"), - }, - { - testName: "UpdateServiceAccountWithSameFields", - in: withSteps(installPlan("p", namespace, v1alpha1.InstallPlanPhaseInstalling, "csv"), - []*v1alpha1.Step{ - { - Resource: v1alpha1.StepResource{ - CatalogSource: "catalog", - CatalogSourceNamespace: namespace, - Group: "", - Version: "v1", - Kind: "ServiceAccount", - Name: "sa", - Manifest: toManifest(t, serviceAccount("sa", namespace, "name", - objectReference("init secret"))), - }, - Status: v1alpha1.StepStatusUnknown, - }, - { - Resource: 
v1alpha1.StepResource{ - CatalogSource: "catalog", - CatalogSourceNamespace: namespace, - Group: "", - Version: "v1", - Kind: "ServiceAccount", - Name: "sa", - Manifest: toManifest(t, serviceAccount("sa", namespace, "name", nil)), - }, - Status: v1alpha1.StepStatusUnknown, - }, - }, - ), - want: []runtime.Object{serviceAccount("sa", namespace, "name", objectReference("init secret"))}, - err: nil, - }, - { - testName: "UpdateServiceAccountWithDiffFields", - in: withSteps(installPlan("p", namespace, v1alpha1.InstallPlanPhaseInstalling, "csv"), - []*v1alpha1.Step{ - { - Resource: v1alpha1.StepResource{ - CatalogSource: "catalog", - CatalogSourceNamespace: namespace, - Group: "", - Version: "v1", - Kind: "ServiceAccount", - Name: "sa", - Manifest: toManifest(t, serviceAccount("sa", namespace, "old_name", - objectReference("init secret"))), - }, - Status: v1alpha1.StepStatusUnknown, - }, - { - Resource: v1alpha1.StepResource{ - CatalogSource: "catalog", - CatalogSourceNamespace: namespace, - Group: "", - Version: "v1", - Kind: "ServiceAccount", - Name: "sa", - Manifest: toManifest(t, serviceAccount("sa", namespace, "new_name", nil)), - }, - Status: v1alpha1.StepStatusUnknown, - }, - }, - ), - want: []runtime.Object{serviceAccount("sa", namespace, "new_name", objectReference("init secret"))}, - err: nil, - }, - { - testName: "DynamicResourcesAreOwnerReferencedToCSV", - in: withSteps(installPlan("p", namespace, v1alpha1.InstallPlanPhaseInstalling, "csv"), - []*v1alpha1.Step{ - { - Resolving: "csv", - Resource: v1alpha1.StepResource{ - CatalogSource: "catalog", - CatalogSourceNamespace: namespace, - Group: "operators.coreos.com", - Version: "v1alpha1", - Kind: "ClusterServiceVersion", - Name: "csv", - Manifest: toManifest(t, csv("csv", namespace, nil, nil)), - }, - Status: v1alpha1.StepStatusUnknown, - }, - { - Resolving: "csv", - Resource: v1alpha1.StepResource{ - CatalogSource: "catalog", - CatalogSourceNamespace: namespace, - Group: "monitoring.coreos.com", - Version: 
"v1", - Kind: "PrometheusRule", - Name: "rule", - Manifest: toManifest(t, decodeFile(t, "./testdata/prometheusrule.cr.yaml", &unstructured.Unstructured{})), - }, - Status: v1alpha1.StepStatusUnknown, - }, - }, - ), - extObjs: []runtime.Object{decodeFile(t, "./testdata/prometheusrule.crd.yaml", &apiextensionsv1beta1.CustomResourceDefinition{})}, - want: []runtime.Object{ - csv("csv", namespace, nil, nil), - modify(t, decodeFile(t, "./testdata/prometheusrule.cr.yaml", &unstructured.Unstructured{}), - withNamespace(namespace), - withOwner(csv("csv", namespace, nil, nil)), - ), - }, - err: nil, - }, - { - testName: "V1CRDResourceIsCreated", - in: withSteps(installPlan("p", namespace, v1alpha1.InstallPlanPhaseInstalling, "crdv1"), - []*v1alpha1.Step{ - { - Resource: v1alpha1.StepResource{ - CatalogSource: "catalog", - CatalogSourceNamespace: namespace, - Group: "apiextensions.k8s.io", - Version: "v1", - Kind: crdKind, - Name: "crd", - Manifest: toManifest(t, - &apiextensionsv1.CustomResourceDefinition{ - TypeMeta: metav1.TypeMeta{ - Kind: "CustomResourceDefinition", - APIVersion: "apiextensions.k8s.io/v1", // v1 CRD version of API - }, - ObjectMeta: metav1.ObjectMeta{Name: "test"}, - Spec: apiextensionsv1.CustomResourceDefinitionSpec{}, - }), - }, - Status: v1alpha1.StepStatusUnknown, - }, - }), - want: []runtime.Object{ - &apiextensionsv1.CustomResourceDefinition{ - ObjectMeta: metav1.ObjectMeta{Name: "test"}, - TypeMeta: metav1.TypeMeta{ - Kind: "CustomResourceDefinition", - APIVersion: "apiextensions.k8s.io/v1", // v1 CRD version of API - }, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.testName, func(t *testing.T) { - ctx, cancel := context.WithCancel(context.TODO()) - defer cancel() - - op, err := NewFakeOperator(ctx, namespace, []string{namespace}, withClientObjs(tt.in), withExtObjs(tt.extObjs...)) - require.NoError(t, err) - - err = op.ExecutePlan(tt.in) - require.Equal(t, tt.err, err) - - getOpts := metav1.GetOptions{} - for _, obj := range 
tt.want { - var err error - var fetched runtime.Object - switch o := obj.(type) { - case *appsv1.Deployment: - fetched, err = op.opClient.GetDeployment(namespace, o.GetName()) - case *rbacv1.ClusterRole: - fetched, err = op.opClient.GetClusterRole(o.GetName()) - case *rbacv1.Role: - fetched, err = op.opClient.GetRole(namespace, o.GetName()) - case *rbacv1.ClusterRoleBinding: - fetched, err = op.opClient.GetClusterRoleBinding(o.GetName()) - case *rbacv1.RoleBinding: - fetched, err = op.opClient.GetRoleBinding(namespace, o.GetName()) - case *corev1.ServiceAccount: - fetched, err = op.opClient.GetServiceAccount(namespace, o.GetName()) - case *corev1.Secret: - fetched, err = op.opClient.GetSecret(namespace, o.GetName()) - case *corev1.Service: - fetched, err = op.opClient.GetService(namespace, o.GetName()) - case *corev1.ConfigMap: - fetched, err = op.opClient.GetConfigMap(namespace, o.GetName()) - case *apiextensionsv1beta1.CustomResourceDefinition: - fetched, err = op.opClient.ApiextensionsInterface().ApiextensionsV1beta1().CustomResourceDefinitions().Get(context.TODO(), o.GetName(), getOpts) - case *apiextensionsv1.CustomResourceDefinition: - fetched, err = op.opClient.ApiextensionsInterface().ApiextensionsV1().CustomResourceDefinitions().Get(context.TODO(), o.GetName(), getOpts) - case *v1alpha1.ClusterServiceVersion: - fetched, err = op.client.OperatorsV1alpha1().ClusterServiceVersions(namespace).Get(context.TODO(), o.GetName(), getOpts) - case *unstructured.Unstructured: - // Get the resource from the GVK - gvk := o.GroupVersionKind() - var r metav1.APIResource - r, err = op.apiresourceFromGVK(gvk) - require.NoError(t, err) - - gvr := schema.GroupVersionResource{ - Group: gvk.Group, - Version: gvk.Version, - Resource: r.Name, - } - - if r.Namespaced { - fetched, err = op.dynamicClient.Resource(gvr).Namespace(namespace).Get(context.TODO(), o.GetName(), getOpts) - break - } - - fetched, err = op.dynamicClient.Resource(gvr).Get(context.TODO(), o.GetName(), getOpts) 
- default: - require.Failf(t, "couldn't find expected object", "%#v", obj) - } - - require.NoError(t, err, "couldn't fetch %s %v", namespace, obj) - require.EqualValues(t, obj, fetched) - } - }) - } + namespace := "ns" + + tests := []struct { + testName string + in *v1alpha1.InstallPlan + extObjs []runtime.Object + want []runtime.Object + err error + }{ + { + testName: "NoSteps", + in: installPlan("p", namespace, v1alpha1.InstallPlanPhaseInstalling), + want: []runtime.Object{}, + err: nil, + }, + { + testName: "MultipleSteps", + in: withSteps(installPlan("p", namespace, v1alpha1.InstallPlanPhaseInstalling, "csv"), + []*v1alpha1.Step{ + { + Resource: v1alpha1.StepResource{ + CatalogSource: "catalog", + CatalogSourceNamespace: namespace, + Group: "", + Version: "v1", + Kind: "Service", + Name: "service", + Manifest: toManifest(t, service("service", namespace)), + }, + Status: v1alpha1.StepStatusUnknown, + }, + { + Resource: v1alpha1.StepResource{ + CatalogSource: "catalog", + CatalogSourceNamespace: namespace, + Group: "operators.coreos.com", + Version: "v1alpha1", + Kind: "ClusterServiceVersion", + Name: "csv", + Manifest: toManifest(t, csv("csv", namespace, nil, nil)), + }, + Status: v1alpha1.StepStatusUnknown, + }, + }, + ), + want: []runtime.Object{service("service", namespace), csv("csv", namespace, nil, nil)}, + err: nil, + }, + { + testName: "CreateServiceAccount", + in: withSteps(installPlan("p", namespace, v1alpha1.InstallPlanPhaseInstalling, "csv"), + []*v1alpha1.Step{ + { + Resource: v1alpha1.StepResource{ + CatalogSource: "catalog", + CatalogSourceNamespace: namespace, + Group: "", + Version: "v1", + Kind: "ServiceAccount", + Name: "sa", + Manifest: toManifest(t, serviceAccount("sa", namespace, "", + objectReference("init secret"))), + }, + Status: v1alpha1.StepStatusUnknown, + }, + }, + ), + want: []runtime.Object{serviceAccount("sa", namespace, "", objectReference("init secret"))}, + err: nil, + }, + { + testName: "CreateConfigMap", + in: 
withSteps(installPlan("p", namespace, v1alpha1.InstallPlanPhaseInstalling, "csv"), + []*v1alpha1.Step{ + { + Resource: v1alpha1.StepResource{ + CatalogSource: "catalog", + CatalogSourceNamespace: namespace, + Group: "", + Version: "v1", + Kind: "ConfigMap", + Name: "cfg", + Manifest: toManifest(t, configMap("cfg", namespace)), + }, + Status: v1alpha1.StepStatusUnknown, + }, + }, + ), + want: []runtime.Object{configMap("cfg", namespace)}, + err: nil, + }, + { + testName: "CreateSecretFromBundle", + in: withSteps(installPlan("p", namespace, v1alpha1.InstallPlanPhaseInstalling, "csv"), + []*v1alpha1.Step{ + { + Resource: v1alpha1.StepResource{ + CatalogSource: "catalog", + CatalogSourceNamespace: namespace, + Group: "", + Version: "v1", + Kind: "BundleSecret", + Name: "s", + Manifest: toManifest(t, secret("s", namespace)), + }, + Status: v1alpha1.StepStatusUnknown, + }, + }, + ), + want: []runtime.Object{secret("s", namespace)}, + err: nil, + }, + { + testName: "DoesNotCreateSecretNotFromBundle", + in: withSteps(installPlan("p", namespace, v1alpha1.InstallPlanPhaseInstalling, "csv"), + []*v1alpha1.Step{ + { + Resource: v1alpha1.StepResource{ + CatalogSource: "catalog", + CatalogSourceNamespace: namespace, + Group: "", + Version: "v1", + Kind: "Secret", + Name: "s", + Manifest: toManifest(t, secret("s", namespace)), + }, + Status: v1alpha1.StepStatusUnknown, + }, + }, + ), + want: []runtime.Object{}, + err: fmt.Errorf("secret s does not exist - secrets \"s\" not found"), + }, + { + testName: "UpdateServiceAccountWithSameFields", + in: withSteps(installPlan("p", namespace, v1alpha1.InstallPlanPhaseInstalling, "csv"), + []*v1alpha1.Step{ + { + Resource: v1alpha1.StepResource{ + CatalogSource: "catalog", + CatalogSourceNamespace: namespace, + Group: "", + Version: "v1", + Kind: "ServiceAccount", + Name: "sa", + Manifest: toManifest(t, serviceAccount("sa", namespace, "name", + objectReference("init secret"))), + }, + Status: v1alpha1.StepStatusUnknown, + }, + { + Resource: 
v1alpha1.StepResource{ + CatalogSource: "catalog", + CatalogSourceNamespace: namespace, + Group: "", + Version: "v1", + Kind: "ServiceAccount", + Name: "sa", + Manifest: toManifest(t, serviceAccount("sa", namespace, "name", nil)), + }, + Status: v1alpha1.StepStatusUnknown, + }, + }, + ), + want: []runtime.Object{serviceAccount("sa", namespace, "name", objectReference("init secret"))}, + err: nil, + }, + { + testName: "UpdateServiceAccountWithDiffFields", + in: withSteps(installPlan("p", namespace, v1alpha1.InstallPlanPhaseInstalling, "csv"), + []*v1alpha1.Step{ + { + Resource: v1alpha1.StepResource{ + CatalogSource: "catalog", + CatalogSourceNamespace: namespace, + Group: "", + Version: "v1", + Kind: "ServiceAccount", + Name: "sa", + Manifest: toManifest(t, serviceAccount("sa", namespace, "old_name", + objectReference("init secret"))), + }, + Status: v1alpha1.StepStatusUnknown, + }, + { + Resource: v1alpha1.StepResource{ + CatalogSource: "catalog", + CatalogSourceNamespace: namespace, + Group: "", + Version: "v1", + Kind: "ServiceAccount", + Name: "sa", + Manifest: toManifest(t, serviceAccount("sa", namespace, "new_name", nil)), + }, + Status: v1alpha1.StepStatusUnknown, + }, + }, + ), + want: []runtime.Object{serviceAccount("sa", namespace, "new_name", objectReference("init secret"))}, + err: nil, + }, + { + testName: "DynamicResourcesAreOwnerReferencedToCSV", + in: withSteps(installPlan("p", namespace, v1alpha1.InstallPlanPhaseInstalling, "csv"), + []*v1alpha1.Step{ + { + Resolving: "csv", + Resource: v1alpha1.StepResource{ + CatalogSource: "catalog", + CatalogSourceNamespace: namespace, + Group: "operators.coreos.com", + Version: "v1alpha1", + Kind: "ClusterServiceVersion", + Name: "csv", + Manifest: toManifest(t, csv("csv", namespace, nil, nil)), + }, + Status: v1alpha1.StepStatusUnknown, + }, + { + Resolving: "csv", + Resource: v1alpha1.StepResource{ + CatalogSource: "catalog", + CatalogSourceNamespace: namespace, + Group: "monitoring.coreos.com", + Version: 
"v1", + Kind: "PrometheusRule", + Name: "rule", + Manifest: toManifest(t, decodeFile(t, "./testdata/prometheusrule.cr.yaml", &unstructured.Unstructured{})), + }, + Status: v1alpha1.StepStatusUnknown, + }, + }, + ), + extObjs: []runtime.Object{decodeFile(t, "./testdata/prometheusrule.crd.yaml", &apiextensionsv1beta1.CustomResourceDefinition{})}, + want: []runtime.Object{ + csv("csv", namespace, nil, nil), + modify(t, decodeFile(t, "./testdata/prometheusrule.cr.yaml", &unstructured.Unstructured{}), + withNamespace(namespace), + withOwner(csv("csv", namespace, nil, nil)), + ), + }, + err: nil, + }, + { + testName: "V1CRDResourceIsCreated", + in: withSteps(installPlan("p", namespace, v1alpha1.InstallPlanPhaseInstalling, "crdv1"), + []*v1alpha1.Step{ + { + Resource: v1alpha1.StepResource{ + CatalogSource: "catalog", + CatalogSourceNamespace: namespace, + Group: "apiextensions.k8s.io", + Version: "v1", + Kind: crdKind, + Name: "crd", + Manifest: toManifest(t, + &apiextensionsv1.CustomResourceDefinition{ + TypeMeta: metav1.TypeMeta{ + Kind: "CustomResourceDefinition", + APIVersion: "apiextensions.k8s.io/v1", // v1 CRD version of API + }, + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Spec: apiextensionsv1.CustomResourceDefinitionSpec{}, + }), + }, + Status: v1alpha1.StepStatusUnknown, + }, + }), + want: []runtime.Object{ + &apiextensionsv1.CustomResourceDefinition{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + TypeMeta: metav1.TypeMeta{ + Kind: "CustomResourceDefinition", + APIVersion: "apiextensions.k8s.io/v1", // v1 CRD version of API + }, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.testName, func(t *testing.T) { + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + op, err := NewFakeOperator(ctx, namespace, []string{namespace}, withClientObjs(tt.in), withExtObjs(tt.extObjs...)) + require.NoError(t, err) + + err = op.ExecutePlan(tt.in) + require.Equal(t, tt.err, err) + + getOpts := metav1.GetOptions{} + for _, obj := range 
tt.want { + var err error + var fetched runtime.Object + switch o := obj.(type) { + case *appsv1.Deployment: + fetched, err = op.opClient.GetDeployment(namespace, o.GetName()) + case *rbacv1.ClusterRole: + fetched, err = op.opClient.GetClusterRole(o.GetName()) + case *rbacv1.Role: + fetched, err = op.opClient.GetRole(namespace, o.GetName()) + case *rbacv1.ClusterRoleBinding: + fetched, err = op.opClient.GetClusterRoleBinding(o.GetName()) + case *rbacv1.RoleBinding: + fetched, err = op.opClient.GetRoleBinding(namespace, o.GetName()) + case *corev1.ServiceAccount: + fetched, err = op.opClient.GetServiceAccount(namespace, o.GetName()) + case *corev1.Secret: + fetched, err = op.opClient.GetSecret(namespace, o.GetName()) + case *corev1.Service: + fetched, err = op.opClient.GetService(namespace, o.GetName()) + case *corev1.ConfigMap: + fetched, err = op.opClient.GetConfigMap(namespace, o.GetName()) + case *apiextensionsv1beta1.CustomResourceDefinition: + fetched, err = op.opClient.ApiextensionsInterface().ApiextensionsV1beta1().CustomResourceDefinitions().Get(context.TODO(), o.GetName(), getOpts) + case *apiextensionsv1.CustomResourceDefinition: + fetched, err = op.opClient.ApiextensionsInterface().ApiextensionsV1().CustomResourceDefinitions().Get(context.TODO(), o.GetName(), getOpts) + case *v1alpha1.ClusterServiceVersion: + fetched, err = op.client.OperatorsV1alpha1().ClusterServiceVersions(namespace).Get(context.TODO(), o.GetName(), getOpts) + case *unstructured.Unstructured: + // Get the resource from the GVK + gvk := o.GroupVersionKind() + var r metav1.APIResource + r, err = op.apiresourceFromGVK(gvk) + require.NoError(t, err) + + gvr := schema.GroupVersionResource{ + Group: gvk.Group, + Version: gvk.Version, + Resource: r.Name, + } + + if r.Namespaced { + fetched, err = op.dynamicClient.Resource(gvr).Namespace(namespace).Get(context.TODO(), o.GetName(), getOpts) + break + } + + fetched, err = op.dynamicClient.Resource(gvr).Get(context.TODO(), o.GetName(), getOpts) 
+ default: + require.Failf(t, "couldn't find expected object", "%#v", obj) + } + + require.NoError(t, err, "couldn't fetch %s %v", namespace, obj) + require.EqualValues(t, obj, fetched) + } + }) + } } func TestSupportedDynamicResources(t *testing.T) { - tests := []struct { - testName string - resource v1alpha1.StepResource - expectedResult bool - }{ - { - testName: "UnsupportedObject", - resource: v1alpha1.StepResource{ - Kind: "UnsupportedKind", - }, - expectedResult: false, - }, - { - testName: "ServiceMonitorResource", - resource: v1alpha1.StepResource{ - Kind: "ServiceMonitor", - }, - expectedResult: true, - }, - { - testName: "UnsupportedObject", - resource: v1alpha1.StepResource{ - Kind: "PrometheusRule", - }, - expectedResult: true, - }, - } - - for _, tt := range tests { - t.Run(tt.testName, func(t *testing.T) { - require.Equal(t, tt.expectedResult, isSupported(tt.resource.Kind)) - }) - } + tests := []struct { + testName string + resource v1alpha1.StepResource + expectedResult bool + }{ + { + testName: "UnsupportedObject", + resource: v1alpha1.StepResource{ + Kind: "UnsupportedKind", + }, + expectedResult: false, + }, + { + testName: "ServiceMonitorResource", + resource: v1alpha1.StepResource{ + Kind: "ServiceMonitor", + }, + expectedResult: true, + }, + { + testName: "UnsupportedObject", + resource: v1alpha1.StepResource{ + Kind: "PrometheusRule", + }, + expectedResult: true, + }, + } + + for _, tt := range tests { + t.Run(tt.testName, func(t *testing.T) { + require.Equal(t, tt.expectedResult, isSupported(tt.resource.Kind)) + }) + } } func TestExecutePlanDynamicResources(t *testing.T) { - namespace := "ns" - unsupportedYaml := yamlFromFilePath(t, "testdata/unsupportedkind.cr.yaml") - - tests := []struct { - testName string - in *v1alpha1.InstallPlan - err error - }{ - { - testName: "UnsupportedObject", - in: withSteps(installPlan("p", namespace, v1alpha1.InstallPlanPhaseInstalling, "csv"), - []*v1alpha1.Step{ - { - Resource: v1alpha1.StepResource{ - 
CatalogSource: "catalog", - CatalogSourceNamespace: namespace, - Group: "some.unsupported.group", - Version: "v1", - Kind: "UnsupportedKind", - Name: "unsupportedkind", - Manifest: unsupportedYaml, - }, - Status: v1alpha1.StepStatusUnknown, - }, - }, - ), - err: v1alpha1.ErrInvalidInstallPlan, - }, - } - - for _, tt := range tests { - t.Run(tt.testName, func(t *testing.T) { - ctx, cancel := context.WithCancel(context.TODO()) - defer cancel() - - op, err := NewFakeOperator(ctx, namespace, []string{namespace}, withClientObjs(tt.in)) - require.NoError(t, err) - - err = op.ExecutePlan(tt.in) - require.Equal(t, tt.err, err) - }) - } + namespace := "ns" + unsupportedYaml := yamlFromFilePath(t, "testdata/unsupportedkind.cr.yaml") + + tests := []struct { + testName string + in *v1alpha1.InstallPlan + err error + }{ + { + testName: "UnsupportedObject", + in: withSteps(installPlan("p", namespace, v1alpha1.InstallPlanPhaseInstalling, "csv"), + []*v1alpha1.Step{ + { + Resource: v1alpha1.StepResource{ + CatalogSource: "catalog", + CatalogSourceNamespace: namespace, + Group: "some.unsupported.group", + Version: "v1", + Kind: "UnsupportedKind", + Name: "unsupportedkind", + Manifest: unsupportedYaml, + }, + Status: v1alpha1.StepStatusUnknown, + }, + }, + ), + err: v1alpha1.ErrInvalidInstallPlan, + }, + } + + for _, tt := range tests { + t.Run(tt.testName, func(t *testing.T) { + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + op, err := NewFakeOperator(ctx, namespace, []string{namespace}, withClientObjs(tt.in)) + require.NoError(t, err) + + err = op.ExecutePlan(tt.in) + require.Equal(t, tt.err, err) + }) + } } func withStatus(catalogSource v1alpha1.CatalogSource, status v1alpha1.CatalogSourceStatus) *v1alpha1.CatalogSource { - copy := catalogSource.DeepCopy() - copy.Status = status - return copy + copy := catalogSource.DeepCopy() + copy.Status = status + return copy } func TestSyncCatalogSources(t *testing.T) { - clockFake := 
utilclocktesting.NewFakeClock(time.Date(2018, time.January, 26, 20, 40, 0, 0, time.UTC)) - now := metav1.NewTime(clockFake.Now()) - - configmapCatalog := &v1alpha1.CatalogSource{ - ObjectMeta: metav1.ObjectMeta{ - Name: "cool-catalog", - Namespace: "cool-namespace", - UID: types.UID("catalog-uid"), - }, - Spec: v1alpha1.CatalogSourceSpec{ - ConfigMap: "cool-configmap", - SourceType: v1alpha1.SourceTypeInternal, - }, - } - grpcCatalog := &v1alpha1.CatalogSource{ - ObjectMeta: metav1.ObjectMeta{ - Name: "cool-catalog", - Namespace: "cool-namespace", - UID: types.UID("catalog-uid"), - Labels: map[string]string{"olm.catalogSource": "cool-catalog"}, - }, - Spec: v1alpha1.CatalogSourceSpec{ - Image: "catalog-image", - SourceType: v1alpha1.SourceTypeGrpc, - }, - } - tests := []struct { - testName string - namespace string - catalogSource *v1alpha1.CatalogSource - k8sObjs []runtime.Object - configMap *corev1.ConfigMap - expectedStatus *v1alpha1.CatalogSourceStatus - expectedObjs []runtime.Object - expectedError error - existingSources []sourceAddress - }{ - { - testName: "CatalogSourceWithInvalidSourceType", - namespace: "cool-namespace", - catalogSource: &v1alpha1.CatalogSource{ - ObjectMeta: metav1.ObjectMeta{ - Name: "cool-catalog", - Namespace: "cool-namespace", - UID: types.UID("catalog-uid"), - }, - Spec: v1alpha1.CatalogSourceSpec{ - SourceType: "nope", - }, - }, - expectedStatus: &v1alpha1.CatalogSourceStatus{ - Message: "unknown sourcetype: nope", - Reason: v1alpha1.CatalogSourceSpecInvalidError, - }, - }, - { - testName: "CatalogSourceWithBackingConfigMap", - namespace: "cool-namespace", - catalogSource: configmapCatalog, - k8sObjs: []runtime.Object{ - &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: "cool-configmap", - Namespace: "cool-namespace", - UID: types.UID("configmap-uid"), - ResourceVersion: "resource-version", - }, - Data: fakeConfigMapData(), - }, - }, - expectedStatus: &v1alpha1.CatalogSourceStatus{ - ConfigMapResource: 
&v1alpha1.ConfigMapResourceReference{ - Name: "cool-configmap", - Namespace: "cool-namespace", - UID: types.UID("configmap-uid"), - ResourceVersion: "resource-version", - LastUpdateTime: now, - }, - RegistryServiceStatus: nil, - }, - expectedError: nil, - }, - { - testName: "CatalogSourceUpdatedByDifferentCatalogOperator", - namespace: "cool-namespace", - catalogSource: &v1alpha1.CatalogSource{ - ObjectMeta: metav1.ObjectMeta{ - Name: "cool-catalog", - Namespace: "cool-namespace", - UID: types.UID("catalog-uid"), - }, - Spec: v1alpha1.CatalogSourceSpec{ - ConfigMap: "cool-configmap", - SourceType: v1alpha1.SourceTypeConfigmap, - }, - Status: v1alpha1.CatalogSourceStatus{ - ConfigMapResource: &v1alpha1.ConfigMapResourceReference{ - Name: "cool-configmap", - Namespace: "cool-namespace", - UID: types.UID("configmap-uid"), - ResourceVersion: "resource-version", - LastUpdateTime: now, - }, - RegistryServiceStatus: nil, - }, - }, - k8sObjs: []runtime.Object{ - &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: "cool-configmap", - Namespace: "cool-namespace", - UID: types.UID("configmap-uid"), - ResourceVersion: "resource-version", - }, - Data: fakeConfigMapData(), - }, - }, - expectedStatus: &v1alpha1.CatalogSourceStatus{ - ConfigMapResource: &v1alpha1.ConfigMapResourceReference{ - Name: "cool-configmap", - Namespace: "cool-namespace", - UID: types.UID("configmap-uid"), - ResourceVersion: "resource-version", - LastUpdateTime: now, - }, - RegistryServiceStatus: &v1alpha1.RegistryServiceStatus{ - Protocol: "grpc", - ServiceName: "cool-catalog", - ServiceNamespace: "cool-namespace", - Port: "50051", - CreatedAt: now, - }, - }, - expectedError: nil, - }, - { - testName: "CatalogSourceWithMissingConfigMap", - namespace: "cool-namespace", - catalogSource: configmapCatalog, - k8sObjs: []runtime.Object{ - &corev1.ConfigMap{}, - }, - expectedStatus: nil, - expectedError: errors.New("failed to get catalog config map cool-configmap: configmaps \"cool-configmap\" not 
found"), - }, - { - testName: "CatalogSourceWithGrpcImage", - namespace: "cool-namespace", - catalogSource: grpcCatalog, - expectedStatus: &v1alpha1.CatalogSourceStatus{ - RegistryServiceStatus: &v1alpha1.RegistryServiceStatus{ - Protocol: "grpc", - ServiceName: "cool-catalog", - ServiceNamespace: "cool-namespace", - Port: "50051", - CreatedAt: now, - }, - }, - expectedError: nil, - expectedObjs: []runtime.Object{ - pod(*grpcCatalog), - }, - }, - { - testName: "CatalogSourceWithGrpcImage/EnsuresCorrectImage", - namespace: "cool-namespace", - catalogSource: grpcCatalog, - k8sObjs: []runtime.Object{ - pod(v1alpha1.CatalogSource{ - ObjectMeta: metav1.ObjectMeta{ - Name: "cool-catalog", - Namespace: "cool-namespace", - UID: types.UID("catalog-uid"), - Labels: map[string]string{"olm.catalogSource": "cool-catalog"}, - }, - Spec: v1alpha1.CatalogSourceSpec{ - Image: "old-image", - SourceType: v1alpha1.SourceTypeGrpc, - }, - }), - }, - expectedStatus: &v1alpha1.CatalogSourceStatus{ - RegistryServiceStatus: &v1alpha1.RegistryServiceStatus{ - Protocol: "grpc", - ServiceName: "cool-catalog", - ServiceNamespace: "cool-namespace", - Port: "50051", - CreatedAt: now, - }, - }, - expectedError: nil, - expectedObjs: []runtime.Object{ - pod(*grpcCatalog), - }, - }, - { - testName: "CatalogSourceWithGrpcType/EnsuresImageOrAddressIsSet", - namespace: "cool-namespace", - catalogSource: &v1alpha1.CatalogSource{ - ObjectMeta: metav1.ObjectMeta{ - Name: "invalid-spec-catalog", - Namespace: "cool-namespace", - UID: types.UID("catalog-uid"), - Labels: map[string]string{"olm.catalogSource": "invalid-spec-catalog"}, - }, - Spec: v1alpha1.CatalogSourceSpec{ - SourceType: v1alpha1.SourceTypeGrpc, - }, - }, - expectedStatus: &v1alpha1.CatalogSourceStatus{ - Message: fmt.Sprintf("image and address unset: at least one must be set for sourcetype: %s", v1alpha1.SourceTypeGrpc), - Reason: v1alpha1.CatalogSourceSpecInvalidError, - }, - expectedError: nil, - }, - { - testName: 
"CatalogSourceWithInternalType/EnsuresConfigMapIsSet", - namespace: "cool-namespace", - catalogSource: &v1alpha1.CatalogSource{ - ObjectMeta: metav1.ObjectMeta{ - Name: "invalid-spec-catalog", - Namespace: "cool-namespace", - UID: types.UID("catalog-uid"), - Labels: map[string]string{"olm.catalogSource": "invalid-spec-catalog"}, - }, - Spec: v1alpha1.CatalogSourceSpec{ - SourceType: v1alpha1.SourceTypeInternal, - }, - }, - expectedStatus: &v1alpha1.CatalogSourceStatus{ - Message: fmt.Sprintf("configmap name unset: must be set for sourcetype: %s", v1alpha1.SourceTypeInternal), - Reason: v1alpha1.CatalogSourceSpecInvalidError, - }, - expectedError: nil, - }, - { - testName: "CatalogSourceWithConfigMapType/EnsuresConfigMapIsSet", - namespace: "cool-namespace", - catalogSource: &v1alpha1.CatalogSource{ - ObjectMeta: metav1.ObjectMeta{ - Name: "invalid-spec-catalog", - Namespace: "cool-namespace", - UID: types.UID("catalog-uid"), - Labels: map[string]string{"olm.catalogSource": "invalid-spec-catalog"}, - }, - Spec: v1alpha1.CatalogSourceSpec{ - SourceType: v1alpha1.SourceTypeConfigmap, - }, - }, - expectedStatus: &v1alpha1.CatalogSourceStatus{ - Message: fmt.Sprintf("configmap name unset: must be set for sourcetype: %s", v1alpha1.SourceTypeConfigmap), - Reason: v1alpha1.CatalogSourceSpecInvalidError, - }, - expectedError: nil, - }, - { - testName: "GRPCConnectionStateAddressIsUpdated", - namespace: "cool-namespace", - catalogSource: withStatus(*grpcCatalog, v1alpha1.CatalogSourceStatus{ - RegistryServiceStatus: &v1alpha1.RegistryServiceStatus{ - Protocol: "grpc", - ServiceName: "cool-catalog", - ServiceNamespace: "cool-namespace", - Port: "50051", - CreatedAt: now, - }, - GRPCConnectionState: &v1alpha1.GRPCConnectionState{ - Address: "..svc:", // Needs to be updated to cool-catalog.cool-namespace.svc:50051 - }, - }), - k8sObjs: []runtime.Object{ - pod(*grpcCatalog), - service(grpcCatalog.GetName(), grpcCatalog.GetNamespace()), - serviceAccount(grpcCatalog.GetName(), 
grpcCatalog.GetNamespace(), "", objectReference("init secret")), - }, - existingSources: []sourceAddress{ - { - sourceKey: registry.CatalogKey{Name: "cool-catalog", Namespace: "cool-namespace"}, - address: "cool-catalog.cool-namespace.svc:50051", - }, - }, - expectedStatus: &v1alpha1.CatalogSourceStatus{ - RegistryServiceStatus: &v1alpha1.RegistryServiceStatus{ - Protocol: "grpc", - ServiceName: "cool-catalog", - ServiceNamespace: "cool-namespace", - Port: "50051", - CreatedAt: now, - }, - GRPCConnectionState: &v1alpha1.GRPCConnectionState{ - Address: "cool-catalog.cool-namespace.svc:50051", - LastObservedState: "", - LastConnectTime: now, - }, - }, - expectedError: nil, - }, - } - for _, tt := range tests { - t.Run(tt.testName, func(t *testing.T) { - // Create existing objects - clientObjs := []runtime.Object{tt.catalogSource} - - // Create test operator - ctx, cancel := context.WithCancel(context.TODO()) - defer cancel() - - op, err := NewFakeOperator(ctx, tt.namespace, []string{tt.namespace}, withClock(clockFake), withClientObjs(clientObjs...), withK8sObjs(tt.k8sObjs...), withSources(tt.existingSources...)) - require.NoError(t, err) - - // Run sync - err = op.syncCatalogSources(tt.catalogSource) - if tt.expectedError != nil { - require.EqualError(t, err, tt.expectedError.Error()) - } else { - require.NoError(t, err) - } - - // Get updated catalog and check status - updated, err := op.client.OperatorsV1alpha1().CatalogSources(tt.catalogSource.GetNamespace()).Get(context.TODO(), tt.catalogSource.GetName(), metav1.GetOptions{}) - require.NoError(t, err) - require.NotEmpty(t, updated) - - if tt.expectedStatus != nil { - if tt.expectedStatus.GRPCConnectionState != nil { - updated.Status.GRPCConnectionState.LastConnectTime = now - // Ignore LastObservedState difference if an expected LastObservedState is no provided - if tt.expectedStatus.GRPCConnectionState.LastObservedState == "" { - updated.Status.GRPCConnectionState.LastObservedState = "" - } - } - 
require.NotEmpty(t, updated.Status) - require.Equal(t, *tt.expectedStatus, updated.Status) - - if tt.catalogSource.Spec.ConfigMap != "" { - configMap, err := op.opClient.KubernetesInterface().CoreV1().ConfigMaps(tt.catalogSource.GetNamespace()).Get(context.TODO(), tt.catalogSource.Spec.ConfigMap, metav1.GetOptions{}) - require.NoError(t, err) - require.True(t, ownerutil.EnsureOwner(configMap, updated)) - } - } - - for _, o := range tt.expectedObjs { - switch o := o.(type) { - case *corev1.Pod: - t.Log("verifying pod") - pods, err := op.opClient.KubernetesInterface().CoreV1().Pods(tt.catalogSource.Namespace).List(context.TODO(), metav1.ListOptions{}) - require.NoError(t, err) - require.Len(t, pods.Items, 1) - - // set the name to the generated name - o.SetName(pods.Items[0].GetName()) - require.EqualValues(t, o, &pods.Items[0]) - } - } - }) - } + clockFake := utilclocktesting.NewFakeClock(time.Date(2018, time.January, 26, 20, 40, 0, 0, time.UTC)) + now := metav1.NewTime(clockFake.Now()) + + configmapCatalog := &v1alpha1.CatalogSource{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cool-catalog", + Namespace: "cool-namespace", + UID: types.UID("catalog-uid"), + }, + Spec: v1alpha1.CatalogSourceSpec{ + ConfigMap: "cool-configmap", + SourceType: v1alpha1.SourceTypeInternal, + }, + } + grpcCatalog := &v1alpha1.CatalogSource{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cool-catalog", + Namespace: "cool-namespace", + UID: types.UID("catalog-uid"), + Labels: map[string]string{"olm.catalogSource": "cool-catalog"}, + }, + Spec: v1alpha1.CatalogSourceSpec{ + Image: "catalog-image", + SourceType: v1alpha1.SourceTypeGrpc, + }, + } + tests := []struct { + testName string + namespace string + catalogSource *v1alpha1.CatalogSource + k8sObjs []runtime.Object + configMap *corev1.ConfigMap + expectedStatus *v1alpha1.CatalogSourceStatus + expectedObjs []runtime.Object + expectedError error + existingSources []sourceAddress + }{ + { + testName: "CatalogSourceWithInvalidSourceType", + 
namespace: "cool-namespace", + catalogSource: &v1alpha1.CatalogSource{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cool-catalog", + Namespace: "cool-namespace", + UID: types.UID("catalog-uid"), + }, + Spec: v1alpha1.CatalogSourceSpec{ + SourceType: "nope", + }, + }, + expectedStatus: &v1alpha1.CatalogSourceStatus{ + Message: "unknown sourcetype: nope", + Reason: v1alpha1.CatalogSourceSpecInvalidError, + }, + }, + { + testName: "CatalogSourceWithBackingConfigMap", + namespace: "cool-namespace", + catalogSource: configmapCatalog, + k8sObjs: []runtime.Object{ + &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cool-configmap", + Namespace: "cool-namespace", + UID: types.UID("configmap-uid"), + ResourceVersion: "resource-version", + }, + Data: fakeConfigMapData(), + }, + }, + expectedStatus: &v1alpha1.CatalogSourceStatus{ + ConfigMapResource: &v1alpha1.ConfigMapResourceReference{ + Name: "cool-configmap", + Namespace: "cool-namespace", + UID: types.UID("configmap-uid"), + ResourceVersion: "resource-version", + LastUpdateTime: now, + }, + RegistryServiceStatus: nil, + }, + expectedError: nil, + }, + { + testName: "CatalogSourceUpdatedByDifferentCatalogOperator", + namespace: "cool-namespace", + catalogSource: &v1alpha1.CatalogSource{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cool-catalog", + Namespace: "cool-namespace", + UID: types.UID("catalog-uid"), + }, + Spec: v1alpha1.CatalogSourceSpec{ + ConfigMap: "cool-configmap", + SourceType: v1alpha1.SourceTypeConfigmap, + }, + Status: v1alpha1.CatalogSourceStatus{ + ConfigMapResource: &v1alpha1.ConfigMapResourceReference{ + Name: "cool-configmap", + Namespace: "cool-namespace", + UID: types.UID("configmap-uid"), + ResourceVersion: "resource-version", + LastUpdateTime: now, + }, + RegistryServiceStatus: nil, + }, + }, + k8sObjs: []runtime.Object{ + &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cool-configmap", + Namespace: "cool-namespace", + UID: types.UID("configmap-uid"), + ResourceVersion: 
"resource-version", + }, + Data: fakeConfigMapData(), + }, + }, + expectedStatus: &v1alpha1.CatalogSourceStatus{ + ConfigMapResource: &v1alpha1.ConfigMapResourceReference{ + Name: "cool-configmap", + Namespace: "cool-namespace", + UID: types.UID("configmap-uid"), + ResourceVersion: "resource-version", + LastUpdateTime: now, + }, + RegistryServiceStatus: &v1alpha1.RegistryServiceStatus{ + Protocol: "grpc", + ServiceName: "cool-catalog", + ServiceNamespace: "cool-namespace", + Port: "50051", + CreatedAt: now, + }, + }, + expectedError: nil, + }, + { + testName: "CatalogSourceWithMissingConfigMap", + namespace: "cool-namespace", + catalogSource: configmapCatalog, + k8sObjs: []runtime.Object{ + &corev1.ConfigMap{}, + }, + expectedStatus: nil, + expectedError: errors.New("failed to get catalog config map cool-configmap: configmaps \"cool-configmap\" not found"), + }, + { + testName: "CatalogSourceWithGrpcImage", + namespace: "cool-namespace", + catalogSource: grpcCatalog, + expectedStatus: &v1alpha1.CatalogSourceStatus{ + RegistryServiceStatus: &v1alpha1.RegistryServiceStatus{ + Protocol: "grpc", + ServiceName: "cool-catalog", + ServiceNamespace: "cool-namespace", + Port: "50051", + CreatedAt: now, + }, + }, + expectedError: nil, + expectedObjs: []runtime.Object{ + pod(*grpcCatalog), + }, + }, + { + testName: "CatalogSourceWithGrpcImage/EnsuresCorrectImage", + namespace: "cool-namespace", + catalogSource: grpcCatalog, + k8sObjs: []runtime.Object{ + pod(v1alpha1.CatalogSource{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cool-catalog", + Namespace: "cool-namespace", + UID: types.UID("catalog-uid"), + Labels: map[string]string{"olm.catalogSource": "cool-catalog"}, + }, + Spec: v1alpha1.CatalogSourceSpec{ + Image: "old-image", + SourceType: v1alpha1.SourceTypeGrpc, + }, + }), + }, + expectedStatus: &v1alpha1.CatalogSourceStatus{ + RegistryServiceStatus: &v1alpha1.RegistryServiceStatus{ + Protocol: "grpc", + ServiceName: "cool-catalog", + ServiceNamespace: "cool-namespace", + 
Port: "50051", + CreatedAt: now, + }, + }, + expectedError: nil, + expectedObjs: []runtime.Object{ + pod(*grpcCatalog), + }, + }, + { + testName: "CatalogSourceWithGrpcType/EnsuresImageOrAddressIsSet", + namespace: "cool-namespace", + catalogSource: &v1alpha1.CatalogSource{ + ObjectMeta: metav1.ObjectMeta{ + Name: "invalid-spec-catalog", + Namespace: "cool-namespace", + UID: types.UID("catalog-uid"), + Labels: map[string]string{"olm.catalogSource": "invalid-spec-catalog"}, + }, + Spec: v1alpha1.CatalogSourceSpec{ + SourceType: v1alpha1.SourceTypeGrpc, + }, + }, + expectedStatus: &v1alpha1.CatalogSourceStatus{ + Message: fmt.Sprintf("image and address unset: at least one must be set for sourcetype: %s", v1alpha1.SourceTypeGrpc), + Reason: v1alpha1.CatalogSourceSpecInvalidError, + }, + expectedError: nil, + }, + { + testName: "CatalogSourceWithInternalType/EnsuresConfigMapIsSet", + namespace: "cool-namespace", + catalogSource: &v1alpha1.CatalogSource{ + ObjectMeta: metav1.ObjectMeta{ + Name: "invalid-spec-catalog", + Namespace: "cool-namespace", + UID: types.UID("catalog-uid"), + Labels: map[string]string{"olm.catalogSource": "invalid-spec-catalog"}, + }, + Spec: v1alpha1.CatalogSourceSpec{ + SourceType: v1alpha1.SourceTypeInternal, + }, + }, + expectedStatus: &v1alpha1.CatalogSourceStatus{ + Message: fmt.Sprintf("configmap name unset: must be set for sourcetype: %s", v1alpha1.SourceTypeInternal), + Reason: v1alpha1.CatalogSourceSpecInvalidError, + }, + expectedError: nil, + }, + { + testName: "CatalogSourceWithConfigMapType/EnsuresConfigMapIsSet", + namespace: "cool-namespace", + catalogSource: &v1alpha1.CatalogSource{ + ObjectMeta: metav1.ObjectMeta{ + Name: "invalid-spec-catalog", + Namespace: "cool-namespace", + UID: types.UID("catalog-uid"), + Labels: map[string]string{"olm.catalogSource": "invalid-spec-catalog"}, + }, + Spec: v1alpha1.CatalogSourceSpec{ + SourceType: v1alpha1.SourceTypeConfigmap, + }, + }, + expectedStatus: &v1alpha1.CatalogSourceStatus{ + 
Message: fmt.Sprintf("configmap name unset: must be set for sourcetype: %s", v1alpha1.SourceTypeConfigmap), + Reason: v1alpha1.CatalogSourceSpecInvalidError, + }, + expectedError: nil, + }, + { + testName: "GRPCConnectionStateAddressIsUpdated", + namespace: "cool-namespace", + catalogSource: withStatus(*grpcCatalog, v1alpha1.CatalogSourceStatus{ + RegistryServiceStatus: &v1alpha1.RegistryServiceStatus{ + Protocol: "grpc", + ServiceName: "cool-catalog", + ServiceNamespace: "cool-namespace", + Port: "50051", + CreatedAt: now, + }, + GRPCConnectionState: &v1alpha1.GRPCConnectionState{ + Address: "..svc:", // Needs to be updated to cool-catalog.cool-namespace.svc:50051 + }, + }), + k8sObjs: []runtime.Object{ + pod(*grpcCatalog), + service(grpcCatalog.GetName(), grpcCatalog.GetNamespace()), + serviceAccount(grpcCatalog.GetName(), grpcCatalog.GetNamespace(), "", objectReference("init secret")), + }, + existingSources: []sourceAddress{ + { + sourceKey: registry.CatalogKey{Name: "cool-catalog", Namespace: "cool-namespace"}, + address: "cool-catalog.cool-namespace.svc:50051", + }, + }, + expectedStatus: &v1alpha1.CatalogSourceStatus{ + RegistryServiceStatus: &v1alpha1.RegistryServiceStatus{ + Protocol: "grpc", + ServiceName: "cool-catalog", + ServiceNamespace: "cool-namespace", + Port: "50051", + CreatedAt: now, + }, + GRPCConnectionState: &v1alpha1.GRPCConnectionState{ + Address: "cool-catalog.cool-namespace.svc:50051", + LastObservedState: "", + LastConnectTime: now, + }, + }, + expectedError: nil, + }, + } + for _, tt := range tests { + t.Run(tt.testName, func(t *testing.T) { + // Create existing objects + clientObjs := []runtime.Object{tt.catalogSource} + + // Create test operator + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + op, err := NewFakeOperator(ctx, tt.namespace, []string{tt.namespace}, withClock(clockFake), withClientObjs(clientObjs...), withK8sObjs(tt.k8sObjs...), withSources(tt.existingSources...)) + require.NoError(t, err) + + // 
Run sync + err = op.syncCatalogSources(tt.catalogSource) + if tt.expectedError != nil { + require.EqualError(t, err, tt.expectedError.Error()) + } else { + require.NoError(t, err) + } + + // Get updated catalog and check status + updated, err := op.client.OperatorsV1alpha1().CatalogSources(tt.catalogSource.GetNamespace()).Get(context.TODO(), tt.catalogSource.GetName(), metav1.GetOptions{}) + require.NoError(t, err) + require.NotEmpty(t, updated) + + if tt.expectedStatus != nil { + if tt.expectedStatus.GRPCConnectionState != nil { + updated.Status.GRPCConnectionState.LastConnectTime = now + // Ignore LastObservedState difference if an expected LastObservedState is not provided + if tt.expectedStatus.GRPCConnectionState.LastObservedState == "" { + updated.Status.GRPCConnectionState.LastObservedState = "" + } + } + require.NotEmpty(t, updated.Status) + require.Equal(t, *tt.expectedStatus, updated.Status) + + if tt.catalogSource.Spec.ConfigMap != "" { + configMap, err := op.opClient.KubernetesInterface().CoreV1().ConfigMaps(tt.catalogSource.GetNamespace()).Get(context.TODO(), tt.catalogSource.Spec.ConfigMap, metav1.GetOptions{}) + require.NoError(t, err) + require.True(t, ownerutil.EnsureOwner(configMap, updated)) + } + } + + for _, o := range tt.expectedObjs { + switch o := o.(type) { + case *corev1.Pod: + t.Log("verifying pod") + pods, err := op.opClient.KubernetesInterface().CoreV1().Pods(tt.catalogSource.Namespace).List(context.TODO(), metav1.ListOptions{}) + require.NoError(t, err) + require.Len(t, pods.Items, 1) + + // set the name to the generated name + o.SetName(pods.Items[0].GetName()) + require.EqualValues(t, o, &pods.Items[0]) + } + } + }) + } }
Namespace: testNamespace, - }, - } - - type fields struct { - clientOptions []clientfake.Option - resolveErr error - existingOLMObjs []runtime.Object - } - tests := []struct { - name string - fields fields - wantSubscriptions []*v1alpha1.Subscription - wantErr error - }{ - { - name: "NoError", - fields: fields{ - clientOptions: []clientfake.Option{clientfake.WithSelfLinks(t)}, - existingOLMObjs: []runtime.Object{ - &v1alpha1.Subscription{ - TypeMeta: metav1.TypeMeta{ - Kind: v1alpha1.SubscriptionKind, - APIVersion: v1alpha1.SchemeGroupVersion.String(), - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "sub", - Namespace: testNamespace, - }, - Spec: &v1alpha1.SubscriptionSpec{ - CatalogSource: "src", - CatalogSourceNamespace: testNamespace, - }, - Status: v1alpha1.SubscriptionStatus{ - CurrentCSV: "", - State: "", - }, - }, - }, - }, - wantSubscriptions: []*v1alpha1.Subscription{ - { - TypeMeta: metav1.TypeMeta{ - Kind: v1alpha1.SubscriptionKind, - APIVersion: v1alpha1.SchemeGroupVersion.String(), - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "sub", - Namespace: testNamespace, - }, - Spec: &v1alpha1.SubscriptionSpec{ - CatalogSource: "src", - CatalogSourceNamespace: testNamespace, - }, - Status: v1alpha1.SubscriptionStatus{ - CurrentCSV: "", - State: "", - }, - }, - }, - }, - { - name: "NotSatisfiableError", - fields: fields{ - clientOptions: []clientfake.Option{clientfake.WithSelfLinks(t)}, - existingOLMObjs: []runtime.Object{ - &v1alpha1.Subscription{ - TypeMeta: metav1.TypeMeta{ - Kind: v1alpha1.SubscriptionKind, - APIVersion: v1alpha1.SchemeGroupVersion.String(), - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "sub", - Namespace: testNamespace, - }, - Spec: &v1alpha1.SubscriptionSpec{ - CatalogSource: "src", - CatalogSourceNamespace: testNamespace, - }, - Status: v1alpha1.SubscriptionStatus{ - CurrentCSV: "", - State: "", - }, - }, - }, - resolveErr: solver.NotSatisfiable{ - { - Variable: resolver.NewSubscriptionVariable("a", nil), - Constraint: 
resolver.PrettyConstraint(solver.Mandatory(), "something"), - }, - }, - }, - wantSubscriptions: []*v1alpha1.Subscription{ - { - TypeMeta: metav1.TypeMeta{ - Kind: v1alpha1.SubscriptionKind, - APIVersion: v1alpha1.SchemeGroupVersion.String(), - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "sub", - Namespace: testNamespace, - }, - Spec: &v1alpha1.SubscriptionSpec{ - CatalogSource: "src", - CatalogSourceNamespace: testNamespace, - }, - Status: v1alpha1.SubscriptionStatus{ - CurrentCSV: "", - State: "", - Conditions: []v1alpha1.SubscriptionCondition{ - { - Type: v1alpha1.SubscriptionResolutionFailed, - Reason: "ConstraintsNotSatisfiable", - Message: "constraints not satisfiable: something", - Status: corev1.ConditionTrue, - }, - }, - LastUpdated: now, - }, - }, - }, - }, - { - name: "OtherError", - fields: fields{ - clientOptions: []clientfake.Option{clientfake.WithSelfLinks(t)}, - existingOLMObjs: []runtime.Object{ - &v1alpha1.ClusterServiceVersion{ - ObjectMeta: metav1.ObjectMeta{ - Name: "csv.v.1", - Namespace: testNamespace, - }, - Status: v1alpha1.ClusterServiceVersionStatus{ - Phase: v1alpha1.CSVPhaseSucceeded, - }, - }, - &v1alpha1.Subscription{ - TypeMeta: metav1.TypeMeta{ - Kind: v1alpha1.SubscriptionKind, - APIVersion: v1alpha1.SchemeGroupVersion.String(), - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "sub", - Namespace: testNamespace, - }, - Spec: &v1alpha1.SubscriptionSpec{ - CatalogSource: "src", - CatalogSourceNamespace: testNamespace, - }, - Status: v1alpha1.SubscriptionStatus{ - CurrentCSV: "", - State: "", - }, - }, - }, - resolveErr: fmt.Errorf("some error"), - }, - wantSubscriptions: []*v1alpha1.Subscription{ - { - TypeMeta: metav1.TypeMeta{ - Kind: v1alpha1.SubscriptionKind, - APIVersion: v1alpha1.SchemeGroupVersion.String(), - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "sub", - Namespace: testNamespace, - }, - Spec: &v1alpha1.SubscriptionSpec{ - CatalogSource: "src", - CatalogSourceNamespace: testNamespace, - }, - Status: 
v1alpha1.SubscriptionStatus{ - CurrentCSV: "", - State: "", - Conditions: []v1alpha1.SubscriptionCondition{ - { - Type: v1alpha1.SubscriptionResolutionFailed, - Reason: "ErrorPreventedResolution", - Message: "some error", - Status: corev1.ConditionTrue, - }, - }, - LastUpdated: now, - }, - }, - }, - wantErr: fmt.Errorf("some error"), - }, - { - name: "HadErrorShouldClearError", - fields: fields{ - clientOptions: []clientfake.Option{clientfake.WithSelfLinks(t)}, - existingOLMObjs: []runtime.Object{ - &v1alpha1.Subscription{ - TypeMeta: metav1.TypeMeta{ - Kind: v1alpha1.SubscriptionKind, - APIVersion: v1alpha1.SchemeGroupVersion.String(), - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "sub", - Namespace: testNamespace, - }, - Spec: &v1alpha1.SubscriptionSpec{ - CatalogSource: "src", - CatalogSourceNamespace: testNamespace, - }, - Status: v1alpha1.SubscriptionStatus{ - InstalledCSV: "sub-csv", - State: "AtLatestKnown", - Conditions: []v1alpha1.SubscriptionCondition{ - { - Type: v1alpha1.SubscriptionResolutionFailed, - Reason: "ConstraintsNotSatisfiable", - Message: "constraints not satisfiable: no operators found from catalog src in namespace testNamespace referenced by subscrition sub, subscription sub exists", - Status: corev1.ConditionTrue, - }, - }, - }, - }, - }, - resolveErr: nil, - }, - wantSubscriptions: []*v1alpha1.Subscription{ - { - TypeMeta: metav1.TypeMeta{ - Kind: v1alpha1.SubscriptionKind, - APIVersion: v1alpha1.SchemeGroupVersion.String(), - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "sub", - Namespace: testNamespace, - }, - Spec: &v1alpha1.SubscriptionSpec{ - CatalogSource: "src", - CatalogSourceNamespace: testNamespace, - }, - Status: v1alpha1.SubscriptionStatus{ - InstalledCSV: "sub-csv", - State: "AtLatestKnown", - LastUpdated: now, - }, - }, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - // Create test operator - ctx, cancel := context.WithCancel(context.TODO()) - defer cancel() - - o, err := 
NewFakeOperator(ctx, testNamespace, []string{testNamespace}, withClock(clockFake), withClientObjs(append(tt.fields.existingOLMObjs, og)...), withFakeClientOptions(tt.fields.clientOptions...)) - require.NoError(t, err) - - o.reconciler = &fakes.FakeRegistryReconcilerFactory{ - ReconcilerForSourceStub: func(source *v1alpha1.CatalogSource) reconciler.RegistryReconciler { - return &fakes.FakeRegistryReconciler{ - EnsureRegistryServerStub: func(source *v1alpha1.CatalogSource) error { - return nil - }, - } - }, - } - - o.resolver = &fakes.FakeStepResolver{ - ResolveStepsStub: func(string) ([]*v1alpha1.Step, []v1alpha1.BundleLookup, []*v1alpha1.Subscription, error) { - return nil, nil, nil, tt.fields.resolveErr - }, - } - - namespace := &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: testNamespace, - }, - } - - err = o.syncResolvingNamespace(namespace) - if tt.wantErr != nil { - require.Equal(t, tt.wantErr, err) - } else { - require.NoError(t, err) - } - - for _, s := range tt.wantSubscriptions { - fetched, err := o.client.OperatorsV1alpha1().Subscriptions(testNamespace).Get(context.TODO(), s.GetName(), metav1.GetOptions{}) - require.NoError(t, err) - require.Equal(t, s, fetched) - } - }) - } + clockFake := utilclocktesting.NewFakeClock(time.Date(2018, time.January, 26, 20, 40, 0, 0, time.UTC)) + now := metav1.NewTime(clockFake.Now()) + testNamespace := "testNamespace" + og := &operatorsv1.OperatorGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: "og", + Namespace: testNamespace, + }, + } + + type fields struct { + clientOptions []clientfake.Option + resolveErr error + existingOLMObjs []runtime.Object + } + tests := []struct { + name string + fields fields + wantSubscriptions []*v1alpha1.Subscription + wantErr error + }{ + { + name: "NoError", + fields: fields{ + clientOptions: []clientfake.Option{clientfake.WithSelfLinks(t)}, + existingOLMObjs: []runtime.Object{ + &v1alpha1.Subscription{ + TypeMeta: metav1.TypeMeta{ + Kind: v1alpha1.SubscriptionKind, + 
APIVersion: v1alpha1.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "sub", + Namespace: testNamespace, + }, + Spec: &v1alpha1.SubscriptionSpec{ + CatalogSource: "src", + CatalogSourceNamespace: testNamespace, + }, + Status: v1alpha1.SubscriptionStatus{ + CurrentCSV: "", + State: "", + }, + }, + }, + }, + wantSubscriptions: []*v1alpha1.Subscription{ + { + TypeMeta: metav1.TypeMeta{ + Kind: v1alpha1.SubscriptionKind, + APIVersion: v1alpha1.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "sub", + Namespace: testNamespace, + }, + Spec: &v1alpha1.SubscriptionSpec{ + CatalogSource: "src", + CatalogSourceNamespace: testNamespace, + }, + Status: v1alpha1.SubscriptionStatus{ + CurrentCSV: "", + State: "", + }, + }, + }, + }, + { + name: "NotSatisfiableError", + fields: fields{ + clientOptions: []clientfake.Option{clientfake.WithSelfLinks(t)}, + existingOLMObjs: []runtime.Object{ + &v1alpha1.Subscription{ + TypeMeta: metav1.TypeMeta{ + Kind: v1alpha1.SubscriptionKind, + APIVersion: v1alpha1.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "sub", + Namespace: testNamespace, + }, + Spec: &v1alpha1.SubscriptionSpec{ + CatalogSource: "src", + CatalogSourceNamespace: testNamespace, + }, + Status: v1alpha1.SubscriptionStatus{ + CurrentCSV: "", + State: "", + }, + }, + }, + resolveErr: solver.NotSatisfiable{ + { + Variable: resolver.NewSubscriptionVariable("a", nil), + Constraint: resolver.PrettyConstraint(solver.Mandatory(), "something"), + }, + }, + }, + wantSubscriptions: []*v1alpha1.Subscription{ + { + TypeMeta: metav1.TypeMeta{ + Kind: v1alpha1.SubscriptionKind, + APIVersion: v1alpha1.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "sub", + Namespace: testNamespace, + }, + Spec: &v1alpha1.SubscriptionSpec{ + CatalogSource: "src", + CatalogSourceNamespace: testNamespace, + }, + Status: v1alpha1.SubscriptionStatus{ + CurrentCSV: "", + State: "", + Conditions: 
[]v1alpha1.SubscriptionCondition{ + { + Type: v1alpha1.SubscriptionResolutionFailed, + Reason: "ConstraintsNotSatisfiable", + Message: "constraints not satisfiable: something", + Status: corev1.ConditionTrue, + }, + }, + LastUpdated: now, + }, + }, + }, + }, + { + name: "OtherError", + fields: fields{ + clientOptions: []clientfake.Option{clientfake.WithSelfLinks(t)}, + existingOLMObjs: []runtime.Object{ + &v1alpha1.ClusterServiceVersion{ + ObjectMeta: metav1.ObjectMeta{ + Name: "csv.v.1", + Namespace: testNamespace, + }, + Status: v1alpha1.ClusterServiceVersionStatus{ + Phase: v1alpha1.CSVPhaseSucceeded, + }, + }, + &v1alpha1.Subscription{ + TypeMeta: metav1.TypeMeta{ + Kind: v1alpha1.SubscriptionKind, + APIVersion: v1alpha1.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "sub", + Namespace: testNamespace, + }, + Spec: &v1alpha1.SubscriptionSpec{ + CatalogSource: "src", + CatalogSourceNamespace: testNamespace, + }, + Status: v1alpha1.SubscriptionStatus{ + CurrentCSV: "", + State: "", + }, + }, + }, + resolveErr: fmt.Errorf("some error"), + }, + wantSubscriptions: []*v1alpha1.Subscription{ + { + TypeMeta: metav1.TypeMeta{ + Kind: v1alpha1.SubscriptionKind, + APIVersion: v1alpha1.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "sub", + Namespace: testNamespace, + }, + Spec: &v1alpha1.SubscriptionSpec{ + CatalogSource: "src", + CatalogSourceNamespace: testNamespace, + }, + Status: v1alpha1.SubscriptionStatus{ + CurrentCSV: "", + State: "", + Conditions: []v1alpha1.SubscriptionCondition{ + { + Type: v1alpha1.SubscriptionResolutionFailed, + Reason: "ErrorPreventedResolution", + Message: "some error", + Status: corev1.ConditionTrue, + }, + }, + LastUpdated: now, + }, + }, + }, + wantErr: fmt.Errorf("some error"), + }, + { + name: "HadErrorShouldClearError", + fields: fields{ + clientOptions: []clientfake.Option{clientfake.WithSelfLinks(t)}, + existingOLMObjs: []runtime.Object{ + &v1alpha1.Subscription{ + TypeMeta: 
metav1.TypeMeta{ + Kind: v1alpha1.SubscriptionKind, + APIVersion: v1alpha1.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "sub", + Namespace: testNamespace, + }, + Spec: &v1alpha1.SubscriptionSpec{ + CatalogSource: "src", + CatalogSourceNamespace: testNamespace, + }, + Status: v1alpha1.SubscriptionStatus{ + InstalledCSV: "sub-csv", + State: "AtLatestKnown", + Conditions: []v1alpha1.SubscriptionCondition{ + { + Type: v1alpha1.SubscriptionResolutionFailed, + Reason: "ConstraintsNotSatisfiable", + Message: "constraints not satisfiable: no operators found from catalog src in namespace testNamespace referenced by subscrition sub, subscription sub exists", + Status: corev1.ConditionTrue, + }, + }, + }, + }, + }, + resolveErr: nil, + }, + wantSubscriptions: []*v1alpha1.Subscription{ + { + TypeMeta: metav1.TypeMeta{ + Kind: v1alpha1.SubscriptionKind, + APIVersion: v1alpha1.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "sub", + Namespace: testNamespace, + }, + Spec: &v1alpha1.SubscriptionSpec{ + CatalogSource: "src", + CatalogSourceNamespace: testNamespace, + }, + Status: v1alpha1.SubscriptionStatus{ + InstalledCSV: "sub-csv", + State: "AtLatestKnown", + LastUpdated: now, + }, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Create test operator + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + o, err := NewFakeOperator(ctx, testNamespace, []string{testNamespace}, withClock(clockFake), withClientObjs(append(tt.fields.existingOLMObjs, og)...), withFakeClientOptions(tt.fields.clientOptions...)) + require.NoError(t, err) + + o.reconciler = &fakes.FakeRegistryReconcilerFactory{ + ReconcilerForSourceStub: func(source *v1alpha1.CatalogSource) reconciler.RegistryReconciler { + return &fakes.FakeRegistryReconciler{ + EnsureRegistryServerStub: func(source *v1alpha1.CatalogSource) error { + return nil + }, + } + }, + } + + o.resolver = &fakes.FakeStepResolver{ + 
ResolveStepsStub: func(string) ([]*v1alpha1.Step, []v1alpha1.BundleLookup, []*v1alpha1.Subscription, error) { + return nil, nil, nil, tt.fields.resolveErr + }, + } + + namespace := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: testNamespace, + }, + } + + err = o.syncResolvingNamespace(namespace) + if tt.wantErr != nil { + require.Equal(t, tt.wantErr, err) + } else { + require.NoError(t, err) + } + + for _, s := range tt.wantSubscriptions { + fetched, err := o.client.OperatorsV1alpha1().Subscriptions(testNamespace).Get(context.TODO(), s.GetName(), metav1.GetOptions{}) + require.NoError(t, err) + require.Equal(t, s, fetched) + } + }) + } } func TestCompetingCRDOwnersExist(t *testing.T) { - t.Parallel() - - testNamespace := "default" - tests := []struct { - name string - csv *v1alpha1.ClusterServiceVersion - existingCRDOwners map[string][]string - expectedErr error - expectedResult bool - }{ - { - name: "NoCompetingOwnersExist", - csv: csv("turkey", testNamespace, []string{"feathers"}, nil), - existingCRDOwners: nil, - expectedErr: nil, - expectedResult: false, - }, - { - name: "OnlyCompetingWithSelf", - csv: csv("turkey", testNamespace, []string{"feathers"}, nil), - existingCRDOwners: map[string][]string{ - "feathers": {"turkey"}, - }, - expectedErr: nil, - expectedResult: false, - }, - { - name: "CompetingOwnersExist", - csv: csv("turkey", testNamespace, []string{"feathers"}, nil), - existingCRDOwners: map[string][]string{ - "feathers": {"seagull"}, - }, - expectedErr: nil, - expectedResult: true, - }, - { - name: "CompetingOwnerExistsOnSecondCRD", - csv: csv("turkey", testNamespace, []string{"feathers", "beak"}, nil), - existingCRDOwners: map[string][]string{ - "milk": {"cow"}, - "beak": {"squid"}, - }, - expectedErr: nil, - expectedResult: true, - }, - { - name: "MoreThanOneCompetingOwnerExists", - csv: csv("turkey", testNamespace, []string{"feathers"}, nil), - existingCRDOwners: map[string][]string{ - "feathers": {"seagull", "turkey"}, - }, - 
expectedErr: olmerrors.NewMultipleExistingCRDOwnersError([]string{"seagull", "turkey"}, "feathers", testNamespace), - expectedResult: true, - }, - } - for _, xt := range tests { - tt := xt - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - - competing, err := competingCRDOwnersExist(testNamespace, tt.csv, tt.existingCRDOwners) - - // Assert the error is as expected - if tt.expectedErr == nil { - require.Nil(t, err) - } else { - require.Equal(t, tt.expectedErr, err) - } - - require.Equal(t, competing, tt.expectedResult) - }) - } + t.Parallel() + + testNamespace := "default" + tests := []struct { + name string + csv *v1alpha1.ClusterServiceVersion + existingCRDOwners map[string][]string + expectedErr error + expectedResult bool + }{ + { + name: "NoCompetingOwnersExist", + csv: csv("turkey", testNamespace, []string{"feathers"}, nil), + existingCRDOwners: nil, + expectedErr: nil, + expectedResult: false, + }, + { + name: "OnlyCompetingWithSelf", + csv: csv("turkey", testNamespace, []string{"feathers"}, nil), + existingCRDOwners: map[string][]string{ + "feathers": {"turkey"}, + }, + expectedErr: nil, + expectedResult: false, + }, + { + name: "CompetingOwnersExist", + csv: csv("turkey", testNamespace, []string{"feathers"}, nil), + existingCRDOwners: map[string][]string{ + "feathers": {"seagull"}, + }, + expectedErr: nil, + expectedResult: true, + }, + { + name: "CompetingOwnerExistsOnSecondCRD", + csv: csv("turkey", testNamespace, []string{"feathers", "beak"}, nil), + existingCRDOwners: map[string][]string{ + "milk": {"cow"}, + "beak": {"squid"}, + }, + expectedErr: nil, + expectedResult: true, + }, + { + name: "MoreThanOneCompetingOwnerExists", + csv: csv("turkey", testNamespace, []string{"feathers"}, nil), + existingCRDOwners: map[string][]string{ + "feathers": {"seagull", "turkey"}, + }, + expectedErr: olmerrors.NewMultipleExistingCRDOwnersError([]string{"seagull", "turkey"}, "feathers", testNamespace), + expectedResult: true, + }, + } + for _, xt := range tests { 
+ tt := xt + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + competing, err := competingCRDOwnersExist(testNamespace, tt.csv, tt.existingCRDOwners) + + // Assert the error is as expected + if tt.expectedErr == nil { + require.Nil(t, err) + } else { + require.Equal(t, tt.expectedErr, err) + } + + require.Equal(t, competing, tt.expectedResult) + }) + } } -func TestValidateExistingCRs(t *testing.T) { - unstructuredForFile := func(file string) *unstructured.Unstructured { - data, err := os.ReadFile(file) - require.NoError(t, err) - dec := utilyaml.NewYAMLOrJSONDecoder(strings.NewReader(string(data)), 30) - k8sFile := &unstructured.Unstructured{} - require.NoError(t, dec.Decode(k8sFile)) - return k8sFile - } - - unversionedCRDForV1beta1File := func(file string) *apiextensions.CustomResourceDefinition { - data, err := os.ReadFile(file) - require.NoError(t, err) - dec := utilyaml.NewYAMLOrJSONDecoder(strings.NewReader(string(data)), 30) - k8sFile := &apiextensionsv1beta1.CustomResourceDefinition{} - require.NoError(t, dec.Decode(k8sFile)) - convertedCRD := &apiextensions.CustomResourceDefinition{} - require.NoError(t, apiextensionsv1beta1.Convert_v1beta1_CustomResourceDefinition_To_apiextensions_CustomResourceDefinition(k8sFile, convertedCRD, nil)) - return convertedCRD - } - - tests := []struct { - name string - existingObjects []runtime.Object - gvr schema.GroupVersionResource - newCRD *apiextensions.CustomResourceDefinition - want error - }{ - { - name: "label validation", - existingObjects: []runtime.Object{ - unstructuredForFile("testdata/hivebug/cr.yaml"), - }, - gvr: schema.GroupVersionResource{ - Group: "hive.openshift.io", - Version: "v1", - Resource: "machinepools", - }, - newCRD: unversionedCRDForV1beta1File("testdata/hivebug/crd.yaml"), - }, - { - name: "fail validation", - existingObjects: []runtime.Object{ - unstructuredForFile("testdata/hivebug/fail.yaml"), - }, - gvr: schema.GroupVersionResource{ - Group: "hive.openshift.io", - Version: "v1", - 
Resource: "machinepools", - }, - newCRD: unversionedCRDForV1beta1File("testdata/hivebug/crd.yaml"), - want: fmt.Errorf("error validating hive.openshift.io/v1, Kind=MachinePool \"test\": updated validation is too restrictive: [[].spec.clusterDeploymentRef: Invalid value: \"null\": spec.clusterDeploymentRef in body must be of type object: \"null\", [].spec.name: Required value, [].spec.platform: Required value]"), - }, - { - name: "crd with incorrect comparison", - existingObjects: []runtime.Object{ - unstructuredForFile("testdata/postgrestolerations/pgadmin.cr.yaml"), - }, - gvr: schema.GroupVersionResource{ - Group: "postgres-operator.crunchydata.com", - Version: "v1beta1", - Resource: "pgadmins", - }, - newCRD: unversionedCRDForV1beta1File("testdata/postgrestolerations/crd.yaml"), - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - client := fakedynamic.NewSimpleDynamicClientWithCustomListKinds(runtime.NewScheme(), map[schema.GroupVersionResource]string{ - tt.gvr: "UnstructuredList", - }, tt.existingObjects...) 
- require.Equal(t, tt.want, validateExistingCRs(client, tt.gvr, tt.newCRD)) - }) - } +func TestValidateV1Beta1CRDCompatibility(t *testing.T) { + unstructuredForFile := func(file string) *unstructured.Unstructured { + data, err := os.ReadFile(file) + require.NoError(t, err) + dec := utilyaml.NewYAMLOrJSONDecoder(strings.NewReader(string(data)), 30) + k8sFile := &unstructured.Unstructured{} + require.NoError(t, dec.Decode(k8sFile)) + return k8sFile + } + + unversionedCRDForV1beta1File := func(file string) *apiextensionsv1beta1.CustomResourceDefinition { + data, err := os.ReadFile(file) + require.NoError(t, err) + dec := utilyaml.NewYAMLOrJSONDecoder(strings.NewReader(string(data)), 30) + k8sFile := &apiextensionsv1beta1.CustomResourceDefinition{} + require.NoError(t, dec.Decode(k8sFile)) + return k8sFile + } + + tests := []struct { + name string + existingObjects []runtime.Object + gvr schema.GroupVersionResource + oldCRD *apiextensionsv1beta1.CustomResourceDefinition + newCRD *apiextensionsv1beta1.CustomResourceDefinition + want error + }{ + { + name: "label validation", + existingObjects: []runtime.Object{ + unstructuredForFile("testdata/hivebug/cr.yaml"), + }, + gvr: schema.GroupVersionResource{ + Group: "hive.openshift.io", + Version: "v1", + Resource: "machinepools", + }, + oldCRD: unversionedCRDForV1beta1File("testdata/hivebug/crd.yaml"), + newCRD: unversionedCRDForV1beta1File("testdata/hivebug/crd.yaml"), + }, + { + name: "fail validation", + existingObjects: []runtime.Object{ + unstructuredForFile("testdata/hivebug/fail.yaml"), + }, + gvr: schema.GroupVersionResource{ + Group: "hive.openshift.io", + Version: "v1", + Resource: "machinepools", + }, + oldCRD: unversionedCRDForV1beta1File("testdata/hivebug/crd.yaml"), + newCRD: unversionedCRDForV1beta1File("testdata/hivebug/crd.yaml"), + want: fmt.Errorf("error validating hive.openshift.io/v1, Kind=MachinePool \"test\": updated validation is too restrictive: [[].spec.clusterDeploymentRef: Invalid value: \"null\": 
spec.clusterDeploymentRef in body must be of type object: \"null\", [].spec.name: Required value, [].spec.platform: Required value]"), + }, + { + name: "backwards incompatible change", + existingObjects: []runtime.Object{ + unstructuredForFile("testdata/apiextensionsv1beta1/cr.yaml"), + }, + gvr: schema.GroupVersionResource{ + Group: "cluster.com", + Version: "v1alpha1", + Resource: "testcrd", + }, + oldCRD: unversionedCRDForV1beta1File("testdata/apiextensionsv1beta1/crd.old.yaml"), + newCRD: unversionedCRDForV1beta1File("testdata/apiextensionsv1beta1/crd.yaml"), + want: fmt.Errorf("error validating cluster.com/v1alpha1, Kind=testcrd \"my-cr-1\": updated validation is too restrictive: [].spec.scalar: Invalid value: 2: spec.scalar in body should be greater than or equal to 3"), + }, + { + name: "unserved version", + existingObjects: []runtime.Object{ + unstructuredForFile("testdata/apiextensionsv1beta1/cr.yaml"), + unstructuredForFile("testdata/apiextensionsv1beta1/cr.v2.yaml"), + }, + gvr: schema.GroupVersionResource{ + Group: "cluster.com", + Version: "v1alpha1", + Resource: "testcrd", + }, + oldCRD: unversionedCRDForV1beta1File("testdata/apiextensionsv1beta1/crd.old.yaml"), + newCRD: unversionedCRDForV1beta1File("testdata/apiextensionsv1beta1/crd.unserved.yaml"), + }, + { + name: "cr not validated against currently unserved version", + existingObjects: []runtime.Object{ + unstructuredForFile("testdata/apiextensionsv1beta1/cr.yaml"), + unstructuredForFile("testdata/apiextensionsv1beta1/cr.v2.yaml"), + }, + oldCRD: unversionedCRDForV1beta1File("testdata/apiextensionsv1beta1/crd.unserved.yaml"), + newCRD: unversionedCRDForV1beta1File("testdata/apiextensionsv1beta1/crd.yaml"), + }, + { + name: "crd with no versions list", + existingObjects: []runtime.Object{ + unstructuredForFile("testdata/apiextensionsv1beta1/cr.yaml"), + unstructuredForFile("testdata/apiextensionsv1beta1/cr.v2.yaml"), + }, + oldCRD: 
unversionedCRDForV1beta1File("testdata/apiextensionsv1beta1/crd.no-versions-list.old.yaml"), + newCRD: unversionedCRDForV1beta1File("testdata/apiextensionsv1beta1/crd.no-versions-list.yaml"), + want: fmt.Errorf("error validating cluster.com/v1alpha1, Kind=testcrd \"my-cr-1\": updated validation is too restrictive: [].spec.scalar: Invalid value: 2: spec.scalar in body should be greater than or equal to 3"), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + client := fakedynamic.NewSimpleDynamicClient(runtime.NewScheme(), tt.existingObjects...) + require.Equal(t, tt.want, validateV1Beta1CRDCompatibility(client, tt.oldCRD, tt.newCRD)) + }) + } +} + +func TestValidateV1CRDCompatibility(t *testing.T) { + unstructuredForFile := func(file string) *unstructured.Unstructured { + data, err := os.ReadFile(file) + require.NoError(t, err) + dec := utilyaml.NewYAMLOrJSONDecoder(strings.NewReader(string(data)), 30) + k8sFile := &unstructured.Unstructured{} + require.NoError(t, dec.Decode(k8sFile)) + return k8sFile + } + + unversionedCRDForV1File := func(file string) *apiextensionsv1.CustomResourceDefinition { + data, err := os.ReadFile(file) + require.NoError(t, err) + dec := utilyaml.NewYAMLOrJSONDecoder(strings.NewReader(string(data)), 30) + k8sFile := &apiextensionsv1.CustomResourceDefinition{} + require.NoError(t, dec.Decode(k8sFile)) + return k8sFile + } + + tests := []struct { + name string + existingCRs []runtime.Object + gvr schema.GroupVersionResource + oldCRD *apiextensionsv1.CustomResourceDefinition + newCRD *apiextensionsv1.CustomResourceDefinition + want error + }{ + { + name: "valid", + existingCRs: []runtime.Object{ + unstructuredForFile("testdata/apiextensionsv1/crontabs.cr.valid.v1.yaml"), + unstructuredForFile("testdata/apiextensionsv1/crontabs.cr.valid.v2.yaml"), + }, + oldCRD: unversionedCRDForV1File("testdata/apiextensionsv1/crontabs.crd.old.yaml"), + newCRD: unversionedCRDForV1File("testdata/apiextensionsv1/crontabs.crd.yaml"), + 
}, + { + name: "validation failure", + existingCRs: []runtime.Object{ + unstructuredForFile("testdata/apiextensionsv1/crontabs.cr.valid.v1.yaml"), + unstructuredForFile("testdata/apiextensionsv1/crontabs.cr.fail.v2.yaml"), + }, + oldCRD: unversionedCRDForV1File("testdata/apiextensionsv1/crontabs.crd.old.yaml"), + newCRD: unversionedCRDForV1File("testdata/apiextensionsv1/crontabs.crd.yaml"), + want: fmt.Errorf("error validating stable.example.com/v2, Kind=CronTab \"my-crontab\": updated validation is too restrictive: [].spec.replicas: Invalid value: 10: spec.replicas in body should be less than or equal to 9"), + }, + { + name: "cr not invalidated by unserved version", + existingCRs: []runtime.Object{ + unstructuredForFile("testdata/apiextensionsv1/crontabs.cr.valid.v1.yaml"), + unstructuredForFile("testdata/apiextensionsv1/crontabs.cr.valid.v2.yaml"), + }, + oldCRD: unversionedCRDForV1File("testdata/apiextensionsv1/crontabs.crd.old.yaml"), + newCRD: unversionedCRDForV1File("testdata/apiextensionsv1/crontabs.crd.unserved.yaml"), + }, + { + name: "cr not validated against currently unserved version", + existingCRs: []runtime.Object{ + unstructuredForFile("testdata/apiextensionsv1/crontabs.cr.valid.v1.yaml"), + unstructuredForFile("testdata/apiextensionsv1/crontabs.cr.valid.v2.yaml"), + }, + oldCRD: unversionedCRDForV1File("testdata/apiextensionsv1/crontabs.crd.old.unserved.yaml"), + newCRD: unversionedCRDForV1File("testdata/apiextensionsv1/crontabs.crd.yaml"), + }, + { + name: "validation failure with single CRD version", + existingCRs: []runtime.Object{ + unstructuredForFile("testdata/apiextensionsv1/single-version-cr.yaml"), + }, + oldCRD: unversionedCRDForV1File("testdata/apiextensionsv1/single-version-crd.old.yaml"), + newCRD: unversionedCRDForV1File("testdata/apiextensionsv1/single-version-crd.yaml"), + want: fmt.Errorf("error validating cluster.com/v1alpha1, Kind=testcrd \"my-cr-1\": updated validation is too restrictive: [].spec.scalar: Invalid value: 100: 
spec.scalar in body should be less than or equal to 50"), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + client := fakedynamic.NewSimpleDynamicClient(runtime.NewScheme(), tt.existingCRs...) + require.Equal(t, tt.want, validateV1CRDCompatibility(client, tt.oldCRD, tt.newCRD)) + }) + } } func TestSyncRegistryServer(t *testing.T) { - namespace := "ns" - - tests := []struct { - testName string - err error - catSrc *v1alpha1.CatalogSource - clientObjs []runtime.Object - }{ - { - testName: "EmptyRegistryPoll", - err: fmt.Errorf("empty polling interval; cannot requeue registry server sync without a provided polling interval"), - catSrc: &v1alpha1.CatalogSource{ - Spec: v1alpha1.CatalogSourceSpec{ - UpdateStrategy: &v1alpha1.UpdateStrategy{ - RegistryPoll: &v1alpha1.RegistryPoll{}, - }, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.testName, func(t *testing.T) { - ctx, cancel := context.WithCancel(context.TODO()) - defer cancel() - - tt.clientObjs = append(tt.clientObjs, tt.catSrc) - op, err := NewFakeOperator(ctx, namespace, []string{namespace}, withClientObjs(tt.clientObjs...)) - require.NoError(t, err) - - op.reconciler = &fakes.FakeRegistryReconcilerFactory{ - ReconcilerForSourceStub: func(source *v1alpha1.CatalogSource) reconciler.RegistryReconciler { - return &fakes.FakeRegistryReconciler{ - EnsureRegistryServerStub: func(source *v1alpha1.CatalogSource) error { - return nil - }, - } - }, - } - require.NotPanics(t, func() { - _, _, err = op.syncRegistryServer(logrus.NewEntry(op.logger), tt.catSrc) - }) - require.Equal(t, tt.err, err) - }) - } + namespace := "ns" + + tests := []struct { + testName string + err error + catSrc *v1alpha1.CatalogSource + clientObjs []runtime.Object + }{ + { + testName: "EmptyRegistryPoll", + err: fmt.Errorf("empty polling interval; cannot requeue registry server sync without a provided polling interval"), + catSrc: &v1alpha1.CatalogSource{ + Spec: v1alpha1.CatalogSourceSpec{ + UpdateStrategy: 
&v1alpha1.UpdateStrategy{ + RegistryPoll: &v1alpha1.RegistryPoll{}, + }, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.testName, func(t *testing.T) { + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + tt.clientObjs = append(tt.clientObjs, tt.catSrc) + op, err := NewFakeOperator(ctx, namespace, []string{namespace}, withClientObjs(tt.clientObjs...)) + require.NoError(t, err) + + op.reconciler = &fakes.FakeRegistryReconcilerFactory{ + ReconcilerForSourceStub: func(source *v1alpha1.CatalogSource) reconciler.RegistryReconciler { + return &fakes.FakeRegistryReconciler{ + EnsureRegistryServerStub: func(source *v1alpha1.CatalogSource) error { + return nil + }, + } + }, + } + require.NotPanics(t, func() { + _, _, err = op.syncRegistryServer(logrus.NewEntry(op.logger), tt.catSrc) + }) + require.Equal(t, tt.err, err) + }) + } } func fakeConfigMapData() map[string]string { - data := make(map[string]string) - yaml, err := yaml.Marshal([]apiextensionsv1beta1.CustomResourceDefinition{crd("fake-crd")}) - if err != nil { - return data - } - - data["customResourceDefinitions"] = string(yaml) - return data + data := make(map[string]string) + yaml, err := yaml.Marshal([]apiextensionsv1beta1.CustomResourceDefinition{crd("fake-crd")}) + if err != nil { + return data + } + + data["customResourceDefinitions"] = string(yaml) + return data } // fakeOperatorConfig is the configuration for a fake operator. 
type fakeOperatorConfig struct { - clock utilclock.Clock - clientObjs []runtime.Object - k8sObjs []runtime.Object - extObjs []runtime.Object - regObjs []runtime.Object - clientOptions []clientfake.Option - logger *logrus.Logger - resolver resolver.StepResolver - recorder record.EventRecorder - reconciler reconciler.RegistryReconcilerFactory - bundleUnpacker bundle.Unpacker - sources []sourceAddress + clock utilclock.Clock + clientObjs []runtime.Object + k8sObjs []runtime.Object + extObjs []runtime.Object + regObjs []runtime.Object + clientOptions []clientfake.Option + logger *logrus.Logger + resolver resolver.StepResolver + recorder record.EventRecorder + reconciler reconciler.RegistryReconcilerFactory + bundleUnpacker bundle.Unpacker + sources []sourceAddress } // fakeOperatorOption applies an option to the given fake operator configuration. type fakeOperatorOption func(*fakeOperatorConfig) func withResolver(res resolver.StepResolver) fakeOperatorOption { - return func(config *fakeOperatorConfig) { - config.resolver = res - } + return func(config *fakeOperatorConfig) { + config.resolver = res + } } func withBundleUnpacker(bundleUnpacker bundle.Unpacker) fakeOperatorOption { - return func(config *fakeOperatorConfig) { - config.bundleUnpacker = bundleUnpacker - } + return func(config *fakeOperatorConfig) { + config.bundleUnpacker = bundleUnpacker + } } func withSources(sources ...sourceAddress) fakeOperatorOption { - return func(config *fakeOperatorConfig) { - config.sources = sources - } + return func(config *fakeOperatorConfig) { + config.sources = sources + } } func withClock(clock utilclock.Clock) fakeOperatorOption { - return func(config *fakeOperatorConfig) { - config.clock = clock - } + return func(config *fakeOperatorConfig) { + config.clock = clock + } } func withClientObjs(clientObjs ...runtime.Object) fakeOperatorOption { - return func(config *fakeOperatorConfig) { - config.clientObjs = clientObjs - } + return func(config *fakeOperatorConfig) { + 
config.clientObjs = clientObjs + } } func withK8sObjs(k8sObjs ...runtime.Object) fakeOperatorOption { - return func(config *fakeOperatorConfig) { - config.k8sObjs = k8sObjs - } + return func(config *fakeOperatorConfig) { + config.k8sObjs = k8sObjs + } } func withExtObjs(extObjs ...runtime.Object) fakeOperatorOption { - return func(config *fakeOperatorConfig) { - config.extObjs = extObjs - } + return func(config *fakeOperatorConfig) { + config.extObjs = extObjs + } } func withFakeClientOptions(options ...clientfake.Option) fakeOperatorOption { - return func(config *fakeOperatorConfig) { - config.clientOptions = options - } + return func(config *fakeOperatorConfig) { + config.clientOptions = options + } } type sourceAddress struct { - address string - sourceKey registry.CatalogKey + address string + sourceKey registry.CatalogKey } // NewFakeOperator creates a new operator using fake clients. func NewFakeOperator(ctx context.Context, namespace string, namespaces []string, fakeOptions ...fakeOperatorOption) (*Operator, error) { - // Apply options to default config - config := &fakeOperatorConfig{ - logger: logrus.StandardLogger(), - clock: utilclock.RealClock{}, - resolver: &fakes.FakeStepResolver{}, - recorder: &record.FakeRecorder{}, - bundleUnpacker: &bundlefakes.FakeUnpacker{}, - } - for _, option := range fakeOptions { - option(config) - } - - // Create client fakes - clientFake := fake.NewReactionForwardingClientsetDecorator(config.clientObjs, config.clientOptions...) - // TODO: Using the ReactionForwardingClientsetDecorator for k8s objects causes issues with adding Resources for discovery. - // For now, directly use a SimpleClientset instead. - k8sClientFake := k8sfake.NewSimpleClientset(config.k8sObjs...) 
- k8sClientFake.Resources = apiResourcesForObjects(append(config.extObjs, config.regObjs...)) - opClientFake := operatorclient.NewClient(k8sClientFake, apiextensionsfake.NewSimpleClientset(config.extObjs...), apiregistrationfake.NewSimpleClientset(config.regObjs...)) - dynamicClientFake := fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()) - - // Create operator namespace - _, err := opClientFake.KubernetesInterface().CoreV1().Namespaces().Create(context.TODO(), &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}, metav1.CreateOptions{}) - if err != nil { - return nil, err - } - - wakeupInterval := 5 * time.Minute - lister := operatorlister.NewLister() - var sharedInformers []cache.SharedIndexInformer - for _, ns := range namespaces { - if ns != namespace { - _, err := opClientFake.KubernetesInterface().CoreV1().Namespaces().Create(context.TODO(), &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns}}, metav1.CreateOptions{}) - if err != nil { - return nil, err - } - } - } - - // Create informers and register listers - operatorsFactory := externalversions.NewSharedInformerFactoryWithOptions(clientFake, wakeupInterval, externalversions.WithNamespace(metav1.NamespaceAll)) - catsrcInformer := operatorsFactory.Operators().V1alpha1().CatalogSources() - subInformer := operatorsFactory.Operators().V1alpha1().Subscriptions() - ipInformer := operatorsFactory.Operators().V1alpha1().InstallPlans() - csvInformer := operatorsFactory.Operators().V1alpha1().ClusterServiceVersions() - ogInformer := operatorsFactory.Operators().V1().OperatorGroups() - sharedInformers = append(sharedInformers, catsrcInformer.Informer(), subInformer.Informer(), ipInformer.Informer(), csvInformer.Informer(), ogInformer.Informer()) - - lister.OperatorsV1alpha1().RegisterCatalogSourceLister(metav1.NamespaceAll, catsrcInformer.Lister()) - lister.OperatorsV1alpha1().RegisterSubscriptionLister(metav1.NamespaceAll, subInformer.Lister()) - 
lister.OperatorsV1alpha1().RegisterInstallPlanLister(metav1.NamespaceAll, ipInformer.Lister()) - lister.OperatorsV1alpha1().RegisterClusterServiceVersionLister(metav1.NamespaceAll, csvInformer.Lister()) - lister.OperatorsV1().RegisterOperatorGroupLister(metav1.NamespaceAll, ogInformer.Lister()) - - factory := informers.NewSharedInformerFactoryWithOptions(opClientFake.KubernetesInterface(), wakeupInterval, informers.WithNamespace(metav1.NamespaceAll)) - roleInformer := factory.Rbac().V1().Roles() - roleBindingInformer := factory.Rbac().V1().RoleBindings() - serviceAccountInformer := factory.Core().V1().ServiceAccounts() - serviceInformer := factory.Core().V1().Services() - podInformer := factory.Core().V1().Pods() - configMapInformer := factory.Core().V1().ConfigMaps() - sharedInformers = append(sharedInformers, roleInformer.Informer(), roleBindingInformer.Informer(), serviceAccountInformer.Informer(), serviceInformer.Informer(), podInformer.Informer(), configMapInformer.Informer()) - - lister.RbacV1().RegisterRoleLister(metav1.NamespaceAll, roleInformer.Lister()) - lister.RbacV1().RegisterRoleBindingLister(metav1.NamespaceAll, roleBindingInformer.Lister()) - lister.CoreV1().RegisterServiceAccountLister(metav1.NamespaceAll, serviceAccountInformer.Lister()) - lister.CoreV1().RegisterServiceLister(metav1.NamespaceAll, serviceInformer.Lister()) - lister.CoreV1().RegisterPodLister(metav1.NamespaceAll, podInformer.Lister()) - lister.CoreV1().RegisterConfigMapLister(metav1.NamespaceAll, configMapInformer.Lister()) - logger := logrus.New() - - // Create the new operator - queueOperator, err := queueinformer.NewOperator(opClientFake.KubernetesInterface().Discovery()) - if err != nil { - return nil, fmt.Errorf("failed to create queueinformer operator: %w", err) - } - for _, informer := range sharedInformers { - queueOperator.RegisterInformer(informer) - } - - op := &Operator{ - Operator: queueOperator, - clock: config.clock, - logger: config.logger, - opClient: opClientFake, 
- dynamicClient: dynamicClientFake, - client: clientFake, - lister: lister, - namespace: namespace, - nsResolveQueue: workqueue.NewNamedRateLimitingQueue( - workqueue.NewMaxOfRateLimiter( - workqueue.NewItemExponentialFailureRateLimiter(1*time.Second, 1000*time.Second), - // 1 qps, 100 bucket size. This is only for retry speed and its only the overall factor (not per item) - &workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(1), 100)}, - ), "resolver"), - resolver: config.resolver, - reconciler: config.reconciler, - recorder: config.recorder, - clientAttenuator: scoped.NewClientAttenuator(logger, &rest.Config{}, opClientFake), - serviceAccountQuerier: scoped.NewUserDefinedServiceAccountQuerier(logger, clientFake), - bundleUnpacker: config.bundleUnpacker, - catsrcQueueSet: queueinformer.NewEmptyResourceQueueSet(), - clientFactory: &stubClientFactory{ - operatorClient: opClientFake, - kubernetesClient: clientFake, - dynamicClient: dynamicClientFake, - }, - } - op.sources = grpc.NewSourceStore(config.logger, 1*time.Second, 5*time.Second, op.syncSourceState) - if op.reconciler == nil { - s := runtime.NewScheme() - err := k8sfake.AddToScheme(s) - if err != nil { - return nil, err - } - applier := controllerclient.NewFakeApplier(s, "testowner") - - op.reconciler = reconciler.NewRegistryReconcilerFactory(lister, op.opClient, "test:pod", op.now, applier, 1001) - } - - op.RunInformers(ctx) - op.sources.Start(ctx) - for _, source := range config.sources { - op.sources.Add(source.sourceKey, source.address) - } - - if ok := cache.WaitForCacheSync(ctx.Done(), op.HasSynced); !ok { - return nil, fmt.Errorf("failed to wait for caches to sync") - } - - return op, nil + // Apply options to default config + config := &fakeOperatorConfig{ + logger: logrus.StandardLogger(), + clock: utilclock.RealClock{}, + resolver: &fakes.FakeStepResolver{}, + recorder: &record.FakeRecorder{}, + bundleUnpacker: &bundlefakes.FakeUnpacker{}, + } + for _, option := range fakeOptions { + 
option(config) + } + + // Create client fakes + clientFake := fake.NewReactionForwardingClientsetDecorator(config.clientObjs, config.clientOptions...) + // TODO: Using the ReactionForwardingClientsetDecorator for k8s objects causes issues with adding Resources for discovery. + // For now, directly use a SimpleClientset instead. + k8sClientFake := k8sfake.NewSimpleClientset(config.k8sObjs...) + k8sClientFake.Resources = apiResourcesForObjects(append(config.extObjs, config.regObjs...)) + opClientFake := operatorclient.NewClient(k8sClientFake, apiextensionsfake.NewSimpleClientset(config.extObjs...), apiregistrationfake.NewSimpleClientset(config.regObjs...)) + dynamicClientFake := fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()) + + // Create operator namespace + _, err := opClientFake.KubernetesInterface().CoreV1().Namespaces().Create(context.TODO(), &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}, metav1.CreateOptions{}) + if err != nil { + return nil, err + } + + wakeupInterval := 5 * time.Minute + lister := operatorlister.NewLister() + var sharedInformers []cache.SharedIndexInformer + for _, ns := range namespaces { + if ns != namespace { + _, err := opClientFake.KubernetesInterface().CoreV1().Namespaces().Create(context.TODO(), &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns}}, metav1.CreateOptions{}) + if err != nil { + return nil, err + } + } + } + + // Create informers and register listers + operatorsFactory := externalversions.NewSharedInformerFactoryWithOptions(clientFake, wakeupInterval, externalversions.WithNamespace(metav1.NamespaceAll)) + catsrcInformer := operatorsFactory.Operators().V1alpha1().CatalogSources() + subInformer := operatorsFactory.Operators().V1alpha1().Subscriptions() + ipInformer := operatorsFactory.Operators().V1alpha1().InstallPlans() + csvInformer := operatorsFactory.Operators().V1alpha1().ClusterServiceVersions() + ogInformer := operatorsFactory.Operators().V1().OperatorGroups() + sharedInformers = 
append(sharedInformers, catsrcInformer.Informer(), subInformer.Informer(), ipInformer.Informer(), csvInformer.Informer(), ogInformer.Informer()) + + lister.OperatorsV1alpha1().RegisterCatalogSourceLister(metav1.NamespaceAll, catsrcInformer.Lister()) + lister.OperatorsV1alpha1().RegisterSubscriptionLister(metav1.NamespaceAll, subInformer.Lister()) + lister.OperatorsV1alpha1().RegisterInstallPlanLister(metav1.NamespaceAll, ipInformer.Lister()) + lister.OperatorsV1alpha1().RegisterClusterServiceVersionLister(metav1.NamespaceAll, csvInformer.Lister()) + lister.OperatorsV1().RegisterOperatorGroupLister(metav1.NamespaceAll, ogInformer.Lister()) + + factory := informers.NewSharedInformerFactoryWithOptions(opClientFake.KubernetesInterface(), wakeupInterval, informers.WithNamespace(metav1.NamespaceAll)) + roleInformer := factory.Rbac().V1().Roles() + roleBindingInformer := factory.Rbac().V1().RoleBindings() + serviceAccountInformer := factory.Core().V1().ServiceAccounts() + serviceInformer := factory.Core().V1().Services() + podInformer := factory.Core().V1().Pods() + configMapInformer := factory.Core().V1().ConfigMaps() + sharedInformers = append(sharedInformers, roleInformer.Informer(), roleBindingInformer.Informer(), serviceAccountInformer.Informer(), serviceInformer.Informer(), podInformer.Informer(), configMapInformer.Informer()) + + lister.RbacV1().RegisterRoleLister(metav1.NamespaceAll, roleInformer.Lister()) + lister.RbacV1().RegisterRoleBindingLister(metav1.NamespaceAll, roleBindingInformer.Lister()) + lister.CoreV1().RegisterServiceAccountLister(metav1.NamespaceAll, serviceAccountInformer.Lister()) + lister.CoreV1().RegisterServiceLister(metav1.NamespaceAll, serviceInformer.Lister()) + lister.CoreV1().RegisterPodLister(metav1.NamespaceAll, podInformer.Lister()) + lister.CoreV1().RegisterConfigMapLister(metav1.NamespaceAll, configMapInformer.Lister()) + logger := logrus.New() + + // Create the new operator + queueOperator, err := 
queueinformer.NewOperator(opClientFake.KubernetesInterface().Discovery()) + if err != nil { + return nil, fmt.Errorf("failed to create queueinformer operator: %w", err) + } + for _, informer := range sharedInformers { + queueOperator.RegisterInformer(informer) + } + + op := &Operator{ + Operator: queueOperator, + clock: config.clock, + logger: config.logger, + opClient: opClientFake, + dynamicClient: dynamicClientFake, + client: clientFake, + lister: lister, + namespace: namespace, + nsResolveQueue: workqueue.NewNamedRateLimitingQueue( + workqueue.NewMaxOfRateLimiter( + workqueue.NewItemExponentialFailureRateLimiter(1*time.Second, 1000*time.Second), + // 1 qps, 100 bucket size. This is only for retry speed and its only the overall factor (not per item) + &workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(1), 100)}, + ), "resolver"), + resolver: config.resolver, + reconciler: config.reconciler, + recorder: config.recorder, + clientAttenuator: scoped.NewClientAttenuator(logger, &rest.Config{}, opClientFake), + serviceAccountQuerier: scoped.NewUserDefinedServiceAccountQuerier(logger, clientFake), + bundleUnpacker: config.bundleUnpacker, + catsrcQueueSet: queueinformer.NewEmptyResourceQueueSet(), + clientFactory: &stubClientFactory{ + operatorClient: opClientFake, + kubernetesClient: clientFake, + dynamicClient: dynamicClientFake, + }, + } + op.sources = grpc.NewSourceStore(config.logger, 1*time.Second, 5*time.Second, op.syncSourceState) + if op.reconciler == nil { + s := runtime.NewScheme() + err := k8sfake.AddToScheme(s) + if err != nil { + return nil, err + } + applier := controllerclient.NewFakeApplier(s, "testowner") + + op.reconciler = reconciler.NewRegistryReconcilerFactory(lister, op.opClient, "test:pod", op.now, applier, 1001) + } + + op.RunInformers(ctx) + op.sources.Start(ctx) + for _, source := range config.sources { + op.sources.Add(source.sourceKey, source.address) + } + + if ok := cache.WaitForCacheSync(ctx.Done(), op.HasSynced); !ok { + 
return nil, fmt.Errorf("failed to wait for caches to sync") + } + + return op, nil } func installPlan(name, namespace string, phase v1alpha1.InstallPlanPhase, names ...string) *v1alpha1.InstallPlan { - return &v1alpha1.InstallPlan{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - }, - Spec: v1alpha1.InstallPlanSpec{ - ClusterServiceVersionNames: names, - }, - Status: v1alpha1.InstallPlanStatus{ - Phase: phase, - Plan: []*v1alpha1.Step{}, - }, - } + return &v1alpha1.InstallPlan{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: v1alpha1.InstallPlanSpec{ + ClusterServiceVersionNames: names, + }, + Status: v1alpha1.InstallPlanStatus{ + Phase: phase, + Plan: []*v1alpha1.Step{}, + }, + } } func withSteps(plan *v1alpha1.InstallPlan, steps []*v1alpha1.Step) *v1alpha1.InstallPlan { - plan.Status.Plan = steps - return plan + plan.Status.Plan = steps + return plan } func csv(name, namespace string, owned, required []string) *v1alpha1.ClusterServiceVersion { - requiredCRDDescs := make([]v1alpha1.CRDDescription, 0) - for _, name := range required { - requiredCRDDescs = append(requiredCRDDescs, v1alpha1.CRDDescription{Name: name, Version: "v1", Kind: name}) - } - if len(requiredCRDDescs) == 0 { - requiredCRDDescs = nil - } - - ownedCRDDescs := make([]v1alpha1.CRDDescription, 0) - for _, name := range owned { - ownedCRDDescs = append(ownedCRDDescs, v1alpha1.CRDDescription{Name: name, Version: "v1", Kind: name}) - } - if len(ownedCRDDescs) == 0 { - ownedCRDDescs = nil - } - - return &v1alpha1.ClusterServiceVersion{ - TypeMeta: metav1.TypeMeta{ - Kind: csvKind, - APIVersion: "operators.coreos.com/v1alpha1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - }, - Spec: v1alpha1.ClusterServiceVersionSpec{ - CustomResourceDefinitions: v1alpha1.CustomResourceDefinitions{ - Owned: ownedCRDDescs, - Required: requiredCRDDescs, - }, - }, - } + requiredCRDDescs := make([]v1alpha1.CRDDescription, 0) + for 
_, name := range required { + requiredCRDDescs = append(requiredCRDDescs, v1alpha1.CRDDescription{Name: name, Version: "v1", Kind: name}) + } + if len(requiredCRDDescs) == 0 { + requiredCRDDescs = nil + } + + ownedCRDDescs := make([]v1alpha1.CRDDescription, 0) + for _, name := range owned { + ownedCRDDescs = append(ownedCRDDescs, v1alpha1.CRDDescription{Name: name, Version: "v1", Kind: name}) + } + if len(ownedCRDDescs) == 0 { + ownedCRDDescs = nil + } + + return &v1alpha1.ClusterServiceVersion{ + TypeMeta: metav1.TypeMeta{ + Kind: csvKind, + APIVersion: "operators.coreos.com/v1alpha1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: v1alpha1.ClusterServiceVersionSpec{ + CustomResourceDefinitions: v1alpha1.CustomResourceDefinitions{ + Owned: ownedCRDDescs, + Required: requiredCRDDescs, + }, + }, + } } func crd(name string) apiextensionsv1beta1.CustomResourceDefinition { - return apiextensionsv1beta1.CustomResourceDefinition{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - }, - Spec: apiextensionsv1beta1.CustomResourceDefinitionSpec{ - Group: name + "group", - Version: "v1", - Names: apiextensionsv1beta1.CustomResourceDefinitionNames{ - Kind: name, - }, - }, - } + return apiextensionsv1beta1.CustomResourceDefinition{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Spec: apiextensionsv1beta1.CustomResourceDefinitionSpec{ + Group: name + "group", + Version: "v1", + Names: apiextensionsv1beta1.CustomResourceDefinitionNames{ + Kind: name, + }, + }, + } } func service(name, namespace string) *corev1.Service { - return &corev1.Service{ - TypeMeta: metav1.TypeMeta{ - Kind: serviceKind, - APIVersion: "", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - }, - } + return &corev1.Service{ + TypeMeta: metav1.TypeMeta{ + Kind: serviceKind, + APIVersion: "", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + } } func secret(name, namespace string) *corev1.Secret { - return 
&corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - }, - } + return &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + } } func serviceAccount(name, namespace, generateName string, secretRef *corev1.ObjectReference) *corev1.ServiceAccount { - if secretRef == nil { - return &corev1.ServiceAccount{ - TypeMeta: metav1.TypeMeta{Kind: serviceAccountKind, APIVersion: ""}, - ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace, GenerateName: generateName}, - } - } - return &corev1.ServiceAccount{ - TypeMeta: metav1.TypeMeta{Kind: serviceAccountKind, APIVersion: ""}, - ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace, GenerateName: generateName}, - Secrets: []corev1.ObjectReference{*secretRef}, - } + if secretRef == nil { + return &corev1.ServiceAccount{ + TypeMeta: metav1.TypeMeta{Kind: serviceAccountKind, APIVersion: ""}, + ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace, GenerateName: generateName}, + } + } + return &corev1.ServiceAccount{ + TypeMeta: metav1.TypeMeta{Kind: serviceAccountKind, APIVersion: ""}, + ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace, GenerateName: generateName}, + Secrets: []corev1.ObjectReference{*secretRef}, + } } func configMap(name, namespace string) *corev1.ConfigMap { - return &corev1.ConfigMap{ - TypeMeta: metav1.TypeMeta{Kind: configMapKind}, - ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace}, - } + return &corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{Kind: configMapKind}, + ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace}, + } } func objectReference(name string) *corev1.ObjectReference { - if name == "" { - return &corev1.ObjectReference{} - } - return &corev1.ObjectReference{Name: name} + if name == "" { + return &corev1.ObjectReference{} + } + return &corev1.ObjectReference{Name: name} } func yamlFromFilePath(t *testing.T, fileName string) string { - yaml, err := 
os.ReadFile(fileName) - require.NoError(t, err) + yaml, err := os.ReadFile(fileName) + require.NoError(t, err) - return string(yaml) + return string(yaml) } func toManifest(t *testing.T, obj runtime.Object) string { - raw, err := json.Marshal(obj) - require.NoError(t, err) + raw, err := json.Marshal(obj) + require.NoError(t, err) - return string(raw) + return string(raw) } func pod(s v1alpha1.CatalogSource) *corev1.Pod { - pod := reconciler.Pod(&s, "registry-server", s.Spec.Image, s.GetName(), s.GetLabels(), s.GetAnnotations(), 5, 10, 1001) - ownerutil.AddOwner(pod, &s, false, true) - return pod + pod := reconciler.Pod(&s, "registry-server", s.Spec.Image, s.GetName(), s.GetLabels(), s.GetAnnotations(), 5, 10, 1001) + ownerutil.AddOwner(pod, &s, false, true) + return pod } func decodeFile(t *testing.T, file string, to runtime.Object) runtime.Object { - manifest := yamlFromFilePath(t, file) - dec := utilyaml.NewYAMLOrJSONDecoder(strings.NewReader(manifest), 10) - require.NoError(t, dec.Decode(to)) + manifest := yamlFromFilePath(t, file) + dec := utilyaml.NewYAMLOrJSONDecoder(strings.NewReader(manifest), 10) + require.NoError(t, dec.Decode(to)) - return to + return to } type modifierFunc func(t *testing.T, obj runtime.Object) runtime.Object func modify(t *testing.T, obj runtime.Object, modifiers ...modifierFunc) runtime.Object { - o := obj.DeepCopyObject() - for _, modifier := range modifiers { - o = modifier(t, o) - } + o := obj.DeepCopyObject() + for _, modifier := range modifiers { + o = modifier(t, o) + } - return o + return o } type metaModifierFunc func(m metav1.Object) func modifyMeta(mf metaModifierFunc) modifierFunc { - return func(t *testing.T, obj runtime.Object) runtime.Object { - accessor, err := meta.Accessor(obj) - require.NoError(t, err) + return func(t *testing.T, obj runtime.Object) runtime.Object { + accessor, err := meta.Accessor(obj) + require.NoError(t, err) - mf(accessor) + mf(accessor) - return obj - } + return obj + } } func 
withNamespace(namespace string) modifierFunc { - return modifyMeta(func(m metav1.Object) { - m.SetNamespace(namespace) - }) + return modifyMeta(func(m metav1.Object) { + m.SetNamespace(namespace) + }) } func withOwner(owner ownerutil.Owner) modifierFunc { - return modifyMeta(func(m metav1.Object) { - ownerutil.AddNonBlockingOwner(m, owner) - }) + return modifyMeta(func(m metav1.Object) { + ownerutil.AddNonBlockingOwner(m, owner) + }) } func apiResourcesForObjects(objs []runtime.Object) []*metav1.APIResourceList { - apis := []*metav1.APIResourceList{} - for _, o := range objs { - switch o := o.(type) { - case *apiextensionsv1beta1.CustomResourceDefinition: - crd := o - apis = append(apis, &metav1.APIResourceList{ - GroupVersion: metav1.GroupVersion{Group: crd.Spec.Group, Version: crd.Spec.Versions[0].Name}.String(), - APIResources: []metav1.APIResource{ - { - Name: crd.GetName(), - SingularName: crd.Spec.Names.Singular, - Namespaced: crd.Spec.Scope == apiextensionsv1beta1.NamespaceScoped, - Group: crd.Spec.Group, - Version: crd.Spec.Versions[0].Name, - Kind: crd.Spec.Names.Kind, - }, - }, - }) - case *apiregistrationv1.APIService: - a := o - names := strings.Split(a.Name, ".") - apis = append(apis, &metav1.APIResourceList{ - GroupVersion: metav1.GroupVersion{Group: names[1], Version: a.Spec.Version}.String(), - APIResources: []metav1.APIResource{ - { - Name: names[1], - Group: names[1], - Version: a.Spec.Version, - Kind: names[1] + "Kind", - }, - }, - }) - } - } - return apis + apis := []*metav1.APIResourceList{} + for _, o := range objs { + switch o := o.(type) { + case *apiextensionsv1beta1.CustomResourceDefinition: + crd := o + apis = append(apis, &metav1.APIResourceList{ + GroupVersion: metav1.GroupVersion{Group: crd.Spec.Group, Version: crd.Spec.Versions[0].Name}.String(), + APIResources: []metav1.APIResource{ + { + Name: crd.GetName(), + SingularName: crd.Spec.Names.Singular, + Namespaced: crd.Spec.Scope == apiextensionsv1beta1.NamespaceScoped, + Group: 
crd.Spec.Group, + Version: crd.Spec.Versions[0].Name, + Kind: crd.Spec.Names.Kind, + }, + }, + }) + case *apiregistrationv1.APIService: + a := o + names := strings.Split(a.Name, ".") + apis = append(apis, &metav1.APIResourceList{ + GroupVersion: metav1.GroupVersion{Group: names[1], Version: a.Spec.Version}.String(), + APIResources: []metav1.APIResource{ + { + Name: names[1], + Group: names[1], + Version: a.Spec.Version, + Kind: names[1] + "Kind", + }, + }, + }) + } + } + return apis } func operatorGroup(ogName, saName, namespace string, saRef *corev1.ObjectReference) *operatorsv1.OperatorGroup { - return &operatorsv1.OperatorGroup{ - ObjectMeta: metav1.ObjectMeta{ - Name: ogName, - Namespace: namespace, - }, - Spec: operatorsv1.OperatorGroupSpec{ - TargetNamespaces: []string{namespace}, - ServiceAccountName: saName, - }, - Status: operatorsv1.OperatorGroupStatus{ - Namespaces: []string{namespace}, - ServiceAccountRef: saRef, - }, - } + return &operatorsv1.OperatorGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: ogName, + Namespace: namespace, + }, + Spec: operatorsv1.OperatorGroupSpec{ + TargetNamespaces: []string{namespace}, + ServiceAccountName: saName, + }, + Status: operatorsv1.OperatorGroupStatus{ + Namespaces: []string{namespace}, + ServiceAccountRef: saRef, + }, + } } func hasExpectedCondition(ip *v1alpha1.InstallPlan, expectedCondition v1alpha1.InstallPlanCondition) bool { - for _, cond := range ip.Status.Conditions { - if cond.Type == expectedCondition.Type && cond.Message == expectedCondition.Message && cond.Status == expectedCondition.Status { - return true - } - } - return false + for _, cond := range ip.Status.Conditions { + if cond.Type == expectedCondition.Type && cond.Message == expectedCondition.Message && cond.Status == expectedCondition.Status { + return true + } + } + return false } diff --git a/staging/operator-lifecycle-manager/pkg/controller/operators/catalog/testdata/apiextensionsv1/crontabs.cr.fail.v2.yaml 
b/staging/operator-lifecycle-manager/pkg/controller/operators/catalog/testdata/apiextensionsv1/crontabs.cr.fail.v2.yaml new file mode 100644 index 0000000000..884b755799 --- /dev/null +++ b/staging/operator-lifecycle-manager/pkg/controller/operators/catalog/testdata/apiextensionsv1/crontabs.cr.fail.v2.yaml @@ -0,0 +1,8 @@ +apiVersion: stable.example.com/v2 +kind: CronTab +metadata: + name: my-crontab +spec: + cronSpec: "* * * * *" + image: "" + replicas: 10 diff --git a/staging/operator-lifecycle-manager/pkg/controller/operators/catalog/testdata/apiextensionsv1/crontabs.cr.valid.v1.yaml b/staging/operator-lifecycle-manager/pkg/controller/operators/catalog/testdata/apiextensionsv1/crontabs.cr.valid.v1.yaml new file mode 100644 index 0000000000..74ace7c94f --- /dev/null +++ b/staging/operator-lifecycle-manager/pkg/controller/operators/catalog/testdata/apiextensionsv1/crontabs.cr.valid.v1.yaml @@ -0,0 +1,8 @@ +apiVersion: stable.example.com/v1 +kind: CronTab +metadata: + name: my-crontab-v1 +spec: + cronSpec: "* * * * *" + image: "" + replicas: 9 diff --git a/staging/operator-lifecycle-manager/pkg/controller/operators/catalog/testdata/apiextensionsv1/crontabs.cr.valid.v2.yaml b/staging/operator-lifecycle-manager/pkg/controller/operators/catalog/testdata/apiextensionsv1/crontabs.cr.valid.v2.yaml new file mode 100644 index 0000000000..de6ad8780f --- /dev/null +++ b/staging/operator-lifecycle-manager/pkg/controller/operators/catalog/testdata/apiextensionsv1/crontabs.cr.valid.v2.yaml @@ -0,0 +1,8 @@ +apiVersion: stable.example.com/v2 +kind: CronTab +metadata: + name: my-crontab-v2 +spec: + cronSpec: "* * * * *" + image: "" + replicas: 9 diff --git a/staging/operator-lifecycle-manager/pkg/controller/operators/catalog/testdata/apiextensionsv1/crontabs.crd.old.unserved.yaml b/staging/operator-lifecycle-manager/pkg/controller/operators/catalog/testdata/apiextensionsv1/crontabs.crd.old.unserved.yaml new file mode 100644 index 0000000000..9bd02584a7 --- /dev/null +++ 
b/staging/operator-lifecycle-manager/pkg/controller/operators/catalog/testdata/apiextensionsv1/crontabs.crd.old.unserved.yaml @@ -0,0 +1,53 @@ +# Adapated from: https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#validation +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: crontabs.stable.example.com +spec: + group: stable.example.com + versions: + - name: v1 + served: false + storage: false + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + cronSpec: + type: string + pattern: '^(\d+|\*)(/\d+)?(\s+(\d+|\*)(/\d+)?){4}$' + image: + type: string + replicas: + type: integer + minimum: 100 + maximum: 200 + - name: v2 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + cronSpec: + type: string + pattern: '^(\d+|\*)(/\d+)?(\s+(\d+|\*)(/\d+)?){4}$' + image: + type: string + replicas: + type: integer + minimum: 1 + maximum: 10 + scope: Namespaced + names: + plural: crontabs + singular: crontab + kind: CronTab + shortNames: + - ct diff --git a/staging/operator-lifecycle-manager/pkg/controller/operators/catalog/testdata/apiextensionsv1/crontabs.crd.old.yaml b/staging/operator-lifecycle-manager/pkg/controller/operators/catalog/testdata/apiextensionsv1/crontabs.crd.old.yaml new file mode 100644 index 0000000000..4c27283bc3 --- /dev/null +++ b/staging/operator-lifecycle-manager/pkg/controller/operators/catalog/testdata/apiextensionsv1/crontabs.crd.old.yaml @@ -0,0 +1,53 @@ +# Adapated from: https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#validation +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: crontabs.stable.example.com +spec: + group: stable.example.com + versions: + - name: v1 + served: true + storage: false + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: 
object + properties: + cronSpec: + type: string + pattern: '^(\d+|\*)(/\d+)?(\s+(\d+|\*)(/\d+)?){4}$' + image: + type: string + replicas: + type: integer + minimum: 1 + maximum: 10 + - name: v2 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + cronSpec: + type: string + pattern: '^(\d+|\*)(/\d+)?(\s+(\d+|\*)(/\d+)?){4}$' + image: + type: string + replicas: + type: integer + minimum: 1 + maximum: 10 + scope: Namespaced + names: + plural: crontabs + singular: crontab + kind: CronTab + shortNames: + - ct diff --git a/staging/operator-lifecycle-manager/pkg/controller/operators/catalog/testdata/apiextensionsv1/crontabs.crd.unserved.yaml b/staging/operator-lifecycle-manager/pkg/controller/operators/catalog/testdata/apiextensionsv1/crontabs.crd.unserved.yaml new file mode 100644 index 0000000000..47d0994e43 --- /dev/null +++ b/staging/operator-lifecycle-manager/pkg/controller/operators/catalog/testdata/apiextensionsv1/crontabs.crd.unserved.yaml @@ -0,0 +1,53 @@ +# Adapated from: https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#validation +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: crontabs.stable.example.com +spec: + group: stable.example.com + versions: + - name: v1 + served: false + storage: false + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + cronSpec: + type: string + pattern: '^(\d+|\*)(/\d+)?(\s+(\d+|\*)(/\d+)?){4}$' + image: + type: string + replicas: + type: integer + minimum: 100 + maximum: 200 + - name: v2 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + cronSpec: + type: string + pattern: '^(\d+|\*)(/\d+)?(\s+(\d+|\*)(/\d+)?){4}$' + image: + type: string + replicas: + type: integer + minimum: 1 + maximum: 9 + scope: Namespaced + names: + plural: crontabs + singular: 
crontab + kind: CronTab + shortNames: + - ct diff --git a/staging/operator-lifecycle-manager/pkg/controller/operators/catalog/testdata/apiextensionsv1/crontabs.crd.yaml b/staging/operator-lifecycle-manager/pkg/controller/operators/catalog/testdata/apiextensionsv1/crontabs.crd.yaml new file mode 100644 index 0000000000..7c3f299e14 --- /dev/null +++ b/staging/operator-lifecycle-manager/pkg/controller/operators/catalog/testdata/apiextensionsv1/crontabs.crd.yaml @@ -0,0 +1,53 @@ +# Adapated from: https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#validation +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: crontabs.stable.example.com +spec: + group: stable.example.com + versions: + - name: v1 + served: true + storage: false + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + cronSpec: + type: string + pattern: '^(\d+|\*)(/\d+)?(\s+(\d+|\*)(/\d+)?){4}$' + image: + type: string + replicas: + type: integer + minimum: 1 + maximum: 10 + - name: v2 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + cronSpec: + type: string + pattern: '^(\d+|\*)(/\d+)?(\s+(\d+|\*)(/\d+)?){4}$' + image: + type: string + replicas: + type: integer + minimum: 1 + maximum: 9 + scope: Namespaced + names: + plural: crontabs + singular: crontab + kind: CronTab + shortNames: + - ct diff --git a/staging/operator-lifecycle-manager/pkg/controller/operators/catalog/testdata/apiextensionsv1/single-version-cr.yaml b/staging/operator-lifecycle-manager/pkg/controller/operators/catalog/testdata/apiextensionsv1/single-version-cr.yaml new file mode 100644 index 0000000000..4185458e68 --- /dev/null +++ b/staging/operator-lifecycle-manager/pkg/controller/operators/catalog/testdata/apiextensionsv1/single-version-cr.yaml @@ -0,0 +1,6 @@ +apiVersion: cluster.com/v1alpha1 +kind: testcrd +metadata: + name: my-cr-1 
+spec: + scalar: 100 diff --git a/staging/operator-lifecycle-manager/pkg/controller/operators/catalog/testdata/apiextensionsv1/single-version-crd.old.yaml b/staging/operator-lifecycle-manager/pkg/controller/operators/catalog/testdata/apiextensionsv1/single-version-crd.old.yaml new file mode 100644 index 0000000000..47f077c010 --- /dev/null +++ b/staging/operator-lifecycle-manager/pkg/controller/operators/catalog/testdata/apiextensionsv1/single-version-crd.old.yaml @@ -0,0 +1,30 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: testcrd.cluster.com +spec: + conversion: + strategy: None + group: cluster.com + names: + kind: testcrd + listKind: testcrdList + plural: testcrds + singular: testcrd + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + properties: + spec: + description: Spec of a test object. + properties: + scalar: + description: Scalar value that should have a min and max. + type: integer + type: object + type: object + served: true + storage: true + \ No newline at end of file diff --git a/staging/operator-lifecycle-manager/pkg/controller/operators/catalog/testdata/apiextensionsv1/single-version-crd.yaml b/staging/operator-lifecycle-manager/pkg/controller/operators/catalog/testdata/apiextensionsv1/single-version-crd.yaml new file mode 100644 index 0000000000..0cb4ae7df4 --- /dev/null +++ b/staging/operator-lifecycle-manager/pkg/controller/operators/catalog/testdata/apiextensionsv1/single-version-crd.yaml @@ -0,0 +1,31 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: testcrd.cluster.com +spec: + conversion: + strategy: None + group: cluster.com + names: + kind: testcrd + listKind: testcrdList + plural: testcrds + singular: testcrd + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + properties: + spec: + description: Spec of a test object. 
+ properties: + scalar: + description: Scalar value that should have a min and max. + maximum: 50 + minimum: 2 + type: integer + type: object + type: object + served: true + storage: true diff --git a/staging/operator-lifecycle-manager/pkg/controller/operators/catalog/testdata/apiextensionsv1beta1/cr.v2.yaml b/staging/operator-lifecycle-manager/pkg/controller/operators/catalog/testdata/apiextensionsv1beta1/cr.v2.yaml new file mode 100644 index 0000000000..12d2f801c4 --- /dev/null +++ b/staging/operator-lifecycle-manager/pkg/controller/operators/catalog/testdata/apiextensionsv1beta1/cr.v2.yaml @@ -0,0 +1,6 @@ +apiVersion: cluster.com/v2 +kind: testcrd +metadata: + name: my-cr-1 +spec: + scalar: 3 diff --git a/staging/operator-lifecycle-manager/pkg/controller/operators/catalog/testdata/apiextensionsv1beta1/cr.yaml b/staging/operator-lifecycle-manager/pkg/controller/operators/catalog/testdata/apiextensionsv1beta1/cr.yaml new file mode 100644 index 0000000000..1b255e2f48 --- /dev/null +++ b/staging/operator-lifecycle-manager/pkg/controller/operators/catalog/testdata/apiextensionsv1beta1/cr.yaml @@ -0,0 +1,6 @@ +apiVersion: cluster.com/v1alpha1 +kind: testcrd +metadata: + name: my-cr-1 +spec: + scalar: 2 diff --git a/staging/operator-lifecycle-manager/pkg/controller/operators/catalog/testdata/apiextensionsv1beta1/crd.no-versions-list.old.yaml b/staging/operator-lifecycle-manager/pkg/controller/operators/catalog/testdata/apiextensionsv1beta1/crd.no-versions-list.old.yaml new file mode 100644 index 0000000000..b0735d4fc5 --- /dev/null +++ b/staging/operator-lifecycle-manager/pkg/controller/operators/catalog/testdata/apiextensionsv1beta1/crd.no-versions-list.old.yaml @@ -0,0 +1,28 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: testcrd.cluster.com +spec: + conversion: + strategy: None + group: cluster.com + names: + kind: testcrd + listKind: testcrdList + plural: testcrds + singular: testcrd + scope: Namespaced + validation: 
+ openAPIV3Schema: + properties: + spec: + description: Spec of a test object. + properties: + scalar: + description: Scalar value that should have a min and max. + maximum: 50 + minimum: 1 + type: integer + type: object + type: object + version: v1alpha1 diff --git a/staging/operator-lifecycle-manager/pkg/controller/operators/catalog/testdata/apiextensionsv1beta1/crd.no-versions-list.yaml b/staging/operator-lifecycle-manager/pkg/controller/operators/catalog/testdata/apiextensionsv1beta1/crd.no-versions-list.yaml new file mode 100644 index 0000000000..f2963bd990 --- /dev/null +++ b/staging/operator-lifecycle-manager/pkg/controller/operators/catalog/testdata/apiextensionsv1beta1/crd.no-versions-list.yaml @@ -0,0 +1,28 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: testcrd.cluster.com +spec: + conversion: + strategy: None + group: cluster.com + names: + kind: testcrd + listKind: testcrdList + plural: testcrds + singular: testcrd + scope: Namespaced + validation: + openAPIV3Schema: + properties: + spec: + description: Spec of a test object. + properties: + scalar: + description: Scalar value that should have a min and max. 
+ maximum: 50 + minimum: 3 + type: integer + type: object + type: object + version: v1alpha1 diff --git a/staging/operator-lifecycle-manager/pkg/controller/operators/catalog/testdata/apiextensionsv1beta1/crd.old.yaml b/staging/operator-lifecycle-manager/pkg/controller/operators/catalog/testdata/apiextensionsv1beta1/crd.old.yaml new file mode 100644 index 0000000000..6691150d74 --- /dev/null +++ b/staging/operator-lifecycle-manager/pkg/controller/operators/catalog/testdata/apiextensionsv1beta1/crd.old.yaml @@ -0,0 +1,34 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: testcrd.cluster.com +spec: + conversion: + strategy: None + group: cluster.com + names: + kind: testcrd + listKind: testcrdList + plural: testcrds + singular: testcrd + scope: Namespaced + validation: + openAPIV3Schema: + properties: + spec: + description: Spec of a test object. + properties: + scalar: + description: Scalar value that should have a min and max. + maximum: 50 + minimum: 2 + type: integer + type: object + type: object + versions: + - name: v1alpha1 + served: true + storage: false + - name: v2 + served: true + storage: true diff --git a/staging/operator-lifecycle-manager/pkg/controller/operators/catalog/testdata/apiextensionsv1beta1/crd.unserved.yaml b/staging/operator-lifecycle-manager/pkg/controller/operators/catalog/testdata/apiextensionsv1beta1/crd.unserved.yaml new file mode 100644 index 0000000000..e15cba147b --- /dev/null +++ b/staging/operator-lifecycle-manager/pkg/controller/operators/catalog/testdata/apiextensionsv1beta1/crd.unserved.yaml @@ -0,0 +1,34 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: testcrd.cluster.com +spec: + conversion: + strategy: None + group: cluster.com + names: + kind: testcrd + listKind: testcrdList + plural: testcrds + singular: testcrd + scope: Namespaced + validation: + openAPIV3Schema: + properties: + spec: + description: Spec of a test object. 
+ properties: + scalar: + description: Scalar value that should have a min and max. + maximum: 50 + minimum: 2 + type: integer + type: object + type: object + versions: + - name: v1alpha1 + served: false + storage: false + - name: v2 + served: true + storage: true diff --git a/staging/operator-lifecycle-manager/pkg/controller/operators/catalog/testdata/apiextensionsv1beta1/crd.yaml b/staging/operator-lifecycle-manager/pkg/controller/operators/catalog/testdata/apiextensionsv1beta1/crd.yaml new file mode 100644 index 0000000000..f3e559734b --- /dev/null +++ b/staging/operator-lifecycle-manager/pkg/controller/operators/catalog/testdata/apiextensionsv1beta1/crd.yaml @@ -0,0 +1,34 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: testcrd.cluster.com +spec: + conversion: + strategy: None + group: cluster.com + names: + kind: testcrd + listKind: testcrdList + plural: testcrds + singular: testcrd + scope: Namespaced + validation: + openAPIV3Schema: + properties: + spec: + description: Spec of a test object. + properties: + scalar: + description: Scalar value that should have a min and max. 
+ maximum: 50 + minimum: 3 + type: integer + type: object + type: object + versions: + - name: v1alpha1 + served: true + storage: false + - name: v2 + served: true + storage: true diff --git a/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators/catalog/operator.go b/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators/catalog/operator.go index 32f5f652d8..6706cecec1 100644 --- a/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators/catalog/operator.go +++ b/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators/catalog/operator.go @@ -1,2559 +1,2572 @@ package catalog import ( - "context" - "encoding/json" - "errors" - "fmt" - "reflect" - "sort" - "strings" - "sync" - "time" - - errorwrap "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "google.golang.org/grpc/connectivity" - corev1 "k8s.io/api/core/v1" - rbacv1 "k8s.io/api/rbac/v1" - "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions" - apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" - "k8s.io/apiextensions-apiserver/pkg/apiserver/validation" - extinf "k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/selection" - utilerrors "k8s.io/apimachinery/pkg/util/errors" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/apimachinery/pkg/util/validation/field" - "k8s.io/apimachinery/pkg/util/yaml" - "k8s.io/client-go/dynamic" - "k8s.io/client-go/informers" - "k8s.io/client-go/tools/cache" - "k8s.io/client-go/tools/clientcmd" - 
"k8s.io/client-go/tools/pager" - "k8s.io/client-go/tools/record" - "k8s.io/client-go/util/retry" - "k8s.io/client-go/util/workqueue" - utilclock "k8s.io/utils/clock" - - "github.com/operator-framework/api/pkg/operators/reference" - operatorsv1 "github.com/operator-framework/api/pkg/operators/v1" - "github.com/operator-framework/api/pkg/operators/v1alpha1" - "github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned" - "github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/informers/externalversions" - operatorsv1alpha1listers "github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/listers/operators/v1alpha1" - "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/bundle" - olmerrors "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/errors" - "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/install" - "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators/catalog/subscription" - "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators/internal/pruning" - "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/registry" - "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/registry/grpc" - "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/registry/reconciler" - "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/registry/resolver" - resolvercache "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/registry/resolver/cache" - "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/registry/resolver/solver" - "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/catalogsource" - "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/clients" - controllerclient 
"github.com/operator-framework/operator-lifecycle-manager/pkg/lib/controller-runtime/client" - "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/event" - index "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/index" - "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/operatorclient" - "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/operatorlister" - "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/ownerutil" - "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/queueinformer" - "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/scoped" - sharedtime "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/time" - "github.com/operator-framework/operator-lifecycle-manager/pkg/metrics" + "context" + "encoding/json" + "errors" + "fmt" + "reflect" + "sort" + "strings" + "sync" + "time" + + errorwrap "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "google.golang.org/grpc/connectivity" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + "k8s.io/apiextensions-apiserver/pkg/apiserver/validation" + extinf "k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/selection" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apimachinery/pkg/util/yaml" + 
"k8s.io/client-go/dynamic" + "k8s.io/client-go/informers" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/client-go/tools/record" + "k8s.io/client-go/util/retry" + "k8s.io/client-go/util/workqueue" + utilclock "k8s.io/utils/clock" + + "github.com/operator-framework/api/pkg/operators/reference" + operatorsv1 "github.com/operator-framework/api/pkg/operators/v1" + "github.com/operator-framework/api/pkg/operators/v1alpha1" + "github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned" + "github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/informers/externalversions" + operatorsv1alpha1listers "github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/listers/operators/v1alpha1" + "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/bundle" + olmerrors "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/errors" + "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/install" + "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators/catalog/subscription" + "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators/internal/pruning" + "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/registry" + "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/registry/grpc" + "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/registry/reconciler" + "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/registry/resolver" + resolvercache "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/registry/resolver/cache" + "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/registry/resolver/solver" + "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/catalogsource" + "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/clients" + 
controllerclient "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/controller-runtime/client" + "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/event" + index "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/index" + "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/operatorclient" + "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/operatorlister" + "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/ownerutil" + "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/queueinformer" + "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/scoped" + sharedtime "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/time" + "github.com/operator-framework/operator-lifecycle-manager/pkg/metrics" ) const ( - crdKind = "CustomResourceDefinition" - secretKind = "Secret" - clusterRoleKind = "ClusterRole" - clusterRoleBindingKind = "ClusterRoleBinding" - configMapKind = "ConfigMap" - csvKind = "ClusterServiceVersion" - serviceAccountKind = "ServiceAccount" - serviceKind = "Service" - roleKind = "Role" - roleBindingKind = "RoleBinding" - generatedByKey = "olm.generated-by" - maxInstallPlanCount = 5 - maxDeletesPerSweep = 5 - RegistryFieldManager = "olm.registry" + crdKind = "CustomResourceDefinition" + secretKind = "Secret" + clusterRoleKind = "ClusterRole" + clusterRoleBindingKind = "ClusterRoleBinding" + configMapKind = "ConfigMap" + csvKind = "ClusterServiceVersion" + serviceAccountKind = "ServiceAccount" + serviceKind = "Service" + roleKind = "Role" + roleBindingKind = "RoleBinding" + generatedByKey = "olm.generated-by" + maxInstallPlanCount = 5 + maxDeletesPerSweep = 5 + RegistryFieldManager = "olm.registry" ) // Operator represents a Kubernetes operator that executes InstallPlans by // resolving dependencies in a catalog. 
type Operator struct { - queueinformer.Operator - - logger *logrus.Logger - clock utilclock.Clock - opClient operatorclient.ClientInterface - client versioned.Interface - dynamicClient dynamic.Interface - lister operatorlister.OperatorLister - catsrcQueueSet *queueinformer.ResourceQueueSet - subQueueSet *queueinformer.ResourceQueueSet - ipQueueSet *queueinformer.ResourceQueueSet - ogQueueSet *queueinformer.ResourceQueueSet - nsResolveQueue workqueue.RateLimitingInterface - namespace string - recorder record.EventRecorder - sources *grpc.SourceStore - sourcesLastUpdate sharedtime.SharedTime - resolver resolver.StepResolver - reconciler reconciler.RegistryReconcilerFactory - catalogSubscriberIndexer map[string]cache.Indexer - clientAttenuator *scoped.ClientAttenuator - serviceAccountQuerier *scoped.UserDefinedServiceAccountQuerier - bundleUnpacker bundle.Unpacker - installPlanTimeout time.Duration - bundleUnpackTimeout time.Duration - clientFactory clients.Factory - muInstallPlan sync.Mutex - sourceInvalidator *resolver.RegistrySourceProvider + queueinformer.Operator + + logger *logrus.Logger + clock utilclock.Clock + opClient operatorclient.ClientInterface + client versioned.Interface + dynamicClient dynamic.Interface + lister operatorlister.OperatorLister + catsrcQueueSet *queueinformer.ResourceQueueSet + subQueueSet *queueinformer.ResourceQueueSet + ipQueueSet *queueinformer.ResourceQueueSet + ogQueueSet *queueinformer.ResourceQueueSet + nsResolveQueue workqueue.RateLimitingInterface + namespace string + recorder record.EventRecorder + sources *grpc.SourceStore + sourcesLastUpdate sharedtime.SharedTime + resolver resolver.StepResolver + reconciler reconciler.RegistryReconcilerFactory + catalogSubscriberIndexer map[string]cache.Indexer + clientAttenuator *scoped.ClientAttenuator + serviceAccountQuerier *scoped.UserDefinedServiceAccountQuerier + bundleUnpacker bundle.Unpacker + installPlanTimeout time.Duration + bundleUnpackTimeout time.Duration + clientFactory 
clients.Factory + muInstallPlan sync.Mutex + sourceInvalidator *resolver.RegistrySourceProvider } type CatalogSourceSyncFunc func(logger *logrus.Entry, in *v1alpha1.CatalogSource) (out *v1alpha1.CatalogSource, continueSync bool, syncError error) // NewOperator creates a new Catalog Operator. func NewOperator(ctx context.Context, kubeconfigPath string, clock utilclock.Clock, logger *logrus.Logger, resync time.Duration, configmapRegistryImage, opmImage, utilImage string, operatorNamespace string, scheme *runtime.Scheme, installPlanTimeout time.Duration, bundleUnpackTimeout time.Duration, workloadUserID int64) (*Operator, error) { - resyncPeriod := queueinformer.ResyncWithJitter(resync, 0.2) - config, err := clientcmd.BuildConfigFromFlags("", kubeconfigPath) - if err != nil { - return nil, err - } - - // Create a new client for OLM types (CRs) - crClient, err := versioned.NewForConfig(config) - if err != nil { - return nil, err - } - - // Create a new client for dynamic types (CRs) - dynamicClient, err := dynamic.NewForConfig(config) - if err != nil { - return nil, err - } - - // Create a new queueinformer-based operator. - opClient, err := operatorclient.NewClientFromRestConfig(config) - if err != nil { - return nil, err - } - - queueOperator, err := queueinformer.NewOperator(opClient.KubernetesInterface().Discovery(), queueinformer.WithOperatorLogger(logger)) - if err != nil { - return nil, err - } - - // Create an OperatorLister - lister := operatorlister.NewLister() - - // eventRecorder can emit events - eventRecorder, err := event.NewRecorder(opClient.KubernetesInterface().CoreV1().Events(metav1.NamespaceAll)) - if err != nil { - return nil, err - } - - ssaClient, err := controllerclient.NewForConfig(config, scheme, RegistryFieldManager) - if err != nil { - return nil, err - } - - // Allocate the new instance of an Operator. 
- op := &Operator{ - Operator: queueOperator, - logger: logger, - clock: clock, - opClient: opClient, - dynamicClient: dynamicClient, - client: crClient, - lister: lister, - namespace: operatorNamespace, - recorder: eventRecorder, - catsrcQueueSet: queueinformer.NewEmptyResourceQueueSet(), - subQueueSet: queueinformer.NewEmptyResourceQueueSet(), - ipQueueSet: queueinformer.NewEmptyResourceQueueSet(), - ogQueueSet: queueinformer.NewEmptyResourceQueueSet(), - catalogSubscriberIndexer: map[string]cache.Indexer{}, - serviceAccountQuerier: scoped.NewUserDefinedServiceAccountQuerier(logger, crClient), - clientAttenuator: scoped.NewClientAttenuator(logger, config, opClient), - installPlanTimeout: installPlanTimeout, - bundleUnpackTimeout: bundleUnpackTimeout, - clientFactory: clients.NewFactory(config), - } - op.sources = grpc.NewSourceStore(logger, 10*time.Second, 10*time.Minute, op.syncSourceState) - op.sourceInvalidator = resolver.SourceProviderFromRegistryClientProvider(op.sources, lister.OperatorsV1alpha1().CatalogSourceLister(), logger) - resolverSourceProvider := NewOperatorGroupToggleSourceProvider(op.sourceInvalidator, logger, op.lister.OperatorsV1().OperatorGroupLister()) - op.reconciler = reconciler.NewRegistryReconcilerFactory(lister, opClient, configmapRegistryImage, op.now, ssaClient, workloadUserID) - res := resolver.NewOperatorStepResolver(lister, crClient, operatorNamespace, resolverSourceProvider, logger) - op.resolver = resolver.NewInstrumentedResolver(res, metrics.RegisterDependencyResolutionSuccess, metrics.RegisterDependencyResolutionFailure) - - // Wire OLM CR sharedIndexInformers - crInformerFactory := externalversions.NewSharedInformerFactoryWithOptions(op.client, resyncPeriod()) - - // Fields are pruned from local copies of the objects managed - // by this informer in order to reduce cached size. 
- prunedCSVInformer := cache.NewSharedIndexInformer( - pruning.NewListerWatcher(op.client, metav1.NamespaceAll, - func(options *metav1.ListOptions) { - options.LabelSelector = fmt.Sprintf("!%s", v1alpha1.CopiedLabelKey) - }, - pruning.PrunerFunc(func(csv *v1alpha1.ClusterServiceVersion) { - *csv = v1alpha1.ClusterServiceVersion{ - TypeMeta: csv.TypeMeta, - ObjectMeta: metav1.ObjectMeta{ - Name: csv.Name, - Namespace: csv.Namespace, - Labels: csv.Labels, - Annotations: csv.Annotations, - }, - Spec: v1alpha1.ClusterServiceVersionSpec{ - CustomResourceDefinitions: csv.Spec.CustomResourceDefinitions, - APIServiceDefinitions: csv.Spec.APIServiceDefinitions, - Replaces: csv.Spec.Replaces, - Version: csv.Spec.Version, - }, - Status: v1alpha1.ClusterServiceVersionStatus{ - Phase: csv.Status.Phase, - Reason: csv.Status.Reason, - }, - } - })), - &v1alpha1.ClusterServiceVersion{}, - resyncPeriod(), - cache.Indexers{ - cache.NamespaceIndex: cache.MetaNamespaceIndexFunc, - }, - ) - csvLister := operatorsv1alpha1listers.NewClusterServiceVersionLister(prunedCSVInformer.GetIndexer()) - op.lister.OperatorsV1alpha1().RegisterClusterServiceVersionLister(metav1.NamespaceAll, csvLister) - if err := op.RegisterInformer(prunedCSVInformer); err != nil { - return nil, err - } - - // TODO: Add namespace resolve sync - - // Wire InstallPlans - ipInformer := crInformerFactory.Operators().V1alpha1().InstallPlans() - op.lister.OperatorsV1alpha1().RegisterInstallPlanLister(metav1.NamespaceAll, ipInformer.Lister()) - ipQueue := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "ips") - op.ipQueueSet.Set(metav1.NamespaceAll, ipQueue) - ipQueueInformer, err := queueinformer.NewQueueInformer( - ctx, - queueinformer.WithMetricsProvider(metrics.NewMetricsInstallPlan(op.lister.OperatorsV1alpha1().InstallPlanLister())), - queueinformer.WithLogger(op.logger), - queueinformer.WithQueue(ipQueue), - queueinformer.WithInformer(ipInformer.Informer()), - 
queueinformer.WithSyncer(queueinformer.LegacySyncHandler(op.syncInstallPlans).ToSyncer()), - ) - if err != nil { - return nil, err - } - if err := op.RegisterQueueInformer(ipQueueInformer); err != nil { - return nil, err - } - - operatorGroupInformer := crInformerFactory.Operators().V1().OperatorGroups() - op.lister.OperatorsV1().RegisterOperatorGroupLister(metav1.NamespaceAll, operatorGroupInformer.Lister()) - ogQueue := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "ogs") - op.ogQueueSet.Set(metav1.NamespaceAll, ogQueue) - operatorGroupQueueInformer, err := queueinformer.NewQueueInformer( - ctx, - queueinformer.WithLogger(op.logger), - queueinformer.WithQueue(ogQueue), - queueinformer.WithInformer(operatorGroupInformer.Informer()), - queueinformer.WithSyncer(queueinformer.LegacySyncHandler(op.syncOperatorGroups).ToSyncer()), - ) - if err != nil { - return nil, err - } - if err := op.RegisterQueueInformer(operatorGroupQueueInformer); err != nil { - return nil, err - } - - // Wire CatalogSources - catsrcInformer := crInformerFactory.Operators().V1alpha1().CatalogSources() - op.lister.OperatorsV1alpha1().RegisterCatalogSourceLister(metav1.NamespaceAll, catsrcInformer.Lister()) - catsrcQueue := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "catsrcs") - op.catsrcQueueSet.Set(metav1.NamespaceAll, catsrcQueue) - catsrcQueueInformer, err := queueinformer.NewQueueInformer( - ctx, - queueinformer.WithMetricsProvider(metrics.NewMetricsCatalogSource(op.lister.OperatorsV1alpha1().CatalogSourceLister())), - queueinformer.WithLogger(op.logger), - queueinformer.WithQueue(catsrcQueue), - queueinformer.WithInformer(catsrcInformer.Informer()), - queueinformer.WithSyncer(queueinformer.LegacySyncHandler(op.syncCatalogSources).ToSyncerWithDelete(op.handleCatSrcDeletion)), - ) - if err != nil { - return nil, err - } - if err := op.RegisterQueueInformer(catsrcQueueInformer); err != nil { - return nil, err - } - - // Wire 
Subscriptions - subInformer := crInformerFactory.Operators().V1alpha1().Subscriptions() - op.lister.OperatorsV1alpha1().RegisterSubscriptionLister(metav1.NamespaceAll, subInformer.Lister()) - if err := subInformer.Informer().AddIndexers(cache.Indexers{index.PresentCatalogIndexFuncKey: index.PresentCatalogIndexFunc}); err != nil { - return nil, err - } - subIndexer := subInformer.Informer().GetIndexer() - op.catalogSubscriberIndexer[metav1.NamespaceAll] = subIndexer - - subQueue := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "subs") - op.subQueueSet.Set(metav1.NamespaceAll, subQueue) - subSyncer, err := subscription.NewSyncer( - ctx, - subscription.WithLogger(op.logger), - subscription.WithClient(op.client), - subscription.WithOperatorLister(op.lister), - subscription.WithSubscriptionInformer(subInformer.Informer()), - subscription.WithCatalogInformer(catsrcInformer.Informer()), - subscription.WithInstallPlanInformer(ipInformer.Informer()), - subscription.WithSubscriptionQueue(subQueue), - subscription.WithAppendedReconcilers(subscription.ReconcilerFromLegacySyncHandler(op.syncSubscriptions, nil)), - subscription.WithRegistryReconcilerFactory(op.reconciler), - subscription.WithGlobalCatalogNamespace(op.namespace), - ) - if err != nil { - return nil, err - } - subQueueInformer, err := queueinformer.NewQueueInformer( - ctx, - queueinformer.WithMetricsProvider(metrics.NewMetricsSubscription(op.lister.OperatorsV1alpha1().SubscriptionLister())), - queueinformer.WithLogger(op.logger), - queueinformer.WithQueue(subQueue), - queueinformer.WithInformer(subInformer.Informer()), - queueinformer.WithSyncer(subSyncer), - ) - if err != nil { - return nil, err - } - if err := op.RegisterQueueInformer(subQueueInformer); err != nil { - return nil, err - } - - // Wire k8s sharedIndexInformers - k8sInformerFactory := informers.NewSharedInformerFactoryWithOptions(op.opClient.KubernetesInterface(), resyncPeriod()) - sharedIndexInformers := 
[]cache.SharedIndexInformer{} - - // Wire Roles - roleInformer := k8sInformerFactory.Rbac().V1().Roles() - op.lister.RbacV1().RegisterRoleLister(metav1.NamespaceAll, roleInformer.Lister()) - sharedIndexInformers = append(sharedIndexInformers, roleInformer.Informer()) - - // Wire RoleBindings - roleBindingInformer := k8sInformerFactory.Rbac().V1().RoleBindings() - op.lister.RbacV1().RegisterRoleBindingLister(metav1.NamespaceAll, roleBindingInformer.Lister()) - sharedIndexInformers = append(sharedIndexInformers, roleBindingInformer.Informer()) - - // Wire ServiceAccounts - serviceAccountInformer := k8sInformerFactory.Core().V1().ServiceAccounts() - op.lister.CoreV1().RegisterServiceAccountLister(metav1.NamespaceAll, serviceAccountInformer.Lister()) - sharedIndexInformers = append(sharedIndexInformers, serviceAccountInformer.Informer()) - - // Wire Services - serviceInformer := k8sInformerFactory.Core().V1().Services() - op.lister.CoreV1().RegisterServiceLister(metav1.NamespaceAll, serviceInformer.Lister()) - sharedIndexInformers = append(sharedIndexInformers, serviceInformer.Informer()) - - // Wire Pods for CatalogSource - catsrcReq, err := labels.NewRequirement(reconciler.CatalogSourceLabelKey, selection.Exists, nil) - if err != nil { - return nil, err - } - - csPodLabels := labels.NewSelector() - csPodLabels = csPodLabels.Add(*catsrcReq) - csPodInformer := informers.NewSharedInformerFactoryWithOptions(op.opClient.KubernetesInterface(), resyncPeriod(), informers.WithTweakListOptions(func(options *metav1.ListOptions) { - options.LabelSelector = csPodLabels.String() - })).Core().V1().Pods() - op.lister.CoreV1().RegisterPodLister(metav1.NamespaceAll, csPodInformer.Lister()) - sharedIndexInformers = append(sharedIndexInformers, csPodInformer.Informer()) - - // Wire Pods for BundleUnpack job - buReq, err := labels.NewRequirement(bundle.BundleUnpackPodLabel, selection.Exists, nil) - if err != nil { - return nil, err - } - - buPodLabels := labels.NewSelector() - 
buPodLabels = buPodLabels.Add(*buReq) - buPodInformer := informers.NewSharedInformerFactoryWithOptions(op.opClient.KubernetesInterface(), resyncPeriod(), informers.WithTweakListOptions(func(options *metav1.ListOptions) { - options.LabelSelector = buPodLabels.String() - })).Core().V1().Pods() - sharedIndexInformers = append(sharedIndexInformers, buPodInformer.Informer()) - - // Wire ConfigMaps - configMapInformer := informers.NewSharedInformerFactoryWithOptions(op.opClient.KubernetesInterface(), resyncPeriod(), informers.WithTweakListOptions(func(options *metav1.ListOptions) { - options.LabelSelector = install.OLMManagedLabelKey - })).Core().V1().ConfigMaps() - op.lister.CoreV1().RegisterConfigMapLister(metav1.NamespaceAll, configMapInformer.Lister()) - sharedIndexInformers = append(sharedIndexInformers, configMapInformer.Informer()) - - // Wire Jobs - jobInformer := k8sInformerFactory.Batch().V1().Jobs() - sharedIndexInformers = append(sharedIndexInformers, jobInformer.Informer()) - - // Generate and register QueueInformers for k8s resources - k8sSyncer := queueinformer.LegacySyncHandler(op.syncObject).ToSyncerWithDelete(op.handleDeletion) - for _, informer := range sharedIndexInformers { - queueInformer, err := queueinformer.NewQueueInformer( - ctx, - queueinformer.WithLogger(op.logger), - queueinformer.WithInformer(informer), - queueinformer.WithSyncer(k8sSyncer), - ) - if err != nil { - return nil, err - } - - if err := op.RegisterQueueInformer(queueInformer); err != nil { - return nil, err - } - } - - // Setup the BundleUnpacker - op.bundleUnpacker, err = bundle.NewConfigmapUnpacker( - bundle.WithLogger(op.logger), - bundle.WithClient(op.opClient.KubernetesInterface()), - bundle.WithCatalogSourceLister(catsrcInformer.Lister()), - bundle.WithConfigMapLister(configMapInformer.Lister()), - bundle.WithJobLister(jobInformer.Lister()), - bundle.WithPodLister(buPodInformer.Lister()), - bundle.WithRoleLister(roleInformer.Lister()), - 
bundle.WithRoleBindingLister(roleBindingInformer.Lister()), - bundle.WithOPMImage(opmImage), - bundle.WithUtilImage(utilImage), - bundle.WithNow(op.now), - bundle.WithUnpackTimeout(op.bundleUnpackTimeout), - bundle.WithUserID(workloadUserID), - ) - if err != nil { - return nil, err - } - - // Register CustomResourceDefinition QueueInformer - crdInformer := extinf.NewSharedInformerFactory(op.opClient.ApiextensionsInterface(), resyncPeriod()).Apiextensions().V1().CustomResourceDefinitions() - op.lister.APIExtensionsV1().RegisterCustomResourceDefinitionLister(crdInformer.Lister()) - crdQueueInformer, err := queueinformer.NewQueueInformer( - ctx, - queueinformer.WithLogger(op.logger), - queueinformer.WithInformer(crdInformer.Informer()), - queueinformer.WithSyncer(queueinformer.LegacySyncHandler(op.syncObject).ToSyncerWithDelete(op.handleDeletion)), - ) - if err != nil { - return nil, err - } - if err := op.RegisterQueueInformer(crdQueueInformer); err != nil { - return nil, err - } - - // Namespace sync for resolving subscriptions - namespaceInformer := informers.NewSharedInformerFactory(op.opClient.KubernetesInterface(), resyncPeriod()).Core().V1().Namespaces() - op.lister.CoreV1().RegisterNamespaceLister(namespaceInformer.Lister()) - op.nsResolveQueue = workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "resolver") - namespaceQueueInformer, err := queueinformer.NewQueueInformer( - ctx, - queueinformer.WithLogger(op.logger), - queueinformer.WithQueue(op.nsResolveQueue), - queueinformer.WithInformer(namespaceInformer.Informer()), - queueinformer.WithSyncer(queueinformer.LegacySyncHandler(op.syncResolvingNamespace).ToSyncer()), - ) - if err != nil { - return nil, err - } - if err := op.RegisterQueueInformer(namespaceQueueInformer); err != nil { - return nil, err - } - - op.sources.Start(context.Background()) - - return op, nil + resyncPeriod := queueinformer.ResyncWithJitter(resync, 0.2) + config, err := clientcmd.BuildConfigFromFlags("", 
kubeconfigPath) + if err != nil { + return nil, err + } + + // Create a new client for OLM types (CRs) + crClient, err := versioned.NewForConfig(config) + if err != nil { + return nil, err + } + + // Create a new client for dynamic types (CRs) + dynamicClient, err := dynamic.NewForConfig(config) + if err != nil { + return nil, err + } + + // Create a new queueinformer-based operator. + opClient, err := operatorclient.NewClientFromRestConfig(config) + if err != nil { + return nil, err + } + + queueOperator, err := queueinformer.NewOperator(opClient.KubernetesInterface().Discovery(), queueinformer.WithOperatorLogger(logger)) + if err != nil { + return nil, err + } + + // Create an OperatorLister + lister := operatorlister.NewLister() + + // eventRecorder can emit events + eventRecorder, err := event.NewRecorder(opClient.KubernetesInterface().CoreV1().Events(metav1.NamespaceAll)) + if err != nil { + return nil, err + } + + ssaClient, err := controllerclient.NewForConfig(config, scheme, RegistryFieldManager) + if err != nil { + return nil, err + } + + // Allocate the new instance of an Operator. 
+ op := &Operator{ + Operator: queueOperator, + logger: logger, + clock: clock, + opClient: opClient, + dynamicClient: dynamicClient, + client: crClient, + lister: lister, + namespace: operatorNamespace, + recorder: eventRecorder, + catsrcQueueSet: queueinformer.NewEmptyResourceQueueSet(), + subQueueSet: queueinformer.NewEmptyResourceQueueSet(), + ipQueueSet: queueinformer.NewEmptyResourceQueueSet(), + ogQueueSet: queueinformer.NewEmptyResourceQueueSet(), + catalogSubscriberIndexer: map[string]cache.Indexer{}, + serviceAccountQuerier: scoped.NewUserDefinedServiceAccountQuerier(logger, crClient), + clientAttenuator: scoped.NewClientAttenuator(logger, config, opClient), + installPlanTimeout: installPlanTimeout, + bundleUnpackTimeout: bundleUnpackTimeout, + clientFactory: clients.NewFactory(config), + } + op.sources = grpc.NewSourceStore(logger, 10*time.Second, 10*time.Minute, op.syncSourceState) + op.sourceInvalidator = resolver.SourceProviderFromRegistryClientProvider(op.sources, lister.OperatorsV1alpha1().CatalogSourceLister(), logger) + resolverSourceProvider := NewOperatorGroupToggleSourceProvider(op.sourceInvalidator, logger, op.lister.OperatorsV1().OperatorGroupLister()) + op.reconciler = reconciler.NewRegistryReconcilerFactory(lister, opClient, configmapRegistryImage, op.now, ssaClient, workloadUserID) + res := resolver.NewOperatorStepResolver(lister, crClient, operatorNamespace, resolverSourceProvider, logger) + op.resolver = resolver.NewInstrumentedResolver(res, metrics.RegisterDependencyResolutionSuccess, metrics.RegisterDependencyResolutionFailure) + + // Wire OLM CR sharedIndexInformers + crInformerFactory := externalversions.NewSharedInformerFactoryWithOptions(op.client, resyncPeriod()) + + // Fields are pruned from local copies of the objects managed + // by this informer in order to reduce cached size. 
+ prunedCSVInformer := cache.NewSharedIndexInformer( + pruning.NewListerWatcher(op.client, metav1.NamespaceAll, + func(options *metav1.ListOptions) { + options.LabelSelector = fmt.Sprintf("!%s", v1alpha1.CopiedLabelKey) + }, + pruning.PrunerFunc(func(csv *v1alpha1.ClusterServiceVersion) { + *csv = v1alpha1.ClusterServiceVersion{ + TypeMeta: csv.TypeMeta, + ObjectMeta: metav1.ObjectMeta{ + Name: csv.Name, + Namespace: csv.Namespace, + Labels: csv.Labels, + Annotations: csv.Annotations, + }, + Spec: v1alpha1.ClusterServiceVersionSpec{ + CustomResourceDefinitions: csv.Spec.CustomResourceDefinitions, + APIServiceDefinitions: csv.Spec.APIServiceDefinitions, + Replaces: csv.Spec.Replaces, + Version: csv.Spec.Version, + }, + Status: v1alpha1.ClusterServiceVersionStatus{ + Phase: csv.Status.Phase, + Reason: csv.Status.Reason, + }, + } + })), + &v1alpha1.ClusterServiceVersion{}, + resyncPeriod(), + cache.Indexers{ + cache.NamespaceIndex: cache.MetaNamespaceIndexFunc, + }, + ) + csvLister := operatorsv1alpha1listers.NewClusterServiceVersionLister(prunedCSVInformer.GetIndexer()) + op.lister.OperatorsV1alpha1().RegisterClusterServiceVersionLister(metav1.NamespaceAll, csvLister) + if err := op.RegisterInformer(prunedCSVInformer); err != nil { + return nil, err + } + + // TODO: Add namespace resolve sync + + // Wire InstallPlans + ipInformer := crInformerFactory.Operators().V1alpha1().InstallPlans() + op.lister.OperatorsV1alpha1().RegisterInstallPlanLister(metav1.NamespaceAll, ipInformer.Lister()) + ipQueue := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "ips") + op.ipQueueSet.Set(metav1.NamespaceAll, ipQueue) + ipQueueInformer, err := queueinformer.NewQueueInformer( + ctx, + queueinformer.WithMetricsProvider(metrics.NewMetricsInstallPlan(op.lister.OperatorsV1alpha1().InstallPlanLister())), + queueinformer.WithLogger(op.logger), + queueinformer.WithQueue(ipQueue), + queueinformer.WithInformer(ipInformer.Informer()), + 
queueinformer.WithSyncer(queueinformer.LegacySyncHandler(op.syncInstallPlans).ToSyncer()), + ) + if err != nil { + return nil, err + } + if err := op.RegisterQueueInformer(ipQueueInformer); err != nil { + return nil, err + } + + operatorGroupInformer := crInformerFactory.Operators().V1().OperatorGroups() + op.lister.OperatorsV1().RegisterOperatorGroupLister(metav1.NamespaceAll, operatorGroupInformer.Lister()) + ogQueue := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "ogs") + op.ogQueueSet.Set(metav1.NamespaceAll, ogQueue) + operatorGroupQueueInformer, err := queueinformer.NewQueueInformer( + ctx, + queueinformer.WithLogger(op.logger), + queueinformer.WithQueue(ogQueue), + queueinformer.WithInformer(operatorGroupInformer.Informer()), + queueinformer.WithSyncer(queueinformer.LegacySyncHandler(op.syncOperatorGroups).ToSyncer()), + ) + if err != nil { + return nil, err + } + if err := op.RegisterQueueInformer(operatorGroupQueueInformer); err != nil { + return nil, err + } + + // Wire CatalogSources + catsrcInformer := crInformerFactory.Operators().V1alpha1().CatalogSources() + op.lister.OperatorsV1alpha1().RegisterCatalogSourceLister(metav1.NamespaceAll, catsrcInformer.Lister()) + catsrcQueue := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "catsrcs") + op.catsrcQueueSet.Set(metav1.NamespaceAll, catsrcQueue) + catsrcQueueInformer, err := queueinformer.NewQueueInformer( + ctx, + queueinformer.WithMetricsProvider(metrics.NewMetricsCatalogSource(op.lister.OperatorsV1alpha1().CatalogSourceLister())), + queueinformer.WithLogger(op.logger), + queueinformer.WithQueue(catsrcQueue), + queueinformer.WithInformer(catsrcInformer.Informer()), + queueinformer.WithSyncer(queueinformer.LegacySyncHandler(op.syncCatalogSources).ToSyncerWithDelete(op.handleCatSrcDeletion)), + ) + if err != nil { + return nil, err + } + if err := op.RegisterQueueInformer(catsrcQueueInformer); err != nil { + return nil, err + } + + // Wire 
Subscriptions + subInformer := crInformerFactory.Operators().V1alpha1().Subscriptions() + op.lister.OperatorsV1alpha1().RegisterSubscriptionLister(metav1.NamespaceAll, subInformer.Lister()) + if err := subInformer.Informer().AddIndexers(cache.Indexers{index.PresentCatalogIndexFuncKey: index.PresentCatalogIndexFunc}); err != nil { + return nil, err + } + subIndexer := subInformer.Informer().GetIndexer() + op.catalogSubscriberIndexer[metav1.NamespaceAll] = subIndexer + + subQueue := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "subs") + op.subQueueSet.Set(metav1.NamespaceAll, subQueue) + subSyncer, err := subscription.NewSyncer( + ctx, + subscription.WithLogger(op.logger), + subscription.WithClient(op.client), + subscription.WithOperatorLister(op.lister), + subscription.WithSubscriptionInformer(subInformer.Informer()), + subscription.WithCatalogInformer(catsrcInformer.Informer()), + subscription.WithInstallPlanInformer(ipInformer.Informer()), + subscription.WithSubscriptionQueue(subQueue), + subscription.WithAppendedReconcilers(subscription.ReconcilerFromLegacySyncHandler(op.syncSubscriptions, nil)), + subscription.WithRegistryReconcilerFactory(op.reconciler), + subscription.WithGlobalCatalogNamespace(op.namespace), + ) + if err != nil { + return nil, err + } + subQueueInformer, err := queueinformer.NewQueueInformer( + ctx, + queueinformer.WithMetricsProvider(metrics.NewMetricsSubscription(op.lister.OperatorsV1alpha1().SubscriptionLister())), + queueinformer.WithLogger(op.logger), + queueinformer.WithQueue(subQueue), + queueinformer.WithInformer(subInformer.Informer()), + queueinformer.WithSyncer(subSyncer), + ) + if err != nil { + return nil, err + } + if err := op.RegisterQueueInformer(subQueueInformer); err != nil { + return nil, err + } + + // Wire k8s sharedIndexInformers + k8sInformerFactory := informers.NewSharedInformerFactoryWithOptions(op.opClient.KubernetesInterface(), resyncPeriod()) + sharedIndexInformers := 
[]cache.SharedIndexInformer{} + + // Wire Roles + roleInformer := k8sInformerFactory.Rbac().V1().Roles() + op.lister.RbacV1().RegisterRoleLister(metav1.NamespaceAll, roleInformer.Lister()) + sharedIndexInformers = append(sharedIndexInformers, roleInformer.Informer()) + + // Wire RoleBindings + roleBindingInformer := k8sInformerFactory.Rbac().V1().RoleBindings() + op.lister.RbacV1().RegisterRoleBindingLister(metav1.NamespaceAll, roleBindingInformer.Lister()) + sharedIndexInformers = append(sharedIndexInformers, roleBindingInformer.Informer()) + + // Wire ServiceAccounts + serviceAccountInformer := k8sInformerFactory.Core().V1().ServiceAccounts() + op.lister.CoreV1().RegisterServiceAccountLister(metav1.NamespaceAll, serviceAccountInformer.Lister()) + sharedIndexInformers = append(sharedIndexInformers, serviceAccountInformer.Informer()) + + // Wire Services + serviceInformer := k8sInformerFactory.Core().V1().Services() + op.lister.CoreV1().RegisterServiceLister(metav1.NamespaceAll, serviceInformer.Lister()) + sharedIndexInformers = append(sharedIndexInformers, serviceInformer.Informer()) + + // Wire Pods for CatalogSource + catsrcReq, err := labels.NewRequirement(reconciler.CatalogSourceLabelKey, selection.Exists, nil) + if err != nil { + return nil, err + } + + csPodLabels := labels.NewSelector() + csPodLabels = csPodLabels.Add(*catsrcReq) + csPodInformer := informers.NewSharedInformerFactoryWithOptions(op.opClient.KubernetesInterface(), resyncPeriod(), informers.WithTweakListOptions(func(options *metav1.ListOptions) { + options.LabelSelector = csPodLabels.String() + })).Core().V1().Pods() + op.lister.CoreV1().RegisterPodLister(metav1.NamespaceAll, csPodInformer.Lister()) + sharedIndexInformers = append(sharedIndexInformers, csPodInformer.Informer()) + + // Wire Pods for BundleUnpack job + buReq, err := labels.NewRequirement(bundle.BundleUnpackPodLabel, selection.Exists, nil) + if err != nil { + return nil, err + } + + buPodLabels := labels.NewSelector() + 
buPodLabels = buPodLabels.Add(*buReq) + buPodInformer := informers.NewSharedInformerFactoryWithOptions(op.opClient.KubernetesInterface(), resyncPeriod(), informers.WithTweakListOptions(func(options *metav1.ListOptions) { + options.LabelSelector = buPodLabels.String() + })).Core().V1().Pods() + sharedIndexInformers = append(sharedIndexInformers, buPodInformer.Informer()) + + // Wire ConfigMaps + configMapInformer := informers.NewSharedInformerFactoryWithOptions(op.opClient.KubernetesInterface(), resyncPeriod(), informers.WithTweakListOptions(func(options *metav1.ListOptions) { + options.LabelSelector = install.OLMManagedLabelKey + })).Core().V1().ConfigMaps() + op.lister.CoreV1().RegisterConfigMapLister(metav1.NamespaceAll, configMapInformer.Lister()) + sharedIndexInformers = append(sharedIndexInformers, configMapInformer.Informer()) + + // Wire Jobs + jobInformer := k8sInformerFactory.Batch().V1().Jobs() + sharedIndexInformers = append(sharedIndexInformers, jobInformer.Informer()) + + // Generate and register QueueInformers for k8s resources + k8sSyncer := queueinformer.LegacySyncHandler(op.syncObject).ToSyncerWithDelete(op.handleDeletion) + for _, informer := range sharedIndexInformers { + queueInformer, err := queueinformer.NewQueueInformer( + ctx, + queueinformer.WithLogger(op.logger), + queueinformer.WithInformer(informer), + queueinformer.WithSyncer(k8sSyncer), + ) + if err != nil { + return nil, err + } + + if err := op.RegisterQueueInformer(queueInformer); err != nil { + return nil, err + } + } + + // Setup the BundleUnpacker + op.bundleUnpacker, err = bundle.NewConfigmapUnpacker( + bundle.WithLogger(op.logger), + bundle.WithClient(op.opClient.KubernetesInterface()), + bundle.WithCatalogSourceLister(catsrcInformer.Lister()), + bundle.WithConfigMapLister(configMapInformer.Lister()), + bundle.WithJobLister(jobInformer.Lister()), + bundle.WithPodLister(buPodInformer.Lister()), + bundle.WithRoleLister(roleInformer.Lister()), + 
bundle.WithRoleBindingLister(roleBindingInformer.Lister()), + bundle.WithOPMImage(opmImage), + bundle.WithUtilImage(utilImage), + bundle.WithNow(op.now), + bundle.WithUnpackTimeout(op.bundleUnpackTimeout), + bundle.WithUserID(workloadUserID), + ) + if err != nil { + return nil, err + } + + // Register CustomResourceDefinition QueueInformer + crdInformer := extinf.NewSharedInformerFactory(op.opClient.ApiextensionsInterface(), resyncPeriod()).Apiextensions().V1().CustomResourceDefinitions() + op.lister.APIExtensionsV1().RegisterCustomResourceDefinitionLister(crdInformer.Lister()) + crdQueueInformer, err := queueinformer.NewQueueInformer( + ctx, + queueinformer.WithLogger(op.logger), + queueinformer.WithInformer(crdInformer.Informer()), + queueinformer.WithSyncer(queueinformer.LegacySyncHandler(op.syncObject).ToSyncerWithDelete(op.handleDeletion)), + ) + if err != nil { + return nil, err + } + if err := op.RegisterQueueInformer(crdQueueInformer); err != nil { + return nil, err + } + + // Namespace sync for resolving subscriptions + namespaceInformer := informers.NewSharedInformerFactory(op.opClient.KubernetesInterface(), resyncPeriod()).Core().V1().Namespaces() + op.lister.CoreV1().RegisterNamespaceLister(namespaceInformer.Lister()) + op.nsResolveQueue = workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "resolver") + namespaceQueueInformer, err := queueinformer.NewQueueInformer( + ctx, + queueinformer.WithLogger(op.logger), + queueinformer.WithQueue(op.nsResolveQueue), + queueinformer.WithInformer(namespaceInformer.Informer()), + queueinformer.WithSyncer(queueinformer.LegacySyncHandler(op.syncResolvingNamespace).ToSyncer()), + ) + if err != nil { + return nil, err + } + if err := op.RegisterQueueInformer(namespaceQueueInformer); err != nil { + return nil, err + } + + op.sources.Start(context.Background()) + + return op, nil } func (o *Operator) now() metav1.Time { - return metav1.NewTime(o.clock.Now().UTC()) + return 
metav1.NewTime(o.clock.Now().UTC()) } func (o *Operator) syncSourceState(state grpc.SourceState) { - o.sourcesLastUpdate.Set(o.now().Time) - - o.logger.Debugf("state.Key.Namespace=%s state.Key.Name=%s state.State=%s", state.Key.Namespace, state.Key.Name, state.State.String()) - metrics.RegisterCatalogSourceState(state.Key.Name, state.Key.Namespace, state.State) - - switch state.State { - case connectivity.Ready: - o.sourceInvalidator.Invalidate(resolvercache.SourceKey(state.Key)) - if o.namespace == state.Key.Namespace { - namespaces, err := index.CatalogSubscriberNamespaces(o.catalogSubscriberIndexer, - state.Key.Name, state.Key.Namespace) - - if err == nil { - for ns := range namespaces { - o.nsResolveQueue.Add(ns) - } - } - } - - o.nsResolveQueue.Add(state.Key.Namespace) - } - if err := o.catsrcQueueSet.Requeue(state.Key.Namespace, state.Key.Name); err != nil { - o.logger.WithError(err).Info("couldn't requeue catalogsource from catalog status change") - } + o.sourcesLastUpdate.Set(o.now().Time) + + o.logger.Debugf("state.Key.Namespace=%s state.Key.Name=%s state.State=%s", state.Key.Namespace, state.Key.Name, state.State.String()) + metrics.RegisterCatalogSourceState(state.Key.Name, state.Key.Namespace, state.State) + + switch state.State { + case connectivity.Ready: + o.sourceInvalidator.Invalidate(resolvercache.SourceKey(state.Key)) + if o.namespace == state.Key.Namespace { + namespaces, err := index.CatalogSubscriberNamespaces(o.catalogSubscriberIndexer, + state.Key.Name, state.Key.Namespace) + + if err == nil { + for ns := range namespaces { + o.nsResolveQueue.Add(ns) + } + } + } + + o.nsResolveQueue.Add(state.Key.Namespace) + } + if err := o.catsrcQueueSet.Requeue(state.Key.Namespace, state.Key.Name); err != nil { + o.logger.WithError(err).Info("couldn't requeue catalogsource from catalog status change") + } } func (o *Operator) requeueOwners(obj metav1.Object) { - namespace := obj.GetNamespace() - logger := o.logger.WithFields(logrus.Fields{ - "name": 
obj.GetName(), - "namespace": namespace, - }) - - for _, owner := range obj.GetOwnerReferences() { - var queueSet *queueinformer.ResourceQueueSet - switch kind := owner.Kind; kind { - case v1alpha1.CatalogSourceKind: - if err := o.catsrcQueueSet.Requeue(namespace, owner.Name); err != nil { - logger.Warn(err.Error()) - } - queueSet = o.catsrcQueueSet - case v1alpha1.SubscriptionKind: - if err := o.catsrcQueueSet.Requeue(namespace, owner.Name); err != nil { - logger.Warn(err.Error()) - } - queueSet = o.subQueueSet - default: - logger.WithField("kind", kind).Trace("untracked owner kind") - } - - if queueSet != nil { - logger.WithField("ref", owner).Trace("requeuing owner") - if err := queueSet.Requeue(namespace, owner.Name); err != nil { - logger.Warn(err.Error()) - } - } - } + namespace := obj.GetNamespace() + logger := o.logger.WithFields(logrus.Fields{ + "name": obj.GetName(), + "namespace": namespace, + }) + + for _, owner := range obj.GetOwnerReferences() { + var queueSet *queueinformer.ResourceQueueSet + switch kind := owner.Kind; kind { + case v1alpha1.CatalogSourceKind: + if err := o.catsrcQueueSet.Requeue(namespace, owner.Name); err != nil { + logger.Warn(err.Error()) + } + queueSet = o.catsrcQueueSet + case v1alpha1.SubscriptionKind: + if err := o.catsrcQueueSet.Requeue(namespace, owner.Name); err != nil { + logger.Warn(err.Error()) + } + queueSet = o.subQueueSet + default: + logger.WithField("kind", kind).Trace("untracked owner kind") + } + + if queueSet != nil { + logger.WithField("ref", owner).Trace("requeuing owner") + if err := queueSet.Requeue(namespace, owner.Name); err != nil { + logger.Warn(err.Error()) + } + } + } } func (o *Operator) syncObject(obj interface{}) (syncError error) { - // Assert as metav1.Object - metaObj, ok := obj.(metav1.Object) - if !ok { - syncError = errors.New("casting to metav1 object failed") - o.logger.Warn(syncError.Error()) - return - } + // Assert as metav1.Object + metaObj, ok := obj.(metav1.Object) + if !ok { + 
syncError = errors.New("casting to metav1 object failed") + o.logger.Warn(syncError.Error()) + return + } - o.requeueOwners(metaObj) + o.requeueOwners(metaObj) - return o.triggerInstallPlanRetry(obj) + return o.triggerInstallPlanRetry(obj) } func (o *Operator) handleDeletion(obj interface{}) { - metaObj, ok := obj.(metav1.Object) - if !ok { - tombstone, ok := obj.(cache.DeletedFinalStateUnknown) - if !ok { - utilruntime.HandleError(fmt.Errorf("couldn't get object from tombstone %#v", obj)) - return - } - - metaObj, ok = tombstone.Obj.(metav1.Object) - if !ok { - utilruntime.HandleError(fmt.Errorf("tombstone contained object that is not a metav1 object %#v", obj)) - return - } - } - - o.logger.WithFields(logrus.Fields{ - "name": metaObj.GetName(), - "namespace": metaObj.GetNamespace(), - }).Debug("handling object deletion") - - o.requeueOwners(metaObj) + metaObj, ok := obj.(metav1.Object) + if !ok { + tombstone, ok := obj.(cache.DeletedFinalStateUnknown) + if !ok { + utilruntime.HandleError(fmt.Errorf("couldn't get object from tombstone %#v", obj)) + return + } + + metaObj, ok = tombstone.Obj.(metav1.Object) + if !ok { + utilruntime.HandleError(fmt.Errorf("tombstone contained object that is not a metav1 object %#v", obj)) + return + } + } + + o.logger.WithFields(logrus.Fields{ + "name": metaObj.GetName(), + "namespace": metaObj.GetNamespace(), + }).Debug("handling object deletion") + + o.requeueOwners(metaObj) } func (o *Operator) handleCatSrcDeletion(obj interface{}) { - catsrc, ok := obj.(metav1.Object) - if !ok { - if !ok { - tombstone, ok := obj.(cache.DeletedFinalStateUnknown) - if !ok { - utilruntime.HandleError(fmt.Errorf("couldn't get object from tombstone %#v", obj)) - return - } - - catsrc, ok = tombstone.Obj.(metav1.Object) - if !ok { - utilruntime.HandleError(fmt.Errorf("tombstone contained object that is not a Namespace %#v", obj)) - return - } - } - } - sourceKey := registry.CatalogKey{Name: catsrc.GetName(), Namespace: catsrc.GetNamespace()} - if err 
:= o.sources.Remove(sourceKey); err != nil { - o.logger.WithError(err).Warn("error closing client") - } - o.logger.WithField("source", sourceKey).Info("removed client for deleted catalogsource") - - metrics.DeleteCatalogSourceStateMetric(catsrc.GetName(), catsrc.GetNamespace()) + catsrc, ok := obj.(metav1.Object) + if !ok { + if !ok { + tombstone, ok := obj.(cache.DeletedFinalStateUnknown) + if !ok { + utilruntime.HandleError(fmt.Errorf("couldn't get object from tombstone %#v", obj)) + return + } + + catsrc, ok = tombstone.Obj.(metav1.Object) + if !ok { + utilruntime.HandleError(fmt.Errorf("tombstone contained object that is not a Namespace %#v", obj)) + return + } + } + } + sourceKey := registry.CatalogKey{Name: catsrc.GetName(), Namespace: catsrc.GetNamespace()} + if err := o.sources.Remove(sourceKey); err != nil { + o.logger.WithError(err).Warn("error closing client") + } + o.logger.WithField("source", sourceKey).Info("removed client for deleted catalogsource") + + metrics.DeleteCatalogSourceStateMetric(catsrc.GetName(), catsrc.GetNamespace()) } func validateSourceType(logger *logrus.Entry, in *v1alpha1.CatalogSource) (out *v1alpha1.CatalogSource, continueSync bool, _ error) { - out = in - var err error - switch sourceType := out.Spec.SourceType; sourceType { - case v1alpha1.SourceTypeInternal, v1alpha1.SourceTypeConfigmap: - if out.Spec.ConfigMap == "" { - err = fmt.Errorf("configmap name unset: must be set for sourcetype: %s", sourceType) - } - case v1alpha1.SourceTypeGrpc: - if out.Spec.Image == "" && out.Spec.Address == "" { - err = fmt.Errorf("image and address unset: at least one must be set for sourcetype: %s", sourceType) - } - default: - err = fmt.Errorf("unknown sourcetype: %s", sourceType) - } - if err != nil { - out.SetError(v1alpha1.CatalogSourceSpecInvalidError, err) - return - } - - // The sourceType is valid, clear all status (other than status conditions array) if there's existing invalid spec reason - if out.Status.Reason == 
v1alpha1.CatalogSourceSpecInvalidError { - out.Status = v1alpha1.CatalogSourceStatus{ - Conditions: out.Status.Conditions, - } - } - continueSync = true - - return + out = in + var err error + switch sourceType := out.Spec.SourceType; sourceType { + case v1alpha1.SourceTypeInternal, v1alpha1.SourceTypeConfigmap: + if out.Spec.ConfigMap == "" { + err = fmt.Errorf("configmap name unset: must be set for sourcetype: %s", sourceType) + } + case v1alpha1.SourceTypeGrpc: + if out.Spec.Image == "" && out.Spec.Address == "" { + err = fmt.Errorf("image and address unset: at least one must be set for sourcetype: %s", sourceType) + } + default: + err = fmt.Errorf("unknown sourcetype: %s", sourceType) + } + if err != nil { + out.SetError(v1alpha1.CatalogSourceSpecInvalidError, err) + return + } + + // The sourceType is valid, clear all status (other than status conditions array) if there's existing invalid spec reason + if out.Status.Reason == v1alpha1.CatalogSourceSpecInvalidError { + out.Status = v1alpha1.CatalogSourceStatus{ + Conditions: out.Status.Conditions, + } + } + continueSync = true + + return } func (o *Operator) syncConfigMap(logger *logrus.Entry, in *v1alpha1.CatalogSource) (out *v1alpha1.CatalogSource, continueSync bool, syncError error) { - out = in - if !(in.Spec.SourceType == v1alpha1.SourceTypeInternal || in.Spec.SourceType == v1alpha1.SourceTypeConfigmap) { - continueSync = true - return - } - - out = in.DeepCopy() - - logger.Debug("checking catsrc configmap state") - - var updateLabel bool - // Get the catalog source's config map - configMap, err := o.lister.CoreV1().ConfigMapLister().ConfigMaps(in.GetNamespace()).Get(in.Spec.ConfigMap) - // Attempt to look up the CM via api call if there is a cache miss - if apierrors.IsNotFound(err) { - configMap, err = o.opClient.KubernetesInterface().CoreV1().ConfigMaps(in.GetNamespace()).Get(context.TODO(), in.Spec.ConfigMap, metav1.GetOptions{}) - // Found cm in the cluster, add managed label to configmap - if err == 
nil { - labels := configMap.GetLabels() - if labels == nil { - labels = make(map[string]string) - } - - labels[install.OLMManagedLabelKey] = "false" - configMap.SetLabels(labels) - updateLabel = true - } - } - if err != nil { - syncError = fmt.Errorf("failed to get catalog config map %s: %s", in.Spec.ConfigMap, err) - out.SetError(v1alpha1.CatalogSourceConfigMapError, syncError) - return - } - - if wasOwned := ownerutil.EnsureOwner(configMap, in); !wasOwned || updateLabel { - configMap, err = o.opClient.KubernetesInterface().CoreV1().ConfigMaps(configMap.GetNamespace()).Update(context.TODO(), configMap, metav1.UpdateOptions{}) - if err != nil { - syncError = fmt.Errorf("unable to write owner onto catalog source configmap - %v", err) - out.SetError(v1alpha1.CatalogSourceConfigMapError, syncError) - return - } - - logger.Debug("adopted configmap") - } - - if in.Status.ConfigMapResource == nil || !in.Status.ConfigMapResource.IsAMatch(&configMap.ObjectMeta) { - logger.Debug("updating catsrc configmap state") - // configmap ref nonexistent or updated, write out the new configmap ref to status and exit - out.Status.ConfigMapResource = &v1alpha1.ConfigMapResourceReference{ - Name: configMap.GetName(), - Namespace: configMap.GetNamespace(), - UID: configMap.GetUID(), - ResourceVersion: configMap.GetResourceVersion(), - LastUpdateTime: o.now(), - } - - return - } - - continueSync = true - return + out = in + if !(in.Spec.SourceType == v1alpha1.SourceTypeInternal || in.Spec.SourceType == v1alpha1.SourceTypeConfigmap) { + continueSync = true + return + } + + out = in.DeepCopy() + + logger.Debug("checking catsrc configmap state") + + var updateLabel bool + // Get the catalog source's config map + configMap, err := o.lister.CoreV1().ConfigMapLister().ConfigMaps(in.GetNamespace()).Get(in.Spec.ConfigMap) + // Attempt to look up the CM via api call if there is a cache miss + if apierrors.IsNotFound(err) { + configMap, err = 
o.opClient.KubernetesInterface().CoreV1().ConfigMaps(in.GetNamespace()).Get(context.TODO(), in.Spec.ConfigMap, metav1.GetOptions{}) + // Found cm in the cluster, add managed label to configmap + if err == nil { + labels := configMap.GetLabels() + if labels == nil { + labels = make(map[string]string) + } + + labels[install.OLMManagedLabelKey] = "false" + configMap.SetLabels(labels) + updateLabel = true + } + } + if err != nil { + syncError = fmt.Errorf("failed to get catalog config map %s: %s", in.Spec.ConfigMap, err) + out.SetError(v1alpha1.CatalogSourceConfigMapError, syncError) + return + } + + if wasOwned := ownerutil.EnsureOwner(configMap, in); !wasOwned || updateLabel { + configMap, err = o.opClient.KubernetesInterface().CoreV1().ConfigMaps(configMap.GetNamespace()).Update(context.TODO(), configMap, metav1.UpdateOptions{}) + if err != nil { + syncError = fmt.Errorf("unable to write owner onto catalog source configmap - %v", err) + out.SetError(v1alpha1.CatalogSourceConfigMapError, syncError) + return + } + + logger.Debug("adopted configmap") + } + + if in.Status.ConfigMapResource == nil || !in.Status.ConfigMapResource.IsAMatch(&configMap.ObjectMeta) { + logger.Debug("updating catsrc configmap state") + // configmap ref nonexistent or updated, write out the new configmap ref to status and exit + out.Status.ConfigMapResource = &v1alpha1.ConfigMapResourceReference{ + Name: configMap.GetName(), + Namespace: configMap.GetNamespace(), + UID: configMap.GetUID(), + ResourceVersion: configMap.GetResourceVersion(), + LastUpdateTime: o.now(), + } + + return + } + + continueSync = true + return } func (o *Operator) syncRegistryServer(logger *logrus.Entry, in *v1alpha1.CatalogSource) (out *v1alpha1.CatalogSource, continueSync bool, syncError error) { - out = in.DeepCopy() - - sourceKey := registry.CatalogKey{Name: in.GetName(), Namespace: in.GetNamespace()} - srcReconciler := o.reconciler.ReconcilerForSource(in) - if srcReconciler == nil { - // TODO: Add failure status on 
catalogsource and remove from sources - syncError = fmt.Errorf("no reconciler for source type %s", in.Spec.SourceType) - out.SetError(v1alpha1.CatalogSourceRegistryServerError, syncError) - return - } - - healthy, err := srcReconciler.CheckRegistryServer(in) - if err != nil { - syncError = err - out.SetError(v1alpha1.CatalogSourceRegistryServerError, syncError) - return - } - - logger.Debugf("check registry server healthy: %t", healthy) - - if healthy && in.Status.RegistryServiceStatus != nil { - logger.Debug("registry state good") - continueSync = true - // return here if catalog does not have polling enabled - if !out.Poll() { - return - } - } - - // Registry pod hasn't been created or hasn't been updated since the last configmap update, recreate it - logger.Debug("ensuring registry server") - - err = srcReconciler.EnsureRegistryServer(out) - if err != nil { - if _, ok := err.(reconciler.UpdateNotReadyErr); ok { - logger.Debug("requeueing registry server for catalog update check: update pod not yet ready") - o.catsrcQueueSet.RequeueAfter(out.GetNamespace(), out.GetName(), reconciler.CatalogPollingRequeuePeriod) - return - } - syncError = fmt.Errorf("couldn't ensure registry server - %v", err) - out.SetError(v1alpha1.CatalogSourceRegistryServerError, syncError) - return - } - - logger.Debug("ensured registry server") - - // requeue the catalog sync based on the polling interval, for accurate syncs of catalogs with polling enabled - if out.Spec.UpdateStrategy != nil && out.Spec.UpdateStrategy.RegistryPoll != nil { - if out.Spec.UpdateStrategy.Interval == nil { - syncError = fmt.Errorf("empty polling interval; cannot requeue registry server sync without a provided polling interval") - out.SetError(v1alpha1.CatalogSourceIntervalInvalidError, syncError) - return - } - if out.Spec.UpdateStrategy.RegistryPoll.ParsingError != "" && out.Status.Reason != v1alpha1.CatalogSourceIntervalInvalidError { - out.SetError(v1alpha1.CatalogSourceIntervalInvalidError, 
fmt.Errorf(out.Spec.UpdateStrategy.RegistryPoll.ParsingError)) - } - logger.Debugf("requeuing registry server sync based on polling interval %s", out.Spec.UpdateStrategy.Interval.Duration.String()) - resyncPeriod := reconciler.SyncRegistryUpdateInterval(out, time.Now()) - o.catsrcQueueSet.RequeueAfter(out.GetNamespace(), out.GetName(), queueinformer.ResyncWithJitter(resyncPeriod, 0.1)()) - return - } - - if err := o.sources.Remove(sourceKey); err != nil { - o.logger.WithError(err).Debug("error closing client connection") - } - - return + out = in.DeepCopy() + + sourceKey := registry.CatalogKey{Name: in.GetName(), Namespace: in.GetNamespace()} + srcReconciler := o.reconciler.ReconcilerForSource(in) + if srcReconciler == nil { + // TODO: Add failure status on catalogsource and remove from sources + syncError = fmt.Errorf("no reconciler for source type %s", in.Spec.SourceType) + out.SetError(v1alpha1.CatalogSourceRegistryServerError, syncError) + return + } + + healthy, err := srcReconciler.CheckRegistryServer(in) + if err != nil { + syncError = err + out.SetError(v1alpha1.CatalogSourceRegistryServerError, syncError) + return + } + + logger.Debugf("check registry server healthy: %t", healthy) + + if healthy && in.Status.RegistryServiceStatus != nil { + logger.Debug("registry state good") + continueSync = true + // return here if catalog does not have polling enabled + if !out.Poll() { + return + } + } + + // Registry pod hasn't been created or hasn't been updated since the last configmap update, recreate it + logger.Debug("ensuring registry server") + + err = srcReconciler.EnsureRegistryServer(out) + if err != nil { + if _, ok := err.(reconciler.UpdateNotReadyErr); ok { + logger.Debug("requeueing registry server for catalog update check: update pod not yet ready") + o.catsrcQueueSet.RequeueAfter(out.GetNamespace(), out.GetName(), reconciler.CatalogPollingRequeuePeriod) + return + } + syncError = fmt.Errorf("couldn't ensure registry server - %v", err) + 
out.SetError(v1alpha1.CatalogSourceRegistryServerError, syncError) + return + } + + logger.Debug("ensured registry server") + + // requeue the catalog sync based on the polling interval, for accurate syncs of catalogs with polling enabled + if out.Spec.UpdateStrategy != nil && out.Spec.UpdateStrategy.RegistryPoll != nil { + if out.Spec.UpdateStrategy.Interval == nil { + syncError = fmt.Errorf("empty polling interval; cannot requeue registry server sync without a provided polling interval") + out.SetError(v1alpha1.CatalogSourceIntervalInvalidError, syncError) + return + } + if out.Spec.UpdateStrategy.RegistryPoll.ParsingError != "" && out.Status.Reason != v1alpha1.CatalogSourceIntervalInvalidError { + out.SetError(v1alpha1.CatalogSourceIntervalInvalidError, fmt.Errorf(out.Spec.UpdateStrategy.RegistryPoll.ParsingError)) + } + logger.Debugf("requeuing registry server sync based on polling interval %s", out.Spec.UpdateStrategy.Interval.Duration.String()) + resyncPeriod := reconciler.SyncRegistryUpdateInterval(out, time.Now()) + o.catsrcQueueSet.RequeueAfter(out.GetNamespace(), out.GetName(), queueinformer.ResyncWithJitter(resyncPeriod, 0.1)()) + return + } + + if err := o.sources.Remove(sourceKey); err != nil { + o.logger.WithError(err).Debug("error closing client connection") + } + + return } func (o *Operator) syncConnection(logger *logrus.Entry, in *v1alpha1.CatalogSource) (out *v1alpha1.CatalogSource, continueSync bool, syncError error) { - out = in.DeepCopy() - - sourceKey := registry.CatalogKey{Name: in.GetName(), Namespace: in.GetNamespace()} - // update operator's view of sources - now := o.now() - address := in.Address() - - connectFunc := func() (source *grpc.SourceMeta, connErr error) { - newSource, err := o.sources.Add(sourceKey, address) - if err != nil { - connErr = fmt.Errorf("couldn't connect to registry - %v", err) - return - } - - if newSource == nil { - connErr = errors.New("couldn't connect to registry") - return - } - - source = 
&newSource.SourceMeta - return - } - - updateConnectionStateFunc := func(out *v1alpha1.CatalogSource, source *grpc.SourceMeta) { - out.Status.GRPCConnectionState = &v1alpha1.GRPCConnectionState{ - Address: source.Address, - LastObservedState: source.ConnectionState.String(), - LastConnectTime: source.LastConnect, - } - } - - source := o.sources.GetMeta(sourceKey) - if source == nil { - source, syncError = connectFunc() - if syncError != nil { - out.SetError(v1alpha1.CatalogSourceRegistryServerError, syncError) - return - } - - // Set connection status and return. - updateConnectionStateFunc(out, source) - return - } - - if source.Address != address { - source, syncError = connectFunc() - if syncError != nil { - out.SetError(v1alpha1.CatalogSourceRegistryServerError, syncError) - return - } - - // Set connection status and return. - updateConnectionStateFunc(out, source) - } - - // GRPCConnectionState update must fail before - if out.Status.GRPCConnectionState == nil { - updateConnectionStateFunc(out, source) - } - - // connection is already good, but we need to update the sync time - if o.sourcesLastUpdate.After(out.Status.GRPCConnectionState.LastConnectTime.Time) { - // Set connection status and return. 
- out.Status.GRPCConnectionState.LastConnectTime = now - out.Status.GRPCConnectionState.LastObservedState = source.ConnectionState.String() - out.Status.GRPCConnectionState.Address = source.Address - } - - return + out = in.DeepCopy() + + sourceKey := registry.CatalogKey{Name: in.GetName(), Namespace: in.GetNamespace()} + // update operator's view of sources + now := o.now() + address := in.Address() + + connectFunc := func() (source *grpc.SourceMeta, connErr error) { + newSource, err := o.sources.Add(sourceKey, address) + if err != nil { + connErr = fmt.Errorf("couldn't connect to registry - %v", err) + return + } + + if newSource == nil { + connErr = errors.New("couldn't connect to registry") + return + } + + source = &newSource.SourceMeta + return + } + + updateConnectionStateFunc := func(out *v1alpha1.CatalogSource, source *grpc.SourceMeta) { + out.Status.GRPCConnectionState = &v1alpha1.GRPCConnectionState{ + Address: source.Address, + LastObservedState: source.ConnectionState.String(), + LastConnectTime: source.LastConnect, + } + } + + source := o.sources.GetMeta(sourceKey) + if source == nil { + source, syncError = connectFunc() + if syncError != nil { + out.SetError(v1alpha1.CatalogSourceRegistryServerError, syncError) + return + } + + // Set connection status and return. + updateConnectionStateFunc(out, source) + return + } + + if source.Address != address { + source, syncError = connectFunc() + if syncError != nil { + out.SetError(v1alpha1.CatalogSourceRegistryServerError, syncError) + return + } + + // Set connection status and return. + updateConnectionStateFunc(out, source) + } + + // GRPCConnectionState update must fail before + if out.Status.GRPCConnectionState == nil { + updateConnectionStateFunc(out, source) + } + + // connection is already good, but we need to update the sync time + if o.sourcesLastUpdate.After(out.Status.GRPCConnectionState.LastConnectTime.Time) { + // Set connection status and return. 
+ out.Status.GRPCConnectionState.LastConnectTime = now + out.Status.GRPCConnectionState.LastObservedState = source.ConnectionState.String() + out.Status.GRPCConnectionState.Address = source.Address + } + + return } func (o *Operator) syncCatalogSources(obj interface{}) (syncError error) { - catsrc, ok := obj.(*v1alpha1.CatalogSource) - if !ok { - o.logger.Debugf("wrong type: %#v", obj) - syncError = nil - return - } - - logger := o.logger.WithFields(logrus.Fields{ - "source": catsrc.GetName(), - "id": queueinformer.NewLoopID(), - }) - logger.Debug("syncing catsrc") - - syncFunc := func(in *v1alpha1.CatalogSource, chain []CatalogSourceSyncFunc) (out *v1alpha1.CatalogSource, syncErr error) { - out = in - for _, syncFunc := range chain { - cont := false - out, cont, syncErr = syncFunc(logger, in) - if syncErr != nil { - return - } - - if !cont { - return - } - - in = out - } - - return - } - - equalFunc := func(a, b *v1alpha1.CatalogSourceStatus) bool { - return reflect.DeepEqual(a, b) - } - - chain := []CatalogSourceSyncFunc{ - validateSourceType, - o.syncConfigMap, - o.syncRegistryServer, - o.syncConnection, - } - - in := catsrc.DeepCopy() - in.SetError("", nil) - - out, syncError := syncFunc(in, chain) - - if out == nil { - return - } - - if equalFunc(&catsrc.Status, &out.Status) { - return - } - - updateErr := catalogsource.UpdateStatus(logger, o.client, out) - if syncError == nil && updateErr != nil { - syncError = updateErr - } - - return + catsrc, ok := obj.(*v1alpha1.CatalogSource) + if !ok { + o.logger.Debugf("wrong type: %#v", obj) + syncError = nil + return + } + + logger := o.logger.WithFields(logrus.Fields{ + "source": catsrc.GetName(), + "id": queueinformer.NewLoopID(), + }) + logger.Debug("syncing catsrc") + + syncFunc := func(in *v1alpha1.CatalogSource, chain []CatalogSourceSyncFunc) (out *v1alpha1.CatalogSource, syncErr error) { + out = in + for _, syncFunc := range chain { + cont := false + out, cont, syncErr = syncFunc(logger, in) + if syncErr != 
nil { + return + } + + if !cont { + return + } + + in = out + } + + return + } + + equalFunc := func(a, b *v1alpha1.CatalogSourceStatus) bool { + return reflect.DeepEqual(a, b) + } + + chain := []CatalogSourceSyncFunc{ + validateSourceType, + o.syncConfigMap, + o.syncRegistryServer, + o.syncConnection, + } + + in := catsrc.DeepCopy() + in.SetError("", nil) + + out, syncError := syncFunc(in, chain) + + if out == nil { + return + } + + if equalFunc(&catsrc.Status, &out.Status) { + return + } + + updateErr := catalogsource.UpdateStatus(logger, o.client, out) + if syncError == nil && updateErr != nil { + syncError = updateErr + } + + return } func (o *Operator) syncResolvingNamespace(obj interface{}) error { - ns, ok := obj.(*corev1.Namespace) - if !ok { - o.logger.Debugf("wrong type: %#v", obj) - return fmt.Errorf("casting Namespace failed") - } - namespace := ns.GetName() - - logger := o.logger.WithFields(logrus.Fields{ - "namespace": namespace, - "id": queueinformer.NewLoopID(), - }) - - o.gcInstallPlans(logger, namespace) - - // get the set of sources that should be used for resolution and best-effort get their connections working - logger.Debug("resolving sources") - - logger.Debug("checking if subscriptions need update") - - subs, err := o.listSubscriptions(namespace) - if err != nil { - logger.WithError(err).Debug("couldn't list subscriptions") - return err - } - - // If there are no subscriptions, don't attempt to sync the namespace. 
- if len(subs) == 0 { - logger.Debug(fmt.Sprintf("No subscriptions were found in namespace %v", namespace)) - return nil - } - - ogLister := o.lister.OperatorsV1().OperatorGroupLister().OperatorGroups(namespace) - failForwardEnabled, err := resolver.IsFailForwardEnabled(ogLister) - if err != nil { - return err - } - - unpackTimeout, err := bundle.OperatorGroupBundleUnpackTimeout(ogLister) - if err != nil { - return err - } - - minUnpackRetryInterval, err := bundle.OperatorGroupBundleUnpackRetryInterval(ogLister) - if err != nil { - return err - } - - // TODO: parallel - maxGeneration := 0 - subscriptionUpdated := false - for i, sub := range subs { - logger := logger.WithFields(logrus.Fields{ - "sub": sub.GetName(), - "source": sub.Spec.CatalogSource, - "pkg": sub.Spec.Package, - "channel": sub.Spec.Channel, - }) - - if sub.Status.InstallPlanGeneration > maxGeneration { - maxGeneration = sub.Status.InstallPlanGeneration - } - - // ensure the installplan reference is correct - sub, changedIP, err := o.ensureSubscriptionInstallPlanState(logger, sub, failForwardEnabled) - if err != nil { - logger.Debugf("error ensuring installplan state: %v", err) - return err - } - subscriptionUpdated = subscriptionUpdated || changedIP - - // record the current state of the desired corresponding CSV in the status. no-op if we don't know the csv yet. 
- sub, changedCSV, err := o.ensureSubscriptionCSVState(logger, sub, failForwardEnabled) - if err != nil { - logger.Debugf("error recording current state of CSV in status: %v", err) - return err - } - - subscriptionUpdated = subscriptionUpdated || changedCSV - subs[i] = sub - } - if subscriptionUpdated { - logger.Debug("subscriptions were updated, wait for a new resolution") - return nil - } - - shouldUpdate := false - for _, sub := range subs { - shouldUpdate = shouldUpdate || !o.nothingToUpdate(logger, sub) - } - if !shouldUpdate { - logger.Debug("all subscriptions up to date") - return nil - } - - logger.Debug("resolving subscriptions in namespace") - - // resolve a set of steps to apply to a cluster, a set of subscriptions to create/update, and any errors - steps, bundleLookups, updatedSubs, err := o.resolver.ResolveSteps(namespace) - if err != nil { - go o.recorder.Event(ns, corev1.EventTypeWarning, "ResolutionFailed", err.Error()) - // If the error is constraints not satisfiable, then simply project the - // resolution failure event and move on without returning the error. - // Returning the error only triggers the namespace resync which is unnecessary - // given not-satisfiable error is terminal and most likely require intervention - // from users/admins. 
Resyncing the namespace again is unlikely to resolve - // not-satisfiable error - if _, ok := err.(solver.NotSatisfiable); ok { - logger.WithError(err).Debug("resolution failed") - _, updateErr := o.updateSubscriptionStatuses( - o.setSubsCond(subs, v1alpha1.SubscriptionCondition{ - Type: v1alpha1.SubscriptionResolutionFailed, - Reason: "ConstraintsNotSatisfiable", - Message: err.Error(), - Status: corev1.ConditionTrue, - })) - if updateErr != nil { - logger.WithError(updateErr).Debug("failed to update subs conditions") - return updateErr - } - return nil - } - - _, updateErr := o.updateSubscriptionStatuses( - o.setSubsCond(subs, v1alpha1.SubscriptionCondition{ - Type: v1alpha1.SubscriptionResolutionFailed, - Reason: "ErrorPreventedResolution", - Message: err.Error(), - Status: corev1.ConditionTrue, - })) - if updateErr != nil { - logger.WithError(updateErr).Debug("failed to update subs conditions") - return updateErr - } - return err - } - - // Attempt to unpack bundles before installing - // Note: This should probably use the attenuated client to prevent users from resolving resources they otherwise don't have access to. 
- if len(bundleLookups) > 0 { - logger.Debug("unpacking bundles") - - var unpacked bool - unpacked, steps, bundleLookups, err = o.unpackBundles(namespace, steps, bundleLookups, unpackTimeout, minUnpackRetryInterval) - if err != nil { - // If the error was fatal capture and fail - if olmerrors.IsFatal(err) { - _, updateErr := o.updateSubscriptionStatuses( - o.setSubsCond(subs, v1alpha1.SubscriptionCondition{ - Type: v1alpha1.SubscriptionBundleUnpackFailed, - Reason: "ErrorPreventedUnpacking", - Message: err.Error(), - Status: corev1.ConditionTrue, - })) - if updateErr != nil { - logger.WithError(updateErr).Debug("failed to update subs conditions") - return updateErr - } - return nil - } - // Retry sync if non-fatal error - return fmt.Errorf("bundle unpacking failed with an error: %w", err) - } - - // Check BundleLookup status conditions to see if the BundleLookupFailed condtion is true - // which means bundle lookup has failed and subscriptions need to be updated - // with a condition indicating the failure. - isFailed, cond := hasBundleLookupFailureCondition(bundleLookups) - if isFailed { - err := fmt.Errorf("bundle unpacking failed. Reason: %v, and Message: %v", cond.Reason, cond.Message) - logger.Infof("%v", err) - - _, updateErr := o.updateSubscriptionStatuses( - o.setSubsCond(subs, v1alpha1.SubscriptionCondition{ - Type: v1alpha1.SubscriptionBundleUnpackFailed, - Reason: "BundleUnpackFailed", - Message: err.Error(), - Status: corev1.ConditionTrue, - })) - if updateErr != nil { - logger.WithError(updateErr).Debug("failed to update subs conditions") - return updateErr - } - // Since this is likely requires intervention we do not want to - // requeue too often. We return no error here and rely on a - // periodic resync which will help to automatically resolve - // some issues such as unreachable bundle images caused by - // bad catalog updates. 
- return nil - } - - // This means that the unpack job is still running (most likely) or - // there was some issue which we did not handle above. - if !unpacked { - _, updateErr := o.updateSubscriptionStatuses( - o.setSubsCond(subs, v1alpha1.SubscriptionCondition{ - Type: v1alpha1.SubscriptionBundleUnpacking, - Reason: "UnpackingInProgress", - Status: corev1.ConditionTrue, - })) - if updateErr != nil { - logger.WithError(updateErr).Debug("failed to update subs conditions") - return updateErr - } - - logger.Debug("unpacking is not complete yet, requeueing") - o.nsResolveQueue.AddAfter(namespace, 5*time.Second) - return nil - } - } - - // create installplan if anything updated - if len(updatedSubs) > 0 { - logger.Debug("resolution caused subscription changes, creating installplan") - // Finish calculating max generation by checking the existing installplans - installPlans, err := o.listInstallPlans(namespace) - if err != nil { - return err - } - for _, ip := range installPlans { - if gen := ip.Spec.Generation; gen > maxGeneration { - maxGeneration = gen - } - } - - // any subscription in the namespace with manual approval will force generated installplans to be manual - // TODO: this is an odd artifact of the older resolver, and will probably confuse users. approval mode could be on the operatorgroup? 
- installPlanApproval := v1alpha1.ApprovalAutomatic - for _, sub := range subs { - if sub.Spec.InstallPlanApproval == v1alpha1.ApprovalManual { - installPlanApproval = v1alpha1.ApprovalManual - break - } - } - - installPlanReference, err := o.ensureInstallPlan(logger, namespace, maxGeneration+1, subs, installPlanApproval, steps, bundleLookups) - if err != nil { - logger.WithError(err).Debug("error ensuring installplan") - return err - } - updatedSubs = o.setIPReference(updatedSubs, maxGeneration+1, installPlanReference) - } else { - logger.Debugf("no subscriptions were updated") - } - - // Make sure that we no longer indicate unpacking progress - o.removeSubsCond(subs, v1alpha1.SubscriptionBundleUnpacking) - - // Remove BundleUnpackFailed condition from subscriptions - o.removeSubsCond(subs, v1alpha1.SubscriptionBundleUnpackFailed) - - // Remove resolutionfailed condition from subscriptions - o.removeSubsCond(subs, v1alpha1.SubscriptionResolutionFailed) - - newSub := true - for _, updatedSub := range updatedSubs { - updatedSub.Status.RemoveConditions(v1alpha1.SubscriptionResolutionFailed) - for i, sub := range subs { - if sub.Name == updatedSub.Name && sub.Namespace == updatedSub.Namespace { - subs[i] = updatedSub - newSub = false - break - } - } - if newSub { - subs = append(subs, updatedSub) - continue - } - newSub = true - } - - // Update subscriptions with all changes so far - _, updateErr := o.updateSubscriptionStatuses(subs) - if updateErr != nil { - logger.WithError(updateErr).Warn("failed to update subscription conditions") - return updateErr - } - - return nil + ns, ok := obj.(*corev1.Namespace) + if !ok { + o.logger.Debugf("wrong type: %#v", obj) + return fmt.Errorf("casting Namespace failed") + } + namespace := ns.GetName() + + logger := o.logger.WithFields(logrus.Fields{ + "namespace": namespace, + "id": queueinformer.NewLoopID(), + }) + + o.gcInstallPlans(logger, namespace) + + // get the set of sources that should be used for resolution and 
best-effort get their connections working + logger.Debug("resolving sources") + + logger.Debug("checking if subscriptions need update") + + subs, err := o.listSubscriptions(namespace) + if err != nil { + logger.WithError(err).Debug("couldn't list subscriptions") + return err + } + + // If there are no subscriptions, don't attempt to sync the namespace. + if len(subs) == 0 { + logger.Debug(fmt.Sprintf("No subscriptions were found in namespace %v", namespace)) + return nil + } + + ogLister := o.lister.OperatorsV1().OperatorGroupLister().OperatorGroups(namespace) + failForwardEnabled, err := resolver.IsFailForwardEnabled(ogLister) + if err != nil { + return err + } + + unpackTimeout, err := bundle.OperatorGroupBundleUnpackTimeout(ogLister) + if err != nil { + return err + } + + minUnpackRetryInterval, err := bundle.OperatorGroupBundleUnpackRetryInterval(ogLister) + if err != nil { + return err + } + + // TODO: parallel + maxGeneration := 0 + subscriptionUpdated := false + for i, sub := range subs { + logger := logger.WithFields(logrus.Fields{ + "sub": sub.GetName(), + "source": sub.Spec.CatalogSource, + "pkg": sub.Spec.Package, + "channel": sub.Spec.Channel, + }) + + if sub.Status.InstallPlanGeneration > maxGeneration { + maxGeneration = sub.Status.InstallPlanGeneration + } + + // ensure the installplan reference is correct + sub, changedIP, err := o.ensureSubscriptionInstallPlanState(logger, sub, failForwardEnabled) + if err != nil { + logger.Debugf("error ensuring installplan state: %v", err) + return err + } + subscriptionUpdated = subscriptionUpdated || changedIP + + // record the current state of the desired corresponding CSV in the status. no-op if we don't know the csv yet. 
+ sub, changedCSV, err := o.ensureSubscriptionCSVState(logger, sub, failForwardEnabled) + if err != nil { + logger.Debugf("error recording current state of CSV in status: %v", err) + return err + } + + subscriptionUpdated = subscriptionUpdated || changedCSV + subs[i] = sub + } + if subscriptionUpdated { + logger.Debug("subscriptions were updated, wait for a new resolution") + return nil + } + + shouldUpdate := false + for _, sub := range subs { + shouldUpdate = shouldUpdate || !o.nothingToUpdate(logger, sub) + } + if !shouldUpdate { + logger.Debug("all subscriptions up to date") + return nil + } + + logger.Debug("resolving subscriptions in namespace") + + // resolve a set of steps to apply to a cluster, a set of subscriptions to create/update, and any errors + steps, bundleLookups, updatedSubs, err := o.resolver.ResolveSteps(namespace) + if err != nil { + go o.recorder.Event(ns, corev1.EventTypeWarning, "ResolutionFailed", err.Error()) + // If the error is constraints not satisfiable, then simply project the + // resolution failure event and move on without returning the error. + // Returning the error only triggers the namespace resync which is unnecessary + // given not-satisfiable error is terminal and most likely require intervention + // from users/admins. 
Resyncing the namespace again is unlikely to resolve + // not-satisfiable error + if _, ok := err.(solver.NotSatisfiable); ok { + logger.WithError(err).Debug("resolution failed") + _, updateErr := o.updateSubscriptionStatuses( + o.setSubsCond(subs, v1alpha1.SubscriptionCondition{ + Type: v1alpha1.SubscriptionResolutionFailed, + Reason: "ConstraintsNotSatisfiable", + Message: err.Error(), + Status: corev1.ConditionTrue, + })) + if updateErr != nil { + logger.WithError(updateErr).Debug("failed to update subs conditions") + return updateErr + } + return nil + } + + _, updateErr := o.updateSubscriptionStatuses( + o.setSubsCond(subs, v1alpha1.SubscriptionCondition{ + Type: v1alpha1.SubscriptionResolutionFailed, + Reason: "ErrorPreventedResolution", + Message: err.Error(), + Status: corev1.ConditionTrue, + })) + if updateErr != nil { + logger.WithError(updateErr).Debug("failed to update subs conditions") + return updateErr + } + return err + } + + // Attempt to unpack bundles before installing + // Note: This should probably use the attenuated client to prevent users from resolving resources they otherwise don't have access to. 
+ if len(bundleLookups) > 0 { + logger.Debug("unpacking bundles") + + var unpacked bool + unpacked, steps, bundleLookups, err = o.unpackBundles(namespace, steps, bundleLookups, unpackTimeout, minUnpackRetryInterval) + if err != nil { + // If the error was fatal capture and fail + if olmerrors.IsFatal(err) { + _, updateErr := o.updateSubscriptionStatuses( + o.setSubsCond(subs, v1alpha1.SubscriptionCondition{ + Type: v1alpha1.SubscriptionBundleUnpackFailed, + Reason: "ErrorPreventedUnpacking", + Message: err.Error(), + Status: corev1.ConditionTrue, + })) + if updateErr != nil { + logger.WithError(updateErr).Debug("failed to update subs conditions") + return updateErr + } + return nil + } + // Retry sync if non-fatal error + return fmt.Errorf("bundle unpacking failed with an error: %w", err) + } + + // Check BundleLookup status conditions to see if the BundleLookupFailed condtion is true + // which means bundle lookup has failed and subscriptions need to be updated + // with a condition indicating the failure. + isFailed, cond := hasBundleLookupFailureCondition(bundleLookups) + if isFailed { + err := fmt.Errorf("bundle unpacking failed. Reason: %v, and Message: %v", cond.Reason, cond.Message) + logger.Infof("%v", err) + + _, updateErr := o.updateSubscriptionStatuses( + o.setSubsCond(subs, v1alpha1.SubscriptionCondition{ + Type: v1alpha1.SubscriptionBundleUnpackFailed, + Reason: "BundleUnpackFailed", + Message: err.Error(), + Status: corev1.ConditionTrue, + })) + if updateErr != nil { + logger.WithError(updateErr).Debug("failed to update subs conditions") + return updateErr + } + // Since this is likely requires intervention we do not want to + // requeue too often. We return no error here and rely on a + // periodic resync which will help to automatically resolve + // some issues such as unreachable bundle images caused by + // bad catalog updates. 
+ return nil + } + + // This means that the unpack job is still running (most likely) or + // there was some issue which we did not handle above. + if !unpacked { + _, updateErr := o.updateSubscriptionStatuses( + o.setSubsCond(subs, v1alpha1.SubscriptionCondition{ + Type: v1alpha1.SubscriptionBundleUnpacking, + Reason: "UnpackingInProgress", + Status: corev1.ConditionTrue, + })) + if updateErr != nil { + logger.WithError(updateErr).Debug("failed to update subs conditions") + return updateErr + } + + logger.Debug("unpacking is not complete yet, requeueing") + o.nsResolveQueue.AddAfter(namespace, 5*time.Second) + return nil + } + } + + // create installplan if anything updated + if len(updatedSubs) > 0 { + logger.Debug("resolution caused subscription changes, creating installplan") + // Finish calculating max generation by checking the existing installplans + installPlans, err := o.listInstallPlans(namespace) + if err != nil { + return err + } + for _, ip := range installPlans { + if gen := ip.Spec.Generation; gen > maxGeneration { + maxGeneration = gen + } + } + + // any subscription in the namespace with manual approval will force generated installplans to be manual + // TODO: this is an odd artifact of the older resolver, and will probably confuse users. approval mode could be on the operatorgroup? 
+ installPlanApproval := v1alpha1.ApprovalAutomatic + for _, sub := range subs { + if sub.Spec.InstallPlanApproval == v1alpha1.ApprovalManual { + installPlanApproval = v1alpha1.ApprovalManual + break + } + } + + installPlanReference, err := o.ensureInstallPlan(logger, namespace, maxGeneration+1, subs, installPlanApproval, steps, bundleLookups) + if err != nil { + logger.WithError(err).Debug("error ensuring installplan") + return err + } + updatedSubs = o.setIPReference(updatedSubs, maxGeneration+1, installPlanReference) + } else { + logger.Debugf("no subscriptions were updated") + } + + // Make sure that we no longer indicate unpacking progress + o.removeSubsCond(subs, v1alpha1.SubscriptionBundleUnpacking) + + // Remove BundleUnpackFailed condition from subscriptions + o.removeSubsCond(subs, v1alpha1.SubscriptionBundleUnpackFailed) + + // Remove resolutionfailed condition from subscriptions + o.removeSubsCond(subs, v1alpha1.SubscriptionResolutionFailed) + + newSub := true + for _, updatedSub := range updatedSubs { + updatedSub.Status.RemoveConditions(v1alpha1.SubscriptionResolutionFailed) + for i, sub := range subs { + if sub.Name == updatedSub.Name && sub.Namespace == updatedSub.Namespace { + subs[i] = updatedSub + newSub = false + break + } + } + if newSub { + subs = append(subs, updatedSub) + continue + } + newSub = true + } + + // Update subscriptions with all changes so far + _, updateErr := o.updateSubscriptionStatuses(subs) + if updateErr != nil { + logger.WithError(updateErr).Warn("failed to update subscription conditions") + return updateErr + } + + return nil } func (o *Operator) syncSubscriptions(obj interface{}) error { - sub, ok := obj.(*v1alpha1.Subscription) - if !ok { - o.logger.Debugf("wrong type: %#v", obj) - return fmt.Errorf("casting Subscription failed") - } + sub, ok := obj.(*v1alpha1.Subscription) + if !ok { + o.logger.Debugf("wrong type: %#v", obj) + return fmt.Errorf("casting Subscription failed") + } - 
o.nsResolveQueue.Add(sub.GetNamespace()) + o.nsResolveQueue.Add(sub.GetNamespace()) - return nil + return nil } // syncOperatorGroups requeues the namespace resolution queue on changes to an operatorgroup // This is because the operatorgroup is now an input to resolution via the global catalog exclusion annotation func (o *Operator) syncOperatorGroups(obj interface{}) error { - og, ok := obj.(*operatorsv1.OperatorGroup) - if !ok { - o.logger.Debugf("wrong type: %#v", obj) - return fmt.Errorf("casting OperatorGroup failed") - } + og, ok := obj.(*operatorsv1.OperatorGroup) + if !ok { + o.logger.Debugf("wrong type: %#v", obj) + return fmt.Errorf("casting OperatorGroup failed") + } - o.nsResolveQueue.Add(og.GetNamespace()) + o.nsResolveQueue.Add(og.GetNamespace()) - return nil + return nil } func (o *Operator) nothingToUpdate(logger *logrus.Entry, sub *v1alpha1.Subscription) bool { - if sub.Status.InstallPlanRef != nil && sub.Status.State == v1alpha1.SubscriptionStateUpgradePending { - logger.Debugf("skipping update: installplan already created") - return true - } - return false + if sub.Status.InstallPlanRef != nil && sub.Status.State == v1alpha1.SubscriptionStateUpgradePending { + logger.Debugf("skipping update: installplan already created") + return true + } + return false } func (o *Operator) ensureSubscriptionInstallPlanState(logger *logrus.Entry, sub *v1alpha1.Subscription, failForwardEnabled bool) (*v1alpha1.Subscription, bool, error) { - if sub.Status.InstallPlanRef != nil || sub.Status.Install != nil { - return sub, false, nil - } - - logger.Debug("checking for existing installplan") - - // check if there's an installplan that created this subscription (only if it doesn't have a reference yet) - // this indicates it was newly resolved by another operator, and we should reference that installplan in the status - ipName, ok := sub.GetAnnotations()[generatedByKey] - if !ok { - return sub, false, nil - } - - ip, err := 
o.client.OperatorsV1alpha1().InstallPlans(sub.GetNamespace()).Get(context.TODO(), ipName, metav1.GetOptions{}) - if err != nil { - logger.WithField("installplan", ipName).Warn("unable to get installplan from cache") - return nil, false, err - } - logger.WithField("installplan", ipName).Debug("found installplan that generated subscription") - - out := sub.DeepCopy() - ref, err := reference.GetReference(ip) - if err != nil { - logger.WithError(err).Warn("unable to generate installplan reference") - return nil, false, err - } - out.Status.InstallPlanRef = ref - out.Status.Install = v1alpha1.NewInstallPlanReference(ref) - out.Status.State = v1alpha1.SubscriptionStateUpgradePending - if failForwardEnabled && ip.Status.Phase == v1alpha1.InstallPlanPhaseFailed { - out.Status.State = v1alpha1.SubscriptionStateFailed - } - out.Status.CurrentCSV = out.Spec.StartingCSV - out.Status.LastUpdated = o.now() - - return out, true, nil + if sub.Status.InstallPlanRef != nil || sub.Status.Install != nil { + return sub, false, nil + } + + logger.Debug("checking for existing installplan") + + // check if there's an installplan that created this subscription (only if it doesn't have a reference yet) + // this indicates it was newly resolved by another operator, and we should reference that installplan in the status + ipName, ok := sub.GetAnnotations()[generatedByKey] + if !ok { + return sub, false, nil + } + + ip, err := o.client.OperatorsV1alpha1().InstallPlans(sub.GetNamespace()).Get(context.TODO(), ipName, metav1.GetOptions{}) + if err != nil { + logger.WithField("installplan", ipName).Warn("unable to get installplan from cache") + return nil, false, err + } + logger.WithField("installplan", ipName).Debug("found installplan that generated subscription") + + out := sub.DeepCopy() + ref, err := reference.GetReference(ip) + if err != nil { + logger.WithError(err).Warn("unable to generate installplan reference") + return nil, false, err + } + out.Status.InstallPlanRef = ref + 
out.Status.Install = v1alpha1.NewInstallPlanReference(ref) + out.Status.State = v1alpha1.SubscriptionStateUpgradePending + if failForwardEnabled && ip.Status.Phase == v1alpha1.InstallPlanPhaseFailed { + out.Status.State = v1alpha1.SubscriptionStateFailed + } + out.Status.CurrentCSV = out.Spec.StartingCSV + out.Status.LastUpdated = o.now() + + return out, true, nil } func (o *Operator) ensureSubscriptionCSVState(logger *logrus.Entry, sub *v1alpha1.Subscription, failForwardEnabled bool) (*v1alpha1.Subscription, bool, error) { - if sub.Status.CurrentCSV == "" { - return sub, false, nil - } - - _, err := o.client.OperatorsV1alpha1().ClusterServiceVersions(sub.GetNamespace()).Get(context.TODO(), sub.Status.CurrentCSV, metav1.GetOptions{}) - out := sub.DeepCopy() - if err != nil { - logger.WithError(err).WithField("currentCSV", sub.Status.CurrentCSV).Debug("error fetching csv listed in subscription status") - out.Status.State = v1alpha1.SubscriptionStateUpgradePending - if failForwardEnabled && sub.Status.InstallPlanRef != nil { - ip, err := o.client.OperatorsV1alpha1().InstallPlans(sub.GetNamespace()).Get(context.TODO(), sub.Status.InstallPlanRef.Name, metav1.GetOptions{}) - if err != nil { - logger.WithError(err).WithField("currentCSV", sub.Status.CurrentCSV).Debug("error fetching installplan listed in subscription status") - } else if ip.Status.Phase == v1alpha1.InstallPlanPhaseFailed { - out.Status.State = v1alpha1.SubscriptionStateFailed - } - } - } else { - out.Status.State = v1alpha1.SubscriptionStateAtLatest - out.Status.InstalledCSV = sub.Status.CurrentCSV - } - - if sub.Status.State == out.Status.State { - // The subscription status represents the cluster state - return sub, false, nil - } - out.Status.LastUpdated = o.now() - - // Update Subscription with status of transition. Log errors if we can't write them to the status. 
- updatedSub, err := o.client.OperatorsV1alpha1().Subscriptions(out.GetNamespace()).UpdateStatus(context.TODO(), out, metav1.UpdateOptions{}) - if err != nil { - logger.WithError(err).Info("error updating subscription status") - return nil, false, fmt.Errorf("error updating Subscription status: " + err.Error()) - } - - // subscription status represents cluster state - return updatedSub, true, nil + if sub.Status.CurrentCSV == "" { + return sub, false, nil + } + + _, err := o.client.OperatorsV1alpha1().ClusterServiceVersions(sub.GetNamespace()).Get(context.TODO(), sub.Status.CurrentCSV, metav1.GetOptions{}) + out := sub.DeepCopy() + if err != nil { + logger.WithError(err).WithField("currentCSV", sub.Status.CurrentCSV).Debug("error fetching csv listed in subscription status") + out.Status.State = v1alpha1.SubscriptionStateUpgradePending + if failForwardEnabled && sub.Status.InstallPlanRef != nil { + ip, err := o.client.OperatorsV1alpha1().InstallPlans(sub.GetNamespace()).Get(context.TODO(), sub.Status.InstallPlanRef.Name, metav1.GetOptions{}) + if err != nil { + logger.WithError(err).WithField("currentCSV", sub.Status.CurrentCSV).Debug("error fetching installplan listed in subscription status") + } else if ip.Status.Phase == v1alpha1.InstallPlanPhaseFailed { + out.Status.State = v1alpha1.SubscriptionStateFailed + } + } + } else { + out.Status.State = v1alpha1.SubscriptionStateAtLatest + out.Status.InstalledCSV = sub.Status.CurrentCSV + } + + if sub.Status.State == out.Status.State { + // The subscription status represents the cluster state + return sub, false, nil + } + out.Status.LastUpdated = o.now() + + // Update Subscription with status of transition. Log errors if we can't write them to the status. 
+ updatedSub, err := o.client.OperatorsV1alpha1().Subscriptions(out.GetNamespace()).UpdateStatus(context.TODO(), out, metav1.UpdateOptions{}) + if err != nil { + logger.WithError(err).Info("error updating subscription status") + return nil, false, fmt.Errorf("error updating Subscription status: " + err.Error()) + } + + // subscription status represents cluster state + return updatedSub, true, nil } func (o *Operator) setIPReference(subs []*v1alpha1.Subscription, gen int, installPlanRef *corev1.ObjectReference) []*v1alpha1.Subscription { - var ( - lastUpdated = o.now() - ) - for _, sub := range subs { - sub.Status.LastUpdated = lastUpdated - if installPlanRef != nil { - sub.Status.InstallPlanRef = installPlanRef - sub.Status.Install = v1alpha1.NewInstallPlanReference(installPlanRef) - sub.Status.State = v1alpha1.SubscriptionStateUpgradePending - sub.Status.InstallPlanGeneration = gen - } - } - return subs + var ( + lastUpdated = o.now() + ) + for _, sub := range subs { + sub.Status.LastUpdated = lastUpdated + if installPlanRef != nil { + sub.Status.InstallPlanRef = installPlanRef + sub.Status.Install = v1alpha1.NewInstallPlanReference(installPlanRef) + sub.Status.State = v1alpha1.SubscriptionStateUpgradePending + sub.Status.InstallPlanGeneration = gen + } + } + return subs } func (o *Operator) ensureInstallPlan(logger *logrus.Entry, namespace string, gen int, subs []*v1alpha1.Subscription, installPlanApproval v1alpha1.Approval, steps []*v1alpha1.Step, bundleLookups []v1alpha1.BundleLookup) (*corev1.ObjectReference, error) { - if len(steps) == 0 && len(bundleLookups) == 0 { - return nil, nil - } - - // Check if any existing installplans are creating the same resources - installPlans, err := o.listInstallPlans(namespace) - if err != nil { - return nil, err - } - - // There are multiple(2) worker threads process the namespaceQueue. - // Both worker can work at the same time when 2 separate updates are made for the namespace. 
- // The following sequence causes 2 installplans are created for a subscription - // 1. worker 1 doesn't find the installplan - // 2. worker 2 doesn't find the installplan - // 3. both worker 1 and 2 create the installplan - // - // This lock prevents the step 2 in the sequence so that only one installplan is created for a subscription. - // The sequence is like the following with this lock - // 1. worker 1 locks - // 2. worker 1 doesn't find the installplan - // 3. worker 2 wait for unlock <--- difference - // 4. worker 1 creates the installplan - // 5. worker 1 unlocks - // 6. worker 2 locks - // 7. worker 2 finds the installplan <--- difference - // 8. worker 2 unlocks - o.muInstallPlan.Lock() - defer o.muInstallPlan.Unlock() - - for _, installPlan := range installPlans { - if installPlan.Spec.Generation == gen { - return reference.GetReference(installPlan) - } - } - logger.Warn("no installplan found with matching generation, creating new one") - - return o.createInstallPlan(namespace, gen, subs, installPlanApproval, steps, bundleLookups) + if len(steps) == 0 && len(bundleLookups) == 0 { + return nil, nil + } + + // Check if any existing installplans are creating the same resources + installPlans, err := o.listInstallPlans(namespace) + if err != nil { + return nil, err + } + + // There are multiple(2) worker threads process the namespaceQueue. + // Both worker can work at the same time when 2 separate updates are made for the namespace. + // The following sequence causes 2 installplans are created for a subscription + // 1. worker 1 doesn't find the installplan + // 2. worker 2 doesn't find the installplan + // 3. both worker 1 and 2 create the installplan + // + // This lock prevents the step 2 in the sequence so that only one installplan is created for a subscription. + // The sequence is like the following with this lock + // 1. worker 1 locks + // 2. worker 1 doesn't find the installplan + // 3. worker 2 wait for unlock <--- difference + // 4. 
worker 1 creates the installplan + // 5. worker 1 unlocks + // 6. worker 2 locks + // 7. worker 2 finds the installplan <--- difference + // 8. worker 2 unlocks + o.muInstallPlan.Lock() + defer o.muInstallPlan.Unlock() + + for _, installPlan := range installPlans { + if installPlan.Spec.Generation == gen { + return reference.GetReference(installPlan) + } + } + logger.Warn("no installplan found with matching generation, creating new one") + + return o.createInstallPlan(namespace, gen, subs, installPlanApproval, steps, bundleLookups) } func (o *Operator) createInstallPlan(namespace string, gen int, subs []*v1alpha1.Subscription, installPlanApproval v1alpha1.Approval, steps []*v1alpha1.Step, bundleLookups []v1alpha1.BundleLookup) (*corev1.ObjectReference, error) { - if len(steps) == 0 && len(bundleLookups) == 0 { - return nil, nil - } - - csvNames := []string{} - catalogSourceMap := map[string]struct{}{} - for _, s := range steps { - if s.Resource.Kind == "ClusterServiceVersion" { - csvNames = append(csvNames, s.Resource.Name) - } - catalogSourceMap[s.Resource.CatalogSource] = struct{}{} - } - - catalogSources := []string{} - for s := range catalogSourceMap { - catalogSources = append(catalogSources, s) - } - - phase := v1alpha1.InstallPlanPhaseInstalling - if installPlanApproval == v1alpha1.ApprovalManual { - phase = v1alpha1.InstallPlanPhaseRequiresApproval - } - ip := &v1alpha1.InstallPlan{ - ObjectMeta: metav1.ObjectMeta{ - GenerateName: "install-", - Namespace: namespace, - }, - Spec: v1alpha1.InstallPlanSpec{ - ClusterServiceVersionNames: csvNames, - Approval: installPlanApproval, - Approved: installPlanApproval == v1alpha1.ApprovalAutomatic, - Generation: gen, - }, - } - for _, sub := range subs { - ownerutil.AddNonBlockingOwner(ip, sub) - } - - res, err := o.client.OperatorsV1alpha1().InstallPlans(namespace).Create(context.TODO(), ip, metav1.CreateOptions{}) - if err != nil { - return nil, err - } - - res.Status = v1alpha1.InstallPlanStatus{ - Phase: phase, - 
Plan: steps, - CatalogSources: catalogSources, - BundleLookups: bundleLookups, - } - res, err = o.client.OperatorsV1alpha1().InstallPlans(namespace).UpdateStatus(context.TODO(), res, metav1.UpdateOptions{}) - if err != nil { - return nil, err - } - - return reference.GetReference(res) + if len(steps) == 0 && len(bundleLookups) == 0 { + return nil, nil + } + + csvNames := []string{} + catalogSourceMap := map[string]struct{}{} + for _, s := range steps { + if s.Resource.Kind == "ClusterServiceVersion" { + csvNames = append(csvNames, s.Resource.Name) + } + catalogSourceMap[s.Resource.CatalogSource] = struct{}{} + } + + catalogSources := []string{} + for s := range catalogSourceMap { + catalogSources = append(catalogSources, s) + } + + phase := v1alpha1.InstallPlanPhaseInstalling + if installPlanApproval == v1alpha1.ApprovalManual { + phase = v1alpha1.InstallPlanPhaseRequiresApproval + } + ip := &v1alpha1.InstallPlan{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "install-", + Namespace: namespace, + }, + Spec: v1alpha1.InstallPlanSpec{ + ClusterServiceVersionNames: csvNames, + Approval: installPlanApproval, + Approved: installPlanApproval == v1alpha1.ApprovalAutomatic, + Generation: gen, + }, + } + for _, sub := range subs { + ownerutil.AddNonBlockingOwner(ip, sub) + } + + res, err := o.client.OperatorsV1alpha1().InstallPlans(namespace).Create(context.TODO(), ip, metav1.CreateOptions{}) + if err != nil { + return nil, err + } + + res.Status = v1alpha1.InstallPlanStatus{ + Phase: phase, + Plan: steps, + CatalogSources: catalogSources, + BundleLookups: bundleLookups, + } + res, err = o.client.OperatorsV1alpha1().InstallPlans(namespace).UpdateStatus(context.TODO(), res, metav1.UpdateOptions{}) + if err != nil { + return nil, err + } + + return reference.GetReference(res) } // setSubsCond will set the condition to the subscription if it doesn't already // exist or if it is different // Only return the list of updated subscriptions func (o *Operator) setSubsCond(subs 
[]*v1alpha1.Subscription, cond v1alpha1.SubscriptionCondition) []*v1alpha1.Subscription { - var ( - lastUpdated = o.now() - subList []*v1alpha1.Subscription - ) - - for _, sub := range subs { - subCond := sub.Status.GetCondition(cond.Type) - if subCond.Equals(cond) { - continue - } - sub.Status.LastUpdated = lastUpdated - sub.Status.SetCondition(cond) - subList = append(subList, sub) - } - return subList + var ( + lastUpdated = o.now() + subList []*v1alpha1.Subscription + ) + + for _, sub := range subs { + subCond := sub.Status.GetCondition(cond.Type) + if subCond.Equals(cond) { + continue + } + sub.Status.LastUpdated = lastUpdated + sub.Status.SetCondition(cond) + subList = append(subList, sub) + } + return subList } // removeSubsCond removes the given condition from all of the subscriptions in the input func (o *Operator) removeSubsCond(subs []*v1alpha1.Subscription, condType v1alpha1.SubscriptionConditionType) { - lastUpdated := o.now() - for _, sub := range subs { - cond := sub.Status.GetCondition(condType) - // if status is ConditionUnknown, the condition doesn't exist. Just skip - if cond.Status == corev1.ConditionUnknown { - continue - } - sub.Status.LastUpdated = lastUpdated - sub.Status.RemoveConditions(condType) - } + lastUpdated := o.now() + for _, sub := range subs { + cond := sub.Status.GetCondition(condType) + // if status is ConditionUnknown, the condition doesn't exist. 
Just skip + if cond.Status == corev1.ConditionUnknown { + continue + } + sub.Status.LastUpdated = lastUpdated + sub.Status.RemoveConditions(condType) + } } func (o *Operator) updateSubscriptionStatuses(subs []*v1alpha1.Subscription) ([]*v1alpha1.Subscription, error) { - var ( - errs []error - mu sync.Mutex - wg sync.WaitGroup - getOpts = metav1.GetOptions{} - updateOpts = metav1.UpdateOptions{} - ) - - for _, sub := range subs { - wg.Add(1) - go func(sub *v1alpha1.Subscription) { - defer wg.Done() - - update := func() error { - // Update the status of the latest revision - latest, err := o.client.OperatorsV1alpha1().Subscriptions(sub.GetNamespace()).Get(context.TODO(), sub.GetName(), getOpts) - if err != nil { - return err - } - latest.Status = sub.Status - *sub = *latest - _, err = o.client.OperatorsV1alpha1().Subscriptions(sub.Namespace).UpdateStatus(context.TODO(), latest, updateOpts) - return err - } - if err := retry.RetryOnConflict(retry.DefaultRetry, update); err != nil { - mu.Lock() - defer mu.Unlock() - errs = append(errs, err) - } - }(sub) - } - wg.Wait() - return subs, utilerrors.NewAggregate(errs) + var ( + errs []error + mu sync.Mutex + wg sync.WaitGroup + getOpts = metav1.GetOptions{} + updateOpts = metav1.UpdateOptions{} + ) + + for _, sub := range subs { + wg.Add(1) + go func(sub *v1alpha1.Subscription) { + defer wg.Done() + + update := func() error { + // Update the status of the latest revision + latest, err := o.client.OperatorsV1alpha1().Subscriptions(sub.GetNamespace()).Get(context.TODO(), sub.GetName(), getOpts) + if err != nil { + return err + } + latest.Status = sub.Status + *sub = *latest + _, err = o.client.OperatorsV1alpha1().Subscriptions(sub.Namespace).UpdateStatus(context.TODO(), latest, updateOpts) + return err + } + if err := retry.RetryOnConflict(retry.DefaultRetry, update); err != nil { + mu.Lock() + defer mu.Unlock() + errs = append(errs, err) + } + }(sub) + } + wg.Wait() + return subs, utilerrors.NewAggregate(errs) } type 
UnpackedBundleReference struct { - Kind string `json:"kind"` - Name string `json:"name"` - Namespace string `json:"namespace"` - CatalogSourceName string `json:"catalogSourceName"` - CatalogSourceNamespace string `json:"catalogSourceNamespace"` - Replaces string `json:"replaces"` - Properties string `json:"properties"` + Kind string `json:"kind"` + Name string `json:"name"` + Namespace string `json:"namespace"` + CatalogSourceName string `json:"catalogSourceName"` + CatalogSourceNamespace string `json:"catalogSourceNamespace"` + Replaces string `json:"replaces"` + Properties string `json:"properties"` } func (o *Operator) unpackBundles(namespace string, installPlanSteps []*v1alpha1.Step, bundleLookups []v1alpha1.BundleLookup, unpackTimeout, unpackRetryInterval time.Duration) (bool, []*v1alpha1.Step, []v1alpha1.BundleLookup, error) { - unpacked := true - - outBundleLookups := make([]v1alpha1.BundleLookup, len(bundleLookups)) - for i := range bundleLookups { - bundleLookups[i].DeepCopyInto(&outBundleLookups[i]) - } - outInstallPlanSteps := make([]*v1alpha1.Step, len(installPlanSteps)) - for i := range installPlanSteps { - outInstallPlanSteps[i] = installPlanSteps[i].DeepCopy() - } - - var errs []error - for i := 0; i < len(outBundleLookups); i++ { - lookup := outBundleLookups[i] - res, err := o.bundleUnpacker.UnpackBundle(&lookup, unpackTimeout, unpackRetryInterval) - if err != nil { - errs = append(errs, err) - continue - } - outBundleLookups[i] = *res.BundleLookup - - // if the failed condition is present it means the bundle unpacking has failed - failedCondition := res.GetCondition(v1alpha1.BundleLookupFailed) - if failedCondition.Status == corev1.ConditionTrue { - unpacked = false - continue - } - - // if the bundle lookup pending condition is present it means that the bundle has not been unpacked - // status=true means we're still waiting for the job to unpack to configmap - pendingCondition := res.GetCondition(v1alpha1.BundleLookupPending) - if 
pendingCondition.Status == corev1.ConditionTrue { - unpacked = false - continue - } - - // if packed condition is missing, bundle has already been unpacked into steps, continue - if res.GetCondition(resolver.BundleLookupConditionPacked).Status == corev1.ConditionUnknown { - continue - } - - // Ensure that bundle can be applied by the current version of OLM by converting to bundleSteps - bundleSteps, err := resolver.NewStepsFromBundle(res.Bundle(), namespace, res.Replaces, res.CatalogSourceRef.Name, res.CatalogSourceRef.Namespace) - if err != nil { - if fatal := olmerrors.IsFatal(err); fatal { - return false, nil, nil, err - } - - errs = append(errs, fmt.Errorf("failed to turn bundle into steps: %v", err)) - unpacked = false - continue - } - - // step manifests are replaced with references to the configmap containing them - for i, s := range bundleSteps { - ref := UnpackedBundleReference{ - Kind: "ConfigMap", - Namespace: res.CatalogSourceRef.Namespace, - Name: res.Name(), - CatalogSourceName: res.CatalogSourceRef.Name, - CatalogSourceNamespace: res.CatalogSourceRef.Namespace, - Replaces: res.Replaces, - Properties: res.Properties, - } - r, err := json.Marshal(&ref) - if err != nil { - errs = append(errs, fmt.Errorf("failed to generate reference for configmap: %v", err)) - unpacked = false - continue - } - s.Resource.Manifest = string(r) - bundleSteps[i] = s - } - res.RemoveCondition(resolver.BundleLookupConditionPacked) - outBundleLookups[i] = *res.BundleLookup - outInstallPlanSteps = append(outInstallPlanSteps, bundleSteps...) 
- } - - if err := utilerrors.NewAggregate(errs); err != nil { - o.logger.Debugf("failed to unpack bundles: %v", err) - return false, nil, nil, err - } - - return unpacked, outInstallPlanSteps, outBundleLookups, nil + unpacked := true + + outBundleLookups := make([]v1alpha1.BundleLookup, len(bundleLookups)) + for i := range bundleLookups { + bundleLookups[i].DeepCopyInto(&outBundleLookups[i]) + } + outInstallPlanSteps := make([]*v1alpha1.Step, len(installPlanSteps)) + for i := range installPlanSteps { + outInstallPlanSteps[i] = installPlanSteps[i].DeepCopy() + } + + var errs []error + for i := 0; i < len(outBundleLookups); i++ { + lookup := outBundleLookups[i] + res, err := o.bundleUnpacker.UnpackBundle(&lookup, unpackTimeout, unpackRetryInterval) + if err != nil { + errs = append(errs, err) + continue + } + outBundleLookups[i] = *res.BundleLookup + + // if the failed condition is present it means the bundle unpacking has failed + failedCondition := res.GetCondition(v1alpha1.BundleLookupFailed) + if failedCondition.Status == corev1.ConditionTrue { + unpacked = false + continue + } + + // if the bundle lookup pending condition is present it means that the bundle has not been unpacked + // status=true means we're still waiting for the job to unpack to configmap + pendingCondition := res.GetCondition(v1alpha1.BundleLookupPending) + if pendingCondition.Status == corev1.ConditionTrue { + unpacked = false + continue + } + + // if packed condition is missing, bundle has already been unpacked into steps, continue + if res.GetCondition(resolver.BundleLookupConditionPacked).Status == corev1.ConditionUnknown { + continue + } + + // Ensure that bundle can be applied by the current version of OLM by converting to bundleSteps + bundleSteps, err := resolver.NewStepsFromBundle(res.Bundle(), namespace, res.Replaces, res.CatalogSourceRef.Name, res.CatalogSourceRef.Namespace) + if err != nil { + if fatal := olmerrors.IsFatal(err); fatal { + return false, nil, nil, err + } + + errs = 
append(errs, fmt.Errorf("failed to turn bundle into steps: %v", err)) + unpacked = false + continue + } + + // step manifests are replaced with references to the configmap containing them + for i, s := range bundleSteps { + ref := UnpackedBundleReference{ + Kind: "ConfigMap", + Namespace: res.CatalogSourceRef.Namespace, + Name: res.Name(), + CatalogSourceName: res.CatalogSourceRef.Name, + CatalogSourceNamespace: res.CatalogSourceRef.Namespace, + Replaces: res.Replaces, + Properties: res.Properties, + } + r, err := json.Marshal(&ref) + if err != nil { + errs = append(errs, fmt.Errorf("failed to generate reference for configmap: %v", err)) + unpacked = false + continue + } + s.Resource.Manifest = string(r) + bundleSteps[i] = s + } + res.RemoveCondition(resolver.BundleLookupConditionPacked) + outBundleLookups[i] = *res.BundleLookup + outInstallPlanSteps = append(outInstallPlanSteps, bundleSteps...) + } + + if err := utilerrors.NewAggregate(errs); err != nil { + o.logger.Debugf("failed to unpack bundles: %v", err) + return false, nil, nil, err + } + + return unpacked, outInstallPlanSteps, outBundleLookups, nil } // gcInstallPlans garbage collects installplans that are too old // installplans are ownerrefd to all subscription inputs, so they will not otherwise // be GCd unless all inputs have been deleted. 
func (o *Operator) gcInstallPlans(log logrus.FieldLogger, namespace string) { - allIps, err := o.lister.OperatorsV1alpha1().InstallPlanLister().InstallPlans(namespace).List(labels.Everything()) - if err != nil { - log.Warn("unable to list installplans for GC") - } - - if len(allIps) <= maxInstallPlanCount { - return - } - - // we only consider maxDeletesPerSweep more than the allowed number of installplans for delete at one time - ips := allIps - if len(ips) > maxInstallPlanCount+maxDeletesPerSweep { - ips = allIps[:maxInstallPlanCount+maxDeletesPerSweep] - } - - byGen := map[int][]*v1alpha1.InstallPlan{} - for _, ip := range ips { - gen, ok := byGen[ip.Spec.Generation] - if !ok { - gen = make([]*v1alpha1.InstallPlan, 0) - } - byGen[ip.Spec.Generation] = append(gen, ip) - } - - gens := make([]int, 0) - for i := range byGen { - gens = append(gens, i) - } - - sort.Ints(gens) - - toDelete := make([]*v1alpha1.InstallPlan, 0) - - for _, i := range gens { - g := byGen[i] - - if len(ips)-len(toDelete) <= maxInstallPlanCount { - break - } - - // if removing all installplans at this generation doesn't dip below the max, safe to delete all of them - if len(ips)-len(toDelete)-len(g) >= maxInstallPlanCount { - toDelete = append(toDelete, g...) - continue - } - - // CreationTimestamp sorting shouldn't ever be hit unless there is a bug that causes installplans to be - // generated without bumping the generation. It is here as a safeguard only. - - // sort by creation time - sort.Slice(g, func(i, j int) bool { - if !g[i].CreationTimestamp.Equal(&g[j].CreationTimestamp) { - return g[i].CreationTimestamp.Before(&g[j].CreationTimestamp) - } - // final fallback to lexicographic sort, in case many installplans are created with the same timestamp - return g[i].GetName() < g[j].GetName() - }) - toDelete = append(toDelete, g[:len(ips)-len(toDelete)-maxInstallPlanCount]...) 
- } - - for _, i := range toDelete { - if err := o.client.OperatorsV1alpha1().InstallPlans(namespace).Delete(context.TODO(), i.GetName(), metav1.DeleteOptions{}); err != nil { - log.WithField("deleting", i.GetName()).WithError(err).Warn("error GCing old installplan - may have already been deleted") - } - } + allIps, err := o.lister.OperatorsV1alpha1().InstallPlanLister().InstallPlans(namespace).List(labels.Everything()) + if err != nil { + log.Warn("unable to list installplans for GC") + } + + if len(allIps) <= maxInstallPlanCount { + return + } + + // we only consider maxDeletesPerSweep more than the allowed number of installplans for delete at one time + ips := allIps + if len(ips) > maxInstallPlanCount+maxDeletesPerSweep { + ips = allIps[:maxInstallPlanCount+maxDeletesPerSweep] + } + + byGen := map[int][]*v1alpha1.InstallPlan{} + for _, ip := range ips { + gen, ok := byGen[ip.Spec.Generation] + if !ok { + gen = make([]*v1alpha1.InstallPlan, 0) + } + byGen[ip.Spec.Generation] = append(gen, ip) + } + + gens := make([]int, 0) + for i := range byGen { + gens = append(gens, i) + } + + sort.Ints(gens) + + toDelete := make([]*v1alpha1.InstallPlan, 0) + + for _, i := range gens { + g := byGen[i] + + if len(ips)-len(toDelete) <= maxInstallPlanCount { + break + } + + // if removing all installplans at this generation doesn't dip below the max, safe to delete all of them + if len(ips)-len(toDelete)-len(g) >= maxInstallPlanCount { + toDelete = append(toDelete, g...) + continue + } + + // CreationTimestamp sorting shouldn't ever be hit unless there is a bug that causes installplans to be + // generated without bumping the generation. It is here as a safeguard only. 
+ + // sort by creation time + sort.Slice(g, func(i, j int) bool { + if !g[i].CreationTimestamp.Equal(&g[j].CreationTimestamp) { + return g[i].CreationTimestamp.Before(&g[j].CreationTimestamp) + } + // final fallback to lexicographic sort, in case many installplans are created with the same timestamp + return g[i].GetName() < g[j].GetName() + }) + toDelete = append(toDelete, g[:len(ips)-len(toDelete)-maxInstallPlanCount]...) + } + + for _, i := range toDelete { + if err := o.client.OperatorsV1alpha1().InstallPlans(namespace).Delete(context.TODO(), i.GetName(), metav1.DeleteOptions{}); err != nil { + log.WithField("deleting", i.GetName()).WithError(err).Warn("error GCing old installplan - may have already been deleted") + } + } } func (o *Operator) syncInstallPlans(obj interface{}) (syncError error) { - plan, ok := obj.(*v1alpha1.InstallPlan) - if !ok { - o.logger.Debugf("wrong type: %#v", obj) - return fmt.Errorf("casting InstallPlan failed") - } - - logger := o.logger.WithFields(logrus.Fields{ - "id": queueinformer.NewLoopID(), - "ip": plan.GetName(), - "namespace": plan.GetNamespace(), - "phase": plan.Status.Phase, - }) - - logger.Info("syncing") - - if len(plan.Status.Plan) == 0 && len(plan.Status.BundleLookups) == 0 { - logger.Info("skip processing installplan without status - subscription sync responsible for initial status") - return - } - - // Complete and Failed are terminal phases - if plan.Status.Phase == v1alpha1.InstallPlanPhaseFailed || plan.Status.Phase == v1alpha1.InstallPlanPhaseComplete { - return - } - - querier := o.serviceAccountQuerier.NamespaceQuerier(plan.GetNamespace()) - ref, err := querier() - out := plan.DeepCopy() - if err != nil { - // Set status condition/message and retry sync if any error - ipFailError := fmt.Errorf("attenuated service account query failed - %v", err) - logger.Infof(ipFailError.Error()) - _, err := o.setInstallPlanInstalledCond(out, v1alpha1.InstallPlanReasonInstallCheckFailed, err.Error(), logger) - if err != nil { 
- syncError = err - return - } - syncError = ipFailError - return - } - // reset condition/message if it had been set in previous sync. This condition is being reset since any delay in the next steps - // (bundle unpacking/plan step errors being retried for a duration) could lead to this condition sticking around, even after - // the serviceAccountQuerier returns no error since the error has been resolved (by creating the required resources), which would - // be confusing to the user - - // NOTE: this makes the assumption that the InstallPlanInstalledCheckFailed reason is only set in the previous if clause, which is - // true in the current iteration of the catalog operator. Any future implementation change that aims at setting the reason as - // InstallPlanInstalledCheckFailed must make sure that either this assumption is not breached, or the condition being set elsewhere - // is not being unset here unintentionally. - if cond := out.Status.GetCondition(v1alpha1.InstallPlanInstalled); cond.Reason == v1alpha1.InstallPlanReasonInstallCheckFailed { - plan, err = o.setInstallPlanInstalledCond(out, v1alpha1.InstallPlanConditionReason(corev1.ConditionUnknown), "", logger) - if err != nil { - syncError = err - return - } - } - - if ref != nil { - out := plan.DeepCopy() - out.Status.AttenuatedServiceAccountRef = ref - - if !reflect.DeepEqual(plan, out) { - if _, updateErr := o.client.OperatorsV1alpha1().InstallPlans(out.GetNamespace()).UpdateStatus(context.TODO(), out, metav1.UpdateOptions{}); updateErr != nil { - syncError = fmt.Errorf("failed to attach attenuated ServiceAccount to status - %v", updateErr) - return - } - - logger.WithField("attenuated-sa", ref.Name).Info("successfully attached attenuated ServiceAccount to status") - return - } - } - - outInstallPlan, syncError := transitionInstallPlanState(logger.Logger, o, *plan, o.now(), o.installPlanTimeout) - - if syncError != nil { - logger = logger.WithField("syncError", syncError) - } - - if 
outInstallPlan.Status.Phase == v1alpha1.InstallPlanPhaseInstalling { - defer o.ipQueueSet.RequeueAfter(outInstallPlan.GetNamespace(), outInstallPlan.GetName(), time.Second*5) - } - - defer o.requeueSubscriptionForInstallPlan(plan, logger) - - // Update InstallPlan with status of transition. Log errors if we can't write them to the status. - if _, err := o.client.OperatorsV1alpha1().InstallPlans(plan.GetNamespace()).UpdateStatus(context.TODO(), outInstallPlan, metav1.UpdateOptions{}); err != nil { - logger = logger.WithField("updateError", err.Error()) - updateErr := errors.New("error updating InstallPlan status: " + err.Error()) - if syncError == nil { - logger.Info("error updating InstallPlan status") - return updateErr - } - logger.Info("error transitioning InstallPlan") - syncError = fmt.Errorf("error transitioning InstallPlan: %s and error updating InstallPlan status: %s", syncError, updateErr) - } - - return + plan, ok := obj.(*v1alpha1.InstallPlan) + if !ok { + o.logger.Debugf("wrong type: %#v", obj) + return fmt.Errorf("casting InstallPlan failed") + } + + logger := o.logger.WithFields(logrus.Fields{ + "id": queueinformer.NewLoopID(), + "ip": plan.GetName(), + "namespace": plan.GetNamespace(), + "phase": plan.Status.Phase, + }) + + logger.Info("syncing") + + if len(plan.Status.Plan) == 0 && len(plan.Status.BundleLookups) == 0 { + logger.Info("skip processing installplan without status - subscription sync responsible for initial status") + return + } + + // Complete and Failed are terminal phases + if plan.Status.Phase == v1alpha1.InstallPlanPhaseFailed || plan.Status.Phase == v1alpha1.InstallPlanPhaseComplete { + return + } + + querier := o.serviceAccountQuerier.NamespaceQuerier(plan.GetNamespace()) + ref, err := querier() + out := plan.DeepCopy() + if err != nil { + // Set status condition/message and retry sync if any error + ipFailError := fmt.Errorf("attenuated service account query failed - %v", err) + logger.Infof(ipFailError.Error()) + _, err := 
o.setInstallPlanInstalledCond(out, v1alpha1.InstallPlanReasonInstallCheckFailed, err.Error(), logger) + if err != nil { + syncError = err + return + } + syncError = ipFailError + return + } + // reset condition/message if it had been set in previous sync. This condition is being reset since any delay in the next steps + // (bundle unpacking/plan step errors being retried for a duration) could lead to this condition sticking around, even after + // the serviceAccountQuerier returns no error since the error has been resolved (by creating the required resources), which would + // be confusing to the user + + // NOTE: this makes the assumption that the InstallPlanInstalledCheckFailed reason is only set in the previous if clause, which is + // true in the current iteration of the catalog operator. Any future implementation change that aims at setting the reason as + // InstallPlanInstalledCheckFailed must make sure that either this assumption is not breached, or the condition being set elsewhere + // is not being unset here unintentionally. 
+ if cond := out.Status.GetCondition(v1alpha1.InstallPlanInstalled); cond.Reason == v1alpha1.InstallPlanReasonInstallCheckFailed { + plan, err = o.setInstallPlanInstalledCond(out, v1alpha1.InstallPlanConditionReason(corev1.ConditionUnknown), "", logger) + if err != nil { + syncError = err + return + } + } + + if ref != nil { + out := plan.DeepCopy() + out.Status.AttenuatedServiceAccountRef = ref + + if !reflect.DeepEqual(plan, out) { + if _, updateErr := o.client.OperatorsV1alpha1().InstallPlans(out.GetNamespace()).UpdateStatus(context.TODO(), out, metav1.UpdateOptions{}); updateErr != nil { + syncError = fmt.Errorf("failed to attach attenuated ServiceAccount to status - %v", updateErr) + return + } + + logger.WithField("attenuated-sa", ref.Name).Info("successfully attached attenuated ServiceAccount to status") + return + } + } + + outInstallPlan, syncError := transitionInstallPlanState(logger.Logger, o, *plan, o.now(), o.installPlanTimeout) + + if syncError != nil { + logger = logger.WithField("syncError", syncError) + } + + if outInstallPlan.Status.Phase == v1alpha1.InstallPlanPhaseInstalling { + defer o.ipQueueSet.RequeueAfter(outInstallPlan.GetNamespace(), outInstallPlan.GetName(), time.Second*5) + } + + defer o.requeueSubscriptionForInstallPlan(plan, logger) + + // Update InstallPlan with status of transition. Log errors if we can't write them to the status. 
+ if _, err := o.client.OperatorsV1alpha1().InstallPlans(plan.GetNamespace()).UpdateStatus(context.TODO(), outInstallPlan, metav1.UpdateOptions{}); err != nil { + logger = logger.WithField("updateError", err.Error()) + updateErr := errors.New("error updating InstallPlan status: " + err.Error()) + if syncError == nil { + logger.Info("error updating InstallPlan status") + return updateErr + } + logger.Info("error transitioning InstallPlan") + syncError = fmt.Errorf("error transitioning InstallPlan: %s and error updating InstallPlan status: %s", syncError, updateErr) + } + + return } func hasBundleLookupFailureCondition(bundleLookups []v1alpha1.BundleLookup) (bool, *v1alpha1.BundleLookupCondition) { - for _, bundleLookup := range bundleLookups { - for _, cond := range bundleLookup.Conditions { - if cond.Type == v1alpha1.BundleLookupFailed && cond.Status == corev1.ConditionTrue { - return true, &cond - } - } - } - return false, nil + for _, bundleLookup := range bundleLookups { + for _, cond := range bundleLookup.Conditions { + if cond.Type == v1alpha1.BundleLookupFailed && cond.Status == corev1.ConditionTrue { + return true, &cond + } + } + } + return false, nil } func (o *Operator) requeueSubscriptionForInstallPlan(plan *v1alpha1.InstallPlan, logger *logrus.Entry) { - // Notify subscription loop of installplan changes - owners := ownerutil.GetOwnersByKind(plan, v1alpha1.SubscriptionKind) - - if len(owners) == 0 { - logger.Trace("no installplan owner subscriptions found to requeue") - return - } - - for _, owner := range owners { - logger.WithField("owner", owner).Debug("requeueing installplan owner") - if err := o.subQueueSet.Requeue(plan.GetNamespace(), owner.Name); err != nil { - logger.WithError(err).Warn("error requeuing installplan owner") - } - } + // Notify subscription loop of installplan changes + owners := ownerutil.GetOwnersByKind(plan, v1alpha1.SubscriptionKind) + + if len(owners) == 0 { + logger.Trace("no installplan owner subscriptions found to 
requeue") + return + } + + for _, owner := range owners { + logger.WithField("owner", owner).Debug("requeueing installplan owner") + if err := o.subQueueSet.Requeue(plan.GetNamespace(), owner.Name); err != nil { + logger.WithError(err).Warn("error requeuing installplan owner") + } + } } func (o *Operator) setInstallPlanInstalledCond(ip *v1alpha1.InstallPlan, reason v1alpha1.InstallPlanConditionReason, message string, logger *logrus.Entry) (*v1alpha1.InstallPlan, error) { - now := o.now() - ip.Status.SetCondition(v1alpha1.ConditionFailed(v1alpha1.InstallPlanInstalled, reason, message, &now)) - outIP, err := o.client.OperatorsV1alpha1().InstallPlans(ip.GetNamespace()).UpdateStatus(context.TODO(), ip, metav1.UpdateOptions{}) - if err != nil { - logger = logger.WithField("updateError", err.Error()) - logger.Errorf("error updating InstallPlan status") - return nil, nil - } - return outIP, nil + now := o.now() + ip.Status.SetCondition(v1alpha1.ConditionFailed(v1alpha1.InstallPlanInstalled, reason, message, &now)) + outIP, err := o.client.OperatorsV1alpha1().InstallPlans(ip.GetNamespace()).UpdateStatus(context.TODO(), ip, metav1.UpdateOptions{}) + if err != nil { + logger = logger.WithField("updateError", err.Error()) + logger.Errorf("error updating InstallPlan status") + return nil, nil + } + return outIP, nil } type installPlanTransitioner interface { - ExecutePlan(*v1alpha1.InstallPlan) error + ExecutePlan(*v1alpha1.InstallPlan) error } var _ installPlanTransitioner = &Operator{} func transitionInstallPlanState(log logrus.FieldLogger, transitioner installPlanTransitioner, in v1alpha1.InstallPlan, now metav1.Time, timeout time.Duration) (*v1alpha1.InstallPlan, error) { - out := in.DeepCopy() - - switch in.Status.Phase { - case v1alpha1.InstallPlanPhaseRequiresApproval: - if out.Spec.Approved { - out.Status.Phase = v1alpha1.InstallPlanPhaseInstalling - out.Status.Message = "" - log.Debugf("approved, setting to %s", out.Status.Phase) - } else { - log.Debug("not approved, 
skipping sync") - } - return out, nil - - case v1alpha1.InstallPlanPhaseInstalling: - if out.Status.StartTime == nil { - out.Status.StartTime = &now - } - log.Debug("attempting to install") - if err := transitioner.ExecutePlan(out); err != nil { - if now.Sub(out.Status.StartTime.Time) >= timeout { - out.Status.SetCondition(v1alpha1.ConditionFailed(v1alpha1.InstallPlanInstalled, - v1alpha1.InstallPlanReasonComponentFailed, err.Error(), &now)) - out.Status.Phase = v1alpha1.InstallPlanPhaseFailed - out.Status.Message = err.Error() - } else { - out.Status.Message = fmt.Sprintf("retrying execution due to error: %s", err.Error()) - } - return out, err - } else if !out.Status.NeedsRequeue() { - // Loop over one final time to check and see if everything is good. - out.Status.SetCondition(v1alpha1.ConditionMet(v1alpha1.InstallPlanInstalled, &now)) - out.Status.Phase = v1alpha1.InstallPlanPhaseComplete - out.Status.Message = "" - } - return out, nil - default: - return out, nil - } + out := in.DeepCopy() + + switch in.Status.Phase { + case v1alpha1.InstallPlanPhaseRequiresApproval: + if out.Spec.Approved { + out.Status.Phase = v1alpha1.InstallPlanPhaseInstalling + out.Status.Message = "" + log.Debugf("approved, setting to %s", out.Status.Phase) + } else { + log.Debug("not approved, skipping sync") + } + return out, nil + + case v1alpha1.InstallPlanPhaseInstalling: + if out.Status.StartTime == nil { + out.Status.StartTime = &now + } + log.Debug("attempting to install") + if err := transitioner.ExecutePlan(out); err != nil { + if now.Sub(out.Status.StartTime.Time) >= timeout { + out.Status.SetCondition(v1alpha1.ConditionFailed(v1alpha1.InstallPlanInstalled, + v1alpha1.InstallPlanReasonComponentFailed, err.Error(), &now)) + out.Status.Phase = v1alpha1.InstallPlanPhaseFailed + out.Status.Message = err.Error() + } else { + out.Status.Message = fmt.Sprintf("retrying execution due to error: %s", err.Error()) + } + return out, err + } else if !out.Status.NeedsRequeue() { + // Loop 
over one final time to check and see if everything is good. + out.Status.SetCondition(v1alpha1.ConditionMet(v1alpha1.InstallPlanInstalled, &now)) + out.Status.Phase = v1alpha1.InstallPlanPhaseComplete + out.Status.Message = "" + } + return out, nil + default: + return out, nil + } } // Validate all existing served versions against new CRD's validation (if changed) func validateV1CRDCompatibility(dynamicClient dynamic.Interface, oldCRD *apiextensionsv1.CustomResourceDefinition, newCRD *apiextensionsv1.CustomResourceDefinition) error { - logrus.Debugf("Comparing %#v to %#v", oldCRD.Spec.Versions, newCRD.Spec.Versions) - - // If validation schema is unchanged, return right away - newestSchema := newCRD.Spec.Versions[len(newCRD.Spec.Versions)-1].Schema - for i, oldVersion := range oldCRD.Spec.Versions { - if !reflect.DeepEqual(oldVersion.Schema, newestSchema) { - break - } - if i == len(oldCRD.Spec.Versions)-1 { - // we are on the last iteration - // schema has not changed between versions at this point. 
- return nil - } - } - - convertedCRD := &apiextensions.CustomResourceDefinition{} - if err := apiextensionsv1.Convert_v1_CustomResourceDefinition_To_apiextensions_CustomResourceDefinition(newCRD, convertedCRD, nil); err != nil { - return err - } - for _, version := range oldCRD.Spec.Versions { - if version.Served { - gvr := schema.GroupVersionResource{Group: oldCRD.Spec.Group, Version: version.Name, Resource: oldCRD.Spec.Names.Plural} - err := validateExistingCRs(dynamicClient, gvr, convertedCRD) - if err != nil { - return err - } - } - } - - logrus.Debugf("Successfully validated CRD %s\n", newCRD.Name) - return nil + logrus.Debugf("Comparing %#v to %#v", oldCRD.Spec.Versions, newCRD.Spec.Versions) + + oldVersionSet := sets.New[string]() + for _, oldVersion := range oldCRD.Spec.Versions { + if !oldVersionSet.Has(oldVersion.Name) && oldVersion.Served { + oldVersionSet.Insert(oldVersion.Name) + } + } + + validationsMap := make(map[string]*apiextensions.CustomResourceValidation, 0) + for _, newVersion := range newCRD.Spec.Versions { + if oldVersionSet.Has(newVersion.Name) && newVersion.Served { + // If the new CRD's version is present in the cluster and still + // served then fill the map entry with the new validation + convertedValidation := &apiextensions.CustomResourceValidation{} + if err := apiextensionsv1.Convert_v1_CustomResourceValidation_To_apiextensions_CustomResourceValidation(newVersion.Schema, convertedValidation, nil); err != nil { + return err + } + validationsMap[newVersion.Name] = convertedValidation + } + } + return validateExistingCRs(dynamicClient, schema.GroupResource{Group: newCRD.Spec.Group, Resource: newCRD.Spec.Names.Plural}, validationsMap) } // Validate all existing served versions against new CRD's validation (if changed) func validateV1Beta1CRDCompatibility(dynamicClient dynamic.Interface, oldCRD *apiextensionsv1beta1.CustomResourceDefinition, newCRD *apiextensionsv1beta1.CustomResourceDefinition) error { - logrus.Debugf("Comparing %#v to 
%#v", oldCRD.Spec.Validation, newCRD.Spec.Validation) - - // TODO return early of all versions are equal - convertedCRD := &apiextensions.CustomResourceDefinition{} - if err := apiextensionsv1beta1.Convert_v1beta1_CustomResourceDefinition_To_apiextensions_CustomResourceDefinition(newCRD, convertedCRD, nil); err != nil { - return err - } - for _, version := range oldCRD.Spec.Versions { - if version.Served { - gvr := schema.GroupVersionResource{Group: oldCRD.Spec.Group, Version: version.Name, Resource: oldCRD.Spec.Names.Plural} - err := validateExistingCRs(dynamicClient, gvr, convertedCRD) - if err != nil { - return err - } - } - } - - if oldCRD.Spec.Version != "" { - gvr := schema.GroupVersionResource{Group: oldCRD.Spec.Group, Version: oldCRD.Spec.Version, Resource: oldCRD.Spec.Names.Plural} - err := validateExistingCRs(dynamicClient, gvr, convertedCRD) - if err != nil { - return err - } - } - logrus.Debugf("Successfully validated CRD %s\n", newCRD.Name) - return nil + logrus.Debugf("Comparing %#v to %#v", oldCRD.Spec.Validation, newCRD.Spec.Validation) + oldVersionSet := sets.New[string]() + if len(oldCRD.Spec.Versions) == 0 { + // apiextensionsv1beta1 special case: if spec.Versions is empty, use the global version and validation + oldVersionSet.Insert(oldCRD.Spec.Version) + } + for _, oldVersion := range oldCRD.Spec.Versions { + // collect served versions from spec.Versions if the list is present + if !oldVersionSet.Has(oldVersion.Name) && oldVersion.Served { + oldVersionSet.Insert(oldVersion.Name) + } + } + + validationsMap := make(map[string]*apiextensions.CustomResourceValidation, 0) + gr := schema.GroupResource{Group: newCRD.Spec.Group, Resource: newCRD.Spec.Names.Plural} + if len(newCRD.Spec.Versions) == 0 { + // apiextensionsv1beta1 special case: if spec.Versions of newCRD is empty, use the global version and validation + if oldVersionSet.Has(newCRD.Spec.Version) { + convertedValidation := &apiextensions.CustomResourceValidation{} + if err := 
apiextensionsv1beta1.Convert_v1beta1_CustomResourceValidation_To_apiextensions_CustomResourceValidation(newCRD.Spec.Validation, convertedValidation, nil); err != nil { + return err + } + validationsMap[newCRD.Spec.Version] = convertedValidation + } + } + for _, newVersion := range newCRD.Spec.Versions { + if oldVersionSet.Has(newVersion.Name) && newVersion.Served { + // If the new CRD's version is present in the cluster and still + // served then fill the map entry with the new validation + if newCRD.Spec.Validation != nil { + // apiextensionsv1beta1 special case: spec.Validation and spec.Versions[].Schema are mutually exclusive; + // if spec.Versions is non-empty and spec.Validation is set then we can validate once against any + // single existing version. + convertedValidation := &apiextensions.CustomResourceValidation{} + if err := apiextensionsv1beta1.Convert_v1beta1_CustomResourceValidation_To_apiextensions_CustomResourceValidation(newCRD.Spec.Validation, convertedValidation, nil); err != nil { + return err + } + return validateExistingCRs(dynamicClient, gr, map[string]*apiextensions.CustomResourceValidation{newVersion.Name: convertedValidation}) + } + convertedValidation := &apiextensions.CustomResourceValidation{} + if err := apiextensionsv1beta1.Convert_v1beta1_CustomResourceValidation_To_apiextensions_CustomResourceValidation(newVersion.Schema, convertedValidation, nil); err != nil { + return err + } + validationsMap[newVersion.Name] = convertedValidation + } + } + return validateExistingCRs(dynamicClient, gr, validationsMap) } -func validateExistingCRs(dynamicClient dynamic.Interface, gvr schema.GroupVersionResource, newCRD *apiextensions.CustomResourceDefinition) error { - pager := pager.New(pager.SimplePageFunc(func(opts metav1.ListOptions) (runtime.Object, error) { - return dynamicClient.Resource(gvr).List(context.TODO(), opts) - })) - validationFn := func(obj runtime.Object) error { - // lister will only provide unstructured objects as runtime.Object, 
so this should never fail to convert - // if it does, it's a programming error - cr := obj.(*unstructured.Unstructured) - validator, _, err := validation.NewSchemaValidator(newCRD.Spec.Validation) - if err != nil { - return fmt.Errorf("error creating validator for schema %#v: %s", newCRD.Spec.Validation, err) - } - err = validation.ValidateCustomResource(field.NewPath(""), cr.UnstructuredContent(), validator).ToAggregate() - if err != nil { - var namespacedName string - if cr.GetNamespace() == "" { - namespacedName = cr.GetName() - } else { - namespacedName = fmt.Sprintf("%s/%s", cr.GetNamespace(), cr.GetName()) - } - return fmt.Errorf("error validating %s %q: updated validation is too restrictive: %v", cr.GroupVersionKind(), namespacedName, err) - } - return nil - } - err := pager.EachListItem(context.Background(), metav1.ListOptions{}, validationFn) - if err != nil { - return err - } - - return nil +// validateExistingCRs lists all CRs for each version entry in validationsMap, then validates each using the paired validation. 
+func validateExistingCRs(dynamicClient dynamic.Interface, gr schema.GroupResource, validationsMap map[string]*apiextensions.CustomResourceValidation) error { + for version, schemaValidation := range validationsMap { + // create validator from given crdValidation + validator, _, err := validation.NewSchemaValidator(schemaValidation) + if err != nil { + return fmt.Errorf("error creating validator for schema version %s: %s", version, err) + } + + gvr := schema.GroupVersionResource{Group: gr.Group, Version: version, Resource: gr.Resource} + crList, err := dynamicClient.Resource(gvr).List(context.TODO(), metav1.ListOptions{}) + if err != nil { + return fmt.Errorf("error listing resources in GroupVersionResource %#v: %s", gvr, err) + } + + // validate each CR against this version schema + for _, cr := range crList.Items { + err = validation.ValidateCustomResource(field.NewPath(""), cr.UnstructuredContent(), validator).ToAggregate() + if err != nil { + var namespacedName string + if cr.GetNamespace() == "" { + namespacedName = cr.GetName() + } else { + namespacedName = fmt.Sprintf("%s/%s", cr.GetNamespace(), cr.GetName()) + } + return fmt.Errorf("error validating %s %q: updated validation is too restrictive: %v", cr.GroupVersionKind(), namespacedName, err) + } + } + return nil + } + return nil } type warningRecorder struct { - m sync.Mutex - warnings []string + m sync.Mutex + warnings []string } func (wr *warningRecorder) HandleWarningHeader(code int, agent string, text string) { - if code != 299 { - return - } - wr.m.Lock() - defer wr.m.Unlock() - wr.warnings = append(wr.warnings, text) + if code != 299 { + return + } + wr.m.Lock() + defer wr.m.Unlock() + wr.warnings = append(wr.warnings, text) } func (wr *warningRecorder) PopWarnings() []string { - wr.m.Lock() - defer wr.m.Unlock() + wr.m.Lock() + defer wr.m.Unlock() - result := wr.warnings - wr.warnings = nil - return result + result := wr.warnings + wr.warnings = nil + return result } // ExecutePlan applies a planned 
InstallPlan to a namespace. func (o *Operator) ExecutePlan(plan *v1alpha1.InstallPlan) error { - if plan.Status.Phase != v1alpha1.InstallPlanPhaseInstalling { - panic("attempted to install a plan that wasn't in the installing phase") - } - - namespace := plan.GetNamespace() - - // Get the set of initial installplan csv names - initialCSVNames := getCSVNameSet(plan) - // Get pre-existing CRD owners to make decisions about applying resolved CSVs - existingCRDOwners, err := o.getExistingAPIOwners(plan.GetNamespace()) - if err != nil { - return err - } - - var wr warningRecorder - factory := o.clientFactory.WithConfigTransformer(clients.SetWarningHandler(&wr)) - - // Does the namespace have an operator group that specifies a user defined - // service account? If so, then we should use a scoped client for plan - // execution. - attenuate, err := o.clientAttenuator.AttenuateToServiceAccount(scoped.StaticQuerier(plan.Status.AttenuatedServiceAccountRef)) - if err != nil { - o.logger.Errorf("failed to get a client for plan execution: %v", err) - return err - } - attenuatedFactory := factory.WithConfigTransformer(attenuate) - kubeclient, err := attenuatedFactory.NewOperatorClient() - if err != nil { - o.logger.Errorf("failed to get a client for plan execution: %v", err) - return err - } - crclient, err := attenuatedFactory.NewKubernetesClient() - if err != nil { - o.logger.Errorf("failed to get a client for plan execution: %v", err) - return err - } - dynamicClient, err := attenuatedFactory.NewDynamicClient() - if err != nil { - o.logger.Errorf("failed to get a client for plan execution: %v", err) - return err - } - - ensurer := newStepEnsurer(kubeclient, crclient, dynamicClient) - r := newManifestResolver(plan.GetNamespace(), o.lister.CoreV1().ConfigMapLister(), o.logger) - - discoveryQuerier := newDiscoveryQuerier(o.opClient.KubernetesInterface().Discovery()) - - // CRDs should be installed via the default OLM (cluster-admin) client and not the scoped client specified by 
the AttenuatedServiceAccount - // the StepBuilder is currently only implemented for CRD types - // TODO give the StepBuilder both OLM and scoped clients when it supports new scoped types - builderKubeClient, err := factory.NewOperatorClient() - if err != nil { - o.logger.Errorf("failed to get a client for plan execution- %v", err) - return err - } - builderDynamicClient, err := factory.NewDynamicClient() - if err != nil { - o.logger.Errorf("failed to get a client for plan execution- %v", err) - return err - } - b := newBuilder(plan, o.lister.OperatorsV1alpha1().ClusterServiceVersionLister(), builderKubeClient, builderDynamicClient, r, o.logger) - - for i, step := range plan.Status.Plan { - if err := func(i int, step *v1alpha1.Step) error { - wr.PopWarnings() - defer func() { - warnings := wr.PopWarnings() - if len(warnings) == 0 { - return - } - var obj runtime.Object - if ref, err := reference.GetReference(plan); err != nil { - o.logger.WithError(err).Warnf("error getting plan reference") - obj = plan - } else { - ref.FieldPath = fmt.Sprintf("status.plan[%d]", i) - obj = ref - } - msg := fmt.Sprintf("%d warning(s) generated during operator installation (%s %q): %s", len(warnings), step.Resource.Kind, step.Resource.Name, strings.Join(warnings, ", ")) - if step.Resolving != "" { - msg = fmt.Sprintf("%d warning(s) generated during installation of operator %q (%s %q): %s", len(warnings), step.Resolving, step.Resource.Kind, step.Resource.Name, strings.Join(warnings, ", ")) - } - o.recorder.Event(obj, corev1.EventTypeWarning, "AppliedWithWarnings", msg) - metrics.EmitInstallPlanWarning() - }() - - doStep := true - s, err := b.create(*step) - if err != nil { - if _, ok := err.(notSupportedStepperErr); ok { - // stepper not implemented for this type yet - // stepper currently only implemented for CRD types - doStep = false - } else { - return err - } - } - if doStep { - status, err := s.Status() - if err != nil { - return err - } - plan.Status.Plan[i].Status = status - 
return nil - } - - switch step.Status { - case v1alpha1.StepStatusPresent, v1alpha1.StepStatusCreated, v1alpha1.StepStatusWaitingForAPI: - return nil - case v1alpha1.StepStatusUnknown, v1alpha1.StepStatusNotPresent: - manifest, err := r.ManifestForStep(step) - if err != nil { - return err - } - o.logger.WithFields(logrus.Fields{"kind": step.Resource.Kind, "name": step.Resource.Name}).Debug("execute resource") - switch step.Resource.Kind { - case v1alpha1.ClusterServiceVersionKind: - // Marshal the manifest into a CSV instance. - var csv v1alpha1.ClusterServiceVersion - err := json.Unmarshal([]byte(manifest), &csv) - if err != nil { - return errorwrap.Wrapf(err, "error parsing step manifest: %s", step.Resource.Name) - } - - // Check if the resolved CSV is in the initial set - if _, ok := initialCSVNames[csv.GetName()]; !ok { - // Check for pre-existing CSVs that own the same CRDs - competingOwners, err := competingCRDOwnersExist(plan.GetNamespace(), &csv, existingCRDOwners) - if err != nil { - return errorwrap.Wrapf(err, "error checking crd owners for: %s", csv.GetName()) - } - - // TODO: decide on fail/continue logic for pre-existing dependent CSVs that own the same CRD(s) - if competingOwners { - // For now, error out - return fmt.Errorf("pre-existing CRD owners found for owned CRD(s) of dependent CSV %s", csv.GetName()) - } - } - - // Attempt to create the CSV. - csv.SetNamespace(namespace) - - status, err := ensurer.EnsureClusterServiceVersion(&csv) - if err != nil { - return err - } - - plan.Status.Plan[i].Status = status - - case v1alpha1.SubscriptionKind: - // Marshal the manifest into a subscription instance. 
- var sub v1alpha1.Subscription - err := json.Unmarshal([]byte(manifest), &sub) - if err != nil { - return errorwrap.Wrapf(err, "error parsing step manifest: %s", step.Resource.Name) - } - - // Add the InstallPlan's name as an annotation - if annotations := sub.GetAnnotations(); annotations != nil { - annotations[generatedByKey] = plan.GetName() - } else { - sub.SetAnnotations(map[string]string{generatedByKey: plan.GetName()}) - } - - // Attempt to create the Subscription - sub.SetNamespace(namespace) - - status, err := ensurer.EnsureSubscription(&sub) - if err != nil { - return err - } - - plan.Status.Plan[i].Status = status - - case resolver.BundleSecretKind: - var s corev1.Secret - err := json.Unmarshal([]byte(manifest), &s) - if err != nil { - return errorwrap.Wrapf(err, "error parsing step manifest: %s", step.Resource.Name) - } - - // add ownerrefs on the secret that point to the CSV in the bundle - if step.Resolving != "" { - owner := &v1alpha1.ClusterServiceVersion{} - owner.SetNamespace(plan.GetNamespace()) - owner.SetName(step.Resolving) - ownerutil.AddNonBlockingOwner(&s, owner) - } - - // Update UIDs on all CSV OwnerReferences - updated, err := o.getUpdatedOwnerReferences(s.OwnerReferences, plan.Namespace) - if err != nil { - return errorwrap.Wrapf(err, "error generating ownerrefs for secret %s", s.GetName()) - } - s.SetOwnerReferences(updated) - s.SetNamespace(namespace) - - status, err := ensurer.EnsureBundleSecret(plan.Namespace, &s) - if err != nil { - return err - } - - plan.Status.Plan[i].Status = status - - case secretKind: - status, err := ensurer.EnsureSecret(o.namespace, plan.GetNamespace(), step.Resource.Name) - if err != nil { - return err - } - - plan.Status.Plan[i].Status = status - - case clusterRoleKind: - // Marshal the manifest into a ClusterRole instance. 
- var cr rbacv1.ClusterRole - err := json.Unmarshal([]byte(manifest), &cr) - if err != nil { - return errorwrap.Wrapf(err, "error parsing step manifest: %s", step.Resource.Name) - } - - status, err := ensurer.EnsureClusterRole(&cr, step) - if err != nil { - return err - } - - plan.Status.Plan[i].Status = status - - case clusterRoleBindingKind: - // Marshal the manifest into a RoleBinding instance. - var rb rbacv1.ClusterRoleBinding - err := json.Unmarshal([]byte(manifest), &rb) - if err != nil { - return errorwrap.Wrapf(err, "error parsing step manifest: %s", step.Resource.Name) - } - - status, err := ensurer.EnsureClusterRoleBinding(&rb, step) - if err != nil { - return err - } - - plan.Status.Plan[i].Status = status - - case roleKind: - // Marshal the manifest into a Role instance. - var r rbacv1.Role - err := json.Unmarshal([]byte(manifest), &r) - if err != nil { - return errorwrap.Wrapf(err, "error parsing step manifest: %s", step.Resource.Name) - } - - // Update UIDs on all CSV OwnerReferences - updated, err := o.getUpdatedOwnerReferences(r.OwnerReferences, plan.Namespace) - if err != nil { - return errorwrap.Wrapf(err, "error generating ownerrefs for role %s", r.GetName()) - } - r.SetOwnerReferences(updated) - r.SetNamespace(namespace) - - status, err := ensurer.EnsureRole(plan.Namespace, &r) - if err != nil { - return err - } - - plan.Status.Plan[i].Status = status - - case roleBindingKind: - // Marshal the manifest into a RoleBinding instance. 
- var rb rbacv1.RoleBinding - err := json.Unmarshal([]byte(manifest), &rb) - if err != nil { - return errorwrap.Wrapf(err, "error parsing step manifest: %s", step.Resource.Name) - } - - // Update UIDs on all CSV OwnerReferences - updated, err := o.getUpdatedOwnerReferences(rb.OwnerReferences, plan.Namespace) - if err != nil { - return errorwrap.Wrapf(err, "error generating ownerrefs for rolebinding %s", rb.GetName()) - } - rb.SetOwnerReferences(updated) - rb.SetNamespace(namespace) - - status, err := ensurer.EnsureRoleBinding(plan.Namespace, &rb) - if err != nil { - return err - } - - plan.Status.Plan[i].Status = status - - case serviceAccountKind: - // Marshal the manifest into a ServiceAccount instance. - var sa corev1.ServiceAccount - err := json.Unmarshal([]byte(manifest), &sa) - if err != nil { - return errorwrap.Wrapf(err, "error parsing step manifest: %s", step.Resource.Name) - } - - // Update UIDs on all CSV OwnerReferences - updated, err := o.getUpdatedOwnerReferences(sa.OwnerReferences, plan.Namespace) - if err != nil { - return errorwrap.Wrapf(err, "error generating ownerrefs for service account: %s", sa.GetName()) - } - sa.SetOwnerReferences(updated) - sa.SetNamespace(namespace) - - status, err := ensurer.EnsureServiceAccount(namespace, &sa) - if err != nil { - return err - } - - plan.Status.Plan[i].Status = status - - case serviceKind: - // Marshal the manifest into a Service instance - var s corev1.Service - err := json.Unmarshal([]byte(manifest), &s) - if err != nil { - return errorwrap.Wrapf(err, "error parsing step manifest: %s", step.Resource.Name) - } - - // add ownerrefs on the service that point to the CSV in the bundle - if step.Resolving != "" { - owner := &v1alpha1.ClusterServiceVersion{} - owner.SetNamespace(plan.GetNamespace()) - owner.SetName(step.Resolving) - ownerutil.AddNonBlockingOwner(&s, owner) - } - - // Update UIDs on all CSV OwnerReferences - updated, err := o.getUpdatedOwnerReferences(s.OwnerReferences, plan.Namespace) - if err 
!= nil { - return errorwrap.Wrapf(err, "error generating ownerrefs for service: %s", s.GetName()) - } - s.SetOwnerReferences(updated) - s.SetNamespace(namespace) - - status, err := ensurer.EnsureService(namespace, &s) - if err != nil { - return err - } - - plan.Status.Plan[i].Status = status - - case configMapKind: - var cfg corev1.ConfigMap - err := json.Unmarshal([]byte(manifest), &cfg) - if err != nil { - return errorwrap.Wrapf(err, "error parsing step manifest: %s", step.Resource.Name) - } - - // add ownerrefs on the configmap that point to the CSV in the bundle - if step.Resolving != "" { - owner := &v1alpha1.ClusterServiceVersion{} - owner.SetNamespace(plan.GetNamespace()) - owner.SetName(step.Resolving) - ownerutil.AddNonBlockingOwner(&cfg, owner) - } - - // Update UIDs on all CSV OwnerReferences - updated, err := o.getUpdatedOwnerReferences(cfg.OwnerReferences, plan.Namespace) - if err != nil { - return errorwrap.Wrapf(err, "error generating ownerrefs for configmap: %s", cfg.GetName()) - } - cfg.SetOwnerReferences(updated) - cfg.SetNamespace(namespace) - - status, err := ensurer.EnsureConfigMap(plan.Namespace, &cfg) - if err != nil { - return err - } - - plan.Status.Plan[i].Status = status - - default: - if !isSupported(step.Resource.Kind) { - // Not a supported resource - plan.Status.Plan[i].Status = v1alpha1.StepStatusUnsupportedResource - return v1alpha1.ErrInvalidInstallPlan - } - - // Marshal the manifest into an unstructured object - dec := yaml.NewYAMLOrJSONDecoder(strings.NewReader(manifest), 10) - unstructuredObject := &unstructured.Unstructured{} - if err := dec.Decode(unstructuredObject); err != nil { - return errorwrap.Wrapf(err, "error decoding %s object to an unstructured object", step.Resource.Name) - } - - // Get the resource from the GVK. 
- gvk := unstructuredObject.GroupVersionKind() - r, err := o.apiresourceFromGVK(gvk) - if err != nil { - return err - } - - // Create the GVR - gvr := schema.GroupVersionResource{ - Group: gvk.Group, - Version: gvk.Version, - Resource: r.Name, - } - - if step.Resolving != "" { - owner := &v1alpha1.ClusterServiceVersion{} - owner.SetNamespace(plan.GetNamespace()) - owner.SetName(step.Resolving) - - if r.Namespaced { - // Set OwnerReferences for namespace-scoped resource - ownerutil.AddNonBlockingOwner(unstructuredObject, owner) - - // Update UIDs on all CSV OwnerReferences - updated, err := o.getUpdatedOwnerReferences(unstructuredObject.GetOwnerReferences(), plan.Namespace) - if err != nil { - return errorwrap.Wrapf(err, "error generating ownerrefs for unstructured object: %s", unstructuredObject.GetName()) - } - - unstructuredObject.SetOwnerReferences(updated) - } else { - // Add owner labels to cluster-scoped resource - if err := ownerutil.AddOwnerLabels(unstructuredObject, owner); err != nil { - return err - } - } - } - - // Set up the dynamic client ResourceInterface and set ownerrefs - var resourceInterface dynamic.ResourceInterface - if r.Namespaced { - unstructuredObject.SetNamespace(namespace) - resourceInterface = dynamicClient.Resource(gvr).Namespace(namespace) - } else { - resourceInterface = dynamicClient.Resource(gvr) - } - - // Ensure Unstructured Object - status, err := ensurer.EnsureUnstructuredObject(resourceInterface, unstructuredObject) - if err != nil { - return err - } - - plan.Status.Plan[i].Status = status - } - default: - return v1alpha1.ErrInvalidInstallPlan - } - return nil - }(i, step); err != nil { - if apierrors.IsNotFound(err) { - // Check for APIVersions present in the installplan steps that are not available on the server. - // The check is made via discovery per step in the plan. Transient communication failures to the api-server are handled by the plan retry logic. 
- notFoundErr := discoveryQuerier.WithStepResource(step.Resource).QueryForGVK() - if notFoundErr != nil { - return notFoundErr - } - } - return err - } - } - - // Loop over one final time to check and see if everything is good. - for _, step := range plan.Status.Plan { - switch step.Status { - case v1alpha1.StepStatusCreated, v1alpha1.StepStatusPresent: - default: - return nil - } - } - - return nil + if plan.Status.Phase != v1alpha1.InstallPlanPhaseInstalling { + panic("attempted to install a plan that wasn't in the installing phase") + } + + namespace := plan.GetNamespace() + + // Get the set of initial installplan csv names + initialCSVNames := getCSVNameSet(plan) + // Get pre-existing CRD owners to make decisions about applying resolved CSVs + existingCRDOwners, err := o.getExistingAPIOwners(plan.GetNamespace()) + if err != nil { + return err + } + + var wr warningRecorder + factory := o.clientFactory.WithConfigTransformer(clients.SetWarningHandler(&wr)) + + // Does the namespace have an operator group that specifies a user defined + // service account? If so, then we should use a scoped client for plan + // execution. 
+ attenuate, err := o.clientAttenuator.AttenuateToServiceAccount(scoped.StaticQuerier(plan.Status.AttenuatedServiceAccountRef)) + if err != nil { + o.logger.Errorf("failed to get a client for plan execution: %v", err) + return err + } + attenuatedFactory := factory.WithConfigTransformer(attenuate) + kubeclient, err := attenuatedFactory.NewOperatorClient() + if err != nil { + o.logger.Errorf("failed to get a client for plan execution: %v", err) + return err + } + crclient, err := attenuatedFactory.NewKubernetesClient() + if err != nil { + o.logger.Errorf("failed to get a client for plan execution: %v", err) + return err + } + dynamicClient, err := attenuatedFactory.NewDynamicClient() + if err != nil { + o.logger.Errorf("failed to get a client for plan execution: %v", err) + return err + } + + ensurer := newStepEnsurer(kubeclient, crclient, dynamicClient) + r := newManifestResolver(plan.GetNamespace(), o.lister.CoreV1().ConfigMapLister(), o.logger) + + discoveryQuerier := newDiscoveryQuerier(o.opClient.KubernetesInterface().Discovery()) + + // CRDs should be installed via the default OLM (cluster-admin) client and not the scoped client specified by the AttenuatedServiceAccount + // the StepBuilder is currently only implemented for CRD types + // TODO give the StepBuilder both OLM and scoped clients when it supports new scoped types + builderKubeClient, err := factory.NewOperatorClient() + if err != nil { + o.logger.Errorf("failed to get a client for plan execution- %v", err) + return err + } + builderDynamicClient, err := factory.NewDynamicClient() + if err != nil { + o.logger.Errorf("failed to get a client for plan execution- %v", err) + return err + } + b := newBuilder(plan, o.lister.OperatorsV1alpha1().ClusterServiceVersionLister(), builderKubeClient, builderDynamicClient, r, o.logger) + + for i, step := range plan.Status.Plan { + if err := func(i int, step *v1alpha1.Step) error { + wr.PopWarnings() + defer func() { + warnings := wr.PopWarnings() + if 
len(warnings) == 0 { + return + } + var obj runtime.Object + if ref, err := reference.GetReference(plan); err != nil { + o.logger.WithError(err).Warnf("error getting plan reference") + obj = plan + } else { + ref.FieldPath = fmt.Sprintf("status.plan[%d]", i) + obj = ref + } + msg := fmt.Sprintf("%d warning(s) generated during operator installation (%s %q): %s", len(warnings), step.Resource.Kind, step.Resource.Name, strings.Join(warnings, ", ")) + if step.Resolving != "" { + msg = fmt.Sprintf("%d warning(s) generated during installation of operator %q (%s %q): %s", len(warnings), step.Resolving, step.Resource.Kind, step.Resource.Name, strings.Join(warnings, ", ")) + } + o.recorder.Event(obj, corev1.EventTypeWarning, "AppliedWithWarnings", msg) + metrics.EmitInstallPlanWarning() + }() + + doStep := true + s, err := b.create(*step) + if err != nil { + if _, ok := err.(notSupportedStepperErr); ok { + // stepper not implemented for this type yet + // stepper currently only implemented for CRD types + doStep = false + } else { + return err + } + } + if doStep { + status, err := s.Status() + if err != nil { + return err + } + plan.Status.Plan[i].Status = status + return nil + } + + switch step.Status { + case v1alpha1.StepStatusPresent, v1alpha1.StepStatusCreated, v1alpha1.StepStatusWaitingForAPI: + return nil + case v1alpha1.StepStatusUnknown, v1alpha1.StepStatusNotPresent: + manifest, err := r.ManifestForStep(step) + if err != nil { + return err + } + o.logger.WithFields(logrus.Fields{"kind": step.Resource.Kind, "name": step.Resource.Name}).Debug("execute resource") + switch step.Resource.Kind { + case v1alpha1.ClusterServiceVersionKind: + // Marshal the manifest into a CSV instance. 
+ var csv v1alpha1.ClusterServiceVersion + err := json.Unmarshal([]byte(manifest), &csv) + if err != nil { + return errorwrap.Wrapf(err, "error parsing step manifest: %s", step.Resource.Name) + } + + // Check if the resolved CSV is in the initial set + if _, ok := initialCSVNames[csv.GetName()]; !ok { + // Check for pre-existing CSVs that own the same CRDs + competingOwners, err := competingCRDOwnersExist(plan.GetNamespace(), &csv, existingCRDOwners) + if err != nil { + return errorwrap.Wrapf(err, "error checking crd owners for: %s", csv.GetName()) + } + + // TODO: decide on fail/continue logic for pre-existing dependent CSVs that own the same CRD(s) + if competingOwners { + // For now, error out + return fmt.Errorf("pre-existing CRD owners found for owned CRD(s) of dependent CSV %s", csv.GetName()) + } + } + + // Attempt to create the CSV. + csv.SetNamespace(namespace) + + status, err := ensurer.EnsureClusterServiceVersion(&csv) + if err != nil { + return err + } + + plan.Status.Plan[i].Status = status + + case v1alpha1.SubscriptionKind: + // Marshal the manifest into a subscription instance. 
+ var sub v1alpha1.Subscription + err := json.Unmarshal([]byte(manifest), &sub) + if err != nil { + return errorwrap.Wrapf(err, "error parsing step manifest: %s", step.Resource.Name) + } + + // Add the InstallPlan's name as an annotation + if annotations := sub.GetAnnotations(); annotations != nil { + annotations[generatedByKey] = plan.GetName() + } else { + sub.SetAnnotations(map[string]string{generatedByKey: plan.GetName()}) + } + + // Attempt to create the Subscription + sub.SetNamespace(namespace) + + status, err := ensurer.EnsureSubscription(&sub) + if err != nil { + return err + } + + plan.Status.Plan[i].Status = status + + case resolver.BundleSecretKind: + var s corev1.Secret + err := json.Unmarshal([]byte(manifest), &s) + if err != nil { + return errorwrap.Wrapf(err, "error parsing step manifest: %s", step.Resource.Name) + } + + // add ownerrefs on the secret that point to the CSV in the bundle + if step.Resolving != "" { + owner := &v1alpha1.ClusterServiceVersion{} + owner.SetNamespace(plan.GetNamespace()) + owner.SetName(step.Resolving) + ownerutil.AddNonBlockingOwner(&s, owner) + } + + // Update UIDs on all CSV OwnerReferences + updated, err := o.getUpdatedOwnerReferences(s.OwnerReferences, plan.Namespace) + if err != nil { + return errorwrap.Wrapf(err, "error generating ownerrefs for secret %s", s.GetName()) + } + s.SetOwnerReferences(updated) + s.SetNamespace(namespace) + + status, err := ensurer.EnsureBundleSecret(plan.Namespace, &s) + if err != nil { + return err + } + + plan.Status.Plan[i].Status = status + + case secretKind: + status, err := ensurer.EnsureSecret(o.namespace, plan.GetNamespace(), step.Resource.Name) + if err != nil { + return err + } + + plan.Status.Plan[i].Status = status + + case clusterRoleKind: + // Marshal the manifest into a ClusterRole instance. 
+ var cr rbacv1.ClusterRole + err := json.Unmarshal([]byte(manifest), &cr) + if err != nil { + return errorwrap.Wrapf(err, "error parsing step manifest: %s", step.Resource.Name) + } + + status, err := ensurer.EnsureClusterRole(&cr, step) + if err != nil { + return err + } + + plan.Status.Plan[i].Status = status + + case clusterRoleBindingKind: + // Marshal the manifest into a RoleBinding instance. + var rb rbacv1.ClusterRoleBinding + err := json.Unmarshal([]byte(manifest), &rb) + if err != nil { + return errorwrap.Wrapf(err, "error parsing step manifest: %s", step.Resource.Name) + } + + status, err := ensurer.EnsureClusterRoleBinding(&rb, step) + if err != nil { + return err + } + + plan.Status.Plan[i].Status = status + + case roleKind: + // Marshal the manifest into a Role instance. + var r rbacv1.Role + err := json.Unmarshal([]byte(manifest), &r) + if err != nil { + return errorwrap.Wrapf(err, "error parsing step manifest: %s", step.Resource.Name) + } + + // Update UIDs on all CSV OwnerReferences + updated, err := o.getUpdatedOwnerReferences(r.OwnerReferences, plan.Namespace) + if err != nil { + return errorwrap.Wrapf(err, "error generating ownerrefs for role %s", r.GetName()) + } + r.SetOwnerReferences(updated) + r.SetNamespace(namespace) + + status, err := ensurer.EnsureRole(plan.Namespace, &r) + if err != nil { + return err + } + + plan.Status.Plan[i].Status = status + + case roleBindingKind: + // Marshal the manifest into a RoleBinding instance. 
+ var rb rbacv1.RoleBinding + err := json.Unmarshal([]byte(manifest), &rb) + if err != nil { + return errorwrap.Wrapf(err, "error parsing step manifest: %s", step.Resource.Name) + } + + // Update UIDs on all CSV OwnerReferences + updated, err := o.getUpdatedOwnerReferences(rb.OwnerReferences, plan.Namespace) + if err != nil { + return errorwrap.Wrapf(err, "error generating ownerrefs for rolebinding %s", rb.GetName()) + } + rb.SetOwnerReferences(updated) + rb.SetNamespace(namespace) + + status, err := ensurer.EnsureRoleBinding(plan.Namespace, &rb) + if err != nil { + return err + } + + plan.Status.Plan[i].Status = status + + case serviceAccountKind: + // Marshal the manifest into a ServiceAccount instance. + var sa corev1.ServiceAccount + err := json.Unmarshal([]byte(manifest), &sa) + if err != nil { + return errorwrap.Wrapf(err, "error parsing step manifest: %s", step.Resource.Name) + } + + // Update UIDs on all CSV OwnerReferences + updated, err := o.getUpdatedOwnerReferences(sa.OwnerReferences, plan.Namespace) + if err != nil { + return errorwrap.Wrapf(err, "error generating ownerrefs for service account: %s", sa.GetName()) + } + sa.SetOwnerReferences(updated) + sa.SetNamespace(namespace) + + status, err := ensurer.EnsureServiceAccount(namespace, &sa) + if err != nil { + return err + } + + plan.Status.Plan[i].Status = status + + case serviceKind: + // Marshal the manifest into a Service instance + var s corev1.Service + err := json.Unmarshal([]byte(manifest), &s) + if err != nil { + return errorwrap.Wrapf(err, "error parsing step manifest: %s", step.Resource.Name) + } + + // add ownerrefs on the service that point to the CSV in the bundle + if step.Resolving != "" { + owner := &v1alpha1.ClusterServiceVersion{} + owner.SetNamespace(plan.GetNamespace()) + owner.SetName(step.Resolving) + ownerutil.AddNonBlockingOwner(&s, owner) + } + + // Update UIDs on all CSV OwnerReferences + updated, err := o.getUpdatedOwnerReferences(s.OwnerReferences, plan.Namespace) + if err 
!= nil { + return errorwrap.Wrapf(err, "error generating ownerrefs for service: %s", s.GetName()) + } + s.SetOwnerReferences(updated) + s.SetNamespace(namespace) + + status, err := ensurer.EnsureService(namespace, &s) + if err != nil { + return err + } + + plan.Status.Plan[i].Status = status + + case configMapKind: + var cfg corev1.ConfigMap + err := json.Unmarshal([]byte(manifest), &cfg) + if err != nil { + return errorwrap.Wrapf(err, "error parsing step manifest: %s", step.Resource.Name) + } + + // add ownerrefs on the configmap that point to the CSV in the bundle + if step.Resolving != "" { + owner := &v1alpha1.ClusterServiceVersion{} + owner.SetNamespace(plan.GetNamespace()) + owner.SetName(step.Resolving) + ownerutil.AddNonBlockingOwner(&cfg, owner) + } + + // Update UIDs on all CSV OwnerReferences + updated, err := o.getUpdatedOwnerReferences(cfg.OwnerReferences, plan.Namespace) + if err != nil { + return errorwrap.Wrapf(err, "error generating ownerrefs for configmap: %s", cfg.GetName()) + } + cfg.SetOwnerReferences(updated) + cfg.SetNamespace(namespace) + + status, err := ensurer.EnsureConfigMap(plan.Namespace, &cfg) + if err != nil { + return err + } + + plan.Status.Plan[i].Status = status + + default: + if !isSupported(step.Resource.Kind) { + // Not a supported resource + plan.Status.Plan[i].Status = v1alpha1.StepStatusUnsupportedResource + return v1alpha1.ErrInvalidInstallPlan + } + + // Marshal the manifest into an unstructured object + dec := yaml.NewYAMLOrJSONDecoder(strings.NewReader(manifest), 10) + unstructuredObject := &unstructured.Unstructured{} + if err := dec.Decode(unstructuredObject); err != nil { + return errorwrap.Wrapf(err, "error decoding %s object to an unstructured object", step.Resource.Name) + } + + // Get the resource from the GVK. 
+ gvk := unstructuredObject.GroupVersionKind() + r, err := o.apiresourceFromGVK(gvk) + if err != nil { + return err + } + + // Create the GVR + gvr := schema.GroupVersionResource{ + Group: gvk.Group, + Version: gvk.Version, + Resource: r.Name, + } + + if step.Resolving != "" { + owner := &v1alpha1.ClusterServiceVersion{} + owner.SetNamespace(plan.GetNamespace()) + owner.SetName(step.Resolving) + + if r.Namespaced { + // Set OwnerReferences for namespace-scoped resource + ownerutil.AddNonBlockingOwner(unstructuredObject, owner) + + // Update UIDs on all CSV OwnerReferences + updated, err := o.getUpdatedOwnerReferences(unstructuredObject.GetOwnerReferences(), plan.Namespace) + if err != nil { + return errorwrap.Wrapf(err, "error generating ownerrefs for unstructured object: %s", unstructuredObject.GetName()) + } + + unstructuredObject.SetOwnerReferences(updated) + } else { + // Add owner labels to cluster-scoped resource + if err := ownerutil.AddOwnerLabels(unstructuredObject, owner); err != nil { + return err + } + } + } + + // Set up the dynamic client ResourceInterface and set ownerrefs + var resourceInterface dynamic.ResourceInterface + if r.Namespaced { + unstructuredObject.SetNamespace(namespace) + resourceInterface = dynamicClient.Resource(gvr).Namespace(namespace) + } else { + resourceInterface = dynamicClient.Resource(gvr) + } + + // Ensure Unstructured Object + status, err := ensurer.EnsureUnstructuredObject(resourceInterface, unstructuredObject) + if err != nil { + return err + } + + plan.Status.Plan[i].Status = status + } + default: + return v1alpha1.ErrInvalidInstallPlan + } + return nil + }(i, step); err != nil { + if apierrors.IsNotFound(err) { + // Check for APIVersions present in the installplan steps that are not available on the server. + // The check is made via discovery per step in the plan. Transient communication failures to the api-server are handled by the plan retry logic. 
+ notFoundErr := discoveryQuerier.WithStepResource(step.Resource).QueryForGVK() + if notFoundErr != nil { + return notFoundErr + } + } + return err + } + } + + // Loop over one final time to check and see if everything is good. + for _, step := range plan.Status.Plan { + switch step.Status { + case v1alpha1.StepStatusCreated, v1alpha1.StepStatusPresent: + default: + return nil + } + } + + return nil } // getExistingAPIOwners creates a map of CRD names to existing owner CSVs in the given namespace func (o *Operator) getExistingAPIOwners(namespace string) (map[string][]string, error) { - // Get a list of CSVs in the namespace - csvList, err := o.client.OperatorsV1alpha1().ClusterServiceVersions(namespace).List(context.TODO(), metav1.ListOptions{}) - - if err != nil { - return nil, err - } - - // Map CRD names to existing owner CSV CRs in the namespace - owners := make(map[string][]string) - for _, csv := range csvList.Items { - for _, crd := range csv.Spec.CustomResourceDefinitions.Owned { - owners[crd.Name] = append(owners[crd.Name], csv.GetName()) - } - for _, api := range csv.Spec.APIServiceDefinitions.Owned { - owners[api.Group] = append(owners[api.Group], csv.GetName()) - } - } - - return owners, nil + // Get a list of CSVs in the namespace + csvList, err := o.client.OperatorsV1alpha1().ClusterServiceVersions(namespace).List(context.TODO(), metav1.ListOptions{}) + + if err != nil { + return nil, err + } + + // Map CRD names to existing owner CSV CRs in the namespace + owners := make(map[string][]string) + for _, csv := range csvList.Items { + for _, crd := range csv.Spec.CustomResourceDefinitions.Owned { + owners[crd.Name] = append(owners[crd.Name], csv.GetName()) + } + for _, api := range csv.Spec.APIServiceDefinitions.Owned { + owners[api.Group] = append(owners[api.Group], csv.GetName()) + } + } + + return owners, nil } func (o *Operator) getUpdatedOwnerReferences(refs []metav1.OwnerReference, namespace string) ([]metav1.OwnerReference, error) { - updated := 
append([]metav1.OwnerReference(nil), refs...) - - for i, owner := range refs { - if owner.Kind == v1alpha1.ClusterServiceVersionKind { - csv, err := o.client.OperatorsV1alpha1().ClusterServiceVersions(namespace).Get(context.TODO(), owner.Name, metav1.GetOptions{}) - if err != nil { - return nil, err - } - owner.UID = csv.GetUID() - updated[i] = owner - } - } - return updated, nil + updated := append([]metav1.OwnerReference(nil), refs...) + + for i, owner := range refs { + if owner.Kind == v1alpha1.ClusterServiceVersionKind { + csv, err := o.client.OperatorsV1alpha1().ClusterServiceVersions(namespace).Get(context.TODO(), owner.Name, metav1.GetOptions{}) + if err != nil { + return nil, err + } + owner.UID = csv.GetUID() + updated[i] = owner + } + } + return updated, nil } func (o *Operator) listSubscriptions(namespace string) (subs []*v1alpha1.Subscription, err error) { - list, err := o.client.OperatorsV1alpha1().Subscriptions(namespace).List(context.TODO(), metav1.ListOptions{}) - if err != nil { - return - } + list, err := o.client.OperatorsV1alpha1().Subscriptions(namespace).List(context.TODO(), metav1.ListOptions{}) + if err != nil { + return + } - subs = make([]*v1alpha1.Subscription, 0) - for i := range list.Items { - subs = append(subs, &list.Items[i]) - } + subs = make([]*v1alpha1.Subscription, 0) + for i := range list.Items { + subs = append(subs, &list.Items[i]) + } - return + return } func (o *Operator) listInstallPlans(namespace string) (ips []*v1alpha1.InstallPlan, err error) { - list, err := o.client.OperatorsV1alpha1().InstallPlans(namespace).List(context.TODO(), metav1.ListOptions{}) - if err != nil { - return - } + list, err := o.client.OperatorsV1alpha1().InstallPlans(namespace).List(context.TODO(), metav1.ListOptions{}) + if err != nil { + return + } - ips = make([]*v1alpha1.InstallPlan, 0) - for i := range list.Items { - ips = append(ips, &list.Items[i]) - } + ips = make([]*v1alpha1.InstallPlan, 0) + for i := range list.Items { + ips = append(ips, 
&list.Items[i]) + } - return + return } // competingCRDOwnersExist returns true if there exists a CSV that owns at least one of the given CSVs owned CRDs (that's not the given CSV) func competingCRDOwnersExist(namespace string, csv *v1alpha1.ClusterServiceVersion, existingOwners map[string][]string) (bool, error) { - // Attempt to find a pre-existing owner in the namespace for any owned crd - for _, crdDesc := range csv.Spec.CustomResourceDefinitions.Owned { - crdOwners := existingOwners[crdDesc.Name] - l := len(crdOwners) - switch { - case l == 1: - // One competing owner found - if crdOwners[0] != csv.GetName() { - return true, nil - } - case l > 1: - return true, olmerrors.NewMultipleExistingCRDOwnersError(crdOwners, crdDesc.Name, namespace) - } - } - - return false, nil + // Attempt to find a pre-existing owner in the namespace for any owned crd + for _, crdDesc := range csv.Spec.CustomResourceDefinitions.Owned { + crdOwners := existingOwners[crdDesc.Name] + l := len(crdOwners) + switch { + case l == 1: + // One competing owner found + if crdOwners[0] != csv.GetName() { + return true, nil + } + case l > 1: + return true, olmerrors.NewMultipleExistingCRDOwnersError(crdOwners, crdDesc.Name, namespace) + } + } + + return false, nil } // getCSVNameSet returns a set of the given installplan's csv names func getCSVNameSet(plan *v1alpha1.InstallPlan) map[string]struct{} { - csvNameSet := make(map[string]struct{}) - for _, name := range plan.Spec.ClusterServiceVersionNames { - csvNameSet[name] = struct{}{} - } + csvNameSet := make(map[string]struct{}) + for _, name := range plan.Spec.ClusterServiceVersionNames { + csvNameSet[name] = struct{}{} + } - return csvNameSet + return csvNameSet } func (o *Operator) apiresourceFromGVK(gvk schema.GroupVersionKind) (metav1.APIResource, error) { - logger := o.logger.WithFields(logrus.Fields{ - "group": gvk.Group, - "version": gvk.Version, - "kind": gvk.Kind, - }) - - resources, err := 
o.opClient.KubernetesInterface().Discovery().ServerResourcesForGroupVersion(gvk.GroupVersion().String()) - if err != nil { - logger.WithField("err", err).Info("could not query for GVK in api discovery") - return metav1.APIResource{}, err - } - for _, r := range resources.APIResources { - if r.Kind == gvk.Kind { - return r, nil - } - } - logger.Info("couldn't find GVK in api discovery") - return metav1.APIResource{}, olmerrors.GroupVersionKindNotFoundError{Group: gvk.Group, Version: gvk.Version, Kind: gvk.Kind} + logger := o.logger.WithFields(logrus.Fields{ + "group": gvk.Group, + "version": gvk.Version, + "kind": gvk.Kind, + }) + + resources, err := o.opClient.KubernetesInterface().Discovery().ServerResourcesForGroupVersion(gvk.GroupVersion().String()) + if err != nil { + logger.WithField("err", err).Info("could not query for GVK in api discovery") + return metav1.APIResource{}, err + } + for _, r := range resources.APIResources { + if r.Kind == gvk.Kind { + return r, nil + } + } + logger.Info("couldn't find GVK in api discovery") + return metav1.APIResource{}, olmerrors.GroupVersionKindNotFoundError{Group: gvk.Group, Version: gvk.Version, Kind: gvk.Kind} }