diff --git a/api/v1alpha3/zz_generated.conversion.go b/api/v1alpha3/zz_generated.conversion.go index b619a8360e..187e155050 100644 --- a/api/v1alpha3/zz_generated.conversion.go +++ b/api/v1alpha3/zz_generated.conversion.go @@ -24,10 +24,10 @@ package v1alpha3 import ( unsafe "unsafe" + v1alpha6 "github.com/easystack/cluster-api-provider-openstack/api/v1alpha6" v1 "k8s.io/api/core/v1" conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - v1alpha6 "github.com/easystack/cluster-api-provider-openstack/api/v1alpha6" apiv1alpha3 "sigs.k8s.io/cluster-api/api/v1alpha3" v1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" errors "sigs.k8s.io/cluster-api/errors" diff --git a/api/v1alpha4/zz_generated.conversion.go b/api/v1alpha4/zz_generated.conversion.go index 8d9707b494..c89acd9fc6 100644 --- a/api/v1alpha4/zz_generated.conversion.go +++ b/api/v1alpha4/zz_generated.conversion.go @@ -24,10 +24,10 @@ package v1alpha4 import ( unsafe "unsafe" + v1alpha6 "github.com/easystack/cluster-api-provider-openstack/api/v1alpha6" v1 "k8s.io/api/core/v1" conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - v1alpha6 "github.com/easystack/cluster-api-provider-openstack/api/v1alpha6" apiv1alpha4 "sigs.k8s.io/cluster-api/api/v1alpha4" v1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" errors "sigs.k8s.io/cluster-api/errors" diff --git a/api/v1alpha5/zz_generated.conversion.go b/api/v1alpha5/zz_generated.conversion.go index 6156287e1e..033a6afecf 100644 --- a/api/v1alpha5/zz_generated.conversion.go +++ b/api/v1alpha5/zz_generated.conversion.go @@ -24,10 +24,10 @@ package v1alpha5 import ( unsafe "unsafe" + v1alpha6 "github.com/easystack/cluster-api-provider-openstack/api/v1alpha6" v1 "k8s.io/api/core/v1" conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - v1alpha6 "github.com/easystack/cluster-api-provider-openstack/api/v1alpha6" v1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" errors "sigs.k8s.io/cluster-api/errors" ) diff --git a/api/v1alpha6/conditions_consts.go b/api/v1alpha6/conditions_consts.go index 1d2c722d87..dd5927a854 100644 --- a/api/v1alpha6/conditions_consts.go +++ b/api/v1alpha6/conditions_consts.go @@ -51,3 +51,13 @@ const ( // FloatingIPErrorReason used when the floating ip could not be created or attached. FloatingIPErrorReason = "FloatingIPError" ) + +const ( + ClusterReadyCondition clusterv1.ConditionType = "ClusterReady" + // ClusterNotReadyReason used when create the cluster failed. + ClusterNotReadyReason = "ClusterNotReady" + // ClusterReadyReason reports on current status of the OpenStack cluster. Ready indicates the cluster is in a OK state. + ClusterReadyReason = "ClusterReady" + // LoadBalancerReconcileErrorReason used when the openstack create loadbalancer error. + LoadBalancerReconcileErrorReason = "LoadBalancerReconcileError" +) diff --git a/api/v1alpha6/openstackcluster_types.go b/api/v1alpha6/openstackcluster_types.go index 35ab2dd0bd..2ed59f5f0c 100644 --- a/api/v1alpha6/openstackcluster_types.go +++ b/api/v1alpha6/openstackcluster_types.go @@ -64,7 +64,7 @@ type OpenStackClusterSpec struct { APIServerLoadBalancer APIServerLoadBalancer `json:"apiServerLoadBalancer,omitempty"` // DisableFloatingIP determines whether or not to attempt to attach a floating - // IP to the Instance. + // IP to the Instance. 
DisableFloatingIP bool `json:"disableFloatingIP"` // DisableAPIServerFloatingIP determines whether or not to attempt to attach a floating @@ -214,6 +214,8 @@ type OpenStackClusterStatus struct { // and/or logged in the controller's output. // +optional FailureMessage *string `json:"failureMessage,omitempty"` + + Conditions clusterv1.Conditions `json:"conditions,omitempty"` } // +kubebuilder:object:root=true @@ -249,3 +251,13 @@ type OpenStackClusterList struct { func init() { SchemeBuilder.Register(&OpenStackCluster{}, &OpenStackClusterList{}) } + +// GetConditions returns the observations of the operational state of the OpenStackMachine resource. +func (r *OpenStackCluster) GetConditions() clusterv1.Conditions { + return r.Status.Conditions +} + +// SetConditions sets the underlying service state of the OpenStackMachine to the predescribed clusterv1.Conditions. +func (r *OpenStackCluster) SetConditions(conditions clusterv1.Conditions) { + r.Status.Conditions = conditions +} diff --git a/api/v1alpha6/openstackmachine_types.go b/api/v1alpha6/openstackmachine_types.go index 235e0769b7..d94fa5f5a9 100644 --- a/api/v1alpha6/openstackmachine_types.go +++ b/api/v1alpha6/openstackmachine_types.go @@ -90,7 +90,7 @@ type OpenStackMachineSpec struct { // The volume metadata to boot from RootVolume *RootVolume `json:"rootVolume,omitempty"` - + // The custome metadata to boot from CustomeVolumes []*RootVolume `json:"customeVolumes,omitempty"` diff --git a/api/v1alpha6/openstackmachine_webhook.go b/api/v1alpha6/openstackmachine_webhook.go index db7988fdbc..8a96f794aa 100644 --- a/api/v1alpha6/openstackmachine_webhook.go +++ b/api/v1alpha6/openstackmachine_webhook.go @@ -88,15 +88,14 @@ func (r *OpenStackMachine) ValidateUpdate(old runtime.Object) error { newOpenStackMachineSpec := newOpenStackMachine["spec"].(map[string]interface{}) oldOpenStackMachineSpec := oldOpenStackMachine["spec"].(map[string]interface{}) - // allow changes to providerID always - if oldOpenStackMachineSpec["providerID"] != nil || newOpenStackMachineSpec["providerID"] != nil { + if oldOpenStackMachineSpec["providerID"] != nil || newOpenStackMachineSpec["providerID"] != nil { delete(oldOpenStackMachineSpec, "providerID") delete(newOpenStackMachineSpec, "providerID") } // allow changes to instanceID always - if oldOpenStackMachineSpec["instanceID"] != nil || newOpenStackMachineSpec["instanceID"] != nil { + if oldOpenStackMachineSpec["instanceID"] != nil || newOpenStackMachineSpec["instanceID"] != nil { delete(oldOpenStackMachineSpec, "instanceID") delete(newOpenStackMachineSpec, "instanceID") } diff --git a/api/v1alpha6/zz_generated.deepcopy.go b/api/v1alpha6/zz_generated.deepcopy.go index add4e9a3ea..a4e99d7118 100644 --- a/api/v1alpha6/zz_generated.deepcopy.go +++ b/api/v1alpha6/zz_generated.deepcopy.go @@ -431,6 +431,13 @@ func (in *OpenStackClusterStatus) DeepCopyInto(out *OpenStackClusterStatus) { *out = new(string) **out = **in } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make(v1beta1.Conditions, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenStackClusterStatus. 
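Note on the Conditions plumbing added above: the new Status.Conditions field plus GetConditions/SetConditions is what makes OpenStackCluster satisfy the Cluster API conditions Setter/Getter interfaces, which the controller changes further down rely on. The following is a minimal sketch of the expected call pattern, not code from this change; the example package and the markClusterReady helper are hypothetical, while the constants and imports come from this diff and its dependencies.

package example // hypothetical package, for illustration only

import (
	infrav1 "github.com/easystack/cluster-api-provider-openstack/api/v1alpha6"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	"sigs.k8s.io/cluster-api/util/conditions"
)

// markClusterReady shows how the new accessors are consumed: MarkTrue and
// MarkFalse read and write Status.Conditions through GetConditions/SetConditions.
func markClusterReady(openStackCluster *infrav1.OpenStackCluster, err error) {
	if err != nil {
		conditions.MarkFalse(openStackCluster, infrav1.ClusterReadyCondition,
			infrav1.ClusterNotReadyReason, clusterv1.ConditionSeverityError, "%v", err)
		return
	}
	conditions.MarkTrue(openStackCluster, infrav1.ClusterReadyCondition)
}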
@@ -661,6 +668,17 @@ func (in *OpenStackMachineSpec) DeepCopyInto(out *OpenStackMachineSpec) { *out = new(RootVolume) **out = **in } + if in.CustomeVolumes != nil { + in, out := &in.CustomeVolumes, &out.CustomeVolumes + *out = make([]*RootVolume, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(RootVolume) + **out = **in + } + } + } if in.IdentityRef != nil { in, out := &in.IdentityRef, &out.IdentityRef *out = new(OpenStackIdentityReference) diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_openstackclusters.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_openstackclusters.yaml index dd2db91dbf..be4a4ebc73 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_openstackclusters.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_openstackclusters.yaml @@ -5308,6 +5308,52 @@ spec: - name - rules type: object + conditions: + description: Conditions provide observations of the operational state + of a Cluster API resource. + items: + description: Condition defines an observation of a Cluster API resource + operational state. + properties: + lastTransitionTime: + description: Last time the condition transitioned from one status + to another. This should be when the underlying condition changed. + If that is not known, then using the time when the API field + changed is acceptable. + format: date-time + type: string + message: + description: A human readable message indicating details about + the transition. This field may be empty. + type: string + reason: + description: The reason for the condition's last transition + in CamelCase. The specific API may choose whether or not this + field is considered a guaranteed API. This field may not be + empty. + type: string + severity: + description: Severity provides an explicit classification of + Reason code, so the users or machines can immediately understand + the current situation and act accordingly. The Severity field + MUST be set only when Status=False. + type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: Type of condition in CamelCase or in foo.example.com/CamelCase. + Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. + type: string + required: + - lastTransitionTime + - status + - type + type: object + type: array controlPlaneSecurityGroup: description: 'ControlPlaneSecurityGroups contains all the information about the OpenStack Security Group that needs to be applied to control diff --git a/controllers/openstackcluster_controller.go b/controllers/openstackcluster_controller.go index ed520e2ef2..fc0d51366e 100644 --- a/controllers/openstackcluster_controller.go +++ b/controllers/openstackcluster_controller.go @@ -20,6 +20,11 @@ import ( "context" "fmt" "reflect" + "strings" + + kerrors "k8s.io/apimachinery/pkg/util/errors" + + "sigs.k8s.io/cluster-api/util/conditions" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" @@ -104,10 +109,8 @@ func (r *OpenStackClusterReconciler) Reconcile(ctx context.Context, req ctrl.Req // Always patch the openStackCluster when exiting this function so we can persist any OpenStackCluster changes. 
defer func() { - if err := patchHelper.Patch(ctx, openStackCluster); err != nil { - if reterr == nil { - reterr = errors.Wrapf(err, "error patching OpenStackCluster %s/%s", openStackCluster.Namespace, openStackCluster.Name) - } + if err := patchCluster(ctx, patchHelper, openStackCluster); err != nil { + reterr = kerrors.NewAggregate([]error{reterr, err}) } }() @@ -132,6 +135,14 @@ func (r *OpenStackClusterReconciler) Reconcile(ctx context.Context, req ctrl.Req return reconcileNormal(ctx, scope, patchHelper, cluster, openStackCluster) } +func patchCluster(ctx context.Context, patchHelper *patch.Helper, openStackCluster *infrav1.OpenStackCluster, options ...patch.Option) error { + err := patchHelper.Patch(ctx, openStackCluster, options...) + if err != nil { + return err + } + return nil +} + func reconcileDelete(ctx context.Context, scope *scope.Scope, patchHelper *patch.Helper, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster) (ctrl.Result, error) { scope.Logger.Info("Reconciling Cluster delete") @@ -146,15 +157,25 @@ func reconcileDelete(ctx context.Context, scope *scope.Scope, patchHelper *patch clusterName := fmt.Sprintf("%s-%s", cluster.Namespace, cluster.Name) + var skipLBDeleting bool + if openStackCluster.Spec.APIServerLoadBalancer.Enabled { loadBalancerService, err := loadbalancer.NewService(scope) if err != nil { - return reconcile.Result{}, err + if strings.EqualFold(err.Error(), loadbalancer.ErrLoadBalancerNoPoint) { + handleUpdateOSCError(openStackCluster, fmt.Errorf("failed to init delete load balancer client: %w", err)) + skipLBDeleting = true + } + if !skipLBDeleting { + return reconcile.Result{}, err + } } - if err = loadBalancerService.DeleteLoadBalancer(openStackCluster, clusterName); err != nil { - handleUpdateOSCError(openStackCluster, errors.Errorf("failed to delete load balancer: %v", err)) - return reconcile.Result{}, errors.Errorf("failed to delete load balancer: %v", err) + if !skipLBDeleting { + if err = loadBalancerService.DeleteLoadBalancer(openStackCluster, clusterName); err != nil { + handleUpdateOSCError(openStackCluster, errors.Errorf("failed to delete load balancer: %v", err)) + return reconcile.Result{}, errors.Errorf("failed to delete load balancer: %v", err) + } } } @@ -249,6 +270,10 @@ func deleteBastion(scope *scope.Scope, cluster *clusterv1.Cluster, openStackClus func reconcileNormal(ctx context.Context, scope *scope.Scope, patchHelper *patch.Helper, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster) (ctrl.Result, error) { scope.Logger.Info("Reconciling Cluster") + if openStackCluster.Status.FailureReason != nil || openStackCluster.Status.FailureMessage != nil { + scope.Logger.Info("Not reconciling cluster in failed state. See openStackCluster.status.failureReason, openStackCluster.status.failureMessage, or previously logged error for details") + return ctrl.Result{}, nil + } // If the OpenStackCluster doesn't have our finalizer, add it. 
controllerutil.AddFinalizer(openStackCluster, infrav1.ClusterFinalizer) // Register the finalizer immediately to avoid orphaning OpenStack resources on delete @@ -382,14 +407,14 @@ func reconcileBastion(scope *scope.Scope, cluster *clusterv1.Cluster, openStackC func bastionToInstanceSpec(openStackCluster *infrav1.OpenStackCluster, clusterName string, userData string) *compute.InstanceSpec { name := fmt.Sprintf("%s-bastion", clusterName) instanceSpec := &compute.InstanceSpec{ - Name: name, - Flavor: openStackCluster.Spec.Bastion.Instance.Flavor, - SSHKeyName: openStackCluster.Spec.Bastion.Instance.SSHKeyName, - Image: openStackCluster.Spec.Bastion.Instance.Image, - UserData: userData, - ImageUUID: openStackCluster.Spec.Bastion.Instance.ImageUUID, - FailureDomain: openStackCluster.Spec.Bastion.AvailabilityZone, - RootVolume: openStackCluster.Spec.Bastion.Instance.RootVolume, + Name: name, + Flavor: openStackCluster.Spec.Bastion.Instance.Flavor, + SSHKeyName: openStackCluster.Spec.Bastion.Instance.SSHKeyName, + Image: openStackCluster.Spec.Bastion.Instance.Image, + UserData: userData, + ImageUUID: openStackCluster.Spec.Bastion.Instance.ImageUUID, + FailureDomain: openStackCluster.Spec.Bastion.AvailabilityZone, + RootVolume: openStackCluster.Spec.Bastion.Instance.RootVolume, DeleteVolumeOnTermination: openStackCluster.Spec.Bastion.Instance.DeleteVolumeOnTermination, } @@ -512,6 +537,10 @@ func reconcileNetworkComponents(scope *scope.Scope, cluster *clusterv1.Cluster, if openStackCluster.Spec.APIServerLoadBalancer.Enabled { loadBalancerService, err := loadbalancer.NewService(scope) if err != nil { + if strings.EqualFold(err.Error(), loadbalancer.ErrLoadBalancerNoPoint) { + handleUpdateOSCError(openStackCluster, fmt.Errorf("failed to init load balancer client: %w", err)) + conditions.MarkFalse(openStackCluster, infrav1.ClusterReadyReason, infrav1.LoadBalancerReconcileErrorReason, clusterv1.ConditionSeverityError, err.Error()) + } return err } @@ -616,4 +645,5 @@ func handleUpdateOSCError(openstackCluster *infrav1.OpenStackCluster, message er err := capierrors.UpdateClusterError openstackCluster.Status.FailureReason = &err openstackCluster.Status.FailureMessage = pointer.StringPtr(message.Error()) + openstackCluster.Status.Ready = false } diff --git a/controllers/openstackmachine_controller.go b/controllers/openstackmachine_controller.go index 6fac2079de..5bace82153 100644 --- a/controllers/openstackmachine_controller.go +++ b/controllers/openstackmachine_controller.go @@ -442,16 +442,16 @@ func (r *OpenStackMachineReconciler) reconcileNormal(ctx context.Context, scope // fip has,but not specially one if len(addresses) > 1 { if machine.Annotations["machinedeployment.clusters.x-k8s.io/fip-address"] != "" { - if addresses[1].Address != machine.Annotations["machinedeployment.clusters.x-k8s.io/fip-address"]{ + if addresses[1].Address != machine.Annotations["machinedeployment.clusters.x-k8s.io/fip-address"] { gcFip := addresses[1].Address - //gc fip - r.gcFip(openStackMachine, networkingService, gcFip,scope) - }else{ + // gc fip + r.gcFip(openStackMachine, networkingService, gcFip, scope) + } else { conditions.MarkTrue(openStackMachine, infrav1.APIServerIngressReadyCondition) scope.Logger.Info("Reconciled Machine create successfully") return ctrl.Result{}, nil } - }else { + } else { conditions.MarkTrue(openStackMachine, infrav1.APIServerIngressReadyCondition) scope.Logger.Info("Reconciled Machine create successfully") return ctrl.Result{}, nil @@ -462,13 +462,13 @@ func (r *OpenStackMachineReconciler) 
reconcileNormal(ctx context.Context, scope conditions.MarkFalse(openStackMachine, infrav1.APIServerIngressReadyCondition, infrav1.FloatingIPErrorReason, clusterv1.ConditionSeverityError, "Floating IP cannot be obtained or created: %v", err) return ctrl.Result{}, nil } - var port = new(networkport.Port) + port := new(networkport.Port) // we should get machine network name if we define network pos, err := networkingService.GetPortFromInstanceIP(*openStackMachine.Spec.InstanceID, instanceNS.Addresses()[0].Address) if err != nil { - //gc fip when error occur + // gc fip when error occur defer func() { - r.gcFip(openStackMachine, networkingService, fp.FloatingIP,scope) + r.gcFip(openStackMachine, networkingService, fp.FloatingIP, scope) }() conditions.MarkFalse(openStackMachine, infrav1.APIServerIngressReadyCondition, infrav1.FloatingIPErrorReason, clusterv1.ConditionSeverityError, "failed to get machine ports information: %v", err) return ctrl.Result{}, nil @@ -481,13 +481,13 @@ func (r *OpenStackMachineReconciler) reconcileNormal(ctx context.Context, scope err = networkingService.AssociateFloatingIP(openStackMachine, fp, port.ID) if err != nil { defer func() { - r.gcFip(openStackMachine, networkingService, fp.FloatingIP,scope) + r.gcFip(openStackMachine, networkingService, fp.FloatingIP, scope) }() conditions.MarkFalse(openStackMachine, infrav1.APIServerIngressReadyCondition, infrav1.FloatingIPErrorReason, clusterv1.ConditionSeverityError, "Associating floating IP failed: %v", err) return ctrl.Result{}, fmt.Errorf("associate floating IP %q with port %q: %w", fp.FloatingIP, port.ID, err) } } - }else{ + } else { for _, address := range addresses { if address.Type == corev1.NodeExternalIP { if err = networkingService.DeleteFloatingIP(openStackCluster, address.Address); err != nil { @@ -503,11 +503,12 @@ func (r *OpenStackMachineReconciler) reconcileNormal(ctx context.Context, scope scope.Logger.Info("Reconciled Machine create successfully") return ctrl.Result{}, nil } -//gcFip add code to GC fip + +// gcFip add code to GC fip func (r *OpenStackMachineReconciler) gcFip(openStackMachine *infrav1.OpenStackMachine, networkingService *networking.Service, fp string, scope *scope.Scope) { - if err := networkingService.DeleteFloatingIP(openStackMachine, fp); err != nil { - scope.Logger.Info("when GC fip,delete floating ip failed", "err", err) - } + if err := networkingService.DeleteFloatingIP(openStackMachine, fp); err != nil { + scope.Logger.Info("when GC fip,delete floating ip failed", "err", err) + } } func (r *OpenStackMachineReconciler) getOrCreate(logger logr.Logger, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster, machine *clusterv1.Machine, openStackMachine *infrav1.OpenStackMachine, computeService *compute.Service, userData string) (*compute.InstanceStatus, error) { @@ -531,19 +532,19 @@ func (r *OpenStackMachineReconciler) getOrCreate(logger logr.Logger, cluster *cl func machineToInstanceSpec(openStackCluster *infrav1.OpenStackCluster, machine *clusterv1.Machine, openStackMachine *infrav1.OpenStackMachine, userData string) *compute.InstanceSpec { instanceSpec := compute.InstanceSpec{ - Name: openStackMachine.Name, - Image: openStackMachine.Spec.Image, - ImageUUID: openStackMachine.Spec.ImageUUID, - Flavor: openStackMachine.Spec.Flavor, - SSHKeyName: openStackMachine.Spec.SSHKeyName, - UserData: userData, - Metadata: openStackMachine.Spec.ServerMetadata, - ConfigDrive: openStackMachine.Spec.ConfigDrive != nil && *openStackMachine.Spec.ConfigDrive, - RootVolume: 
openStackMachine.Spec.RootVolume, - CustomeVolumes: openStackMachine.Spec.CustomeVolumes, - Subnet: openStackMachine.Spec.Subnet, - ServerGroupID: openStackMachine.Spec.ServerGroupID, - Trunk: openStackMachine.Spec.Trunk, + Name: openStackMachine.Name, + Image: openStackMachine.Spec.Image, + ImageUUID: openStackMachine.Spec.ImageUUID, + Flavor: openStackMachine.Spec.Flavor, + SSHKeyName: openStackMachine.Spec.SSHKeyName, + UserData: userData, + Metadata: openStackMachine.Spec.ServerMetadata, + ConfigDrive: openStackMachine.Spec.ConfigDrive != nil && *openStackMachine.Spec.ConfigDrive, + RootVolume: openStackMachine.Spec.RootVolume, + CustomeVolumes: openStackMachine.Spec.CustomeVolumes, + Subnet: openStackMachine.Spec.Subnet, + ServerGroupID: openStackMachine.Spec.ServerGroupID, + Trunk: openStackMachine.Spec.Trunk, DeleteVolumeOnTermination: openStackMachine.Spec.DeleteVolumeOnTermination, } diff --git a/go.mod b/go.mod index 9252c9394e..81e4925494 100644 --- a/go.mod +++ b/go.mod @@ -128,4 +128,4 @@ require ( sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect ) -replace sigs.k8s.io/cluster-api => sigs.k8s.io/cluster-api v1.3.0 +replace sigs.k8s.io/cluster-api => sigs.k8s.io/cluster-api v1.3.0 \ No newline at end of file diff --git a/go.sum b/go.sum index 4c8ad9c3d4..4ece682d7e 100644 --- a/go.sum +++ b/go.sum @@ -973,4 +973,4 @@ sigs.k8s.io/kind v0.17.0/go.mod h1:Qqp8AiwOlMZmJWs37Hgs31xcbiYXjtXlRBSftcnZXQk= sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= -sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= +sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= \ No newline at end of file diff --git a/main.go b/main.go index e1ebef40ea..8025f84a93 100644 --- a/main.go +++ b/main.go @@ -190,9 +190,9 @@ func main() { // Initialize event recorder. 
record.InitFromRecorder(mgr.GetEventRecorderFor("openstack-controller")) - setupChecks(mgr) + // setupChecks(mgr) setupReconcilers(ctx, mgr) - setupWebhooks(mgr) + // setupWebhooks(mgr) // +kubebuilder:scaffold:builder setupLog.Info("starting manager", "version", version.Get().String()) diff --git a/pkg/clients/compute.go b/pkg/clients/compute.go index a3f86ba7b9..4f47686fc9 100644 --- a/pkg/clients/compute.go +++ b/pkg/clients/compute.go @@ -75,7 +75,7 @@ func NewComputeClient(scope *scope.Scope) (ComputeClient, error) { return &computeClient{compute}, nil } -func GetGopherClient(scope *scope.Scope)(*gophercloud.ServiceClient,error) { +func GetGopherClient(scope *scope.Scope) (*gophercloud.ServiceClient, error) { compute, err := openstack.NewComputeV2(scope.ProviderClient, gophercloud.EndpointOpts{ Region: scope.ProviderClientOpts.RegionName, }) @@ -83,7 +83,7 @@ func GetGopherClient(scope *scope.Scope)(*gophercloud.ServiceClient,error) { return nil, fmt.Errorf("failed to create compute service client: %v", err) } compute.Microversion = NovaMinimumMicroversion - return compute,nil + return compute, nil } func (c computeClient) ListAvailabilityZones() ([]availabilityzones.AvailabilityZone, error) { diff --git a/pkg/clients/mock/compute.go b/pkg/clients/mock/compute.go index 16d80ad298..ab2d86b751 100644 --- a/pkg/clients/mock/compute.go +++ b/pkg/clients/mock/compute.go @@ -23,11 +23,11 @@ package mock import ( reflect "reflect" + clients "github.com/easystack/cluster-api-provider-openstack/pkg/clients" gomock "github.com/golang/mock/gomock" attachinterfaces "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/attachinterfaces" availabilityzones "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/availabilityzones" servers "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" - clients "github.com/easystack/cluster-api-provider-openstack/pkg/clients" ) // MockComputeClient is a mock of ComputeClient interface. 
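Note on GetGopherClient above: it exposes the raw gophercloud compute client, with the Nova microversion already set, so callers can make requests the wrapped ComputeClient interface does not cover, such as the volume attachments added in pkg/cloud/services/compute/instance.go below. A rough sketch of that usage follows; attachVolume is a hypothetical helper and the pkg/scope import path is assumed from this fork's module path.

package example // hypothetical package, for illustration only

import (
	"fmt"

	"github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach"

	"github.com/easystack/cluster-api-provider-openstack/pkg/clients"
	"github.com/easystack/cluster-api-provider-openstack/pkg/scope"
)

// attachVolume attaches an existing Cinder volume to a server using the raw
// *gophercloud.ServiceClient, which volumeattach requires.
func attachVolume(s *scope.Scope, serverID, volumeID, device string) error {
	client, err := clients.GetGopherClient(s)
	if err != nil {
		return fmt.Errorf("get compute client: %w", err)
	}
	_, err = volumeattach.Create(client, serverID, volumeattach.CreateOpts{
		Device:   device,
		VolumeID: volumeID,
	}).Extract()
	return err
}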
diff --git a/pkg/cloud/services/compute/instance.go b/pkg/cloud/services/compute/instance.go index 40deb5b974..85c2f00c6f 100644 --- a/pkg/cloud/services/compute/instance.go +++ b/pkg/cloud/services/compute/instance.go @@ -18,11 +18,12 @@ package compute import ( "fmt" - "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach" "os" "strconv" "time" + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach" + "github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes" "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/bootfromvolume" "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs" @@ -320,39 +321,49 @@ func (s *Service) createInstanceImpl(eventObject runtime.Object, openStackCluste record.Eventf(eventObject, "SuccessfulCreateServer", "Created server %s with id %s", createdInstance.Name(), createdInstance.ID()) - //add custome data volume attach + // add custome data volume attach // if err happened,return error for index, volume := range volumes { - num,err := Devicename(index) - if err!=nil { + num, err := Devicename(index) + if err != nil { record.Eventf(eventObject, "FailedAttachDataVolume", "too long Attach server %s with volume %s", createdInstance.Name(), volume.ID) - return nil,err + return nil, err + } + + if volume == nil { + continue + } + + if volume != nil && volume.ID == "" { + continue } + createOpts := volumeattach.CreateOpts{ - Device: fmt.Sprintf("/dev/vd%s",num), - VolumeID: volume.ID, + Device: fmt.Sprintf("/dev/vd%s", num), + VolumeID: volume.ID, DeleteOnTermination: instanceSpec.DeleteVolumeOnTermination, } _, err = volumeattach.Create(s.getGopherClient(), createdInstance.ID(), createOpts).Extract() if err != nil { record.Eventf(eventObject, "FailedAttachDataVolume", "Attach server %s with volume %s error", createdInstance.Name(), volume.ID) - return nil,err + return nil, err } } return createdInstance, nil } -func Devicename(index int) (string,error) { - //定义一个字符 变量a 是一个byte类型的 表示单个字符 + +func Devicename(index int) (string, error) { + // 定义一个字符 变量a 是一个byte类型的 表示单个字符 if index >= 26 { - return "",fmt.Errorf("%s","too much attach device") + return "", fmt.Errorf("%s", "too much attach device") } - var a = 'a' - //生成26个字符 + a := 'a' + // 生成26个字符 for i := 1; i <= index; i++ { a++ } - return string(a),nil + return string(a), nil } // getPortName appends a suffix to an instance name in order to try and get a unique name per port. @@ -437,7 +448,7 @@ func (s *Service) getOrCreateRootVolume(eventObject runtime.Object, instanceSpec func (s *Service) getOrCreateCustomeVolumes(eventObject runtime.Object, instanceSpec *InstanceSpec) ([]*volumes.Volume, error) { cvs := instanceSpec.CustomeVolumes - var result = make([]*volumes.Volume,0,3) + result := make([]*volumes.Volume, 0, 3) for index, vo := range cvs { if !hasRootVolume(vo) { continue diff --git a/pkg/cloud/services/compute/instance_types.go b/pkg/cloud/services/compute/instance_types.go index 61aca8b0f5..3afee24196 100644 --- a/pkg/cloud/services/compute/instance_types.go +++ b/pkg/cloud/services/compute/instance_types.go @@ -33,24 +33,24 @@ import ( // InstanceSpec does not contain all of the fields of infrav1.Instance, as not // all of them can be set on a new instance. 
type InstanceSpec struct { - Name string - Image string - ImageUUID string - Flavor string - SSHKeyName string - UserData string - Metadata map[string]string - ConfigDrive bool - FailureDomain string - RootVolume *infrav1.RootVolume - CustomeVolumes []*infrav1.RootVolume - Subnet string - ServerGroupID string - Trunk bool - Tags []string - SecurityGroups []infrav1.SecurityGroupParam - Networks []infrav1.NetworkParam - Ports []infrav1.PortOpts + Name string + Image string + ImageUUID string + Flavor string + SSHKeyName string + UserData string + Metadata map[string]string + ConfigDrive bool + FailureDomain string + RootVolume *infrav1.RootVolume + CustomeVolumes []*infrav1.RootVolume + Subnet string + ServerGroupID string + Trunk bool + Tags []string + SecurityGroups []infrav1.SecurityGroupParam + Networks []infrav1.NetworkParam + Ports []infrav1.PortOpts DeleteVolumeOnTermination bool } diff --git a/pkg/cloud/services/compute/service.go b/pkg/cloud/services/compute/service.go index 6df29f3cbb..9d015351c2 100644 --- a/pkg/cloud/services/compute/service.go +++ b/pkg/cloud/services/compute/service.go @@ -18,6 +18,7 @@ package compute import ( "fmt" + "github.com/gophercloud/gophercloud" "github.com/easystack/cluster-api-provider-openstack/pkg/clients" @@ -44,7 +45,7 @@ func NewService(scope *scope.Scope) (*Service, error) { }, nil } -func (s Service) getGopherClient() *gophercloud.ServiceClient { +func (s Service) getGopherClient() *gophercloud.ServiceClient { computeClient, err := clients.GetGopherClient(s.scope) if err != nil { return nil diff --git a/pkg/cloud/services/loadbalancer/loadbalancer.go b/pkg/cloud/services/loadbalancer/loadbalancer.go index 5f9ac07e03..c536133c22 100644 --- a/pkg/cloud/services/loadbalancer/loadbalancer.go +++ b/pkg/cloud/services/loadbalancer/loadbalancer.go @@ -45,6 +45,8 @@ const ( defaultLoadBalancerProvider string = "amphora" ) +var ErrLoadBalancerNoPoint = "failed to create load balancer service client: No suitable endpoint could be found in the service catalog." 
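Note on ErrLoadBalancerNoPoint above: it is declared as a plain string rather than an error value, so the cluster controller matches it against err.Error() with strings.EqualFold instead of errors.Is. A small sketch of that check, with isMissingLBEndpoint as a hypothetical name:

package example // hypothetical package, for illustration only

import (
	"strings"

	"github.com/easystack/cluster-api-provider-openstack/pkg/cloud/services/loadbalancer"
)

// isMissingLBEndpoint mirrors the check in openstackcluster_controller.go:
// because ErrLoadBalancerNoPoint is a string, the comparison is against
// err.Error() rather than via the errors package.
func isMissingLBEndpoint(err error) bool {
	return err != nil && strings.EqualFold(err.Error(), loadbalancer.ErrLoadBalancerNoPoint)
}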
+ const loadBalancerProvisioningStatusActive = "ACTIVE" func (s *Service) ReconcileLoadBalancer(openStackCluster *infrav1.OpenStackCluster, clusterName string, apiServerPort int) error { diff --git a/pkg/cloud/services/networking/securitygroups.go b/pkg/cloud/services/networking/securitygroups.go index 356d8d160a..3638aa9b76 100644 --- a/pkg/cloud/services/networking/securitygroups.go +++ b/pkg/cloud/services/networking/securitygroups.go @@ -71,7 +71,6 @@ func (s *Service) ReconcileSecurityGroups(openStackCluster *infrav1.OpenStackClu for k, desiredSecGroup := range desiredSecGroups { var err error observedSecGroups[k], err = s.getSecurityGroupByName(desiredSecGroup.Name) - if err != nil { return err } diff --git a/pkg/cloud/services/networking/securitygroups_rules.go b/pkg/cloud/services/networking/securitygroups_rules.go index 245aa61833..fa91572c0a 100644 --- a/pkg/cloud/services/networking/securitygroups_rules.go +++ b/pkg/cloud/services/networking/securitygroups_rules.go @@ -40,6 +40,7 @@ var defaultRules = []infrav1.SecurityGroupRule{ RemoteIPPrefix: "", }, } + // Permit traffic for cadvisor func GetSGControlPlaneForCadvisor(remoteGroupIDSelf, secWorkerGroupID string) []infrav1.SecurityGroupRule { return []infrav1.SecurityGroupRule{ @@ -87,42 +88,40 @@ func GetSGWorkForCadvisor(remoteGroupIDSelf, secControlPlaneGroupID string) []in } } - - // Permit traffic for coredns func GetSGControlPlaneForCOREDNS(remoteGroupIDSelf, secWorkerGroupID string) []infrav1.SecurityGroupRule { return []infrav1.SecurityGroupRule{ { - Description: "coredns", - Direction: "ingress", - EtherType: "IPv4", - PortRangeMin: 9153, - PortRangeMax: 9153, - Protocol: "tcp", + Description: "coredns", + Direction: "ingress", + EtherType: "IPv4", + PortRangeMin: 9153, + PortRangeMax: 9153, + Protocol: "tcp", }, { - Description: "coredns", - Direction: "ingress", - EtherType: "IPv4", - PortRangeMin: 9253, - PortRangeMax: 9253, - Protocol: "tcp", + Description: "coredns", + Direction: "ingress", + EtherType: "IPv4", + PortRangeMin: 9253, + PortRangeMax: 9253, + Protocol: "tcp", }, { - Description: "coredns", - Direction: "ingress", - EtherType: "IPv4", - PortRangeMin: 53, - PortRangeMax: 53, - Protocol: "tcp", + Description: "coredns", + Direction: "ingress", + EtherType: "IPv4", + PortRangeMin: 53, + PortRangeMax: 53, + Protocol: "tcp", }, { - Description: "coredns", - Direction: "ingress", - EtherType: "IPv4", - PortRangeMin: 53, - PortRangeMax: 53, - Protocol: "udp", + Description: "coredns", + Direction: "ingress", + EtherType: "IPv4", + PortRangeMin: 53, + PortRangeMax: 53, + Protocol: "udp", }, } } @@ -130,42 +129,40 @@ func GetSGControlPlaneForCOREDNS(remoteGroupIDSelf, secWorkerGroupID string) []i func GetSGWorkForCOREDNS(remoteGroupIDSelf, secControlPlaneGroupID string) []infrav1.SecurityGroupRule { return []infrav1.SecurityGroupRule{ { - Description: "coredns", - Direction: "ingress", - EtherType: "IPv4", - PortRangeMin: 9153, - PortRangeMax: 9153, - Protocol: "tcp", + Description: "coredns", + Direction: "ingress", + EtherType: "IPv4", + PortRangeMin: 9153, + PortRangeMax: 9153, + Protocol: "tcp", }, { - Description: "coredns", - Direction: "ingress", - EtherType: "IPv4", - PortRangeMin: 9253, - PortRangeMax: 9253, - Protocol: "tcp", + Description: "coredns", + Direction: "ingress", + EtherType: "IPv4", + PortRangeMin: 9253, + PortRangeMax: 9253, + Protocol: "tcp", }, { - Description: "coredns", - Direction: "ingress", - EtherType: "IPv4", - PortRangeMin: 53, - PortRangeMax: 53, - Protocol: "tcp", + 
Description: "coredns", + Direction: "ingress", + EtherType: "IPv4", + PortRangeMin: 53, + PortRangeMax: 53, + Protocol: "tcp", }, { - Description: "coredns", - Direction: "ingress", - EtherType: "IPv4", - PortRangeMin: 53, - PortRangeMax: 53, - Protocol: "udp", + Description: "coredns", + Direction: "ingress", + EtherType: "IPv4", + PortRangeMin: 53, + PortRangeMax: 53, + Protocol: "udp", }, } } - - // Permit traffic for prometheus func GetSGControlPlaneForPrometheus(remoteGroupIDSelf, secWorkerGroupID string) []infrav1.SecurityGroupRule { return []infrav1.SecurityGroupRule{ @@ -388,7 +385,6 @@ func GetSGControlPlaneForPrometheus(remoteGroupIDSelf, secWorkerGroupID string) } } - func GetSGWorkForPrometheus(remoteGroupIDSelf, secControlPlaneGroupID string) []infrav1.SecurityGroupRule { return []infrav1.SecurityGroupRule{ { @@ -614,12 +610,12 @@ func GetSGWorkForPrometheus(remoteGroupIDSelf, secControlPlaneGroupID string) [] func getSGControlPlaneCommon(remoteGroupIDSelf, secWorkerGroupID string) []infrav1.SecurityGroupRule { return []infrav1.SecurityGroupRule{ { - Description: "Etcd", - Direction: "ingress", - EtherType: "IPv4", - PortRangeMin: 2379, - PortRangeMax: 2380, - Protocol: "tcp", + Description: "Etcd", + Direction: "ingress", + EtherType: "IPv4", + PortRangeMin: 2379, + PortRangeMax: 2380, + Protocol: "tcp", }, { // kubeadm says this is needed @@ -779,10 +775,10 @@ func GetSGWorkerSSH(secBastionGroupID string) []infrav1.SecurityGroupRule { func GetSGControlPlaneICMP(remoteGroupIDSelf string) []infrav1.SecurityGroupRule { return []infrav1.SecurityGroupRule{ { - Description: "ICMP", - Direction: "ingress", - EtherType: "IPv4", - Protocol: "icmp", + Description: "ICMP", + Direction: "ingress", + EtherType: "IPv4", + Protocol: "icmp", }, } } @@ -791,10 +787,10 @@ func GetSGControlPlaneICMP(remoteGroupIDSelf string) []infrav1.SecurityGroupRule func GetSGWorkerICMP(remoteGroupIDSelf string) []infrav1.SecurityGroupRule { return []infrav1.SecurityGroupRule{ { - Description: "ICMP", - Direction: "ingress", - EtherType: "IPv4", - Protocol: "icmp", + Description: "ICMP", + Direction: "ingress", + EtherType: "IPv4", + Protocol: "icmp", }, } } @@ -819,16 +815,16 @@ func GetSGControlPlaneOrWorkIngress() []infrav1.SecurityGroupRule { Protocol: "tcp", }, } - } + // Permit traffic for flannel. 
func GetSGControlPlaneFlannel() []infrav1.SecurityGroupRule { return []infrav1.SecurityGroupRule{ { - Description: "flannel", - Direction: "ingress", - EtherType: "IPv4", - Protocol: "udp", + Description: "flannel", + Direction: "ingress", + EtherType: "IPv4", + Protocol: "udp", PortRangeMin: 8472, PortRangeMax: 8472, }, @@ -838,10 +834,10 @@ func GetSGControlPlaneFlannel() []infrav1.SecurityGroupRule { func GetSGWorkerFlannel() []infrav1.SecurityGroupRule { return []infrav1.SecurityGroupRule{ { - Description: "flannel", - Direction: "ingress", - EtherType: "IPv4", - Protocol: "udp", + Description: "flannel", + Direction: "ingress", + EtherType: "IPv4", + Protocol: "udp", PortRangeMin: 8472, PortRangeMax: 8472, }, @@ -849,13 +845,13 @@ func GetSGWorkerFlannel() []infrav1.SecurityGroupRule { } // Permit traffic for keepalived -func GetSGControlPlaneOrWorkVRRP() []infrav1.SecurityGroupRule { - return []infrav1.SecurityGroupRule { +func GetSGControlPlaneOrWorkVRRP() []infrav1.SecurityGroupRule { + return []infrav1.SecurityGroupRule{ { - Description: "keepalived", - Direction: "ingress", - EtherType: "IPv4", - Protocol: "vrrp", + Description: "keepalived", + Direction: "ingress", + EtherType: "IPv4", + Protocol: "vrrp", }, } } diff --git a/pkg/cloud/services/provider/provider.go b/pkg/cloud/services/provider/provider.go index b8dc26356f..aa7ba3119c 100644 --- a/pkg/cloud/services/provider/provider.go +++ b/pkg/cloud/services/provider/provider.go @@ -161,7 +161,8 @@ func NewClient(cloud NewCloud, caCert []byte) (*gophercloud.ProviderClient, *cli ApplicationCredentialID: opts.ApplicationCredentialID, ApplicationCredentialName: opts.ApplicationCredentialName, ApplicationCredentialSecret: opts.ApplicationCredentialSecret, - AllowReauth: opts.AllowReauth} + AllowReauth: opts.AllowReauth, + } if opts.Scope != nil { tokenauth.Scope.ProjectID = opts.Scope.ProjectID tokenauth.Scope.ProjectName = opts.Scope.ProjectName
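Closing note on the device naming used for the custom volume attachments in pkg/cloud/services/compute/instance.go: Devicename maps attachment indices 0 through 25 to the suffixes "a" through "z" and returns an error beyond that, so the resulting devices are /dev/vda, /dev/vdb, and so on. A runnable sketch, assuming Devicename stays exported from the compute package:

package main // hypothetical, for illustration only

import (
	"fmt"

	"github.com/easystack/cluster-api-provider-openstack/pkg/cloud/services/compute"
)

func main() {
	for _, i := range []int{0, 1, 25, 26} {
		suffix, err := compute.Devicename(i)
		if err != nil {
			fmt.Printf("%d -> error: %v\n", i, err) // only 26 letters are available
			continue
		}
		fmt.Printf("%d -> /dev/vd%s\n", i, suffix)
	}
}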