From 464c63bdcb631e0fe8d2a9eb18eecc25fedf596f Mon Sep 17 00:00:00 2001 From: Nick Tenczar Date: Mon, 11 Jul 2022 10:31:35 -0700 Subject: [PATCH] Update cc node pools to support tkgs clusterclass This change updates the clusterclass node pool operations to support variables in the tkgs clusterclass such as vmClass, storageClass, nodePoolTaints and nodePoolVolumes. --- cmd/cli/plugin/cluster/delete_node_pool.go | 9 +- cmd/cli/plugin/cluster/get_node_pools.go | 74 +-------------- cmd/cli/plugin/cluster/set_node_pool.go | 7 +- pkg/v1/tkg/client/machine_deployment.go | 95 +++++++++++++++---- pkg/v1/tkg/client/machine_deployment_cc.go | 63 ++++++++++++ .../tkg/client/machine_deployment_cc_test.go | 13 +-- 6 files changed, 157 insertions(+), 104 deletions(-) diff --git a/cmd/cli/plugin/cluster/delete_node_pool.go b/cmd/cli/plugin/cluster/delete_node_pool.go index ca7dbec75d..5fe9e40280 100644 --- a/cmd/cli/plugin/cluster/delete_node_pool.go +++ b/cmd/cli/plugin/cluster/delete_node_pool.go @@ -11,6 +11,7 @@ import ( "github.com/vmware-tanzu/tanzu-framework/pkg/v1/config" "github.com/vmware-tanzu/tanzu-framework/pkg/v1/tkg/client" + "github.com/vmware-tanzu/tanzu-framework/pkg/v1/tkg/log" ) type deleteNodePoolOptions struct { @@ -59,5 +60,11 @@ func runDeleteNodePool(server *v1alpha1.Server, clusterName string) error { Namespace: deleteNP.namespace, Name: deleteNP.nodePoolName, } - return tkgctlClient.DeleteMachineDeployment(options) + + err = tkgctlClient.DeleteMachineDeployment(options) + if err == nil { + log.Infof("Node pool '%s' is being deleted", options.Name) + } + + return err } diff --git a/cmd/cli/plugin/cluster/get_node_pools.go b/cmd/cli/plugin/cluster/get_node_pools.go index 885f48ba70..faaa5d1c89 100644 --- a/cmd/cli/plugin/cluster/get_node_pools.go +++ b/cmd/cli/plugin/cluster/get_node_pools.go @@ -4,21 +4,15 @@ package main import ( - "fmt" - "strings" - "github.com/pkg/errors" - "sigs.k8s.io/cluster-api/api/v1alpha3" "github.com/spf13/cobra" "github.com/vmware-tanzu/tanzu-framework/apis/config/v1alpha1" - tkgsv1alpha2 "github.com/vmware-tanzu/tanzu-framework/apis/run/v1alpha2" "github.com/vmware-tanzu/tanzu-framework/pkg/v1/cli/component" "github.com/vmware-tanzu/tanzu-framework/pkg/v1/config" "github.com/vmware-tanzu/tanzu-framework/pkg/v1/tkg/client" - "github.com/vmware-tanzu/tanzu-framework/pkg/v1/tkg/tkgctl" ) type listNodePoolOptions struct { @@ -66,57 +60,17 @@ func listNodePoolsInternal(cmd *cobra.Command, server *v1alpha1.Server, clusterN Namespace: lnp.namespace, } - isPacific, err := tkgctlClient.IsPacificRegionalCluster() - if err != nil { - return errors.Wrap(err, "error determining Tanzu Kubernetes Cluster service for vSphere management cluster ") - } - if isPacific { - return listPacificNodePools(cmd, tkgctlClient, mdOptions) - } - machineDeployments, err := tkgctlClient.GetMachineDeployments(mdOptions) if err != nil { return err } - var t component.OutputWriter - if lnp.outputFormat == string(component.JSONOutputType) || lnp.outputFormat == string(component.YAMLOutputType) { - t = component.NewObjectWriter(cmd.OutOrStdout(), lnp.outputFormat, machineDeployments) - } else { - t = component.NewOutputWriter(cmd.OutOrStdout(), lnp.outputFormat, "NAME", "NAMESPACE", "PHASE", "REPLICAS", "READY", "UPDATED", "UNAVAILABLE") - for _, md := range machineDeployments { - t.AddRow(md.Name, md.Namespace, md.Status.Phase, md.Status.Replicas, md.Status.ReadyReplicas, md.Status.UpdatedReplicas, md.Status.UnavailableReplicas) - } - } - t.Render() - - return nil -} - -func 
listPacificNodePools(cmd *cobra.Command, tkgctlClient tkgctl.TKGClient, mdOptions client.GetMachineDeploymentOptions) error { - var tkcObj *tkgsv1alpha2.TanzuKubernetesCluster - - tkcObj, err := tkgctlClient.GetPacificClusterObject(mdOptions.ClusterName, mdOptions.Namespace) - if err != nil { - return errors.Wrap(err, "unable to get Tanzu Kubernetes Cluster object") - } - machineDeployments, err := tkgctlClient.GetPacificMachineDeployments(mdOptions) - if err != nil { - return err - } - // // Pacific TKC has nodepool names, so update the MD names with nodepool names from TKC object before listing nodepools. - // // This is required because the user would want to use the nodepool names from the list output for nodepool set/delete operations - err = updateMDsWithTKCNodepoolNames(tkcObj, machineDeployments) - if err != nil { - return err - } var t component.OutputWriter if lnp.outputFormat == string(component.JSONOutputType) || lnp.outputFormat == string(component.YAMLOutputType) { t = component.NewObjectWriter(cmd.OutOrStdout(), lnp.outputFormat, machineDeployments) } else { t = component.NewOutputWriter(cmd.OutOrStdout(), lnp.outputFormat, "NAME", "NAMESPACE", "PHASE", "REPLICAS", "READY", "UPDATED", "UNAVAILABLE") - for idx := range machineDeployments { - md := machineDeployments[idx] + for _, md := range machineDeployments { t.AddRow(md.Name, md.Namespace, md.Status.Phase, md.Status.Replicas, md.Status.ReadyReplicas, md.Status.UpdatedReplicas, md.Status.UnavailableReplicas) } } @@ -124,29 +78,3 @@ func listPacificNodePools(cmd *cobra.Command, tkgctlClient tkgctl.TKGClient, mdO return nil } - -func updateMDsWithTKCNodepoolNames(tkcObj *tkgsv1alpha2.TanzuKubernetesCluster, machineDeployments []v1alpha3.MachineDeployment) error { - nodepools := tkcObj.Spec.Topology.NodePools - for mdIdx := range machineDeployments { - nodepoolName := getNodePoolNameFromMDName(tkcObj.Name, machineDeployments[mdIdx].Name) - for npIdx := range nodepools { - if nodepoolName == nodepools[npIdx].Name { - machineDeployments[mdIdx].Name = nodepools[npIdx].Name - break - } - } - } - return nil -} - -func getNodePoolNameFromMDName(clusterName, mdName string) string { - // Pacific(TKGS) creates a corresponding MachineDeployment for a nodepool in - // the format {tkc-clustername}-{nodepool-name}-{randomstring} - trimmedName := strings.TrimPrefix(mdName, fmt.Sprintf("%s-", clusterName)) - lastHypenIdx := strings.LastIndex(trimmedName, "-") - if lastHypenIdx == -1 { - return "" - } - nodepoolName := trimmedName[:lastHypenIdx] - return nodepoolName -} diff --git a/cmd/cli/plugin/cluster/set_node_pool.go b/cmd/cli/plugin/cluster/set_node_pool.go index 451f21e41f..c07cf5dcb5 100644 --- a/cmd/cli/plugin/cluster/set_node_pool.go +++ b/cmd/cli/plugin/cluster/set_node_pool.go @@ -14,6 +14,7 @@ import ( "github.com/vmware-tanzu/tanzu-framework/apis/config/v1alpha1" "github.com/vmware-tanzu/tanzu-framework/pkg/v1/config" "github.com/vmware-tanzu/tanzu-framework/pkg/v1/tkg/client" + "github.com/vmware-tanzu/tanzu-framework/pkg/v1/tkg/log" ) type clusterSetNodePoolCmdOptions struct { @@ -76,5 +77,9 @@ func SetNodePool(server *v1alpha1.Server, clusterName string) error { NodePool: nodePool, } - return tkgctlClient.SetMachineDeployment(&options) + err = tkgctlClient.SetMachineDeployment(&options) + if err == nil { + log.Infof("Cluster update for node pool '%s' completed successfully", options.Name) + } + return err } diff --git a/pkg/v1/tkg/client/machine_deployment.go b/pkg/v1/tkg/client/machine_deployment.go index 
58eaf2f1d0..c2c1adfcde 100644 --- a/pkg/v1/tkg/client/machine_deployment.go +++ b/pkg/v1/tkg/client/machine_deployment.go @@ -99,6 +99,18 @@ func (c *TkgClient) SetMachineDeployment(options *SetMachineDeploymentOptions) e return errors.Wrap(err, "Unable to create clusterclient") } + ccBased, err := clusterClient.IsClusterClassBased(options.ClusterName, options.Namespace) + if err != nil { + return errors.Wrap(err, "unable to determine if cluster is clusterclass based") + } + if ccBased { + var cluster capi.Cluster + if err = clusterClient.GetResource(&cluster, options.ClusterName, options.Namespace, nil, nil); err != nil { + return errors.Wrap(err, "Unable to retrieve cluster resource") + } + return DoSetMachineDeploymentCC(clusterClient, &cluster, options) + } + isPacific, err := clusterClient.IsPacificRegionalCluster() if err != nil { return errors.Wrap(err, "error determining Tanzu Kubernetes Cluster service for vSphere management cluster ") @@ -107,14 +119,6 @@ func (c *TkgClient) SetMachineDeployment(options *SetMachineDeploymentOptions) e return c.SetNodePoolsForPacificCluster(clusterClient, options) } - var cluster capi.Cluster - if err = clusterClient.GetResource(&cluster, options.ClusterName, options.Namespace, nil, nil); err != nil { - return errors.Wrap(err, "Unable to retrieve cluster resource") - } - if cluster.Spec.Topology != nil { - return DoSetMachineDeploymentCC(clusterClient, &cluster, options) - } - return DoSetMachineDeployment(clusterClient, options) } @@ -308,6 +312,18 @@ func (c *TkgClient) DeleteMachineDeployment(options DeleteMachineDeploymentOptio return errors.Wrap(err, "Unable to create clusterclient") } + ccBased, err := clusterClient.IsClusterClassBased(options.ClusterName, options.Namespace) + if err != nil { + return errors.Wrap(err, "unable to determine if cluster is clusterclass based") + } + if ccBased { + var cluster capi.Cluster + if err = clusterClient.GetResource(&cluster, options.ClusterName, options.Namespace, nil, nil); err != nil { + return errors.Wrap(err, "Unable to retrieve cluster resource") + } + return DoDeleteMachineDeploymentCC(clusterClient, &cluster, &options) + } + isPacific, err := clusterClient.IsPacificRegionalCluster() if err != nil { return errors.Wrap(err, "error determining Tanzu Kubernetes Cluster service for vSphere management cluster ") @@ -316,14 +332,6 @@ func (c *TkgClient) DeleteMachineDeployment(options DeleteMachineDeploymentOptio return c.DeleteNodePoolForPacificCluster(clusterClient, options) } - var cluster capi.Cluster - if err = clusterClient.GetResource(&cluster, options.ClusterName, options.Namespace, nil, nil); err != nil { - return errors.Wrap(err, "Unable to retrieve cluster resource") - } - if cluster.Spec.Topology != nil { - return DoDeleteMachineDeploymentCC(clusterClient, &cluster, &options) - } - return DoDeleteMachineDeployment(clusterClient, &options) } @@ -478,14 +486,49 @@ func (c *TkgClient) GetMachineDeployments(options GetMachineDeploymentOptions) ( return nil, errors.Wrap(err, "Unable to create clusterclient") } - var cluster capi.Cluster - if err = clusterClient.GetResource(&cluster, options.ClusterName, options.Namespace, nil, nil); err != nil { - return nil, errors.Wrap(err, "Unable to retrieve cluster resources") + ccBased, err := clusterClient.IsClusterClassBased(options.ClusterName, options.Namespace) + if err != nil { + return nil, errors.Wrap(err, "unable to determine if cluster is clusterclass based") } - if cluster.Spec.Topology != nil { + if ccBased { + var cluster capi.Cluster + if err 
= clusterClient.GetResource(&cluster, options.ClusterName, options.Namespace, nil, nil); err != nil { + return nil, errors.Wrap(err, "Unable to retrieve cluster resources") + } return DoGetMachineDeploymentsCC(clusterClient, &cluster, &options) } + isPacific, err := clusterClient.IsPacificRegionalCluster() + if err != nil { + return nil, errors.Wrap(err, "error determining Tanzu Kubernetes Cluster service for vSphere management cluster ") + } + if isPacific { + pacificMds, err := c.GetPacificMachineDeployments(options) + if err != nil { + return nil, err + } + + var mds []capi.MachineDeployment + for i := range pacificMds { + newMd := capi.MachineDeployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: getNodePoolNameFromMDName(options.ClusterName, pacificMds[i].Name), + Namespace: pacificMds[i].Namespace, + }, + Status: capi.MachineDeploymentStatus{ + Replicas: pacificMds[i].Status.Replicas, + UpdatedReplicas: pacificMds[i].Status.UpdatedReplicas, + ReadyReplicas: pacificMds[i].Status.ReadyReplicas, + AvailableReplicas: pacificMds[i].Status.AvailableReplicas, + UnavailableReplicas: pacificMds[i].Status.UnavailableReplicas, + Phase: pacificMds[i].Status.Phase, + }, + } + mds = append(mds, newMd) + } + return mds, nil + } + return DoGetMachineDeployments(clusterClient, &options) } @@ -698,6 +741,18 @@ func NormalizeNodePoolName(workers []capi.MachineDeployment, clusterName string) return workers, nil } +func getNodePoolNameFromMDName(clusterName, mdName string) string { + // Pacific(TKGS) creates a corresponding MachineDeployment for a nodepool in + // the format {tkc-clustername}-{nodepool-name}-{randomstring} + trimmedName := strings.TrimPrefix(mdName, fmt.Sprintf("%s-", clusterName)) + lastHypenIdx := strings.LastIndex(trimmedName, "-") + if lastHypenIdx == -1 { + return "" + } + nodepoolName := trimmedName[:lastHypenIdx] + return nodepoolName +} + func updateAzureSecret(kcTemplate *v1beta1.KubeadmConfigTemplate, machineTemplateName string) { if kcTemplate.Spec.Template.Spec.Files != nil && len(kcTemplate.Spec.Template.Spec.Files) > 0 { for i := range kcTemplate.Spec.Template.Spec.Files { diff --git a/pkg/v1/tkg/client/machine_deployment_cc.go b/pkg/v1/tkg/client/machine_deployment_cc.go index 094d7ebf33..152808fee3 100644 --- a/pkg/v1/tkg/client/machine_deployment_cc.go +++ b/pkg/v1/tkg/client/machine_deployment_cc.go @@ -104,6 +104,21 @@ func DoSetMachineDeploymentCC(clusterClient clusterclient.Client, cluster *capi. 
nodeLabelsVar.Value.Raw = output } + if options.Taints != nil { + nodeTaintsVar := getClusterVariableByName("nodePoolTaints", base.Variables.Overrides) + if nodeTaintsVar == nil { + nodeTaintsVar = &capi.ClusterVariable{ + Name: "nodePoolTaints", + Value: v1.JSON{}, + } + base.Variables.Overrides = append(base.Variables.Overrides, *nodeTaintsVar) + nodeTaintsVar = &base.Variables.Overrides[len(base.Variables.Overrides)-1] + } + + output, _ := json.Marshal(options.Taints) + nodeTaintsVar.Value.Raw = output + } + if update != nil { return clusterClient.UpdateResource(cluster, options.ClusterName, options.Namespace) } @@ -139,6 +154,54 @@ func createNewMachineDeployment(clusterClient clusterclient.Client, cluster *cap base.FailureDomain = &options.AZ } + if options.VMClass != "" { + var vmClassVariable = getClusterVariableByName("vmClass", base.Variables.Overrides) + if vmClassVariable == nil { + vmClassVariable = getClusterVariableByName("vmClass", cluster.Spec.Topology.Variables).DeepCopy() + base.Variables.Overrides = append(base.Variables.Overrides, *vmClassVariable) + vmClassVariable = &base.Variables.Overrides[len(base.Variables.Overrides)-1] + } + + output, _ := json.Marshal(options.VMClass) + vmClassVariable.Value.Raw = output + } + + if options.StorageClass != "" { + var storageClassVariable = getClusterVariableByName("storageClass", base.Variables.Overrides) + if storageClassVariable == nil { + storageClassVariable = getClusterVariableByName("storageClass", cluster.Spec.Topology.Variables).DeepCopy() + base.Variables.Overrides = append(base.Variables.Overrides, *storageClassVariable) + storageClassVariable = &base.Variables.Overrides[len(base.Variables.Overrides)-1] + } + + output, _ := json.Marshal(options.StorageClass) + storageClassVariable.Value.Raw = output + } + + if options.Volumes != nil { + var volumesVariable = getClusterVariableByName("nodePoolVolumes", base.Variables.Overrides) + if volumesVariable == nil { + volumesVariable = getClusterVariableByName("nodePoolVolumes", cluster.Spec.Topology.Variables).DeepCopy() + base.Variables.Overrides = append(base.Variables.Overrides, *volumesVariable) + volumesVariable = &base.Variables.Overrides[len(base.Variables.Overrides)-1] + } + + var volumes []map[string]interface{} + + for _, vol := range *options.Volumes { + volumes = append(volumes, map[string]interface{}{ + "mountPath": vol.MountPath, + "name": vol.Name, + "capacity": map[string]interface{}{ + "storage": vol.Capacity.Storage(), + }, + }) + } + + output, _ := json.Marshal(volumes) + volumesVariable.Value.Raw = output + } + if err := setVSphereWorkerOptions(options, base, cluster); err != nil { return err } diff --git a/pkg/v1/tkg/client/machine_deployment_cc_test.go b/pkg/v1/tkg/client/machine_deployment_cc_test.go index ebed85443b..efef39efe8 100644 --- a/pkg/v1/tkg/client/machine_deployment_cc_test.go +++ b/pkg/v1/tkg/client/machine_deployment_cc_test.go @@ -584,20 +584,15 @@ var _ = Describe("SetMachineDeploymentCC", func() { }, }, }) - expectedLabels, _ := json.Marshal([]map[string]string{ - { - "os": "ubuntu", - }, - { - "arch": "amd64", - }, - }) clusterInterface, _, _, _ := regionalClusterClient.UpdateResourceArgsForCall(0) actual, ok := clusterInterface.(*capi.Cluster) Expect(ok).To(BeTrue()) Expect(len(actual.Spec.Topology.Workers.MachineDeployments)).To(Equal(3)) Expect(len(actual.Spec.Topology.Workers.MachineDeployments[2].Variables.Overrides)).To(Equal(3)) - 
Expect(actual.Spec.Topology.Workers.MachineDeployments[2].Variables.Overrides[0].Value.Raw).To(Equal(expectedLabels)) + var actualLabels []map[string]string + err = json.Unmarshal(actual.Spec.Topology.Workers.MachineDeployments[2].Variables.Overrides[0].Value.Raw, &actualLabels) + Expect(err).ToNot(HaveOccurred()) + Expect(len(actualLabels)).To(Equal(2)) Expect(actual.Spec.Topology.Workers.MachineDeployments[2].Variables.Overrides[1].Value.Raw).To(Equal(expected)) expectedVcenter, _ := json.Marshal(map[string]interface{}{
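
Note (not part of the patch): the hunks above in machine_deployment_cc.go apply the same override pattern for vmClass, storageClass, nodePoolTaints and nodePoolVolumes — look up the per-node-pool override by name, fall back to appending a new entry (copied from the cluster-level variable where one exists), then write the JSON-encoded value into it. The sketch below restates that pattern with simplified stand-in types; ClusterVariable, getVariableByName, setOverride and the example values here are illustrative only and are not the capi/clusterclient APIs used in the patch.

package main

import (
	"encoding/json"
	"fmt"
)

// ClusterVariable is a simplified stand-in for the cluster-api ClusterVariable type
// (which stores its value as raw JSON).
type ClusterVariable struct {
	Name string
	Raw  []byte
}

// getVariableByName returns a pointer to the variable with the given name, or nil
// if it is not present (mirrors getClusterVariableByName in the patch).
func getVariableByName(name string, vars []ClusterVariable) *ClusterVariable {
	for i := range vars {
		if vars[i].Name == name {
			return &vars[i]
		}
	}
	return nil
}

// setOverride reuses an existing override if present, otherwise appends a new one,
// then writes the JSON-encoded value into it — the same sequence the patch uses
// when it appends to base.Variables.Overrides and repoints at the appended element.
func setOverride(overrides []ClusterVariable, name string, value interface{}) ([]ClusterVariable, error) {
	v := getVariableByName(name, overrides)
	if v == nil {
		overrides = append(overrides, ClusterVariable{Name: name})
		v = &overrides[len(overrides)-1]
	}
	raw, err := json.Marshal(value)
	if err != nil {
		return overrides, err
	}
	v.Raw = raw
	return overrides, nil
}

func main() {
	// Example values are hypothetical, chosen only to show the JSON shapes involved.
	var overrides []ClusterVariable
	overrides, _ = setOverride(overrides, "vmClass", "best-effort-small")
	overrides, _ = setOverride(overrides, "nodePoolTaints", []map[string]string{
		{"key": "dedicated", "value": "gpu", "effect": "NoSchedule"},
	})
	for _, o := range overrides {
		fmt.Printf("%s=%s\n", o.Name, o.Raw)
	}
}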