// Copyright 2021 VMware, Inc. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

// nolint: testpackage
package e2e

import (
	"context"
	"fmt"
	"os"
	"path/filepath"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/utils/pointer"
	"sigs.k8s.io/cluster-api/test/framework"
	"sigs.k8s.io/cluster-api/test/framework/clusterctl"
	"sigs.k8s.io/cluster-api/util"
)

var _ = Describe("Cluster upgrade test for k8s patch [K8s-upgrade]", func() {

	var (
		ctx                          context.Context
		specName                     = "upgrade"
		namespace                    *corev1.Namespace
		cancelWatches                context.CancelFunc
		clusterResources             *clusterctl.ApplyClusterTemplateAndWaitResult
		dockerClient                 *client.Client
		byoHostCapacityPool          = 4
		byoHostName                  string
		allbyohostContainerIDs       []string
		allAgentLogFiles             []string
		coreDNSUpgradeVersion        = "v1.8.6"
		kubernetesVersionUpgradeTo   = "v1.23.5"
		kubernetesVersionUpgradeFrom = "v1.23.4"
		etcdUpgradeVersion           = "3.5.1-0"
	)

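	// BeforeEach checks the prerequisites shared by the e2e specs (e2e config, clusterctl
	// config, bootstrap cluster proxy, artifact folder) and creates an isolated namespace
	// to host the objects created by this spec.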
	BeforeEach(func() {
		ctx = context.TODO()
		Expect(ctx).NotTo(BeNil(), "ctx is required for %s spec", specName)

		Expect(e2eConfig).NotTo(BeNil(), "Invalid argument. e2eConfig can't be nil when calling %s spec", specName)
		Expect(clusterctlConfigPath).To(BeAnExistingFile(), "Invalid argument. clusterctlConfigPath must be an existing file when calling %s spec", specName)
		Expect(bootstrapClusterProxy).NotTo(BeNil(), "Invalid argument. bootstrapClusterProxy can't be nil when calling %s spec", specName)
		Expect(os.MkdirAll(artifactFolder, 0755)).To(Succeed(), "Invalid argument. artifactFolder can't be created for %s spec", specName)
		Expect(e2eConfig.Variables).To(HaveKey(KubernetesVersion))

		// Set up a namespace to host objects for this spec and start watching for namespace events.
		namespace, cancelWatches = setupSpecNamespace(ctx, specName, bootstrapClusterProxy, artifactFolder)
		clusterResources = new(clusterctl.ApplyClusterTemplateAndWaitResult)
	})

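	// The spec provisions a pool of BYO Docker hosts, creates a workload cluster at
	// kubernetesVersionUpgradeFrom, and then upgrades the control plane and the
	// machine deployment in place to kubernetesVersionUpgradeTo.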
	It("Should successfully upgrade cluster", func() {
		clusterName := fmt.Sprintf("%s-%s", specName, util.RandomString(6))

		dClient, err := client.NewClientWithOpts(client.FromEnv)
		Expect(err).NotTo(HaveOccurred())
		dockerClient = dClient

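		// Each BYO host is a Docker container on the kind network that runs the host
		// agent binary; the agent's log is streamed to a file under /tmp so it can be
		// dumped if the spec fails.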
		By(fmt.Sprintf("Creating byohost capacity pool containing %d hosts", byoHostCapacityPool))
		for i := 0; i < byoHostCapacityPool; i++ {

			byoHostName = fmt.Sprintf("byohost-%s", util.RandomString(6))

			runner := ByoHostRunner{
				Context:               ctx,
				clusterConName:        clusterConName,
				ByoHostName:           byoHostName,
				Namespace:             namespace.Name,
				PathToHostAgentBinary: pathToHostAgentBinary,
				DockerClient:          dockerClient,
				NetworkInterface:      "kind",
				bootstrapClusterProxy: bootstrapClusterProxy,
				CommandArgs: map[string]string{
					"--bootstrap-kubeconfig": "/bootstrap.conf",
					"--namespace":            namespace.Name,
					"--v":                    "1",
				},
			}
			runner.BootstrapKubeconfigData = generateBootstrapKubeconfig(runner.Context, bootstrapClusterProxy, clusterConName)
			byohost, err := runner.SetupByoDockerHost()
			Expect(err).NotTo(HaveOccurred())
			output, byohostContainerID, err := runner.ExecByoDockerHost(byohost)
			allbyohostContainerIDs = append(allbyohostContainerIDs, byohostContainerID)
			Expect(err).NotTo(HaveOccurred())

			// Stream the host agent container's log output to a local file for later inspection.
			agentLogFile := fmt.Sprintf("/tmp/host-agent-%d.log", i)

			f := WriteDockerLog(output, agentLogFile)
			// Deferred closes accumulate inside the loop and run when the It block returns.
			defer func() {
				deferredErr := f.Close()
				if deferredErr != nil {
					Showf("error closing file %s: %v", agentLogFile, deferredErr)
				}
			}()
			allAgentLogFiles = append(allAgentLogFiles, agentLogFile)
		}

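		// With the capacity pool registered, create the workload cluster at the
		// pre-upgrade Kubernetes version; the steps below then upgrade it in place.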
		By("creating a workload cluster with one control plane node and one worker node")

		setControlPlaneIP(context.Background(), dockerClient)
		clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{
			ClusterProxy: bootstrapClusterProxy,
			ConfigCluster: clusterctl.ConfigClusterInput{
				LogFolder:                filepath.Join(artifactFolder, "clusters", bootstrapClusterProxy.GetName()),
				ClusterctlConfigPath:     clusterctlConfigPath,
				KubeconfigPath:           bootstrapClusterProxy.GetKubeconfigPath(),
				InfrastructureProvider:   clusterctl.DefaultInfrastructureProvider,
				Flavor:                   clusterctl.DefaultFlavor,
				Namespace:                namespace.Name,
				ClusterName:              clusterName,
				KubernetesVersion:        kubernetesVersionUpgradeFrom,
				ControlPlaneMachineCount: pointer.Int64Ptr(1),
				WorkerMachineCount:       pointer.Int64Ptr(1),
			},
			WaitForClusterIntervals:      e2eConfig.GetIntervals(specName, "wait-cluster"),
			WaitForControlPlaneIntervals: e2eConfig.GetIntervals(specName, "wait-control-plane"),
			WaitForMachineDeployments:    e2eConfig.GetIntervals(specName, "wait-worker-nodes"),
		}, clusterResources)

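		// Besides the Kubernetes version, the control plane upgrade also bumps the etcd
		// and CoreDNS image tags (etcdUpgradeVersion, coreDNSUpgradeVersion).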
		By("Upgrading the control plane")
		framework.UpgradeControlPlaneAndWaitForUpgrade(ctx, framework.UpgradeControlPlaneAndWaitForUpgradeInput{
			ClusterProxy:                bootstrapClusterProxy,
			Cluster:                     clusterResources.Cluster,
			ControlPlane:                clusterResources.ControlPlane,
			EtcdImageTag:                etcdUpgradeVersion,
			DNSImageTag:                 coreDNSUpgradeVersion,
			KubernetesUpgradeVersion:    kubernetesVersionUpgradeTo,
			WaitForMachinesToBeUpgraded: e2eConfig.GetIntervals(specName, "wait-machine-upgrade"),
			WaitForKubeProxyUpgrade:     e2eConfig.GetIntervals(specName, "wait-machine-upgrade"),
			WaitForDNSUpgrade:           e2eConfig.GetIntervals(specName, "wait-machine-upgrade"),
			WaitForEtcdUpgrade:          e2eConfig.GetIntervals(specName, "wait-machine-upgrade"),
		})

		By("Upgrading the machine deployment")
		framework.UpgradeMachineDeploymentsAndWait(ctx, framework.UpgradeMachineDeploymentsAndWaitInput{
			ClusterProxy:                bootstrapClusterProxy,
			Cluster:                     clusterResources.Cluster,
			UpgradeVersion:              kubernetesVersionUpgradeTo,
			MachineDeployments:          clusterResources.MachineDeployments,
			WaitForMachinesToBeUpgraded: e2eConfig.GetIntervals(specName, "wait-worker-nodes"),
		})

		By("Waiting until nodes are ready")
		workloadProxy := bootstrapClusterProxy.GetWorkloadCluster(ctx, namespace.Name, clusterResources.Cluster.Name)
		workloadClient := workloadProxy.GetClient()
		framework.WaitForNodesReady(ctx, framework.WaitForNodesReadyInput{
			Lister:            workloadClient,
			KubernetesVersion: kubernetesVersionUpgradeTo,
			Count:             int(clusterResources.ExpectedTotalNodes()),
			WaitForNodesReady: e2eConfig.GetIntervals(specName, "wait-nodes-ready"),
		})
	})
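	// Dump the collected host agent logs to the test output when the spec fails.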
	JustAfterEach(func() {
		if CurrentGinkgoTestDescription().Failed {
			ShowInfo(allAgentLogFiles)
		}
	})

	AfterEach(func() {
		// Dump all the resources in the spec namespace, then clean up the cluster object and the spec namespace itself.
		dumpSpecResourcesAndCleanup(ctx, specName, bootstrapClusterProxy, artifactFolder, namespace, cancelWatches, clusterResources.Cluster, e2eConfig.GetIntervals, skipCleanup)

		if dockerClient != nil {
			for _, byohostContainerID := range allbyohostContainerIDs {
				err := dockerClient.ContainerStop(ctx, byohostContainerID, nil)
				Expect(err).NotTo(HaveOccurred())

				err = dockerClient.ContainerRemove(ctx, byohostContainerID, types.ContainerRemoveOptions{})
				Expect(err).NotTo(HaveOccurred())
			}
		}

		for _, agentLogFile := range allAgentLogFiles {
			err := os.Remove(agentLogFile)
			if err != nil {
				Showf("error removing file %s: %v", agentLogFile, err)
			}
		}
		err := os.Remove(ReadByohControllerManagerLogShellFile)
		if err != nil {
			Showf("error removing file %s: %v", ReadByohControllerManagerLogShellFile, err)
		}
		err = os.Remove(ReadAllPodsShellFile)
		if err != nil {
			Showf("error removing file %s: %v", ReadAllPodsShellFile, err)
		}
	})
})