Commit d768b0d

Deploy new VM images for MacStadium Kubernetes nodes
We don't want to replace the master: it isn't under disk pressure, and replacing it would lose its existing configuration. So the nodes and the master can now use different VM templates. The nodes use a CentOS 7 template that already has Kubernetes installed during the Packer build, which makes provisioning substantially simpler.
Parent: 6ef6624
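
For illustration only (the module label and the explicit overrides below are assumptions, not part of this commit), an environment calling the module can now pin the master and node images independently:

module "macstadium_cluster" {
  source = "../modules/macstadium_k8s_cluster"

  # Hypothetical overrides; if omitted, the defaults added in variables.tf below apply.
  master_vanilla_image = "travis-ci-ubuntu16.04-internal-vanilla-1540931726"
  node_vanilla_image   = "travis-ci-centos7-internal-kubernetes-1549480185"
}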

5 files changed: +28 −24 lines

macstadium-staging/main.tf (+2)

@@ -25,6 +25,8 @@ provider "aws" {
 }
 
 provider "vsphere" {
+  version = "~> 1.8"
+
   user           = "${var.vsphere_user}"
   password       = "${var.vsphere_password}"
   vsphere_server = "${var.vsphere_server}"

modules/macstadium_k8s_cluster/master.tf (+6 −6)

@@ -11,22 +11,22 @@ resource "vsphere_virtual_machine" "master" {
 
   num_cpus  = 4
   memory    = 4096
-  guest_id  = "${data.vsphere_virtual_machine.vanilla_template.guest_id}"
-  scsi_type = "${data.vsphere_virtual_machine.vanilla_template.scsi_type}"
+  guest_id  = "${data.vsphere_virtual_machine.master_vanilla_template.guest_id}"
+  scsi_type = "${data.vsphere_virtual_machine.master_vanilla_template.scsi_type}"
 
   disk {
     label            = "disk0"
-    size             = "${data.vsphere_virtual_machine.vanilla_template.disks.0.size}"
-    eagerly_scrub    = "${data.vsphere_virtual_machine.vanilla_template.disks.0.eagerly_scrub}"
-    thin_provisioned = "${data.vsphere_virtual_machine.vanilla_template.disks.0.thin_provisioned}"
+    size             = "${data.vsphere_virtual_machine.master_vanilla_template.disks.0.size}"
+    eagerly_scrub    = "${data.vsphere_virtual_machine.master_vanilla_template.disks.0.eagerly_scrub}"
+    thin_provisioned = "${data.vsphere_virtual_machine.master_vanilla_template.disks.0.thin_provisioned}"
   }
 
   network_interface {
     network_id = "${data.vsphere_network.internal.id}"
   }
 
   clone {
-    template_uuid = "${data.vsphere_virtual_machine.vanilla_template.id}"
+    template_uuid = "${data.vsphere_virtual_machine.master_vanilla_template.id}"
 
     customize {
       network_interface {

modules/macstadium_k8s_cluster/nodes.tf (+6 −14)

@@ -14,14 +14,14 @@ resource "vsphere_virtual_machine" "nodes" {
 
   num_cpus  = 4
   memory    = 4096
-  guest_id  = "${data.vsphere_virtual_machine.vanilla_template.guest_id}"
-  scsi_type = "${data.vsphere_virtual_machine.vanilla_template.scsi_type}"
+  guest_id  = "${data.vsphere_virtual_machine.node_vanilla_template.guest_id}"
+  scsi_type = "${data.vsphere_virtual_machine.node_vanilla_template.scsi_type}"
 
   disk {
     label            = "disk0"
-    size             = "${data.vsphere_virtual_machine.vanilla_template.disks.0.size}"
-    eagerly_scrub    = "${data.vsphere_virtual_machine.vanilla_template.disks.0.eagerly_scrub}"
-    thin_provisioned = "${data.vsphere_virtual_machine.vanilla_template.disks.0.thin_provisioned}"
+    size             = "${data.vsphere_virtual_machine.node_vanilla_template.disks.0.size}"
+    eagerly_scrub    = "${data.vsphere_virtual_machine.node_vanilla_template.disks.0.eagerly_scrub}"
+    thin_provisioned = "${data.vsphere_virtual_machine.node_vanilla_template.disks.0.thin_provisioned}"
   }
 
   network_interface {
@@ -35,7 +35,7 @@ resource "vsphere_virtual_machine" "nodes" {
   }
 
   clone {
-    template_uuid = "${data.vsphere_virtual_machine.vanilla_template.id}"
+    template_uuid = "${data.vsphere_virtual_machine.node_vanilla_template.id}"
 
     customize {
       network_interface {
@@ -67,16 +67,8 @@ resource "vsphere_virtual_machine" "nodes" {
     agent = true
   }
 
-  provisioner "file" {
-    source      = "${path.module}/scripts/"
-    destination = "/tmp"
-  }
-
   provisioner "remote-exec" {
     inline = [
-      "sudo chmod a+x /tmp/*.sh",
-      "sudo /tmp/install-docker.sh",
-      "sudo /tmp/install-kubernetes.sh",
       "sudo ${lookup(data.external.kubeadm_join.result, "command")}",
     ]
  }
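
The net effect on the node resource, reconstructed from the removed lines above, is that the remote-exec provisioner now only runs the kubeadm join command, since Docker and Kubernetes are already baked into the CentOS 7 image:

  provisioner "remote-exec" {
    inline = [
      "sudo ${lookup(data.external.kubeadm_join.result, "command")}",
    ]
  }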

modules/macstadium_k8s_cluster/providers.tf (+7 −2)

@@ -29,7 +29,12 @@ data "vsphere_network" "management" {
 }
 */
 
-data "vsphere_virtual_machine" "vanilla_template" {
-  name          = "Vanilla VMs/${var.vanilla_image}"
+data "vsphere_virtual_machine" "master_vanilla_template" {
+  name          = "Vanilla VMs/${var.master_vanilla_image}"
+  datacenter_id = "${data.vsphere_datacenter.dc.id}"
+}
+
+data "vsphere_virtual_machine" "node_vanilla_template" {
+  name          = "Vanilla VMs/${var.node_vanilla_image}"
   datacenter_id = "${data.vsphere_datacenter.dc.id}"
 }

modules/macstadium_k8s_cluster/variables.tf (+7 −2)

@@ -14,9 +14,14 @@ variable "node_count" {
   default = 1
 }
 
-variable "vanilla_image" {
+variable "master_vanilla_image" {
   default     = "travis-ci-ubuntu16.04-internal-vanilla-1540931726"
-  description = "The image to clone VMs from. Needs to be at least Xenial to support Kubernetes."
+  description = "The image to clone the master VM from. Needs to be at least Xenial to support Kubernetes."
+}
+
+variable "node_vanilla_image" {
+  default     = "travis-ci-centos7-internal-kubernetes-1549480185"
+  description = "The image to clone node VMs from. It should already have Kubernetes installed."
 }
 
 variable "datacenter" {
