This repository was archived by the owner on Sep 30, 2020. It is now read-only.

Commit d268c5a

Merge pull request #1856 from jorge07/0.12.x-flartcar
[v0.12.x] Flatcar
2 parents 0ea4367 + a5d83c3

21 files changed: +171 −88 lines

cmd/ami.go

+56
@@ -0,0 +1,56 @@
+package cmd
+
+import (
+    "fmt"
+
+    "github.com/kubernetes-incubator/kube-aws/core/root"
+    "github.com/kubernetes-incubator/kube-aws/flatcar/amiregistry"
+    "github.com/kubernetes-incubator/kube-aws/logger"
+    "github.com/spf13/cobra"
+)
+
+var (
+    cmdAmi = &cobra.Command{
+        Use:          "ami",
+        Short:        "Compare AMIID of cluster.yaml VS the last release",
+        Long:         ``,
+        RunE:         runCmdAmi,
+        SilenceUsage: true,
+    }
+)
+
+func init() {
+    RootCmd.AddCommand(cmdAmi)
+
+}
+
+func runCmdAmi(_ *cobra.Command, _ []string) error {
+    opts := root.NewOptions(true, true)
+    cluster, err := root.ClusterFromFile(configPath, opts, false)
+    if err != nil {
+        return fmt.Errorf("failed to read cluster config: %v", err)
+    }
+
+    region := cluster.ControlPlane().Region.Name
+    channel := string(cluster.ControlPlane().ReleaseChannel)
+
+    amiID, err := amiregistry.GetAMI(region, cluster.ControlPlane().ReleaseChannel)
+    if err != nil {
+        return fmt.Errorf("Impossible to retrieve FlatCar AMI for region %s, channel %s", region, channel)
+    }
+
+    if cluster.ControlPlane().AmiId == amiID {
+        logger.Infof("AmiID up to date")
+        return nil
+    }
+
+    successMsg := `
+The Flatcar AmiId for region %s and release channel %s is different than the one in cluster definition.
+
+Cluster.yaml:
+- amiId: %s
++ amiId: %s
+`
+    logger.Infof(successMsg, region, channel, cluster.ControlPlane().AmiId, amiID)
+    return nil
+}
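The new command depends on `flatcar/amiregistry.GetAMI`, which is not among the files expanded on this page. For orientation only, here is a minimal sketch of such a lookup; it assumes the public Flatcar AMI feed at `https://<channel>.release.flatcar-linux.net/amd64-usr/current/flatcar_production_ami_all.json` and its `{"amis": [{"name": "<region>", "hvm": "ami-..."}]}` layout, and it takes plain strings where kube-aws passes its release-channel type, so the package introduced by this commit may well differ.

```go
package amiregistry

import (
    "encoding/json"
    "fmt"
    "net/http"
)

// amiFeed mirrors the assumed layout of Flatcar's
// flatcar_production_ami_all.json release feed.
type amiFeed struct {
    AMIs []struct {
        Name string `json:"name"` // AWS region, e.g. "eu-west-1"
        HVM  string `json:"hvm"`  // HVM AMI ID published for that region
    } `json:"amis"`
}

// GetAMI returns the newest HVM AMI ID for the given region and
// release channel (alpha, beta or stable).
func GetAMI(region, channel string) (string, error) {
    url := fmt.Sprintf("https://%s.release.flatcar-linux.net/amd64-usr/current/flatcar_production_ami_all.json", channel)
    resp, err := http.Get(url)
    if err != nil {
        return "", fmt.Errorf("failed to fetch Flatcar AMI feed: %v", err)
    }
    defer resp.Body.Close()

    var feed amiFeed
    if err := json.NewDecoder(resp.Body).Decode(&feed); err != nil {
        return "", fmt.Errorf("failed to decode Flatcar AMI feed: %v", err)
    }
    for _, ami := range feed.AMIs {
        if ami.Name == region {
            return ami.HVM, nil
        }
    }
    return "", fmt.Errorf("no Flatcar AMI found for region %s on channel %s", region, channel)
}
```

With a helper of this shape, `kube-aws ami` only has to compare the returned ID against `amiId` in `cluster.yaml`, which is what `runCmdAmi` above does.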

cmd/init.go

+10-5
@@ -5,9 +5,10 @@ import (
     "fmt"
 
     "github.com/kubernetes-incubator/kube-aws/core/root/config"
-    "github.com/kubernetes-incubator/kube-aws/coreos/amiregistry"
     "github.com/kubernetes-incubator/kube-aws/filegen"
+    "github.com/kubernetes-incubator/kube-aws/flatcar/amiregistry"
     "github.com/kubernetes-incubator/kube-aws/logger"
+    "github.com/kubernetes-incubator/kube-aws/model"
     "github.com/spf13/cobra"
 )
 
@@ -20,7 +21,8 @@ var (
         SilenceUsage: true,
     }
 
-    initOpts = config.InitialConfig{}
+    initOpts       = config.InitialConfig{}
+    releaseChannel = ""
 )
 
 const (
@@ -37,7 +39,8 @@ func init() {
     cmdInit.Flags().StringVar(&initOpts.AvailabilityZone, "availability-zone", "", "The AWS availability-zone to deploy to")
     cmdInit.Flags().StringVar(&initOpts.KeyName, "key-name", "", "The AWS key-pair for ssh access to nodes")
     cmdInit.Flags().StringVar(&initOpts.KMSKeyARN, "kms-key-arn", "", "The ARN of the AWS KMS key for encrypting TLS assets")
-    cmdInit.Flags().StringVar(&initOpts.AmiId, "ami-id", "", "The AMI ID of CoreOS. Last CoreOS Stable Channel selected by default if empty")
+    cmdInit.Flags().StringVar(&initOpts.AmiId, "ami-id", "", "The AMI ID of Flatcar. Last Flatcar Stable Channel selected by default if empty")
+    cmdInit.Flags().StringVar(&releaseChannel, "release-channel", defaultReleaseChannel, "Flatcar release channel for AMI")
     cmdInit.Flags().BoolVar(&initOpts.NoRecordSet, "no-record-set", false, "Instruct kube-aws to not manage Route53 record sets for your K8S API endpoints")
 }
 
@@ -55,12 +58,14 @@ func runCmdInit(_ *cobra.Command, _ []string) error {
 
     if initOpts.AmiId == "" {
        amiID, err := amiregistry.GetAMI(initOpts.Region.Name, defaultReleaseChannel)
-       initOpts.AmiId = amiID
        if err != nil {
-           return fmt.Errorf("cannot retrieve CoreOS AMI for region %s, channel %s", initOpts.Region.Name, defaultReleaseChannel)
+           return fmt.Errorf("cannot retrieve Flatcar AMI for region %s, channel %s", initOpts.Region.Name, defaultReleaseChannel)
        }
+       initOpts.AmiId = amiID
     }
 
+    initOpts.ReleaseChannel = model.ReleaseChannel(defaultReleaseChannel)
+
     if !initOpts.NoRecordSet && initOpts.HostedZoneID == "" {
        return errors.New("missing required flags: either --hosted-zone-id or --no-record-set is required")
     }

core/controlplane/cluster/cluster_test.go

+1
@@ -29,6 +29,7 @@ func defaultConfigValues(t *testing.T, configYaml string) string {
     defaultYaml := `
 externalDNSName: test.staging.core-os.net
 keyName: test-key-name
+releaseChannel: stable
 s3URI: s3://mybucket/mydir
 region: us-west-1
 clusterName: test-cluster-name

core/controlplane/config/config.go

+4-12
@@ -17,7 +17,7 @@ import (
     "github.com/go-yaml/yaml"
     "github.com/kubernetes-incubator/kube-aws/cfnresource"
     "github.com/kubernetes-incubator/kube-aws/cfnstack"
-    "github.com/kubernetes-incubator/kube-aws/coreos/amiregistry"
+    "github.com/kubernetes-incubator/kube-aws/flatcar/amiregistry"
     "github.com/kubernetes-incubator/kube-aws/gzipcompressor"
     "github.com/kubernetes-incubator/kube-aws/logger"
     "github.com/kubernetes-incubator/kube-aws/model"
@@ -170,11 +170,11 @@ func NewDefaultCluster() *Cluster {
     DeploymentSettings: DeploymentSettings{
         ClusterName: "kubernetes",
         VPCCIDR: "10.0.0.0/16",
-        ReleaseChannel: "stable",
         KubeAWSVersion: "UNKNOWN",
         K8sVer: k8sVer,
         ContainerRuntime: "docker",
         Subnets: []model.Subnet{},
+        ReleaseChannel: model.DefaultReleaseChannel(),
         EIPAllocationIDs: []string{},
         Experimental: experimental,
         Kubelet: kubelet,
@@ -513,7 +513,7 @@ type DeploymentSettings struct {
     KeyName string `yaml:"keyName,omitempty"`
     Region model.Region `yaml:",inline"`
     AvailabilityZone string `yaml:"availabilityZone,omitempty"`
-    ReleaseChannel string `yaml:"releaseChannel,omitempty"`
+    ReleaseChannel model.ReleaseChannel `yaml:"releaseChannel,omitempty"`
     AmiId string `yaml:"amiId,omitempty"`
     DeprecatedVPCID string `yaml:"vpcId,omitempty"`
     VPC model.VPC `yaml:"vpc,omitempty"`
@@ -921,12 +921,6 @@
     internetGatewayLogicalName = "InternetGateway"
 )
 
-var supportedReleaseChannels = map[string]bool{
-    "alpha": true,
-    "beta": true,
-    "stable": true,
-}
-
 func (c DeploymentSettings) ApiServerLeaseEndpointReconciler() (bool, error) {
     constraint, err := semver.NewConstraint(">= 1.9")
     if err != nil {
@@ -1391,11 +1385,9 @@ type DeploymentValidationResult struct {
 }
 
 func (c DeploymentSettings) Validate() (*DeploymentValidationResult, error) {
-    releaseChannelSupported := supportedReleaseChannels[c.ReleaseChannel]
-    if !releaseChannelSupported {
+    if err := c.ReleaseChannel.IsValid(); err != nil {
        return nil, fmt.Errorf("releaseChannel %s is not supported", c.ReleaseChannel)
     }
-
     if c.KeyName == "" && len(c.SSHAuthorizedKeys) == 0 {
        return nil, errors.New("Either keyName or sshAuthorizedKeys must be set")
     }
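The validation now delegates to `model.ReleaseChannel`, whose definition lives in a file not expanded on this page. Going only by how it is used in these hunks — a string-backed type with a `DefaultReleaseChannel()` constructor and an `IsValid()` method, defaulting to `stable` — it plausibly resembles the sketch below; treat this as an illustration rather than the actual `model` package code.

```go
package model

import "fmt"

// ReleaseChannel identifies a Flatcar release channel.
type ReleaseChannel string

// DefaultReleaseChannel is the channel used when cluster.yaml does not set one.
func DefaultReleaseChannel() ReleaseChannel {
    return ReleaseChannel("stable")
}

// IsValid returns an error unless the channel is one of alpha, beta or stable.
func (c ReleaseChannel) IsValid() error {
    switch c {
    case "alpha", "beta", "stable":
        return nil
    default:
        return fmt.Errorf("releaseChannel %s is not supported", c)
    }
}
```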

core/controlplane/config/config_test.go

+1-1
@@ -515,7 +515,7 @@ releaseChannel: non-existent #this release channel will never exist
         t.Errorf("failed to parse config %s: %v", confBody, err)
         continue
     }
-    if c.ReleaseChannel != conf.channel {
+    if string(c.ReleaseChannel) != conf.channel {
         t.Errorf(
             "parsed release channel %s does not match config: %s",
             c.ReleaseChannel,

core/nodepool/config/config.go

+1-1
@@ -11,7 +11,7 @@ import (
     "github.com/aws/aws-sdk-go/aws/session"
     "github.com/kubernetes-incubator/kube-aws/cfnresource"
     cfg "github.com/kubernetes-incubator/kube-aws/core/controlplane/config"
-    "github.com/kubernetes-incubator/kube-aws/coreos/amiregistry"
+    "github.com/kubernetes-incubator/kube-aws/flatcar/amiregistry"
     "github.com/kubernetes-incubator/kube-aws/logger"
     "github.com/kubernetes-incubator/kube-aws/model"
     "github.com/kubernetes-incubator/kube-aws/model/derived"

core/nodepool/config/templates/cloud-config-worker

+3-3
@@ -1258,10 +1258,10 @@ write_files:
       echo "Keeping container around after build: ${KEEP_CONTAINER}"
       echo "Additional flags: ${EMERGE_SOURCES}"
 
-      # If we are on CoreOS by default build for the current CoreOS version
-      if [[ -f /etc/lsb-release && -f /etc/coreos/update.conf ]]; then
+      # If we are on flatcar by default build for the current flatcar version
+      if [[ -f /etc/lsb-release && -f /etc/flatcar/update.conf ]]; then
         source /etc/lsb-release
-        source /etc/coreos/update.conf
+        source /etc/flatcar/update.conf
 
         COREOS_TRACK_DEFAULT=$GROUP
         COREOS_VERSION_DEFAULT=$DISTRIB_RELEASE

core/root/cluster.go

+2-2
@@ -776,7 +776,7 @@ func (c *clusterImpl) ValidateStack(opts ...OperationTargets) (string, error) {
 func streamJournaldLogs(c *clusterImpl, q chan struct{}) error {
     logger.Infof("Streaming filtered Journald logs for log group '%s'...\nNOTE: Due to high initial entropy, '.service' failures may occur during the early stages of booting.\n", c.controlPlane.ClusterName)
     cwlSvc := cloudwatchlogs.New(c.session)
-    s := time.Now().Unix() * 1E3
+    s := time.Now().Unix() * 1e3
     t := s
     in := cloudwatchlogs.FilterLogEventsInput{
         LogGroupName: &c.controlPlane.ClusterName,
@@ -800,7 +800,7 @@ func streamJournaldLogs(c *clusterImpl, q chan struct{}) error {
         ms[*event.Message] = *event.Timestamp
         res := model.SystemdMessageResponse{}
         json.Unmarshal([]byte(*event.Message), &res)
-        s := int(((*event.Timestamp) - t) / 1E3)
+        s := int(((*event.Timestamp) - t) / 1e3)
         d := fmt.Sprintf("+%.2d:%.2d:%.2d", s/3600, (s/60)%60, s%60)
         logger.Infof("%s\t%s: \"%s\"\n", d, res.Hostname, res.Message)
     }
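The `1E3` → `1e3` edits in this file are purely cosmetic: both literals denote the same untyped constant 1000, so the seconds-to-milliseconds arithmetic used for the CloudWatch Logs queries is unchanged. A tiny demonstration:

```go
package main

import (
    "fmt"
    "time"
)

func main() {
    // 1E3 and 1e3 are the same untyped constant; only the spelling differs.
    fmt.Println(1E3 == 1e3) // true

    // Multiplying a Unix timestamp in seconds by it yields the millisecond
    // timestamps that streamJournaldLogs passes to FilterLogEvents.
    startMs := time.Now().Unix() * 1e3
    fmt.Println("start (ms since epoch):", startMs)
}
```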

core/root/config/config.go

+1
@@ -24,6 +24,7 @@ type InitialConfig struct {
     KeyName string
     NoRecordSet bool
     Region model.Region
+    ReleaseChannel model.ReleaseChannel
     S3URI string
 }
 
core/root/config/templates/cluster.yaml

+5-6
@@ -6,17 +6,16 @@ clusterName: {{.ClusterName}}
 # The URI of the S3 bucket for the cluster
 s3URI: {{.S3URI}}
 
-# CoreOS release channel to use. Currently supported options: alpha, beta, stable
+# Flatcar release channel to use. Currently supported options: alpha, beta, stable
 # See coreos.com/releases for more information
-#releaseChannel: stable
+releaseChannel: stable
 
-# The AMI ID of CoreOS.
+# The AMI ID of Flatcar.
 #
-# To update this to the latest AMI run the following command with the appropriate region and channel then place the resulting ID here
-# REGION=eu-west-1 && CHANNEL=stable && curl -s https://coreos.com/dist/aws/aws-$CHANNEL.json | jq -r ".\"$REGION\".hvm"
+# To get this to the latest AMI run the following command: kube-aws ami
 amiId: "{{.AmiId}}"
 
-# Container Linux has automatic updates https://coreos.com/os/docs/latest/update-strategies.html. This can be a risk in certain situations and this is why is disabled by default and you can enable it by setting this param to false.
+# Flatcar has automatic updates https://docs.flatcar-linux.org/os/update-strategies/#disable-automatic-updates-daemon. This can be a risk in certain situations and this is why is disabled by default and you can enable it by setting this param to false.
 disableContainerLinuxAutomaticUpdates: true
 
 # Override the CloudFormation logical sub-stack names of control plane, etcd and/or network.

coreos/amiregistry/amiregistry.go

-47
This file was deleted.

docs/cli-reference/README.md

+1-1
@@ -8,7 +8,7 @@ Initialize the base configuration for a cluster ready for customization prior to
 
 | Flag | Description | Default |
 | -- | -- | -- |
-| `ami-id` | The AMI ID of CoreOS Container Linux to deploy | The latest AMI for the Container Linux release channel specified in `cluster.yaml` |
+| `ami-id` | The AMI ID of Flatcar Container Linux to deploy | The latest AMI for the Container Linux release channel specified in `cluster.yaml` |
 | `availability-zone` | The AWS availability-zone to deploy to. Note, this can be changed to multi AZ in `cluster.yaml` | none |
 | `cluster-name` | The name of this cluster. This will be the name of the cloudformation stack | none |
 | `external-dns-name` | The hostname that will route to the api server | none |

docs/getting-started/step-2-render.md

+5-5
@@ -6,7 +6,7 @@ This is the second step of [running Kubernetes on AWS](README.md). Before we lau
 
 ### EC2 key pair
 
-The keypair that will authenticate SSH access to your EC2 instances. The public half of this key pair will be configured on each CoreOS node.
+The keypair that will authenticate SSH access to your EC2 instances. The public half of this key pair will be configured on each Flatcar node.
 
 After creating a key pair, you will use the name you gave the keys to configure the cluster. Key pairs are only available to EC2 instances in the same region. More info in the [EC2 Keypair docs](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html).
 
@@ -167,7 +167,7 @@ Each component certificate is only valid for 90 days, while the CA is valid for
 If deploying a production Kubernetes cluster, consider establishing PKI independently of this tool first. [Read more below.][tls-note]
 
 **Did everything render correctly?**
-If you are familiar with CoreOS and the AWS platform, you may want to include some additional customizations or optional features. Read on below to explore more.
+If you are familiar with Flatcar and the AWS platform, you may want to include some additional customizations or optional features. Read on below to explore more.
 
 [Yes, ready to launch the cluster][getting-started-step-3]
 
@@ -196,15 +196,15 @@ You can now customize your cluster by editing asset files. Any changes to these
 * `cloud-config-worker`
 * `cloud-config-controller`
 
-  This directory contains the [cloud-init](https://github.com/coreos/coreos-cloudinit) cloud-config userdata files. The CoreOS operating system supports automated provisioning via cloud-config files, which describe the various files, scripts and systemd actions necessary to produce a working cluster machine. These files are templated with your cluster configuration parameters and embedded into the CloudFormation stack template.
+  This directory contains the [cloud-init](https://github.com/coreos/coreos-cloudinit) cloud-config userdata files. The Flatcar operating system supports automated provisioning via cloud-config files, which describe the various files, scripts and systemd actions necessary to produce a working cluster machine. These files are templated with your cluster configuration parameters and embedded into the CloudFormation stack template.
 
   Some common customizations are:
 
   - [mounting ephemeral disks][mount-disks]
   - [allow pods to mount RDB][rdb] or [iSCSI volumes][iscsi]
   - [allowing access to insecure container registries][insecure-registry]
   - [use host DNS configuration instead of a public DNS server][host-dns]
-  - [changing your CoreOS auto-update settings][update]
+  - [changing your Flatcar auto-update settings][update]
   <br/><br/>
 
 * **stack-template.json**
@@ -230,7 +230,7 @@ You can now customize your cluster by editing asset files. Any changes to these
 
 ### Kubernetes Container Runtime
 
-The kube-aws tool now optionally supports using rkt as the kubernetes container runtime. To configure rkt as the container runtime you must run with a CoreOS version >= `v1151.0.0` and configure the runtime flag.
+The kube-aws tool now optionally supports using rkt as the kubernetes container runtime. To configure rkt as the container runtime you must run with a Flatcar version >= `v1151.0.0` and configure the runtime flag.
 
 Edit the `cluster.yaml` file:
 
docs/getting-started/step-4-update.md

+1-1
@@ -44,7 +44,7 @@ some of your system pods will break (especially `kube-dns`). Deleting the said s
 
 There is no solution for hosting an etcd cluster in a way that is easily updateable in this fashion- so updates are automatically masked for the etcd instances. This means that, after the cluster is created, nothing about the etcd ec2 instances is allowed to be updated.
 
-Fortunately, CoreOS update engine will take care of keeping the members of the etcd cluster up-to-date, but you as the operator will not be able to modify them after creation via the update mechanism.
+Fortunately, Flatcar update engine will take care of keeping the members of the etcd cluster up-to-date, but you as the operator will not be able to modify them after creation via the update mechanism.
 
 In the (near) future, etcd will be hosted on Kubernetes and this problem will no longer be relevant. Rather than concocting overly complex band-aid, we've decided to "punt" on this issue of the time being.
 
docs/tutorials/quick-start.md

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
# Quick Start
22

3-
Get started with kube-aws and deploy a fully-functional Kubernetes cluster running on CoreOS Container Linux using AWS CloudFormation.
3+
Get started with kube-aws and deploy a fully-functional Kubernetes cluster running on Flatcar Container Linux using AWS CloudFormation.
44

55
After completing this guide, you will be able to deploy applications to Kubernetes on AWS and interact with the Kubernetes API using the `kubectl` CLI tool.
66

etcdadm/README.md

+2-2
@@ -34,8 +34,8 @@ save it in S3
 * `etcdadm restore` restores the etcd member running on the same node as etcdadm from a snapshot saved in S3
 * `etcdadm check` runs health checks against all the members in an etcd cluster so that `kubeadm reconfigure` updates the etcd member accordingly to the situation
 * `etcdadm reconfigure` reconfigures the etcd member on the same node as etcdadm so that it survives:
-  * `N/2` or less permanently failed members, by automatically removing a permanently failed member and then re-add it as a brand-new member with empty data according to ["Replace a failed etcd member on CoreOS Container Linux"](https://coreos.com/etcd/docs/latest/etcd-live-cluster-reconfiguration.html#replace-a-failed-etcd-member-on-coreos-container-linux)
-  * `(N/2)+1` or more permanently failed members, by automatically initiating a new cluster, from a snapshot if it exists, according to ["etcd disaster recovery on CoreOS Container Linux"](https://coreos.com/etcd/docs/latest/etcd-live-cluster-reconfiguration.html#etcd-disaster-recovery-on-coreos-container-linux)
+  * `N/2` or less permanently failed members, by automatically removing a permanently failed member and then re-add it as a brand-new member with empty data according to ["Replace a failed etcd member on Flatcar Container Linux"](https://coreos.com/etcd/docs/latest/etcd-live-cluster-reconfiguration.html#replace-a-failed-etcd-member-on-coreos-container-linux)
+  * `(N/2)+1` or more permanently failed members, by automatically initiating a new cluster, from a snapshot if it exists, according to ["etcd disaster recovery on Flatcar Container Linux"](https://coreos.com/etcd/docs/latest/etcd-live-cluster-reconfiguration.html#etcd-disaster-recovery-on-coreos-container-linux)
 * `etcdadm replace` is used to manually recover from an etcd member from a permanent failure. It resets the etcd member running on the same node as etcdadm by:
   1. clearing the contents of the etcd data dir
   2. removing and then re-adding the etcd member by running `etcdctl member remove` and then `etcdctl memer add`
