Skip to content

Commit b4200fb

Browse files
committed
test/e2e: add tests around ssh/accessed annotation
- add ssh/accessed annotation test
- add a test that the accessed annotation is not set after a reboot

Signed-off-by: Antonio Murdaca <[email protected]>
1 parent 21f4da6 commit b4200fb

File tree

1 file changed

+260
-35
lines changed

1 file changed

+260
-35
lines changed

test/e2e/mcd_test.go

Lines changed: 260 additions & 35 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,11 @@
11
package e2e_test
22

33
import (
4+
"bytes"
5+
"encoding/json"
46
"fmt"
7+
"os"
8+
"os/exec"
59
"strings"
610
"testing"
711
"time"
@@ -10,11 +14,16 @@ import (
1014
"k8s.io/api/core/v1"
1115
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
1216
"k8s.io/apimachinery/pkg/labels"
17+
"k8s.io/apimachinery/pkg/types"
18+
"k8s.io/apimachinery/pkg/util/strategicpatch"
1319
"k8s.io/apimachinery/pkg/util/uuid"
1420
"k8s.io/apimachinery/pkg/util/wait"
21+
"k8s.io/client-go/kubernetes"
1522

1623
"github.com/openshift/machine-config-operator/cmd/common"
1724
mcv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1"
25+
"github.com/openshift/machine-config-operator/pkg/daemon"
26+
mcfgclientset "github.com/openshift/machine-config-operator/pkg/generated/clientset/versioned"
1827
)
1928

2029
// Test case for https://github.com/openshift/machine-config-operator/issues/358
@@ -47,9 +56,13 @@ func TestMCDToken(t *testing.T) {
4756
}
4857
}
4958

50-
func mcLabelForWorkers() map[string]string {
59+
func mcRoleLabelForWorkers() map[string]string {
60+
return mcRoleLabelFor("worker")
61+
}
// mcRoleLabelFor builds the label set that ties a MachineConfig to the
// pool with the given role.
func mcRoleLabelFor(role string) map[string]string {
	return map[string]string{
		"machineconfiguration.openshift.io/role": role,
	}
}
5568

@@ -68,44 +81,29 @@ func createMCFile(path, content string, mode int) ignv2_2types.File {
6881
}
6982
}
7083

71-
func TestMCDeployed(t *testing.T) {
72-
cb, err := common.NewClientBuilder("")
73-
if err != nil {
74-
t.Errorf("%#v", err)
84+
func createMC(name, role string, files []ignv2_2types.File) *mcv1.MachineConfig {
85+
mc := &mcv1.MachineConfig{}
86+
mc.ObjectMeta = metav1.ObjectMeta{
87+
Name: name,
88+
Labels: mcRoleLabelFor(role),
7589
}
76-
mcClient := cb.MachineConfigClientOrDie("mc-file-add")
77-
k := cb.KubeClientOrDie("mc-file-add")
78-
79-
// create a dummy MC
80-
mcName := fmt.Sprintf("00-0add-a-file-%s", uuid.NewUUID())
81-
mcadd := &mcv1.MachineConfig{}
82-
mcadd.ObjectMeta = metav1.ObjectMeta{
83-
Name: mcName,
84-
Labels: mcLabelForWorkers(),
85-
}
86-
mcadd.Spec = mcv1.MachineConfigSpec{
90+
mc.Spec = mcv1.MachineConfigSpec{
8791
Config: ignv2_2types.Config{
8892
Ignition: ignv2_2types.Ignition{
8993
Version: "2.2.0",
9094
},
9195
Storage: ignv2_2types.Storage{
92-
Files: []ignv2_2types.File{
93-
createMCFile("/etc/mytestconf", "data:,test", 420),
94-
},
96+
Files: files,
9597
},
9698
},
9799
}
100+
return mc
101+
}
98102

99-
// create the dummy MC now
100-
_, err = mcClient.MachineconfigurationV1().MachineConfigs().Create(mcadd)
101-
if err != nil {
102-
t.Errorf("failed to create machine config %v", err)
103-
}
104-
105-
// grab the latest worker- MC
103+
func getGeneratedMCFromMCName(mcClient mcfgclientset.Interface, mcName, role string) (string, error) {
106104
var newMCName string
107-
err = wait.Poll(2*time.Second, 5*time.Minute, func() (bool, error) {
108-
mcp, err := mcClient.MachineconfigurationV1().MachineConfigPools().Get("worker", metav1.GetOptions{})
105+
err := wait.Poll(2*time.Second, 5*time.Minute, func() (bool, error) {
106+
mcp, err := mcClient.MachineconfigurationV1().MachineConfigPools().Get(role, metav1.GetOptions{})
109107
if err != nil {
110108
return false, err
111109
}
@@ -117,32 +115,259 @@ func TestMCDeployed(t *testing.T) {
117115
}
118116
return false, nil
119117
})
118+
if err != nil {
119+
return "", err
120+
}
121+
return newMCName, nil
122+
}
120123

124+
func waitForMCDeployedOnNodes(kubeClient kubernetes.Interface, mcName string, nodeCount int) error {
121125
listOptions := metav1.ListOptions{
122126
LabelSelector: labels.SelectorFromSet(labels.Set{"k8s-app": "machine-config-daemon"}).String(),
123127
}
124128

125-
err = wait.Poll(3*time.Second, 5*time.Minute, func() (bool, error) {
126-
mcdList, err := k.CoreV1().Pods("openshift-machine-config-operator").List(listOptions)
129+
var seen int
130+
err := wait.Poll(3*time.Second, 5*time.Minute, func() (bool, error) {
131+
// TODO(runcom): we need to select mcd for just a given role for the nodeCount to be really true
132+
mcdList, err := kubeClient.CoreV1().Pods("openshift-machine-config-operator").List(listOptions)
127133
if err != nil {
128134
return false, err
129135
}
130136

131137
for _, pod := range mcdList.Items {
132-
res, err := k.CoreV1().Pods(pod.Namespace).GetLogs(pod.Name, &v1.PodLogOptions{}).DoRaw()
138+
res, err := kubeClient.CoreV1().Pods(pod.Namespace).GetLogs(pod.Name, &v1.PodLogOptions{}).DoRaw()
133139
if err != nil {
134140
// do not error out, we may be rebooting, that's why we list at every iteration
135141
return false, nil
136142
}
137143
for _, line := range strings.Split(string(res), "\n") {
138-
if strings.Contains(line, "completed update for config "+newMCName) {
139-
return true, nil
144+
if strings.Contains(line, "completed update for config "+mcName) {
145+
if seen == nodeCount {
146+
return true, nil
147+
}
148+
seen++
149+
return false, nil
140150
}
141151
}
142152
}
143153
return false, nil
144154
})
145155
if err != nil {
146-
t.Errorf("machine config didn't result in file being on any worker: %v", err)
156+
return fmt.Errorf("machine config didn't result in file being on any node: %v, rolled on just %d out of %d", err, seen, nodeCount)
157+
}
158+
return nil
159+
}
160+
161+
func waitForMCDeployed(kubeClient kubernetes.Interface, mcName string) error {
162+
return waitForMCDeployedOnNodes(kubeClient, mcName, 1)
163+
}
164+
165+
func TestMCDeployed(t *testing.T) {
166+
cb, err := common.NewClientBuilder("")
167+
if err != nil {
168+
t.Errorf("%#v", err)
169+
}
170+
mcClient := cb.MachineConfigClientOrDie("mc-file-add")
171+
k := cb.KubeClientOrDie("mc-file-add")
172+
173+
mcName := fmt.Sprintf("00-0add-a-file-%s", uuid.NewUUID())
174+
role := "worker"
175+
mcadd := createMC(mcName, role, []ignv2_2types.File{createMCFile("/etc/mytestconf", "data:,test", 420)})
176+
177+
// create the dummy MC now
178+
_, err = mcClient.MachineconfigurationV1().MachineConfigs().Create(mcadd)
179+
if err != nil {
180+
t.Errorf("failed to create machine config %v", err)
181+
}
182+
183+
newMCName, err := getGeneratedMCFromMCName(mcClient, mcName, role)
184+
if err != nil {
185+
t.Error(err)
186+
}
187+
188+
err = waitForMCDeployed(k, newMCName)
189+
if err != nil {
190+
t.Errorf("error waiting for the new MC to be deployed %v", err)
191+
}
192+
}
193+
// sshWithCommand execs ssh to the specified ip, runs the command provided
// and returns stdout, stderr, and an error.
func sshWithCommand(t *testing.T, ip string, command []string) (string, string, error) {
	sshOpts := []string{
		"-oUserKnownHostsFile=/dev/null",
		"-oStrictHostKeyChecking=no",
	}
	// Pick up an explicit identity file only when the env var is set.
	if sshKeyPath := os.Getenv("KUBE_SSH_KEY_PATH"); sshKeyPath != "" {
		sshOpts = append(sshOpts, "-i"+sshKeyPath)
	}
	sshOpts = append(sshOpts, "core@"+ip)
	sshOpts = append(sshOpts, command...)

	t.Logf(`running "ssh %s"`, strings.Join(sshOpts, " "))
	var stdout, stderr bytes.Buffer
	cmd := exec.Command("ssh", sshOpts...)
	cmd.Stdout = &stdout
	cmd.Stderr = &stderr
	if err := cmd.Run(); err != nil {
		return "", "", fmt.Errorf("error ssh'ing into node %q: %v, %v", ip, err, stderr.String())
	}
	return stdout.String(), stderr.String(), nil
}
223+
224+
// XXX: this function now just returns the bastion IP and name till we work something out
225+
// like https://github.com/kubernetes/kubernetes/blob/master/test/e2e/framework/ssh.go
226+
func pickNodeNameAndIPWithExternalIP(kubeClient kubernetes.Interface) (string, string, error) {
227+
nodes, err := kubeClient.CoreV1().Nodes().List(metav1.ListOptions{})
228+
if err != nil {
229+
return "", "", fmt.Errorf("failed to list nodes %v", err)
230+
}
231+
var (
232+
nodeName string
233+
nodeIP string
234+
)
235+
bastionIP := strings.TrimRight(os.Getenv("KUBE_SSH_BASTION"), ":22")
236+
fmt.Printf("bastion ip %q\n", bastionIP)
237+
for _, node := range nodes.Items {
238+
for _, addr := range node.Status.Addresses {
239+
if addr.Address == bastionIP {
240+
nodeIP = bastionIP
241+
nodeName = node.Name
242+
break
243+
}
244+
// just pick a master with an external IP
245+
if addr.Type == v1.NodeExternalIP {
246+
nodeIP = addr.Address
247+
nodeName = node.Name
248+
// we don't break here cause we still prefer to have the bastion
249+
// if it's there
250+
}
251+
252+
}
253+
}
254+
return nodeName, nodeIP, nil
255+
}
256+
257+
func TestSSHAccessedAnnotation(t *testing.T) {
258+
cb, err := common.NewClientBuilder("")
259+
if err != nil {
260+
t.Errorf("%#v", err)
261+
}
262+
k := cb.KubeClientOrDie("test-ssh-accessed")
263+
264+
nodeName, nodeIP, err := pickNodeNameAndIPWithExternalIP(k)
265+
if err != nil {
266+
t.Errorf("failed to pick a node %v", err)
267+
}
268+
269+
node, err := k.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
270+
if err != nil {
271+
t.Errorf("cannot get node %q: %v", nodeName, err)
272+
}
273+
sshAnnotation, ok := node.ObjectMeta.Annotations[daemon.MachineConfigDaemonSSHAccessAnnotationKey]
274+
if ok && sshAnnotation == daemon.MachineConfigDaemonSSHAccessValue {
275+
t.Errorf("node %q has ssh/accessed annotation but it shouldn't", nodeName)
276+
}
277+
278+
_, _, err = sshWithCommand(t, nodeIP, []string{"true"})
279+
if err != nil {
280+
t.Error(err)
281+
}
282+
defer clearOutAnnotationFromNode(t, k, nodeName, daemon.MachineConfigDaemonSSHAccessAnnotationKey)
283+
284+
err = wait.Poll(2*time.Second, 5*time.Minute, func() (bool, error) {
285+
node, err := k.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
286+
if err != nil {
287+
return false, err
288+
}
289+
sshAnnotation, ok := node.ObjectMeta.Annotations[daemon.MachineConfigDaemonSSHAccessAnnotationKey]
290+
if !ok {
291+
return false, nil
292+
}
293+
if sshAnnotation == daemon.MachineConfigDaemonSSHAccessValue {
294+
return true, nil
295+
}
296+
return false, nil
297+
})
298+
}
299+
300+
func clearOutAnnotationFromNode(t *testing.T, k kubernetes.Interface, nodeName, key string) {
301+
node, err := k.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
302+
if err != nil {
303+
t.Error(err)
304+
}
305+
oldData, err := json.Marshal(node)
306+
if err != nil {
307+
t.Error(err)
308+
}
309+
delete(node.ObjectMeta.Annotations, daemon.MachineConfigDaemonSSHAccessAnnotationKey)
310+
newData, err := json.Marshal(node)
311+
if err != nil {
312+
t.Error(err)
313+
}
314+
patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, v1.Node{})
315+
if err != nil {
316+
t.Error(err)
147317
}
318+
_, err = k.CoreV1().Nodes().Patch(node.Name, types.StrategicMergePatchType, patchBytes)
319+
if err != nil {
320+
t.Error(err)
321+
}
322+
}
323+
324+
// Test case for https://github.com/openshift/machine-config-operator/issues/372
325+
func TestMCDeployedNoSSHAccessedAfterReboot(t *testing.T) {
326+
cb, err := common.NewClientBuilder("")
327+
if err != nil {
328+
t.Errorf("%#v", err)
329+
}
330+
mcClient := cb.MachineConfigClientOrDie("no-ssh-reboot")
331+
k := cb.KubeClientOrDie("no-ssh-reboot")
332+
333+
mcName := fmt.Sprintf("00-0add-a-file-%s", uuid.NewUUID())
334+
role := "worker"
335+
mcadd := createMC(mcName, role, []ignv2_2types.File{createMCFile("/etc/mytestconf", "data:,test", 420)})
336+
337+
// create the dummy MC now
338+
_, err = mcClient.MachineconfigurationV1().MachineConfigs().Create(mcadd)
339+
if err != nil {
340+
t.Errorf("failed to create machine config %v", err)
341+
}
342+
343+
newMCName, err := getGeneratedMCFromMCName(mcClient, mcName, role)
344+
if err != nil {
345+
t.Error(err)
346+
}
347+
348+
listOptions := metav1.ListOptions{
349+
LabelSelector: labels.SelectorFromSet(labels.Set{"node-role.kubernetes.io/worker": ""}).String(),
350+
}
351+
nodes, err := k.CoreV1().Nodes().List(listOptions)
352+
if err != nil {
353+
t.Errorf("failed to list nodes %v", err)
354+
}
355+
356+
err = waitForMCDeployedOnNodes(k, newMCName, len(nodes.Items))
357+
if err != nil {
358+
t.Errorf("error waiting for the new MC to be deployed %v", err)
359+
}
360+
361+
for _, node := range nodes.Items {
362+
sshAnnotation, ok := node.ObjectMeta.Annotations[daemon.MachineConfigDaemonSSHAccessAnnotationKey]
363+
if ok && sshAnnotation == daemon.MachineConfigDaemonSSHAccessValue {
364+
t.Errorf("node %q has the ssh/annotation but it shouldn't", node.Name)
365+
}
366+
}
367+
}
368+
369+
// Test case for https://github.com/openshift/machine-config-operator/pull/375
//
// Placeholder: verifying the ssh/accessed annotation on a degraded node
// needs a way to deliberately degrade a node, which doesn't exist yet.
func TestSSHAccessedOnDegraded(t *testing.T) {
	// TODO(runcom): we can't really degrade nodes right now, hold on and find a way
	// to degrade just a test pool or something like that
}

0 commit comments

Comments
 (0)