@@ -19,27 +19,35 @@ package csimock
import (
	"context"
	"fmt"
- 	"os"
- 	"os/exec"
- 	"strings"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- 	"k8s.io/kubernetes/test/e2e/feature"
	"k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
+ 	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
+ 	"k8s.io/kubernetes/test/e2e/storage/drivers"
	"k8s.io/kubernetes/test/e2e/storage/utils"
	admissionapi "k8s.io/pod-security-admission/api"
)

- var _ = utils.SIGDescribe("CSI Mock when kubelet restart", feature.Kind, framework.WithSerial(), framework.WithDisruptive(), func() {
+ var _ = utils.SIGDescribe("CSI Mock when kubelet restart", framework.WithSerial(), framework.WithDisruptive(), func() {
	f := framework.NewDefaultFramework("csi-mock-when-kubelet-restart")
	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
	m := newMockDriverSetup(f)

+ 	ginkgo.BeforeEach(func() {
+ 		// These tests require SSH to nodes, so the provider check should be identical to there
+ 		// (the limiting factor is the implementation of util.go's e2essh.GetSigner(...)).
+
+ 		// Cluster must support node reboot
+ 		e2eskipper.SkipUnlessProviderIs(framework.ProvidersWithSSH...)
+ 		e2eskipper.SkipUnlessSSHKeyPresent()
+ 	})
+
	ginkgo.It("should not umount volume when the pvc is terminating but still used by a running pod", func(ctx context.Context) {
+
		m.init(ctx, testParameters{
			registerDriver: true,
		})
@@ -51,16 +59,16 @@ var _ = utils.SIGDescribe("CSI Mock when kubelet restart", feature.Kind, framewo
		ginkgo.By("Waiting for the Pod to be running")
		err := e2epod.WaitForPodRunningInNamespace(ctx, f.ClientSet, pod)
		framework.ExpectNoError(err, "failed to wait for pod %s to be running", pod.Name)
+ 		pod, err = f.ClientSet.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{})
+ 		framework.ExpectNoError(err, "failed to get pod %s", pod.Name)

		ginkgo.By("Deleting the PVC")
		err = f.ClientSet.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(ctx, pvc.Name, metav1.DeleteOptions{})
		framework.ExpectNoError(err, "failed to delete PVC %s", pvc.Name)

		ginkgo.By("Restarting kubelet")
- 		err = stopKindKubelet(ctx)
- 		framework.ExpectNoError(err, "failed to stop kubelet")
- 		err = startKindKubelet(ctx)
- 		framework.ExpectNoError(err, "failed to start kubelet")
+ 		utils.KubeletCommand(ctx, utils.KRestart, f.ClientSet, pod)
+ 		ginkgo.DeferCleanup(utils.KubeletCommand, utils.KStart, f.ClientSet, pod)

		ginkgo.By("Verifying the PVC is terminating during kubelet restart")
		pvc, err = f.ClientSet.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(ctx, pvc.Name, metav1.GetOptions{})
@@ -69,7 +77,7 @@ var _ = utils.SIGDescribe("CSI Mock when kubelet restart", feature.Kind, framewo

		ginkgo.By(fmt.Sprintf("Verifying that the driver didn't receive NodeUnpublishVolume call for PVC %s", pvc.Name))
		gomega.Consistently(ctx,
- 			func(ctx context.Context) interface{} {
+ 			func(ctx context.Context) []drivers.MockCSICall {
				calls, err := m.driver.GetCalls(ctx)
				if err != nil {
					if apierrors.IsUnexpectedServerError(err) {
@@ -90,39 +98,3 @@ var _ = utils.SIGDescribe("CSI Mock when kubelet restart", feature.Kind, framewo
		framework.ExpectNoError(err, "failed to wait for pod %s to be running", pod.Name)
	})
})
-
- func stopKindKubelet(ctx context.Context) error {
- 	return kubeletExec("systemctl", "stop", "kubelet")
- }
-
- func startKindKubelet(ctx context.Context) error {
- 	return kubeletExec("systemctl", "start", "kubelet")
- }
-
- // Run a command in container with kubelet (and the whole control plane as containers)
- func kubeletExec(command ...string) error {
- 	containerName := getKindContainerName()
- 	args := []string{"exec", containerName}
- 	args = append(args, command...)
- 	cmd := exec.Command("docker", args...)
-
- 	out, err := cmd.CombinedOutput()
- 	if err != nil {
- 		return fmt.Errorf("command %q failed: %v\noutput:%s", prettyCmd(cmd), err, string(out))
- 	}
-
- 	framework.Logf("command %q succeeded:\n%s", prettyCmd(cmd), string(out))
- 	return nil
- }
-
- func getKindContainerName() string {
- 	clusterName := os.Getenv("KIND_CLUSTER_NAME")
- 	if clusterName == "" {
- 		clusterName = "kind"
- 	}
- 	return clusterName + "-control-plane"
- }
-
- func prettyCmd(cmd *exec.Cmd) string {
- 	return fmt.Sprintf("%s %s", cmd.Path, strings.Join(cmd.Args, " "))
- }