package networking

import (
	"context"
	"encoding/json"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	e2e "k8s.io/kubernetes/test/e2e/framework"

	nadtypes "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1"
	g "github.com/onsi/ginkgo/v2"
	o "github.com/onsi/gomega"
	exutil "github.com/openshift/origin/test/extended/util"
)

const nodeLabelSelectorWorker = "node-role.kubernetes.io/worker"

var _ = g.Describe("[sig-network][Feature:tap]", func() {
	oc := exutil.NewCLI("tap")
	f := oc.KubeFramework()
	var worker *corev1.Node

	g.BeforeEach(func() {
		// Fetch the worker nodes; the first one will host the test pod.
		workerNodes, err := f.ClientSet.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{
			LabelSelector: nodeLabelSelectorWorker,
		})
		o.Expect(err).NotTo(o.HaveOccurred())

		if len(workerNodes.Items) == 0 {
			e2e.Failf("cluster should have at least one worker node")
		}
		worker = &workerNodes.Items[0]

		// Load the tun kernel module on the node; the tap CNI plugin needs it to create tap devices.
		_, err = exutil.ExecCommandOnMachineConfigDaemon(f.ClientSet, oc, worker, []string{
			"sh", "-c", "nsenter --mount=/proc/1/ns/mnt -- sh -c 'modprobe tun'",
		})
		o.Expect(err).NotTo(o.HaveOccurred())

		// Enable the container_use_devices SELinux boolean so containers may use the tap device.
		_, err = exutil.ExecCommandOnMachineConfigDaemon(f.ClientSet, oc, worker, []string{
			"sh", "-c", "nsenter --mount=/proc/1/ns/mnt -- sh -c 'setsebool container_use_devices 1'",
		})
		o.Expect(err).NotTo(o.HaveOccurred())
	})

	g.It("should create a pod with a tap interface [apigroup:k8s.cni.cncf.io]", func() {
		ns := f.Namespace.Name
		podName := "pod1"
		nadName := "nad-tap"
		ifName := "tap1"
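		// Minimal tap CNI config. The selinuxcontext field sets the SELinux context
		// on the created tap device so a container running as container_t can use it,
		// which pairs with the container_use_devices boolean enabled in BeforeEach.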
		nadConfig := `{
			"cniVersion":"0.4.0",
			"name":"%s",
			"type": "tap",
			"selinuxcontext": "system_u:system_r:container_t:s0"
		}`

		g.By("creating a network attachment definition")
		err := createNetworkAttachmentDefinition(
			oc.AdminConfig(),
			ns,
			nadName,
			fmt.Sprintf(nadConfig, nadName),
		)
		o.Expect(err).NotTo(o.HaveOccurred(), "unable to create tap network-attachment-definition")

		g.By("creating a pod on a worker with container_use_devices on")
		exutil.CreateExecPodOrFail(f.ClientSet, ns, podName, func(pod *corev1.Pod) {
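			// Request the secondary tap interface through the Multus networks
			// annotation, using the <namespace>/<nad-name>@<interface-name> form
			// so the interface gets a predictable name inside the pod.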
			tapAnnotation := fmt.Sprintf("%s/%s@%s", ns, nadName, ifName)
			pod.ObjectMeta.Annotations = map[string]string{"k8s.v1.cni.cncf.io/networks": tapAnnotation}
			pod.Spec.NodeSelector = worker.Labels
		})
		pod, err := f.ClientSet.CoreV1().Pods(ns).Get(context.Background(), podName, metav1.GetOptions{})
		o.Expect(err).ToNot(o.HaveOccurred())

		g.By("checking annotations")
		networkStatusString, ok := pod.Annotations["k8s.v1.cni.cncf.io/network-status"]
		o.Expect(ok).To(o.BeTrue())
		o.Expect(networkStatusString).ToNot(o.BeEmpty())

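		// Multus reports one status entry per network: index 0 is the cluster
		// default network, index 1 should be the tap attachment requested above.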
		var networkStatuses []nadtypes.NetworkStatus
		o.Expect(json.Unmarshal([]byte(networkStatusString), &networkStatuses)).To(o.Succeed())
		o.Expect(networkStatuses).To(o.HaveLen(2))
		o.Expect(networkStatuses[1].Interface).To(o.Equal(ifName))
		o.Expect(networkStatuses[1].Name).To(o.Equal(fmt.Sprintf("%s/%s", ns, nadName)))
	})
})