Skip to content

Commit 6d72edb

Browse files
authored
[test] Subnet filter testing (#357)
* Add options to specify vpc name and generate capl manifests with different names
* Added make targets for generating manifests and modifying clusters
* Updating script calls and started testing chainsaw test
* WIP: Updating make target and chainsaw test
* Finished chainsaw test
* Updated process for running subnet testing
* Switched tabs to spaces
* Addressed Makefile comments
* Addressed Makefile comments and added log checking to test
1 parent 4acb50c commit 6d72edb

File tree

4 files changed

+113
-4
lines changed

4 files changed

+113
-4
lines changed

.github/workflows/ci.yml

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -123,6 +123,9 @@ jobs:
123123
- name: Run Cilium BGP e2e test
124124
run: devbox run e2e-test-bgp
125125

126+
- name: Run subnet filtering test
127+
run: devbox run e2e-test-subnet
128+
126129
- name: Cleanup Resources
127130
if: always()
128131
run: devbox run cleanup-cluster

Makefile

Lines changed: 31 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -14,16 +14,21 @@ HELM_VERSION ?= v3.16.3
1414
# Dev Setup
1515
#####################################################################
1616
CLUSTER_NAME ?= ccm-$(shell git rev-parse --short HEAD)
17+
SUBNET_CLUSTER_NAME ?= subnet-testing-$(shell git rev-parse --short HEAD)
18+
VPC_NAME ?= $(CLUSTER_NAME)
19+
MANIFEST_NAME ?= capl-cluster-manifests
20+
SUBNET_MANIFEST_NAME ?= subnet-testing-manifests
1721
K8S_VERSION ?= "v1.31.2"
1822
CAPI_VERSION ?= "v1.8.5"
1923
CAAPH_VERSION ?= "v0.2.1"
20-
CAPL_VERSION ?= "v0.7.1"
24+
CAPL_VERSION ?= "v0.8.5"
2125
CONTROLPLANE_NODES ?= 1
2226
WORKER_NODES ?= 1
2327
LINODE_FIREWALL_ENABLED ?= true
2428
LINODE_REGION ?= us-lax
2529
LINODE_OS ?= linode/ubuntu22.04
2630
KUBECONFIG_PATH ?= $(CURDIR)/test-cluster-kubeconfig.yaml
31+
SUBNET_KUBECONFIG_PATH ?= $(CURDIR)/subnet-testing-kubeconfig.yaml
2732
MGMT_KUBECONFIG_PATH ?= $(CURDIR)/mgmt-cluster-kubeconfig.yaml
2833

2934
# if the $DEVBOX_PACKAGES_DIR env variable exists that means we are within a devbox shell and can safely
@@ -144,14 +149,15 @@ capl-cluster: generate-capl-cluster-manifests create-capl-cluster patch-linode-c
144149
.PHONY: generate-capl-cluster-manifests
145150
generate-capl-cluster-manifests:
146151
# Create the CAPL cluster manifests without any CSI driver stuff
147-
LINODE_FIREWALL_ENABLED=$(LINODE_FIREWALL_ENABLED) LINODE_OS=$(LINODE_OS) clusterctl generate cluster $(CLUSTER_NAME) \
152+
LINODE_FIREWALL_ENABLED=$(LINODE_FIREWALL_ENABLED) LINODE_OS=$(LINODE_OS) VPC_NAME=$(VPC_NAME) clusterctl generate cluster $(CLUSTER_NAME) \
148153
--kubernetes-version $(K8S_VERSION) --infrastructure linode-linode:$(CAPL_VERSION) \
149-
--control-plane-machine-count $(CONTROLPLANE_NODES) --worker-machine-count $(WORKER_NODES) > capl-cluster-manifests.yaml
154+
--control-plane-machine-count $(CONTROLPLANE_NODES) --worker-machine-count $(WORKER_NODES) > $(MANIFEST_NAME).yaml
155+
yq -i e 'select(.kind == "LinodeVPC").spec.subnets = [{"ipv4": "10.0.0.0/8", "label": "default"}, {"ipv4": "172.16.0.0/16", "label": "testing"}]' $(MANIFEST_NAME).yaml
150156

151157
.PHONY: create-capl-cluster
152158
create-capl-cluster:
153159
# Create a CAPL cluster with updated CCM and wait for it to be ready
154-
kubectl apply -f capl-cluster-manifests.yaml
160+
kubectl apply -f $(MANIFEST_NAME).yaml
155161
kubectl wait --for=condition=ControlPlaneReady cluster/$(CLUSTER_NAME) --timeout=600s || (kubectl get cluster -o yaml; kubectl get linodecluster -o yaml; kubectl get linodemachines -o yaml)
156162
kubectl wait --for=condition=NodeHealthy=true machines -l cluster.x-k8s.io/cluster-name=$(CLUSTER_NAME) --timeout=900s
157163
clusterctl get kubeconfig $(CLUSTER_NAME) > $(KUBECONFIG_PATH)
@@ -207,6 +213,27 @@ e2e-test-bgp:
207213
LINODE_TOKEN=$(LINODE_TOKEN) \
208214
chainsaw test e2e/bgp-test/lb-cilium-bgp $(E2E_FLAGS)
209215

216+
.PHONY: e2e-test-subnet
217+
e2e-test-subnet:
218+
# Generate cluster manifests for second cluster
219+
SUBNET_NAME=testing CLUSTER_NAME=$(SUBNET_CLUSTER_NAME) MANIFEST_NAME=$(SUBNET_MANIFEST_NAME) VPC_NAME=$(CLUSTER_NAME) \
220+
VPC_NETWORK_CIDR=172.16.0.0/16 K8S_CLUSTER_CIDR=172.16.64.0/18 make generate-capl-cluster-manifests
221+
# Add subnetNames to HelmChartProxy
222+
yq e 'select(.kind == "HelmChartProxy" and .spec.chartName == "ccm-linode").spec.valuesTemplate' $(SUBNET_MANIFEST_NAME).yaml > tmp.yaml
223+
yq -i e '.routeController += {"subnetNames": "testing"}' tmp.yaml
224+
yq -i e '.routeController.vpcNames = "{{.InfraCluster.spec.vpcRef.name}}"' tmp.yaml
225+
yq -i e 'select(.kind == "HelmChartProxy" and .spec.chartName == "ccm-linode").spec.valuesTemplate = load_str("tmp.yaml")' $(SUBNET_MANIFEST_NAME).yaml
226+
rm tmp.yaml
227+
# Create the second cluster
228+
MANIFEST_NAME=$(SUBNET_MANIFEST_NAME) CLUSTER_NAME=$(SUBNET_CLUSTER_NAME) KUBECONFIG_PATH=$(SUBNET_KUBECONFIG_PATH) \
229+
make create-capl-cluster
230+
KUBECONFIG_PATH=$(SUBNET_KUBECONFIG_PATH) make patch-linode-ccm
231+
# Run chainsaw test
232+
LINODE_TOKEN=$(LINODE_TOKEN) \
233+
FIRST_CONFIG=$(KUBECONFIG_PATH) \
234+
SECOND_CONFIG=$(SUBNET_KUBECONFIG_PATH) \
235+
chainsaw test e2e/subnet-test $(E2E_FLAGS)
236+
210237
#####################################################################
211238
# OS / ARCH
212239
#####################################################################

devbox.json

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -22,6 +22,7 @@
2222
"mgmt-and-capl-cluster": "make mgmt-and-capl-cluster",
2323
"e2e-test": "make e2e-test",
2424
"e2e-test-bgp": "make e2e-test-bgp",
25+
"e2e-test-subnet": "make e2e-test-subnet",
2526
"cleanup-cluster": "make cleanup-cluster"
2627
}
2728
},

e2e/subnet-test/chainsaw-test.yaml

Lines changed: 78 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,78 @@
1+
# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json
2+
apiVersion: chainsaw.kyverno.io/v1alpha1
3+
kind: Test
4+
metadata:
5+
name: subnet-filtering-test
6+
labels:
7+
all:
8+
spec:
9+
bindings:
10+
- name: fwname
11+
value: (join('-', ['ccm-fwtest', env('CLUSTER_NAME')]))
12+
namespace: "subnet-filtering-test"
13+
steps:
14+
- name: Check if the CCM for each cluster focus on their individual subnets
15+
try:
16+
- script:
17+
content: |
18+
set -e
19+
20+
if [ -z "$FIRST_CONFIG" ] || [ -z "$SECOND_CONFIG" ] || [ -z "$LINODE_TOKEN" ]; then
21+
echo "Error: FIRST_CONFIG, SECOND_CONFIG, and LINODE_TOKEN environment variables must be set"
22+
exit 1
23+
fi
24+
25+
# Iterate through both clusters
26+
for config in "$FIRST_CONFIG" "$SECOND_CONFIG"; do
27+
# Get all node names
28+
nodes=$(KUBECONFIG=$config kubectl get nodes -o jsonpath='{.items[*].metadata.name}')
29+
if [ -z "$nodes" ]; then
30+
echo "Error: No nodes found in cluster"
31+
exit 1
32+
fi
33+
34+
# Process each node
35+
for node in $nodes; do
36+
echo "Checking node: $node"
37+
38+
# Get pod CIDR and instance ID
39+
pod_cidr=$(KUBECONFIG=$config kubectl get node "$node" -o jsonpath='{.spec.podCIDR}')
40+
instance_id=$(KUBECONFIG=$config kubectl get node "$node" -o jsonpath='{.spec.providerID}' | sed 's/linode:\/\///')
41+
42+
echo " Pod CIDR: $pod_cidr"
43+
echo " Instance ID: $instance_id"
44+
45+
# Get interface details for this config
46+
interfaces=$(curl -s \
47+
-H "Authorization: Bearer $LINODE_TOKEN" \
48+
"https://api.linode.com/v4/linode/instances/$instance_id/configs" \
49+
| jq -r '.data[0].interfaces')
50+
51+
# Check if pod CIDR is in the VPC interface IP ranges
52+
if echo "$interfaces" | jq -e --arg cidr "$pod_cidr" '.[] | select(.purpose == "vpc") | .ip_ranges[] | select(. == $cidr)' > /dev/null; then
53+
echo "Pod CIDR found in VPC interface configuration"
54+
else
55+
echo "Pod CIDR not found in VPC interface configuration"
56+
echo "Current VPC interface configuration:"
57+
echo "$interfaces" | jq '.[] | select(.purpose == "vpc")'
58+
fi
59+
60+
echo "---"
61+
done
62+
done
63+
64+
# Grep logs of each cluster for IPs from the other cluster
65+
echo "Checking logs of each CCM"
66+
if ! [ $(KUBECONFIG=$FIRST_CONFIG kubectl logs daemonset/ccm-linode -n kube-system | grep "172.16" | wc -l) -eq 0 ]; then
67+
echo "IP address from testing subnet found in logs of test cluster"
68+
exit 1
69+
fi
70+
71+
if ! [ $(KUBECONFIG=$SECOND_CONFIG kubectl logs daemonset/ccm-linode -n kube-system | grep "10.192" | wc -l) -eq 0 ]; then
72+
echo "IP address from default subnet found in logs of second cluster"
73+
exit 1
74+
fi
75+
76+
check:
77+
($error == null): true
78+
(contains($stdout, 'Pod CIDR not found in VPC interface configuration')): false

0 commit comments

Comments
 (0)