Commit 14c5df7
Move to use newer IPsec DaemonSets when MCP is in paused state
When an MCP is in the paused state, the network operator keeps rendering the older IPsec DaemonSets, which blocks the network cluster operator from being upgraded to the newer version. Hence this commit renders the newer IPsec DaemonSets for the intermediate period. Once the MCPs are unpaused and the IPsec machine configs are installed on them, the operator goes ahead with rendering only the host-flavored IPsec DaemonSet.
Signed-off-by: Periyasamy Palanisamy <[email protected]>
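For orientation, here is a minimal sketch of the rendering decision this commit introduces, condensed from the shouldRenderIPsec changes in pkg/network/ovn_kubernetes.go below. The function name pickIPsecDaemonSets and its plain boolean parameters are illustrative stand-ins for the operator config and bootstrap lookups used in the real code:

package main

import "fmt"

// pickIPsecDaemonSets condenses the new flavor selection: the host DaemonSet is
// rendered whenever IPsec is on (outside hypershift hosted clusters), and the
// containerized DaemonSet is additionally rendered while the IPsec MachineConfig
// is not yet active on every pool (for example, while an MCP is paused).
func pickIPsecDaemonSets(ipsecEnabled, isHypershiftHostedCluster, isIPsecMachineConfigActive bool) (host, containerized, checkForLibreswan bool) {
	host = ipsecEnabled && !isHypershiftHostedCluster
	containerized = (ipsecEnabled && isHypershiftHostedCluster) || !isIPsecMachineConfigActive
	// When both flavors are rendered, the templates get IPsecCheckForLibreswan=true
	// and each pod checks for the libreswan RPM on its host, so exactly one flavor
	// configures IPsec on any given node.
	checkForLibreswan = host && containerized
	return
}

func main() {
	// Paused MCP, IPsec MachineConfig not yet active: both flavors are rendered.
	fmt.Println(pickIPsecDaemonSets(true, false, false)) // true true true
	// Pools unpaused and MachineConfig active: only the host flavor remains.
	fmt.Println(pickIPsecDaemonSets(true, false, true)) // true false false
}

With an MCP paused and the IPsec machine config not yet active everywhere, both DaemonSet flavors are rendered and each pod checks for the libreswan RPM on its host, so exactly one flavor configures IPsec per node; once the pools are unpaused and the machine config is active, only the host flavor is rendered.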
1 parent f34768b commit 14c5df7

5 files changed: +432, -154 lines changed

bindata/network/ovn-kubernetes/common/ipsec-containerized.yaml

Lines changed: 31 additions & 0 deletions
@@ -49,6 +49,14 @@ spec:
 - |
 #!/bin/bash
 set -exuo pipefail
+
+{{ if .IPsecCheckForLibreswan }}
+if rpm --dbpath=/usr/share/rpm -q libreswan; then
+echo "host has libreswan and therefore ipsec will be configured by ipsec host daemonset, this ovn ipsec container doesnt need to init anything"
+exit 0
+fi
+{{ end }}
+
 {{ if .NETWORK_NODE_IDENTITY_ENABLE }}
 # When NETWORK_NODE_IDENTITY_ENABLE is true, use the per-node certificate to create a kubeconfig
 # that will be used to talk to the API
@@ -189,6 +197,9 @@ spec:
 name: signer-ca
 - mountPath: /etc/openvswitch
 name: etc-openvswitch
+- mountPath: /usr/share/rpm
+name: host-usr-share-rpm
+readOnly: true
 resources:
 requests:
 cpu: 10m
@@ -220,6 +231,13 @@ spec:
 }
 trap cleanup SIGTERM
 
+{{ if .IPsecCheckForLibreswan }}
+if rpm --dbpath=/usr/share/rpm -q libreswan; then
+echo "host has libreswan and therefore ipsec will be configured by ipsec host daemonset, this ovn ipsec container will sleep to infinity"
+sleep infinity
+fi
+{{ end }}
+
 # Don't start IPsec until ovnkube-node has finished setting up the node
 counter=0
 until [ -f /etc/cni/net.d/10-ovn-kubernetes.conf ]
@@ -276,6 +294,9 @@ spec:
 name: host-var-log-ovs
 - mountPath: /etc/openvswitch
 name: etc-openvswitch
+- mountPath: /usr/share/rpm
+name: host-usr-share-rpm
+readOnly: true
 resources:
 requests:
 cpu: 10m
@@ -288,6 +309,12 @@ spec:
 - -c
 - |
 #!/bin/bash
+{{ if .IPsecCheckForLibreswan }}
+if rpm --dbpath=/usr/share/rpm -q libreswan; then
+echo "host has libreswan and therefore ipsec will be configured by ipsec host daemonset, this ovn ipsec container is always \"alive\""
+exit 0
+fi
+{{ end }}
 if [[ $(ipsec whack --trafficstatus | wc -l) -eq 0 ]]; then
 echo "no ipsec traffic configured"
 exit 10
@@ -321,6 +348,10 @@ spec:
 - name: host-cni-netd
 hostPath:
 path: "{{.CNIConfDir}}"
+- name: host-usr-share-rpm
+hostPath:
+path: /usr/share/rpm
+type: DirectoryOrCreate
 tolerations:
 - operator: "Exists"
 {{end}}

bindata/network/ovn-kubernetes/common/ipsec-host.yaml

Lines changed: 35 additions & 0 deletions
@@ -50,6 +50,12 @@ spec:
 - |
 #!/bin/bash
 set -exuo pipefail
+{{ if .IPsecCheckForLibreswan }}
+if ! rpm --dbpath=/usr/share/rpm -q libreswan; then
+echo "host doesnt have libreswan, therefore ipsec will be configured by ipsec-containerized daemonset, this ovn ipsec container has nothing to init"
+exit 0
+fi
+{{ end }}
 {{ if .NETWORK_NODE_IDENTITY_ENABLE }}
 # When NETWORK_NODE_IDENTITY_ENABLE is true, use the per-node certificate to create a kubeconfig
 # that will be used to talk to the API
@@ -194,6 +200,9 @@ spec:
 name: etc-openvswitch
 - mountPath: /etc
 name: host-etc
+- mountPath: /usr/share/rpm
+name: host-usr-share-rpm
+readOnly: true
 resources:
 requests:
 cpu: 10m
@@ -210,6 +219,12 @@ spec:
 #!/bin/bash
 set -exuo pipefail
 
+{{ if .IPsecCheckForLibreswan }}
+if ! rpm --dbpath=/usr/share/rpm -q libreswan; then
+echo "host doesnt have libreswan, therefore ipsec will be configured by ipsec-containerized daemonset, this ovn ipsec container will sleep to infinity"
+sleep infinity
+fi
+{{ end }}
 
 # Don't start IPsec until ovnkube-node has finished setting up the node
 counter=0
@@ -268,6 +283,13 @@ spec:
 # In order to maintain traffic flows during container restart, we
 # need to ensure that xfrm state and policies are not flushed.
 
+{{ if .IPsecCheckForLibreswan }}
+if ! rpm --dbpath=/usr/share/rpm -q libreswan; then
+echo "host doesnt have libreswan, therefore ipsec will be configured by ipsec-containerized daemonset, preStop wont do anything"
+exit 0
+fi
+{{ end }}
+
 # Don't allow ovs monitor to cleanup persistent state
 kill "$(cat /var/run/openvswitch/ovs-monitor-ipsec.pid 2>/dev/null)" 2>/dev/null || true
 env:
@@ -291,6 +313,9 @@ spec:
 name: host-var-lib
 - mountPath: /etc
 name: host-etc
+- mountPath: /usr/share/rpm
+name: host-usr-share-rpm
+readOnly: true
 resources:
 requests:
 cpu: 10m
@@ -303,6 +328,12 @@ spec:
 - -c
 - |
 #!/bin/bash
+{{ if .IPsecCheckForLibreswan }}
+if ! rpm --dbpath=/usr/share/rpm -q libreswan; then
+echo "host doesnt have libreswan, therefore ipsec will be configured by ipsec-containerized daemonset, this ovn ipsec container is always \"alive\""
+exit 0
+fi
+{{ end }}
 if [[ $(ipsec whack --trafficstatus | wc -l) -eq 0 ]]; then
 echo "no ipsec traffic configured"
 exit 10
@@ -346,6 +377,10 @@ spec:
 path: /etc
 type: Directory
 name: host-etc
+- name: host-usr-share-rpm
+hostPath:
+path: /usr/share/rpm
+type: DirectoryOrCreate
 tolerations:
 - operator: "Exists"
 {{end}}

pkg/bootstrap/types.go

Lines changed: 1 addition & 2 deletions
@@ -48,8 +48,7 @@ type OVNUpdateStatus struct {
 // OVNIPsecStatus contains status of current IPsec configuration
 // in the cluster.
 type OVNIPsecStatus struct {
-LegacyIPsecUpgrade bool // true if IPsec in 4.14 or Pre-4.14 cluster is upgraded to latest version
-OVNIPsecActive bool // set to true unless we are sure it is not.
+OVNIPsecActive bool // set to true unless we are sure it is not.
 }
 
 type OVNBootstrapResult struct {

pkg/network/ovn_kubernetes.go

Lines changed: 42 additions & 85 deletions
@@ -289,6 +289,7 @@ func renderOVNKubernetes(conf *operv1.NetworkSpec, bootstrapResult *bootstrap.Bo
 data.Data["IPsecMachineConfigEnable"] = IPsecMachineConfigEnable
 data.Data["OVNIPsecDaemonsetEnable"] = OVNIPsecDaemonsetEnable
 data.Data["OVNIPsecEnable"] = OVNIPsecEnable
+data.Data["IPsecCheckForLibreswan"] = renderIPsecHostDaemonSet && renderIPsecContainerizedDaemonSet
 
 // Set progressing to true until IPsec DaemonSet is rendered when EW IPsec config is enabled.
 // TODO Do a poor man's job mapping machine config pool status to CNO progressing state for now.
@@ -499,8 +500,8 @@ func renderOVNKubernetes(conf *operv1.NetworkSpec, bootstrapResult *bootstrap.Bo
 objs = k8s.RemoveObjByGroupKindName(objs, "apps", "DaemonSet", util.OVN_NAMESPACE, "ovn-ipsec-containerized")
 }
 
-// When upgrading a legacy IPsec deployment, avoid any updates until IPsec MachineConfigs
-// are active.
+// When disabling IPsec deployment, avoid any updates until IPsec is completely
+// disabled from OVN.
 if renderIPsecDaemonSetAsCreateWaitOnly {
 k8s.UpdateObjByGroupKindName(objs, "apps", "DaemonSet", util.OVN_NAMESPACE, "ovn-ipsec-host", func(o *uns.Unstructured) {
 anno := o.GetAnnotations()
@@ -519,25 +520,6 @@ func renderOVNKubernetes(conf *operv1.NetworkSpec, bootstrapResult *bootstrap.Bo
 anno[names.CreateWaitAnnotation] = "true"
 o.SetAnnotations(anno)
 })
-// The legacy ovn-ipsec deployment is only rendered during upgrades until we
-// are ready to remove it.
-ovnIPsecLegacyDS := &appsv1.DaemonSet{
-TypeMeta: metav1.TypeMeta{
-Kind: "DaemonSet",
-APIVersion: appsv1.SchemeGroupVersion.String(),
-},
-ObjectMeta: metav1.ObjectMeta{
-Name: "ovn-ipsec",
-Namespace: util.OVN_NAMESPACE,
-// We never update the legacy ovn-ipsec daemonset.
-Annotations: map[string]string{names.CreateWaitAnnotation: "true"},
-},
-}
-obj, err := k8s.ToUnstructured(ovnIPsecLegacyDS)
-if err != nil {
-return nil, progressing, fmt.Errorf("unable to render legacy ovn-ipsec daemonset: %w", err)
-}
-objs = append(objs, obj)
 }
 
 klog.Infof("ovnk components: ovnkube-node: isRunning=%t, update=%t; ovnkube-control-plane: isRunning=%t, update=%t",
@@ -627,7 +609,6 @@ func IsIPsecLegacyAPI(conf *operv1.OVNKubernetesConfig) bool {
 func shouldRenderIPsec(conf *operv1.OVNKubernetesConfig, bootstrapResult *bootstrap.BootstrapResult) (renderCNOIPsecMachineConfig, renderIPsecDaemonSet,
 renderIPsecOVN, renderIPsecHostDaemonSet, renderIPsecContainerizedDaemonSet, renderIPsecDaemonSetAsCreateWaitOnly bool) {
 isHypershiftHostedCluster := bootstrapResult.Infra.HostedControlPlane != nil
-isIpsecUpgrade := bootstrapResult.OVN.IPsecUpdateStatus != nil && bootstrapResult.OVN.IPsecUpdateStatus.LegacyIPsecUpgrade
 isOVNIPsecActive := bootstrapResult.OVN.IPsecUpdateStatus != nil && bootstrapResult.OVN.IPsecUpdateStatus.OVNIPsecActive
 
 mode := GetIPsecMode(conf)
@@ -636,7 +617,6 @@ func shouldRenderIPsec(conf *operv1.OVNKubernetesConfig, bootstrapResult *bootst
 // change to them. So during upgrade, we must keep track if IPsec MachineConfigs are
 // active or not for non Hybrid hosted cluster.
 isIPsecMachineConfigActive := isIPsecMachineConfigActive(bootstrapResult.Infra)
-isIPsecMachineConfigNotActiveOnUpgrade := isIpsecUpgrade && !isIPsecMachineConfigActive && !isHypershiftHostedCluster
 isMachineConfigClusterOperatorReady := bootstrapResult.Infra.MachineConfigClusterOperatorReady
 isCNOIPsecMachineConfigPresent := isCNOIPsecMachineConfigPresent(bootstrapResult.Infra)
 
@@ -645,15 +625,19 @@ func shouldRenderIPsec(conf *operv1.OVNKubernetesConfig, bootstrapResult *bootst
 renderIPsecDaemonSet = isOVNIPsecActive || mode == operv1.IPsecModeFull
 
 // If ipsec is enabled, we render the host ipsec deployment except for
-// hypershift hosted clusters and we need to wait for the ipsec MachineConfig
-// extensions to be active first. We must also render host ipsec deployment
-// at the time of upgrade though user created IPsec Machine Config is not
-// present/active.
-renderIPsecHostDaemonSet = (renderIPsecDaemonSet && isIPsecMachineConfigActive && !isHypershiftHostedCluster) || isIPsecMachineConfigNotActiveOnUpgrade
-
-// The containerized ipsec deployment is only rendered during upgrades or
-// for hypershift hosted clusters.
-renderIPsecContainerizedDaemonSet = (renderIPsecDaemonSet && isHypershiftHostedCluster) || isIPsecMachineConfigNotActiveOnUpgrade
+// hypershift hosted clusters. We must also render host ipsec daemonset
+// when any of the machine config pool in paused state without ipsec machine
+// config deployed and at the same time unpaused pools having ipsec machine
+// config deployed. This helps host ipsec daemonset pods to be effective on
+// unpaused pool nodes.
+renderIPsecHostDaemonSet = renderIPsecDaemonSet && !isHypershiftHostedCluster
+
+// The containerized ipsec deployment is only rendered for hypershift hosted clusters.
+// We must also render containerized ipsec daemonset when any of the machine config
+// pool in paused state without ipsec machine config deployed and at the same time
+// unpaused pools having ipsec machine config deployed. This helps containerized daemonset
+// pods to be effective on paused pool nodes.
+renderIPsecContainerizedDaemonSet = (renderIPsecDaemonSet && isHypershiftHostedCluster) || !isIPsecMachineConfigActive
 
 // MachineConfig IPsec extensions rollout is needed for the ipsec enablement and are used in both External and Full modes.
 // except when the containerized deployment is used in hypershift hosted clusters.
@@ -666,9 +650,8 @@ func shouldRenderIPsec(conf *operv1.OVNKubernetesConfig, bootstrapResult *bootst
 // to be active if it's not an upgrade and not a hypershift hosted cluster.
 renderIPsecOVN = (renderIPsecHostDaemonSet || renderIPsecContainerizedDaemonSet) && mode == operv1.IPsecModeFull
 
-// While OVN ipsec is being upgraded and IPsec MachineConfigs deployment is in progress
-// (or) IPsec config in OVN is being disabled, then ipsec deployment is not updated.
-renderIPsecDaemonSetAsCreateWaitOnly = isIPsecMachineConfigNotActiveOnUpgrade || (isOVNIPsecActive && !renderIPsecOVN)
+// IPsec config in OVN is being disabled, then ipsec deployment is not updated.
+renderIPsecDaemonSetAsCreateWaitOnly = isOVNIPsecActive && !renderIPsecOVN
 
 return
 }
@@ -1203,66 +1186,40 @@ func bootstrapOVN(conf *operv1.Network, kubeClient cnoclient.Client, infraStatus
 prepullerStatus.Progressing = daemonSetProgressing(prePullerDaemonSet, true)
 }
 
-ipsecDaemonSet := &appsv1.DaemonSet{
+ipsecStatus := &bootstrap.OVNIPsecStatus{}
+ipsecContainerizedDaemonSet := &appsv1.DaemonSet{
 TypeMeta: metav1.TypeMeta{
 Kind: "DaemonSet",
 APIVersion: appsv1.SchemeGroupVersion.String(),
 },
 }
-
-ipsecStatus := &bootstrap.OVNIPsecStatus{}
-
-// The IPsec daemonset name is ovn-ipsec if we are upgrading from <= 4.13.
-nsn = types.NamespacedName{Namespace: util.OVN_NAMESPACE, Name: "ovn-ipsec"}
-if err := kubeClient.ClientFor("").CRClient().Get(context.TODO(), nsn, ipsecDaemonSet); err != nil {
+ipsecHostDaemonSet := &appsv1.DaemonSet{
+TypeMeta: metav1.TypeMeta{
+Kind: "DaemonSet",
+APIVersion: appsv1.SchemeGroupVersion.String(),
+},
+}
+// Retrieve container based IPsec daemonset with name ovn-ipsec-containerized.
+nsn = types.NamespacedName{Namespace: util.OVN_NAMESPACE, Name: "ovn-ipsec-containerized"}
+if err := kubeClient.ClientFor("").CRClient().Get(context.TODO(), nsn, ipsecContainerizedDaemonSet); err != nil {
 if !apierrors.IsNotFound(err) {
-return nil, fmt.Errorf("Failed to retrieve existing pre-4.14 ipsec DaemonSet: %w", err)
+return nil, fmt.Errorf("Failed to retrieve ipsec containerized DaemonSet: %w", err)
 } else {
-ipsecStatus = nil
+ipsecContainerizedDaemonSet = nil
 }
-} else {
-ipsecStatus.LegacyIPsecUpgrade = true
-}
-
-if ipsecStatus == nil {
-ipsecStatus = &bootstrap.OVNIPsecStatus{}
-ipsecContainerizedDaemonSet := &appsv1.DaemonSet{
-TypeMeta: metav1.TypeMeta{
-Kind: "DaemonSet",
-APIVersion: appsv1.SchemeGroupVersion.String(),
-},
-}
-ipsecHostDaemonSet := &appsv1.DaemonSet{
-TypeMeta: metav1.TypeMeta{
-Kind: "DaemonSet",
-APIVersion: appsv1.SchemeGroupVersion.String(),
-},
-}
-// Retrieve container based IPsec daemonset with name ovn-ipsec-containerized.
-nsn = types.NamespacedName{Namespace: util.OVN_NAMESPACE, Name: "ovn-ipsec-containerized"}
-if err := kubeClient.ClientFor("").CRClient().Get(context.TODO(), nsn, ipsecContainerizedDaemonSet); err != nil {
-if !apierrors.IsNotFound(err) {
-return nil, fmt.Errorf("Failed to retrieve existing ipsec containerized DaemonSet: %w", err)
-} else {
-ipsecContainerizedDaemonSet = nil
-}
-}
-// Retrieve host based IPsec daemonset with name ovn-ipsec-host
-nsn = types.NamespacedName{Namespace: util.OVN_NAMESPACE, Name: "ovn-ipsec-host"}
-if err := kubeClient.ClientFor("").CRClient().Get(context.TODO(), nsn, ipsecHostDaemonSet); err != nil {
-if !apierrors.IsNotFound(err) {
-return nil, fmt.Errorf("Failed to retrieve existing ipsec host DaemonSet: %w", err)
-} else {
-ipsecHostDaemonSet = nil
-}
-}
-if ipsecContainerizedDaemonSet != nil && ipsecHostDaemonSet != nil {
-// Both IPsec daemonset versions exist, so this is an upgrade from 4.14.
-ipsecStatus.LegacyIPsecUpgrade = true
-} else if ipsecContainerizedDaemonSet == nil && ipsecHostDaemonSet == nil {
-ipsecStatus = nil
+}
+// Retrieve host based IPsec daemonset with name ovn-ipsec-host
+nsn = types.NamespacedName{Namespace: util.OVN_NAMESPACE, Name: "ovn-ipsec-host"}
+if err := kubeClient.ClientFor("").CRClient().Get(context.TODO(), nsn, ipsecHostDaemonSet); err != nil {
+if !apierrors.IsNotFound(err) {
+return nil, fmt.Errorf("Failed to retrieve ipsec host DaemonSet: %w", err)
+} else {
+ipsecHostDaemonSet = nil
 }
 }
+if ipsecContainerizedDaemonSet == nil && ipsecHostDaemonSet == nil {
+ipsecStatus = nil
+}
 
 // set OVN IPsec status into ipsecStatus only when IPsec daemonset(s) exists in the cluster.
 if ipsecStatus != nil {