@@ -258,6 +258,7 @@ func (r *RKE2ControlPlaneReconciler) SetupWithManager(ctx context.Context, mgr c
 
 	r.controller = c
 	r.recorder = mgr.GetEventRecorderFor("rke2-control-plane-controller")
+	r.ssaCache = ssa.NewCache("rke2-control-plane")
 
 	// Set up a clusterCache to provide to controllers
 	// requiring a connection to a remote cluster
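Side note (not part of the diff): the new ssaCache field feeds the provider's internal server-side-apply helpers. As a rough, hedged illustration of what an SSA apply under a dedicated field owner looks like with plain controller-runtime (function name and object are hypothetical, and the caching layer is not modeled here):

package sketch

import (
	"context"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// applyMachine is a minimal sketch: it server-side-applies the desired Machine
// under a dedicated field owner so that fields the owner stops setting can be
// dropped on later applies. It does not use the ssaCache added above.
func applyMachine(ctx context.Context, c client.Client, desired *clusterv1.Machine) error {
	// Apply patches require apiVersion/kind to be populated on the object.
	desired.SetGroupVersionKind(clusterv1.GroupVersion.WithKind("Machine"))

	return c.Patch(ctx, desired, client.Apply,
		client.FieldOwner("rke2-control-plane"),
		client.ForceOwnership)
}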
@@ -503,10 +504,6 @@ func (r *RKE2ControlPlaneReconciler) reconcileNormal(
 		return result, err
 	}
 
-	if err := r.syncMachines(ctx, rke2.ControlPlane{}); err != nil {
-		return ctrl.Result{}, errors.Wrap(err, "failed to sync Machines")
-	}
-
 	controlPlaneMachines, err := r.managementClusterUncached.GetMachinesForCluster(
 		ctx,
 		util.ObjectKey(cluster),
@@ -531,6 +528,10 @@ func (r *RKE2ControlPlaneReconciler) reconcileNormal(
 		return ctrl.Result{}, err
 	}
 
+	if err := r.syncMachines(ctx, controlPlane); err != nil {
+		return ctrl.Result{}, errors.Wrap(err, "failed to sync Machines")
+	}
+
 	// Aggregate the operational state of all the machines; while aggregating we are adding the
 	// source ref (reason@machine/name) so the problem can be easily tracked down to its source machine.
 	conditions.SetAggregate(controlPlane.RCP, controlplanev1.MachinesReadyCondition,
@@ -629,7 +630,7 @@ func (r *RKE2ControlPlaneReconciler) GetWorkloadCluster(ctx context.Context, con
 // reconcileEtcdMembers ensures the number of etcd members is in sync with the number of machines/nodes.
 // This is usually required after a machine deletion.
 //
-// NOTE: this func uses KCP conditions, it is required to call reconcileControlPlaneConditions before this.
+// NOTE: this func uses RKE2ControlPlane conditions, it is required to call reconcileControlPlaneConditions before this.
 func (r *RKE2ControlPlaneReconciler) reconcileEtcdMembers(ctx context.Context, controlPlane *rke2.ControlPlane) error {
 	log := ctrl.LoggerFrom(ctx)
 
@@ -1106,20 +1107,41 @@ func (r *RKE2ControlPlaneReconciler) getWorkloadCluster(ctx context.Context, clu
 	return workloadCluster, nil
 }
 
-// syncMachines updates Machines, InfrastructureMachines and Rke2Configs to propagate in-place mutable fields from KCP.
-// Note: It also cleans up managed fields of all Machines so that Machines that were
-// created/patched before (< v1.4.0) the controller adopted Server-Side-Apply (SSA) can also work with SSA.
+// syncMachines updates Machines, InfrastructureMachines and Rke2Configs to propagate in-place mutable fields from RKE2ControlPlane.
 // Note: For InfrastructureMachines and Rke2Configs it also drops ownership of "metadata.labels" and
 // "metadata.annotations" from "manager" so that "rke2controlplane" can own these fields and can work with SSA.
 // Otherwise, fields would be co-owned by our "old" "manager" and "rke2controlplane" and then we would not be
 // able to e.g. drop labels and annotations.
-func (r *RKE2ControlPlaneReconciler) syncMachines(ctx context.Context, controlPlane rke2.ControlPlane) error {
+func (r *RKE2ControlPlaneReconciler) syncMachines(ctx context.Context, controlPlane *rke2.ControlPlane) error {
 	patchHelpers := map[string]*patch.Helper{}
 
 	for machineName := range controlPlane.Machines {
 		m := controlPlane.Machines[machineName]
-		// If the machine is already being deleted, we don't need to update it.
+		// If the Machine is already being deleted, we only need to sync
+		// the subset of fields that impact tearing down the Machine.
 		if !m.DeletionTimestamp.IsZero() {
+			patchHelper, err := patch.NewHelper(m, r.Client)
+			if err != nil {
+				return err
+			}
+
+			// Set all other in-place mutable fields that impact the ability to tear down existing machines.
+			m.Spec.NodeDrainTimeout = controlPlane.RCP.Spec.MachineTemplate.NodeDrainTimeout
+			m.Spec.NodeDeletionTimeout = controlPlane.RCP.Spec.MachineTemplate.NodeDeletionTimeout
+			m.Spec.NodeVolumeDetachTimeout = controlPlane.RCP.Spec.MachineTemplate.NodeVolumeDetachTimeout
+
+			if err := patchHelper.Patch(ctx, m); err != nil {
+				return err
+			}
+
+			controlPlane.Machines[machineName] = m
+			patchHelper, err = patch.NewHelper(m, r.Client)
+			if err != nil { //nolint:wsl
+				return err
+			}
+
+			patchHelpers[machineName] = patchHelper
+
 			continue
 		}
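For readers unfamiliar with the patch.Helper flow used in the deletion branch above, here is a self-contained sketch of the same read-modify-patch pattern (the timeout value is made up for illustration; in the controller it is copied from RCP.Spec.MachineTemplate):

package sketch

import (
	"context"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	"sigs.k8s.io/cluster-api/util/patch"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// syncDrainTimeout mirrors the pattern above: snapshot the Machine with a patch
// helper, mutate only in-place mutable fields, then patch the diff back.
func syncDrainTimeout(ctx context.Context, c client.Client, m *clusterv1.Machine) error {
	helper, err := patch.NewHelper(m, c)
	if err != nil {
		return err
	}

	// Hypothetical value for the sketch only.
	m.Spec.NodeDrainTimeout = &metav1.Duration{Duration: 10 * time.Minute}

	return helper.Patch(ctx, m)
}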
@@ -1155,32 +1177,40 @@ func (r *RKE2ControlPlaneReconciler) syncMachines(ctx context.Context, controlPl
 			{"f:metadata", "f:annotations"},
 			{"f:metadata", "f:labels"},
 		}
-		infraMachine := controlPlane.InfraResources[machineName]
-		// Cleanup managed fields of all InfrastructureMachines to drop ownership of labels and annotations
-		// from "manager". We do this so that InfrastructureMachines that are created using the Create method
-		// can also work with SSA. Otherwise, labels and annotations would be co-owned by our "old" "manager"
-		// and "rke2-kubeadmcontrolplane" and then we would not be able to e.g. drop labels and annotations.
-		if err := ssa.DropManagedFields(ctx, r.Client, infraMachine, rke2ManagerName, labelsAndAnnotationsManagedFieldPaths); err != nil {
-			return errors.Wrapf(err, "failed to clean up managedFields of InfrastructureMachine %s", klog.KObj(infraMachine))
-		}
-		// Update in-place mutating fields on InfrastructureMachine.
-		if err := r.UpdateExternalObject(ctx, infraMachine, controlPlane.RCP, controlPlane.Cluster); err != nil {
-			return errors.Wrapf(err, "failed to update InfrastructureMachine %s", klog.KObj(infraMachine))
+		infraMachine, infraMachineFound := controlPlane.InfraResources[machineName]
+		// Only update the InfraMachine if it is already found, otherwise just skip it.
+		// This could happen e.g. if the cache is not up-to-date yet.
+		if infraMachineFound {
+			// Cleanup managed fields of all InfrastructureMachines to drop ownership of labels and annotations
+			// from "manager". We do this so that InfrastructureMachines that are created using the Create method
+			// can also work with SSA. Otherwise, labels and annotations would be co-owned by our "old" "manager"
+			// and "rke2-controlplane" and then we would not be able to e.g. drop labels and annotations.
+			if err := ssa.DropManagedFields(ctx, r.Client, infraMachine, rke2ManagerName, labelsAndAnnotationsManagedFieldPaths); err != nil {
+				return errors.Wrapf(err, "failed to clean up managedFields of InfrastructureMachine %s", klog.KObj(infraMachine))
+			}
+			// Update in-place mutating fields on InfrastructureMachine.
+			if err := r.UpdateExternalObject(ctx, infraMachine, controlPlane.RCP, controlPlane.Cluster); err != nil {
+				return errors.Wrapf(err, "failed to update InfrastructureMachine %s", klog.KObj(infraMachine))
+			}
 		}
 
-		rke2Config := controlPlane.Rke2Configs[machineName]
-		// Note: Set the GroupVersionKind because updateExternalObject depends on it.
-		rke2Config.SetGroupVersionKind(m.Spec.Bootstrap.ConfigRef.GroupVersionKind())
-		// Cleanup managed fields of all Rke2Configs to drop ownership of labels and annotations
-		// from "manager". We do this so that Rke2Configs that are created using the Create method
-		// can also work with SSA. Otherwise, labels and annotations would be co-owned by our "old" "manager"
-		// and "rke2controlplane" and then we would not be able to e.g. drop labels and annotations.
-		if err := ssa.DropManagedFields(ctx, r.Client, rke2Config, rke2ManagerName, labelsAndAnnotationsManagedFieldPaths); err != nil {
-			return errors.Wrapf(err, "failed to clean up managedFields of KubeadmConfig %s", klog.KObj(rke2Config))
-		}
-		// Update in-place mutating fields on BootstrapConfig.
-		if err := r.UpdateExternalObject(ctx, rke2Config, controlPlane.RCP, controlPlane.Cluster); err != nil {
-			return errors.Wrapf(err, "failed to update Rke2Config %s", klog.KObj(rke2Config))
+		rke2Config, rke2ConfigFound := controlPlane.Rke2Configs[machineName]
+		// Only update the RKE2Config if it is already found, otherwise just skip it.
+		// This could happen e.g. if the cache is not up-to-date yet.
+		if rke2ConfigFound {
+			// Note: Set the GroupVersionKind because updateExternalObject depends on it.
+			rke2Config.SetGroupVersionKind(m.Spec.Bootstrap.ConfigRef.GroupVersionKind())
+			// Cleanup managed fields of all RKE2Configs to drop ownership of labels and annotations
+			// from "manager". We do this so that RKE2Configs that are created using the Create method
+			// can also work with SSA. Otherwise, labels and annotations would be co-owned by our "old" "manager"
+			// and "rke2-controlplane" and then we would not be able to e.g. drop labels and annotations.
+			if err := ssa.DropManagedFields(ctx, r.Client, rke2Config, rke2ManagerName, labelsAndAnnotationsManagedFieldPaths); err != nil {
+				return errors.Wrapf(err, "failed to clean up managedFields of RKE2Config %s", klog.KObj(rke2Config))
+			}
+			// Update in-place mutating fields on BootstrapConfig.
+			if err := r.UpdateExternalObject(ctx, rke2Config, controlPlane.RCP, controlPlane.Cluster); err != nil {
+				return errors.Wrapf(err, "failed to update RKE2Config %s", klog.KObj(rke2Config))
+			}
 		}
 	}
 	// Update the patch helpers.
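ssa.DropManagedFields rewrites the ownership recorded under metadata.managedFields. As an illustration only (not part of this change), a small way to inspect which managers currently own fields on an object:

package sketch

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// printFieldOwners lists the field managers recorded on an object; these entries
// are what gets rewritten when label/annotation ownership is dropped from "manager".
func printFieldOwners(obj metav1.Object) {
	for _, entry := range obj.GetManagedFields() {
		fmt.Printf("manager=%s operation=%s apiVersion=%s\n",
			entry.Manager, entry.Operation, entry.APIVersion)
	}
}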